diff --git a/cmd/main.go b/cmd/main.go index 242a3867..70e7800a 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -63,7 +63,7 @@ func main() { } loader := manifestreader.NewManifestLoader(f) - invFactory := inventory.ConfigMapClientFactory{StatusPolicy: inventory.StatusPolicyNone} + invFactory := inventory.ConfigMapClientFactory{} names := []string{"init", "apply", "destroy", "diff", "preview", "status"} subCmds := []*cobra.Command{ diff --git a/cmd/status/cmdstatus.go b/cmd/status/cmdstatus.go index 0ad87a8e..a519f955 100644 --- a/cmd/status/cmdstatus.go +++ b/cmd/status/cmdstatus.go @@ -95,13 +95,13 @@ type Runner struct { timeout time.Duration output string - invType string - inventoryNames string - inventoryNameSet map[string]bool - namespaces string - namespaceSet map[string]bool - statuses string - statusSet map[string]bool + invType string + inventoryNames string + inventoryIDSet map[string]bool + namespaces string + namespaceSet map[string]bool + statuses string + statusSet map[string]bool PollerFactoryFunc func(cmdutil.Factory) (poller.Poller, error) } @@ -124,9 +124,9 @@ func (r *Runner) preRunE(*cobra.Command, []string) error { } if r.inventoryNames != "" { - r.inventoryNameSet = make(map[string]bool) + r.inventoryIDSet = make(map[string]bool) for _, name := range strings.Split(r.inventoryNames, ",") { - r.inventoryNameSet[name] = true + r.inventoryIDSet[name] = true } } @@ -164,14 +164,16 @@ func (r *Runner) loadInvFromDisk(cmd *cobra.Command, args []string) (*printer.Pr // Based on the inventory template manifest we look up the inventory // from the live state using the inventory client. - identifiers, err := invClient.GetClusterObjs(cmd.Context(), inv) + clusterInventory, err := invClient.Get(cmd.Context(), inv, inventory.GetOptions{}) if err != nil { return nil, err } + identifiers := clusterInventory.ObjectRefs() + printData := printer.PrintData{ Identifiers: object.ObjMetadataSet{}, - InvNameMap: make(map[object.ObjMetadata]string), + InvIDMap: make(map[object.ObjMetadata]fmt.Stringer), StatusSet: r.statusSet, } @@ -179,7 +181,7 @@ func (r *Runner) loadInvFromDisk(cmd *cobra.Command, args []string) (*printer.Pr // check if the object is under one of the targeted namespaces if _, ok := r.namespaceSet[obj.Namespace]; ok || len(r.namespaceSet) == 0 { // add to the map for future reference - printData.InvNameMap[obj] = inv.Name() + printData.InvIDMap[obj] = inv.ID() // append to identifiers printData.Identifiers = append(printData.Identifiers, obj) } @@ -197,18 +199,23 @@ func (r *Runner) listInvFromCluster() (*printer.PrintData, error) { // initialize maps in printData printData := printer.PrintData{ Identifiers: object.ObjMetadataSet{}, - InvNameMap: make(map[object.ObjMetadata]string), + InvIDMap: make(map[object.ObjMetadata]fmt.Stringer), StatusSet: r.statusSet, } - identifiersMap, err := invClient.ListClusterInventoryObjs(r.ctx) + inventories, err := invClient.List(r.ctx, inventory.ListOptions{}) if err != nil { return nil, err } - for invName, identifiers := range identifiersMap { + identifiersMap := make(map[inventory.ID]object.ObjMetadataSet) + for _, inv := range inventories { + identifiersMap[inv.ID()] = inv.ObjectRefs() + } + + for invID, identifiers := range identifiersMap { // Check if there are targeted inventory names and include the current inventory name - if _, ok := r.inventoryNameSet[invName]; !ok && len(r.inventoryNameSet) != 0 { + if _, ok := r.inventoryIDSet[invID.String()]; !ok && len(r.inventoryIDSet) != 0 { continue } // Filter objects @@ -216,7 +223,7 @@ 
func (r *Runner) listInvFromCluster() (*printer.PrintData, error) { // check if the object is under one of the targeted namespaces if _, ok := r.namespaceSet[obj.Namespace]; ok || len(r.namespaceSet) == 0 { // add to the map for future reference - printData.InvNameMap[obj] = invName + printData.InvIDMap[obj] = invID // append to identifiers printData.Identifiers = append(printData.Identifiers, obj) } diff --git a/cmd/status/cmdstatus_test.go b/cmd/status/cmdstatus_test.go index cf0f196c..9019078a 100644 --- a/cmd/status/cmdstatus_test.go +++ b/cmd/status/cmdstatus_test.go @@ -31,8 +31,8 @@ kind: ConfigMap apiVersion: v1 metadata: labels: - cli-utils.sigs.k8s.io/inventory-id: test - name: foo + cli-utils.sigs.k8s.io/inventory-id: inventory-id + name: inventory-name namespace: default ` depObject = object.ObjMetadata{ @@ -121,8 +121,8 @@ func TestCommand(t *testing.T) { }, }, expectedOutput: ` -foo/deployment.apps/default/foo is InProgress: inProgress -foo/statefulset.apps/default/bar is Current: current +inventory-id/deployment.apps/default/foo is InProgress: inProgress +inventory-id/statefulset.apps/default/bar is Current: current `, }, "wait for all current": { @@ -168,10 +168,10 @@ foo/statefulset.apps/default/bar is Current: current }, }, expectedOutput: ` -foo/deployment.apps/default/foo is InProgress: inProgress -foo/statefulset.apps/default/bar is InProgress: inProgress -foo/statefulset.apps/default/bar is Current: current -foo/deployment.apps/default/foo is Current: current +inventory-id/deployment.apps/default/foo is InProgress: inProgress +inventory-id/statefulset.apps/default/bar is InProgress: inProgress +inventory-id/statefulset.apps/default/bar is Current: current +inventory-id/deployment.apps/default/foo is Current: current `, }, "wait for all deleted": { @@ -201,8 +201,8 @@ foo/deployment.apps/default/foo is Current: current }, }, expectedOutput: ` -foo/statefulset.apps/default/bar is NotFound: notFound -foo/deployment.apps/default/foo is NotFound: notFound +inventory-id/statefulset.apps/default/bar is NotFound: notFound +inventory-id/deployment.apps/default/foo is NotFound: notFound `, }, "forever with timeout": { @@ -233,8 +233,8 @@ foo/deployment.apps/default/foo is NotFound: notFound }, }, expectedOutput: ` -foo/statefulset.apps/default/bar is InProgress: inProgress -foo/deployment.apps/default/foo is InProgress: inProgress +inventory-id/statefulset.apps/default/bar is InProgress: inProgress +inventory-id/deployment.apps/default/foo is InProgress: inProgress `, }, } @@ -283,7 +283,7 @@ foo/deployment.apps/default/foo is InProgress: inProgress "name": "foo", "timestamp": "", "type": "status", - "inventory-name": "foo", + "inventory-name": "inventory-id", "status": "InProgress", "message": "inProgress", }, @@ -294,7 +294,7 @@ foo/deployment.apps/default/foo is InProgress: inProgress "name": "bar", "timestamp": "", "type": "status", - "inventory-name": "foo", + "inventory-name": "inventory-id", "status": "Current", "message": "current", }, @@ -350,7 +350,7 @@ foo/deployment.apps/default/foo is InProgress: inProgress "name": "foo", "timestamp": "", "type": "status", - "inventory-name": "foo", + "inventory-name": "inventory-id", "status": "InProgress", "message": "inProgress", }, @@ -361,7 +361,7 @@ foo/deployment.apps/default/foo is InProgress: inProgress "name": "bar", "timestamp": "", "type": "status", - "inventory-name": "foo", + "inventory-name": "inventory-id", "status": "InProgress", "message": "inProgress", }, @@ -372,7 +372,7 @@ foo/deployment.apps/default/foo is 
InProgress: inProgress "name": "bar", "timestamp": "", "type": "status", - "inventory-name": "foo", + "inventory-name": "inventory-id", "status": "Current", "message": "current", }, @@ -383,7 +383,7 @@ foo/deployment.apps/default/foo is InProgress: inProgress "name": "foo", "timestamp": "", "type": "status", - "inventory-name": "foo", + "inventory-name": "inventory-id", "status": "Current", "message": "current", }, @@ -423,7 +423,7 @@ foo/deployment.apps/default/foo is InProgress: inProgress "name": "bar", "timestamp": "", "type": "status", - "inventory-name": "foo", + "inventory-name": "inventory-id", "status": "NotFound", "message": "notFound", }, @@ -434,7 +434,7 @@ foo/deployment.apps/default/foo is InProgress: inProgress "name": "foo", "timestamp": "", "type": "status", - "inventory-name": "foo", + "inventory-name": "inventory-id", "status": "NotFound", "message": "notFound", }, @@ -475,7 +475,7 @@ foo/deployment.apps/default/foo is InProgress: inProgress "name": "bar", "timestamp": "", "type": "status", - "inventory-name": "foo", + "inventory-name": "inventory-id", "status": "InProgress", "message": "inProgress", }, @@ -486,7 +486,7 @@ foo/deployment.apps/default/foo is InProgress: inProgress "name": "foo", "timestamp": "", "type": "status", - "inventory-name": "foo", + "inventory-name": "inventory-id", "status": "InProgress", "message": "inProgress", }, diff --git a/cmd/status/printers/event/printer.go b/cmd/status/printers/event/printer.go index 845bd780..304015a4 100644 --- a/cmd/status/printers/event/printer.go +++ b/cmd/status/printers/event/printer.go @@ -67,9 +67,9 @@ func (ep *Printer) printStatusEvent(se pollevent.Event) error { switch se.Type { case pollevent.ResourceUpdateEvent: id := se.Resource.Identifier - var invName string + var invName fmt.Stringer var ok bool - if invName, ok = ep.Data.InvNameMap[id]; !ok { + if invName, ok = ep.Data.InvIDMap[id]; !ok { return fmt.Errorf("%s: resource not found", id) } // filter out status that are not assigned diff --git a/cmd/status/printers/json/printer.go b/cmd/status/printers/json/printer.go index e65e074e..8b29d3ae 100644 --- a/cmd/status/printers/json/printer.go +++ b/cmd/status/printers/json/printer.go @@ -69,9 +69,9 @@ func (ep *Printer) printStatusEvent(se pollevent.Event) error { switch se.Type { case pollevent.ResourceUpdateEvent: id := se.Resource.Identifier - var invName string + var invName fmt.Stringer var ok bool - if invName, ok = ep.Data.InvNameMap[id]; !ok { + if invName, ok = ep.Data.InvIDMap[id]; !ok { return fmt.Errorf("%s: resource not found", id) } // filter out status that are not assigned diff --git a/cmd/status/printers/printer/printer.go b/cmd/status/printers/printer/printer.go index b89a4bc3..f1d840f9 100644 --- a/cmd/status/printers/printer/printer.go +++ b/cmd/status/printers/printer/printer.go @@ -4,6 +4,8 @@ package printer import ( + "fmt" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" "sigs.k8s.io/cli-utils/pkg/object" @@ -12,7 +14,7 @@ import ( // PrintData records data required for printing type PrintData struct { Identifiers object.ObjMetadataSet - InvNameMap map[object.ObjMetadata]string + InvIDMap map[object.ObjMetadata]fmt.Stringer StatusSet map[string]bool } diff --git a/cmd/status/printers/table/adapter.go b/cmd/status/printers/table/adapter.go index 35207504..ada5418b 100644 --- a/cmd/status/printers/table/adapter.go +++ b/cmd/status/printers/table/adapter.go @@ -4,6 +4,7 @@ package table import ( + "fmt" "strings" 
"sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector" @@ -17,13 +18,13 @@ import ( // needed by the BaseTablePrinter. type CollectorAdapter struct { collector *collector.ResourceStatusCollector - invNameMap map[object.ObjMetadata]string + invNameMap map[object.ObjMetadata]fmt.Stringer statusSet map[string]bool } type ResourceInfo struct { resourceStatus *pe.ResourceStatus - invName string + invName fmt.Stringer } func (r *ResourceInfo) Identifier() object.ObjMetadata { diff --git a/cmd/status/printers/table/printer.go b/cmd/status/printers/table/printer.go index f0acf565..8cca425d 100644 --- a/cmd/status/printers/table/printer.go +++ b/cmd/status/printers/table/printer.go @@ -47,7 +47,7 @@ func (t *Printer) Print(ch <-chan event.Event, identifiers object.ObjMetadataSet // printing the latest state on a regular cadence. printCompleted := t.runPrintLoop(&CollectorAdapter{ collector: coll, - invNameMap: t.PrintData.InvNameMap, + invNameMap: t.PrintData.InvIDMap, statusSet: t.PrintData.StatusSet, }, stop) @@ -77,7 +77,7 @@ var invNameColumn = table.ColumnDef{ ColumnHeader: "INVENTORY_NAME", ColumnWidth: 30, PrintResourceFunc: func(w io.Writer, width int, r table.Resource) (int, error) { - group := r.(*ResourceInfo).invName + group := r.(*ResourceInfo).invName.String() if len(group) > width { group = group[:width] } diff --git a/pkg/apis/actuation/types.go b/pkg/apis/actuation/types.go index 0f1dd642..63cf3d6c 100644 --- a/pkg/apis/actuation/types.go +++ b/pkg/apis/actuation/types.go @@ -4,31 +4,9 @@ package actuation import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ) -// Inventory represents the inventory object in memory. -// Inventory is currently only used for in-memory storage and not serialized to -// disk or to the API server. -type Inventory struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec InventorySpec `json:"spec,omitempty"` - Status InventoryStatus `json:"status,omitempty"` -} - -// InventorySpec is the specification of the desired/expected inventory state. -type InventorySpec struct { - Objects []ObjectReference `json:"objects,omitempty"` -} - -// InventoryStatus is the status of the current/last-known inventory state. -type InventoryStatus struct { - Objects []ObjectStatus `json:"objects,omitempty"` -} - // ObjectReference is a reference to a KRM resource by name and kind. // // Kubernetes only stores one API Version for each Kind at any given time, diff --git a/pkg/apis/actuation/zz_generated.deepcopy.go b/pkg/apis/actuation/zz_generated.deepcopy.go index 1045f12c..35d40082 100644 --- a/pkg/apis/actuation/zz_generated.deepcopy.go +++ b/pkg/apis/actuation/zz_generated.deepcopy.go @@ -1,4 +1,5 @@ //go:build !ignore_autogenerated +// +build !ignore_autogenerated // Copyright 2023 The Kubernetes Authors. // SPDX-License-Identifier: Apache-2.0 @@ -7,68 +8,6 @@ package actuation -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Inventory) DeepCopyInto(out *Inventory) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Inventory. 
-func (in *Inventory) DeepCopy() *Inventory { - if in == nil { - return nil - } - out := new(Inventory) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InventorySpec) DeepCopyInto(out *InventorySpec) { - *out = *in - if in.Objects != nil { - in, out := &in.Objects, &out.Objects - *out = make([]ObjectReference, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventorySpec. -func (in *InventorySpec) DeepCopy() *InventorySpec { - if in == nil { - return nil - } - out := new(InventorySpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InventoryStatus) DeepCopyInto(out *InventoryStatus) { - *out = *in - if in.Objects != nil { - in, out := &in.Objects, &out.Objects - *out = make([]ObjectStatus, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryStatus. -func (in *InventoryStatus) DeepCopy() *InventoryStatus { - if in == nil { - return nil - } - out := new(InventoryStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { *out = *in diff --git a/pkg/apply/applier.go b/pkg/apply/applier.go index b0c99b0a..ad76e3f4 100644 --- a/pkg/apply/applier.go +++ b/pkg/apply/applier.go @@ -8,6 +8,7 @@ import ( "fmt" "time" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -48,13 +49,14 @@ type Applier struct { openAPIGetter discovery.OpenAPISchemaInterface mapper meta.RESTMapper infoHelper info.Helper + statusPolicy inventory.StatusPolicy } // prepareObjects returns the set of objects to apply and to prune or // an error if one occurred. -func (a *Applier) prepareObjects(ctx context.Context, localInv inventory.Info, localObjs object.UnstructuredSet, +func (a *Applier) prepareObjects(ctx context.Context, clusterInv inventory.Inventory, localObjs object.UnstructuredSet, o ApplierOptions) (object.UnstructuredSet, object.UnstructuredSet, error) { - if localInv == nil { + if clusterInv == nil { return nil, nil, fmt.Errorf("the local inventory can't be nil") } if err := inventory.ValidateNoInventory(localObjs); err != nil { @@ -62,9 +64,9 @@ func (a *Applier) prepareObjects(ctx context.Context, localInv inventory.Info, l } // Add the inventory annotation to the resources being applied. 
for _, localObj := range localObjs { - inventory.AddInventoryIDAnnotation(localObj, localInv) + inventory.AddInventoryIDAnnotation(localObj, clusterInv.ID()) } - pruneObjs, err := a.pruner.GetPruneObjs(ctx, localInv, localObjs, prune.Options{ + pruneObjs, err := a.pruner.GetPruneObjs(ctx, clusterInv, localObjs, prune.Options{ DryRunStrategy: o.DryRunStrategy, }) if err != nil { @@ -96,8 +98,21 @@ func (a *Applier) Run(ctx context.Context, invInfo inventory.Info, objects objec } validator.Validate(objects) + inv, err := a.invClient.Get(ctx, invInfo, inventory.GetOptions{}) + if apierrors.IsNotFound(err) { + inv = invInfo.NewEmptyInventory() + } else if err != nil { + handleError(eventChannel, err) + return + } + if inv.ID() != invInfo.ID() { + handleError(eventChannel, fmt.Errorf("expected inventory object to have inventory-id %q but got %q", + invInfo.ID(), inv.ID())) + return + } + // Decide which objects to apply and which to prune - applyObjs, pruneObjs, err := a.prepareObjects(ctx, invInfo, objects, options) + applyObjs, pruneObjs, err := a.prepareObjects(ctx, inv, objects, options) if err != nil { handleError(eventChannel, err) return @@ -154,11 +169,13 @@ func (a *Applier) Run(ctx context.Context, invInfo inventory.Info, objects objec OpenAPIGetter: a.openAPIGetter, InfoHelper: a.infoHelper, Mapper: a.mapper, + Inventory: inv, InvClient: a.invClient, Collector: vCollector, ApplyFilters: applyFilters, ApplyMutators: applyMutators, PruneFilters: pruneFilters, + StatusPolicy: a.statusPolicy, } opts := solver.Options{ ServerSideOptions: options.ServerSideOptions, @@ -175,7 +192,6 @@ func (a *Applier) Run(ctx context.Context, invInfo inventory.Info, objects objec taskQueue := taskBuilder. WithApplyObjects(applyObjs). WithPruneObjects(pruneObjs). - WithInventory(invInfo). 
Build(taskContext, opts) klog.V(4).Infof("validation errors: %d", len(vCollector.Errors)) diff --git a/pkg/apply/applier_builder.go b/pkg/apply/applier_builder.go index e964d110..368766ce 100644 --- a/pkg/apply/applier_builder.go +++ b/pkg/apply/applier_builder.go @@ -44,6 +44,7 @@ func (b *ApplierBuilder) Build() (*Applier, error) { openAPIGetter: bx.discoClient, mapper: bx.mapper, infoHelper: info.NewHelper(bx.mapper, bx.unstructuredClientForMapping), + statusPolicy: bx.statusPolicy, }, nil } @@ -91,3 +92,8 @@ func (b *ApplierBuilder) WithStatusWatcherFilters(filters *watcher.Filters) *App b.statusWatcherFilters = filters return b } + +func (b *ApplierBuilder) WithStatusPolicy(statusPolicy inventory.StatusPolicy) *ApplierBuilder { + b.statusPolicy = statusPolicy + return b +} diff --git a/pkg/apply/applier_test.go b/pkg/apply/applier_test.go index 824a4741..bf283f88 100644 --- a/pkg/apply/applier_test.go +++ b/pkg/apply/applier_test.go @@ -1431,7 +1431,7 @@ func TestApplier(t *testing.T) { testCtx, testCancel := context.WithTimeout(context.Background(), testTimeout) defer testCancel() // cleanup - invInfo, err := tc.invInfo.toWrapped() + invInfo, err := tc.invInfo.toInfo() require.NoError(t, err) eventChannel := applier.Run(runCtx, invInfo, tc.resources, tc.options) @@ -1874,7 +1874,7 @@ func TestApplierCancel(t *testing.T) { testCtx, testCancel := context.WithTimeout(context.Background(), tc.testTimeout) defer testCancel() // cleanup - invInfo, err := tc.invInfo.toWrapped() + invInfo, err := tc.invInfo.toInfo() require.NoError(t, err) eventChannel := applier.Run(runCtx, invInfo, tc.resources, tc.options) @@ -1976,7 +1976,6 @@ func TestReadAndPrepareObjects(t *testing.T) { }{ "objects include inventory": { invInfo: inventoryInfo{ - name: invInfo.Name(), namespace: invInfo.Namespace(), id: invInfo.ID(), }, @@ -1985,7 +1984,6 @@ func TestReadAndPrepareObjects(t *testing.T) { }, "empty inventory, empty objects, apply none, prune none": { invInfo: inventoryInfo{ - name: invInfo.Name(), namespace: invInfo.Namespace(), id: invInfo.ID(), }, @@ -1993,7 +1991,6 @@ func TestReadAndPrepareObjects(t *testing.T) { "one in inventory, empty objects, prune one": { clusterObjs: object.UnstructuredSet{obj1}, invInfo: inventoryInfo{ - name: invInfo.Name(), namespace: invInfo.Namespace(), id: invInfo.ID(), set: object.ObjMetadataSet{ @@ -2004,7 +2001,6 @@ func TestReadAndPrepareObjects(t *testing.T) { }, "all in inventory, apply all": { invInfo: inventoryInfo{ - name: invInfo.Name(), namespace: invInfo.Namespace(), id: invInfo.ID(), set: object.ObjMetadataSet{ @@ -2018,7 +2014,6 @@ func TestReadAndPrepareObjects(t *testing.T) { "disjoint set, apply new, prune old": { clusterObjs: object.UnstructuredSet{obj2}, invInfo: inventoryInfo{ - name: invInfo.Name(), namespace: invInfo.Namespace(), id: invInfo.ID(), set: object.ObjMetadataSet{ @@ -2032,7 +2027,6 @@ func TestReadAndPrepareObjects(t *testing.T) { "most in inventory, apply all": { clusterObjs: object.UnstructuredSet{obj2}, invInfo: inventoryInfo{ - name: invInfo.Name(), namespace: invInfo.Namespace(), id: invInfo.ID(), set: object.ObjMetadataSet{ diff --git a/pkg/apply/builder.go b/pkg/apply/builder.go index 75602e74..97c59339 100644 --- a/pkg/apply/builder.go +++ b/pkg/apply/builder.go @@ -28,6 +28,7 @@ type commonBuilder struct { unstructuredClientForMapping func(*meta.RESTMapping) (resource.RESTClient, error) statusWatcher watcher.StatusWatcher statusWatcherFilters *watcher.Filters + statusPolicy inventory.StatusPolicy } func (cb *commonBuilder) 
finalize() (*commonBuilder, error) { diff --git a/pkg/apply/common_test.go b/pkg/apply/common_test.go index 8ab9b8ec..2590f5ac 100644 --- a/pkg/apply/common_test.go +++ b/pkg/apply/common_test.go @@ -38,7 +38,7 @@ import ( type inventoryInfo struct { name string namespace string - id string + id inventory.ID set object.ObjMetadataSet } @@ -56,7 +56,7 @@ func (i inventoryInfo) toUnstructured() *unstructured.Unstructured { "name": i.name, "namespace": i.namespace, "labels": map[string]interface{}{ - common.InventoryLabel: i.id, + common.InventoryLabel: i.id.String(), }, }, "data": invMap, @@ -64,7 +64,11 @@ func (i inventoryInfo) toUnstructured() *unstructured.Unstructured { } } -func (i inventoryInfo) toWrapped() (inventory.Info, error) { +func (i inventoryInfo) toWrapped() (inventory.Inventory, error) { + return inventory.ConfigMapToInventoryObj(i.toUnstructured()) +} + +func (i inventoryInfo) toInfo() (inventory.Info, error) { return inventory.ConfigMapToInventoryInfo(i.toUnstructured()) } @@ -125,7 +129,7 @@ func newTestInventory( ) inventory.Client { // Use an Client with a fakeInfoHelper to allow generating Info // objects that use the FakeRESTClient as the UnstructuredClient. - invClient, err := inventory.ConfigMapClientFactory{StatusPolicy: inventory.StatusPolicyAll}.NewClient(tf) + invClient, err := inventory.ConfigMapClientFactory{}.NewClient(tf) require.NoError(t, err) return invClient } diff --git a/pkg/apply/destroyer.go b/pkg/apply/destroyer.go index ae3c5f73..9783aeb1 100644 --- a/pkg/apply/destroyer.go +++ b/pkg/apply/destroyer.go @@ -8,6 +8,7 @@ import ( "fmt" "time" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/discovery" @@ -38,6 +39,7 @@ type Destroyer struct { client dynamic.Interface openAPIGetter discovery.OpenAPISchemaInterface infoHelper info.Helper + statusPolicy inventory.StatusPolicy } type DestroyerOptions struct { @@ -79,10 +81,23 @@ func (d *Destroyer) Run(ctx context.Context, invInfo inventory.Info, options Des setDestroyerDefaults(&options) go func() { defer close(eventChannel) + inv, err := d.invClient.Get(ctx, invInfo, inventory.GetOptions{}) + if apierrors.IsNotFound(err) { + inv = invInfo.NewEmptyInventory() + } else if err != nil { + handleError(eventChannel, err) + return + } + if inv.ID() != invInfo.ID() { + handleError(eventChannel, fmt.Errorf("expected inventory object to have inventory-id %q but got %q", + invInfo.ID(), inv.ID())) + return + } + // Retrieve the objects to be deleted from the cluster. Second parameter is empty // because no local objects returns all inventory objects for deletion. emptyLocalObjs := object.UnstructuredSet{} - deleteObjs, err := d.pruner.GetPruneObjs(ctx, invInfo, emptyLocalObjs, prune.Options{ + deleteObjs, err := d.pruner.GetPruneObjs(ctx, inv, emptyLocalObjs, prune.Options{ DryRunStrategy: options.DryRunStrategy, }) if err != nil { @@ -123,8 +138,10 @@ func (d *Destroyer) Run(ctx context.Context, invInfo inventory.Info, options Des InfoHelper: d.infoHelper, Mapper: d.mapper, InvClient: d.invClient, + Inventory: inv, Collector: vCollector, PruneFilters: deleteFilters, + StatusPolicy: d.statusPolicy, } opts := solver.Options{ Destroy: true, @@ -138,7 +155,6 @@ func (d *Destroyer) Run(ctx context.Context, invInfo inventory.Info, options Des // Build the ordered set of tasks to execute. taskQueue := taskBuilder. WithPruneObjects(deleteObjs). - WithInventory(invInfo). 
Build(taskContext, opts) klog.V(4).Infof("validation errors: %d", len(vCollector.Errors)) diff --git a/pkg/apply/destroyer_builder.go b/pkg/apply/destroyer_builder.go index 6927284d..08aefb2a 100644 --- a/pkg/apply/destroyer_builder.go +++ b/pkg/apply/destroyer_builder.go @@ -44,6 +44,7 @@ func (b *DestroyerBuilder) Build() (*Destroyer, error) { client: bx.client, openAPIGetter: bx.discoClient, infoHelper: info.NewHelper(bx.mapper, bx.unstructuredClientForMapping), + statusPolicy: bx.statusPolicy, }, nil } @@ -91,3 +92,8 @@ func (b *DestroyerBuilder) WithStatusWatcherFilters(filters *watcher.Filters) *D b.statusWatcherFilters = filters return b } + +func (b *DestroyerBuilder) WithStatusPolicy(statusPolicy inventory.StatusPolicy) *DestroyerBuilder { + b.statusPolicy = statusPolicy + return b +} diff --git a/pkg/apply/destroyer_test.go b/pkg/apply/destroyer_test.go index 6712d2b0..bc1ef339 100644 --- a/pkg/apply/destroyer_test.go +++ b/pkg/apply/destroyer_test.go @@ -12,7 +12,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "sigs.k8s.io/cli-utils/pkg/apply/event" - "sigs.k8s.io/cli-utils/pkg/inventory" pollevent "sigs.k8s.io/cli-utils/pkg/kstatus/polling/event" "sigs.k8s.io/cli-utils/pkg/kstatus/status" "sigs.k8s.io/cli-utils/pkg/object" @@ -314,15 +313,13 @@ func TestDestroyerCancel(t *testing.T) { t.Run(tn, func(t *testing.T) { statusWatcher := newFakeWatcher(tc.statusEvents) - invInfo, err := tc.invInfo.toWrapped() - require.NoError(t, err) - cm, err := inventory.InvInfoToConfigMap(invInfo) + invInfo, err := tc.invInfo.toInfo() require.NoError(t, err) destroyer := newTestDestroyer(t, tc.invInfo, // Add the inventory to the cluster (to allow deletion) - append(tc.clusterObjs, cm), + append(tc.clusterObjs, tc.invInfo.toUnstructured()), statusWatcher, ) diff --git a/pkg/apply/prune/prune.go b/pkg/apply/prune/prune.go index cd8a085d..151e1c39 100644 --- a/pkg/apply/prune/prune.go +++ b/pkg/apply/prune/prune.go @@ -235,17 +235,13 @@ func (p *Pruner) removeInventoryAnnotation(ctx context.Context, obj *unstructure // if one occurs. func (p *Pruner) GetPruneObjs( ctx context.Context, - inv inventory.Info, + inv inventory.Inventory, objs object.UnstructuredSet, opts Options, ) (object.UnstructuredSet, error) { ids := object.UnstructuredSetToObjMetadataSet(objs) - invIDs, err := p.InvClient.GetClusterObjs(ctx, inv) - if err != nil { - return nil, err - } // only return objects that were in the inventory but not in the object set - ids = invIDs.Diff(ids) + ids = inv.ObjectRefs().Diff(ids) objs = object.UnstructuredSet{} for _, id := range ids { pruneObj, err := p.getObject(ctx, id) diff --git a/pkg/apply/prune/prune_test.go b/pkg/apply/prune/prune_test.go index b62fcb62..458a65a1 100644 --- a/pkg/apply/prune/prune_test.go +++ b/pkg/apply/prune/prune_test.go @@ -122,21 +122,14 @@ metadata: // Returns a inventory object with the inventory set from // the passed "children". 
-func createInventoryInfo(children ...*unstructured.Unstructured) (inventory.Info, error) { +func newInventory(children ...*unstructured.Unstructured) (inventory.Inventory, error) { inventoryObjCopy := inventoryObj.DeepCopy() wrappedInv, err := inventory.ConfigMapToInventoryObj(inventoryObjCopy) if err != nil { return nil, err } - objs := object.UnstructuredSetToObjMetadataSet(children) - if err := wrappedInv.Store(objs, nil); err != nil { - return nil, err - } - obj, err := wrappedInv.GetObject() - if err != nil { - return nil, err - } - return inventory.ConfigMapToInventoryInfo(obj) + wrappedInv.SetObjectRefs(object.UnstructuredSetToObjMetadataSet(children)) + return wrappedInv, nil } // podDeletionPrevention object contains the "on-remove:keep" lifecycle directive. @@ -862,7 +855,7 @@ func TestGetPruneObjs(t *testing.T) { Mapper: testrestmapper.TestOnlyStaticRESTMapper(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...), } - currentInventory, err := createInventoryInfo(tc.prevInventory...) + currentInventory, err := newInventory(tc.prevInventory...) require.NoError(t, err) actualObjs, err := po.GetPruneObjs(t.Context(), currentInventory, tc.localObjs, Options{}) if err != nil { diff --git a/pkg/apply/solver/solver.go b/pkg/apply/solver/solver.go index 2c26ee09..0a8461ae 100644 --- a/pkg/apply/solver/solver.go +++ b/pkg/apply/solver/solver.go @@ -43,7 +43,9 @@ type TaskQueueBuilder struct { OpenAPIGetter discovery.OpenAPISchemaInterface InfoHelper info.Helper Mapper meta.RESTMapper - InvClient inventory.Client + Inventory inventory.Inventory + InvClient inventory.WriteClient + StatusPolicy inventory.StatusPolicy // Collector is used to collect validation errors and invalid objects. // Invalid objects will be filtered and not be injected into tasks. Collector *validation.Collector @@ -56,7 +58,6 @@ type TaskQueueBuilder struct { pruneCounter int waitCounter int - invInfo inventory.Info applyObjs object.UnstructuredSet pruneObjs object.UnstructuredSet } @@ -100,12 +101,6 @@ type Options struct { InventoryPolicy inventory.Policy } -// WithInventory sets the inventory info and returns the builder for chaining. -func (t *TaskQueueBuilder) WithInventory(inv inventory.Info) *TaskQueueBuilder { - t.invInfo = inv - return t -} - // WithApplyObjects sets the apply objects and returns the builder for chaining. 
func (t *TaskQueueBuilder) WithApplyObjects(applyObjs object.UnstructuredSet) *TaskQueueBuilder { t.applyObjs = applyObjs @@ -162,7 +157,7 @@ func (t *TaskQueueBuilder) Build(taskContext *taskrunner.TaskContext, o Options) InvClient: t.InvClient, DynamicClient: t.DynamicClient, Mapper: t.Mapper, - InvInfo: t.invInfo, + Inventory: t.Inventory, Objects: applyObjs, DryRun: o.DryRunStrategy, }) @@ -213,7 +208,6 @@ func (t *TaskQueueBuilder) Build(taskContext *taskrunner.TaskContext, o Options) } } - prevInvIDs, _ := t.InvClient.GetClusterObjs(taskContext.Context(), t.invInfo) klog.V(2).Infoln("adding delete/update inventory task") var taskName string if o.Destroy { @@ -222,12 +216,12 @@ func (t *TaskQueueBuilder) Build(taskContext *taskrunner.TaskContext, o Options) taskName = "inventory-set-0" } tasks = append(tasks, &task.DeleteOrUpdateInvTask{ - TaskName: taskName, - InvClient: t.InvClient, - InvInfo: t.invInfo, - PrevInventory: prevInvIDs, - DryRun: o.DryRunStrategy, - Destroy: o.Destroy, + TaskName: taskName, + Inventory: t.Inventory, + InvClient: t.InvClient, + StatusPolicy: t.StatusPolicy, + DryRun: o.DryRunStrategy, + Destroy: o.Destroy, }) return &TaskQueue{tasks: tasks} diff --git a/pkg/apply/solver/solver_test.go b/pkg/apply/solver/solver_test.go index 39cfb727..de48e1aa 100644 --- a/pkg/apply/solver/solver_test.go +++ b/pkg/apply/solver/solver_test.go @@ -10,7 +10,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "sigs.k8s.io/cli-utils/pkg/apis/actuation" "sigs.k8s.io/cli-utils/pkg/apply/prune" @@ -117,7 +116,7 @@ func newInvObject(name, namespace, inventoryID string) *unstructured.Unstructure common.InventoryLabel: inventoryID, }, }, - "data": map[string]string{}, + "data": map[string]interface{}{}, }, } } @@ -131,9 +130,7 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { inventoryInfoComparer(), ) - invInfo, err := inventory.ConfigMapToInventoryInfo(newInvObject( - "abc-123", "default", "test")) - require.NoError(t, err) + uObj := newInvObject("abc-123", "default", "test") testCases := map[string]struct { applyObjs []*unstructured.Unstructured @@ -148,14 +145,11 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, Objects: object.UnstructuredSet{}, }, &task.DeleteOrUpdateInvTask{ - TaskName: "inventory-set-0", - InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{}, + TaskName: "inventory-set-0", + InvClient: &inventory.FakeClient{}, }, }, }, @@ -167,7 +161,6 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, Objects: object.UnstructuredSet{ testutil.Unstructured(t, resources["deployment"]), }, @@ -188,10 +181,6 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { &task.DeleteOrUpdateInvTask{ TaskName: "inventory-set-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{ - testutil.ToIdentifier(t, resources["deployment"]), - }, }, }, expectedStatus: []actuation.ObjectStatus{ @@ -214,7 +203,6 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, Objects: object.UnstructuredSet{ testutil.Unstructured(t, 
resources["deployment"]), testutil.Unstructured(t, resources["secret"]), @@ -239,11 +227,6 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { &task.DeleteOrUpdateInvTask{ TaskName: "inventory-set-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{ - testutil.ToIdentifier(t, resources["deployment"]), - testutil.ToIdentifier(t, resources["secret"]), - }, }, }, expectedStatus: []actuation.ObjectStatus{ @@ -277,7 +260,6 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, Objects: object.UnstructuredSet{ testutil.Unstructured(t, resources["secret"]), testutil.Unstructured(t, resources["deployment"]), @@ -303,11 +285,6 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { &task.DeleteOrUpdateInvTask{ TaskName: "inventory-set-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{ - testutil.ToIdentifier(t, resources["secret"]), - testutil.ToIdentifier(t, resources["deployment"]), - }, }, }, expectedStatus: []actuation.ObjectStatus{ @@ -343,7 +320,6 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, Objects: object.UnstructuredSet{ testutil.Unstructured(t, resources["deployment"]), testutil.Unstructured(t, resources["secret"]), @@ -361,12 +337,7 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { &task.DeleteOrUpdateInvTask{ TaskName: "inventory-set-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{ - testutil.ToIdentifier(t, resources["deployment"]), - testutil.ToIdentifier(t, resources["secret"]), - }, - DryRun: common.DryRunClient, + DryRun: common.DryRunClient, }, }, expectedStatus: []actuation.ObjectStatus{ @@ -402,7 +373,6 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, Objects: object.UnstructuredSet{ testutil.Unstructured(t, resources["pod"]), testutil.Unstructured(t, resources["default-pod"]), @@ -420,12 +390,7 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { &task.DeleteOrUpdateInvTask{ TaskName: "inventory-set-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{ - testutil.ToIdentifier(t, resources["pod"]), - testutil.ToIdentifier(t, resources["default-pod"]), - }, - DryRun: common.DryRunServer, + DryRun: common.DryRunServer, }, }, expectedStatus: []actuation.ObjectStatus{ @@ -457,7 +422,6 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, Objects: object.UnstructuredSet{ testutil.Unstructured(t, resources["crontab1"]), testutil.Unstructured(t, resources["crd"]), @@ -497,12 +461,6 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { &task.DeleteOrUpdateInvTask{ TaskName: "inventory-set-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{ - testutil.ToIdentifier(t, resources["crontab1"]), - testutil.ToIdentifier(t, resources["crd"]), - testutil.ToIdentifier(t, resources["crontab2"]), - }, }, }, expectedStatus: []actuation.ObjectStatus{ @@ -546,7 +504,6 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, 
Objects: object.UnstructuredSet{ testutil.Unstructured(t, resources["crontab1"]), testutil.Unstructured(t, resources["crd"]), @@ -572,13 +529,7 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { &task.DeleteOrUpdateInvTask{ TaskName: "inventory-set-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{ - testutil.ToIdentifier(t, resources["crontab1"]), - testutil.ToIdentifier(t, resources["crd"]), - testutil.ToIdentifier(t, resources["crontab2"]), - }, - DryRun: common.DryRunClient, + DryRun: common.DryRunClient, }, }, expectedStatus: []actuation.ObjectStatus{ @@ -618,7 +569,6 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, Objects: object.UnstructuredSet{ testutil.Unstructured(t, resources["namespace"]), testutil.Unstructured(t, resources["pod"]), @@ -658,12 +608,6 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { &task.DeleteOrUpdateInvTask{ TaskName: "inventory-set-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{ - testutil.ToIdentifier(t, resources["namespace"]), - testutil.ToIdentifier(t, resources["pod"]), - testutil.ToIdentifier(t, resources["secret"]), - }, }, }, expectedStatus: []actuation.ObjectStatus{ @@ -703,7 +647,6 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, Objects: object.UnstructuredSet{ testutil.Unstructured(t, resources["deployment"], testutil.AddDependsOn(t, testutil.ToIdentifier(t, resources["secret"]))), @@ -742,11 +685,6 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { &task.DeleteOrUpdateInvTask{ TaskName: "inventory-set-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{ - testutil.ToIdentifier(t, resources["deployment"]), - testutil.ToIdentifier(t, resources["secret"]), - }, }, }, expectedStatus: []actuation.ObjectStatus{ @@ -798,6 +736,9 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { for tn, tc := range testCases { t.Run(tn, func(t *testing.T) { mapper := testutil.NewFakeRESTMapper() + inventoryObj := &inventory.UnstructuredInventory{ + ClusterObj: uObj, + } // inject mapper for equality comparison for _, t := range tc.expectedTasks { switch typedTask := t.(type) { @@ -807,6 +748,9 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { typedTask.Mapper = mapper case *task.InvAddTask: typedTask.Mapper = mapper + typedTask.Inventory = inventoryObj + case *task.DeleteOrUpdateInvTask: + typedTask.Inventory = inventoryObj } } @@ -816,12 +760,12 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { tqb := TaskQueueBuilder{ Pruner: pruner, Mapper: mapper, + Inventory: inventoryObj, InvClient: fakeInvClient, Collector: vCollector, } taskContext := taskrunner.NewTaskContext(t.Context(), nil, nil) - tq := tqb.WithInventory(invInfo). - WithApplyObjects(tc.applyObjs). + tq := tqb.WithApplyObjects(tc.applyObjs). 
Build(taskContext, tc.options) err := vCollector.ToError() if tc.expectedError != nil { @@ -831,7 +775,7 @@ func TestTaskQueueBuilder_ApplyBuild(t *testing.T) { assert.NoError(t, err) asserter.Equal(t, tc.expectedTasks, tq.tasks) - actualStatus := taskContext.InventoryManager().Inventory().Status.Objects + actualStatus := taskContext.InventoryManager().Inventory().ObjStatuses testutil.AssertEqual(t, tc.expectedStatus, actualStatus) }) } @@ -846,9 +790,7 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { inventoryInfoComparer(), ) - invInfo, err := inventory.ConfigMapToInventoryInfo(newInvObject( - "abc-123", "default", "test")) - require.NoError(t, err) + uObj := newInvObject("abc-123", "default", "test") testCases := map[string]struct { pruneObjs []*unstructured.Unstructured @@ -864,14 +806,11 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, Objects: object.UnstructuredSet{}, }, &task.DeleteOrUpdateInvTask{ - TaskName: "inventory-set-0", - InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{}, + TaskName: "inventory-set-0", + InvClient: &inventory.FakeClient{}, }, }, }, @@ -884,7 +823,6 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, Objects: object.UnstructuredSet{}, }, &task.PruneTask{ @@ -903,10 +841,6 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { &task.DeleteOrUpdateInvTask{ TaskName: "inventory-set-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{ - testutil.ToIdentifier(t, resources["default-pod"]), - }, }, }, expectedStatus: []actuation.ObjectStatus{ @@ -930,7 +864,6 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, Objects: object.UnstructuredSet{}, }, &task.PruneTask{ @@ -951,11 +884,6 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { &task.DeleteOrUpdateInvTask{ TaskName: "inventory-set-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{ - testutil.ToIdentifier(t, resources["default-pod"]), - testutil.ToIdentifier(t, resources["pod"]), - }, }, }, expectedStatus: []actuation.ObjectStatus{ @@ -989,7 +917,6 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, Objects: object.UnstructuredSet{}, }, &task.PruneTask{ @@ -1022,11 +949,6 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { &task.DeleteOrUpdateInvTask{ TaskName: "inventory-set-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{ - testutil.ToIdentifier(t, resources["pod"]), - testutil.ToIdentifier(t, resources["secret"]), - }, }, }, expectedStatus: []actuation.ObjectStatus{ @@ -1060,7 +982,6 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, Objects: object.UnstructuredSet{}, }, &task.PruneTask{ @@ -1080,10 +1001,6 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { &task.DeleteOrUpdateInvTask{ TaskName: "inventory-set-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{ - testutil.ToIdentifier(t, resources["pod"]), - }, }, }, 
expectedStatus: []actuation.ObjectStatus{ @@ -1112,7 +1029,6 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, Objects: object.UnstructuredSet{}, DryRun: common.DryRunServer, }, @@ -1127,12 +1043,7 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { &task.DeleteOrUpdateInvTask{ TaskName: "inventory-set-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{ - testutil.ToIdentifier(t, resources["pod"]), - testutil.ToIdentifier(t, resources["default-pod"]), - }, - DryRun: common.DryRunServer, + DryRun: common.DryRunServer, }, }, expectedStatus: []actuation.ObjectStatus{ @@ -1166,7 +1077,6 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, Objects: object.UnstructuredSet{}, }, &task.PruneTask{ @@ -1200,12 +1110,6 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { &task.DeleteOrUpdateInvTask{ TaskName: "inventory-set-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{ - testutil.ToIdentifier(t, resources["crontab1"]), - testutil.ToIdentifier(t, resources["crd"]), - testutil.ToIdentifier(t, resources["crontab2"]), - }, }, }, expectedStatus: []actuation.ObjectStatus{ @@ -1250,7 +1154,6 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, Objects: object.UnstructuredSet{}, DryRun: common.DryRunClient, }, @@ -1272,13 +1175,7 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { &task.DeleteOrUpdateInvTask{ TaskName: "inventory-set-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{ - testutil.ToIdentifier(t, resources["crontab1"]), - testutil.ToIdentifier(t, resources["crd"]), - testutil.ToIdentifier(t, resources["crontab2"]), - }, - DryRun: common.DryRunClient, + DryRun: common.DryRunClient, }, }, expectedStatus: []actuation.ObjectStatus{ @@ -1319,7 +1216,6 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, Objects: object.UnstructuredSet{}, }, &task.PruneTask{ @@ -1353,12 +1249,6 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { &task.DeleteOrUpdateInvTask{ TaskName: "inventory-set-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{ - testutil.ToIdentifier(t, resources["namespace"]), - testutil.ToIdentifier(t, resources["pod"]), - testutil.ToIdentifier(t, resources["secret"]), - }, }, }, expectedStatus: []actuation.ObjectStatus{ @@ -1427,7 +1317,6 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, Objects: object.UnstructuredSet{}, }, &task.PruneTask{ @@ -1447,10 +1336,6 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { &task.DeleteOrUpdateInvTask{ TaskName: "inventory-set-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{ - testutil.ToIdentifier(t, resources["pod"]), - }, }, }, expectedError: validation.NewError( @@ -1475,6 +1360,9 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { for tn, tc := range testCases { t.Run(tn, func(t *testing.T) { mapper := testutil.NewFakeRESTMapper() + inventoryObj := 
&inventory.UnstructuredInventory{ + ClusterObj: uObj, + } // inject mapper & pruner for equality comparison for _, t := range tc.expectedTasks { switch typedTask := t.(type) { @@ -1484,6 +1372,9 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { typedTask.Mapper = mapper case *task.InvAddTask: typedTask.Mapper = mapper + typedTask.Inventory = inventoryObj + case *task.DeleteOrUpdateInvTask: + typedTask.Inventory = inventoryObj } } @@ -1494,11 +1385,11 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { Pruner: pruner, Mapper: mapper, InvClient: fakeInvClient, + Inventory: inventoryObj, Collector: vCollector, } taskContext := taskrunner.NewTaskContext(t.Context(), nil, nil) - tq := tqb.WithInventory(invInfo). - WithPruneObjects(tc.pruneObjs). + tq := tqb.WithPruneObjects(tc.pruneObjs). Build(taskContext, tc.options) err := vCollector.ToError() if tc.expectedError != nil { @@ -1508,7 +1399,7 @@ func TestTaskQueueBuilder_PruneBuild(t *testing.T) { assert.NoError(t, err) asserter.Equal(t, tc.expectedTasks, tq.tasks) - actualStatus := taskContext.InventoryManager().Inventory().Status.Objects + actualStatus := taskContext.InventoryManager().Inventory().ObjStatuses testutil.AssertEqual(t, tc.expectedStatus, actualStatus) }) } @@ -1523,9 +1414,7 @@ func TestTaskQueueBuilder_ApplyPruneBuild(t *testing.T) { inventoryInfoComparer(), ) - invInfo, err := inventory.ConfigMapToInventoryInfo(newInvObject( - "abc-123", "default", "test")) - require.NoError(t, err) + uObj := newInvObject("abc-123", "default", "test") testCases := map[string]struct { inventoryIDs object.ObjMetadataSet @@ -1551,7 +1440,6 @@ func TestTaskQueueBuilder_ApplyPruneBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, Objects: object.UnstructuredSet{ testutil.Unstructured(t, resources["deployment"]), }, @@ -1585,10 +1473,6 @@ func TestTaskQueueBuilder_ApplyPruneBuild(t *testing.T) { &task.DeleteOrUpdateInvTask{ TaskName: "inventory-set-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{ - testutil.ToIdentifier(t, resources["secret"]), - }, }, }, expectedStatus: []actuation.ObjectStatus{ @@ -1625,7 +1509,6 @@ func TestTaskQueueBuilder_ApplyPruneBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, Objects: object.UnstructuredSet{ testutil.Unstructured(t, resources["deployment"]), }, @@ -1646,10 +1529,6 @@ func TestTaskQueueBuilder_ApplyPruneBuild(t *testing.T) { &task.DeleteOrUpdateInvTask{ TaskName: "inventory-set-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{ - testutil.ToIdentifier(t, resources["secret"]), - }, }, }, expectedStatus: []actuation.ObjectStatus{ @@ -1684,7 +1563,6 @@ func TestTaskQueueBuilder_ApplyPruneBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, Objects: object.UnstructuredSet{ testutil.Unstructured(t, resources["deployment"], testutil.AddDependsOn(t, testutil.ToIdentifier(t, resources["secret"]))), @@ -1720,10 +1598,6 @@ func TestTaskQueueBuilder_ApplyPruneBuild(t *testing.T) { &task.DeleteOrUpdateInvTask{ TaskName: "inventory-set-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{ - testutil.ToIdentifier(t, resources["secret"]), - }, }, }, expectedStatus: []actuation.ObjectStatus{ @@ -1765,7 +1639,6 @@ func 
TestTaskQueueBuilder_ApplyPruneBuild(t *testing.T) { &task.InvAddTask{ TaskName: "inventory-add-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, Objects: object.UnstructuredSet{ testutil.Unstructured(t, resources["deployment"]), }, @@ -1800,10 +1673,6 @@ func TestTaskQueueBuilder_ApplyPruneBuild(t *testing.T) { &task.DeleteOrUpdateInvTask{ TaskName: "inventory-set-0", InvClient: &inventory.FakeClient{}, - InvInfo: invInfo, - PrevInventory: object.ObjMetadataSet{ - testutil.ToIdentifier(t, resources["secret"]), - }, }, }, expectedStatus: []actuation.ObjectStatus{ @@ -1830,6 +1699,9 @@ func TestTaskQueueBuilder_ApplyPruneBuild(t *testing.T) { for tn, tc := range testCases { t.Run(tn, func(t *testing.T) { mapper := testutil.NewFakeRESTMapper() + inventoryObj := &inventory.UnstructuredInventory{ + ClusterObj: uObj, + } // inject mapper & pruner for equality comparison for _, t := range tc.expectedTasks { switch typedTask := t.(type) { @@ -1841,6 +1713,9 @@ func TestTaskQueueBuilder_ApplyPruneBuild(t *testing.T) { typedTask.Mapper = mapper case *task.InvAddTask: typedTask.Mapper = mapper + typedTask.Inventory = inventoryObj + case *task.DeleteOrUpdateInvTask: + typedTask.Inventory = inventoryObj } } @@ -1850,11 +1725,11 @@ func TestTaskQueueBuilder_ApplyPruneBuild(t *testing.T) { Pruner: pruner, Mapper: mapper, InvClient: fakeInvClient, + Inventory: inventoryObj, Collector: vCollector, } taskContext := taskrunner.NewTaskContext(t.Context(), nil, nil) - tq := tqb.WithInventory(invInfo). - WithApplyObjects(tc.applyObjs). + tq := tqb.WithApplyObjects(tc.applyObjs). WithPruneObjects(tc.pruneObjs). Build(taskContext, tc.options) @@ -1867,7 +1742,7 @@ func TestTaskQueueBuilder_ApplyPruneBuild(t *testing.T) { asserter.Equal(t, tc.expectedTasks, tq.tasks) - actualStatus := taskContext.InventoryManager().Inventory().Status.Objects + actualStatus := taskContext.InventoryManager().Inventory().ObjStatuses testutil.AssertEqual(t, tc.expectedStatus, actualStatus) }) } @@ -1907,8 +1782,6 @@ func fakeClientComparer() cmp.Option { func inventoryInfoComparer() cmp.Option { return cmp.Comparer(func(x, y inventory.Info) bool { return x.ID() == y.ID() && - x.Name() == y.Name() && - x.Namespace() == y.Namespace() && - x.Strategy() == y.Strategy() + x.Namespace() == y.Namespace() }) } diff --git a/pkg/apply/task/delete_inv_task_test.go b/pkg/apply/task/delete_inv_task_test.go index a2ea8eb9..1948ca02 100644 --- a/pkg/apply/task/delete_inv_task_test.go +++ b/pkg/apply/task/delete_inv_task_test.go @@ -81,7 +81,7 @@ func TestDeleteInvTask(t *testing.T) { } for name, tc := range testCases { t.Run(name, func(t *testing.T) { - client := inventory.NewFakeClient(object.ObjMetadataSet{}) + client := inventory.NewFakeClient(tc.prevInventory) client.Err = tc.err eventChannel := make(chan event.Event) resourceCache := cache.NewResourceCacheMap() @@ -105,12 +105,11 @@ func TestDeleteInvTask(t *testing.T) { } task := DeleteOrUpdateInvTask{ - TaskName: taskName, - InvClient: client, - InvInfo: nil, - DryRun: common.DryRunNone, - PrevInventory: tc.prevInventory, - Destroy: true, + TaskName: taskName, + InvClient: client, + DryRun: common.DryRunNone, + Inventory: client.Inv, + Destroy: true, } if taskName != task.Name() { t.Errorf("expected task name (%s), got (%s)", taskName, task.Name()) @@ -121,15 +120,17 @@ func TestDeleteInvTask(t *testing.T) { if tc.err != result.Err { t.Errorf("running DeleteOrUpdateInvTask expected error (%s), got (%s)", tc.err, result.Err) } - } else { - if result.Err != nil { - 
t.Errorf("unexpected error running DeleteOrUpdateInvTask: %s", result.Err) - } + return + } + if result.Err != nil { + t.Errorf("unexpected error running DeleteOrUpdateInvTask: %s", result.Err) + } + actual, _ := client.Get(context.TODO(), nil, inventory.GetOptions{}) + if len(tc.expectedObjs) > 0 { + testutil.AssertEqual(t, tc.expectedObjs, actual.ObjectRefs(), + "Actual cluster objects (%d) do not match expected cluster objects (%d)", + len(actual.ObjectRefs()), len(tc.expectedObjs)) } - actual, _ := client.GetClusterObjs(context.Background(), nil) - testutil.AssertEqual(t, tc.expectedObjs, actual, - "Actual cluster objects (%d) do not match expected cluster objects (%d)", - len(actual), len(tc.expectedObjs)) }) } } diff --git a/pkg/apply/task/inv_add_task.go b/pkg/apply/task/inv_add_task.go index 9f562477..b5b2cfca 100644 --- a/pkg/apply/task/inv_add_task.go +++ b/pkg/apply/task/inv_add_task.go @@ -30,10 +30,10 @@ var ( // before the actual object is applied. type InvAddTask struct { TaskName string - InvClient inventory.Client + Inventory inventory.Inventory + InvClient inventory.WriteClient DynamicClient dynamic.Interface Mapper meta.RESTMapper - InvInfo inventory.Info Objects object.UnstructuredSet DryRun common.DryRunStrategy } @@ -60,8 +60,8 @@ func (i *InvAddTask) Start(taskContext *taskrunner.TaskContext) { return } // If the inventory is namespaced, ensure the namespace exists - if i.InvInfo.Namespace() != "" { - if invNamespace := inventoryNamespaceInSet(i.InvInfo, i.Objects); invNamespace != nil { + if i.Inventory.Namespace() != "" { + if invNamespace := inventoryNamespaceInSet(i.Inventory, i.Objects); invNamespace != nil { if err := i.createNamespace(taskContext.Context(), invNamespace, i.DryRun); err != nil { err = fmt.Errorf("failed to create inventory namespace: %w", err) i.sendTaskResult(taskContext, err) @@ -71,7 +71,16 @@ func (i *InvAddTask) Start(taskContext *taskrunner.TaskContext) { } klog.V(4).Infof("merging %d local objects into inventory", len(i.Objects)) currentObjs := object.UnstructuredSetToObjMetadataSet(i.Objects) - _, err := i.InvClient.Merge(taskContext.Context(), i.InvInfo, currentObjs, i.DryRun) + inventoryObjs := i.Inventory.ObjectRefs() + unionObjs := inventoryObjs.Union(currentObjs) + + i.Inventory.SetObjectRefs(unionObjs) + + var err error + if !i.DryRun.ClientOrServerDryRun() { + err = i.InvClient.CreateOrUpdate(taskContext.Context(), i.Inventory, inventory.UpdateOptions{}) + } + i.sendTaskResult(taskContext, err) }() } @@ -94,7 +103,7 @@ func inventoryNamespaceInSet(inv inventory.Info, objs object.UnstructuredSet) *u for _, obj := range objs { gvk := obj.GetObjectKind().GroupVersionKind() if gvk == namespaceGVKv1 && obj.GetName() == invNamespace { - inventory.AddInventoryIDAnnotation(obj, inv) + inventory.AddInventoryIDAnnotation(obj, inv.ID()) return obj } } diff --git a/pkg/apply/task/inv_add_task_test.go b/pkg/apply/task/inv_add_task_test.go index 4564f95a..47ca655e 100644 --- a/pkg/apply/task/inv_add_task_test.go +++ b/pkg/apply/task/inv_add_task_test.go @@ -19,15 +19,13 @@ import ( "sigs.k8s.io/cli-utils/pkg/object" ) -var namespace = "test-namespace" - var inventoryObj = &unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": "v1", "kind": "ConfigMap", "metadata": map[string]interface{}{ - "name": "test-inventory-obj", - "namespace": namespace, + "name": inventory.TestInventoryName, + "namespace": inventory.TestInventoryNamespace, "labels": map[string]interface{}{ common.InventoryLabel: "test-app-label", }, @@ -41,7 +39,7 @@ 
var obj1 = &unstructured.Unstructured{ "kind": "Pod", "metadata": map[string]interface{}{ "name": "obj1", - "namespace": namespace, + "namespace": inventory.TestInventoryNamespace, }, }, } @@ -52,7 +50,7 @@ var obj2 = &unstructured.Unstructured{ "kind": "Job", "metadata": map[string]interface{}{ "name": "obj2", - "namespace": namespace, + "namespace": inventory.TestInventoryNamespace, }, }, } @@ -73,7 +71,7 @@ var nsObj = &unstructured.Unstructured{ "apiVersion": "v1", "kind": "Namespace", "metadata": map[string]interface{}{ - "name": namespace, + "name": inventory.TestInventoryNamespace, }, }, } @@ -81,8 +79,6 @@ var nsObj = &unstructured.Unstructured{ const taskName = "test-inventory-task" func TestInvAddTask(t *testing.T) { - localInv, err := inventory.ConfigMapToInventoryInfo(inventoryObj) - require.NoError(t, err) id1 := object.UnstructuredToObjMetadata(obj1) id2 := object.UnstructuredToObjMetadata(obj2) id3 := object.UnstructuredToObjMetadata(obj3) @@ -134,7 +130,7 @@ func TestInvAddTask(t *testing.T) { eventChannel := make(chan event.Event) resourceCache := cache.NewResourceCacheMap() taskContext := taskrunner.NewTaskContext(t.Context(), eventChannel, resourceCache) - tf := cmdtesting.NewTestFactory().WithNamespace(namespace) + tf := cmdtesting.NewTestFactory().WithNamespace(inventory.TestInventoryNamespace) defer tf.Cleanup() createdNamespace := false @@ -151,7 +147,7 @@ func TestInvAddTask(t *testing.T) { task := InvAddTask{ TaskName: taskName, InvClient: client, - InvInfo: localInv, + Inventory: client.Inv, Objects: tc.applyObjs, DynamicClient: tf.FakeDynamicClient, Mapper: mapper, @@ -168,8 +164,9 @@ func TestInvAddTask(t *testing.T) { if result.Err != nil { t.Errorf("unexpected error running InvAddTask: %s", result.Err) } - actual, _ := client.GetClusterObjs(t.Context(), nil) - if !tc.expectedObjs.Equal(actual) { + // argument doesn't matter for fake client, it always returns cached obj + actual, _ := client.Get(t.Context(), nil, inventory.GetOptions{}) + if !tc.expectedObjs.Equal(actual.ObjectRefs()) { t.Errorf("expected merged inventory (%s), got (%s)", tc.expectedObjs, actual) } if createdNamespace != tc.expectCreateNamespace { @@ -180,12 +177,12 @@ func TestInvAddTask(t *testing.T) { } func TestInventoryNamespaceInSet(t *testing.T) { - localInv, err := inventory.ConfigMapToInventoryInfo(inventoryObj) + localInv, err := inventory.ConfigMapToInventoryObj(inventoryObj) require.NoError(t, err) - inventoryNamespace := createNamespace(namespace) + inventoryNamespace := createNamespace(inventory.TestInventoryNamespace) tests := map[string]struct { - inv inventory.Info + inv inventory.Inventory objects []*unstructured.Unstructured namespace *unstructured.Unstructured }{ diff --git a/pkg/apply/task/inv_set_task.go b/pkg/apply/task/inv_set_task.go index 0323ab5f..9e573aec 100644 --- a/pkg/apply/task/inv_set_task.go +++ b/pkg/apply/task/inv_set_task.go @@ -18,13 +18,13 @@ import ( // DeleteOrUpdateInvTask encapsulates structures necessary to set the // inventory references at the end of the apply/prune. 
type DeleteOrUpdateInvTask struct { - TaskName string - InvClient inventory.Client - InvInfo inventory.Info - PrevInventory object.ObjMetadataSet - DryRun common.DryRunStrategy + TaskName string + Inventory inventory.Inventory + InvClient inventory.WriteClient + DryRun common.DryRunStrategy // if Destroy is set, the inventory will be deleted if all objects were successfully pruned - Destroy bool + Destroy bool + StatusPolicy inventory.StatusPolicy } func (i *DeleteOrUpdateInvTask) Name() string { @@ -97,12 +97,13 @@ func (i *DeleteOrUpdateInvTask) updateInventory(taskContext *taskrunner.TaskCont klog.V(4).Infof("set inventory %d successful applies", len(appliedObjs)) invObjs = invObjs.Union(appliedObjs) + prevInventory := i.Inventory.ObjectRefs() // If an object failed to apply and was previously stored in the inventory, // then keep it in the inventory so it can be applied/pruned next time. // This will remove new resources that failed to apply from the inventory, // because even tho they were added by InvAddTask, the PrevInventory // represents the inventory before the pipeline has run. - applyFailures := i.PrevInventory.Intersection(im.FailedApplies()) + applyFailures := prevInventory.Intersection(im.FailedApplies()) klog.V(4).Infof("keep in inventory %d failed applies", len(applyFailures)) invObjs = invObjs.Union(applyFailures) @@ -111,7 +112,7 @@ func (i *DeleteOrUpdateInvTask) updateInventory(taskContext *taskrunner.TaskCont // It's likely that all the skipped applies are already in the inventory, // because the apply filters all currently depend on cluster state, // but we're doing the intersection anyway just to be sure. - applySkips := i.PrevInventory.Intersection(im.SkippedApplies()) + applySkips := prevInventory.Intersection(im.SkippedApplies()) klog.V(4).Infof("keep in inventory %d skipped applies", len(applySkips)) invObjs = invObjs.Union(applySkips) @@ -120,7 +121,7 @@ func (i *DeleteOrUpdateInvTask) updateInventory(taskContext *taskrunner.TaskCont // It's likely that all the delete failures are already in the inventory, // because the set of resources to prune comes from the inventory, // but we're doing the intersection anyway just to be sure. - pruneFailures := i.PrevInventory.Intersection(im.FailedDeletes()) + pruneFailures := prevInventory.Intersection(im.FailedDeletes()) klog.V(4).Infof("set inventory %d failed prunes", len(pruneFailures)) invObjs = invObjs.Union(pruneFailures) @@ -129,19 +130,19 @@ func (i *DeleteOrUpdateInvTask) updateInventory(taskContext *taskrunner.TaskCont // It's likely that all the skipped deletes are already in the inventory, // because the set of resources to prune comes from the inventory, // but we're doing the intersection anyway just to be sure. - pruneSkips := i.PrevInventory.Intersection(im.SkippedDeletes()) + pruneSkips := prevInventory.Intersection(im.SkippedDeletes()) klog.V(4).Infof("keep in inventory %d skipped prunes", len(pruneSkips)) invObjs = invObjs.Union(pruneSkips) // If an object failed to reconcile and was previously stored in the inventory, // then keep it in the inventory so it can be waited on next time. - reconcileFailures := i.PrevInventory.Intersection(im.FailedReconciles()) + reconcileFailures := prevInventory.Intersection(im.FailedReconciles()) klog.V(4).Infof("set inventory %d failed reconciles", len(reconcileFailures)) invObjs = invObjs.Union(reconcileFailures) // If an object timed out reconciling and was previously stored in the inventory, // then keep it in the inventory so it can be waited on next time. 
- reconcileTimeouts := i.PrevInventory.Intersection(im.TimeoutReconciles()) + reconcileTimeouts := prevInventory.Intersection(im.TimeoutReconciles()) klog.V(4).Infof("keep in inventory %d timeout reconciles", len(reconcileTimeouts)) invObjs = invObjs.Union(reconcileTimeouts) @@ -152,24 +153,44 @@ func (i *DeleteOrUpdateInvTask) updateInventory(taskContext *taskrunner.TaskCont // If an object is invalid and was previously stored in the inventory, // then keep it in the inventory so it can be applied/pruned next time. - invalidObjects := i.PrevInventory.Intersection(taskContext.InvalidObjects()) + invalidObjects := prevInventory.Intersection(taskContext.InvalidObjects()) klog.V(4).Infof("keep in inventory %d invalid objects", len(invalidObjects)) invObjs = invObjs.Union(invalidObjects) klog.V(4).Infof("get the apply status for %d objects", len(invObjs)) - objStatus := taskContext.InventoryManager().Inventory().Status.Objects + objStatus := taskContext.InventoryManager().Inventory().ObjStatuses klog.V(4).Infof("set inventory %d total objects", len(invObjs)) - err := i.InvClient.Replace(taskContext.Context(), i.InvInfo, invObjs, objStatus, i.DryRun) + // Exit before updating the inventory, but after logging the above changes + if i.DryRun.ClientOrServerDryRun() { + klog.V(4).Infoln("dry-run replace inventory object: not applied") + return nil + } + + i.Inventory.SetObjectRefs(invObjs) + if err := i.InvClient.CreateOrUpdate(taskContext.Context(), i.Inventory, inventory.UpdateOptions{}); err != nil { + return err + } + + if i.StatusPolicy == inventory.StatusPolicyAll { + i.Inventory.SetObjectStatuses(objStatus) + if err := i.InvClient.UpdateStatus(taskContext.Context(), i.Inventory, inventory.UpdateOptions{}); err != nil { + return err + } + } klog.V(2).Infof("inventory set task completing (name: %q)", i.TaskName) - return err + return nil } // deleteInventory deletes the inventory object from the cluster. func (i *DeleteOrUpdateInvTask) deleteInventory(ctx context.Context) error { klog.V(2).Infof("delete inventory task starting (name: %q)", i.Name()) - err := i.InvClient.DeleteInventoryObj(ctx, i.InvInfo, i.DryRun) + if i.DryRun.ClientOrServerDryRun() { + klog.V(4).Infoln("dry-run delete inventory object: not deleted") + return nil + } + err := i.InvClient.Delete(ctx, i.Inventory, inventory.DeleteOptions{}) // Not found is not error, since this means it was already deleted. 
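The end-of-apply path above now writes in two phases: object references always go through CreateOrUpdate, and statuses are only written when the status policy asks for them. A condensed sketch of that flow, with the writeFinalInventory name and example package as illustrative placeholders:

package example

import (
	"context"

	"sigs.k8s.io/cli-utils/pkg/apis/actuation"
	"sigs.k8s.io/cli-utils/pkg/inventory"
	"sigs.k8s.io/cli-utils/pkg/object"
)

// writeFinalInventory stores the reconciled object references and, when the
// status policy allows it, the per-object statuses gathered during the run.
func writeFinalInventory(ctx context.Context, w inventory.WriteClient, inv inventory.Inventory,
	refs object.ObjMetadataSet, statuses []actuation.ObjectStatus, policy inventory.StatusPolicy) error {
	inv.SetObjectRefs(refs)
	if err := w.CreateOrUpdate(ctx, inv, inventory.UpdateOptions{}); err != nil {
		return err
	}
	if policy != inventory.StatusPolicyAll {
		// statuses are dropped unless the policy explicitly keeps them
		return nil
	}
	inv.SetObjectStatuses(statuses)
	return w.UpdateStatus(ctx, inv, inventory.UpdateOptions{})
}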
if apierrors.IsNotFound(err) { err = nil diff --git a/pkg/apply/task/inv_set_task_test.go b/pkg/apply/task/inv_set_task_test.go index 9f6b9020..8ce1b972 100644 --- a/pkg/apply/task/inv_set_task_test.go +++ b/pkg/apply/task/inv_set_task_test.go @@ -191,17 +191,16 @@ func TestInvSetTask(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { - client := inventory.NewFakeClient(object.ObjMetadataSet{}) + client := inventory.NewFakeClient(tc.prevInventory) eventChannel := make(chan event.Event) resourceCache := cache.NewResourceCacheMap() taskContext := taskrunner.NewTaskContext(t.Context(), eventChannel, resourceCache) task := DeleteOrUpdateInvTask{ - TaskName: taskName, - InvClient: client, - InvInfo: nil, - PrevInventory: tc.prevInventory, - Destroy: false, + TaskName: taskName, + InvClient: client, + Inventory: client.Inv, + Destroy: false, } im := taskContext.InventoryManager() for _, applyObj := range tc.appliedObjs { @@ -246,10 +245,10 @@ func TestInvSetTask(t *testing.T) { if result.Err != nil { t.Errorf("unexpected error running InvAddTask: %s", result.Err) } - actual, _ := client.GetClusterObjs(t.Context(), nil) - testutil.AssertEqual(t, tc.expectedObjs, actual, + actual, _ := client.Get(t.Context(), nil, inventory.GetOptions{}) + testutil.AssertEqual(t, tc.expectedObjs, actual.ObjectRefs(), "Actual cluster objects (%d) do not match expected cluster objects (%d)", - len(actual), len(tc.expectedObjs)) + len(actual.ObjectRefs()), len(tc.expectedObjs)) }) } } diff --git a/pkg/apply/taskrunner/task_test.go b/pkg/apply/taskrunner/task_test.go index 45c4646a..fc16ebfc 100644 --- a/pkg/apply/taskrunner/task_test.go +++ b/pkg/apply/taskrunner/task_test.go @@ -216,41 +216,39 @@ loop: "Actual events (%d) do not match expected events (%d)", len(receivedEvents), len(expectedEvents)) - expectedInventory := actuation.Inventory{ - Status: actuation.InventoryStatus{ - Objects: []actuation.ObjectStatus{ - { - ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment1ID), - Strategy: actuation.ActuationStrategyApply, - Actuation: actuation.ActuationSucceeded, - Reconcile: actuation.ReconcileSucceeded, - UID: testDeployment1.GetUID(), - Generation: testDeployment1.GetGeneration(), - }, - { - ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment2ID), - Strategy: actuation.ActuationStrategyApply, - Actuation: actuation.ActuationSucceeded, - Reconcile: actuation.ReconcileSucceeded, - UID: testDeployment2.GetUID(), - Generation: testDeployment2.GetGeneration(), - }, - { - ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment3ID), - Strategy: actuation.ActuationStrategyApply, - Actuation: actuation.ActuationFailed, - Reconcile: actuation.ReconcileSkipped, - }, - { - ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment4ID), - Strategy: actuation.ActuationStrategyApply, - Actuation: actuation.ActuationSkipped, - Reconcile: actuation.ReconcileSkipped, - }, + expectedInventory := inventory.BaseInventory{ + ObjStatuses: []actuation.ObjectStatus{ + { + ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment1ID), + Strategy: actuation.ActuationStrategyApply, + Actuation: actuation.ActuationSucceeded, + Reconcile: actuation.ReconcileSucceeded, + UID: testDeployment1.GetUID(), + Generation: testDeployment1.GetGeneration(), + }, + { + ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment2ID), + Strategy: actuation.ActuationStrategyApply, + Actuation: actuation.ActuationSucceeded, + 
Reconcile: actuation.ReconcileSucceeded, + UID: testDeployment2.GetUID(), + Generation: testDeployment2.GetGeneration(), + }, + { + ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment3ID), + Strategy: actuation.ActuationStrategyApply, + Actuation: actuation.ActuationFailed, + Reconcile: actuation.ReconcileSkipped, + }, + { + ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment4ID), + Strategy: actuation.ActuationStrategyApply, + Actuation: actuation.ActuationSkipped, + Reconcile: actuation.ReconcileSkipped, }, }, } - testutil.AssertEqual(t, &expectedInventory, taskContext.InventoryManager().Inventory()) + testutil.AssertEqual(t, expectedInventory, taskContext.InventoryManager().Inventory()) } func TestWaitTask_Timeout(t *testing.T) { @@ -388,41 +386,39 @@ loop: "Actual events (%d) do not match expected events (%d)", len(receivedEvents), len(expectedEvents)) - expectedInventory := actuation.Inventory{ - Status: actuation.InventoryStatus{ - Objects: []actuation.ObjectStatus{ - { - ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment1ID), - Strategy: actuation.ActuationStrategyApply, - Actuation: actuation.ActuationSucceeded, - Reconcile: actuation.ReconcileSucceeded, - UID: testDeployment1.GetUID(), - Generation: testDeployment1.GetGeneration(), - }, - { - ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment2ID), - Strategy: actuation.ActuationStrategyApply, - Actuation: actuation.ActuationSucceeded, - Reconcile: actuation.ReconcileTimeout, - UID: testDeployment2.GetUID(), - Generation: testDeployment2.GetGeneration(), - }, - { - ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment3ID), - Strategy: actuation.ActuationStrategyApply, - Actuation: actuation.ActuationFailed, - Reconcile: actuation.ReconcileSkipped, - }, - { - ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment4ID), - Strategy: actuation.ActuationStrategyApply, - Actuation: actuation.ActuationSkipped, - Reconcile: actuation.ReconcileSkipped, - }, + expectedInventory := inventory.BaseInventory{ + ObjStatuses: []actuation.ObjectStatus{ + { + ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment1ID), + Strategy: actuation.ActuationStrategyApply, + Actuation: actuation.ActuationSucceeded, + Reconcile: actuation.ReconcileSucceeded, + UID: testDeployment1.GetUID(), + Generation: testDeployment1.GetGeneration(), + }, + { + ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment2ID), + Strategy: actuation.ActuationStrategyApply, + Actuation: actuation.ActuationSucceeded, + Reconcile: actuation.ReconcileTimeout, + UID: testDeployment2.GetUID(), + Generation: testDeployment2.GetGeneration(), + }, + { + ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment3ID), + Strategy: actuation.ActuationStrategyApply, + Actuation: actuation.ActuationFailed, + Reconcile: actuation.ReconcileSkipped, + }, + { + ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment4ID), + Strategy: actuation.ActuationStrategyApply, + Actuation: actuation.ActuationSkipped, + Reconcile: actuation.ReconcileSkipped, }, }, } - testutil.AssertEqual(t, &expectedInventory, taskContext.InventoryManager().Inventory()) + testutil.AssertEqual(t, expectedInventory, taskContext.InventoryManager().Inventory()) } func TestWaitTask_StartAndComplete(t *testing.T) { @@ -493,21 +489,19 @@ loop: "Actual events (%d) do not match expected events (%d)", len(receivedEvents), len(expectedEvents)) - 
expectedInventory := actuation.Inventory{ - Status: actuation.InventoryStatus{ - Objects: []actuation.ObjectStatus{ - { - ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeploymentID), - Strategy: actuation.ActuationStrategyApply, - Actuation: actuation.ActuationSucceeded, - Reconcile: actuation.ReconcileSucceeded, - UID: testDeployment.GetUID(), - Generation: testDeployment.GetGeneration(), - }, + expectedInventory := inventory.BaseInventory{ + ObjStatuses: []actuation.ObjectStatus{ + { + ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeploymentID), + Strategy: actuation.ActuationStrategyApply, + Actuation: actuation.ActuationSucceeded, + Reconcile: actuation.ReconcileSucceeded, + UID: testDeployment.GetUID(), + Generation: testDeployment.GetGeneration(), }, }, } - testutil.AssertEqual(t, &expectedInventory, taskContext.InventoryManager().Inventory()) + testutil.AssertEqual(t, expectedInventory, taskContext.InventoryManager().Inventory()) } func TestWaitTask_Cancel(t *testing.T) { @@ -580,21 +574,19 @@ loop: "Actual events (%d) do not match expected events (%d)", len(receivedEvents), len(expectedEvents)) - expectedInventory := actuation.Inventory{ - Status: actuation.InventoryStatus{ - Objects: []actuation.ObjectStatus{ - { - ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeploymentID), - Strategy: actuation.ActuationStrategyApply, - Actuation: actuation.ActuationSucceeded, - Reconcile: actuation.ReconcilePending, - UID: testDeployment.GetUID(), - Generation: testDeployment.GetGeneration(), - }, + expectedInventory := inventory.BaseInventory{ + ObjStatuses: []actuation.ObjectStatus{ + { + ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeploymentID), + Strategy: actuation.ActuationStrategyApply, + Actuation: actuation.ActuationSucceeded, + Reconcile: actuation.ReconcilePending, + UID: testDeployment.GetUID(), + Generation: testDeployment.GetGeneration(), }, }, } - testutil.AssertEqual(t, &expectedInventory, taskContext.InventoryManager().Inventory()) + testutil.AssertEqual(t, expectedInventory, taskContext.InventoryManager().Inventory()) } func TestWaitTask_SingleTaskResult(t *testing.T) { @@ -688,21 +680,19 @@ loop: } assert.Equal(t, expectedResults, receivedResults) - expectedInventory := actuation.Inventory{ - Status: actuation.InventoryStatus{ - Objects: []actuation.ObjectStatus{ - { - ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeploymentID), - Strategy: actuation.ActuationStrategyApply, - Actuation: actuation.ActuationSucceeded, - Reconcile: actuation.ReconcileSucceeded, - UID: testDeployment.GetUID(), - Generation: testDeployment.GetGeneration(), - }, + expectedInventory := inventory.BaseInventory{ + ObjStatuses: []actuation.ObjectStatus{ + { + ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeploymentID), + Strategy: actuation.ActuationStrategyApply, + Actuation: actuation.ActuationSucceeded, + Reconcile: actuation.ReconcileSucceeded, + UID: testDeployment.GetUID(), + Generation: testDeployment.GetGeneration(), }, }, } - testutil.AssertEqual(t, &expectedInventory, taskContext.InventoryManager().Inventory()) + testutil.AssertEqual(t, expectedInventory, taskContext.InventoryManager().Inventory()) } func TestWaitTask_Failed(t *testing.T) { @@ -723,7 +713,7 @@ func TestWaitTask_Failed(t *testing.T) { eventsFunc func(*cache.ResourceCacheMap, *WaitTask, *TaskContext) waitTimeout time.Duration expectedEvents []event.Event - expectedInventory *actuation.Inventory + expectedInventory 
inventory.BaseInventory }{ "continue on failed if others InProgress": { configureTaskContextFunc: func(taskContext *TaskContext) { @@ -791,25 +781,23 @@ func TestWaitTask_Failed(t *testing.T) { }, }, }, - expectedInventory: &actuation.Inventory{ - Status: actuation.InventoryStatus{ - Objects: []actuation.ObjectStatus{ - { - ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment1ID), - Strategy: actuation.ActuationStrategyApply, - Actuation: actuation.ActuationSucceeded, - Reconcile: actuation.ReconcileFailed, - UID: testDeployment1.GetUID(), - Generation: testDeployment1.GetGeneration(), - }, - { - ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment2ID), - Strategy: actuation.ActuationStrategyApply, - Actuation: actuation.ActuationSucceeded, - Reconcile: actuation.ReconcileSucceeded, - UID: testDeployment2.GetUID(), - Generation: testDeployment2.GetGeneration(), - }, + expectedInventory: inventory.BaseInventory{ + ObjStatuses: []actuation.ObjectStatus{ + { + ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment1ID), + Strategy: actuation.ActuationStrategyApply, + Actuation: actuation.ActuationSucceeded, + Reconcile: actuation.ReconcileFailed, + UID: testDeployment1.GetUID(), + Generation: testDeployment1.GetGeneration(), + }, + { + ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment2ID), + Strategy: actuation.ActuationStrategyApply, + Actuation: actuation.ActuationSucceeded, + Reconcile: actuation.ReconcileSucceeded, + UID: testDeployment2.GetUID(), + Generation: testDeployment2.GetGeneration(), }, }, }, @@ -874,25 +862,23 @@ func TestWaitTask_Failed(t *testing.T) { }, }, }, - expectedInventory: &actuation.Inventory{ - Status: actuation.InventoryStatus{ - Objects: []actuation.ObjectStatus{ - { - ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment1ID), - Strategy: actuation.ActuationStrategyApply, - Actuation: actuation.ActuationSucceeded, - Reconcile: actuation.ReconcileFailed, - UID: testDeployment1.GetUID(), - Generation: testDeployment1.GetGeneration(), - }, - { - ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment2ID), - Strategy: actuation.ActuationStrategyApply, - Actuation: actuation.ActuationSucceeded, - Reconcile: actuation.ReconcileSucceeded, - UID: testDeployment2.GetUID(), - Generation: testDeployment2.GetGeneration(), - }, + expectedInventory: inventory.BaseInventory{ + ObjStatuses: []actuation.ObjectStatus{ + { + ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment1ID), + Strategy: actuation.ActuationStrategyApply, + Actuation: actuation.ActuationSucceeded, + Reconcile: actuation.ReconcileFailed, + UID: testDeployment1.GetUID(), + Generation: testDeployment1.GetGeneration(), + }, + { + ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment2ID), + Strategy: actuation.ActuationStrategyApply, + Actuation: actuation.ActuationSucceeded, + Reconcile: actuation.ReconcileSucceeded, + UID: testDeployment2.GetUID(), + Generation: testDeployment2.GetGeneration(), }, }, }, @@ -972,25 +958,23 @@ func TestWaitTask_Failed(t *testing.T) { }, }, }, - expectedInventory: &actuation.Inventory{ - Status: actuation.InventoryStatus{ - Objects: []actuation.ObjectStatus{ - { - ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment1ID), - Strategy: actuation.ActuationStrategyApply, - Actuation: actuation.ActuationSucceeded, - Reconcile: actuation.ReconcileSucceeded, - UID: testDeployment1.GetUID(), - Generation: 
testDeployment1.GetGeneration(), - }, - { - ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment2ID), - Strategy: actuation.ActuationStrategyApply, - Actuation: actuation.ActuationSucceeded, - Reconcile: actuation.ReconcileSucceeded, - UID: testDeployment2.GetUID(), - Generation: testDeployment2.GetGeneration(), - }, + expectedInventory: inventory.BaseInventory{ + ObjStatuses: []actuation.ObjectStatus{ + { + ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment1ID), + Strategy: actuation.ActuationStrategyApply, + Actuation: actuation.ActuationSucceeded, + Reconcile: actuation.ReconcileSucceeded, + UID: testDeployment1.GetUID(), + Generation: testDeployment1.GetGeneration(), + }, + { + ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment2ID), + Strategy: actuation.ActuationStrategyApply, + Actuation: actuation.ActuationSucceeded, + Reconcile: actuation.ReconcileSucceeded, + UID: testDeployment2.GetUID(), + Generation: testDeployment2.GetGeneration(), }, }, }, @@ -1079,25 +1063,23 @@ func TestWaitTask_Failed(t *testing.T) { }, }, }, - expectedInventory: &actuation.Inventory{ - Status: actuation.InventoryStatus{ - Objects: []actuation.ObjectStatus{ - { - ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment1ID), - Strategy: actuation.ActuationStrategyApply, - Actuation: actuation.ActuationSucceeded, - Reconcile: actuation.ReconcileTimeout, - UID: testDeployment1.GetUID(), - Generation: testDeployment1.GetGeneration(), - }, - { - ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment2ID), - Strategy: actuation.ActuationStrategyApply, - Actuation: actuation.ActuationSucceeded, - Reconcile: actuation.ReconcileSucceeded, - UID: testDeployment2.GetUID(), - Generation: testDeployment2.GetGeneration(), - }, + expectedInventory: inventory.BaseInventory{ + ObjStatuses: []actuation.ObjectStatus{ + { + ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment1ID), + Strategy: actuation.ActuationStrategyApply, + Actuation: actuation.ActuationSucceeded, + Reconcile: actuation.ReconcileTimeout, + UID: testDeployment1.GetUID(), + Generation: testDeployment1.GetGeneration(), + }, + { + ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment2ID), + Strategy: actuation.ActuationStrategyApply, + Actuation: actuation.ActuationSucceeded, + Reconcile: actuation.ReconcileSucceeded, + UID: testDeployment2.GetUID(), + Generation: testDeployment2.GetGeneration(), }, }, }, @@ -1176,7 +1158,7 @@ func TestWaitTask_UIDChanged(t *testing.T) { eventsFunc func(*cache.ResourceCacheMap, *WaitTask, *TaskContext) waitTimeout time.Duration expectedEvents []event.Event - expectedInventory *actuation.Inventory + expectedInventory inventory.BaseInventory }{ "UID changed after apply means reconcile failure": { condition: AllCurrent, @@ -1240,27 +1222,25 @@ func TestWaitTask_UIDChanged(t *testing.T) { }, }, }, - expectedInventory: &actuation.Inventory{ - Status: actuation.InventoryStatus{ - Objects: []actuation.ObjectStatus{ - { - ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment1ID), - Strategy: actuation.ActuationStrategyApply, - Actuation: actuation.ActuationSucceeded, - // UID change causes failure after apply - Reconcile: actuation.ReconcileFailed, - // Recorded UID should be from the applied object, not the new replacement - UID: testDeployment1.GetUID(), - Generation: testDeployment1.GetGeneration(), - }, - { - ObjectReference: 
inventory.ObjectReferenceFromObjMetadata(testDeployment2ID), - Strategy: actuation.ActuationStrategyApply, - Actuation: actuation.ActuationSucceeded, - Reconcile: actuation.ReconcileSucceeded, - UID: testDeployment2.GetUID(), - Generation: testDeployment2.GetGeneration(), - }, + expectedInventory: inventory.BaseInventory{ + ObjStatuses: []actuation.ObjectStatus{ + { + ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment1ID), + Strategy: actuation.ActuationStrategyApply, + Actuation: actuation.ActuationSucceeded, + // UID change causes failure after apply + Reconcile: actuation.ReconcileFailed, + // Recorded UID should be from the applied object, not the new replacement + UID: testDeployment1.GetUID(), + Generation: testDeployment1.GetGeneration(), + }, + { + ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment2ID), + Strategy: actuation.ActuationStrategyApply, + Actuation: actuation.ActuationSucceeded, + Reconcile: actuation.ReconcileSucceeded, + UID: testDeployment2.GetUID(), + Generation: testDeployment2.GetGeneration(), }, }, }, @@ -1327,27 +1307,25 @@ func TestWaitTask_UIDChanged(t *testing.T) { }, }, }, - expectedInventory: &actuation.Inventory{ - Status: actuation.InventoryStatus{ - Objects: []actuation.ObjectStatus{ - { - ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment1ID), - Strategy: actuation.ActuationStrategyDelete, - Actuation: actuation.ActuationSucceeded, - // UID change causes success after delete - Reconcile: actuation.ReconcileSucceeded, - // Recorded UID should be from the deleted object, not the new replacement - UID: testDeployment1.GetUID(), - // Deleted generation is unknown - }, - { - ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment2ID), - Strategy: actuation.ActuationStrategyDelete, - Actuation: actuation.ActuationSucceeded, - Reconcile: actuation.ReconcileSucceeded, - UID: testDeployment2.GetUID(), - // Deleted generation is unknown - }, + expectedInventory: inventory.BaseInventory{ + ObjStatuses: []actuation.ObjectStatus{ + { + ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment1ID), + Strategy: actuation.ActuationStrategyDelete, + Actuation: actuation.ActuationSucceeded, + // UID change causes success after delete + Reconcile: actuation.ReconcileSucceeded, + // Recorded UID should be from the deleted object, not the new replacement + UID: testDeployment1.GetUID(), + // Deleted generation is unknown + }, + { + ObjectReference: inventory.ObjectReferenceFromObjMetadata(testDeployment2ID), + Strategy: actuation.ActuationStrategyDelete, + Actuation: actuation.ActuationSucceeded, + Reconcile: actuation.ReconcileSucceeded, + UID: testDeployment2.GetUID(), + // Deleted generation is unknown }, }, }, diff --git a/pkg/inventory/configmap-client-factory.go b/pkg/inventory/configmap-client-factory.go index df3e4fd6..811edd74 100644 --- a/pkg/inventory/configmap-client-factory.go +++ b/pkg/inventory/configmap-client-factory.go @@ -18,10 +18,8 @@ type ClientFactory interface { // ConfigMapClientFactory is a factory that creates instances of inventory clients // which are backed by ConfigMaps. 
-type ConfigMapClientFactory struct { - StatusPolicy StatusPolicy -} +type ConfigMapClientFactory struct{} func (ccf ConfigMapClientFactory) NewClient(factory cmdutil.Factory) (Client, error) { - return NewClient(factory, ConfigMapToInventoryObj, InvInfoToConfigMap, ccf.StatusPolicy, ConfigMapGVK) + return NewUnstructuredClient(factory, configMapToInventory, inventoryToConfigMap, ConfigMapGVK) } diff --git a/pkg/inventory/fake-inventory-client.go b/pkg/inventory/fake-inventory-client.go index 8056d039..7e9fad46 100644 --- a/pkg/inventory/fake-inventory-client.go +++ b/pkg/inventory/fake-inventory-client.go @@ -6,23 +6,28 @@ package inventory import ( "context" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" cmdutil "k8s.io/kubectl/pkg/cmd/util" - "sigs.k8s.io/cli-utils/pkg/apis/actuation" - "sigs.k8s.io/cli-utils/pkg/common" "sigs.k8s.io/cli-utils/pkg/object" ) +const ( + // TestInventoryName is the name of the fake inventory used for unit tests + TestInventoryName = "test-inventory" + // TestInventoryNamespace is the namespace of the fake inventory used for unit tests + TestInventoryNamespace = "test-namespace" +) + // FakeClient is a testing implementation of the Client interface. type FakeClient struct { - Objs object.ObjMetadataSet - Status []actuation.ObjectStatus - Err error + Inv Inventory + Err error } var ( _ Client = &FakeClient{} _ ClientFactory = FakeClientFactory{} + + _ Inventory = &FakeInventory{} ) type FakeClientFactory object.ObjMetadataSet @@ -32,60 +37,61 @@ func (f FakeClientFactory) NewClient(cmdutil.Factory) (Client, error) { } // NewFakeClient returns a FakeClient. -func NewFakeClient(initObjs object.ObjMetadataSet) *FakeClient { +func NewFakeClient(objs object.ObjMetadataSet) *FakeClient { return &FakeClient{ - Objs: initObjs, - Err: nil, - } -} - -// GetClusterObjs returns currently stored set of objects. -func (fic *FakeClient) GetClusterObjs(context.Context, Info) (object.ObjMetadataSet, error) { - if fic.Err != nil { - return object.ObjMetadataSet{}, fic.Err + Inv: &FakeInventory{ + InventoryID: TestInventoryName, + InventoryNamespace: TestInventoryNamespace, + BaseInventory: BaseInventory{ + Objs: objs, + }, + }, + Err: nil, } - return fic.Objs, nil } -// Merge stores the passed objects with the current stored cluster inventory -// objects. Returns the set difference of the current set of objects minus -// the passed set of objects, or an error if one is set up. -func (fic *FakeClient) Merge(_ context.Context, _ Info, objs object.ObjMetadataSet, _ common.DryRunStrategy) (object.ObjMetadataSet, error) { +// Get returns currently stored inventory. +func (fic *FakeClient) Get(ctx context.Context, id Info, opts GetOptions) (Inventory, error) { if fic.Err != nil { - return object.ObjMetadataSet{}, fic.Err + return nil, fic.Err } - diffObjs := fic.Objs.Diff(objs) - fic.Objs = fic.Objs.Union(objs) - return diffObjs, nil + return fic.Inv, nil } -// Replace the stored cluster inventory objs with the passed obj, or an +// CreateOrUpdate the stored cluster inventory objs with the passed obj, or an // error if one is set up. -func (fic *FakeClient) Replace(_ context.Context, _ Info, objs object.ObjMetadataSet, status []actuation.ObjectStatus, - _ common.DryRunStrategy) error { +func (fic *FakeClient) CreateOrUpdate(ctx context.Context, inv Inventory, opts UpdateOptions) error { if fic.Err != nil { return fic.Err } - fic.Objs = objs - fic.Status = status + fic.Inv = inv return nil } -// DeleteInventoryObj returns an error if one is forced; does nothing otherwise. 
-func (fic *FakeClient) DeleteInventoryObj(context.Context, Info, common.DryRunStrategy) error { +// UpdateStatus the stored cluster inventory objs with the passed obj, or an +// error if one is set up. +func (fic *FakeClient) UpdateStatus(ctx context.Context, inv Inventory, opts UpdateOptions) error { if fic.Err != nil { return fic.Err } + fic.Inv = inv return nil } -func (fic *FakeClient) ApplyInventoryNamespace(*unstructured.Unstructured, common.DryRunStrategy) error { +// Delete returns an error if one is forced; does nothing otherwise. +func (fic *FakeClient) Delete(ctx context.Context, id Info, opts DeleteOptions) error { if fic.Err != nil { return fic.Err } return nil } +// List the in-cluster inventory +// Performs a simple in-place update on the ConfigMap +func (fic *FakeClient) List(ctx context.Context, opts ListOptions) ([]Inventory, error) { + return nil, nil +} + // SetError forces an error on the subsequent client call if it returns an error. func (fic *FakeClient) SetError(err error) { fic.Err = err @@ -96,14 +102,20 @@ func (fic *FakeClient) ClearError() { fic.Err = nil } -func (fic *FakeClient) GetClusterInventoryInfo(Info) (*unstructured.Unstructured, error) { - return nil, nil +type FakeInventory struct { + BaseInventory + InventoryID ID + InventoryNamespace string +} + +func (fi *FakeInventory) ID() ID { + return fi.InventoryID } -func (fic *FakeClient) GetClusterInventoryObjs(_ Info) (object.UnstructuredSet, error) { - return object.UnstructuredSet{}, nil +func (fi *FakeInventory) Namespace() string { + return fi.InventoryNamespace } -func (fic *FakeClient) ListClusterInventoryObjs(_ context.Context) (map[string]object.ObjMetadataSet, error) { - return map[string]object.ObjMetadataSet{}, nil +func (fi *FakeInventory) NewEmptyInventory() Inventory { + return fi } diff --git a/pkg/inventory/inventory-client.go b/pkg/inventory/inventory-client.go index 20386052..a792ebb6 100644 --- a/pkg/inventory/inventory-client.go +++ b/pkg/inventory/inventory-client.go @@ -6,473 +6,396 @@ package inventory import ( "context" "fmt" + "maps" apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/discovery" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/dynamic" + "k8s.io/client-go/util/retry" "k8s.io/klog/v2" cmdutil "k8s.io/kubectl/pkg/cmd/util" "sigs.k8s.io/cli-utils/pkg/apis/actuation" "sigs.k8s.io/cli-utils/pkg/common" "sigs.k8s.io/cli-utils/pkg/object" + "sigs.k8s.io/controller-runtime/pkg/client" ) // Client expresses an interface for interacting with // objects which store references to objects (inventory objects). type Client interface { - // GetClusterObjs returns the set of previously applied objects as ObjMetadata, - // or an error if one occurred. This set of previously applied object references - // is stored in the inventory objects living in the cluster. - GetClusterObjs(ctx context.Context, inv Info) (object.ObjMetadataSet, error) - // Merge applies the union of the passed objects with the currently - // stored objects in the inventory object. Returns the set of - // objects which are not in the passed objects (objects to be pruned). - // Otherwise, returns an error if one happened. 
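A minimal example of how a unit test might exercise the fake client above; the test name and example package are illustrative. The fake ignores the Info argument and always returns its cached inventory, which is why the production tests (and this sketch) pass nil to Get.

package example

import (
	"context"
	"testing"

	"sigs.k8s.io/cli-utils/pkg/inventory"
	"sigs.k8s.io/cli-utils/pkg/object"
)

// TestFakeClientRoundTrip seeds the fake client with a set of object refs and
// reads the same set back through the ReadClient surface.
func TestFakeClientRoundTrip(t *testing.T) {
	seed := object.ObjMetadataSet{} // populate with object.ObjMetadata values as needed
	client := inventory.NewFakeClient(seed)

	inv, err := client.Get(context.Background(), nil, inventory.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if !inv.ObjectRefs().Equal(seed) {
		t.Errorf("expected %v, got %v", seed, inv.ObjectRefs())
	}
}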
- Merge(ctx context.Context, inv Info, objs object.ObjMetadataSet, dryRun common.DryRunStrategy) (object.ObjMetadataSet, error) - // Replace replaces the set of objects stored in the inventory - // object with the passed set of objects, or an error if one occurs. - Replace(ctx context.Context, inv Info, objs object.ObjMetadataSet, status []actuation.ObjectStatus, dryRun common.DryRunStrategy) error - // DeleteInventoryObj deletes the passed inventory object from the APIServer. - DeleteInventoryObj(ctx context.Context, inv Info, dryRun common.DryRunStrategy) error - // ListClusterInventoryObjs returns a map mapping from inventory name to a list of cluster inventory objects - ListClusterInventoryObjs(ctx context.Context) (map[string]object.ObjMetadataSet, error) -} - -// ClusterClient is a concrete implementation of the -// Client interface. -type ClusterClient struct { - dc dynamic.Interface - discoveryClient discovery.CachedDiscoveryInterface - mapper meta.RESTMapper - InventoryFactoryFunc StorageFactoryFunc - invToUnstructuredFunc ToUnstructuredFunc - statusPolicy StatusPolicy - gvk schema.GroupVersionKind -} - -var _ Client = &ClusterClient{} - -// NewClient returns a concrete implementation of the -// Client interface or an error. -func NewClient(factory cmdutil.Factory, - invFunc StorageFactoryFunc, - invToUnstructuredFunc ToUnstructuredFunc, - statusPolicy StatusPolicy, - gvk schema.GroupVersionKind, -) (*ClusterClient, error) { - dc, err := factory.DynamicClient() - if err != nil { - return nil, err - } - mapper, err := factory.ToRESTMapper() - if err != nil { - return nil, err - } - discoveryClinet, err := factory.ToDiscoveryClient() - if err != nil { - return nil, err - } - clusterClient := ClusterClient{ - dc: dc, - discoveryClient: discoveryClinet, - mapper: mapper, - InventoryFactoryFunc: invFunc, - invToUnstructuredFunc: invToUnstructuredFunc, - statusPolicy: statusPolicy, - gvk: gvk, - } - return &clusterClient, nil -} - -// Merge stores the union of the passed objects with the objects currently -// stored in the cluster inventory object. Retrieves and caches the cluster -// inventory object. Returns the set differrence of the cluster inventory -// objects and the currently applied objects. This is the set of objects -// to prune. Creates the initial cluster inventory object storing the passed -// objects if an inventory object does not exist. Returns an error if one -// occurred. -func (cic *ClusterClient) Merge(ctx context.Context, localInv Info, objs object.ObjMetadataSet, dryRun common.DryRunStrategy) (object.ObjMetadataSet, error) { - pruneIDs := object.ObjMetadataSet{} - invObj, err := cic.invToUnstructuredFunc(localInv) - if err != nil { - return pruneIDs, err - } - clusterInv, err := cic.getClusterInventoryInfo(ctx, localInv) - if err != nil { - return pruneIDs, err - } + ReadClient + WriteClient + Factory +} - // Inventory does not exist on the cluster. - if clusterInv == nil { - // Wrap inventory object and store the inventory in it. - var status []actuation.ObjectStatus - if cic.statusPolicy == StatusPolicyAll { - status = getObjStatus(nil, objs) - } - inv, err := cic.InventoryFactoryFunc(invObj) - if err != nil { - return nil, err - } - if err := inv.Store(objs, status); err != nil { - return nil, err - } - klog.V(4).Infof("creating initial inventory object with %d objects", len(objs)) +// ID is a unique identifier for an inventory object. +// It's used by the inventory client to get/update/delete the object. 
+// For example, it could be a name or a label depending on the inventory client +// implementation. +type ID string - if dryRun.ClientOrServerDryRun() { - klog.V(4).Infof("dry-run create inventory object: not created") - return nil, nil - } +// String implements the fmt.Stringer interface for ID +func (id ID) String() string { + return string(id) +} - err = inv.Apply(ctx, cic.dc, cic.mapper, cic.statusPolicy) - return nil, err - } +// Info provides the minimal information for the applier +// to create, look up and update an inventory. +// The inventory object can be any type, the Provider in the applier +// needs to know how to create, look up and update it based +// on the Info. +type Info interface { + // ID of the inventory object. + // The inventory client uses this to determine how to get/update the object(s) + // from the cluster. + ID() ID - // Update existing cluster inventory with merged union of objects - clusterObjs, err := cic.GetClusterObjs(ctx, localInv) - if err != nil { - return pruneIDs, err - } - pruneIDs = clusterObjs.Diff(objs) - unionObjs := clusterObjs.Union(objs) - var status []actuation.ObjectStatus - if cic.statusPolicy == StatusPolicyAll { - status = getObjStatus(pruneIDs, unionObjs) - } - klog.V(4).Infof("num objects to prune: %d", len(pruneIDs)) - klog.V(4).Infof("num merged objects to store in inventory: %d", len(unionObjs)) - wrappedInv, err := cic.InventoryFactoryFunc(clusterInv) - if err != nil { - return pruneIDs, err - } - if err = wrappedInv.Store(unionObjs, status); err != nil { - return pruneIDs, err - } + // Namespace of the inventory object. + // It should be the value of the field .metadata.namespace. + Namespace() string - // Update not required when all objects in inventory are the same and - // status does not need to be updated. If status is stored, always update the - // inventory to store the latest status. - if objs.Equal(clusterObjs) && cic.statusPolicy == StatusPolicyNone { - return pruneIDs, nil - } + GetLabels() map[string]string - if dryRun.ClientOrServerDryRun() { - klog.V(4).Infof("dry-run create inventory object: not created") - return pruneIDs, nil - } - err = wrappedInv.Apply(ctx, cic.dc, cic.mapper, cic.statusPolicy) - return pruneIDs, err + GetAnnotations() map[string]string + + DeepCopy() Info } -// Replace stores the passed objects in the cluster inventory object, or -// an error if one occurred. -func (cic *ClusterClient) Replace(ctx context.Context, localInv Info, objs object.ObjMetadataSet, status []actuation.ObjectStatus, - dryRun common.DryRunStrategy) error { - // Skip entire function for dry-run. 
- if dryRun.ClientOrServerDryRun() { - klog.V(4).Infoln("dry-run replace inventory object: not applied") - return nil - } - clusterInv, err := cic.getClusterInventoryInfo(ctx, localInv) - if err != nil { - return fmt.Errorf("failed to read inventory from cluster: %w", err) - } +type SimpleInfo struct { + id ID + namespace string + labels map[string]string + annotations map[string]string +} - clusterObjs, err := cic.GetClusterObjs(ctx, localInv) - if err != nil { - return fmt.Errorf("failed to read inventory objects from cluster: %w", err) - } +func (i SimpleInfo) ID() ID { + return i.id +} - clusterInv, wrappedInv, err := cic.replaceInventory(clusterInv, objs, status) - if err != nil { - return err +func (i SimpleInfo) Namespace() string { + return i.namespace +} + +func (i SimpleInfo) GetLabels() map[string]string { + return i.labels +} + +func (i SimpleInfo) GetAnnotations() map[string]string { + return i.annotations +} + +func (i SimpleInfo) DeepCopy() Info { + return SimpleInfo{ + id: i.id, + namespace: i.namespace, + labels: maps.Clone(i.labels), + annotations: maps.Clone(i.annotations), } +} - // Update not required when all objects in inventory are the same and - // status does not need to be updated. If status is stored, always update the - // inventory to store the latest status. - if objs.Equal(clusterObjs) && cic.statusPolicy == StatusPolicyNone { - return nil +func NewSimpleInfo(id ID, namespace string, labels, annotations map[string]string) SimpleInfo { + return SimpleInfo{ + id: id, + namespace: namespace, + labels: labels, + annotations: annotations, } +} - klog.V(4).Infof("replace cluster inventory: %s/%s", clusterInv.GetNamespace(), clusterInv.GetName()) - klog.V(4).Infof("replace cluster inventory %d objects", len(objs)) +type SingleObjectInfo struct { + SimpleInfo - if err := wrappedInv.ApplyWithPrune(ctx, cic.dc, cic.mapper, cic.statusPolicy, objs); err != nil { - return fmt.Errorf("failed to write updated inventory to cluster: %w", err) - } + name string +} - return nil +func (i SingleObjectInfo) Name() string { + return i.name } -// replaceInventory stores the passed objects into the passed inventory object. -func (cic *ClusterClient) replaceInventory(inv *unstructured.Unstructured, objs object.ObjMetadataSet, - status []actuation.ObjectStatus) (*unstructured.Unstructured, Storage, error) { - if cic.statusPolicy == StatusPolicyNone { - status = nil - } - wrappedInv, err := cic.InventoryFactoryFunc(inv) - if err != nil { - return nil, nil, err - } - if err := wrappedInv.Store(objs, status); err != nil { - return nil, nil, err - } - clusterInv, err := wrappedInv.GetObject() - if err != nil { - return nil, nil, err +func NewSingleObjectInfo(id ID, nn types.NamespacedName, labels, annotations map[string]string) SingleObjectInfo { + return SingleObjectInfo{ + SimpleInfo: SimpleInfo{ + id: id, + namespace: nn.Namespace, + labels: labels, + annotations: annotations, + }, + name: nn.Name, } +} - return clusterInv, wrappedInv, nil +type Factory interface { + // NewInventory returns an empty initialized inventory object. + // This is used in the case that there is no existing object on the cluster. + NewInventory(Info) (Inventory, error) } -// DeleteInventoryObj deletes the inventory object from the cluster. 
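The unstructured client below expects a SingleObjectInfo, so callers construct one with a name, namespace, and inventory ID. A small sketch, assuming it is reasonable to carry the inventory ID on the object as the common.InventoryLabel label; the newConfigMapInfo helper is illustrative, not an API of the package.

package example

import (
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/cli-utils/pkg/common"
	"sigs.k8s.io/cli-utils/pkg/inventory"
)

// newConfigMapInfo builds the Info a single-object (ConfigMap-backed) client
// can use to locate and update its inventory object.
func newConfigMapInfo(name, namespace string, id inventory.ID) inventory.SingleObjectInfo {
	return inventory.NewSingleObjectInfo(
		id,
		types.NamespacedName{Name: name, Namespace: namespace},
		map[string]string{common.InventoryLabel: id.String()}, // assumed label wiring
		nil, // no annotations
	)
}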
-func (cic *ClusterClient) DeleteInventoryObj(ctx context.Context, localInv Info, dryRun common.DryRunStrategy) error { - if localInv == nil { - return fmt.Errorf("retrieving cluster inventory object with nil local inventory") - } - switch localInv.Strategy() { - case NameStrategy: - obj, err := cic.invToUnstructuredFunc(localInv) - if err != nil { - return err - } - return cic.deleteInventoryObjByName(ctx, obj, dryRun) - case LabelStrategy: - return cic.deleteInventoryObjsByLabel(ctx, localInv, dryRun) - default: - panic(fmt.Errorf("unknown inventory strategy: %s", localInv.Strategy())) - } +type Inventory interface { + Info() Info + ObjectRefs() object.ObjMetadataSet + ObjectStatuses() []actuation.ObjectStatus + SetObjectRefs(object.ObjMetadataSet) + SetObjectStatuses([]actuation.ObjectStatus) } -func (cic *ClusterClient) deleteInventoryObjsByLabel(ctx context.Context, inv Info, dryRun common.DryRunStrategy) error { - clusterInvObjs, err := cic.getClusterInventoryObjsByLabel(ctx, inv) - if err != nil { - return err - } - for _, invObj := range clusterInvObjs { - if err := cic.deleteInventoryObjByName(ctx, invObj, dryRun); err != nil { - return err - } - } - return nil +type ReadClient interface { + Get(ctx context.Context, inv Info, opts GetOptions) (Inventory, error) + List(ctx context.Context, opts ListOptions) ([]Inventory, error) } -// GetClusterObjs returns the objects stored in the cluster inventory object, or -// an error if one occurred. -func (cic *ClusterClient) GetClusterObjs(ctx context.Context, localInv Info) (object.ObjMetadataSet, error) { - var objs object.ObjMetadataSet - clusterInv, err := cic.getClusterInventoryInfo(ctx, localInv) - if err != nil { - return objs, fmt.Errorf("failed to read inventory from cluster: %w", err) - } - // First time; no inventory obj yet. - if clusterInv == nil { - return objs, nil - } - wrapped, err := cic.InventoryFactoryFunc(clusterInv) - if err != nil { - return objs, err - } - return wrapped.Load() -} - -// getClusterInventoryObj returns a pointer to the cluster inventory object, or -// an error if one occurred. Returns the cached cluster inventory object if it -// has been previously retrieved. Uses the ResourceBuilder to retrieve the -// inventory object in the cluster, using the namespace, group resource, and -// inventory label. Merges multiple inventory objects into one if it retrieves -// more than one (this should be very rare). -// -// TODO(seans3): Remove the special case code to merge multiple cluster inventory -// objects once we've determined that this case is no longer possible. 
-func (cic *ClusterClient) getClusterInventoryInfo(ctx context.Context, inv Info) (*unstructured.Unstructured, error) { - clusterInvObjects, err := cic.getClusterInventoryObjs(ctx, inv) - if err != nil { - return nil, fmt.Errorf("failed to read inventory objects from cluster: %w", err) - } +type WriteClient interface { + CreateOrUpdate(ctx context.Context, inv Inventory, opts UpdateOptions) error + UpdateStatus(ctx context.Context, inv Inventory, opts UpdateOptions) error + Delete(ctx context.Context, inv Info, opts DeleteOptions) error +} - var clusterInv *unstructured.Unstructured - if len(clusterInvObjects) == 1 { - clusterInv = clusterInvObjects[0] - } else if l := len(clusterInvObjects); l > 1 { - return nil, fmt.Errorf("found %d inventory objects with inventory id %s", l, inv.ID()) - } - return clusterInv, nil +type UpdateOptions struct{} + +type GetOptions struct{} + +type ListOptions struct{} + +type DeleteOptions struct{} + +var _ Client = &UnstructuredClient{} + +// UnstructuredInventory implements Inventory while also tracking the actual +// KRM object from the cluster. This enables the client to update the +// same object and utilize resourceVersion checks. +type UnstructuredInventory struct { + BaseInventory + // ClusterObj represents the KRM which was last fetched from the cluster. + // used by the client implementation to performs updates on the object. + ClusterObj *unstructured.Unstructured } -func (cic *ClusterClient) getClusterInventoryObjsByLabel(ctx context.Context, inv Info) (object.UnstructuredSet, error) { - localInv, err := cic.invToUnstructuredFunc(inv) - if err != nil { - return nil, err - } - if localInv == nil { - return nil, fmt.Errorf("retrieving cluster inventory object with nil local inventory") - } - localObj := object.UnstructuredToObjMetadata(localInv) - mapping, err := cic.getMapping(localInv) - if err != nil { - return nil, err - } - groupResource := mapping.Resource.GroupResource().String() - namespace := localObj.Namespace - label, err := retrieveInventoryLabel(localInv) - if err != nil { - return nil, err +func (ui *UnstructuredInventory) Info() Info { + if ui.ClusterObj == nil { + return SingleObjectInfo{} } - labelSelector := fmt.Sprintf("%s=%s", common.InventoryLabel, label) - klog.V(4).Infof("inventory object fetch by label (group: %q, namespace: %q, selector: %q)", groupResource, namespace, labelSelector) + // TODO: DeepCopy labels & annotations? + return NewSingleObjectInfo(ui.ID(), client.ObjectKeyFromObject(ui.ClusterObj), + maps.Clone(ui.ClusterObj.GetLabels()), maps.Clone(ui.ClusterObj.GetAnnotations())) +} - uList, err := cic.dc.Resource(mapping.Resource).Namespace(namespace).List(ctx, metav1.ListOptions{ - LabelSelector: labelSelector, - }) - if err != nil { - return nil, err - } - var invList []*unstructured.Unstructured - for i := range uList.Items { - invList = append(invList, &uList.Items[i]) +func (ui *UnstructuredInventory) ID() ID { + if ui.ClusterObj == nil { + return "" } - return invList, nil + // Empty string if not set. + return ID(ui.ClusterObj.GetLabels()[common.InventoryLabel]) +} + +var _ Inventory = &UnstructuredInventory{} + +// BaseInventory is a boilerplate struct that contains the basic methods +// to implement Inventory. Can be extended for different inventory implementations. +type BaseInventory struct { + // Objs and ObjStatuses are in memory representations of the inventory which are + // read and manipulated by the applier. 
+ Objs object.ObjMetadataSet + ObjStatuses []actuation.ObjectStatus +} + +func (inv *BaseInventory) ObjectRefs() object.ObjMetadataSet { + return inv.Objs } -func (cic *ClusterClient) getClusterInventoryObjsByName(ctx context.Context, inv Info) (object.UnstructuredSet, error) { - localInv, err := cic.invToUnstructuredFunc(inv) +func (inv *BaseInventory) ObjectStatuses() []actuation.ObjectStatus { + return inv.ObjStatuses +} + +func (inv *BaseInventory) SetObjectRefs(objs object.ObjMetadataSet) { + inv.Objs = objs +} + +func (inv *BaseInventory) SetObjectStatuses(statuses []actuation.ObjectStatus) { + inv.ObjStatuses = statuses +} + +type FromUnstructuredFunc func(*unstructured.Unstructured) (*UnstructuredInventory, error) +type ToUnstructuredFunc func(*UnstructuredInventory) (*unstructured.Unstructured, error) + +// UnstructuredClient implements the inventory client interface for a single unstructured object +type UnstructuredClient struct { + client dynamic.NamespaceableResourceInterface + fromUnstructured FromUnstructuredFunc + toUnstructured ToUnstructuredFunc + gvk schema.GroupVersionKind +} + +func NewUnstructuredClient(factory cmdutil.Factory, + from FromUnstructuredFunc, + to ToUnstructuredFunc, + gvk schema.GroupVersionKind) (*UnstructuredClient, error) { + dc, err := factory.DynamicClient() if err != nil { return nil, err } - if localInv == nil { - return nil, fmt.Errorf("retrieving cluster inventory object with nil local inventory") - } - - mapping, err := cic.getMapping(localInv) + mapper, err := factory.ToRESTMapper() if err != nil { return nil, err } - - klog.V(4).Infof("inventory object fetch by name (namespace: %q, name: %q)", inv.Namespace(), inv.Name()) - clusterInv, err := cic.dc.Resource(mapping.Resource).Namespace(inv.Namespace()). 
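BaseInventory is meant to be embedded, so a custom in-memory implementation only needs to supply Info(). A toy sketch, assuming the Inventory interface exactly as declared above (Info plus the ObjectRefs/ObjectStatuses accessors); memInventory and the example package are illustrative.

package example

import (
	"sigs.k8s.io/cli-utils/pkg/inventory"
	"sigs.k8s.io/cli-utils/pkg/object"
)

// memInventory is a toy in-memory Inventory: the embedded BaseInventory
// supplies the ObjectRefs/ObjectStatuses accessors and setters.
type memInventory struct {
	inventory.BaseInventory
	info inventory.SimpleInfo
}

func (m *memInventory) Info() inventory.Info { return m.info }

// compile-time check, under the interface as declared above
var _ inventory.Inventory = &memInventory{}

func newMemInventory(id inventory.ID, namespace string, refs object.ObjMetadataSet) *memInventory {
	inv := &memInventory{info: inventory.NewSimpleInfo(id, namespace, nil, nil)}
	inv.SetObjectRefs(refs)
	return inv
}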
- Get(ctx, inv.Name(), metav1.GetOptions{}) - if err != nil && !apierrors.IsNotFound(err) { + mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { return nil, err } - if apierrors.IsNotFound(err) { - return object.UnstructuredSet{}, nil + unstructuredClient := &UnstructuredClient{ + client: dc.Resource(mapping.Resource), + fromUnstructured: from, + toUnstructured: to, + gvk: gvk, } - if inv.ID() != "" { - if inventoryID, err := retrieveInventoryLabel(clusterInv); err != nil { - return nil, err - } else if inv.ID() != inventoryID { - return nil, fmt.Errorf("inventory-id of inventory object %s/%s in cluster doesn't match provided id %q", - inv.Namespace(), inv.Name(), inv.ID()) - } - } - return object.UnstructuredSet{clusterInv}, nil + return unstructuredClient, nil } -func (cic *ClusterClient) getClusterInventoryObjs(ctx context.Context, inv Info) (object.UnstructuredSet, error) { - if inv == nil { - return nil, fmt.Errorf("inventoryInfo must be specified") - } - - var clusterInvObjects object.UnstructuredSet - var err error - switch inv.Strategy() { - case NameStrategy: - clusterInvObjects, err = cic.getClusterInventoryObjsByName(ctx, inv) - case LabelStrategy: - clusterInvObjects, err = cic.getClusterInventoryObjsByLabel(ctx, inv) - default: - panic(fmt.Errorf("unknown inventory strategy: %s", inv.Strategy())) - } - return clusterInvObjects, err +func (cic *UnstructuredClient) NewInventory(inv Info) (Inventory, error) { + soi := inv.(SingleObjectInfo) + obj := &unstructured.Unstructured{} + obj.SetName(soi.Name()) + obj.SetNamespace(soi.Namespace()) + obj.SetLabels(maps.Clone(soi.GetLabels())) + obj.SetAnnotations(maps.Clone(soi.GetAnnotations())) + return cic.fromUnstructured(obj) } -func (cic *ClusterClient) ListClusterInventoryObjs(ctx context.Context) (map[string]object.ObjMetadataSet, error) { - // Define the mapping - mapping, err := cic.mapper.RESTMapping(cic.gvk.GroupKind(), cic.gvk.Version) +// Get the in-cluster inventory +func (cic *UnstructuredClient) Get(ctx context.Context, invInfo Info, _ GetOptions) (Inventory, error) { + inv, ok := invInfo.(SingleObjectInfo) + if !ok { + return nil, fmt.Errorf("expected SingleObjectInfo") + } + obj, err := cic.client.Namespace(invInfo.Namespace()).Get(ctx, inv.Name(), metav1.GetOptions{}) if err != nil { return nil, err } + return cic.fromUnstructured(obj) +} - // retrieve the list from the cluster - clusterInvs, err := cic.dc.Resource(mapping.Resource).List(ctx, metav1.ListOptions{}) - if err != nil && !apierrors.IsNotFound(err) { +// List the in-cluster inventory +// Used by the CLI commands +func (cic *UnstructuredClient) List(ctx context.Context, _ ListOptions) ([]Inventory, error) { + objs, err := cic.client.List(ctx, metav1.ListOptions{}) + if err != nil { return nil, err } - if apierrors.IsNotFound(err) { - return map[string]object.ObjMetadataSet{}, nil - } - - identifiers := make(map[string]object.ObjMetadataSet) - - for i, inv := range clusterInvs.Items { - invName := inv.GetName() - identifiers[invName] = object.ObjMetadataSet{} - invObj, err := cic.InventoryFactoryFunc(&clusterInvs.Items[i]) - if err != nil { - return nil, err - } - wrappedInvObjSlice, err := invObj.Load() + var inventories []Inventory + for _, obj := range objs.Items { + uInv, err := cic.fromUnstructured(&obj) if err != nil { return nil, err } - identifiers[invName] = append(identifiers[invName], wrappedInvObjSlice...) 
+ inventories = append(inventories, uInv) } - - return identifiers, nil + return inventories, nil } -// deleteInventoryObjByName deletes the passed inventory object from the APIServer, or -// an error if one occurs. -func (cic *ClusterClient) deleteInventoryObjByName(ctx context.Context, obj *unstructured.Unstructured, dryRun common.DryRunStrategy) error { - if obj == nil { - return fmt.Errorf("attempting delete a nil inventory object") - } - if dryRun.ClientOrServerDryRun() { - klog.V(4).Infof("dry-run delete inventory object: not deleted") +// CreateOrUpdate the in-cluster inventory +// Updates the unstructured object, or creates it if it doesn't exist +func (cic *UnstructuredClient) CreateOrUpdate(ctx context.Context, inv Inventory, _ UpdateOptions) error { + ui, ok := inv.(*UnstructuredInventory) + if !ok { + return fmt.Errorf("expected UnstructuredInventory") + } + if ui == nil { + return fmt.Errorf("inventory is nil") + } + // TODO: avoid deepcopy-ing the labels and annotations + invInfo := inv.Info().(SingleObjectInfo) + // Attempt to retry on a resource conflict error to avoid needing to retry the + // entire Apply/Destroy when there's a transient conflict. + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + create := false + obj, err := cic.client.Namespace(invInfo.Namespace()).Get(ctx, invInfo.Name(), metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + create = true + } else if err != nil { + return err + } + if obj != nil { // Obj is nil when IsNotFound, in this case keep initial/empty obj + ui.ClusterObj = obj + } + uObj, err := cic.toUnstructured(ui) + if err != nil { + return err + } + var newObj *unstructured.Unstructured + if create { + klog.V(4).Infof("creating inventory object %s/%s/%s", cic.gvk, uObj.GetNamespace(), uObj.GetName()) + newObj, err = cic.client.Namespace(uObj.GetNamespace()).Create(ctx, uObj, metav1.CreateOptions{}) + if err != nil { + return err + } + } else { + klog.V(4).Infof("updating inventory object %s/%s/%s", cic.gvk, uObj.GetNamespace(), uObj.GetName()) + newObj, err = cic.client.Namespace(uObj.GetNamespace()).Update(ctx, uObj, metav1.UpdateOptions{}) + if err != nil { + return err + } + } + ui.ClusterObj = newObj return nil - } - - mapping, err := cic.getMapping(obj) - if err != nil { - return err - } - - klog.V(4).Infof("deleting inventory object: %s/%s", obj.GetNamespace(), obj.GetName()) - return cic.dc.Resource(mapping.Resource).Namespace(obj.GetNamespace()). - Delete(ctx, obj.GetName(), metav1.DeleteOptions{}) + }) } -// getMapping returns the RESTMapping for the provided resource. -func (cic *ClusterClient) getMapping(obj *unstructured.Unstructured) (*meta.RESTMapping, error) { - return cic.mapper.RESTMapping(obj.GroupVersionKind().GroupKind(), obj.GroupVersionKind().Version) +// UpdateStatus updates the status of the in-cluster inventory +// Performs a simple in-place update on the unstructured object +func (cic *UnstructuredClient) UpdateStatus(ctx context.Context, inv Inventory, _ UpdateOptions) error { + ui, ok := inv.(*UnstructuredInventory) + if !ok { + return fmt.Errorf("expected UnstructuredInventory") + } + if ui == nil { + return fmt.Errorf("inventory is nil") + } + // TODO: avoid deepcopy-ing the labels and annotations + invInfo := inv.Info().(SingleObjectInfo) + // Attempt to retry on a resource conflict error to avoid needing to retry the + // entire Apply/Destroy when there's a transient conflict. 
+ return retry.RetryOnConflict(retry.DefaultRetry, func() error { + obj, err := cic.client.Namespace(invInfo.Namespace()).Get(ctx, invInfo.Name(), metav1.GetOptions{}) + if err != nil { + return err + } + ui.ClusterObj = obj + uObj, err := cic.toUnstructured(ui) + if err != nil { + return err + } + // Update observedGeneration, if it exists + _, ok, err = unstructured.NestedInt64(uObj.Object, "status", "observedGeneration") + if err != nil { + return err + } + if ok { + err = unstructured.SetNestedField(uObj.Object, uObj.GetGeneration(), "status", "observedGeneration") + if err != nil { + return err + } + } + klog.V(4).Infof("updating status of inventory object %s/%s/%s", cic.gvk, uObj.GetNamespace(), uObj.GetName()) + newObj, err := cic.client.Namespace(uObj.GetNamespace()).UpdateStatus(ctx, uObj, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("updateStatus: %w", err) + } + ui.ClusterObj = newObj + return nil + }) } -// getObjStatus returns the list of object status -// at the beginning of an apply process. -func getObjStatus(pruneIDs, unionIDs []object.ObjMetadata) []actuation.ObjectStatus { - status := []actuation.ObjectStatus{} - for _, obj := range unionIDs { - status = append(status, - actuation.ObjectStatus{ - ObjectReference: ObjectReferenceFromObjMetadata(obj), - Strategy: actuation.ActuationStrategyApply, - Actuation: actuation.ActuationPending, - Reconcile: actuation.ReconcilePending, - }) +// Delete the in-cluster inventory +// Performs a simple deletion of the unstructured object +func (cic *UnstructuredClient) Delete(ctx context.Context, invInfo Info, _ DeleteOptions) error { + soi, ok := invInfo.(SingleObjectInfo) + if !ok { + return fmt.Errorf("expected SingleObjectInfo") } - for _, obj := range pruneIDs { - status = append(status, - actuation.ObjectStatus{ - ObjectReference: ObjectReferenceFromObjMetadata(obj), - Strategy: actuation.ActuationStrategyDelete, - Actuation: actuation.ActuationPending, - Reconcile: actuation.ReconcilePending, - }) + err := cic.client.Namespace(soi.Namespace()).Delete(ctx, soi.Name(), metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("delete: %w", err) } - return status + return nil } diff --git a/pkg/inventory/inventory-client_test.go b/pkg/inventory/inventory-client_test.go index bdf150d6..74265120 100644 --- a/pkg/inventory/inventory-client_test.go +++ b/pkg/inventory/inventory-client_test.go @@ -4,18 +4,15 @@ package inventory import ( - "fmt" + "context" "testing" - "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/require" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/cli-runtime/pkg/resource" clienttesting "k8s.io/client-go/testing" cmdtesting "k8s.io/kubectl/pkg/cmd/testing" "sigs.k8s.io/cli-utils/pkg/apis/actuation" - "sigs.k8s.io/cli-utils/pkg/common" "sigs.k8s.io/cli-utils/pkg/object" ) @@ -28,19 +25,7 @@ func podStatus(info *resource.Info) actuation.ObjectStatus { } } -func podData(name string) map[string]string { - return map[string]string{ - fmt.Sprintf("test-inventory-namespace_%s__Pod", name): "{\"actuation\":\"Succeeded\",\"reconcile\":\"Succeeded\",\"strategy\":\"Apply\"}", - } -} - -func podDataNoStatus(name string) map[string]string { - return map[string]string{ - fmt.Sprintf("test-inventory-namespace_%s__Pod", name): "", - } -} - -func TestGetClusterInventoryInfo(t *testing.T) { +func TestGet(t *testing.T) { localInv, err := ConfigMapToInventoryInfo(inventoryObj) require.NoError(t, err) tests := 
map[string]struct { @@ -83,316 +68,159 @@ func TestGetClusterInventoryInfo(t *testing.T) { }, } - tf := cmdtesting.NewTestFactory().WithNamespace(testNamespace) - defer tf.Cleanup() - for name, tc := range tests { t.Run(name, func(t *testing.T) { - invClient, err := NewClient(tf, - ConfigMapToInventoryObj, InvInfoToConfigMap, tc.statusPolicy, ConfigMapGVK) + tf := cmdtesting.NewTestFactory().WithNamespace(testNamespace) + defer tf.Cleanup() + tf.FakeDynamicClient.PrependReactor("get", "configmaps", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) { + cm, _ := inventoryToConfigMap(&UnstructuredInventory{ + ClusterObj: copyInventoryInfo(), + BaseInventory: BaseInventory{ + Objs: tc.localObjs, + ObjStatuses: tc.objStatus, + }, + }) + return true, cm, nil + }) + invClient, err := NewUnstructuredClient(tf, + configMapToInventory, inventoryToConfigMap, ConfigMapGVK) require.NoError(t, err) - if tc.inv != nil { - wrapped, err := storeObjsInInventory(tc.inv, tc.localObjs, tc.objStatus) - require.NoError(t, err) - err = wrapped.Apply(t.Context(), invClient.dc, invClient.mapper, tc.statusPolicy) - require.NoError(t, err) - } - clusterInv, err := invClient.getClusterInventoryInfo(t.Context(), tc.inv) + clusterInv, err := invClient.Get(t.Context(), tc.inv, GetOptions{}) if tc.isError { require.Error(t, err) return } require.NoError(t, err) - require.NotNil(t, clusterInv) - wrapped, err := ConfigMapToInventoryObj(clusterInv) - require.NoError(t, err) - clusterObjs, err := wrapped.Load() - require.NoError(t, err) - if !tc.localObjs.Equal(clusterObjs) { - t.Fatalf("expected cluster objs (%v), got (%v)", tc.localObjs, clusterObjs) + if clusterInv != nil { + if !tc.localObjs.Equal(clusterInv.ObjectRefs()) { + t.Fatalf("expected cluster objs (%v), got (%v)", tc.localObjs, clusterInv.ObjectRefs()) + } } }) } } -func TestMerge(t *testing.T) { +func TestCreateOrUpdate(t *testing.T) { tests := map[string]struct { - statusPolicy StatusPolicy - localInv Info - localObjs object.ObjMetadataSet - clusterObjs object.ObjMetadataSet - pruneObjs object.ObjMetadataSet - isError bool + inventory *UnstructuredInventory + createObjs object.ObjMetadataSet + updateObjs object.ObjMetadataSet + isError bool }{ "Nil local inventory object is error": { - localInv: nil, - localObjs: object.ObjMetadataSet{}, - clusterObjs: object.ObjMetadataSet{}, - pruneObjs: object.ObjMetadataSet{}, - isError: true, - statusPolicy: StatusPolicyAll, - }, - "Cluster and local inventories empty: no prune objects; no change": { - localInv: copyInventory(t), - localObjs: object.ObjMetadataSet{}, - clusterObjs: object.ObjMetadataSet{}, - pruneObjs: object.ObjMetadataSet{}, - isError: false, - statusPolicy: StatusPolicyAll, + inventory: nil, + createObjs: object.ObjMetadataSet{}, + isError: true, }, - "Cluster and local inventories same: no prune objects; no change": { - localInv: copyInventory(t), - localObjs: object.ObjMetadataSet{ - ignoreErrInfoToObjMeta(pod1Info), + "Create and update inventory with empty object set": { + inventory: &UnstructuredInventory{ + ClusterObj: copyInventoryInfo(), }, - clusterObjs: object.ObjMetadataSet{ - ignoreErrInfoToObjMeta(pod1Info), - }, - pruneObjs: object.ObjMetadataSet{}, - isError: false, - statusPolicy: StatusPolicyAll, + createObjs: object.ObjMetadataSet{}, + updateObjs: object.ObjMetadataSet{}, + isError: false, }, - "Cluster two obj, local one: prune obj": { - localInv: copyInventory(t), - localObjs: object.ObjMetadataSet{ - ignoreErrInfoToObjMeta(pod1Info), + "Create and 
Update inventory with identical object set": { + inventory: &UnstructuredInventory{ + ClusterObj: copyInventoryInfo(), }, - clusterObjs: object.ObjMetadataSet{ + createObjs: object.ObjMetadataSet{ ignoreErrInfoToObjMeta(pod1Info), - ignoreErrInfoToObjMeta(pod3Info), }, - pruneObjs: object.ObjMetadataSet{ - ignoreErrInfoToObjMeta(pod3Info), - }, - statusPolicy: StatusPolicyAll, - isError: false, - }, - "Cluster multiple objs, local multiple different objs: prune objs": { - localInv: copyInventory(t), - localObjs: object.ObjMetadataSet{ - ignoreErrInfoToObjMeta(pod2Info), - }, - clusterObjs: object.ObjMetadataSet{ + updateObjs: object.ObjMetadataSet{ ignoreErrInfoToObjMeta(pod1Info), - ignoreErrInfoToObjMeta(pod2Info), - ignoreErrInfoToObjMeta(pod3Info)}, - pruneObjs: object.ObjMetadataSet{ - ignoreErrInfoToObjMeta(pod1Info), - ignoreErrInfoToObjMeta(pod3Info), }, - statusPolicy: StatusPolicyAll, - isError: false, - }, - } - - for name, tc := range tests { - for i := range common.Strategies { - drs := common.Strategies[i] - t.Run(name, func(t *testing.T) { - tf := cmdtesting.NewTestFactory().WithNamespace(testNamespace) - defer tf.Cleanup() - - tf.FakeDynamicClient.PrependReactor("list", "configmaps", toReactionFunc(tc.clusterObjs)) - // Create the local inventory object storing "tc.localObjs" - invClient, err := NewClient(tf, - ConfigMapToInventoryObj, InvInfoToConfigMap, tc.statusPolicy, ConfigMapGVK) - require.NoError(t, err) - - // Call "Merge" to create the union of clusterObjs and localObjs. - pruneObjs, err := invClient.Merge(t.Context(), tc.localInv, tc.localObjs, drs) - if tc.isError { - if err == nil { - t.Fatalf("expected error but received none") - } - return - } - if !tc.isError && err != nil { - t.Fatalf("unexpected error: %s", err) - } - if !tc.pruneObjs.Equal(pruneObjs) { - t.Errorf("expected (%v) prune objs; got (%v)", tc.pruneObjs, pruneObjs) - } - }) - } - } -} - -func TestReplace(t *testing.T) { - tests := map[string]struct { - statusPolicy StatusPolicy - localObjs object.ObjMetadataSet - clusterObjs object.ObjMetadataSet - objStatus []actuation.ObjectStatus - data map[string]string - }{ - "Cluster and local inventories empty": { - localObjs: object.ObjMetadataSet{}, - clusterObjs: object.ObjMetadataSet{}, - data: map[string]string{}, + isError: false, }, - "Cluster and local inventories same": { - localObjs: object.ObjMetadataSet{ - ignoreErrInfoToObjMeta(pod1Info), + "Create and Update inventory with expanding object set": { + inventory: &UnstructuredInventory{ + ClusterObj: copyInventoryInfo(), }, - clusterObjs: object.ObjMetadataSet{ + createObjs: object.ObjMetadataSet{ ignoreErrInfoToObjMeta(pod1Info), }, - objStatus: []actuation.ObjectStatus{podStatus(pod1Info)}, - data: podData("pod-1"), - statusPolicy: StatusPolicyAll, - }, - "Cluster two obj, local one": { - localObjs: object.ObjMetadataSet{ - ignoreErrInfoToObjMeta(pod1Info), - }, - clusterObjs: object.ObjMetadataSet{ + updateObjs: object.ObjMetadataSet{ ignoreErrInfoToObjMeta(pod1Info), ignoreErrInfoToObjMeta(pod3Info), }, - objStatus: []actuation.ObjectStatus{podStatus(pod1Info), podStatus(pod3Info)}, - data: podData("pod-1"), - statusPolicy: StatusPolicyAll, + isError: false, }, - "Cluster multiple objs, local multiple different objs": { - localObjs: object.ObjMetadataSet{ - ignoreErrInfoToObjMeta(pod2Info), + "Create and Update inventory with shrinking object set": { + inventory: &UnstructuredInventory{ + ClusterObj: copyInventoryInfo(), }, - clusterObjs: object.ObjMetadataSet{ + createObjs: 
object.ObjMetadataSet{ ignoreErrInfoToObjMeta(pod1Info), ignoreErrInfoToObjMeta(pod2Info), - ignoreErrInfoToObjMeta(pod3Info)}, - objStatus: []actuation.ObjectStatus{podStatus(pod2Info), podStatus(pod1Info), podStatus(pod3Info)}, - data: podData("pod-2"), - statusPolicy: StatusPolicyAll, - }, - "Cluster multiple objs, local multiple different objs with StatusPolicyNone": { - localObjs: object.ObjMetadataSet{ - ignoreErrInfoToObjMeta(pod2Info), + ignoreErrInfoToObjMeta(pod3Info), }, - clusterObjs: object.ObjMetadataSet{ + updateObjs: object.ObjMetadataSet{ ignoreErrInfoToObjMeta(pod1Info), - ignoreErrInfoToObjMeta(pod2Info), - ignoreErrInfoToObjMeta(pod3Info)}, - objStatus: []actuation.ObjectStatus{podStatus(pod2Info), podStatus(pod1Info), podStatus(pod3Info)}, - data: podDataNoStatus("pod-2"), - statusPolicy: StatusPolicyNone, + ignoreErrInfoToObjMeta(pod3Info), + }, + isError: false, }, } - tf := cmdtesting.NewTestFactory().WithNamespace(testNamespace) - defer tf.Cleanup() - - // Client and server dry-run do not throw errors. - invClient, err := NewClient(tf, - ConfigMapToInventoryObj, InvInfoToConfigMap, StatusPolicyAll, ConfigMapGVK) - require.NoError(t, err) - err = invClient.Replace(t.Context(), copyInventory(t), object.ObjMetadataSet{}, nil, common.DryRunClient) - if err != nil { - t.Fatalf("unexpected error received: %s", err) - } - err = invClient.Replace(t.Context(), copyInventory(t), object.ObjMetadataSet{}, nil, common.DryRunServer) - if err != nil { - t.Fatalf("unexpected error received: %s", err) - } - for name, tc := range tests { t.Run(name, func(t *testing.T) { - // Create inventory client, and store the cluster objs in the inventory object. - invClient, err := NewClient(tf, - ConfigMapToInventoryObj, InvInfoToConfigMap, tc.statusPolicy, ConfigMapGVK) - require.NoError(t, err) - wrappedInv, err := invClient.InventoryFactoryFunc(inventoryObj) + tf := cmdtesting.NewTestFactory().WithNamespace(testNamespace) + defer tf.Cleanup() + + var updateCalls int + var createCalls int + tf.FakeDynamicClient.PrependReactor("update", "configmaps", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) { + updateCalls++ + return false, nil, nil + }) + tf.FakeDynamicClient.PrependReactor("create", "configmaps", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) { + createCalls++ + return false, nil, nil + }) + + // Create the local inventory object storing "tc.localObjs" + invClient, err := NewUnstructuredClient(tf, + configMapToInventory, inventoryToConfigMap, ConfigMapGVK) require.NoError(t, err) - if err := wrappedInv.Store(tc.clusterObjs, tc.objStatus); err != nil { - t.Fatalf("unexpected error storing inventory objects: %s", err) - } - inv, err := wrappedInv.GetObject() - if err != nil { - t.Fatalf("unexpected error storing inventory objects: %s", err) + + inventory := tc.inventory + if inventory != nil { + inventory.SetObjectRefs(tc.createObjs) } - // Call replaceInventory with the new set of "localObjs" - inv, _, err = invClient.replaceInventory(inv, tc.localObjs, tc.objStatus) - if err != nil { - t.Fatalf("unexpected error received: %s", err) + // Call Update an initial time should create the object + err = invClient.CreateOrUpdate(context.TODO(), inventory, UpdateOptions{}) + if tc.isError { + require.Error(t, err) + return } - wrappedInv, err = invClient.InventoryFactoryFunc(inv) require.NoError(t, err) - // Validate that the stored objects are now the "localObjs". 
- actualObjs, err := wrappedInv.Load() - if err != nil { - t.Fatalf("unexpected error received: %s", err) - } - if !tc.localObjs.Equal(actualObjs) { - t.Errorf("expected objects (%s), got (%s)", tc.localObjs, actualObjs) - } - data, _, err := unstructured.NestedStringMap(inv.Object, "data") - if err != nil { - t.Fatalf("unexpected error received: %s", err) + if updateCalls != 0 || createCalls != 1 { // Update should fail, causing create + t.Fatalf("expected 0 update but got %d and 1 create but got %d", updateCalls, createCalls) } - if diff := cmp.Diff(data, tc.data); diff != "" { - t.Fatal(diff) + inv, err := invClient.Get(context.TODO(), tc.inventory, GetOptions{}) + require.NoError(t, err) + if !tc.createObjs.Equal(inv.ObjectRefs()) { + t.Fatalf("expected %v to equal %v", tc.createObjs, inv.ObjectRefs()) } - }) - } -} - -func TestGetClusterObjs(t *testing.T) { - tests := map[string]struct { - statusPolicy StatusPolicy - localInv Info - clusterObjs object.ObjMetadataSet - isError bool - }{ - "Nil cluster inventory is error": { - localInv: nil, - clusterObjs: object.ObjMetadataSet{}, - isError: true, - }, - "No cluster objs": { - localInv: copyInventory(t), - clusterObjs: object.ObjMetadataSet{}, - isError: false, - }, - "Single cluster obj": { - localInv: copyInventory(t), - clusterObjs: object.ObjMetadataSet{ignoreErrInfoToObjMeta(pod1Info)}, - isError: false, - }, - "Multiple cluster objs": { - localInv: copyInventory(t), - clusterObjs: object.ObjMetadataSet{ignoreErrInfoToObjMeta(pod1Info), ignoreErrInfoToObjMeta(pod3Info)}, - isError: false, - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - tf := cmdtesting.NewTestFactory().WithNamespace(testNamespace) - defer tf.Cleanup() - tf.FakeDynamicClient.PrependReactor("list", "configmaps", toReactionFunc(tc.clusterObjs)) - - invClient, err := NewClient(tf, - ConfigMapToInventoryObj, InvInfoToConfigMap, tc.statusPolicy, ConfigMapGVK) + inventory.SetObjectRefs(tc.updateObjs) + // Call Update a second time should update the existing object + err = invClient.CreateOrUpdate(context.TODO(), inventory, UpdateOptions{}) require.NoError(t, err) - clusterObjs, err := invClient.GetClusterObjs(t.Context(), tc.localInv) - if tc.isError { - if err == nil { - t.Fatalf("expected error but received none") - } - return - } - if !tc.isError && err != nil { - t.Fatalf("unexpected error received: %s", err) + if updateCalls != 1 || createCalls != 1 { // Update should succeed, create not called again + t.Fatalf("expected 1 update but got %d and 1 create but got %d", updateCalls, createCalls) } - if !tc.clusterObjs.Equal(clusterObjs) { - t.Errorf("expected (%v) cluster inventory objs; got (%v)", tc.clusterObjs, clusterObjs) + inv, err = invClient.Get(context.TODO(), tc.inventory, GetOptions{}) + require.NoError(t, err) + if !tc.updateObjs.Equal(inv.ObjectRefs()) { + t.Fatalf("expected %v to equal %v", tc.updateObjs, inv.ObjectRefs()) } }) } } -func TestDeleteInventoryObj(t *testing.T) { +func TestDelete(t *testing.T) { localInv, err := ConfigMapToInventoryInfo(inventoryObj) require.NoError(t, err) tests := map[string]struct { @@ -400,12 +228,12 @@ func TestDeleteInventoryObj(t *testing.T) { inv Info localObjs object.ObjMetadataSet objStatus []actuation.ObjectStatus - wantError bool + wantErr bool }{ "Nil local inventory object is an error": { inv: nil, localObjs: object.ObjMetadataSet{}, - wantError: true, + wantErr: true, }, "Empty local inventory object": { inv: localInv, @@ -433,32 +261,20 @@ func TestDeleteInventoryObj(t *testing.T) { 
} for name, tc := range tests { - for i := range common.Strategies { - drs := common.Strategies[i] - t.Run(name, func(t *testing.T) { - tf := cmdtesting.NewTestFactory().WithNamespace(testNamespace) - defer tf.Cleanup() + t.Run(name, func(t *testing.T) { + tf := cmdtesting.NewTestFactory().WithNamespace(testNamespace) + defer tf.Cleanup() - invClient, err := NewClient(tf, - ConfigMapToInventoryObj, InvInfoToConfigMap, tc.statusPolicy, ConfigMapGVK) + invClient, err := NewUnstructuredClient(tf, + configMapToInventory, inventoryToConfigMap, ConfigMapGVK) + require.NoError(t, err) + err = invClient.Delete(context.TODO(), tc.inv, DeleteOptions{}) + if tc.wantErr { + require.Error(t, err) + } else { require.NoError(t, err) - var obj *unstructured.Unstructured - if tc.inv != nil { - wrapped, err := storeObjsInInventory(tc.inv, tc.localObjs, tc.objStatus) - require.NoError(t, err) - err = wrapped.Apply(t.Context(), invClient.dc, invClient.mapper, tc.statusPolicy) - require.NoError(t, err) - obj, err = wrapped.GetObject() - require.NoError(t, err) - } - err = invClient.deleteInventoryObjByName(t.Context(), obj, drs) - if tc.wantError { - require.Error(t, err) - } else { - require.NoError(t, err) - } - }) - } + } + }) } } @@ -466,32 +282,3 @@ func ignoreErrInfoToObjMeta(info *resource.Info) object.ObjMetadata { objMeta, _ := object.InfoToObjMeta(info) return objMeta } - -func toReactionFunc(objs object.ObjMetadataSet) clienttesting.ReactionFunc { - return func(action clienttesting.Action) (bool, runtime.Object, error) { - u := copyInventoryInfo() - err := unstructured.SetNestedStringMap(u.Object, objs.ToStringMap(), "data") - if err != nil { - return true, nil, err - } - list := &unstructured.UnstructuredList{} - list.Items = []unstructured.Unstructured{*u} - return true, list, err - } -} - -func storeObjsInInventory(info Info, objs object.ObjMetadataSet, status []actuation.ObjectStatus) (Storage, error) { - cm, err := InvInfoToConfigMap(info) - if err != nil { - return nil, err - } - wrapped, err := ConfigMapToInventoryObj(cm) - if err != nil { - return nil, err - } - err = wrapped.Store(objs, status) - if err != nil { - return nil, err - } - return wrapped, err -} diff --git a/pkg/inventory/inventory-info.go b/pkg/inventory/inventory-info.go deleted file mode 100644 index 085becc3..00000000 --- a/pkg/inventory/inventory-info.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package inventory - -type Strategy string - -const ( - NameStrategy Strategy = "name" - LabelStrategy Strategy = "label" -) - -// Info provides the minimal information for the applier -// to create, look up and update an inventory. -// The inventory object can be any type, the Provider in the applier -// needs to know how to create, look up and update it based -// on the Info. -type Info interface { - // Namespace of the inventory object. - // It should be the value of the field .metadata.namespace. - Namespace() string - - // Name of the inventory object. - // It should be the value of the field .metadata.name. - Name() string - - // ID of the inventory object. It is optional. - // The Provider contained in the applier should know - // if the Id is necessary and how to use it for pruning objects. 
- ID() string - - Strategy() Strategy -} diff --git a/pkg/inventory/inventory_test.go b/pkg/inventory/inventory_test.go index b30776a3..6fa8f3d9 100644 --- a/pkg/inventory/inventory_test.go +++ b/pkg/inventory/inventory_test.go @@ -7,7 +7,6 @@ import ( "regexp" "testing" - "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/cli-runtime/pkg/resource" @@ -414,10 +413,3 @@ func TestLegacyInventoryName(t *testing.T) { func copyInventoryInfo() *unstructured.Unstructured { return inventoryObj.DeepCopy() } - -func copyInventory(t *testing.T) Info { - u := inventoryObj.DeepCopy() - invInfoObj, err := ConfigMapToInventoryInfo(u) - require.NoError(t, err) - return invInfoObj -} diff --git a/pkg/inventory/inventorycm.go b/pkg/inventory/inventorycm.go index 74b70762..d5c9ce9e 100644 --- a/pkg/inventory/inventorycm.go +++ b/pkg/inventory/inventorycm.go @@ -9,19 +9,12 @@ package inventory import ( - "context" "encoding/json" "fmt" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/dynamic" - "k8s.io/klog/v2" "sigs.k8s.io/cli-utils/pkg/apis/actuation" - "sigs.k8s.io/cli-utils/pkg/common" "sigs.k8s.io/cli-utils/pkg/object" ) @@ -34,182 +27,69 @@ var ConfigMapGVK = schema.GroupVersionKind{ // ConfigMapToInventoryObj takes a passed ConfigMap (as a resource.Info), // wraps it with the ConfigMap and upcasts the wrapper as // an the Inventory interface. -func ConfigMapToInventoryObj(inv *unstructured.Unstructured) (Storage, error) { - return &ConfigMap{inv: inv}, nil +func ConfigMapToInventoryObj(inv *unstructured.Unstructured) (Inventory, error) { + return configMapToInventory(inv) } // ConfigMapToInventoryInfo takes a passed ConfigMap (as a resource.Info), // wraps it with the ConfigMap and upcasts the wrapper as // an the Info interface. func ConfigMapToInventoryInfo(inv *unstructured.Unstructured) (Info, error) { - return &ConfigMap{inv: inv}, nil + return configMapToInventory(inv) } -func InvInfoToConfigMap(inv Info) (*unstructured.Unstructured, error) { - icm, ok := inv.(*ConfigMap) - if ok { - return icm.inv, nil +// buildDataMap converts the inventory to the storage format to be used in a ConfigMap +func buildDataMap(objMetas object.ObjMetadataSet, objStatus []actuation.ObjectStatus) map[string]string { + objMap := map[string]string{} + objStatusMap := map[object.ObjMetadata]actuation.ObjectStatus{} + for _, status := range objStatus { + objStatusMap[ObjMetadataFromObjectReference(status.ObjectReference)] = status } - return nil, fmt.Errorf("failed to convert to ConfigMap") -} - -// ConfigMap wraps a ConfigMap resource and implements -// the Inventory interface. This wrapper loads and stores the -// object metadata (inventory) to and from the wrapped ConfigMap. -type ConfigMap struct { - inv *unstructured.Unstructured - objMetas object.ObjMetadataSet - objStatus []actuation.ObjectStatus -} - -var _ Info = &ConfigMap{} -var _ Storage = &ConfigMap{} - -func (icm *ConfigMap) Name() string { - return icm.inv.GetName() -} - -func (icm *ConfigMap) Namespace() string { - return icm.inv.GetNamespace() -} - -func (icm *ConfigMap) ID() string { - // Empty string if not set. 
- return icm.inv.GetLabels()[common.InventoryLabel] -} - -func (icm *ConfigMap) Strategy() Strategy { - return LabelStrategy + for _, objMetadata := range objMetas { + if status, found := objStatusMap[objMetadata]; found { + objMap[objMetadata.String()] = stringFrom(status) + } else { + // It's possible that the passed in status doesn't any object status + objMap[objMetadata.String()] = "" + } + } + return objMap } -func (icm *ConfigMap) UnstructuredInventory() *unstructured.Unstructured { - return icm.inv -} +var _ ToUnstructuredFunc = inventoryToConfigMap +var _ FromUnstructuredFunc = configMapToInventory -// Load is an Inventory interface function returning the set of -// object metadata from the wrapped ConfigMap, or an error. -func (icm *ConfigMap) Load() (object.ObjMetadataSet, error) { - objs := object.ObjMetadataSet{} - objMap, exists, err := unstructured.NestedStringMap(icm.inv.Object, "data") +func configMapToInventory(configMap *unstructured.Unstructured) (*UnstructuredInventory, error) { + inv := &UnstructuredInventory{ + ClusterObj: configMap, + } + objMap, exists, err := unstructured.NestedStringMap(configMap.Object, "data") if err != nil { err := fmt.Errorf("error retrieving object metadata from inventory object") - return objs, err + return nil, err } if exists { for objStr := range objMap { obj, err := object.ParseObjMetadata(objStr) if err != nil { - return objs, err + return nil, err } - objs = append(objs, obj) + inv.Objs = append(inv.Objs, obj) } } - return objs, nil + return inv, nil } -// Store is an Inventory interface function implemented to store -// the object metadata in the wrapped ConfigMap. Actual storing -// happens in "GetObject". -func (icm *ConfigMap) Store(objMetas object.ObjMetadataSet, status []actuation.ObjectStatus) error { - icm.objMetas = objMetas - icm.objStatus = status - return nil -} - -// GetObject returns the wrapped object (ConfigMap) as a resource.Info -// or an error if one occurs. -func (icm *ConfigMap) GetObject() (*unstructured.Unstructured, error) { - // Create the objMap of all the resources, and compute the hash. - objMap := buildObjMap(icm.objMetas, icm.objStatus) - // Create the inventory object by copying the template. - invCopy := icm.inv.DeepCopy() +func inventoryToConfigMap(inv *UnstructuredInventory) (*unstructured.Unstructured, error) { + newConfigMap := inv.ClusterObj.DeepCopy() + dataMap := buildDataMap(inv.ObjectRefs(), inv.ObjectStatuses()) // Adds the inventory map to the ConfigMap "data" section. - err := unstructured.SetNestedStringMap(invCopy.UnstructuredContent(), - objMap, "data") + err := unstructured.SetNestedStringMap(newConfigMap.UnstructuredContent(), + dataMap, "data") if err != nil { return nil, err } - return invCopy, nil -} - -// Apply is an Storage interface function implemented to apply the inventory -// object. StatusPolicy is not needed since ConfigMaps do not have a status subresource. -func (icm *ConfigMap) Apply(ctx context.Context, dc dynamic.Interface, mapper meta.RESTMapper, _ StatusPolicy) error { - invInfo, namespacedClient, err := icm.getNamespacedClient(dc, mapper) - if err != nil { - return err - } - - // Get cluster object, if exsists. - clusterObj, err := namespacedClient.Get(ctx, invInfo.GetName(), metav1.GetOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - return err - } - - // Create cluster inventory object, if it does not exist on cluster. 
- if clusterObj == nil { - klog.V(4).Infof("creating inventory object: %s/%s", invInfo.GetNamespace(), invInfo.GetName()) - _, err = namespacedClient.Create(ctx, invInfo, metav1.CreateOptions{}) - return err - } - - // Update the cluster inventory object instead. - klog.V(4).Infof("updating inventory object: %s/%s", invInfo.GetNamespace(), invInfo.GetName()) - _, err = namespacedClient.Update(ctx, invInfo, metav1.UpdateOptions{}) - return err -} - -// ApplyWithPrune is a Storage interface function implemented to apply the inventory object with a list of objects -// to be pruned. StatusPolicy is not needed since ConfigMaps do not have a status subresource. -func (icm *ConfigMap) ApplyWithPrune(ctx context.Context, dc dynamic.Interface, mapper meta.RESTMapper, _ StatusPolicy, _ object.ObjMetadataSet) error { - invInfo, namespacedClient, err := icm.getNamespacedClient(dc, mapper) - if err != nil { - return err - } - - // Update the cluster inventory object. - klog.V(4).Infof("updating inventory object: %s/%s", invInfo.GetNamespace(), invInfo.GetName()) - _, err = namespacedClient.Update(ctx, invInfo, metav1.UpdateOptions{}) - return err -} - -// getNamespacedClient is a helper function for Apply and ApplyWithPrune that creates a namespaced client for interacting with the live -// cluster, as well as returning the ConfigMap object as a wrapped resource.Info object. -func (icm *ConfigMap) getNamespacedClient(dc dynamic.Interface, mapper meta.RESTMapper) (*unstructured.Unstructured, dynamic.ResourceInterface, error) { - invInfo, err := icm.GetObject() - if err != nil { - return nil, nil, err - } - if invInfo == nil { - return nil, nil, fmt.Errorf("attempting to create a nil inventory object") - } - - mapping, err := mapper.RESTMapping(invInfo.GroupVersionKind().GroupKind(), invInfo.GroupVersionKind().Version) - if err != nil { - return nil, nil, err - } - - // Create client to interact with cluster. - namespacedClient := dc.Resource(mapping.Resource).Namespace(invInfo.GetNamespace()) - - return invInfo, namespacedClient, nil -} - -func buildObjMap(objMetas object.ObjMetadataSet, objStatus []actuation.ObjectStatus) map[string]string { - objMap := map[string]string{} - objStatusMap := map[object.ObjMetadata]actuation.ObjectStatus{} - for _, status := range objStatus { - objStatusMap[ObjMetadataFromObjectReference(status.ObjectReference)] = status - } - for _, objMetadata := range objMetas { - if status, found := objStatusMap[objMetadata]; found { - objMap[objMetadata.String()] = stringFrom(status) - } else { - // It's possible that the passed in status doesn't any object status - objMap[objMetadata.String()] = "" - } - } - return objMap + return newConfigMap, err } func stringFrom(status actuation.ObjectStatus) string { diff --git a/pkg/inventory/inventorycm_test.go b/pkg/inventory/inventorycm_test.go index 70222042..46196a77 100644 --- a/pkg/inventory/inventorycm_test.go +++ b/pkg/inventory/inventorycm_test.go @@ -63,7 +63,7 @@ func TestBuildObjMap(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - actual := buildObjMap(tc.objSet, tc.objStatus) + actual := buildDataMap(tc.objSet, tc.objStatus) if diff := cmp.Diff(actual, tc.expected); diff != "" { t.Error(diff) } diff --git a/pkg/inventory/manager.go b/pkg/inventory/manager.go index e9f18563..c9821b27 100644 --- a/pkg/inventory/manager.go +++ b/pkg/inventory/manager.go @@ -14,18 +14,18 @@ import ( // Manager wraps an Inventory with convenience methods that use ObjMetadata. 
type Manager struct { - inventory *actuation.Inventory + inventory BaseInventory } // NewManager returns a new manager instance. func NewManager() *Manager { return &Manager{ - inventory: &actuation.Inventory{}, + inventory: BaseInventory{}, } } // Inventory returns the in-memory version of the managed inventory. -func (tc *Manager) Inventory() *actuation.Inventory { +func (tc *Manager) Inventory() BaseInventory { return tc.inventory } @@ -33,9 +33,9 @@ func (tc *Manager) Inventory() *actuation.Inventory { // The returned status is a pointer and can be updated in-place for efficiency. func (tc *Manager) ObjectStatus(id object.ObjMetadata) (*actuation.ObjectStatus, bool) { ref := ObjectReferenceFromObjMetadata(id) - for i, objStatus := range tc.inventory.Status.Objects { + for i, objStatus := range tc.inventory.ObjStatuses { if objStatus.ObjectReference == ref { - return &(tc.inventory.Status.Objects[i]), true + return &(tc.inventory.ObjStatuses[i]), true } } return nil, false @@ -45,7 +45,7 @@ func (tc *Manager) ObjectStatus(id object.ObjMetadata) (*actuation.ObjectStatus, // specified actuation strategy and status. func (tc *Manager) ObjectsWithActuationStatus(strategy actuation.ActuationStrategy, status actuation.ActuationStatus) object.ObjMetadataSet { var ids object.ObjMetadataSet - for _, objStatus := range tc.inventory.Status.Objects { + for _, objStatus := range tc.inventory.ObjStatuses { if objStatus.Strategy == strategy && objStatus.Actuation == status { ids = append(ids, ObjMetadataFromObjectReference(objStatus.ObjectReference)) } @@ -57,7 +57,7 @@ func (tc *Manager) ObjectsWithActuationStatus(strategy actuation.ActuationStrate // specified reconcile status, regardless of actuation strategy. func (tc *Manager) ObjectsWithReconcileStatus(status actuation.ReconcileStatus) object.ObjMetadataSet { var ids object.ObjMetadataSet - for _, objStatus := range tc.inventory.Status.Objects { + for _, objStatus := range tc.inventory.ObjStatuses { if objStatus.Reconcile == status { ids = append(ids, ObjMetadataFromObjectReference(objStatus.ObjectReference)) } @@ -67,13 +67,13 @@ func (tc *Manager) ObjectsWithReconcileStatus(status actuation.ReconcileStatus) // SetObjectStatus updates or adds an ObjectStatus record to the inventory. func (tc *Manager) SetObjectStatus(newObjStatus actuation.ObjectStatus) { - for i, oldObjStatus := range tc.inventory.Status.Objects { + for i, oldObjStatus := range tc.inventory.ObjStatuses { if oldObjStatus.ObjectReference == newObjStatus.ObjectReference { - tc.inventory.Status.Objects[i] = newObjStatus + tc.inventory.ObjStatuses[i] = newObjStatus return } } - tc.inventory.Status.Objects = append(tc.inventory.Status.Objects, newObjStatus) + tc.inventory.ObjStatuses = append(tc.inventory.ObjStatuses, newObjStatus) } // IsSuccessfulApply returns true if the object apply was successful @@ -119,7 +119,7 @@ func (tc *Manager) AppliedResourceUID(id object.ObjMetadata) (types.UID, bool) { // successfully applied resources. 
func (tc *Manager) AppliedResourceUIDs() sets.String { // nolint:staticcheck uids := sets.NewString() - for _, objStatus := range tc.inventory.Status.Objects { + for _, objStatus := range tc.inventory.ObjStatuses { if objStatus.Strategy == actuation.ActuationStrategyApply && objStatus.Actuation == actuation.ActuationSucceeded { if objStatus.UID != "" { diff --git a/pkg/inventory/policy.go b/pkg/inventory/policy.go index 0f906a56..f1772eb7 100644 --- a/pkg/inventory/policy.go +++ b/pkg/inventory/policy.go @@ -89,7 +89,7 @@ func IDMatch(inv Info, obj *unstructured.Unstructured) IDMatchStatus { if !found { return Empty } - if value == inv.ID() { + if value == inv.ID().String() { return Match } return NoMatch @@ -141,11 +141,11 @@ func CanPrune(inv Info, obj *unstructured.Unstructured, policy Policy) (bool, er } } -func AddInventoryIDAnnotation(obj *unstructured.Unstructured, inv Info) { +func AddInventoryIDAnnotation(obj *unstructured.Unstructured, inventoryID ID) { annotations := obj.GetAnnotations() if annotations == nil { annotations = make(map[string]string) } - annotations[OwningInventoryKey] = inv.ID() + annotations[OwningInventoryKey] = inventoryID.String() obj.SetAnnotations(annotations) } diff --git a/pkg/inventory/policy_test.go b/pkg/inventory/policy_test.go index 72196d99..6a50072e 100644 --- a/pkg/inventory/policy_test.go +++ b/pkg/inventory/policy_test.go @@ -24,12 +24,12 @@ func (i *fakeInventoryInfo) Namespace() string { return "" } -func (i *fakeInventoryInfo) ID() string { - return i.id +func (i *fakeInventoryInfo) ID() ID { + return ID(i.id) } -func (i *fakeInventoryInfo) Strategy() Strategy { - return NameStrategy +func (i *fakeInventoryInfo) NewEmptyInventory() Inventory { + panic("unimplemented") } func testObjectWithAnnotation(key, val string) *unstructured.Unstructured { diff --git a/pkg/inventory/storage.go b/pkg/inventory/storage.go index ebfcf7ca..736d6b32 100644 --- a/pkg/inventory/storage.go +++ b/pkg/inventory/storage.go @@ -12,15 +12,11 @@ package inventory import ( - "context" "fmt" "strings" - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/client-go/dynamic" "k8s.io/klog/v2" - "sigs.k8s.io/cli-utils/pkg/apis/actuation" "sigs.k8s.io/cli-utils/pkg/common" "sigs.k8s.io/cli-utils/pkg/object" ) @@ -28,35 +24,6 @@ import ( // The default inventory name stored in the inventory template. const legacyInvName = "inventory" -// Storage describes methods necessary for an object which -// can persist the object metadata for pruning and other group -// operations. -type Storage interface { - // Load retrieves the set of object metadata from the inventory object - Load() (object.ObjMetadataSet, error) - // Store the set of object metadata in the inventory object. This will - // replace the metadata, spec and status. - Store(objs object.ObjMetadataSet, status []actuation.ObjectStatus) error - // GetObject returns the object that stores the inventory - GetObject() (*unstructured.Unstructured, error) - // Apply applies the inventory object. This utility function is used - // in InventoryClient.Merge and merges the metadata, spec and status. - Apply(context.Context, dynamic.Interface, meta.RESTMapper, StatusPolicy) error - // ApplyWithPrune applies the inventory object with a set of pruneIDs of - // objects to be pruned (object.ObjMetadataSet). This function is used in - // InventoryClient.Replace. pruneIDs are required for enabling custom logic - // handling of multiple ResourceGroup inventories. 
- ApplyWithPrune(context.Context, dynamic.Interface, meta.RESTMapper, StatusPolicy, object.ObjMetadataSet) error -} - -// StorageFactoryFunc creates the object which implements the Inventory -// interface from the passed info object. -type StorageFactoryFunc func(*unstructured.Unstructured) (Storage, error) - -// ToUnstructuredFunc returns the unstructured object for the -// given Info. -type ToUnstructuredFunc func(Info) (*unstructured.Unstructured, error) - // FindInventoryObj returns the "Inventory" object (ConfigMap with // inventory label) if it exists, or nil if it does not exist. func FindInventoryObj(objs object.UnstructuredSet) *unstructured.Unstructured { diff --git a/test/e2e/customprovider/provider.go b/test/e2e/customprovider/provider.go index 80395386..c48387b4 100644 --- a/test/e2e/customprovider/provider.go +++ b/test/e2e/customprovider/provider.go @@ -4,19 +4,11 @@ package customprovider import ( - "context" - "fmt" "strings" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/dynamic" "k8s.io/kubectl/pkg/cmd/util" - "sigs.k8s.io/cli-utils/pkg/apis/actuation" - "sigs.k8s.io/cli-utils/pkg/common" "sigs.k8s.io/cli-utils/pkg/inventory" "sigs.k8s.io/cli-utils/pkg/object" ) @@ -112,86 +104,12 @@ type CustomClientFactory struct { } func (CustomClientFactory) NewClient(factory util.Factory) (inventory.Client, error) { - return inventory.NewClient(factory, - WrapInventoryObj, invToUnstructuredFunc, inventory.StatusPolicyAll, inventory.ConfigMapGVK) + return inventory.NewUnstructuredClient(factory, fromUnstructured, toUnstructured, InventoryGVK) } -func invToUnstructuredFunc(inv inventory.Info) (*unstructured.Unstructured, error) { - switch invInfo := inv.(type) { - case *InventoryCustomType: - return invInfo.inv, nil - default: - return nil, fmt.Errorf("wrong type: want %T but got %T", InventoryCustomType{}, inv) - } -} - -func WrapInventoryObj(obj *unstructured.Unstructured) (inventory.Storage, error) { - return &InventoryCustomType{inv: obj}, nil -} - -func WrapInventoryInfoObj(obj *unstructured.Unstructured) inventory.Info { - return &InventoryCustomType{inv: obj} -} - -var _ inventory.Storage = &InventoryCustomType{} -var _ inventory.Info = &InventoryCustomType{} - -type InventoryCustomType struct { - inv *unstructured.Unstructured -} - -func (i InventoryCustomType) Namespace() string { - return i.inv.GetNamespace() -} - -func (i InventoryCustomType) Name() string { - return i.inv.GetName() -} - -func (i InventoryCustomType) Strategy() inventory.Strategy { - return inventory.NameStrategy -} - -func (i InventoryCustomType) ID() string { - labels := i.inv.GetLabels() - id, found := labels[common.InventoryLabel] - if !found { - return "" - } - return id -} - -func (i InventoryCustomType) Load() (object.ObjMetadataSet, error) { - var inv object.ObjMetadataSet - s, found, err := unstructured.NestedSlice(i.inv.Object, "spec", "objects") - if err != nil { - return inv, err - } - if !found { - return inv, nil - } - for _, item := range s { - m := item.(map[string]interface{}) - namespace, _, _ := unstructured.NestedString(m, "namespace") - name, _, _ := unstructured.NestedString(m, "name") - group, _, _ := unstructured.NestedString(m, "group") - kind, _, _ := unstructured.NestedString(m, "kind") - id := object.ObjMetadata{ - Namespace: namespace, - Name: name, - GroupKind: schema.GroupKind{ - 
Group: group, - Kind: kind, - }, - } - inv = append(inv, id) - } - return inv, nil -} - -func (i InventoryCustomType) Store(objs object.ObjMetadataSet, status []actuation.ObjectStatus) error { +func toUnstructured(inv *inventory.UnstructuredInventory) (*unstructured.Unstructured, error) { var specObjs []interface{} - for _, obj := range objs { + for _, obj := range inv.Objs { specObjs = append(specObjs, map[string]interface{}{ "group": obj.GroupKind.Group, "kind": obj.GroupKind.Kind, @@ -200,7 +118,7 @@ func (i InventoryCustomType) Store(objs object.ObjMetadataSet, status []actuatio }) } var statusObjs []interface{} - for _, objStatus := range status { + for _, objStatus := range inv.ObjStatuses { statusObjs = append(statusObjs, map[string]interface{}{ "group": objStatus.Group, "kind": objStatus.Kind, @@ -211,96 +129,60 @@ func (i InventoryCustomType) Store(objs object.ObjMetadataSet, status []actuatio "reconcile": objStatus.Reconcile.String(), }) } + objCopy := inv.ClusterObj.DeepCopy() if len(specObjs) > 0 { - err := unstructured.SetNestedSlice(i.inv.Object, specObjs, "spec", "objects") + err := unstructured.SetNestedSlice(objCopy.Object, specObjs, "spec", "objects") if err != nil { - return err + return nil, err } } else { - unstructured.RemoveNestedField(i.inv.Object, "spec") + unstructured.RemoveNestedField(objCopy.Object, "spec") } if len(statusObjs) > 0 { - err := unstructured.SetNestedSlice(i.inv.Object, statusObjs, "status", "objects") + err := unstructured.SetNestedSlice(objCopy.Object, statusObjs, "status", "objects") if err != nil { - return err + return nil, err } } else { - unstructured.RemoveNestedField(i.inv.Object, "status") + unstructured.RemoveNestedField(objCopy.Object, "status") } - return nil -} - -func (i InventoryCustomType) GetObject() (*unstructured.Unstructured, error) { - return i.inv, nil + return objCopy, nil } -// Apply is an Inventory interface function implemented to apply the inventory -// object. -func (i InventoryCustomType) Apply(ctx context.Context, dc dynamic.Interface, mapper meta.RESTMapper, _ inventory.StatusPolicy) error { - invInfo, namespacedClient, err := i.getNamespacedClient(dc, mapper) - if err != nil { - return err - } - - // Get cluster object, if exsists. - clusterObj, err := namespacedClient.Get(ctx, invInfo.GetName(), metav1.GetOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - return err - } - - var appliedObj *unstructured.Unstructured - - if clusterObj == nil { - // Create cluster inventory object, if it does not exist on cluster. - appliedObj, err = namespacedClient.Create(ctx, invInfo, metav1.CreateOptions{}) - } else { - // Update the cluster inventory object instead. - appliedObj, err = namespacedClient.Update(ctx, invInfo, metav1.UpdateOptions{}) +func fromUnstructured(obj *unstructured.Unstructured) (*inventory.UnstructuredInventory, error) { + inv := &inventory.UnstructuredInventory{ + ClusterObj: obj, } + s, found, err := unstructured.NestedSlice(obj.Object, "spec", "objects") if err != nil { - return err + return nil, err } - - // Update status. 
- invInfo.SetResourceVersion(appliedObj.GetResourceVersion()) - _, err = namespacedClient.UpdateStatus(ctx, invInfo, metav1.UpdateOptions{}) - return err -} - -func (i InventoryCustomType) ApplyWithPrune(ctx context.Context, dc dynamic.Interface, mapper meta.RESTMapper, _ inventory.StatusPolicy, _ object.ObjMetadataSet) error { - invInfo, namespacedClient, err := i.getNamespacedClient(dc, mapper) - if err != nil { - return err + if !found { + return inv, nil } - - // Update the cluster inventory object. - appliedObj, err := namespacedClient.Update(ctx, invInfo, metav1.UpdateOptions{}) - if err != nil { - return err + for _, item := range s { + m := item.(map[string]interface{}) + namespace, _, _ := unstructured.NestedString(m, "namespace") + name, _, _ := unstructured.NestedString(m, "name") + group, _, _ := unstructured.NestedString(m, "group") + kind, _, _ := unstructured.NestedString(m, "kind") + id := object.ObjMetadata{ + Namespace: namespace, + Name: name, + GroupKind: schema.GroupKind{ + Group: group, + Kind: kind, + }, + } + inv.Objs = append(inv.Objs, id) } - - // Update status. - invInfo.SetResourceVersion(appliedObj.GetResourceVersion()) - _, err = namespacedClient.UpdateStatus(ctx, invInfo, metav1.UpdateOptions{}) - return err + return inv, nil } -func (i InventoryCustomType) getNamespacedClient(dc dynamic.Interface, mapper meta.RESTMapper) (*unstructured.Unstructured, dynamic.ResourceInterface, error) { - invInfo, err := i.GetObject() - if err != nil { - return nil, nil, err - } - if invInfo == nil { - return nil, nil, fmt.Errorf("attempting to create a nil inventory object") - } - - mapping, err := mapper.RESTMapping(invInfo.GroupVersionKind().GroupKind(), invInfo.GroupVersionKind().Version) +func WrapInventoryInfoObj(obj *unstructured.Unstructured) inventory.Info { + inv, err := fromUnstructured(obj) if err != nil { - return nil, nil, err + panic(err) } - - // Create client to interact with cluster. 
- namespacedClient := dc.Resource(mapping.Resource).Namespace(invInfo.GetNamespace()) - - return invInfo, namespacedClient, nil + return inv } diff --git a/test/e2e/deletion_prevention_test.go b/test/e2e/deletion_prevention_test.go index f15c7021..5460adef 100644 --- a/test/e2e/deletion_prevention_test.go +++ b/test/e2e/deletion_prevention_test.go @@ -38,15 +38,15 @@ func deletionPreventionTest(ctx context.Context, c client.Client, invConfig invc By("Verify deployment created") obj := e2eutil.AssertUnstructuredExists(ctx, c, e2eutil.WithNamespace(e2eutil.ManifestToUnstructured(deployment1), namespaceName)) - Expect(obj.GetAnnotations()[inventory.OwningInventoryKey]).To(Equal(inventoryInfo.ID())) + Expect(obj.GetAnnotations()[inventory.OwningInventoryKey]).To(Equal(inventoryInfo.ID().String())) By("Verify pod1 created") obj = e2eutil.AssertUnstructuredExists(ctx, c, e2eutil.WithNamespace(e2eutil.ManifestToUnstructured(pod1), namespaceName)) - Expect(obj.GetAnnotations()[inventory.OwningInventoryKey]).To(Equal(inventoryInfo.ID())) + Expect(obj.GetAnnotations()[inventory.OwningInventoryKey]).To(Equal(inventoryInfo.ID().String())) By("Verify pod2 created") obj = e2eutil.AssertUnstructuredExists(ctx, c, e2eutil.WithNamespace(e2eutil.ManifestToUnstructured(pod2), namespaceName)) - Expect(obj.GetAnnotations()[inventory.OwningInventoryKey]).To(Equal(inventoryInfo.ID())) + Expect(obj.GetAnnotations()[inventory.OwningInventoryKey]).To(Equal(inventoryInfo.ID().String())) By("Verify the inventory size is 3") invConfig.InvSizeVerifyFunc(ctx, c, inventoryName, namespaceName, inventoryID, 3, 3) @@ -63,15 +63,15 @@ func deletionPreventionTest(ctx context.Context, c client.Client, invConfig invc By("Verify deployment still exists and has the config.k8s.io/owning-inventory annotation") obj = e2eutil.AssertUnstructuredExists(ctx, c, e2eutil.WithNamespace(e2eutil.ManifestToUnstructured(deployment1), namespaceName)) - Expect(obj.GetAnnotations()[inventory.OwningInventoryKey]).To(Equal(inventoryInfo.ID())) + Expect(obj.GetAnnotations()[inventory.OwningInventoryKey]).To(Equal(inventoryInfo.ID().String())) By("Verify pod1 still exits and has the config.k8s.io/owning-inventory annotation") obj = e2eutil.AssertUnstructuredExists(ctx, c, e2eutil.WithNamespace(e2eutil.ManifestToUnstructured(pod1), namespaceName)) - Expect(obj.GetAnnotations()[inventory.OwningInventoryKey]).To(Equal(inventoryInfo.ID())) + Expect(obj.GetAnnotations()[inventory.OwningInventoryKey]).To(Equal(inventoryInfo.ID().String())) By("Verify pod2 still exits and has the config.k8s.io/owning-inventory annotation") obj = e2eutil.AssertUnstructuredExists(ctx, c, e2eutil.WithNamespace(e2eutil.ManifestToUnstructured(pod2), namespaceName)) - Expect(obj.GetAnnotations()[inventory.OwningInventoryKey]).To(Equal(inventoryInfo.ID())) + Expect(obj.GetAnnotations()[inventory.OwningInventoryKey]).To(Equal(inventoryInfo.ID().String())) By("Verify the inventory size is still 3") invConfig.InvSizeVerifyFunc(ctx, c, inventoryName, namespaceName, inventoryID, 3, 3) @@ -87,7 +87,7 @@ func deletionPreventionTest(ctx context.Context, c client.Client, invConfig invc By("Verify deployment still exists and has the config.k8s.io/owning-inventory annotation") obj = e2eutil.AssertUnstructuredExists(ctx, c, e2eutil.WithNamespace(e2eutil.ManifestToUnstructured(deployment1), namespaceName)) - Expect(obj.GetAnnotations()[inventory.OwningInventoryKey]).To(Equal(inventoryInfo.ID())) + Expect(obj.GetAnnotations()[inventory.OwningInventoryKey]).To(Equal(inventoryInfo.ID().String())) 
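// Editor's note: the assertions in this test now compare against
// inventoryInfo.ID().String() because Info.ID() returns a typed ID instead of a
// plain string (see the pkg/inventory/policy.go and policy_test.go hunks above,
// where ID(i.id) and inventoryID.String() appear). The definition of ID is not
// part of this changeset; from that usage it is presumably a string-backed type,
// roughly:
//
//	type ID string
//
//	func (i ID) String() string { return string(i) }
//
// Treat this as an inference from the surrounding hunks, not the actual definition.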
By("Verify pod1 still exits and does not have the config.k8s.io/owning-inventory annotation") obj = e2eutil.AssertUnstructuredExists(ctx, c, e2eutil.WithNamespace(e2eutil.ManifestToUnstructured(pod1), namespaceName)) diff --git a/test/e2e/invconfig/configmap.go b/test/e2e/invconfig/configmap.go index edbbc593..4dbc7dd1 100644 --- a/test/e2e/invconfig/configmap.go +++ b/test/e2e/invconfig/configmap.go @@ -19,7 +19,6 @@ import ( func NewConfigMapTypeInvConfig(cfg *rest.Config) InventoryConfig { return InventoryConfig{ ClientConfig: cfg, - Strategy: inventory.LabelStrategy, FactoryFunc: cmInventoryManifest, InvWrapperFunc: func(obj *unstructured.Unstructured) inventory.Info { info, err := inventory.ConfigMapToInventoryInfo(obj) @@ -37,18 +36,14 @@ func NewConfigMapTypeInvConfig(cfg *rest.Config) InventoryConfig { func newDefaultInvApplierFactory(cfg *rest.Config) applierFactoryFunc { cfgPtrCopy := cfg return func() *apply.Applier { - return newApplier(inventory.ConfigMapClientFactory{ - StatusPolicy: inventory.StatusPolicyAll, - }, cfgPtrCopy) + return newApplier(inventory.ConfigMapClientFactory{}, inventory.StatusPolicyNone, cfgPtrCopy) } } func newDefaultInvDestroyerFactory(cfg *rest.Config) destroyerFactoryFunc { cfgPtrCopy := cfg return func() *apply.Destroyer { - return newDestroyer(inventory.ConfigMapClientFactory{ - StatusPolicy: inventory.StatusPolicyAll, - }, cfgPtrCopy) + return newDestroyer(inventory.ConfigMapClientFactory{}, inventory.StatusPolicyNone, cfgPtrCopy) } } diff --git a/test/e2e/invconfig/custom.go b/test/e2e/invconfig/custom.go index c0deb0dd..11c9b9d8 100644 --- a/test/e2e/invconfig/custom.go +++ b/test/e2e/invconfig/custom.go @@ -25,7 +25,6 @@ import ( func NewCustomTypeInvConfig(cfg *rest.Config) InventoryConfig { return InventoryConfig{ ClientConfig: cfg, - Strategy: inventory.NameStrategy, FactoryFunc: customInventoryManifest, InvWrapperFunc: customprovider.WrapInventoryInfoObj, ApplierFactoryFunc: newCustomInvApplierFactory(cfg), @@ -39,14 +38,14 @@ func NewCustomTypeInvConfig(cfg *rest.Config) InventoryConfig { func newCustomInvApplierFactory(cfg *rest.Config) applierFactoryFunc { cfgPtrCopy := cfg return func() *apply.Applier { - return newApplier(customprovider.CustomClientFactory{}, cfgPtrCopy) + return newApplier(customprovider.CustomClientFactory{}, inventory.StatusPolicyAll, cfgPtrCopy) } } func newCustomInvDestroyerFactory(cfg *rest.Config) destroyerFactoryFunc { cfgPtrCopy := cfg return func() *apply.Destroyer { - return newDestroyer(customprovider.CustomClientFactory{}, cfgPtrCopy) + return newDestroyer(customprovider.CustomClientFactory{}, inventory.StatusPolicyAll, cfgPtrCopy) } } diff --git a/test/e2e/invconfig/invconfig.go b/test/e2e/invconfig/invconfig.go index e929e3a8..639cfcb8 100644 --- a/test/e2e/invconfig/invconfig.go +++ b/test/e2e/invconfig/invconfig.go @@ -5,7 +5,6 @@ package invconfig import ( "context" - "fmt" "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -14,7 +13,6 @@ import ( "k8s.io/kubectl/pkg/cmd/util" "sigs.k8s.io/cli-utils/pkg/apply" "sigs.k8s.io/cli-utils/pkg/inventory" - "sigs.k8s.io/cli-utils/test/e2e/e2eutil" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -28,7 +26,6 @@ type invNotExistsFunc func(ctx context.Context, c client.Client, name, namespace type InventoryConfig struct { ClientConfig *rest.Config - Strategy inventory.Strategy FactoryFunc inventoryFactoryFunc InvWrapperFunc invWrapperFunc ApplierFactoryFunc applierFactoryFunc @@ -39,14 +36,7 @@ type InventoryConfig struct { } func 
CreateInventoryInfo(invConfig InventoryConfig, inventoryName, namespaceName, inventoryID string) inventory.Info { - switch invConfig.Strategy { - case inventory.NameStrategy: - return invConfig.InvWrapperFunc(invConfig.FactoryFunc(inventoryName, namespaceName, e2eutil.RandomString("inventory-"))) - case inventory.LabelStrategy: - return invConfig.InvWrapperFunc(invConfig.FactoryFunc(e2eutil.RandomString("inventory-"), namespaceName, inventoryID)) - default: - panic(fmt.Errorf("unknown inventory strategy %q", invConfig.Strategy)) - } + return invConfig.InvWrapperFunc(invConfig.FactoryFunc(inventoryName, namespaceName, inventoryID)) } func newFactory(cfg *rest.Config) util.Factory { @@ -61,7 +51,7 @@ func newFactory(cfg *rest.Config) util.Factory { return util.NewFactory(matchVersionKubeConfigFlags) } -func newApplier(invFactory inventory.ClientFactory, cfg *rest.Config) *apply.Applier { +func newApplier(invFactory inventory.ClientFactory, statusPolicy inventory.StatusPolicy, cfg *rest.Config) *apply.Applier { f := newFactory(cfg) invClient, err := invFactory.NewClient(f) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -69,12 +59,13 @@ func newApplier(invFactory inventory.ClientFactory, cfg *rest.Config) *apply.App a, err := apply.NewApplierBuilder(). WithFactory(f). WithInventoryClient(invClient). + WithStatusPolicy(statusPolicy). Build() gomega.Expect(err).NotTo(gomega.HaveOccurred()) return a } -func newDestroyer(invFactory inventory.ClientFactory, cfg *rest.Config) *apply.Destroyer { +func newDestroyer(invFactory inventory.ClientFactory, statusPolicy inventory.StatusPolicy, cfg *rest.Config) *apply.Destroyer { f := newFactory(cfg) invClient, err := invFactory.NewClient(f) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -82,6 +73,7 @@ func newDestroyer(invFactory inventory.ClientFactory, cfg *rest.Config) *apply.D d, err := apply.NewDestroyerBuilder(). WithFactory(f). WithInventoryClient(invClient). + WithStatusPolicy(statusPolicy). Build() gomega.Expect(err).NotTo(gomega.HaveOccurred()) return d diff --git a/test/e2e/name_inv_strategy_test.go b/test/e2e/name_inv_strategy_test.go index abcae389..bab2a440 100644 --- a/test/e2e/name_inv_strategy_test.go +++ b/test/e2e/name_inv_strategy_test.go @@ -47,7 +47,5 @@ func applyWithExistingInvTest(ctx context.Context, c client.Client, invConfig in By("Verify that we get the correct error") Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring( - fmt.Sprintf("inventory-id of inventory object %s/%s in cluster doesn't match provided id", - namespaceName, inventoryName))) + Expect(err.Error()).To(ContainSubstring("expected inventory object to have inventory-id")) }
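The hunks above replace the Storage/Merge/Replace-based inventory client with a single UnstructuredClient behind the inventory.Client interface. As a quick orientation for reviewers, the sketch below shows the call pattern the new surface implies: build a client from a ClientFactory, resolve an Info for the inventory object, then use Get, CreateOrUpdate, and Delete with the new option structs. It is an illustration assembled from the signatures in this diff, not code from the repository; the helper name exampleInventoryRoundTrip and the caller-supplied f (a cmdutil.Factory) and cm (the inventory ConfigMap as *unstructured.Unstructured) are assumptions, and whether every method shown is exported on the Client interface should be confirmed against the package.

package example

import (
	"context"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	cmdutil "k8s.io/kubectl/pkg/cmd/util"

	"sigs.k8s.io/cli-utils/pkg/inventory"
)

// exampleInventoryRoundTrip reads, rewrites, and finally deletes a ConfigMap-backed
// inventory using the client surface introduced in this changeset.
func exampleInventoryRoundTrip(ctx context.Context, f cmdutil.Factory, cm *unstructured.Unstructured) error {
	// ClientFactory.NewClient wires up the dynamic client and REST mapper for the
	// inventory GVK (see NewUnstructuredClient above).
	invClient, err := inventory.ConfigMapClientFactory{}.NewClient(f)
	if err != nil {
		return err
	}

	// Info carries the name/namespace/ID used to locate the inventory object.
	info, err := inventory.ConfigMapToInventoryInfo(cm)
	if err != nil {
		return err
	}

	// Get returns an Inventory; ObjectRefs exposes the recorded object metadata.
	inv, err := invClient.Get(ctx, info, inventory.GetOptions{})
	if err != nil {
		return err
	}
	_ = inv.ObjectRefs()

	// CreateOrUpdate writes the inventory back, creating it if absent and retrying
	// on resource conflicts, as implemented by UnstructuredClient.CreateOrUpdate.
	if err := invClient.CreateOrUpdate(ctx, inv, inventory.UpdateOptions{}); err != nil {
		return err
	}

	// Delete removes the inventory object; NotFound errors are tolerated by the client.
	return invClient.Delete(ctx, info, inventory.DeleteOptions{})
}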