diff --git a/docs/platform/view/db-driver.md b/docs/platform/view/db-driver.md index ac752e6e8..8eea336b3 100644 --- a/docs/platform/view/db-driver.md +++ b/docs/platform/view/db-driver.md @@ -40,7 +40,7 @@ However, they all share one common interface such as the following: type BindingStore interface { GetLongTerm(ephemeral view.Identity) (view.Identity, error) HaveSameBinding(this, that view.Identity) (bool, error) - PutBinding(ephemeral, longTerm view.Identity) error + PutBindings(longTerm view.Identity, ephemeral ...view.Identity) error } type AuditInfoStore interface { diff --git a/platform/common/driver/kvs.go b/platform/common/driver/kvs.go index 1dc025873..b2eae42f9 100644 --- a/platform/common/driver/kvs.go +++ b/platform/common/driver/kvs.go @@ -30,7 +30,7 @@ type AuditInfoStore interface { type BindingStore interface { GetLongTerm(ctx context.Context, ephemeral view.Identity) (view.Identity, error) HaveSameBinding(ctx context.Context, this, that view.Identity) (bool, error) - PutBinding(ctx context.Context, ephemeral, longTerm view.Identity) error + PutBindings(ctx context.Context, longTerm view.Identity, ephemeral ...view.Identity) error } type MetadataStore[K any, M any] interface { diff --git a/platform/fabric/core/generic/endpoint/resolver.go b/platform/fabric/core/generic/endpoint/resolver.go index 4164e3e32..f8be97260 100644 --- a/platform/fabric/core/generic/endpoint/resolver.go +++ b/platform/fabric/core/generic/endpoint/resolver.go @@ -37,7 +37,7 @@ func (r *Resolver) GetIdentity() (view.Identity, error) { } type Service interface { - Bind(ctx context.Context, longTerm view.Identity, ephemeral view.Identity) error + Bind(ctx context.Context, longTerm view.Identity, ephemeral ...view.Identity) error AddResolver(name string, domain string, addresses map[string]string, aliases []string, id []byte) (view.Identity, error) AddPublicKeyExtractor(extractor endpoint.PublicKeyExtractor) error } diff --git 
a/platform/fabric/core/generic/msp/driver/driver.go b/platform/fabric/core/generic/msp/driver/driver.go index d7ac3937e..83daa2ce2 100644 --- a/platform/fabric/core/generic/msp/driver/driver.go +++ b/platform/fabric/core/generic/msp/driver/driver.go @@ -37,7 +37,7 @@ type SignerService interface { } type BinderService interface { - Bind(ctx context.Context, longTerm view.Identity, ephemeral view.Identity) error + Bind(ctx context.Context, longTerm view.Identity, ephemeral ...view.Identity) error GetIdentity(label string, pkiID []byte) (view.Identity, error) } diff --git a/platform/fabric/core/generic/ordering/bft.go b/platform/fabric/core/generic/ordering/bft.go index c2654d7c6..004ec9919 100644 --- a/platform/fabric/core/generic/ordering/bft.go +++ b/platform/fabric/core/generic/ordering/bft.go @@ -177,7 +177,9 @@ func (o *BFTBroadcaster) getConnection(ctx context.Context, to *grpc.ConnectionC return nil, errors.Wrapf(err, "failed to new a broadcast, rpcStatus=%+v", rpcStatus) } - stream, err := oClient.Broadcast(ctx) + // Get the broadcast stream to receive a reply of Acknowledgement for each common.Envelope in order, indicating success or type of failure. 
+ // Notice that this stream is shared, therefore its context must be something different from the context of the current broadcast request + stream, err := oClient.Broadcast(context.Background()) if err != nil { client.Close() return nil, errors.Wrapf(err, "failed creating orderer stream for %s", to.Address) diff --git a/platform/fabric/core/generic/ordering/cft.go b/platform/fabric/core/generic/ordering/cft.go index 074f18156..c566df28a 100644 --- a/platform/fabric/core/generic/ordering/cft.go +++ b/platform/fabric/core/generic/ordering/cft.go @@ -137,7 +137,9 @@ func (o *CFTBroadcaster) getConnection(ctx context.Context) (*Connection, error) return nil, errors.Wrapf(err, "failed to new a broadcast for %s, rpcStatus=%+v", to.Address, rpcStatus) } - stream, err := oClient.Broadcast(ctx) + // Get the broadcast stream to receive a reply of Acknowledgement for each common.Envelope in order, indicating success or type of failure. + // Notice that this stream is shared, therefore its context must be something different from the context of the current broadcast request + stream, err := oClient.Broadcast(context.Background()) if err != nil { client.Close() return nil, errors.Wrapf(err, "failed creating orderer stream for %s", to.Address) diff --git a/platform/fabric/sdk/dig/sdk_test.go b/platform/fabric/sdk/dig/sdk_test.go index ace733e58..f8a736f22 100644 --- a/platform/fabric/sdk/dig/sdk_test.go +++ b/platform/fabric/sdk/dig/sdk_test.go @@ -1,5 +1,5 @@ /* -Copyright IBM Corp All Rights Reserved. +Copyright IBM Corp. All Rights Reserved. 
SPDX-License-Identifier: Apache-2.0 */ diff --git a/platform/view/sdk/dig/support/endpoint/resolver.go b/platform/view/sdk/dig/support/endpoint/resolver.go index c8872d255..50fd4c454 100644 --- a/platform/view/sdk/dig/support/endpoint/resolver.go +++ b/platform/view/sdk/dig/support/endpoint/resolver.go @@ -55,7 +55,7 @@ type IdentityService interface { } type Backend interface { - Bind(ctx context.Context, longTerm view.Identity, ephemeral view.Identity) error + Bind(ctx context.Context, longTerm view.Identity, ephemeral ...view.Identity) error AddResolver(name string, domain string, addresses map[string]string, aliases []string, id []byte) (view.Identity, error) } diff --git a/platform/view/services/endpoint/mock/binding_store.go b/platform/view/services/endpoint/mock/binding_store.go index 97aab57c7..036641054 100644 --- a/platform/view/services/endpoint/mock/binding_store.go +++ b/platform/view/services/endpoint/mock/binding_store.go @@ -39,17 +39,17 @@ type BindingStore struct { result1 bool result2 error } - PutBindingStub func(context.Context, view.Identity, view.Identity) error - putBindingMutex sync.RWMutex - putBindingArgsForCall []struct { + PutBindingsStub func(context.Context, view.Identity, ...view.Identity) error + putBindingsMutex sync.RWMutex + putBindingsArgsForCall []struct { arg1 context.Context arg2 view.Identity - arg3 view.Identity + arg3 []view.Identity } - putBindingReturns struct { + putBindingsReturns struct { result1 error } - putBindingReturnsOnCall map[int]struct { + putBindingsReturnsOnCall map[int]struct { result1 error } invocations map[string][][]interface{} @@ -187,20 +187,20 @@ func (fake *BindingStore) HaveSameBindingReturnsOnCall(i int, result1 bool, resu }{result1, result2} } -func (fake *BindingStore) PutBinding(arg1 context.Context, arg2 view.Identity, arg3 view.Identity) error { - fake.putBindingMutex.Lock() - ret, specificReturn := fake.putBindingReturnsOnCall[len(fake.putBindingArgsForCall)] - fake.putBindingArgsForCall = 
append(fake.putBindingArgsForCall, struct { +func (fake *BindingStore) PutBindings(arg1 context.Context, arg2 view.Identity, arg3 ...view.Identity) error { + fake.putBindingsMutex.Lock() + ret, specificReturn := fake.putBindingsReturnsOnCall[len(fake.putBindingsArgsForCall)] + fake.putBindingsArgsForCall = append(fake.putBindingsArgsForCall, struct { arg1 context.Context arg2 view.Identity - arg3 view.Identity + arg3 []view.Identity }{arg1, arg2, arg3}) - stub := fake.PutBindingStub - fakeReturns := fake.putBindingReturns - fake.recordInvocation("PutBinding", []interface{}{arg1, arg2, arg3}) - fake.putBindingMutex.Unlock() + stub := fake.PutBindingsStub + fakeReturns := fake.putBindingsReturns + fake.recordInvocation("PutBindings", []interface{}{arg1, arg2, arg3}) + fake.putBindingsMutex.Unlock() if stub != nil { - return stub(arg1, arg2, arg3) + return stub(arg1, arg2, arg3...) } if specificReturn { return ret.result1 @@ -208,44 +208,44 @@ func (fake *BindingStore) PutBinding(arg1 context.Context, arg2 view.Identity, a return fakeReturns.result1 } -func (fake *BindingStore) PutBindingCallCount() int { - fake.putBindingMutex.RLock() - defer fake.putBindingMutex.RUnlock() - return len(fake.putBindingArgsForCall) +func (fake *BindingStore) PutBindingsCallCount() int { + fake.putBindingsMutex.RLock() + defer fake.putBindingsMutex.RUnlock() + return len(fake.putBindingsArgsForCall) } -func (fake *BindingStore) PutBindingCalls(stub func(context.Context, view.Identity, view.Identity) error) { - fake.putBindingMutex.Lock() - defer fake.putBindingMutex.Unlock() - fake.PutBindingStub = stub +func (fake *BindingStore) PutBindingsCalls(stub func(context.Context, view.Identity, ...view.Identity) error) { + fake.putBindingsMutex.Lock() + defer fake.putBindingsMutex.Unlock() + fake.PutBindingsStub = stub } -func (fake *BindingStore) PutBindingArgsForCall(i int) (context.Context, view.Identity, view.Identity) { - fake.putBindingMutex.RLock() - defer fake.putBindingMutex.RUnlock() 
- argsForCall := fake.putBindingArgsForCall[i] +func (fake *BindingStore) PutBindingsArgsForCall(i int) (context.Context, view.Identity, []view.Identity) { + fake.putBindingsMutex.RLock() + defer fake.putBindingsMutex.RUnlock() + argsForCall := fake.putBindingsArgsForCall[i] return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 } -func (fake *BindingStore) PutBindingReturns(result1 error) { - fake.putBindingMutex.Lock() - defer fake.putBindingMutex.Unlock() - fake.PutBindingStub = nil - fake.putBindingReturns = struct { +func (fake *BindingStore) PutBindingsReturns(result1 error) { + fake.putBindingsMutex.Lock() + defer fake.putBindingsMutex.Unlock() + fake.PutBindingsStub = nil + fake.putBindingsReturns = struct { result1 error }{result1} } -func (fake *BindingStore) PutBindingReturnsOnCall(i int, result1 error) { - fake.putBindingMutex.Lock() - defer fake.putBindingMutex.Unlock() - fake.PutBindingStub = nil - if fake.putBindingReturnsOnCall == nil { - fake.putBindingReturnsOnCall = make(map[int]struct { +func (fake *BindingStore) PutBindingsReturnsOnCall(i int, result1 error) { + fake.putBindingsMutex.Lock() + defer fake.putBindingsMutex.Unlock() + fake.PutBindingsStub = nil + if fake.putBindingsReturnsOnCall == nil { + fake.putBindingsReturnsOnCall = make(map[int]struct { result1 error }) } - fake.putBindingReturnsOnCall[i] = struct { + fake.putBindingsReturnsOnCall[i] = struct { result1 error }{result1} } @@ -253,12 +253,6 @@ func (fake *BindingStore) PutBindingReturnsOnCall(i int, result1 error) { func (fake *BindingStore) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() - fake.getLongTermMutex.RLock() - defer fake.getLongTermMutex.RUnlock() - fake.haveSameBindingMutex.RLock() - defer fake.haveSameBindingMutex.RUnlock() - fake.putBindingMutex.RLock() - defer fake.putBindingMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { 
copiedInvocations[key] = value diff --git a/platform/view/services/endpoint/service.go b/platform/view/services/endpoint/service.go index e2993bd6f..ddd04cad9 100644 --- a/platform/view/services/endpoint/service.go +++ b/platform/view/services/endpoint/service.go @@ -146,18 +146,20 @@ func (r *Service) GetResolver(ctx context.Context, id view.Identity) (*Resolver, return r.resolver(ctx, id) } -func (r *Service) Bind(ctx context.Context, longTerm view.Identity, ephemeral view.Identity) error { - if longTerm.Equal(ephemeral) { - logger.DebugfContext(ctx, "cannot bind [%s] to [%s], they are the same", longTerm, ephemeral) +func (r *Service) Bind(ctx context.Context, longTerm view.Identity, ephemeralIDs ...view.Identity) error { + // filter out any identities equal to the longTerm identity + var toBind []view.Identity + for _, id := range ephemeralIDs { + if !longTerm.Equal(id) { + toBind = append(toBind, id) + } + } + if len(toBind) == 0 { return nil } - - logger.DebugfContext(ctx, "bind [%s] to [%s]", ephemeral, longTerm) - - if err := r.bindingKVS.PutBinding(ctx, ephemeral, longTerm); err != nil { - return errors.WithMessagef(err, "failed storing binding of [%s] to [%s]", ephemeral.UniqueID(), longTerm.UniqueID()) + if err := r.bindingKVS.PutBindings(ctx, longTerm, toBind...); err != nil { + return errors.WithMessagef(err, "failed storing bindings") } - return nil } diff --git a/platform/view/services/endpoint/service_test.go b/platform/view/services/endpoint/service_test.go index 33d4c6f5f..8bc9e6feb 100644 --- a/platform/view/services/endpoint/service_test.go +++ b/platform/view/services/endpoint/service_test.go @@ -26,7 +26,9 @@ func (k mockKVS) GetLongTerm(ctx context.Context, ephemeral view.Identity) (view func (k mockKVS) HaveSameBinding(ctx context.Context, this, that view.Identity) (bool, error) { return false, nil } -func (k mockKVS) PutBinding(ctx context.Context, ephemeral, longTerm view.Identity) error { return nil } +func (k mockKVS) PutBindings(ctx 
context.Context, longTerm view.Identity, ephemeral ...view.Identity) error { + return nil +} type mockExtractor struct{} @@ -58,7 +60,7 @@ func TestPKIResolveConcurrency(t *testing.T) { func TestGetIdentity(t *testing.T) { // setup bindingStore := &mock.BindingStore{} - bindingStore.PutBindingReturns(nil) + bindingStore.PutBindingsReturns(nil) service, err := endpoint.NewService(bindingStore) require.NoError(t, err) @@ -73,7 +75,7 @@ func TestGetIdentity(t *testing.T) { require.NoError(t, err) resolvers := service.Resolvers() assert.Len(t, resolvers, 1) - assert.Equal(t, 0, bindingStore.PutBindingCallCount()) + assert.Equal(t, 0, bindingStore.PutBindingsCallCount()) _, err = service.AddResolver( "alice", @@ -85,7 +87,7 @@ func TestGetIdentity(t *testing.T) { require.NoError(t, err) resolvers = service.Resolvers() assert.Len(t, resolvers, 1) - assert.Equal(t, 1, bindingStore.PutBindingCallCount()) + assert.Equal(t, 1, bindingStore.PutBindingsCallCount()) err = service.AddPublicKeyExtractor(ext) require.NoError(t, err) diff --git a/platform/view/services/id/mock/config_provider.go b/platform/view/services/id/mock/config_provider.go index 42758797e..ccf7f8217 100644 --- a/platform/view/services/id/mock/config_provider.go +++ b/platform/view/services/id/mock/config_provider.go @@ -231,12 +231,6 @@ func (fake *ConfigProvider) TranslatePathReturnsOnCall(i int, result1 string) { func (fake *ConfigProvider) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() - fake.getPathMutex.RLock() - defer fake.getPathMutex.RUnlock() - fake.getStringSliceMutex.RLock() - defer fake.getStringSliceMutex.RUnlock() - fake.translatePathMutex.RLock() - defer fake.translatePathMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value diff --git a/platform/view/services/id/mock/sig_service.go b/platform/view/services/id/mock/sig_service.go index 
df04694f9..345a64939 100644 --- a/platform/view/services/id/mock/sig_service.go +++ b/platform/view/services/id/mock/sig_service.go @@ -11,12 +11,13 @@ import ( ) type SigService struct { - RegisterSignerStub func(view.Identity, driver.Signer, driver.Verifier) error + RegisterSignerStub func(context.Context, view.Identity, driver.Signer, driver.Verifier) error registerSignerMutex sync.RWMutex registerSignerArgsForCall []struct { - arg1 view.Identity - arg2 driver.Signer - arg3 driver.Verifier + arg1 context.Context + arg2 view.Identity + arg3 driver.Signer + arg4 driver.Verifier } registerSignerReturns struct { result1 error @@ -28,20 +29,21 @@ type SigService struct { invocationsMutex sync.RWMutex } -func (fake *SigService) RegisterSigner(ctx context.Context, arg1 view.Identity, arg2 driver.Signer, arg3 driver.Verifier) error { +func (fake *SigService) RegisterSigner(arg1 context.Context, arg2 view.Identity, arg3 driver.Signer, arg4 driver.Verifier) error { fake.registerSignerMutex.Lock() ret, specificReturn := fake.registerSignerReturnsOnCall[len(fake.registerSignerArgsForCall)] fake.registerSignerArgsForCall = append(fake.registerSignerArgsForCall, struct { - arg1 view.Identity - arg2 driver.Signer - arg3 driver.Verifier - }{arg1, arg2, arg3}) + arg1 context.Context + arg2 view.Identity + arg3 driver.Signer + arg4 driver.Verifier + }{arg1, arg2, arg3, arg4}) stub := fake.RegisterSignerStub fakeReturns := fake.registerSignerReturns - fake.recordInvocation("RegisterSigner", []interface{}{arg1, arg2, arg3}) + fake.recordInvocation("RegisterSigner", []interface{}{arg1, arg2, arg3, arg4}) fake.registerSignerMutex.Unlock() if stub != nil { - return stub(arg1, arg2, arg3) + return stub(arg1, arg2, arg3, arg4) } if specificReturn { return ret.result1 @@ -55,17 +57,17 @@ func (fake *SigService) RegisterSignerCallCount() int { return len(fake.registerSignerArgsForCall) } -func (fake *SigService) RegisterSignerCalls(stub func(view.Identity, driver.Signer, driver.Verifier) 
error) { +func (fake *SigService) RegisterSignerCalls(stub func(context.Context, view.Identity, driver.Signer, driver.Verifier) error) { fake.registerSignerMutex.Lock() defer fake.registerSignerMutex.Unlock() fake.RegisterSignerStub = stub } -func (fake *SigService) RegisterSignerArgsForCall(i int) (view.Identity, driver.Signer, driver.Verifier) { +func (fake *SigService) RegisterSignerArgsForCall(i int) (context.Context, view.Identity, driver.Signer, driver.Verifier) { fake.registerSignerMutex.RLock() defer fake.registerSignerMutex.RUnlock() argsForCall := fake.registerSignerArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 } func (fake *SigService) RegisterSignerReturns(result1 error) { @@ -94,8 +96,6 @@ func (fake *SigService) RegisterSignerReturnsOnCall(i int, result1 error) { func (fake *SigService) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() - fake.registerSignerMutex.RLock() - defer fake.registerSignerMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value diff --git a/platform/view/services/storage/driver/sql/common/binding.go b/platform/view/services/storage/driver/sql/common/binding.go index 0e6410e5e..2d6519952 100644 --- a/platform/view/services/storage/driver/sql/common/binding.go +++ b/platform/view/services/storage/driver/sql/common/binding.go @@ -10,6 +10,7 @@ import ( "context" "database/sql" "fmt" + "strings" "github.com/hyperledger-labs/fabric-smart-client/pkg/utils/errors" "github.com/hyperledger-labs/fabric-smart-client/platform/common/utils" @@ -20,6 +21,8 @@ import ( "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" ) +const BindingStoreMaxEphemerals = 1000 + func NewBindingStore(readDB *sql.DB, writeDB WriteDB, table string, errorWrapper driver.SQLErrorWrapper, ci 
common.CondInterpreter) *BindingStore { return &BindingStore{ table: table, @@ -88,3 +91,60 @@ func (db *BindingStore) CreateSchema() error { long_term_id BYTEA NOT NULL );`, db.table)) } + +func (db *BindingStore) PutBindings(ctx context.Context, longTerm view.Identity, ephemerals ...view.Identity) error { + if len(ephemerals) == 0 { + return nil + } + if len(ephemerals) > BindingStoreMaxEphemerals { + return errors.Errorf("Too many ephemerals (%d). Max allowed is %d", len(ephemerals), BindingStoreMaxEphemerals) + } + if longTerm == nil { + return nil + } + + logger.DebugfContext(ctx, "put bindings for %d ephemeral(s) with long term [%s]", len(ephemerals), longTerm.UniqueID()) + + // Resolve canonical long-term ID + if lt, err := db.GetLongTerm(ctx, longTerm); err != nil { + return err + } else if lt != nil && !lt.IsNone() { + logger.DebugfContext(ctx, "replacing [%s] with long term [%s]", longTerm.UniqueID(), lt.UniqueID()) + longTerm = lt + } else { + logger.DebugfContext(ctx, "Id [%s] is an unregistered long term ID", longTerm.UniqueID()) + } + + // Build single INSERT with multiple VALUES + // prepare query placeholder and arguments + placeholders := make([]string, len(ephemerals)+1) + args := make([]any, 0, (len(ephemerals)+1)*2) + + // first item is the longTerm itself + i := 0 + placeholders[i] = fmt.Sprintf("($%d,$%d)", i*2+1, i*2+2) + args = append(args, longTerm.UniqueID(), longTerm) + + // next we go through our ephemerals + for _, eph := range ephemerals { + i++ + placeholders[i] = fmt.Sprintf("($%d,$%d)", i*2+1, i*2+2) + args = append(args, eph.UniqueID(), longTerm) + } + + query := fmt.Sprintf(`INSERT INTO %s (ephemeral_hash, long_term_id) VALUES %s ON CONFLICT DO NOTHING;`, + db.table, strings.Join(placeholders, ",")) + + logger.DebugfContext(ctx, "executing bulk insert: %s", query) + + _, err := db.writeDB.ExecContext(ctx, query, args...) 
+ if err == nil { + logger.DebugfContext(ctx, "long-term and ephemeral ids registered [%s,%s]", longTerm, ephemerals) + return nil + } + if errors.Is(db.errorWrapper.WrapError(err), driver.UniqueKeyViolation) { + logger.InfofContext(ctx, "some tuples [%v, %s] already in db. Skipping...", ephemerals, longTerm) + return nil + } + return errors.Wrapf(err, "failed executing query [%s]", query) +} diff --git a/platform/view/services/storage/driver/sql/common/test_utils.go b/platform/view/services/storage/driver/sql/common/test_utils.go index b29dd1174..bd2877b5d 100644 --- a/platform/view/services/storage/driver/sql/common/test_utils.go +++ b/platform/view/services/storage/driver/sql/common/test_utils.go @@ -7,10 +7,14 @@ SPDX-License-Identifier: Apache-2.0 package common import ( + "context" + "strconv" + "testing" "github.com/hyperledger-labs/fabric-smart-client/platform/common/utils" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/storage/driver" + "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" + "github.com/stretchr/testify/require" _ "modernc.org/sqlite" ) @@ -63,3 +67,46 @@ func TestCases(t *testing.T, }) } } + +func TestPutBindingsMultipleEphemeralsCommon(t *testing.T, db *BindingStore) { + ctx := context.Background() + + // Input identities + longTerm := view.Identity("long") + e1 := view.Identity("eph1") + e2 := view.Identity("eph2") + + // Check that store does not have bindings for e1 and e2 + lt, err := db.GetLongTerm(ctx, e1) + require.NoError(t, err) + require.ElementsMatch(t, len(lt), 0) + lt, err = db.GetLongTerm(ctx, e2) + require.NoError(t, err) + require.ElementsMatch(t, len(lt), 0) + + // Create new bindings + err = db.PutBindings(ctx, longTerm, e1, e2) + require.NoError(t, err) + + // Check that the bindings were correctly written + lt, err = db.GetLongTerm(ctx, e1) + require.NoError(t, err) + require.ElementsMatch(t, lt, longTerm) + + lt, err = db.GetLongTerm(ctx, e2) + require.NoError(t, err) + 
require.ElementsMatch(t, lt, longTerm) +} + +func TestManyManyPutBindingsCommon(t *testing.T, db *BindingStore) { + ctx := context.Background() + + // Input identities + longTerm := view.Identity("long") + e := []view.Identity{} + for i := 0; i < BindingStoreMaxEphemerals+1; i++ { + e = append(e, view.Identity("eph"+strconv.Itoa(i))) + } + err := db.PutBindings(ctx, longTerm, e...) + require.Error(t, err) +} diff --git a/platform/view/services/storage/driver/sql/postgres/binding.go b/platform/view/services/storage/driver/sql/postgres/binding.go index 9fe0f0434..12a663e17 100644 --- a/platform/view/services/storage/driver/sql/postgres/binding.go +++ b/platform/view/services/storage/driver/sql/postgres/binding.go @@ -7,15 +7,11 @@ SPDX-License-Identifier: Apache-2.0 package postgres import ( - "context" "database/sql" - "fmt" - "github.com/hyperledger-labs/fabric-smart-client/pkg/utils/errors" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/storage/driver" common2 "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/storage/driver/common" common3 "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/storage/driver/sql/common" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" ) type BindingStore struct { @@ -38,30 +34,3 @@ func newBindingStore(readDB, writeDB *sql.DB, table string) *BindingStore { errorWrapper: errorWrapper, } } -func (db *BindingStore) PutBinding(ctx context.Context, ephemeral, longTerm view.Identity) error { - logger.DebugfContext(ctx, "Put binding for pair [%s:%s]", ephemeral.UniqueID(), longTerm.UniqueID()) - if lt, err := db.GetLongTerm(ctx, longTerm); err != nil { - return err - } else if lt != nil && !lt.IsNone() { - logger.DebugfContext(ctx, "Replacing [%s] with long term [%s]", longTerm.UniqueID(), lt.UniqueID()) - longTerm = lt - } else { - logger.DebugfContext(ctx, "Id [%s] is an unregistered long term ID", longTerm.UniqueID()) - } - query := fmt.Sprintf(` - 
INSERT INTO %s (ephemeral_hash, long_term_id) - VALUES ($1, $2), ($3, $4) - ON CONFLICT DO NOTHING - `, db.table) - logger.Debug(query, ephemeral.UniqueID(), longTerm.UniqueID()) - _, err := db.writeDB.ExecContext(ctx, query, ephemeral.UniqueID(), longTerm, longTerm.UniqueID(), longTerm) - if err == nil { - logger.DebugfContext(ctx, "Long-term and ephemeral ids registered [%s,%s]", longTerm, ephemeral) - return nil - } - if errors.Is(db.errorWrapper.WrapError(err), driver.UniqueKeyViolation) { - logger.Infof("tuple [%s,%s] already in db. Skipping...", ephemeral, longTerm) - return nil - } - return errors.Wrapf(err, "failed executing query [%s]", query) -} diff --git a/platform/view/services/storage/driver/sql/postgres/binding_test.go b/platform/view/services/storage/driver/sql/postgres/binding_test.go new file mode 100644 index 000000000..4c63ea936 --- /dev/null +++ b/platform/view/services/storage/driver/sql/postgres/binding_test.go @@ -0,0 +1,45 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package postgres + +import ( + "testing" + + testing2 "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/storage/driver/common/testing" + "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/storage/driver/sql/common" + + "github.com/stretchr/testify/require" +) + +func newBindingStoreForTests(t *testing.T) (func(), *BindingStore) { + // When running this test together with other tests; it may happen that a container instance is still running + // we give this test a slow start ... 
+ WaitForPostgresContainerStopped() + t.Log("starting postgres") + terminate, pgConnStr, err := StartPostgres(t, false) + require.NoError(t, err) + t.Log("postgres ready") + + cp := NewConfigProvider(testing2.MockConfig(Config{ + DataSource: pgConnStr, + })) + db, err := NewPersistenceWithOpts(cp, NewDbProvider(), "", NewBindingStore) + require.NoError(t, err) + return terminate, db +} + +func TestPutBindingsMultipleEphemeralsPostgres(t *testing.T) { + terminate, db := newBindingStoreForTests(t) + defer terminate() + common.TestPutBindingsMultipleEphemeralsCommon(t, db.BindingStore) +} + +func TestManyManyPutBindingsPostgres(t *testing.T) { + terminate, db := newBindingStoreForTests(t) + defer terminate() + common.TestManyManyPutBindingsCommon(t, db.BindingStore) +} diff --git a/platform/view/services/storage/driver/sql/postgres/test_utils.go b/platform/view/services/storage/driver/sql/postgres/test_utils.go index 5b606dd09..64111f83b 100644 --- a/platform/view/services/storage/driver/sql/postgres/test_utils.go +++ b/platform/view/services/storage/driver/sql/postgres/test_utils.go @@ -17,6 +17,7 @@ import ( "time" "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" "github.com/docker/docker/client" "github.com/docker/go-connections/nat" "github.com/hashicorp/consul/sdk/freeport" @@ -30,6 +31,7 @@ import ( // itests will not be recognized as a domain, so Podman will still prefix it with localhost // Hence we use fsc.itests as domain const PostgresImage = "fsc.itests/postgres:latest" +const PostgresContainerName = "fsc-postgres" type Logger interface { Log(...any) @@ -257,7 +259,7 @@ func StartPostgres(t Logger, printLogs bool) (func(), string, error) { c := ContainerConfig{ Image: getEnv("POSTGRES_IMAGE", PostgresImage), - Container: getEnv("POSTGRES_CONTAINER", "fsc-postgres"), + Container: getEnv("POSTGRES_CONTAINER", PostgresContainerName), DbConfig: &DbConfig{ DBName: getEnv("POSTGRES_DB", "testdb"), User: 
getEnv("POSTGRES_USER", "pgx_md5"), @@ -279,3 +281,40 @@ func getEnv(key, fallback string) string { } return fallback } + +func WaitForPostgresContainerStopped() error { + const interval = 200 * time.Millisecond + const maxWait = 10 * time.Second + + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return err + } + defer cli.Close() + + deadline := time.Now().Add(maxWait) + for { + if time.Now().After(deadline) { + return fmt.Errorf("timeout waiting for container %q to stop", PostgresContainerName) + } + + f := filters.NewArgs() + f.Add("name", PostgresContainerName) + f.Add("status", "running") + + opts := container.ListOptions{ + All: true, + Filters: f, + } + + containers, err := cli.ContainerList(context.Background(), opts) + if err != nil { + return err + } + if len(containers) == 0 { + return nil // no running container with that name + } + + time.Sleep(interval) + } +} diff --git a/platform/view/services/storage/driver/sql/sqlite/binding.go b/platform/view/services/storage/driver/sql/sqlite/binding.go index 266dc05ad..cef53ac44 100644 --- a/platform/view/services/storage/driver/sql/sqlite/binding.go +++ b/platform/view/services/storage/driver/sql/sqlite/binding.go @@ -7,15 +7,11 @@ SPDX-License-Identifier: Apache-2.0 package sqlite import ( - "context" "database/sql" - "fmt" - "github.com/hyperledger-labs/fabric-smart-client/pkg/utils/errors" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/storage/driver" common3 "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/storage/driver/common" common2 "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/storage/driver/sql/common" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" ) type BindingStore struct { @@ -38,36 +34,3 @@ func newBindingStore(readDB *sql.DB, writeDB common2.WriteDB, table string) *Bin errorWrapper: errorWrapper, } } -func (db *BindingStore) 
PutBinding(ctx context.Context, ephemeral, longTerm view.Identity) error { - logger.DebugfContext(ctx, "put binding for pair [%s:%s]", ephemeral.UniqueID(), longTerm.UniqueID()) - if lt, err := db.GetLongTerm(ctx, longTerm); err != nil { - return err - } else if lt != nil && !lt.IsNone() { - logger.DebugfContext(ctx, "replacing [%s] with long term [%s]", longTerm.UniqueID(), lt.UniqueID()) - longTerm = lt - } else { - logger.DebugfContext(ctx, "Id [%s] is an unregistered long term ID", longTerm.UniqueID()) - } - query := fmt.Sprintf(` - BEGIN; - INSERT INTO %s (ephemeral_hash, long_term_id) - VALUES ($1, $2) - ON CONFLICT DO NOTHING; - INSERT INTO %s (ephemeral_hash, long_term_id) - VALUES ($3, $4) - ON CONFLICT DO NOTHING; - COMMIT; - `, db.table, db.table) - - logger.Debug(query, ephemeral.UniqueID(), longTerm.UniqueID()) - _, err := db.writeDB.ExecContext(ctx, query, ephemeral.UniqueID(), longTerm, longTerm.UniqueID(), longTerm) - if err == nil { - logger.DebugfContext(ctx, "long-term and ephemeral ids registered [%s,%s]", longTerm, ephemeral) - return nil - } - if errors.Is(db.errorWrapper.WrapError(err), driver.UniqueKeyViolation) { - logger.InfofContext(ctx, "tuple [%s,%s] already in db. Skipping...", ephemeral, longTerm) - return nil - } - return errors.Wrapf(err, "failed executing query [%s]", query) -} diff --git a/platform/view/services/storage/driver/sql/sqlite/binding_test.go b/platform/view/services/storage/driver/sql/sqlite/binding_test.go new file mode 100644 index 000000000..cb7c1ca52 --- /dev/null +++ b/platform/view/services/storage/driver/sql/sqlite/binding_test.go @@ -0,0 +1,39 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package sqlite + +import ( + "fmt" + "path" + "testing" + + "github.com/hyperledger-labs/fabric-smart-client/platform/common/utils" + "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/storage/driver/sql/common" + "github.com/stretchr/testify/assert" +) + +func newBindingStoreForTests(t *testing.T) *BindingStore { + tempDir := t.TempDir() + o := Opts{ + DataSource: fmt.Sprintf("file:%s.sqlite?_pragma=busy_timeout(1000)", path.Join(tempDir, "benchmark")), + } + dbs := utils.MustGet(open(o)) + tables := common.GetTableNames(o.TablePrefix, o.TableNameParams...) + db := newBindingStore(dbs.ReadDB, dbs.WriteDB, tables.Binding) + assert.NoError(t, db.CreateSchema()) + return db +} + +func TestPutBindingsMultipleEphemeralsSqlite(t *testing.T) { + db := newBindingStoreForTests(t) + common.TestPutBindingsMultipleEphemeralsCommon(t, db.BindingStore) +} + +func TestManyManyPutBindingsSqlite(t *testing.T) { + db := newBindingStoreForTests(t) + common.TestManyManyPutBindingsCommon(t, db.BindingStore) +} diff --git a/platform/view/services/storage/kvs/kvs_test.go b/platform/view/services/storage/kvs/kvs_test.go index 89d251a23..6eb218194 100644 --- a/platform/view/services/storage/kvs/kvs_test.go +++ b/platform/view/services/storage/kvs/kvs_test.go @@ -176,10 +176,9 @@ func TestSQLiteKVS(t *testing.T) { } func TestPostgresKVS(t *testing.T) { - // When running this test together with other tests; it may happen that a container instance is still running + // When running this test together with other tests; it may happen that a container instance is still running // we give this test a slow start ... 
- time.Sleep(5 * time.Second) - + postgres2.WaitForPostgresContainerStopped() t.Log("starting postgres") terminate, pgConnStr, err := postgres2.StartPostgres(t, false) if err != nil { diff --git a/platform/view/services/view/manager.go b/platform/view/services/view/manager.go index 989d95637..a30271628 100644 --- a/platform/view/services/view/manager.go +++ b/platform/view/services/view/manager.go @@ -120,14 +120,10 @@ func (cm *Manager) InitiateView(view view.View, ctx context.Context) (interface{ } func (cm *Manager) InitiateViewWithIdentity(view view.View, id view.Identity, ctx context.Context) (interface{}, error) { - // Get the managers context - cm.contextsMu.Lock() - cctx := cm.ctx - cm.contextsMu.Unlock() - if cctx == nil { - cctx = context.Background() + if ctx == nil { + ctx = cm.getCurrentContext() } - ctx = trace.ContextWithSpanContext(cctx, trace.SpanContextFromContext(ctx)) + ctx = trace.ContextWithSpanContext(ctx, trace.SpanContextFromContext(ctx)) viewContext, err := NewContextForInitiator( "", ctx, diff --git a/platform/view/services/view/manager_test.go b/platform/view/services/view/manager_test.go index b1bbcc889..c3c98fd5b 100644 --- a/platform/view/services/view/manager_test.go +++ b/platform/view/services/view/manager_test.go @@ -13,6 +13,7 @@ import ( "time" "github.com/hyperledger-labs/fabric-smart-client/pkg/utils" + "github.com/hyperledger-labs/fabric-smart-client/pkg/utils/errors" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/metrics/disabled" view2 "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/view" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/view/mock" @@ -50,6 +51,18 @@ func (a DummyView) Call(context view.Context) (interface{}, error) { return nil, nil } +type ContextKey string + +type DummyViewContextCheck struct{} + +func (a DummyViewContextCheck) Call(ctx view.Context) (interface{}, error) { + v, ok := ctx.Context().Value(ContextKey("test")).(string) + 
if !ok { + return nil, errors.Errorf("context value %s not found", ContextKey("test")) + } + return v, nil +} + type DummyFactory struct{} func (d *DummyFactory) NewView(in []byte) (view.View, error) { @@ -93,10 +106,11 @@ func TestManagerRace(t *testing.T) { wg := &sync.WaitGroup{} for i := 0; i < 100; i++ { - wg.Add(7) + wg.Add(8) go registerFactory(t, wg, manager) go newView(t, wg, manager) go callView(t, wg, manager) + go callViewWithAugmentedContext(t, wg, manager) go getContext(t, wg, manager) go initiateView(t, wg, manager) go start(t, wg, manager, ctx) @@ -153,6 +167,15 @@ func callView(t *testing.T, wg *sync.WaitGroup, m Manager) { assert.NoError(t, err) } +func callViewWithAugmentedContext(t *testing.T, wg *sync.WaitGroup, m Manager) { + ctx := context.Background() + ctx = context.WithValue(ctx, ContextKey("test"), "pineapple") + v, err := m.InitiateView(&DummyViewContextCheck{}, ctx) + wg.Done() + assert.NoError(t, err) + assert.Equal(t, "pineapple", v) +} + func newView(t *testing.T, wg *sync.WaitGroup, m Manager) { _, err := m.NewView(utils.GenerateUUID(), nil) wg.Done() diff --git a/platform/view/services/view/mock/comm_layer.go b/platform/view/services/view/mock/comm_layer.go index 17f25ad15..55bee0242 100644 --- a/platform/view/services/view/mock/comm_layer.go +++ b/platform/view/services/view/mock/comm_layer.go @@ -5,149 +5,181 @@ import ( "context" "sync" - view2 "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/view" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" + "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/view" + viewa "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" ) type CommLayer struct { - NewSessionWithIDStub func(sessionID, contextID, endpoint string, pkid []byte, caller view.Identity, msg *view.Message) (view.Session, error) - newSessionWithIDMutex sync.RWMutex - newSessionWithIDArgsForCall []struct { - sessionID string - contextID string - 
endpoint string - pkid []byte - caller view.Identity - msg *view.Message + DeleteSessionsStub func(context.Context, string) + deleteSessionsMutex sync.RWMutex + deleteSessionsArgsForCall []struct { + arg1 context.Context + arg2 string } - newSessionWithIDReturns struct { - result1 view.Session + MasterSessionStub func() (viewa.Session, error) + masterSessionMutex sync.RWMutex + masterSessionArgsForCall []struct { + } + masterSessionReturns struct { + result1 viewa.Session result2 error } - newSessionWithIDReturnsOnCall map[int]struct { - result1 view.Session + masterSessionReturnsOnCall map[int]struct { + result1 viewa.Session result2 error } - NewSessionStub func(caller string, contextID string, endpoint string, pkid []byte) (view.Session, error) + NewSessionStub func(string, string, string, []byte) (viewa.Session, error) newSessionMutex sync.RWMutex newSessionArgsForCall []struct { - caller string - contextID string - endpoint string - pkid []byte + arg1 string + arg2 string + arg3 string + arg4 []byte } newSessionReturns struct { - result1 view.Session + result1 viewa.Session result2 error } newSessionReturnsOnCall map[int]struct { - result1 view.Session + result1 viewa.Session result2 error } - MasterSessionStub func() (view.Session, error) - masterSessionMutex sync.RWMutex - masterSessionArgsForCall []struct{} - masterSessionReturns struct { - result1 view.Session - result2 error + NewSessionWithIDStub func(string, string, string, []byte, viewa.Identity, *viewa.Message) (viewa.Session, error) + newSessionWithIDMutex sync.RWMutex + newSessionWithIDArgsForCall []struct { + arg1 string + arg2 string + arg3 string + arg4 []byte + arg5 viewa.Identity + arg6 *viewa.Message } - masterSessionReturnsOnCall map[int]struct { - result1 view.Session + newSessionWithIDReturns struct { + result1 viewa.Session result2 error } - DeleteSessionsStub func(sessionID string) - deleteSessionsMutex sync.RWMutex - deleteSessionsArgsForCall []struct { - sessionID string + 
newSessionWithIDReturnsOnCall map[int]struct { + result1 viewa.Session + result2 error } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } -func (fake *CommLayer) NewSessionWithID(sessionID string, contextID string, endpoint string, pkid []byte, caller view.Identity, msg *view.Message) (view.Session, error) { - var pkidCopy []byte - if pkid != nil { - pkidCopy = make([]byte, len(pkid)) - copy(pkidCopy, pkid) +func (fake *CommLayer) DeleteSessions(arg1 context.Context, arg2 string) { + fake.deleteSessionsMutex.Lock() + fake.deleteSessionsArgsForCall = append(fake.deleteSessionsArgsForCall, struct { + arg1 context.Context + arg2 string + }{arg1, arg2}) + stub := fake.DeleteSessionsStub + fake.recordInvocation("DeleteSessions", []interface{}{arg1, arg2}) + fake.deleteSessionsMutex.Unlock() + if stub != nil { + fake.DeleteSessionsStub(arg1, arg2) } - fake.newSessionWithIDMutex.Lock() - ret, specificReturn := fake.newSessionWithIDReturnsOnCall[len(fake.newSessionWithIDArgsForCall)] - fake.newSessionWithIDArgsForCall = append(fake.newSessionWithIDArgsForCall, struct { - sessionID string - contextID string - endpoint string - pkid []byte - caller view.Identity - msg *view.Message - }{sessionID, contextID, endpoint, pkidCopy, caller, msg}) - fake.recordInvocation("NewSessionWithID", []interface{}{sessionID, contextID, endpoint, pkidCopy, caller, msg}) - fake.newSessionWithIDMutex.Unlock() - if fake.NewSessionWithIDStub != nil { - return fake.NewSessionWithIDStub(sessionID, contextID, endpoint, pkid, caller, msg) +} + +func (fake *CommLayer) DeleteSessionsCallCount() int { + fake.deleteSessionsMutex.RLock() + defer fake.deleteSessionsMutex.RUnlock() + return len(fake.deleteSessionsArgsForCall) +} + +func (fake *CommLayer) DeleteSessionsCalls(stub func(context.Context, string)) { + fake.deleteSessionsMutex.Lock() + defer fake.deleteSessionsMutex.Unlock() + fake.DeleteSessionsStub = stub +} + +func (fake *CommLayer) DeleteSessionsArgsForCall(i int) 
(context.Context, string) { + fake.deleteSessionsMutex.RLock() + defer fake.deleteSessionsMutex.RUnlock() + argsForCall := fake.deleteSessionsArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *CommLayer) MasterSession() (viewa.Session, error) { + fake.masterSessionMutex.Lock() + ret, specificReturn := fake.masterSessionReturnsOnCall[len(fake.masterSessionArgsForCall)] + fake.masterSessionArgsForCall = append(fake.masterSessionArgsForCall, struct { + }{}) + stub := fake.MasterSessionStub + fakeReturns := fake.masterSessionReturns + fake.recordInvocation("MasterSession", []interface{}{}) + fake.masterSessionMutex.Unlock() + if stub != nil { + return stub() } if specificReturn { return ret.result1, ret.result2 } - return fake.newSessionWithIDReturns.result1, fake.newSessionWithIDReturns.result2 + return fakeReturns.result1, fakeReturns.result2 } -func (fake *CommLayer) NewSessionWithIDCallCount() int { - fake.newSessionWithIDMutex.RLock() - defer fake.newSessionWithIDMutex.RUnlock() - return len(fake.newSessionWithIDArgsForCall) +func (fake *CommLayer) MasterSessionCallCount() int { + fake.masterSessionMutex.RLock() + defer fake.masterSessionMutex.RUnlock() + return len(fake.masterSessionArgsForCall) } -func (fake *CommLayer) NewSessionWithIDArgsForCall(i int) (string, string, string, []byte, view.Identity, *view.Message) { - fake.newSessionWithIDMutex.RLock() - defer fake.newSessionWithIDMutex.RUnlock() - return fake.newSessionWithIDArgsForCall[i].sessionID, fake.newSessionWithIDArgsForCall[i].contextID, fake.newSessionWithIDArgsForCall[i].endpoint, fake.newSessionWithIDArgsForCall[i].pkid, fake.newSessionWithIDArgsForCall[i].caller, fake.newSessionWithIDArgsForCall[i].msg +func (fake *CommLayer) MasterSessionCalls(stub func() (viewa.Session, error)) { + fake.masterSessionMutex.Lock() + defer fake.masterSessionMutex.Unlock() + fake.MasterSessionStub = stub } -func (fake *CommLayer) NewSessionWithIDReturns(result1 view.Session, result2 error) 
{ - fake.NewSessionWithIDStub = nil - fake.newSessionWithIDReturns = struct { - result1 view.Session +func (fake *CommLayer) MasterSessionReturns(result1 viewa.Session, result2 error) { + fake.masterSessionMutex.Lock() + defer fake.masterSessionMutex.Unlock() + fake.MasterSessionStub = nil + fake.masterSessionReturns = struct { + result1 viewa.Session result2 error }{result1, result2} } -func (fake *CommLayer) NewSessionWithIDReturnsOnCall(i int, result1 view.Session, result2 error) { - fake.NewSessionWithIDStub = nil - if fake.newSessionWithIDReturnsOnCall == nil { - fake.newSessionWithIDReturnsOnCall = make(map[int]struct { - result1 view.Session +func (fake *CommLayer) MasterSessionReturnsOnCall(i int, result1 viewa.Session, result2 error) { + fake.masterSessionMutex.Lock() + defer fake.masterSessionMutex.Unlock() + fake.MasterSessionStub = nil + if fake.masterSessionReturnsOnCall == nil { + fake.masterSessionReturnsOnCall = make(map[int]struct { + result1 viewa.Session result2 error }) } - fake.newSessionWithIDReturnsOnCall[i] = struct { - result1 view.Session + fake.masterSessionReturnsOnCall[i] = struct { + result1 viewa.Session result2 error }{result1, result2} } -func (fake *CommLayer) NewSession(caller string, contextID string, endpoint string, pkid []byte) (view.Session, error) { - var pkidCopy []byte - if pkid != nil { - pkidCopy = make([]byte, len(pkid)) - copy(pkidCopy, pkid) +func (fake *CommLayer) NewSession(arg1 string, arg2 string, arg3 string, arg4 []byte) (viewa.Session, error) { + var arg4Copy []byte + if arg4 != nil { + arg4Copy = make([]byte, len(arg4)) + copy(arg4Copy, arg4) } fake.newSessionMutex.Lock() ret, specificReturn := fake.newSessionReturnsOnCall[len(fake.newSessionArgsForCall)] fake.newSessionArgsForCall = append(fake.newSessionArgsForCall, struct { - caller string - contextID string - endpoint string - pkid []byte - }{caller, contextID, endpoint, pkidCopy}) - fake.recordInvocation("NewSession", []interface{}{caller, contextID, 
endpoint, pkidCopy}) + arg1 string + arg2 string + arg3 string + arg4 []byte + }{arg1, arg2, arg3, arg4Copy}) + stub := fake.NewSessionStub + fakeReturns := fake.newSessionReturns + fake.recordInvocation("NewSession", []interface{}{arg1, arg2, arg3, arg4Copy}) fake.newSessionMutex.Unlock() - if fake.NewSessionStub != nil { - return fake.NewSessionStub(caller, contextID, endpoint, pkid) + if stub != nil { + return stub(arg1, arg2, arg3, arg4) } if specificReturn { return ret.result1, ret.result2 } - return fake.newSessionReturns.result1, fake.newSessionReturns.result2 + return fakeReturns.result1, fakeReturns.result2 } func (fake *CommLayer) NewSessionCallCount() int { @@ -156,112 +188,130 @@ func (fake *CommLayer) NewSessionCallCount() int { return len(fake.newSessionArgsForCall) } +func (fake *CommLayer) NewSessionCalls(stub func(string, string, string, []byte) (viewa.Session, error)) { + fake.newSessionMutex.Lock() + defer fake.newSessionMutex.Unlock() + fake.NewSessionStub = stub +} + func (fake *CommLayer) NewSessionArgsForCall(i int) (string, string, string, []byte) { fake.newSessionMutex.RLock() defer fake.newSessionMutex.RUnlock() - return fake.newSessionArgsForCall[i].caller, fake.newSessionArgsForCall[i].contextID, fake.newSessionArgsForCall[i].endpoint, fake.newSessionArgsForCall[i].pkid + argsForCall := fake.newSessionArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 } -func (fake *CommLayer) NewSessionReturns(result1 view.Session, result2 error) { +func (fake *CommLayer) NewSessionReturns(result1 viewa.Session, result2 error) { + fake.newSessionMutex.Lock() + defer fake.newSessionMutex.Unlock() fake.NewSessionStub = nil fake.newSessionReturns = struct { - result1 view.Session + result1 viewa.Session result2 error }{result1, result2} } -func (fake *CommLayer) NewSessionReturnsOnCall(i int, result1 view.Session, result2 error) { +func (fake *CommLayer) NewSessionReturnsOnCall(i int, result1 viewa.Session, 
result2 error) { + fake.newSessionMutex.Lock() + defer fake.newSessionMutex.Unlock() fake.NewSessionStub = nil if fake.newSessionReturnsOnCall == nil { fake.newSessionReturnsOnCall = make(map[int]struct { - result1 view.Session + result1 viewa.Session result2 error }) } fake.newSessionReturnsOnCall[i] = struct { - result1 view.Session + result1 viewa.Session result2 error }{result1, result2} } -func (fake *CommLayer) MasterSession() (view.Session, error) { - fake.masterSessionMutex.Lock() - ret, specificReturn := fake.masterSessionReturnsOnCall[len(fake.masterSessionArgsForCall)] - fake.masterSessionArgsForCall = append(fake.masterSessionArgsForCall, struct{}{}) - fake.recordInvocation("MasterSession", []interface{}{}) - fake.masterSessionMutex.Unlock() - if fake.MasterSessionStub != nil { - return fake.MasterSessionStub() +func (fake *CommLayer) NewSessionWithID(arg1 string, arg2 string, arg3 string, arg4 []byte, arg5 viewa.Identity, arg6 *viewa.Message) (viewa.Session, error) { + var arg4Copy []byte + if arg4 != nil { + arg4Copy = make([]byte, len(arg4)) + copy(arg4Copy, arg4) + } + fake.newSessionWithIDMutex.Lock() + ret, specificReturn := fake.newSessionWithIDReturnsOnCall[len(fake.newSessionWithIDArgsForCall)] + fake.newSessionWithIDArgsForCall = append(fake.newSessionWithIDArgsForCall, struct { + arg1 string + arg2 string + arg3 string + arg4 []byte + arg5 viewa.Identity + arg6 *viewa.Message + }{arg1, arg2, arg3, arg4Copy, arg5, arg6}) + stub := fake.NewSessionWithIDStub + fakeReturns := fake.newSessionWithIDReturns + fake.recordInvocation("NewSessionWithID", []interface{}{arg1, arg2, arg3, arg4Copy, arg5, arg6}) + fake.newSessionWithIDMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4, arg5, arg6) } if specificReturn { return ret.result1, ret.result2 } - return fake.masterSessionReturns.result1, fake.masterSessionReturns.result2 + return fakeReturns.result1, fakeReturns.result2 } -func (fake *CommLayer) MasterSessionCallCount() int { - 
fake.masterSessionMutex.RLock() - defer fake.masterSessionMutex.RUnlock() - return len(fake.masterSessionArgsForCall) +func (fake *CommLayer) NewSessionWithIDCallCount() int { + fake.newSessionWithIDMutex.RLock() + defer fake.newSessionWithIDMutex.RUnlock() + return len(fake.newSessionWithIDArgsForCall) } -func (fake *CommLayer) MasterSessionReturns(result1 view.Session, result2 error) { - fake.MasterSessionStub = nil - fake.masterSessionReturns = struct { - result1 view.Session +func (fake *CommLayer) NewSessionWithIDCalls(stub func(string, string, string, []byte, viewa.Identity, *viewa.Message) (viewa.Session, error)) { + fake.newSessionWithIDMutex.Lock() + defer fake.newSessionWithIDMutex.Unlock() + fake.NewSessionWithIDStub = stub +} + +func (fake *CommLayer) NewSessionWithIDArgsForCall(i int) (string, string, string, []byte, viewa.Identity, *viewa.Message) { + fake.newSessionWithIDMutex.RLock() + defer fake.newSessionWithIDMutex.RUnlock() + argsForCall := fake.newSessionWithIDArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6 +} + +func (fake *CommLayer) NewSessionWithIDReturns(result1 viewa.Session, result2 error) { + fake.newSessionWithIDMutex.Lock() + defer fake.newSessionWithIDMutex.Unlock() + fake.NewSessionWithIDStub = nil + fake.newSessionWithIDReturns = struct { + result1 viewa.Session result2 error }{result1, result2} } -func (fake *CommLayer) MasterSessionReturnsOnCall(i int, result1 view.Session, result2 error) { - fake.MasterSessionStub = nil - if fake.masterSessionReturnsOnCall == nil { - fake.masterSessionReturnsOnCall = make(map[int]struct { - result1 view.Session +func (fake *CommLayer) NewSessionWithIDReturnsOnCall(i int, result1 viewa.Session, result2 error) { + fake.newSessionWithIDMutex.Lock() + defer fake.newSessionWithIDMutex.Unlock() + fake.NewSessionWithIDStub = nil + if fake.newSessionWithIDReturnsOnCall == nil { + fake.newSessionWithIDReturnsOnCall = 
make(map[int]struct { + result1 viewa.Session result2 error }) } - fake.masterSessionReturnsOnCall[i] = struct { - result1 view.Session + fake.newSessionWithIDReturnsOnCall[i] = struct { + result1 viewa.Session result2 error }{result1, result2} } -func (fake *CommLayer) DeleteSessions(ctx context.Context, sessionID string) { - fake.deleteSessionsMutex.Lock() - fake.deleteSessionsArgsForCall = append(fake.deleteSessionsArgsForCall, struct { - sessionID string - }{sessionID}) - fake.recordInvocation("DeleteSessions", []interface{}{sessionID}) - fake.deleteSessionsMutex.Unlock() - if fake.DeleteSessionsStub != nil { - fake.DeleteSessionsStub(sessionID) - } -} - -func (fake *CommLayer) DeleteSessionsCallCount() int { - fake.deleteSessionsMutex.RLock() - defer fake.deleteSessionsMutex.RUnlock() - return len(fake.deleteSessionsArgsForCall) -} - -func (fake *CommLayer) DeleteSessionsArgsForCall(i int) string { - fake.deleteSessionsMutex.RLock() - defer fake.deleteSessionsMutex.RUnlock() - return fake.deleteSessionsArgsForCall[i].sessionID -} - func (fake *CommLayer) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() - fake.newSessionWithIDMutex.RLock() - defer fake.newSessionWithIDMutex.RUnlock() - fake.newSessionMutex.RLock() - defer fake.newSessionMutex.RUnlock() - fake.masterSessionMutex.RLock() - defer fake.masterSessionMutex.RUnlock() fake.deleteSessionsMutex.RLock() defer fake.deleteSessionsMutex.RUnlock() + fake.masterSessionMutex.RLock() + defer fake.masterSessionMutex.RUnlock() + fake.newSessionMutex.RLock() + defer fake.newSessionMutex.RUnlock() + fake.newSessionWithIDMutex.RLock() + defer fake.newSessionWithIDMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value @@ -281,4 +331,4 @@ func (fake *CommLayer) recordInvocation(key string, args []interface{}) { fake.invocations[key] = append(fake.invocations[key], args) } 
-var _ view2.CommLayer = new(CommLayer) +var _ view.CommLayer = new(CommLayer) diff --git a/platform/view/services/view/mock/session_factory.go b/platform/view/services/view/mock/session_factory.go index 0be85b283..768d5d5bf 100644 --- a/platform/view/services/view/mock/session_factory.go +++ b/platform/view/services/view/mock/session_factory.go @@ -5,138 +5,113 @@ import ( "context" "sync" - view2 "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/view" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" + "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/view" + viewa "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" ) type SessionFactory struct { - NewSessionWithIDStub func(sessionID, contextID, endpoint string, pkid []byte, caller view.Identity, msg *view.Message) (view.Session, error) - newSessionWithIDMutex sync.RWMutex - newSessionWithIDArgsForCall []struct { - sessionID string - contextID string - endpoint string - pkid []byte - caller view.Identity - msg *view.Message - } - newSessionWithIDReturns struct { - result1 view.Session - result2 error - } - newSessionWithIDReturnsOnCall map[int]struct { - result1 view.Session - result2 error + DeleteSessionsStub func(context.Context, string) + deleteSessionsMutex sync.RWMutex + deleteSessionsArgsForCall []struct { + arg1 context.Context + arg2 string } - NewSessionStub func(caller string, contextID string, endpoint string, pkid []byte) (view.Session, error) + NewSessionStub func(string, string, string, []byte) (viewa.Session, error) newSessionMutex sync.RWMutex newSessionArgsForCall []struct { - caller string - contextID string - endpoint string - pkid []byte + arg1 string + arg2 string + arg3 string + arg4 []byte } newSessionReturns struct { - result1 view.Session + result1 viewa.Session result2 error } newSessionReturnsOnCall map[int]struct { - result1 view.Session + result1 viewa.Session result2 error } - DeleteSessionsStub 
func(sessionID string) - deleteSessionsMutex sync.RWMutex - deleteSessionsArgsForCall []struct { - sessionID string + NewSessionWithIDStub func(string, string, string, []byte, viewa.Identity, *viewa.Message) (viewa.Session, error) + newSessionWithIDMutex sync.RWMutex + newSessionWithIDArgsForCall []struct { + arg1 string + arg2 string + arg3 string + arg4 []byte + arg5 viewa.Identity + arg6 *viewa.Message + } + newSessionWithIDReturns struct { + result1 viewa.Session + result2 error + } + newSessionWithIDReturnsOnCall map[int]struct { + result1 viewa.Session + result2 error } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } -func (fake *SessionFactory) NewSessionWithID(sessionID string, contextID string, endpoint string, pkid []byte, caller view.Identity, msg *view.Message) (view.Session, error) { - var pkidCopy []byte - if pkid != nil { - pkidCopy = make([]byte, len(pkid)) - copy(pkidCopy, pkid) - } - fake.newSessionWithIDMutex.Lock() - ret, specificReturn := fake.newSessionWithIDReturnsOnCall[len(fake.newSessionWithIDArgsForCall)] - fake.newSessionWithIDArgsForCall = append(fake.newSessionWithIDArgsForCall, struct { - sessionID string - contextID string - endpoint string - pkid []byte - caller view.Identity - msg *view.Message - }{sessionID, contextID, endpoint, pkidCopy, caller, msg}) - fake.recordInvocation("NewSessionWithID", []interface{}{sessionID, contextID, endpoint, pkidCopy, caller, msg}) - fake.newSessionWithIDMutex.Unlock() - if fake.NewSessionWithIDStub != nil { - return fake.NewSessionWithIDStub(sessionID, contextID, endpoint, pkid, caller, msg) - } - if specificReturn { - return ret.result1, ret.result2 +func (fake *SessionFactory) DeleteSessions(arg1 context.Context, arg2 string) { + fake.deleteSessionsMutex.Lock() + fake.deleteSessionsArgsForCall = append(fake.deleteSessionsArgsForCall, struct { + arg1 context.Context + arg2 string + }{arg1, arg2}) + stub := fake.DeleteSessionsStub + fake.recordInvocation("DeleteSessions", 
[]interface{}{arg1, arg2}) + fake.deleteSessionsMutex.Unlock() + if stub != nil { + fake.DeleteSessionsStub(arg1, arg2) } - return fake.newSessionWithIDReturns.result1, fake.newSessionWithIDReturns.result2 -} - -func (fake *SessionFactory) NewSessionWithIDCallCount() int { - fake.newSessionWithIDMutex.RLock() - defer fake.newSessionWithIDMutex.RUnlock() - return len(fake.newSessionWithIDArgsForCall) } -func (fake *SessionFactory) NewSessionWithIDArgsForCall(i int) (string, string, string, []byte, view.Identity, *view.Message) { - fake.newSessionWithIDMutex.RLock() - defer fake.newSessionWithIDMutex.RUnlock() - return fake.newSessionWithIDArgsForCall[i].sessionID, fake.newSessionWithIDArgsForCall[i].contextID, fake.newSessionWithIDArgsForCall[i].endpoint, fake.newSessionWithIDArgsForCall[i].pkid, fake.newSessionWithIDArgsForCall[i].caller, fake.newSessionWithIDArgsForCall[i].msg +func (fake *SessionFactory) DeleteSessionsCallCount() int { + fake.deleteSessionsMutex.RLock() + defer fake.deleteSessionsMutex.RUnlock() + return len(fake.deleteSessionsArgsForCall) } -func (fake *SessionFactory) NewSessionWithIDReturns(result1 view.Session, result2 error) { - fake.NewSessionWithIDStub = nil - fake.newSessionWithIDReturns = struct { - result1 view.Session - result2 error - }{result1, result2} +func (fake *SessionFactory) DeleteSessionsCalls(stub func(context.Context, string)) { + fake.deleteSessionsMutex.Lock() + defer fake.deleteSessionsMutex.Unlock() + fake.DeleteSessionsStub = stub } -func (fake *SessionFactory) NewSessionWithIDReturnsOnCall(i int, result1 view.Session, result2 error) { - fake.NewSessionWithIDStub = nil - if fake.newSessionWithIDReturnsOnCall == nil { - fake.newSessionWithIDReturnsOnCall = make(map[int]struct { - result1 view.Session - result2 error - }) - } - fake.newSessionWithIDReturnsOnCall[i] = struct { - result1 view.Session - result2 error - }{result1, result2} +func (fake *SessionFactory) DeleteSessionsArgsForCall(i int) (context.Context, 
string) { + fake.deleteSessionsMutex.RLock() + defer fake.deleteSessionsMutex.RUnlock() + argsForCall := fake.deleteSessionsArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 } -func (fake *SessionFactory) NewSession(caller string, contextID string, endpoint string, pkid []byte) (view.Session, error) { - var pkidCopy []byte - if pkid != nil { - pkidCopy = make([]byte, len(pkid)) - copy(pkidCopy, pkid) +func (fake *SessionFactory) NewSession(arg1 string, arg2 string, arg3 string, arg4 []byte) (viewa.Session, error) { + var arg4Copy []byte + if arg4 != nil { + arg4Copy = make([]byte, len(arg4)) + copy(arg4Copy, arg4) } fake.newSessionMutex.Lock() ret, specificReturn := fake.newSessionReturnsOnCall[len(fake.newSessionArgsForCall)] fake.newSessionArgsForCall = append(fake.newSessionArgsForCall, struct { - caller string - contextID string - endpoint string - pkid []byte - }{caller, contextID, endpoint, pkidCopy}) - fake.recordInvocation("NewSession", []interface{}{caller, contextID, endpoint, pkidCopy}) + arg1 string + arg2 string + arg3 string + arg4 []byte + }{arg1, arg2, arg3, arg4Copy}) + stub := fake.NewSessionStub + fakeReturns := fake.newSessionReturns + fake.recordInvocation("NewSession", []interface{}{arg1, arg2, arg3, arg4Copy}) fake.newSessionMutex.Unlock() - if fake.NewSessionStub != nil { - return fake.NewSessionStub(caller, contextID, endpoint, pkid) + if stub != nil { + return stub(arg1, arg2, arg3, arg4) } if specificReturn { return ret.result1, ret.result2 } - return fake.newSessionReturns.result1, fake.newSessionReturns.result2 + return fakeReturns.result1, fakeReturns.result2 } func (fake *SessionFactory) NewSessionCallCount() int { @@ -145,67 +120,128 @@ func (fake *SessionFactory) NewSessionCallCount() int { return len(fake.newSessionArgsForCall) } +func (fake *SessionFactory) NewSessionCalls(stub func(string, string, string, []byte) (viewa.Session, error)) { + fake.newSessionMutex.Lock() + defer fake.newSessionMutex.Unlock() + 
fake.NewSessionStub = stub +} + func (fake *SessionFactory) NewSessionArgsForCall(i int) (string, string, string, []byte) { fake.newSessionMutex.RLock() defer fake.newSessionMutex.RUnlock() - return fake.newSessionArgsForCall[i].caller, fake.newSessionArgsForCall[i].contextID, fake.newSessionArgsForCall[i].endpoint, fake.newSessionArgsForCall[i].pkid + argsForCall := fake.newSessionArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 } -func (fake *SessionFactory) NewSessionReturns(result1 view.Session, result2 error) { +func (fake *SessionFactory) NewSessionReturns(result1 viewa.Session, result2 error) { + fake.newSessionMutex.Lock() + defer fake.newSessionMutex.Unlock() fake.NewSessionStub = nil fake.newSessionReturns = struct { - result1 view.Session + result1 viewa.Session result2 error }{result1, result2} } -func (fake *SessionFactory) NewSessionReturnsOnCall(i int, result1 view.Session, result2 error) { +func (fake *SessionFactory) NewSessionReturnsOnCall(i int, result1 viewa.Session, result2 error) { + fake.newSessionMutex.Lock() + defer fake.newSessionMutex.Unlock() fake.NewSessionStub = nil if fake.newSessionReturnsOnCall == nil { fake.newSessionReturnsOnCall = make(map[int]struct { - result1 view.Session + result1 viewa.Session result2 error }) } fake.newSessionReturnsOnCall[i] = struct { - result1 view.Session + result1 viewa.Session result2 error }{result1, result2} } -func (fake *SessionFactory) DeleteSessions(ctx context.Context, sessionID string) { - fake.deleteSessionsMutex.Lock() - fake.deleteSessionsArgsForCall = append(fake.deleteSessionsArgsForCall, struct { - sessionID string - }{sessionID}) - fake.recordInvocation("DeleteSessions", []interface{}{sessionID}) - fake.deleteSessionsMutex.Unlock() - if fake.DeleteSessionsStub != nil { - fake.DeleteSessionsStub(sessionID) +func (fake *SessionFactory) NewSessionWithID(arg1 string, arg2 string, arg3 string, arg4 []byte, arg5 viewa.Identity, arg6 
*viewa.Message) (viewa.Session, error) { + var arg4Copy []byte + if arg4 != nil { + arg4Copy = make([]byte, len(arg4)) + copy(arg4Copy, arg4) } + fake.newSessionWithIDMutex.Lock() + ret, specificReturn := fake.newSessionWithIDReturnsOnCall[len(fake.newSessionWithIDArgsForCall)] + fake.newSessionWithIDArgsForCall = append(fake.newSessionWithIDArgsForCall, struct { + arg1 string + arg2 string + arg3 string + arg4 []byte + arg5 viewa.Identity + arg6 *viewa.Message + }{arg1, arg2, arg3, arg4Copy, arg5, arg6}) + stub := fake.NewSessionWithIDStub + fakeReturns := fake.newSessionWithIDReturns + fake.recordInvocation("NewSessionWithID", []interface{}{arg1, arg2, arg3, arg4Copy, arg5, arg6}) + fake.newSessionWithIDMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4, arg5, arg6) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 } -func (fake *SessionFactory) DeleteSessionsCallCount() int { - fake.deleteSessionsMutex.RLock() - defer fake.deleteSessionsMutex.RUnlock() - return len(fake.deleteSessionsArgsForCall) +func (fake *SessionFactory) NewSessionWithIDCallCount() int { + fake.newSessionWithIDMutex.RLock() + defer fake.newSessionWithIDMutex.RUnlock() + return len(fake.newSessionWithIDArgsForCall) } -func (fake *SessionFactory) DeleteSessionsArgsForCall(i int) string { - fake.deleteSessionsMutex.RLock() - defer fake.deleteSessionsMutex.RUnlock() - return fake.deleteSessionsArgsForCall[i].sessionID +func (fake *SessionFactory) NewSessionWithIDCalls(stub func(string, string, string, []byte, viewa.Identity, *viewa.Message) (viewa.Session, error)) { + fake.newSessionWithIDMutex.Lock() + defer fake.newSessionWithIDMutex.Unlock() + fake.NewSessionWithIDStub = stub +} + +func (fake *SessionFactory) NewSessionWithIDArgsForCall(i int) (string, string, string, []byte, viewa.Identity, *viewa.Message) { + fake.newSessionWithIDMutex.RLock() + defer fake.newSessionWithIDMutex.RUnlock() + argsForCall := 
fake.newSessionWithIDArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6 +} + +func (fake *SessionFactory) NewSessionWithIDReturns(result1 viewa.Session, result2 error) { + fake.newSessionWithIDMutex.Lock() + defer fake.newSessionWithIDMutex.Unlock() + fake.NewSessionWithIDStub = nil + fake.newSessionWithIDReturns = struct { + result1 viewa.Session + result2 error + }{result1, result2} +} + +func (fake *SessionFactory) NewSessionWithIDReturnsOnCall(i int, result1 viewa.Session, result2 error) { + fake.newSessionWithIDMutex.Lock() + defer fake.newSessionWithIDMutex.Unlock() + fake.NewSessionWithIDStub = nil + if fake.newSessionWithIDReturnsOnCall == nil { + fake.newSessionWithIDReturnsOnCall = make(map[int]struct { + result1 viewa.Session + result2 error + }) + } + fake.newSessionWithIDReturnsOnCall[i] = struct { + result1 viewa.Session + result2 error + }{result1, result2} } func (fake *SessionFactory) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() - fake.newSessionWithIDMutex.RLock() - defer fake.newSessionWithIDMutex.RUnlock() - fake.newSessionMutex.RLock() - defer fake.newSessionMutex.RUnlock() fake.deleteSessionsMutex.RLock() defer fake.deleteSessionsMutex.RUnlock() + fake.newSessionMutex.RLock() + defer fake.newSessionMutex.RUnlock() + fake.newSessionWithIDMutex.RLock() + defer fake.newSessionWithIDMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value @@ -225,4 +261,4 @@ func (fake *SessionFactory) recordInvocation(key string, args []interface{}) { fake.invocations[key] = append(fake.invocations[key], args) } -var _ view2.SessionFactory = new(SessionFactory) +var _ view.SessionFactory = new(SessionFactory)