Commits (showing changes from 12 of 18 commits)
2eddb15
make the v2 creds provider be the default
swatimodi-scout Nov 3, 2025
cabcec5
feat: Upgrade kinesis github.com/vmware/vmware-go-kcl-v1 to v2.
devendrapohekar-scout Oct 31, 2025
112fd8d
Resolved conflicts
swatimodi-scout Nov 11, 2025
de8646f
Update common/authentication/aws/client.go
swatimodi-scout Nov 11, 2025
90e071c
Update bindings/aws/kinesis/kinesis.go
swatimodi-scout Nov 11, 2025
648a318
Resolved comments given by acroca
swatimodi-scout Nov 11, 2025
b686d94
Removed v1 credentials for kinesis
swatimodi-scout Nov 13, 2025
9a70082
updated client.go
swatimodi-scout Nov 13, 2025
9dafc6d
Update AWS SDK to v2 and refactor Kinesis integration
rideshnath-scout Nov 13, 2025
8b93022
Merge branch 'main' into feat/kinesis-binding-vmware-go-kcl-v2-latest
swatimodi-scout Nov 14, 2025
2ee207c
Merge branch 'main' into feat/kinesis-binding-vmware-go-kcl-v2-latest
swatimodi-scout Nov 14, 2025
9b003aa
refactor: migrate Kinesis integration to AWS SDK v2 and update relate…
rideshnath-scout Nov 18, 2025
6ebe1a4
refactor: streamline AWS Kinesis client creation and remove unused au…
rideshnath-scout Nov 19, 2025
c064187
feat: add applicationName metadata field to Kinesis binding
rideshnath-scout Nov 19, 2025
716b8a1
refactor: update AWS SDK v2 dependencies and improve import organization
rideshnath-scout Nov 20, 2025
bf33f52
refactor: correct variable naming and improve context handling in tes…
rideshnath-scout Nov 20, 2025
d8f5103
refactor: reorder AWS SDK imports (resolve lint issues)
rideshnath-scout Nov 20, 2025
bd2639a
Merge branch 'main' into feat/kinesis-binding-vmware-go-kcl-v2-latest
swatimodi-scout Nov 21, 2025
171 changes: 106 additions & 65 deletions bindings/aws/kinesis/kinesis.go
@@ -22,13 +22,16 @@ import (
"sync/atomic"
"time"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/kinesis"
"github.com/aws/aws-sdk-go-v2/aws"
awsv2config "github.com/aws/aws-sdk-go-v2/config"
v2creds "github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/kinesis"
"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
"github.com/cenkalti/backoff/v4"
"github.com/google/uuid"
"github.com/vmware/vmware-go-kcl/clientlibrary/interfaces"
"github.com/vmware/vmware-go-kcl/clientlibrary/worker"
"github.com/vmware/vmware-go-kcl-v2/clientlibrary/config"
"github.com/vmware/vmware-go-kcl-v2/clientlibrary/interfaces"
"github.com/vmware/vmware-go-kcl-v2/clientlibrary/worker"

"github.com/dapr/components-contrib/bindings"
awsAuth "github.com/dapr/components-contrib/common/authentication/aws"
@@ -42,17 +45,19 @@ type AWSKinesis struct {
authProvider awsAuth.Provider
Contributor: Do we still need the authProvider?

metadata *kinesisMetadata

worker *worker.Worker

streamName string
consumerName string
consumerARN *string
logger logger.Logger
consumerMode string

closed atomic.Bool
closeCh chan struct{}
wg sync.WaitGroup
worker *worker.Worker
kinesisClient *kinesis.Client
v2Credentials aws.CredentialsProvider

streamName string
consumerName string
consumerARN *string
logger logger.Logger
consumerMode string
applicationName string
closed atomic.Bool
closeCh chan struct{}
wg sync.WaitGroup
}
Contributor: Add kinesis client here and init with sdk v2

Contributor (author): Done

// TODO: we need to clean up the metadata fields here and update this binding to use the builtin aws auth provider and reflect in metadata.yaml
@@ -65,6 +70,7 @@ type kinesisMetadata struct {
SecretKey string `json:"secretKey" mapstructure:"secretKey"`
SessionToken string `json:"sessionToken" mapstructure:"sessionToken"`
KinesisConsumerMode string `json:"mode" mapstructure:"mode"`
ApplicationName string `json:"applicationName" mapstructure:"applicationName"`
}

const (
@@ -116,6 +122,7 @@ func (a *AWSKinesis) Init(ctx context.Context, metadata bindings.Metadata) error
a.consumerMode = m.KinesisConsumerMode
a.streamName = m.StreamName
a.consumerName = m.ConsumerName
a.applicationName = m.ApplicationName
a.metadata = m

opts := awsAuth.Options{
@@ -132,6 +139,12 @@ func (a *AWSKinesis) Init(ctx context.Context, metadata bindings.Metadata) error
return err
}
a.authProvider = provider
// Create AWS SDK v2 client
if err := a.createKinesisClient(ctx); err != nil {
return err
}

return nil
}

@@ -144,7 +157,7 @@ func (a *AWSKinesis) Invoke(ctx context.Context, req *bindings.InvokeRequest) (*
if partitionKey == "" {
partitionKey = uuid.New().String()
}
_, err := a.authProvider.Kinesis().Kinesis.PutRecordWithContext(ctx, &kinesis.PutRecordInput{
_, err := a.kinesisClient.PutRecord(ctx, &kinesis.PutRecordInput{
StreamName: &a.metadata.StreamName,
Data: req.Data,
PartitionKey: &partitionKey,
@@ -158,21 +171,23 @@ func (a *AWSKinesis) Read(ctx context.Context, handler bindings.Handler) (err er
return errors.New("binding is closed")
}

if a.metadata.KinesisConsumerMode == SharedThroughput {
switch a.metadata.KinesisConsumerMode {
case SharedThroughput:
// initialize worker configuration
config := a.workerCfg(ctx, a.streamName, a.metadata.Region, a.consumerMode, a.applicationName)
// Configure the KCL worker with custom endpoints for LocalStack
config := a.authProvider.Kinesis().WorkerCfg(ctx, a.streamName, a.consumerName, a.consumerMode)
if a.metadata.Endpoint != "" {
config.KinesisEndpoint = a.metadata.Endpoint
config.DynamoDBEndpoint = a.metadata.Endpoint
config = config.WithKinesisEndpoint(a.metadata.Endpoint)
config = config.WithDynamoDBEndpoint(a.metadata.Endpoint)
}
a.worker = worker.NewWorker(a.recordProcessorFactory(ctx, handler), config)
err = a.worker.Start()
if err != nil {
return err
}
} else if a.metadata.KinesisConsumerMode == ExtendedFanout {
case ExtendedFanout:
var stream *kinesis.DescribeStreamOutput
stream, err = a.authProvider.Kinesis().Kinesis.DescribeStream(&kinesis.DescribeStreamInput{StreamName: &a.metadata.StreamName})
stream, err = a.kinesisClient.DescribeStream(ctx, &kinesis.DescribeStreamInput{StreamName: &a.metadata.StreamName})
if err != nil {
return err
}
@@ -182,7 +197,7 @@ func (a *AWSKinesis) Read(ctx context.Context, handler bindings.Handler) (err er
}
}

stream, err := a.authProvider.Kinesis().Stream(ctx, a.streamName)
stream, err := a.getStreamARN(ctx, a.streamName)
if err != nil {
return fmt.Errorf("failed to get kinesis stream arn: %v", err)
}
@@ -194,9 +209,10 @@ func (a *AWSKinesis) Read(ctx context.Context, handler bindings.Handler) (err er
case <-ctx.Done():
case <-a.closeCh:
}
if a.metadata.KinesisConsumerMode == SharedThroughput {
switch a.metadata.KinesisConsumerMode {
case SharedThroughput:
a.worker.Shutdown()
} else if a.metadata.KinesisConsumerMode == ExtendedFanout {
case ExtendedFanout:
a.deregisterConsumer(ctx, stream, a.consumerARN)
}
}()
@@ -205,7 +221,7 @@ func (a *AWSKinesis) Read(ctx context.Context, handler bindings.Handler) (err er
}

// Subscribe to all shards.
func (a *AWSKinesis) Subscribe(ctx context.Context, streamDesc kinesis.StreamDescription, handler bindings.Handler) error {
func (a *AWSKinesis) Subscribe(ctx context.Context, streamDesc types.StreamDescription, handler bindings.Handler) error {
consumerARN, err := a.ensureConsumer(ctx, streamDesc.StreamARN)
if err != nil {
a.logger.Error(err)
@@ -216,7 +232,7 @@ func (a *AWSKinesis) Subscribe(ctx context.Context, streamDesc kinesis.StreamDes

a.wg.Add(len(streamDesc.Shards))
for i, shard := range streamDesc.Shards {
go func(idx int, s *kinesis.Shard) {
go func(idx int, s types.Shard) {
defer a.wg.Done()

// Reconnection backoff
@@ -232,14 +248,14 @@ func (a *AWSKinesis) Subscribe(ctx context.Context, streamDesc kinesis.StreamDes
return
default:
}
sub, err := a.authProvider.Kinesis().Kinesis.SubscribeToShardWithContext(ctx, &kinesis.SubscribeToShardInput{
sub, err := a.kinesisClient.SubscribeToShard(ctx, &kinesis.SubscribeToShardInput{
ConsumerARN: consumerARN,
ShardId: s.ShardId,
StartingPosition: &kinesis.StartingPosition{Type: aws.String(kinesis.ShardIteratorTypeLatest)},
StartingPosition: &types.StartingPosition{Type: types.ShardIteratorTypeLatest},
})
if err != nil {
wait := bo.NextBackOff()
a.logger.Errorf("Error while reading from shard %v: %v. Attempting to reconnect in %s...", s.ShardId, err, wait)
a.logger.Errorf("Error while reading from shard %v: %v. Attempting to reconnect in %s...", *s.ShardId, err, wait)
select {
case <-ctx.Done():
return
@@ -252,10 +268,10 @@ func (a *AWSKinesis) Subscribe(ctx context.Context, streamDesc kinesis.StreamDes
bo.Reset()

// Process events
for event := range sub.EventStream.Events() {
for event := range sub.GetStream().Events() {
switch e := event.(type) {
case *kinesis.SubscribeToShardEvent:
for _, rec := range e.Records {
case *types.SubscribeToShardEventStreamMemberSubscribeToShardEvent:
for _, rec := range e.Value.Records {
handler(ctx, &bindings.ReadResponse{
Data: rec.Data,
})
@@ -284,7 +300,7 @@ func (a *AWSKinesis) ensureConsumer(ctx context.Context, streamARN *string) (*st
// Only set timeout on consumer call.
conCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
consumer, err := a.authProvider.Kinesis().Kinesis.DescribeStreamConsumerWithContext(conCtx, &kinesis.DescribeStreamConsumerInput{
consumer, err := a.kinesisClient.DescribeStreamConsumer(conCtx, &kinesis.DescribeStreamConsumerInput{
ConsumerName: &a.metadata.ConsumerName,
StreamARN: streamARN,
})
@@ -296,7 +312,7 @@ func (a *AWSKinesis) ensureConsumer(ctx context.Context, streamARN *string) (*st
}

func (a *AWSKinesis) registerConsumer(ctx context.Context, streamARN *string) (*string, error) {
consumer, err := a.authProvider.Kinesis().Kinesis.RegisterStreamConsumerWithContext(ctx, &kinesis.RegisterStreamConsumerInput{
consumer, err := a.kinesisClient.RegisterStreamConsumer(ctx, &kinesis.RegisterStreamConsumerInput{
ConsumerName: &a.metadata.ConsumerName,
StreamARN: streamARN,
})
@@ -319,7 +335,7 @@ func (a *AWSKinesis) deregisterConsumer(ctx context.Context, streamARN *string,
if a.consumerARN != nil {
// Use a background context because the running context may have been canceled already
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
_, err := a.authProvider.Kinesis().Kinesis.DeregisterStreamConsumerWithContext(ctx, &kinesis.DeregisterStreamConsumerInput{
_, err := a.kinesisClient.DeregisterStreamConsumer(ctx, &kinesis.DeregisterStreamConsumerInput{
ConsumerARN: consumerARN,
StreamARN: streamARN,
ConsumerName: &a.metadata.ConsumerName,
@@ -332,34 +348,19 @@ func (a *AWSKinesis) deregisterConsumer(ctx context.Context, streamARN *string,
return nil
}

func (a *AWSKinesis) waitUntilConsumerExists(ctx aws.Context, input *kinesis.DescribeStreamConsumerInput, opts ...request.WaiterOption) error {
w := request.Waiter{
Name: "WaitUntilConsumerExists",
MaxAttempts: 18,
Delay: request.ConstantWaiterDelay(10 * time.Second),
Acceptors: []request.WaiterAcceptor{
{
State: request.SuccessWaiterState,
Matcher: request.PathWaiterMatch, Argument: "ConsumerDescription.ConsumerStatus",
Expected: "ACTIVE",
},
},
NewRequest: func(opts []request.Option) (*request.Request, error) {
var inCpy *kinesis.DescribeStreamConsumerInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := a.authProvider.Kinesis().Kinesis.DescribeStreamConsumerRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)

return req, nil
},
func (a *AWSKinesis) waitUntilConsumerExists(ctx context.Context, input *kinesis.DescribeStreamConsumerInput) error {
// Poll until consumer is active
for i := 0; i < 18; i++ {
consumer, err := a.kinesisClient.DescribeStreamConsumer(ctx, input)
if err != nil {
return err
}
if consumer.ConsumerDescription.ConsumerStatus == types.ConsumerStatusActive {
return nil
}
time.Sleep(10 * time.Second)
}
w.ApplyOptions(opts...)

return w.WaitWithContext(ctx)
return fmt.Errorf("consumer did not become active within timeout")
}
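Editor's note on the loop above: the v1 request.Waiter is replaced by a fixed 18 x 10 s polling loop that sleeps unconditionally. If cancellation during the wait matters, a context-aware variant could look like the following sketch; the method name is illustrative and this is not part of the PR.

func (a *AWSKinesis) waitUntilConsumerExistsCtx(ctx context.Context, input *kinesis.DescribeStreamConsumerInput) error {
	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()
	for attempt := 0; attempt < 18; attempt++ {
		consumer, err := a.kinesisClient.DescribeStreamConsumer(ctx, input)
		if err != nil {
			return err
		}
		if consumer.ConsumerDescription.ConsumerStatus == types.ConsumerStatusActive {
			return nil
		}
		select {
		case <-ctx.Done():
			// Stop waiting as soon as the caller cancels instead of sleeping through it.
			return ctx.Err()
		case <-ticker.C:
		}
	}
	return errors.New("consumer did not become active within timeout")
}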

func (a *AWSKinesis) parseMetadata(meta bindings.Metadata) (*kinesisMetadata, error) {
@@ -388,7 +389,7 @@ func (r *recordProcessorFactory) CreateProcessor() interfaces.IRecordProcessor {
}

func (p *recordProcessor) Initialize(input *interfaces.InitializationInput) {
p.logger.Infof("Processing ShardId: %v at checkpoint: %v", input.ShardId, aws.StringValue(input.ExtendedSequenceNumber.SequenceNumber))
p.logger.Infof("Processing ShardId: %v at checkpoint: %v", input.ShardId, *input.ExtendedSequenceNumber.SequenceNumber)
}

func (p *recordProcessor) ProcessRecords(input *interfaces.ProcessRecordsInput) {
@@ -414,6 +415,46 @@ func (p *recordProcessor) Shutdown(input *interfaces.ShutdownInput) {
}
}

func (a *AWSKinesis) createKinesisClient(ctx context.Context) error {
Contributor: This method probably needs to receive the AWS config and return the Kinesis client. Assign the client in the Init method.

rideshnath-scout (Nov 19, 2025): Done, please check.

Contributor (author): @javier-aliaga Is there anything that we need to fix on our side for the lint or test failures?
// Convert v1 credentials to v2
if v1Creds, err := a.authProvider.Kinesis().Credentials.Get(); err == nil {
a.v2Credentials = v2creds.NewStaticCredentialsProvider(v1Creds.AccessKeyID, v1Creds.SecretAccessKey, v1Creds.SessionToken)
} else {
// Fallback to default v2 config if conversion failed
v2Config, err := awsv2config.LoadDefaultConfig(ctx, awsv2config.WithRegion(a.authProvider.Kinesis().Region))
if err != nil {
return err
}
a.v2Credentials = v2Config.Credentials
}

// Create v2 config and Kinesis client
v2Config := aws.Config{
Region: a.authProvider.Kinesis().Region,
Credentials: a.v2Credentials,
}
a.kinesisClient = kinesis.NewFromConfig(v2Config)
return nil
}
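Regarding the review comment above on this function's shape (receive the resolved config, return the client, assign it in Init), a minimal sketch of that pattern follows. The helper name and the endpoint override are illustrative assumptions, not the PR's final API; the SDK v2 calls themselves (kinesis.NewFromConfig, aws.String) are standard.

// newKinesisClient builds a Kinesis client from an already-resolved SDK v2 config.
// The optional endpoint override (e.g. for LocalStack) is an assumption for this sketch.
func newKinesisClient(cfg aws.Config, endpoint string) *kinesis.Client {
	return kinesis.NewFromConfig(cfg, func(o *kinesis.Options) {
		if endpoint != "" {
			o.BaseEndpoint = aws.String(endpoint)
		}
	})
}

// In Init, after resolving cfg (for example via awsv2config.LoadDefaultConfig):
//   a.kinesisClient = newKinesisClient(cfg, a.metadata.Endpoint)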

func (a *AWSKinesis) getStreamARN(ctx context.Context, streamName string) (*string, error) {
stream, err := a.kinesisClient.DescribeStream(ctx, &kinesis.DescribeStreamInput{
StreamName: &streamName,
})
if err != nil {
return nil, err
}
return stream.StreamDescription.StreamARN, nil
}

func (a *AWSKinesis) workerCfg(_ context.Context, stream, region, mode, applicationName string) *config.KinesisClientLibConfiguration {
const sharedMode = "shared"
if mode == sharedMode {
return config.NewKinesisClientLibConfigWithCredential(applicationName, stream, region, "", a.v2Credentials)
}
return nil
}

// GetComponentMetadata returns the metadata of the component.
func (a *AWSKinesis) GetComponentMetadata() (metadataInfo metadata.MetadataMap) {
metadataStruct := &kinesisMetadata{}