Skip to content

Commit f718468

Browse files
Disregard events of invalid transactions (#158)
* Disregard events of invalid transactions

  Signed-off-by: Nithin <[email protected]>

* Temp revert graceful shutdown changes

  Signed-off-by: Nithin <[email protected]>

---------

Signed-off-by: Nithin <[email protected]>
1 parent c9c800c commit f718468

File tree

2 files changed

+19
-87
lines changed

2 files changed

+19
-87
lines changed

src/main/java/hlf/java/rest/client/listener/ChaincodeEventListener.java

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,16 @@ public void chaincodeEventListener(ContractEvent contractEvent) {
4747
: StringUtils.EMPTY;
4848
boolean isValidTransaction = contractEvent.getTransactionEvent().isValid();
4949

50+
if (!isValidTransaction) {
51+
log.warn(
52+
"Inbound event with TxID {} and Block-Number {} with Event-Name {} is marked as invalid and will be skipped "
53+
+ "from publishing",
54+
txId,
55+
blockNumber,
56+
eventName);
57+
return;
58+
}
59+
5060
if (recencyTransactionContext.validateAndRemoveTransactionContext(txId)) {
5161
publishChaincodeEvent(
5262
txId, chaincodeId, eventName, payload, channelName, blockNumber, isValidTransaction);

src/main/java/hlf/java/rest/client/listener/DynamicKafkaListener.java

Lines changed: 9 additions & 87 deletions
Original file line numberDiff line numberDiff line change
@@ -7,17 +7,14 @@
77
import java.util.List;
88
import java.util.concurrent.CompletableFuture;
99
import java.util.concurrent.ExecutionException;
10-
import java.util.concurrent.atomic.AtomicInteger;
1110
import lombok.Getter;
1211
import lombok.SneakyThrows;
1312
import lombok.extern.slf4j.Slf4j;
1413
import org.apache.kafka.clients.consumer.ConsumerRecord;
1514
import org.springframework.beans.factory.annotation.Autowired;
16-
import org.springframework.beans.factory.annotation.Value;
1715
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
1816
import org.springframework.cloud.context.scope.refresh.RefreshScopeRefreshedEvent;
1917
import org.springframework.context.annotation.Configuration;
20-
import org.springframework.context.event.ContextClosedEvent;
2118
import org.springframework.context.event.ContextRefreshedEvent;
2219
import org.springframework.context.event.EventListener;
2320
import org.springframework.core.task.TaskExecutor;
@@ -54,17 +51,6 @@ public class DynamicKafkaListener {
5451

5552
@Autowired private CommonErrorHandler topicTransactionErrorHandler;
5653

57-
private final AtomicInteger inFlightRecords = new AtomicInteger(0);
58-
59-
@Value("${kafka.general.consumer-shutdown-timeout-in-sec:30}")
60-
private int shutdownTimeoutInSeconds;
61-
62-
@EventListener(ContextClosedEvent.class)
63-
public void onContextClosed() {
64-
log.info("Application context closing, performing graceful Kafka shutdown");
65-
performGracefulShutdown();
66-
}
67-
6854
@EventListener
6955
public void handleEvent(ContextRefreshedEvent event) {
7056
log.info("Initializing Kafka Consumers..");
@@ -142,122 +128,58 @@ private Object determineMessageListenerForTransactions(KafkaProperties.Consumer
142128
*/
143129
private Object getMultithreadedBatchAcknowledgingMessageListener() {
144130
return new BatchAcknowledgingMessageListener<String, String>() {
131+
145132
@SneakyThrows
146133
@Override
147134
public void onMessage(
148135
List<ConsumerRecord<String, String>> consumerRecords, Acknowledgment acknowledgment) {
149136
log.debug("Consumer got assigned with a Batch of size : {}", consumerRecords.size());
150137

151-
// Track the number of records we're processing
152-
inFlightRecords.addAndGet(consumerRecords.size());
153-
154138
List<CompletableFuture<Void>> transactionSubmissionTasks = new ArrayList<>();
155139

156140
// Dispatch workers for asynchronously processing Individual records
157141
for (ConsumerRecord<String, String> message : consumerRecords) {
158142
transactionSubmissionTasks.add(
159143
CompletableFuture.runAsync(
160144
() -> {
161-
try {
162-
transactionConsumer.listen(message);
163-
} finally {
164-
// No need to decrement here as we'll do it after all tasks complete or fail
165-
}
145+
transactionConsumer.listen(message);
166146
},
167147
defaultTaskExecutor));
168148
}
169149

170-
boolean batchSuccess = true;
171-
int failedIndex = -1;
172-
173150
for (int i = 0; i < transactionSubmissionTasks.size(); i++) {
174151
try {
175152
transactionSubmissionTasks.get(i).get();
176153
} catch (InterruptedException | ExecutionException e) {
177-
batchSuccess = false;
178-
failedIndex = i;
179154

180155
final Throwable cause = e.getCause();
181156

182157
if (cause instanceof ServiceException) {
183158
log.error(
184159
"One of the Consumer Record in Async Batch Processor failed with message {}",
185160
cause.getMessage());
161+
throw new BatchListenerFailedException(
162+
"Failed to process a Consumer Record from the Batch", i);
186163
}
187164

188165
if (cause instanceof InterruptedException) {
189166
throw e;
190167
}
191168
}
192169
}
193-
194-
// Always decrement the counter for all records in the batch
195-
inFlightRecords.addAndGet(-consumerRecords.size());
196-
197170
// If the entire Records were processed successfully, Ack & commit the entire Batch
198-
if (batchSuccess) {
199-
acknowledgment.acknowledge();
200-
} else {
201-
throw new BatchListenerFailedException(
202-
"Failed to process a Consumer Record from the Batch", failedIndex);
203-
}
171+
acknowledgment.acknowledge();
204172
}
205173
};
206174
}
207175

208176
private Object getPerRecordAcknowledgingListener() {
177+
209178
return (AcknowledgingMessageListener<String, String>)
210179
(message, acknowledgment) -> {
211-
try {
212-
// Increment counter before processing
213-
inFlightRecords.incrementAndGet();
214-
215-
transactionConsumer.listen(message);
216-
// Manually ack the single Record
217-
acknowledgment.acknowledge();
218-
} finally {
219-
// Always decrement counter, even if exception occurred
220-
inFlightRecords.decrementAndGet();
221-
}
180+
transactionConsumer.listen(message);
181+
// Manually ack the single Record
182+
acknowledgment.acknowledge();
222183
};
223184
}
224-
225-
private void performGracefulShutdown() {
226-
log.info("Starting graceful shutdown of Kafka consumers");
227-
228-
// Stop all containers from polling new messages
229-
if (!CollectionUtils.isEmpty(existingContainers)) {
230-
existingContainers.forEach(
231-
container -> {
232-
log.info("Stopping container: {}", container.metrics().keySet().iterator().next());
233-
container.stop();
234-
});
235-
}
236-
237-
// Wait for in-flight messages to be processed
238-
log.info(
239-
"All Kafka containers stopped from polling. Waiting for {} in-flight records to be processed...",
240-
inFlightRecords.get());
241-
242-
long startTime = System.currentTimeMillis();
243-
244-
try {
245-
while (inFlightRecords.get() > 0
246-
&& System.currentTimeMillis() - startTime < (shutdownTimeoutInSeconds * 1000L)) {
247-
log.info("Still waiting for {} records to be acknowledged", inFlightRecords.get());
248-
Thread.sleep(500);
249-
}
250-
} catch (InterruptedException e) {
251-
Thread.currentThread().interrupt();
252-
log.error("Interrupted during shutdown wait", e);
253-
}
254-
255-
if (inFlightRecords.get() > 0) {
256-
log.warn("{} records were not acknowledged before shutdown timeout", inFlightRecords.get());
257-
} else {
258-
log.info("All records successfully processed and acknowledged");
259-
}
260-
261-
log.info("Kafka consumer graceful shutdown completed");
262-
}
263185
}

0 commit comments

Comments
 (0)