Search code examples
apache-kafka, spring-kafka

Spring Kafka: do not retry uncommitted offsets after a restart


How can I stop Spring Kafka from re-delivering messages whose offsets were never committed? For example, if I kill the application and then restart it, my consumer starts consuming the messages that were not consumed before the shutdown. How can I prevent that?

/**
 * Builds a consumer factory configured for manual offset management.
 *
 * <p>Auto-commit is switched off so offsets are committed only through
 * explicit acknowledgments, and consumers without a committed offset
 * start reading from the earliest available record.
 *
 * @return the manually-acknowledged consumer factory
 */
@Bean
public ConsumerFactory<String, String> manualConsumerFactory() {
    final Map<String, Object> consumerProps = consumerConfigs();
    consumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, Boolean.FALSE);
    consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    return new DefaultKafkaConsumerFactory<>(consumerProps);
}

/**
 * Kafka manual-ack listener container factory.
 *
 * <p>Uses {@code MANUAL_IMMEDIATE} ack mode and registers a rebalance
 * listener that seeks every newly assigned partition to its log end, so
 * records that were delivered but never acknowledged before a restart are
 * skipped instead of being replayed from the last committed offset.
 *
 * @return the kafka listener container factory
 */
@Bean
public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaManualAckListenerContainerFactory() {
    final ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(manualConsumerFactory());
    final ContainerProperties props = factory.getContainerProperties();
    props.setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
    // Fix for "do not retry uncommitted offsets": on every partition
    // assignment, jump to the end of the log rather than resuming from the
    // last committed offset.
    props.setConsumerRebalanceListener(new org.springframework.kafka.listener.ConsumerAwareRebalanceListener() {
        @Override
        public void onPartitionsAssigned(final org.apache.kafka.clients.consumer.Consumer<?, ?> consumer,
                final java.util.Collection<org.apache.kafka.common.TopicPartition> partitions) {
            consumer.seekToEnd(partitions);
        }
    });
    return factory;
}

/**
 * Launches the settlement CSV import batch job when a {@code Notification}
 * event is published.
 *
 * <p>Job-launch failures are logged and swallowed so the event listener
 * never propagates batch exceptions back to the publisher.
 *
 * @param notification the event that triggered processing (currently only logged)
 */
@Override
@EventListener
public void processSettlementFile(final Notification notification) {
    LOG.info("Handling message [{}]", notification);


    try {
        // TODO: file name and bucket are hard-coded; they should be taken
        // from the incoming notification / Kafka message instead.
        final Map<String, JobParameter> parameters = new HashMap<>();
        parameters.put("fileName", new JobParameter("1-101-D-2017-212-volume-per-transaction.csv"));
        parameters.put("bucket", new JobParameter("bucket-name-can-be-passed-also-from-kafka-todo"));
        final JobParameters jobParameters = new JobParameters(parameters);

        final JobExecution execution = jobLauncher.run(succeededTransactionCsvFileToDatabaseJob, jobParameters);
        // Parameterized logging (SLF4J idiom) instead of eager string concatenation.
        LOG.info("Job Execution Status: {}", execution.getStatus());
    } catch (JobExecutionAlreadyRunningException | JobRestartException | JobInstanceAlreadyCompleteException | JobParametersInvalidException e) {
        LOG.error("Failed to process job..", e);
    }
}

// Consumes from "topic" using the manual-ack container factory: the offset is
// acknowledged (committed) only after the event has been re-published
// in-process, so an unacked record would be redelivered on restart.
@KafkaListener(topics = "topic", groupId = "processor-service", clientIdPrefix = "string", containerFactory = "kafkaManualAckListenerContainerFactory")
public void listenAsString(@Payload final String payload, Acknowledgment acknowledgment, final ConsumerRecord<String, String> consumerRecord) throws TopicEventException {

    // NOTE(review): `object` is not defined in this snippet — presumably the
    // published event should be derived from `payload`; confirm against the
    // full class before changing.
    applicationEventPublisher.publishEvent(object);
    acknowledgment.acknowledge();
}

Solution

  • You can add a ConsumerAwareRebalanceListener to the container configuration and call consumer.seekToEnd(partitions) in onPartitionsAssigned().