Kafka Consumer not able to read all messages after offset commit (error=OFFSET_OUT_OF_RANGE)
I created a consumer to receive messages in batches.
Consumer configuration (a plain-consumer sketch of the settings that matter here follows the list):
allow.auto.create.topics = false
auto.commit.interval.ms = 5000
auto.offset.reset = latest
bootstrap.servers = [localhost:9092]
check.crcs = true
client.dns.lookup = default
client.id =
client.rack =
connections.max.idle.ms = 540000
default.api.timeout.ms = 60000
enable.auto.commit = false
exclude.internal.topics = true
fetch.max.bytes = 52428800
fetch.max.wait.ms = 500
fetch.min.bytes = 1
group.id = cm-persistence-notification
group.instance.id = null
heartbeat.interval.ms = 3000
interceptor.classes = []
internal.leave.group.on.close = true
isolation.level = read_uncommitted
key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
max.partition.fetch.bytes = 1048576
max.poll.interval.ms = 300000
max.poll.records = 1000
metadata.max.age.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor]
receive.buffer.bytes = 65536
reconnect.backoff.max.ms = 1000
reconnect.backoff.ms = 50
request.timeout.ms = 30000
retry.backoff.ms = 100
sasl.client.callback.handler.class = null
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.login.callback.handler.class = null
sasl.login.class = null
sasl.login.refresh.buffer.seconds = 300
sasl.login.refresh.min.period.seconds = 60
sasl.login.refresh.window.factor = 0.8
sasl.login.refresh.window.jitter = 0.05
sasl.mechanism = GSSAPI
security.protocol = PLAINTEXT
send.buffer.bytes = 131072
session.timeout.ms = 10000
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
ssl.endpoint.identification.algorithm = https
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLS
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
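For orientation, here is a minimal plain-Java sketch (buildConsumer is a made-up helper, not the application's code) that applies the handful of settings from this dump that matter for the problem: manual commits via enable.auto.commit = false, and the auto.offset.reset policy, which decides what happens when a fetch returns OFFSET_OUT_OF_RANGE.

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

// Minimal sketch: only the settings relevant here; everything else stays at its default.
static KafkaConsumer<String, String> buildConsumer()
{
    Properties props = new Properties();
    props.put( ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092" );
    props.put( ConsumerConfig.GROUP_ID_CONFIG, "cm-persistence-notification" );
    props.put( ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false" );  // offsets are committed manually
    props.put( ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest" );  // reset policy on OFFSET_OUT_OF_RANGE
    props.put( ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1000" );
    props.put( ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName() );
    props.put( ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName() );
    return new KafkaConsumer<>( props );
}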
Spring Boot configuration:
public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> cmPersistenceListenerContainerFactory(
        KafkaProperties kafkaProperties )
{
    ConcurrentKafkaListenerContainerFactory<String, String> containerFactory =
        new ConcurrentKafkaListenerContainerFactory<>();

    // Start from the Spring Boot consumer properties and override a few values.
    Map<String, Object> consumerProperties = kafkaProperties.buildConsumerProperties();
    consumerProperties.put( ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1000" );
    consumerProperties.put( ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false );
    consumerProperties.put( ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, false );

    containerFactory.setConsumerFactory(
        new DefaultKafkaConsumerFactory<>(
            consumerProperties, new StringDeserializer(), new StringDeserializer() ) );

    // Deliver records to the listener in batches; commit only on manual acknowledge.
    containerFactory.setBatchListener( true );
    containerFactory.getContainerProperties().setCommitLogLevel( LogIfLevelEnabled.Level.INFO );
    containerFactory.getContainerProperties().setAckMode( AckMode.MANUAL_IMMEDIATE );
    return containerFactory;
}
@Bean
public KafkaAdmin kafkaAdmin( KafkaProperties kafkaProperties )
{
    return new KafkaAdmin( kafkaProperties.buildAdminProperties() );
}
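The listener below resolves its container factory through KafkaConsumerConfiguration.CONTAINER_FACTORY_NAME, a constant that is not shown in the question. Presumably the factory method above is registered as a bean under that name; a hypothetical sketch of the missing piece (the constant's value is assumed):

// Hypothetical: the constant and bean registration implied by the @KafkaListener below.
public static final String CONTAINER_FACTORY_NAME = "cmPersistenceListenerContainerFactory";

@Bean( name = CONTAINER_FACTORY_NAME )
public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> cmPersistenceListenerContainerFactory(
        KafkaProperties kafkaProperties )
{
    // ... body as shown above ...
}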
Listener class:
@KafkaListener( id = "batch-listener-0", topics = "topic1", groupId = "test", containerFactory = KafkaConsumerConfiguration.CONTAINER_FACTORY_NAME )
public void receive(
@Payload List<String> messages,
@Header( KafkaHeaders.RECEIVED_MESSAGE_KEY ) List<String> keys,
@Header( KafkaHeaders.RECEIVED_PARTITION_ID ) List<Integer> partitions,
@Header( KafkaHeaders.RECEIVED_TOPIC ) List<String> topics,
@Header( KafkaHeaders.OFFSET ) List<Long> offsets,
Acknowledgment ack )
{
long startTime = System.currentTimeMillis();
handleNotifications( messages ); // will take more than 5s to process all messages
long endTime = System.currentTimeMillis();
long timeElapsed = endTime - startTime;
LOGGER.info( "Execution Time :{}", timeElapsed );
ack.acknowledge();
LOGGER.info( "Acknowledgment Success" );
}
I acknowledge manually after the messages have been processed.
I found some debug logs:
In the debug logs above, the fetch for the offset happened before the offset commit; that offset had not been committed yet, so the fetch returned OFFSET_OUT_OF_RANGE, and the consumer could not receive any messages after that. Is there any way to handle this error in the consumer code, or to make sure offsets are fetched only after the commit?
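For what it's worth, with auto.offset.reset = latest (as configured above) the client resets its position by itself when the broker answers OFFSET_OUT_OF_RANGE, so the error never reaches application code; it only surfaces as an exception when auto.offset.reset = none. A minimal plain-consumer sketch of catching it under that assumption (pollSafely is a made-up helper name):

import java.time.Duration;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetOutOfRangeException;

// Sketch, assuming auto.offset.reset = none so the out-of-range condition is
// thrown to the application instead of being reset silently.
static ConsumerRecords<String, String> pollSafely( KafkaConsumer<String, String> consumer )
{
    try {
        return consumer.poll( Duration.ofMillis( 500 ) );
    } catch ( OffsetOutOfRangeException e ) {
        // The requested position no longer exists on the broker; resume from
        // the earliest offset still available for the affected partitions.
        consumer.seekToBeginning( e.partitions() );
        return consumer.poll( Duration.ofMillis( 500 ) );
    }
}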
The answer I received:
Some of the partition log files were being deleted, but the consumer was still seeking to offsets in those deleted log files.
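Kafka deletes old log segments according to the topic's retention settings, so one mitigation is to keep segments around longer than the consumer's worst-case lag. A hypothetical AdminClient sketch that raises retention.ms for topic1 (the seven-day value is an assumption, not something from the question):

import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

// Hypothetical sketch: raise retention.ms on "topic1" so log segments outlive
// the consumer's worst-case processing lag. 604800000 ms = 7 days (assumed).
static void raiseRetention() throws Exception
{
    Properties props = new Properties();
    props.put( "bootstrap.servers", "localhost:9092" );
    try ( Admin admin = Admin.create( props ) ) {
        ConfigResource topic = new ConfigResource( ConfigResource.Type.TOPIC, "topic1" );
        Collection<AlterConfigOp> ops = List.of( new AlterConfigOp(
            new ConfigEntry( "retention.ms", "604800000" ), AlterConfigOp.OpType.SET ) );
        admin.incrementalAlterConfigs( Map.of( topic, ops ) ).all().get();
    }
}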