Pulling Kafka messages from a specified position

During a Kafka partition expansion, message consumption broke, and we needed to re-pull the messages from the affected time window and consume them again. The approach: use `Consumer#offsetsForTimes` to translate the window's start timestamp into an offset for each partition, `assign` and `seek` the consumer to that offset, then poll and reprocess records until their timestamps pass the end of the window.
```java
// imports assumed by the snippet (the logger comes from Lombok's @Slf4j)
import com.alibaba.fastjson.JSON;
import com.ctrip.framework.apollo.Config;
import com.ctrip.framework.apollo.ConfigService;
import com.google.common.collect.Maps;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;

  // the topic here has 12 partitions; fix each one in turn
  for (int i = 0; i < 12; i++) {
      try {
          fixKafkaData(i, Boolean.TRUE.equals(fixNow));
      } catch (Exception e) {
          log.error("exception", e);
      }
  }

public void fixKafkaData(int pollPartition, Boolean fixNow) {
    // read the bootstrap servers from Apollo config
    Config config = ConfigService.getConfig("middleware.kafka-xxx");
    String kafkaServer = config.getProperty("spring.kafka.bootstrap-servers", "");
    Properties props = new Properties();
    props.put("bootstrap.servers", kafkaServer);
    // note: auto-committing under the live GROUP_ID moves that group's committed
    // offsets; for a repair run, consider a dedicated group id or "false" here
    props.put("group.id", GROUP_ID);
    props.put("enable.auto.commit", "true");
    // props.put("auto.commit.interval.ms", "1000");
    props.put("session.timeout.ms", "30000");
    // props.put("auto.offset.reset", "earliest");
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

    // ask the broker, per partition, for the earliest offset at or after beginTime
    Map<TopicPartition, Long> map = Maps.newHashMap();
    List<PartitionInfo> partitions = consumer.partitionsFor(TOPIC_NAME);
    log.info("partitions.size={}", partitions.size());
    for (PartitionInfo partitionInfo : partitions) {
        map.put(new TopicPartition(TOPIC_NAME, partitionInfo.partition()), beginTime.getTime());
    }
    Map<TopicPartition, OffsetAndTimestamp> parMap = consumer.offsetsForTimes(map);
    for (Map.Entry<TopicPartition, OffsetAndTimestamp> entry : parMap.entrySet()) {
        TopicPartition topicPartition = entry.getKey();
        int partition = topicPartition.partition();
        OffsetAndTimestamp offsetAndTimestamp = entry.getValue();
        log.info("TopicPartition topicPartition={}", topicPartition);
        log.info("TopicPartition offsetAndTimestamp={}", offsetAndTimestamp);
        // offsetsForTimes returns null for a partition with no record at or after the timestamp
        if (offsetAndTimestamp == null) {
            continue;
        }
        // use the timestamp-derived offset as the starting position
        if (partition == pollPartition) {
            long offset = offsetAndTimestamp.offset();
            consumer.assign(Collections.singletonList(topicPartition));
            consumer.seek(topicPartition, offset);
            // break;
        }
    }
    // poll from the seeked offset and reprocess records until the end of the window
    boolean isBreak = false;
    int emptyPolls = 0;
    while (true) {
        ConsumerRecords<String, String> poll = consumer.poll(Duration.ofSeconds(1));
        // guard: an idle topic would otherwise keep this loop spinning forever
        if (poll.isEmpty()) {
            if (++emptyPolls >= 10) {
                log.info("no records left to poll, stopping");
                break;
            }
            continue;
        }
        emptyPolls = 0;
        for (ConsumerRecord<String, String> record : poll) {
            int partition = record.partition();
            if (record.timestamp() <= endTime.getTime()) {
                String value = record.value();
                String key = record.key();
                long timestamp = record.timestamp();

                log.info("partition={}", partition);
                Map<String, Object> logMap = Maps.newHashMap();
                logMap.put("key", key);
                logMap.put("value", value);
                logMap.put("timestamp", timestamp);
                logMap.put("partition", partition);
                log.info("{}", JSON.toJSONString(logMap));
                try {
                    processKafka(value, fixNow);
                } catch (Exception e) {
                    log.error("processKafka exception, value={}", value, e);
                }
            } else {
                log.info("break....timestamp={}", record.timestamp());
                log.info("break....partition={}", partition);
                isBreak = true;
            }
        }
        if (isBreak) {
            break;
        }
    }
    // release the client once the replay is done
    consumer.close();
}

```
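The snippet leans on several members it never declares: a `log` field (for example a Lombok `@Slf4j` logger), the `TOPIC_NAME` and `GROUP_ID` constants, the `beginTime`/`endTime` window boundaries, and the `processKafka` handler that re-applies the business logic. A minimal sketch of that scaffolding, with hypothetical names and values standing in for the originals:

```java
import java.text.SimpleDateFormat;
import java.util.Date;
import lombok.extern.slf4j.Slf4j;

// Hypothetical scaffolding; the topic, group id, window and handler body
// are placeholders, not the values from the original incident.
@Slf4j
public class KafkaReplayFixer {

    private static final String TOPIC_NAME = "xxx-topic";     // the affected topic
    private static final String GROUP_ID = "xxx-topic-fix";   // dedicated group for the repair run

    // boundaries of the broken time window
    private final Date beginTime;
    private final Date endTime;

    public KafkaReplayFixer(Date beginTime, Date endTime) {
        this.beginTime = beginTime;
        this.endTime = endTime;
    }

    // re-applies the consumer-side business logic for one message;
    // fixNow=false can serve as a dry run that only logs what would be replayed
    private void processKafka(String value, boolean fixNow) {
        // ... deserialize `value` and redo the original handling ...
    }

    public static void main(String[] args) throws Exception {
        SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        KafkaReplayFixer fixer = new KafkaReplayFixer(
                fmt.parse("2021-11-22 10:00:00"),
                fmt.parse("2021-11-22 12:00:00"));
        // fixer would then run the per-partition loop shown above
    }
}
```

The key API is `offsetsForTimes`: for each partition it returns the earliest offset whose record timestamp is at or after the requested timestamp (or null if no such record exists), which is what lets the replay start precisely at the beginning of the window instead of scanning from the earliest offset.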
