springboot监听kafka(不使用spring-kafka)

一、不使用spring-kafka的原因

  kafka服务端版本为0.10.0.1-Ipv20191220-hbp2.1.0,为避免版本问题导致监听失败,客户端也采用0.10.0.1版本。客户端0.10.0.1版本对应的spring-kafka版本为1.1.0,实际开发过程中发现spring-kafka 1.1.0版本与项目parent版本存在依赖冲突,因此舍弃该方案,改为直接使用kafka-clients原生API。

二、代码

kafka配置文件(application.properties)内容如下:

#kafka配置
bootstrap.servers=10.194.101.240:9092
enable.auto.commit=true
auto.commit.interval.ms=1000
key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
group.id=firstindagvhemgr
#kafka topic
kafkaTopicName=test
@Configuration
@Slf4j
public class KafkaConfigNew {

    @Autowired
    private Environment environment;

    @Autowired
    private KafkaConsumerListener kafkaConsumerListener;

    // Shared with the polling thread. NOTE(review): KafkaConsumer is not thread-safe;
    // after subscribe() here, poll() must only ever be called from the listener thread.
    public static KafkaConsumer<String, String> kafkaConsumer;

    /**
     * Builds the Kafka consumer from externalized properties, subscribes it to the
     * configured topic, and starts the background polling thread.
     *
     * Fix: a {@code void} {@code @Bean} method is invalid — Spring expects the method
     * to return the bean instance. Returning the consumer registers it properly and
     * still runs this initialization exactly once at startup.
     *
     * @return the subscribed KafkaConsumer, registered as a Spring bean
     */
    @Bean
    public KafkaConsumer<String, String> loadKafkaConfig() {
        Properties p = new Properties();
        p.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, environment.getProperty("bootstrap.servers"));
        p.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, environment.getProperty("key.deserializer"));
        p.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, environment.getProperty("value.deserializer"));
        p.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, environment.getProperty("enable.auto.commit"));
        p.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, environment.getProperty("auto.commit.interval.ms"));
        p.put(ConsumerConfig.GROUP_ID_CONFIG, environment.getProperty("group.id"));

        kafkaConsumer = new KafkaConsumer<String, String>(p);
        // Subscribe to the topic configured under "kafkaTopicName".
        kafkaConsumer.subscribe(Collections.singletonList(environment.getProperty("kafkaTopicName")));
        log.info("消息订阅成功!kafka配置:{}", p);

        // Start the polling loop on a named daemon thread so it cannot
        // keep the JVM alive after the application shuts down.
        KafkaListenerJob kafkaListenerJob = new KafkaListenerJob(kafkaConsumerListener);
        Thread t = new Thread(kafkaListenerJob, "kafka-listener");
        t.setDaemon(true);
        t.start();
        return kafkaConsumer;
    }
}
@Slf4j
public class KafkaListenerJob implements Runnable {

    // Handler invoked for every record; injected via constructor, never reassigned.
    private final KafkaConsumerListener kafkaConsumerListener;

    /**
     * @param kafkaConsumerListener the message-handling service to dispatch records to
     */
    public KafkaListenerJob(KafkaConsumerListener kafkaConsumerListener) {
        this.kafkaConsumerListener = kafkaConsumerListener;
    }

    /**
     * Polls the shared consumer in a loop and dispatches each record to the listener.
     *
     * Fix: the original {@code while (true)} could never be stopped; the loop now
     * exits when the thread is interrupted, allowing a graceful shutdown.
     */
    @Override
    public void run() {
        log.info("kafka消息监听任务已启动!");
        while (!Thread.currentThread().isInterrupted()) {
            // 100 ms poll timeout; returns an empty batch when no records arrive.
            ConsumerRecords<String, String> records = KafkaConfigNew.kafkaConsumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                try {
                    kafkaConsumerListener.listen(record);
                } catch (Exception e) {
                    // One bad record must not kill the polling loop; log and continue.
                    log.error("消息消费异常!", e);
                }
            }
        }
    }
}
@Slf4j
@Service
public class KafkaConsumerListener {

    // Validation mapper. NOTE(review): unused in the visible code — presumably
    // used by other methods of this service; confirm before removing.
    @Autowired
    private KafkaDataCheckMapper kafkaDataCheckMapper;

    @Autowired
    private FTPUtils ftpUtils;

    /**
     * Handles a single Kafka record: currently just logs its value.
     *
     * @param consumerRecord the consumed record; key and value are Strings
     *                       per the consumer's deserializer configuration
     */
    public void listen(ConsumerRecord<String, String> consumerRecord) {
        // value() is already typed String by the generics — the original
        // (String) cast was redundant. Parameterized logging avoids eager
        // string concatenation.
        String value = consumerRecord.value();
        log.info("接收到一条消息:{}", value);
    }
}
原文地址:https://www.cnblogs.com/jxxblogs/p/14067998.html