Add the required dependencies
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter</artifactId>
</dependency>
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>1.1.1.RELEASE</version>
</dependency>
As the dependencies already suggest, the current Spring Boot version (1.4.2) does not yet support seamless Kafka integration purely through configuration properties. This means the setup has to be wired up manually through Java config.
Define the basic Kafka configuration
Much like RedisTemplate and JdbcTemplate, Spring provides org.springframework.kafka.core.KafkaTemplate as the entry point for Kafka-related API operations.
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;

@Configuration
@EnableKafka
public class KafkaProducerConfig {

    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        // Kafka broker address
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.179.200:9092");
        props.put(ProducerConfig.RETRIES_CONFIG, 0);
        // batching and buffering settings for the producer
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 4096);
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 40960);
        // keys and values are sent as plain strings
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }

    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<String, String>(producerFactory());
    }
}
KafkaTemplate depends on a ProducerFactory, and the ProducerFactory is created from a Map of Kafka configuration properties. Messages can then be sent through the KafkaTemplate object.
kafkaTemplate.send("test-topic", "hello");
or
kafkaTemplate.send("test-topic", "key-1", "hello");
Message listener configuration
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;

@Configuration
@EnableKafka
public class KafkaConsumerConfig {

    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        // three consumer threads per listener container
        factory.setConcurrency(3);
        factory.getContainerProperties().setPollTimeout(3000);
        return factory;
    }

    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    public Map<String, Object> consumerConfigs() {
        Map<String, Object> propsMap = new HashMap<>();
        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.179.200:9092");
        // offsets are committed by the listener container rather than by the Kafka client itself
        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "100");
        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");
        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, "test-group");
        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        return propsMap;
    }

    @Bean
    public Listener listener() {
        return new Listener();
    }
}
The end goal of the listener configuration is to obtain a listener object, and that listener object is implemented by ourselves.
import java.util.Optional;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;

public class Listener {

    @KafkaListener(topics = {"test-topic"})
    public void listen(ConsumerRecord<?, ?> record) {
        // value() can be null (e.g. for tombstone records), hence the Optional wrapper
        Optional<?> kafkaMessage = Optional.ofNullable(record.value());
        if (kafkaMessage.isPresent()) {
            Object message = kafkaMessage.get();
            System.out.println("listen1 " + message);
        }
    }
}
All that is needed is to mark the handling method with @KafkaListener; the annotation also specifies which Kafka topics the method listens to.
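The ConsumerRecord parameter also carries the message key and the record's position in the topic. A hedged sketch of an alternative listener method that could live in the same Listener class; the extra topic name "another-topic" is only an example:

@KafkaListener(topics = {"test-topic", "another-topic"})
public void listenWithMetadata(ConsumerRecord<String, String> record) {
    // topic(), partition() and offset() identify where the record came from;
    // key() is null when the producer sent the message without a key
    System.out.println(record.topic() + "-" + record.partition() + "@" + record.offset()
            + " key=" + record.key() + " value=" + record.value());
}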
Notes
In the listener configuration, GROUP_ID_CONFIG specifies the name of the consumer group; if several listener instances belong to the same group, only one of them will receive any given message.
The topics attribute of @KafkaListener specifies the Kafka topic names; the topic name itself is chosen by the message producer, i.e. it is the one passed to kafkaTemplate when the message is sent.
KEY_DESERIALIZER_CLASS_CONFIG and VALUE_DESERIALIZER_CLASS_CONFIG specify how keys and values are decoded on the consumer side, mirroring the serializers configured on the producer. Kafka uses the key to decide which partition a value is stored in.
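As a rough illustration of that partitioning rule, the following sketch assumes a simplified version of what Kafka's default partitioner does for records with a non-null key (the real implementation uses a murmur2 hash rather than Arrays.hashCode):

// Simplified sketch: records with the same key always map to the same partition.
static int choosePartition(byte[] serializedKey, int numPartitions) {
    int hash = java.util.Arrays.hashCode(serializedKey); // Kafka actually uses murmur2 here
    return (hash & Integer.MAX_VALUE) % numPartitions;
}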
Postscript
Time is one of the most effective ways of solving a problem.
With Spring Boot 1.5, auto-configuration of Kafka in Spring Boot becomes available.
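For reference, under Spring Boot 1.5 much of the setup above can be expressed directly in application.properties through the spring.kafka.* namespace. A sketch using the values from this post; exact property coverage may vary by version:

spring.kafka.bootstrap-servers=192.168.179.200:9092
spring.kafka.consumer.group-id=test-group
spring.kafka.consumer.auto-offset-reset=latest
spring.kafka.consumer.enable-auto-commit=false
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer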