I. Environment Setup
1. Install: on macOS, install with brew; the ZooKeeper dependency is installed automatically.
brew install kafka
The download and installation take a little while.
2. Install directory: /usr/local/Cellar/kafka/2.0.0 // 2.0.0 is the installed version; adjust for your own version
Config file locations: /usr/local/etc/kafka/server.properties
/usr/local/etc/kafka/zookeeper.properties
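For reference, the later steps assume the defaults in these two files have not been changed (check your own copies if you have edited them):
# zookeeper.properties (default)
clientPort=2181
# server.properties (default) – the broker itself listens on port 9092
zookeeper.connect=localhost:2181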
3. Start ZooKeeper: first change into the Kafka install directory; all of the commands below are run from /usr/local/Cellar/kafka/2.0.0
cd /usr/local/Cellar/kafka/2.0.0
Start command:
./bin/zookeeper-server-start /usr/local/etc/kafka/zookeeper.properties &
4. Once ZooKeeper is up, start Kafka:
./bin/kafka-server-start /usr/local/etc/kafka/server.properties &
5. Create a topic named MY_TEST_TOPIC:
./bin/kafka-topics --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic MY_TEST_TOPIC
6. List the topics that have been created:
./bin/kafka-topics --list --zookeeper localhost:2181
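If you also want to see the partition and replica assignment of the topic you just created, the same tool has a describe option (an optional extra, not part of the original steps):
./bin/kafka-topics --describe --zookeeper localhost:2181 --topic MY_TEST_TOPIC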
7. Test with the console producer and consumer that ship with Kafka
Producer:
./bin/kafka-console-producer --broker-list localhost:9092 --topic MY_TEST_TOPIC
The terminal shows a " > " prompt; type the content you want to send there.
8. Consumer: open another terminal and change into /usr/local/Cellar/kafka/2.0.0
./bin/kafka-console-consumer --bootstrap-server localhost:9092 --topic MY_TEST_TOPIC --from-beginning
Now anything typed in the producer terminal is received in the consumer terminal in real time.
II. Java Code Test
Development environment: a Spring Boot Maven project created in IDEA
1. Add the dependencies to the pom file
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-test</artifactId>
    <scope>test</scope>
</dependency>

<dependencyManagement>
    <dependencies>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-dependencies</artifactId>
            <version>Dalston.SR1</version>
            <type>pom</type>
            <scope>import</scope>
        </dependency>
    </dependencies>
</dependencyManagement>
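The snippet above only brings in the Spring Boot test starter and the Spring Cloud BOM; the producer and consumer classes below use the plain Kafka client API, so the Kafka client library is also needed on the classpath. A minimal addition could look like this (the version here is an assumption, match it to your broker, e.g. 2.0.0; spring-kafka would only be needed if you later switch to Spring's KafkaTemplate):
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>2.0.0</version> <!-- assumed; use the version matching your broker -->
</dependency>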
2. Write the configuration class ConfigureAPI
package com.chargedot.kafkademo.config;

public class ConfigureAPI {
    public final static String Group_ID = "test";              // consumer group id
    public final static String TOPIC = "MY_TEST_TOPIC";
    public final static String MY_TOPIC = "DWD-ERROR-REQ";
    public final static int BUFFER_SIZE = 64 * 1024;
    public final static int TIMEOUT = 20000;
    public final static int INTERVAL = 10000;
    public final static String BROKER_LIST = "127.0.0.1:9092"; // bootstrap servers
    public final static int GET_MEG_INTERVAL = 1000;           // consumer poll timeout in ms
}
3. Write the producer JProducer
import java.util.Properties;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class JProducer implements Runnable {
    private Producer<String, String> producer;

    public JProducer() {
        Properties props = new Properties();
        props.put("bootstrap.servers", ConfigureAPI.BROKER_LIST);
        props.put("acks", "all");              // wait for the full ISR to acknowledge each record
        props.put("retries", 0);
        props.put("batch.size", 16384);
        props.put("linger.ms", 1);
        props.put("buffer.memory", 33554432);
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("request.required.acks", "-1"); // legacy property name; ignored by the new client, acks=all above is what counts
        producer = new KafkaProducer<String, String>(props);
    }
    @Override
    public void run() {
        try {
            String data = "hello world";
            producer.send(new ProducerRecord<String, String>(ConfigureAPI.TOPIC, data));
            System.out.println("send data = " + data);
        } catch (Exception e) {
            System.out.println("exception = " + e.getMessage());
        } finally {
            // producer.close();
        }
    }
    public static void main(String[] args) {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(5);
        // pool.schedule(new JProducer(), 2, TimeUnit.SECONDS);
        // schedule a recurring task: send a message every 5 seconds after a 1 second initial delay
        pool.scheduleAtFixedRate(new JProducer(), 1, 5, TimeUnit.SECONDS);
    }
}
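The run() method above sends a value-only record and ignores the send result. If you want confirmation that the broker accepted each record, one option is to pass a key and a callback to send(); the following is a minimal sketch to drop into run() (the key "myKey" and the log messages are illustrative, not part of the original code):
    ProducerRecord<String, String> record =
            new ProducerRecord<>(ConfigureAPI.TOPIC, "myKey", "hello world");
    producer.send(record, (metadata, exception) -> {
        if (exception != null) {
            // the send failed after any retries were exhausted
            System.out.println("send failed: " + exception.getMessage());
        } else {
            // partition and offset confirm the record was written
            System.out.println("sent to partition " + metadata.partition()
                    + " at offset " + metadata.offset());
        }
    });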
4. Write the consumer JConsumer
import java.util.Arrays;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class JConsumer implements Runnable {
    private KafkaConsumer<String, String> kafkaConsumer;

    JConsumer() {
        Properties props = new Properties();
        props.put("bootstrap.servers", ConfigureAPI.BROKER_LIST);
        props.put("group.id", ConfigureAPI.Group_ID);
        props.put("enable.auto.commit", true);       // offsets are committed automatically
        props.put("auto.commit.interval.ms", 1000);
        props.put("session.timeout.ms", 30000);
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        kafkaConsumer = new KafkaConsumer<String, String>(props);
        // subscribe to both topics defined in ConfigureAPI
        kafkaConsumer.subscribe(Arrays.asList(ConfigureAPI.TOPIC, ConfigureAPI.MY_TOPIC));
    }
    @Override
    public void run() {
        while (true) {
            System.out.println("run loop: polling for messages");
            ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(ConfigureAPI.GET_MEG_INTERVAL);
            for (ConsumerRecord<String, String> record : consumerRecords) {
                Object recordValue = record.value();
                // with StringDeserializer the value is always a String, so only the String branch is taken
                if (recordValue instanceof Map) {
                    System.out.println("record is a Map object");
                } else if (recordValue instanceof String) {
                    // JSON string
                    System.out.println("record is a String");
                    String jsonStr = (String) recordValue;
                    // JSON string --> Map
                    Map msgMap = JacksonUtil.json2Bean(jsonStr, Map.class);
                    System.out.println("msgMap = " + msgMap);
                } else {
                }
                showMessage("received message: key = " + record.key() + ", value = " + record.value());
            }
        }
    }
    public void showMessage(String msg) {
        System.out.println(msg);
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newCachedThreadPool();
        pool.execute(new JConsumer());
        pool.shutdown();
    }
}
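The loop above relies on auto-commit (enable.auto.commit=true) and never exits. If you want offsets committed only after a batch has actually been processed, a common variant is to turn auto-commit off and call commitSync() yourself; a minimal sketch under that assumption (not the original code):
    // assumes props.put("enable.auto.commit", false) in the constructor
    while (true) {
        ConsumerRecords<String, String> records = kafkaConsumer.poll(ConfigureAPI.GET_MEG_INTERVAL);
        for (ConsumerRecord<String, String> record : records) {
            System.out.println("offset = " + record.offset() + ", value = " + record.value());
        }
        // commit only after the whole batch has been handled
        kafkaConsumer.commitSync();
    }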
5. Utility class: JacksonUtil
import java.io.IOException;
import java.util.List;
import java.util.Map;

import com.fasterxml.jackson.core.JsonGenerationException;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.JavaType;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.ObjectMapper;

/**
 * @Description: JSON utility class
 */
public class JacksonUtil {
    private static ObjectMapper mapper;

    public static synchronized ObjectMapper getMapperInstance(boolean createNew) {
        if (createNew) {
            return new ObjectMapper();
        } else if (mapper == null) {
            mapper = new ObjectMapper();
        }
        return mapper;
    }
    /**
     * @param json str
     * @param class1
     * @param <T>
     * @return bean
     */
    public static <T> T json2Bean(String json, Class<T> class1) {
        try {
            mapper = getMapperInstance(false);
            return mapper.readValue(json, class1);
        } catch (JsonParseException e) {
            e.printStackTrace();
        } catch (JsonMappingException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }
        return null;
    }
    /**
     * @param bean
     * @return json
     */
    public static String bean2Json(Object bean) {
        try {
            mapper = getMapperInstance(false);
            return mapper.writeValueAsString(bean);
        } catch (JsonGenerationException e) {
            e.printStackTrace();
        } catch (JsonMappingException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }
        return null;
    }
    /**
     * @param json
     * @param class1
     * @param <T>
     * @return List
     */
    public static <T> List<T> json2List(String json, Class<T> class1) {
        try {
            mapper = getMapperInstance(false);
            JavaType javaType = mapper.getTypeFactory().constructParametricType(List.class, class1);
            // for a Map type use: mapper.getTypeFactory().constructParametricType(HashMap.class, String.class, Bean.class);
            List<T> list = (List<T>) mapper.readValue(json, javaType);
            return list;
        } catch (Exception e) {
            e.printStackTrace();
        }
        return null;
    }
    /**
     * @param list
     * @return json
     */
    public static String list2Json(List<?> list) {
        try {
            mapper = getMapperInstance(false);   // make sure the shared mapper is initialised
            mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
            return mapper.writeValueAsString(list);
        } catch (Exception e) {
            e.printStackTrace();
        }
        return null;
    }
    /**
     * @param json
     * @param type
     * @return Map
     */
    public static Map<?, ?> json2Map(String json, TypeReference<?> type) {
        try {
            mapper = getMapperInstance(false);   // make sure the shared mapper is initialised
            mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
            return (Map<?, ?>) mapper.readValue(json, type);
        } catch (Exception e) {
            e.printStackTrace();
        }
        return null;
    }
    /**
     * @param map
     * @return json
     */
    public static String map2Json(Map<?, ?> map) {
        try {
            mapper = getMapperInstance(false);   // make sure the shared mapper is initialised
            mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
            return mapper.writeValueAsString(map);
        } catch (Exception e) {
            e.printStackTrace();
        }
        return null;
    }
}
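As a quick sanity check of the utility (and of the way JConsumer uses it), the following round trip is a small illustrative example, not part of the original article:
    Map<String, Object> src = new HashMap<>();
    src.put("topic", ConfigureAPI.TOPIC);
    src.put("count", 1);
    String json = JacksonUtil.map2Json(src);               // e.g. {"topic":"MY_TEST_TOPIC","count":1}
    Map msgMap = JacksonUtil.json2Bean(json, Map.class);   // parse it back, as JConsumer does
    System.out.println(msgMap.get("topic"));               // prints MY_TEST_TOPIC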
6. Start the consumer service first, then start the producer; the messages sent by the producer now show up in the consumer console.
7. Error on service startup: connecting with the console clients works, but connecting from the Java code fails with: Connection refused: no further information
Fix: edit the Kafka broker configuration file:
cd /usr/local/etc/kafka/
1. Open server.properties with vim
2. Modify the file content as described below (see the note after this list)
3. Restart the Kafka service and connect again; the connection should now succeed
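The exact lines to change are not reproduced above. For this kind of failure the setting that usually matters is the listener address in server.properties, so a change along the following lines is a reasonable guess (an assumption about your setup, verify against your own file):
# bind the broker explicitly to the address the Java client uses
listeners=PLAINTEXT://127.0.0.1:9092
# if clients connect from another machine, also advertise a reachable address
# advertised.listeners=PLAINTEXT://<your-host-ip>:9092
After editing, restart Kafka as in step 3 above.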