Kafka Producer and Consumer

Problem 1: telnet to port 9092 on the broker's IP fails, and the console reports:

[AdminClient clientId=adminclient-1] Connection to node -1 (/172.16.2.6:9092) could not be established. Broker may not be available.

Answer: in server.properties, change listeners=PLAINTEXT://:9092 to listeners=PLAINTEXT://ip:9092 (with ip replaced by the broker's address) and the connection works.
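A minimal server.properties sketch, assuming the broker's address is 172.16.2.6 as in the error above; advertised.listeners is also worth knowing about when clients reach the broker through a different hostname or IP:

# Bind to an explicit address instead of the empty default
listeners=PLAINTEXT://172.16.2.6:9092
# Optional: the address the broker advertises to clients, if it differs
#advertised.listeners=PLAINTEXT://public-host:9092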

1.pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.2.1.RELEASE</version>
        <relativePath/> <!-- lookup parent from repository -->
    </parent>
    <groupId>mis_timertask_service</groupId>
    <artifactId>mis_timertask_service</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <name>mis_timertask_service</name>
    <description>mis timer task service</description>

    <properties>
        <java.version>1.8</java.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka</artifactId>
        </dependency>
        <dependency>
            <groupId>org.mybatis.spring.boot</groupId>
            <artifactId>mybatis-spring-boot-starter</artifactId>
            <version>2.1.1</version>
        </dependency>

        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <scope>runtime</scope>
        </dependency>
        <!--<dependency>-->
            <!--<groupId>org.springframework.cloud</groupId>-->
            <!--<artifactId>spring-cloud-starter-netflix-eureka-client</artifactId>-->
            <!--<version>2.0.0.RELEASE</version>-->
        <!--</dependency>-->
        <!--<dependency>-->
            <!--<groupId>org.springframework.cloud</groupId>-->
            <!--<artifactId>spring-cloud-openfeign</artifactId>-->
            <!--<version>2.0.1.RELEASE</version>-->
            <!--<type>pom</type>-->
            <!--<scope>import</scope>-->
        <!--</dependency>-->
        <!--<dependency>-->
            <!--<groupId>org.springframework.cloud</groupId>-->
            <!--<artifactId>spring-cloud-starter-openfeign</artifactId>-->
        <!--</dependency>-->
        <!-- database connection pool -->
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>druid-spring-boot-starter</artifactId>
            <version>1.1.10</version>
        </dependency>
        <!-- cache -->
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-cache</artifactId>
        </dependency>
        <dependency>
            <groupId>net.sf.ehcache</groupId>
            <artifactId>ehcache</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
            <exclusions>
                <exclusion>
                    <groupId>org.junit.vintage</groupId>
                    <artifactId>junit-vintage-engine</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
        <dependency>
            <groupId>com.taobao</groupId>
            <artifactId>taobao-sdk</artifactId>
            <version>2.3</version>
            <scope>system</scope>
            <systemPath>${project.basedir}/src/main/resources/lib/taobao-sdk-java-20191021.jar</systemPath>
        </dependency>
        <!-- Swagger dependencies -->
        <dependency>
            <groupId>io.springfox</groupId>
            <artifactId>springfox-swagger2</artifactId>
            <version>2.4.0</version>
        </dependency>
        <dependency>
            <groupId>io.springfox</groupId>
            <artifactId>springfox-swagger-ui</artifactId>
            <version>2.4.0</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-maven-plugin</artifactId>
                <configuration>
                    <includeSystemScope>true</includeSystemScope>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-surefire-plugin</artifactId>
                <configuration>
                    <skipTests>true</skipTests>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.mybatis.generator</groupId>
                <artifactId>mybatis-generator-maven-plugin</artifactId>
                <version>1.3.5</version>
                <executions>
                    <execution>
                        <id>Generate MyBatis Artifacts</id>
                        <goals>
                            <goal>generate</goal>
                        </goals>
                        <phase>deploy</phase>
                    </execution>
                </executions>
                <configuration>
                    <configurationFile>src/main/resources/generatorConfig.xml</configurationFile>
                </configuration>
            </plugin>
        </plugins>
    </build>

</project>

2.application.properties

#============== kafka ===================
# Kafka broker address; for a cluster, list multiple host:port pairs separated by commas, e.g. ip1:9092,ip2:9092
spring.kafka.bootstrap-servers=ip:9092

#=============== producer =======================
# Number of retries when a send fails. When the leader node fails, a replica takes over as the new leader, and writes during the failover may fail.
# With retries=0 the producer does not resend; with retries enabled, the producer resends once the replica has fully become the leader, so no messages are lost.
spring.kafka.producer.retries=1
# Batch size in bytes; the producer accumulates records up to this size and sends them in one request
spring.kafka.producer.batch-size=16384
# buffer.memory is the total memory (bytes) the producer may use to buffer records waiting to be sent; once the buffer is full, further sends block
spring.kafka.producer.buffer-memory=33554432

# acks: the number of acknowledgments the producer requires the leader to have received before considering a request complete. It controls the durability of records sent to the server. Possible values:
# acks=0: the producer does not wait for any acknowledgment from the server; the record is added to the socket buffer and considered sent. There is no guarantee the server received it, and the retries setting has no effect (the client generally won't learn of failures). The offset returned for each record is always -1.
# acks=1: the leader writes the record to its local log and responds without waiting for all replicas to acknowledge. If the leader fails right after acknowledging, but before the replicas have copied the record, the record is lost.
# acks=all: the leader waits for the full set of in-sync replicas to acknowledge the record. This guarantees the record is not lost as long as at least one in-sync replica stays alive; it is the strongest guarantee and is equivalent to acks=-1.
# Accepted values: all, -1, 0, 1
spring.kafka.producer.acks=1

# Serializers for the message key and value
spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer


#=============== consumer  =======================
# Default consumer group id. In Kafka, consumers within the same group never read the same message; group.id names the group
spring.kafka.consumer.group-id=testGroup
# Valid values: earliest, latest, none (the old consumer API called these smallest and largest). earliest starts from the beginning of the partition when no committed offset exists; latest starts from the end of the log
spring.kafka.consumer.auto-offset-reset=latest
# enable.auto.commit=true enables automatic offset commits
spring.kafka.consumer.enable-auto-commit=true
# When enable.auto.commit is true, the interval (ms) at which consumer offsets are auto-committed to Kafka; the default is 5000
spring.kafka.consumer.auto-commit-interval=100

# Deserializers for the message key and value
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer

3. Producer

package mis.kafka.producer;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class ProducerTest {

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    // Publish the given message to the "test" topic.
    @RequestMapping(value = "/kafkaTestProducer", method = RequestMethod.GET)
    public boolean send(@RequestParam String message) {
        kafkaTemplate.send("test", message);
        return true;
    }

    @RequestMapping("/hello")
    public String hello() {
        return "hello";
    }
}

4. Consumer

package mis.kafka.consumer;

import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Service;
import java.util.Date;
@Service
public class KafkaConsumerTest {

    // Consume messages from the "test" topic as part of group "testGroup".
    @KafkaListener(topics = "test", groupId = "testGroup")
    public void receiveTestTopicMessage(String message) {
        System.out.println("==========" + new Date() + " message: " + message);
    }
}

5. Testing
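With both the application and the Kafka broker running, a quick smoke test (a sketch, assuming the default Spring Boot port 8080) is to call the producer endpoint from a browser or any HTTP client:

http://localhost:8080/kafkaTestProducer?message=hello-kafka

If the wiring is correct, the consumer prints the message to the console in the format of the listener above, e.g.:

==========Wed Nov 20 10:00:00 CST 2019 message: hello-kafka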

https://www.jianshu.com/p/0a85bfbb9f5f

Apache Avro: a general-purpose serializer for big data

1. Introduction

Apache Avro (hereafter Avro) is a programming-language-neutral serialization format. Doug Cutting created the project to provide a way to share data files between systems.

Avro data is defined by a language-independent schema. The schema is described in JSON, and the data is serialized into a binary or JSON file, though the binary form is the usual choice. Avro needs the schema when reading and writing files, and the schema is normally embedded in the data file itself.

An interesting property of Avro is that when the application writing messages switches to a new schema, the application reading them can keep processing messages without any changes.

As of this writing, the latest Avro version is 1.8.2.

2. Create a Maven project

(1) Add the Avro dependency


<dependency>
  <groupId>org.apache.avro</groupId>
  <artifactId>avro</artifactId>
  <version>1.8.2</version>
</dependency>

(2) Add the Avro plugin


<plugin>
  <groupId>org.apache.avro</groupId>
  <artifactId>avro-maven-plugin</artifactId>
  <version>1.8.2</version>
  <executions>
    <execution>
      <phase>generate-sources</phase>
      <goals>
        <goal>schema</goal>
      </goals>
      <configuration>
        <sourceDirectory>${project.basedir}/src/main/avro/</sourceDirectory>
        <outputDirectory>${project.basedir}/src/main/java/</outputDirectory>
      </configuration>
    </execution>
  </executions>
</plugin>
<plugin>
  <groupId>org.apache.maven.plugins</groupId>
  <artifactId>maven-compiler-plugin</artifactId>
  <configuration>
    <source>1.6</source>
    <target>1.6</target>
  </configuration>
</plugin>

The above is the Avro plugin configuration listed on the official site; it also includes the Maven compiler plugin, which is set to compile with JDK 1.6. I changed it to 1.8 because my JDK version is 1.8:


<plugin>
  <groupId>org.apache.maven.plugins</groupId>
  <artifactId>maven-compiler-plugin</artifactId>
  <configuration>
    <source>1.8</source>
    <target>1.8</target>
  </configuration>
</plugin>
<pluginManagement>
    <plugins>
        <!--This plugin's configuration is used to store Eclipse m2e settings only. It has no influence on the Maven build itself.-->
        <plugin>
            <groupId>org.eclipse.m2e</groupId>
            <artifactId>lifecycle-mapping</artifactId>
            <version>1.0.0</version>
            <configuration>
                <lifecycleMappingMetadata>
                    <pluginExecutions>
                        <pluginExecution>
                            <pluginExecutionFilter>
                                <groupId>org.apache.avro</groupId>
                                <artifactId>avro-maven-plugin</artifactId>
                                <versionRange>[1.8.2,)</versionRange>
                                <goals>
                                    <goal>schema</goal>
                                </goals>
                            </pluginExecutionFilter>
                            <action>
                                <ignore></ignore>
                            </action>
                        </pluginExecution>
                    </pluginExecutions>
                </lifecycleMappingMetadata>
            </configuration>
        </plugin>
    </plugins>
</pluginManagement>

3. Using Avro

(1) Using Avro with code generation

<1> Define the schema file

Note the two paths defined in the Avro plugin configuration:

<configuration>
    <sourceDirectory>${project.basedir}/src/main/avro/</sourceDirectory>
    <outputDirectory>${project.basedir}/src/main/java/</outputDirectory>
</configuration>

This configuration means: take the schema files under /src/main/avro/ and generate the corresponding classes under /src/main/java/. So first create the source directory /src/main/avro.

Then create a schema file in that directory. Here we define a simple schema file named user.avsc; note that the extension must be .avsc. Its content is as follows:

{
    "namespace": "com.avro.example",
    "type": "record",
    "name": "User",
    "fields": [
        {"name": "name", "type": "string"},
        {"name": "favorite_number",  "type": ["int", "null"]},
        {"name": "favorite_color", "type": ["string", "null"]}
    ]
}
  • namespace: the package of the class generated from the schema file
  • type: record, the fixed value for this kind of schema
  • name: the name of the generated class
  • fields: the names and types of the properties of the generated class; "type": ["int", "null"] means the favorite_number property is an int but may also be null

Avro supports the primitive types null, boolean, int, long, float, double, bytes, and string, and the complex types record, enum, array, map, union, and fixed. For the complex types, see the official spec: http://avro.apache.org/docs/current/spec.html#schema_complex. This post is only an introduction.
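As an illustration, two hypothetical field declarations using the complex types array and enum (a sketch only; they are not part of user.avsc above):

{"name": "favorite_colors", "type": {"type": "array", "items": "string"}},
{"name": "status", "type": {"type": "enum", "name": "Status", "symbols": ["ACTIVE", "INACTIVE"]}}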

<2> Generate the User class

Before compiling, the class com.avro.example.User does not yet exist in the project.

After running mvn compile, the class is generated (the avro-maven-plugin's schema goal is bound to the generate-sources phase, which runs before compilation).

<3> Serialization

package com.avro.serializer;

import java.io.File;
import java.io.IOException;

import org.apache.avro.file.DataFileWriter;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.specific.SpecificDatumWriter;

import com.avro.example.User;

/**
 * @Title AvroSerializerTest.java
 * @Description Serialize com.avro.example.User objects with Avro
 * @Author YangYunhe
 * @Date 2018-06-21 15:42:02
 */
public class AvroSerializerTest {
    
    public static void main(String[] args) throws IOException {

        // Three equivalent ways to build a User: setters, the all-args constructor, and the builder.
        User user1 = new User();
        user1.setName("Tom");
        user1.setFavoriteNumber(7);

        User user2 = new User("Jack", 15, "red");

        User user3 = User.newBuilder()
                .setName("Harry")
                .setFavoriteNumber(1)
                .setFavoriteColor("green")
                .build();

        // DataFileWriter embeds the schema in the output file, then appends each record.
        DatumWriter<User> userDatumWriter = new SpecificDatumWriter<>(User.class);
        DataFileWriter<User> dataFileWriter = new DataFileWriter<User>(userDatumWriter);
        dataFileWriter.create(user1.getSchema(), new File("users.avro"));
        dataFileWriter.append(user1);
        dataFileWriter.append(user2);
        dataFileWriter.append(user3);
        dataFileWriter.close();
    }

}

Running this program serializes the three User objects with Avro and writes them to the file users.avro in the project root.

<4> Deserialization

package com.avro.deserializer;

import java.io.File;
import java.io.IOException;

import org.apache.avro.file.DataFileReader;
import org.apache.avro.io.DatumReader;
import org.apache.avro.specific.SpecificDatumReader;

import com.avro.example.User;

/**
 * @Title AvroDeSerializerTest.java
 * @Description Parse objects serialized with Avro
 * @Author YangYunhe
 * @Date 2018-06-21 15:58:10
 */
public class AvroDeSerializerTest {
    
    public static void main(String[] args) throws IOException {

        // The reader picks up the schema embedded in the data file, so no .avsc file is needed here.
        DatumReader<User> userDatumReader = new SpecificDatumReader<User>(User.class);
        DataFileReader<User> dataFileReader = new DataFileReader<User>(new File("users.avro"), userDatumReader);
        User user = null;
        while (dataFileReader.hasNext()) {
            // Reusing the same User instance avoids allocating a new object per record.
            user = dataFileReader.next(user);
            System.out.println(user);
        }
    }
}

The program's output:
{"name": "Tom", "favorite_number": 7, "favorite_color": null}
{"name": "Jack", "favorite_number": 15, "favorite_color": "red"}
{"name": "Harry", "favorite_number": 1, "favorite_color": "green"}

(2) Using Avro without code generation

<1> Serialization

package com.avro.serializer;

import java.io.File;
import java.io.IOException;

import org.apache.avro.Schema;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DatumWriter;

import com.avro.deserializer.AvroDeSerializerWithoutCodeGenerationTest;

/**
 * @Title AvroSerializerWithoutCodeGenerationTest.java
 * @Description Serialize User objects with Avro without generating code
 * @Author YangYunhe
 * @Date 2018-06-21 16:04:13
 */
public class AvroSerializerWithoutCodeGenerationTest {
    
    public static void main(String[] args) throws IOException {

        // Load the schema from user.avsc on the classpath instead of using a generated class.
        String avscFilePath =
                AvroDeSerializerWithoutCodeGenerationTest.class.getClassLoader().getResource("user.avsc").getPath();
        Schema schema = new Schema.Parser().parse(new File(avscFilePath));

        // GenericRecord addresses fields by name; unset fields stay null, which the unions in the schema allow.
        GenericRecord user1 = new GenericData.Record(schema);
        user1.put("name", "Tony");
        user1.put("favorite_number", 18);

        GenericRecord user2 = new GenericData.Record(schema);
        user2.put("name", "Ben");
        user2.put("favorite_number", 3);
        user2.put("favorite_color", "red");

        File file = new File("user2.avro");
        DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<GenericRecord>(schema);
        DataFileWriter<GenericRecord> dataFileWriter = new DataFileWriter<GenericRecord>(datumWriter);
        dataFileWriter.create(schema, file);
        dataFileWriter.append(user1);
        dataFileWriter.append(user2);
        dataFileWriter.close();
    }
}

<2> Deserialization

package com.avro.deserializer;

import java.io.File;
import java.io.IOException;

import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DatumReader;

/**
 * @Title AvroDeSerializerWithoutCodeGenerationTest.java
 * @Description Deserialize with Avro without code generation
 * @Author YangYunhe
 * @Date 2018-06-21 16:07:44
 */
public class AvroDeSerializerWithoutCodeGenerationTest {
    
    public static void main(String[] args) throws IOException {
        String avscFilePath = 
                AvroDeSerializerWithoutCodeGenerationTest.class.getClassLoader().getResource("user.avsc").getPath();
        Schema schema = new Schema.Parser().parse(new File(avscFilePath));
        File file = new File("user2.avro");
        DatumReader<GenericRecord> datumReader = new GenericDatumReader<GenericRecord>(schema);
        DataFileReader<GenericRecord> dataFileReader = new DataFileReader<GenericRecord>(file, datumReader);
        GenericRecord user = null;
        while (dataFileReader.hasNext()) {
            user = dataFileReader.next(user);
            System.out.println(user);
        }
    }
}

The program's output (favorite_color was never set for Tony, so the union type lets it fall back to null):
{"name": "Tony", "favorite_number": 18, "favorite_color": null}
{"name": "Ben", "favorite_number": 3, "favorite_color": "red"}


https://www.cnblogs.com/jiutingxiangzi/p/10983534.html

1. Add the Maven dependencies

<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>1.3.9.RELEASE</version>
</dependency>

<dependency>
    <groupId>org.apache.avro</groupId>
    <artifactId>avro</artifactId>
    <version>1.9.0</version>
</dependency>

<dependency>
    <groupId>io.confluent</groupId>
    <artifactId>kafka-avro-serializer</artifactId>
    <version>5.2.1</version>
</dependency>

2. Add the Maven plugin

<plugin>
    <groupId>org.apache.avro</groupId>
    <artifactId>avro-maven-plugin</artifactId>
    <version>1.9.0</version>
    <executions>
        <execution>
            <phase>generate-sources</phase>
            <goals>
                <goal>schema</goal>
            </goals>
            <configuration>
                <sourceDirectory>path to the .avsc files</sourceDirectory>
                <outputDirectory>output path for the generated Avro classes</outputDirectory>
            </configuration>
        </execution>
    </executions>
</plugin>

3. Kafka configuration

spring:
    kafka:
        bootstrap-servers: xxx
        producer:
            key-serializer: io.confluent.kafka.serializers.KafkaAvroSerializer
            value-serializer: io.confluent.kafka.serializers.KafkaAvroSerializer
        consumer:
            group-id: test
            key-deserializer: io.confluent.kafka.serializers.KafkaAvroDeserializer
            value-deserializer: io.confluent.kafka.serializers.KafkaAvroDeserializer
        properties:
            schema.registry.url: xxx
            security.protocol: SASL_PLAINTEXT
            sasl:
                mechanism: PLAIN
                jaas.config: org.apache.kafka.common.security.plain.PlainLoginModule required username="" password="";
        template:
            default-topic: xxx
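One point worth flagging: for the consumer below to receive the generated AvroRecord type instead of a GenericRecord, Confluent's KafkaAvroDeserializer needs specific.avro.reader enabled. A sketch of the extra line, placed in the same properties block as schema.registry.url:

        properties:
            schema.registry.url: xxx
            specific.avro.reader: true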

4. Kafka producer

@Component
public class Producer {

    private static final Logger LOGGER = LoggerFactory.getLogger(Producer.class);

    @Autowired
    private KafkaTemplate<String, AvroRecord> kafkaTemplate;

    public void send(AvroRecord record) {
        if (Objects.isNull(record)) {
            return;
        }
        LOGGER.info(record.toString());
        kafkaTemplate.sendDefault("key", record);
    }
}
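Note that sendDefault publishes to the topic configured as spring.kafka.template.default-topic above; to target a specific topic, use kafkaTemplate.send(topic, key, record) instead.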

5. Kafka consumer

@Component
public class Consumer {

    private static final Logger LOGGER = LoggerFactory.getLogger(Consumer.class);

    @KafkaListener(topics = "xxx")
    public void consume(ConsumerRecord<String, AvroRecord> message) {
        LOGGER.info("receive message:");
        LOGGER.info("topic:" + message.topic());
        LOGGER.info("key:" + message.key());
        LOGGER.info("value:" + message.value());
    }
}
