Kafka--doublehappy

Steps


1. Create the topic
/opt/cloudera/parcels/KAFKA/lib/kafka/bin/kafka-topics.sh \
--create \
--zookeeper hadoop001:2181,hadoop002:2181,hadoop003:2181/kafka \
--replication-factor 3 \
--partitions 3 \
--topic test_perf
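
To confirm the partition and replica layout of the new topic, the same script can describe it; a minimal sketch using the same ZooKeeper quorum as above:

/opt/cloudera/parcels/KAFKA/lib/kafka/bin/kafka-topics.sh \
--describe \
--zookeeper hadoop001:2181,hadoop002:2181,hadoop003:2181/kafka \
--topic test_perf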


2. Configure producer and consumer trend charts (the Cloudera Manager tsquery below)
SELECT total_kafka_bytes_received_rate_across_kafka_broker_topics, total_kafka_bytes_fetched_rate_across_kafka_broker_topics
WHERE entityName = "kafka:test_perf" AND category = KAFKA_TOPIC


3. Benchmark commands

Number of messages per benchmark run (unit: W, i.e. 10,000 messages):

Producing messages to Kafka:

10W (100,000 records):
./kafka-producer-perf-test \
--topic test_perf \
--num-records 100000 \
--record-size 1000  \
--throughput 2000 \
--producer-props bootstrap.servers=hadoop001:9092,hadoop002:9092,hadoop003:9092

100W (1,000,000 records):
./kafka-producer-perf-test \
--topic test_perf \
--num-records 1000000 \
--record-size 2000  \
--throughput 5000 \
--producer-props bootstrap.servers=hadoop001:9092,hadoop002:9092,hadoop003:9092

1000W (10,000,000 records):
./kafka-producer-perf-test \
--topic test_perf \
--num-records 10000000 \
--record-size 2000  \
--throughput 5000 \
--producer-props bootstrap.servers=hadoop001:9092,hadoop002:9092,hadoop003:9092

Note:
Parameters of the kafka-producer-perf-test.sh script (using the 100W / 1,000,000-record run as the example):
--topic: the topic name, test_perf in this example
--num-records: total number of records to send, 1,000,000 here
--record-size: size of each record in bytes, 2,000 here
--throughput: target records sent per second, 5,000 here
--producer-props bootstrap.servers=...: the broker list, hadoop001:9092,hadoop002:9092,hadoop003:9092 in this example
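
As a quick sanity check, the target byte rate and run time follow directly from --record-size and --throughput. A minimal shell sketch for the 1,000,000-record run (assuming bc is available on the host):

RECORD_SIZE=2000; THROUGHPUT=5000; RECORDS=1000000
# target MB/sec = record-size * throughput / 1024 / 1024  (about 9.54, matching the tool's output below)
echo "scale=2; $RECORD_SIZE * $THROUGHPUT / 1024 / 1024" | bc
# expected run time in seconds = num-records / throughput  (about 200 s)
echo "$RECORDS / $THROUGHPUT" | bc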


Consuming messages from Kafka:

10W (100,000 records):
./kafka-consumer-perf-test \
--broker-list hadoop001:9092,hadoop002:9092,hadoop003:9092 \
--topic test_perf \
--fetch-size 1048576 \
--messages 100000 \
--threads 1

100W (1,000,000 records):
./kafka-consumer-perf-test \
--broker-list hadoop001:9092,hadoop002:9092,hadoop003:9092 \
--topic test_perf \
--fetch-size 1048576 \
--messages 1000000 \
--threads 1

1000W (10,000,000 records):
./kafka-consumer-perf-test \
--broker-list hadoop001:9092,hadoop002:9092,hadoop003:9092 \
--topic test_perf \
--fetch-size 1048576 \
--messages 10000000 \
--threads 1

Note:
Parameters of the kafka-consumer-perf-test.sh script:
--broker-list: the Kafka brokers to connect to
--topic: the topic to consume from, test_perf in this example (the topic the messages were written to)
--fetch-size: size of each fetch request in bytes, 1048576 here, i.e. 1 MB
--messages: total number of messages to consume, 1,000,000 here (100W)
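
Note that each kafka-consumer-perf-test run creates a fresh consumer group (e.g. perf-consumer-22544 in the log below) and starts from the earliest offset, so every run re-reads the topic from the beginning. To inspect such a group afterwards, a sketch along these lines should work (the group id here is simply the one taken from this run's log):

/opt/cloudera/parcels/KAFKA/lib/kafka/bin/kafka-consumer-groups.sh \
--bootstrap-server hadoop001:9092,hadoop002:9092,hadoop003:9092 \
--describe \
--group perf-consumer-22544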

Results

100W (1,000,000 records)
1. Produce

[root@hadoop002 bin]# ./kafka-producer-perf-test \
> --topic test_perf \
> --num-records 1000000 \
> --record-size 2000  \
> --throughput 5000 \
> --producer-props bootstrap.servers=hadoop001:9092,hadoop002:9092,hadoop003:9092
20/03/10 21:03:18 INFO producer.ProducerConfig: ProducerConfig values: 
        acks = 1
        batch.size = 16384
        bootstrap.servers = [hadoop001:9092, hadoop002:9092, hadoop003:9092]
        buffer.memory = 33554432
        client.dns.lookup = default
        client.id = 
        compression.type = none
        connections.max.idle.ms = 540000
        delivery.timeout.ms = 120000
        enable.idempotence = false
        interceptor.classes = []
        key.serializer = class org.apache.kafka.common.serialization.ByteArraySerializer
        linger.ms = 0
        max.block.ms = 60000
        max.in.flight.requests.per.connection = 5
        max.request.size = 1048576
        metadata.max.age.ms = 300000
        metric.reporters = []
        metrics.num.samples = 2
        metrics.recording.level = INFO
        metrics.sample.window.ms = 30000
        partitioner.class = class org.apache.kafka.clients.producer.internals.DefaultPartitioner
        receive.buffer.bytes = 32768
        reconnect.backoff.max.ms = 1000
        reconnect.backoff.ms = 50
        request.timeout.ms = 30000
        retries = 2147483647
        retry.backoff.ms = 100
        sasl.client.callback.handler.class = null
        sasl.jaas.config = null
        sasl.kerberos.kinit.cmd = /usr/bin/kinit
        sasl.kerberos.min.time.before.relogin = 60000
        sasl.kerberos.service.name = null
        sasl.kerberos.ticket.renew.jitter = 0.05
        sasl.kerberos.ticket.renew.window.factor = 0.8
        sasl.login.callback.handler.class = null
        sasl.login.class = null
        sasl.login.refresh.buffer.seconds = 300
        sasl.login.refresh.min.period.seconds = 60
        sasl.login.refresh.window.factor = 0.8
        sasl.login.refresh.window.jitter = 0.05
        sasl.mechanism = GSSAPI
        security.protocol = PLAINTEXT
        send.buffer.bytes = 131072
        ssl.cipher.suites = null
        ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
        ssl.endpoint.identification.algorithm = null
        ssl.key.password = null
        ssl.keymanager.algorithm = SunX509
        ssl.keystore.location = null
        ssl.keystore.password = null
        ssl.keystore.type = JKS
        ssl.protocol = TLS
        ssl.provider = null
        ssl.secure.random.implementation = null
        ssl.trustmanager.algorithm = PKIX
        ssl.truststore.location = null
        ssl.truststore.password = null
        ssl.truststore.type = JKS
        transaction.timeout.ms = 60000
        transactional.id = null
        value.serializer = class org.apache.kafka.common.serialization.ByteArraySerializer

20/03/10 21:03:18 INFO utils.AppInfoParser: Kafka version: 2.2.1-kafka-4.1.0
20/03/10 21:03:18 INFO utils.AppInfoParser: Kafka commitId: unknown
20/03/10 21:03:18 INFO clients.Metadata: Cluster ID: JZbklxHlQ1yopcrZVCpH_w
24997 records sent, 4999.4 records/sec (9.54 MB/sec), 25.3 ms avg latency, 406.0 ms max latency.
24953 records sent, 4984.6 records/sec (9.51 MB/sec), 24.0 ms avg latency, 330.0 ms max latency.
25087 records sent, 5017.4 records/sec (9.57 MB/sec), 16.1 ms avg latency, 521.0 ms max latency.
25010 records sent, 5001.0 records/sec (9.54 MB/sec), 11.7 ms avg latency, 522.0 ms max latency.
25006 records sent, 5001.2 records/sec (9.54 MB/sec), 16.2 ms avg latency, 510.0 ms max latency.
24949 records sent, 4907.4 records/sec (9.36 MB/sec), 0.6 ms avg latency, 96.0 ms max latency.
25485 records sent, 5096.0 records/sec (9.72 MB/sec), 14.2 ms avg latency, 521.0 ms max latency.
25000 records sent, 5000.0 records/sec (9.54 MB/sec), 5.0 ms avg latency, 317.0 ms max latency.
25010 records sent, 5001.0 records/sec (9.54 MB/sec), 11.4 ms avg latency, 529.0 ms max latency.
24998 records sent, 4999.6 records/sec (9.54 MB/sec), 9.0 ms avg latency, 330.0 ms max latency.
24989 records sent, 4996.8 records/sec (9.53 MB/sec), 11.0 ms avg latency, 515.0 ms max latency.
24533 records sent, 4877.3 records/sec (9.30 MB/sec), 0.6 ms avg latency, 127.0 ms max latency.
25635 records sent, 5127.0 records/sec (9.78 MB/sec), 8.8 ms avg latency, 328.0 ms max latency.
25000 records sent, 5000.0 records/sec (9.54 MB/sec), 4.5 ms avg latency, 310.0 ms max latency.
25000 records sent, 4999.0 records/sec (9.53 MB/sec), 13.9 ms avg latency, 540.0 ms max latency.
24995 records sent, 4999.0 records/sec (9.53 MB/sec), 9.2 ms avg latency, 131.0 ms max latency.
25017 records sent, 5003.4 records/sec (9.54 MB/sec), 14.0 ms avg latency, 539.0 ms max latency.
24598 records sent, 4919.6 records/sec (9.38 MB/sec), 1.4 ms avg latency, 84.0 ms max latency.
25405 records sent, 5081.0 records/sec (9.69 MB/sec), 13.9 ms avg latency, 527.0 ms max latency.
25000 records sent, 5000.0 records/sec (9.54 MB/sec), 4.6 ms avg latency, 313.0 ms max latency.
25002 records sent, 5000.4 records/sec (9.54 MB/sec), 11.3 ms avg latency, 522.0 ms max latency.
24996 records sent, 4999.2 records/sec (9.54 MB/sec), 11.5 ms avg latency, 405.0 ms max latency.
25002 records sent, 5000.4 records/sec (9.54 MB/sec), 14.0 ms avg latency, 517.0 ms max latency.
24415 records sent, 4874.2 records/sec (9.30 MB/sec), 0.6 ms avg latency, 127.0 ms max latency.
25635 records sent, 5126.0 records/sec (9.78 MB/sec), 11.8 ms avg latency, 539.0 ms max latency.
25010 records sent, 5001.0 records/sec (9.54 MB/sec), 10.9 ms avg latency, 521.0 ms max latency.
25015 records sent, 5002.0 records/sec (9.54 MB/sec), 6.2 ms avg latency, 217.0 ms max latency.
25007 records sent, 5001.4 records/sec (9.54 MB/sec), 14.7 ms avg latency, 521.0 ms max latency.
24998 records sent, 4999.6 records/sec (9.54 MB/sec), 9.8 ms avg latency, 328.0 ms max latency.
24568 records sent, 4912.6 records/sec (9.37 MB/sec), 0.8 ms avg latency, 78.0 ms max latency.
25437 records sent, 5087.4 records/sec (9.70 MB/sec), 13.5 ms avg latency, 521.0 ms max latency.
25005 records sent, 5000.0 records/sec (9.54 MB/sec), 10.9 ms avg latency, 522.0 ms max latency.
25005 records sent, 5001.0 records/sec (9.54 MB/sec), 13.5 ms avg latency, 519.0 ms max latency.
25006 records sent, 5001.2 records/sec (9.54 MB/sec), 10.8 ms avg latency, 504.0 ms max latency.
25004 records sent, 4999.8 records/sec (9.54 MB/sec), 12.3 ms avg latency, 509.0 ms max latency.
24555 records sent, 4910.0 records/sec (9.37 MB/sec), 0.7 ms avg latency, 60.0 ms max latency.
25455 records sent, 5091.0 records/sec (9.71 MB/sec), 14.6 ms avg latency, 522.0 ms max latency.
25003 records sent, 5000.6 records/sec (9.54 MB/sec), 10.2 ms avg latency, 524.0 ms max latency.
24997 records sent, 4999.4 records/sec (9.54 MB/sec), 9.0 ms avg latency, 333.0 ms max latency.
20/03/10 21:06:38 INFO producer.KafkaProducer: [Producer clientId=producer-1] Closing the Kafka producer with timeoutMillis = 9223372036854775807 ms.
1000000 records sent, 4999.650024 records/sec (9.54 MB/sec), 10.47 ms avg latency, 540.00 ms max latency, 1 ms 50th, 30 ms 95th, 307 ms 99th, 498 ms 99.9th.
[root@hadoop002 bin]# 

2. Consume

[root@hadoop002 bin]# ./kafka-consumer-perf-test --broker-list hadoop001:9092,hadoop002:9092,hadoop003:9092 --topic test_perf --fetch-size 1048576 --messages 1000000 --threads 1
20/03/10 21:04:31 INFO utils.Log4jControllerRegistration$: Registered kafka:type=kafka.Log4jController MBean
20/03/10 21:04:31 INFO tools.ConsumerPerformance$: Starting consumer...
start.time, end.time, data.consumed.in.MB, MB.sec, data.consumed.in.nMsg, nMsg.sec, rebalance.time.ms, fetch.time.ms, fetch.MB.sec, fetch.nMsg.sec
20/03/10 21:04:31 INFO consumer.ConsumerConfig: ConsumerConfig values: 
        auto.commit.interval.ms = 5000
        auto.offset.reset = earliest
        bootstrap.servers = [hadoop001:9092, hadoop002:9092, hadoop003:9092]
        check.crcs = false
        client.dns.lookup = default
        client.id = 
        connections.max.idle.ms = 540000
        default.api.timeout.ms = 60000
        enable.auto.commit = true
        exclude.internal.topics = true
        fetch.max.bytes = 52428800
        fetch.max.wait.ms = 500
        fetch.min.bytes = 1
        group.id = perf-consumer-22544
        heartbeat.interval.ms = 3000
        interceptor.classes = []
        internal.leave.group.on.close = true
        isolation.level = read_uncommitted
        key.deserializer = class org.apache.kafka.common.serialization.ByteArrayDeserializer
        max.partition.fetch.bytes = 1048576
        max.poll.interval.ms = 300000
        max.poll.records = 500
        metadata.max.age.ms = 300000
        metric.reporters = []
        metrics.num.samples = 2
        metrics.recording.level = INFO
        metrics.sample.window.ms = 30000
        partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor]
        receive.buffer.bytes = 2097152
        reconnect.backoff.max.ms = 1000
        reconnect.backoff.ms = 50
        request.timeout.ms = 30000
        retry.backoff.ms = 100
        sasl.client.callback.handler.class = null
        sasl.jaas.config = null
        sasl.kerberos.kinit.cmd = /usr/bin/kinit
        sasl.kerberos.min.time.before.relogin = 60000
        sasl.kerberos.service.name = null
        sasl.kerberos.ticket.renew.jitter = 0.05
        sasl.kerberos.ticket.renew.window.factor = 0.8
        sasl.login.callback.handler.class = null
        sasl.login.class = null
        sasl.login.refresh.buffer.seconds = 300
        sasl.login.refresh.min.period.seconds = 60
        sasl.login.refresh.window.factor = 0.8
        sasl.login.refresh.window.jitter = 0.05
        sasl.mechanism = GSSAPI
        security.protocol = PLAINTEXT
        send.buffer.bytes = 131072
        session.timeout.ms = 10000
        ssl.cipher.suites = null
        ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
        ssl.endpoint.identification.algorithm = null
        ssl.key.password = null
        ssl.keymanager.algorithm = SunX509
        ssl.keystore.location = null
        ssl.keystore.password = null
        ssl.keystore.type = JKS
        ssl.protocol = TLS
        ssl.provider = null
        ssl.secure.random.implementation = null
        ssl.trustmanager.algorithm = PKIX
        ssl.truststore.location = null
        ssl.truststore.password = null
        ssl.truststore.type = JKS
        value.deserializer = class org.apache.kafka.common.serialization.ByteArrayDeserializer

20/03/10 21:04:31 INFO utils.AppInfoParser: Kafka version: 2.2.1-kafka-4.1.0
20/03/10 21:04:31 INFO utils.AppInfoParser: Kafka commitId: unknown
20/03/10 21:04:31 INFO consumer.KafkaConsumer: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Subscribed to topic(s): test_perf
20/03/10 21:04:32 INFO clients.Metadata: Cluster ID: JZbklxHlQ1yopcrZVCpH_w
20/03/10 21:04:32 INFO internals.AbstractCoordinator: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Discovered group coordinator hadoop001:9092 (id: 2147483646 rack: null)
20/03/10 21:04:32 INFO internals.ConsumerCoordinator: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Revoking previously assigned partitions []
20/03/10 21:04:32 INFO internals.AbstractCoordinator: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] (Re-)joining group
20/03/10 21:04:32 INFO internals.AbstractCoordinator: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] (Re-)joining group
20/03/10 21:04:35 INFO internals.AbstractCoordinator: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Successfully joined group with generation 1
20/03/10 21:04:35 INFO internals.ConsumerCoordinator: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Setting newly assigned partitions: test_perf-1, test_perf-0, test_perf-2
20/03/10 21:04:35 INFO internals.Fetcher: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Resetting offset for partition test_perf-2 to offset 0.
20/03/10 21:04:35 INFO internals.Fetcher: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Resetting offset for partition test_perf-1 to offset 0.
20/03/10 21:04:35 INFO internals.Fetcher: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Resetting offset for partition test_perf-0 to offset 0.
20/03/10 21:04:45 INFO clients.FetchSessionHandler: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Node 1 was unable to process the fetch request with (sessionId=1848062024, epoch=2779): INVALID_FETCH_SESSION_EPOCH.
20/03/10 21:05:07 INFO clients.FetchSessionHandler: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Node 3 was unable to process the fetch request with (sessionId=1939689354, epoch=15950): INVALID_FETCH_SESSION_EPOCH.
20/03/10 21:05:08 INFO clients.FetchSessionHandler: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Node 3 was unable to process the fetch request with (sessionId=1296743084, epoch=616): INVALID_FETCH_SESSION_EPOCH.
20/03/10 21:05:16 INFO clients.FetchSessionHandler: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Node 3 was unable to process the fetch request with (sessionId=107406006, epoch=5057): INVALID_FETCH_SESSION_EPOCH.
20/03/10 21:05:20 INFO clients.FetchSessionHandler: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Node 1 was unable to process the fetch request with (sessionId=453315699, epoch=16700): INVALID_FETCH_SESSION_EPOCH.
20/03/10 21:05:30 INFO clients.FetchSessionHandler: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Node 1 was unable to process the fetch request with (sessionId=1309416404, epoch=4687): INVALID_FETCH_SESSION_EPOCH.
20/03/10 21:05:40 INFO clients.FetchSessionHandler: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Node 1 was unable to process the fetch request with (sessionId=1131711713, epoch=4943): INVALID_FETCH_SESSION_EPOCH.
20/03/10 21:05:58 INFO clients.FetchSessionHandler: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Node 1 was unable to process the fetch request with (sessionId=817162043, epoch=8515): INVALID_FETCH_SESSION_EPOCH.
20/03/10 21:06:00 INFO clients.FetchSessionHandler: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Node 3 was unable to process the fetch request with (sessionId=1264470942, epoch=26644): INVALID_FETCH_SESSION_EPOCH.
20/03/10 21:06:00 INFO clients.FetchSessionHandler: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Node 3 was unable to process the fetch request with (sessionId=700425225, epoch=138): INVALID_FETCH_SESSION_EPOCH.
20/03/10 21:06:00 INFO clients.FetchSessionHandler: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Node 1 was unable to process the fetch request with (sessionId=1035054884, epoch=928): INVALID_FETCH_SESSION_EPOCH.
20/03/10 21:06:01 INFO clients.FetchSessionHandler: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Node 3 was unable to process the fetch request with (sessionId=642495924, epoch=253): INVALID_FETCH_SESSION_EPOCH.
20/03/10 21:06:06 INFO clients.FetchSessionHandler: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Node 3 was unable to process the fetch request with (sessionId=623039930, epoch=3364): INVALID_FETCH_SESSION_EPOCH.
20/03/10 21:06:17 INFO clients.FetchSessionHandler: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Node 3 was unable to process the fetch request with (sessionId=55542294, epoch=6767): INVALID_FETCH_SESSION_EPOCH.
20/03/10 21:06:17 INFO clients.FetchSessionHandler: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Node 1 was unable to process the fetch request with (sessionId=1612253082, epoch=7934): INVALID_FETCH_SESSION_EPOCH.
20/03/10 21:06:18 INFO internals.AbstractCoordinator: [Consumer clientId=consumer-1, groupId=perf-consumer-22544] Member consumer-1-f3b23b34-3ac3-4e54-8253-b84529fe990f sending LeaveGroup request to coordinator hadoop001:9092 (id: 2147483646 rack: null)
2020-03-10 21:04:31:864, 2020-03-10 21:06:18:736, 1811.9869, 16.9547, 1000003, 9357.0159, 3105, 103767, 17.4621, 9637.0041
[root@hadoop002 bin]#


That is, the summary line reads:

start.time, end.time, data.consumed.in.MB, MB.sec, data.consumed.in.nMsg, nMsg.sec, rebalance.time.ms, fetch.time.ms, fetch.MB.sec, fetch.nMsg.sec
2020-03-10 21:04:31:864, 2020-03-10 21:06:18:736, 1811.9869, 16.9547, 1000003, 9357.0159, 3105, 103767, 17.4621, 9637.0041


In this run (the 100W test), 1811.9869 MB of data was consumed in total at 16.9547 MB/sec; 1,000,003 messages were consumed at 9357.0159 messages/sec (the fetch-only rates were 17.4621 MB/sec and 9637.0041 messages/sec).
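
If you want to pull the key figures out of that CSV line in a script, a small awk sketch (field positions match the header line above) could look like this:

echo "2020-03-10 21:04:31:864, 2020-03-10 21:06:18:736, 1811.9869, 16.9547, 1000003, 9357.0159, 3105, 103767, 17.4621, 9637.0041" | \
awk -F', ' '{printf "%s MB consumed, %s MB/sec, %s messages, %s msg/sec\n", $3, $4, $5, $6}'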

Monitoring charts (from the chart configured in step 2):
Blue: produce (bytes received)
Green: consume (bytes fetched)

Summary:
1. Are the producer and consumer running at the same speed?
The two trend lines match, so the consumer keeps pace with the producer.

2. Why do the consumer and producer curves track each other?
Messages are consumed as soon as they are produced; the consumer is under no pressure and no backlog builds up.

3. Why is the consumer curve higher than the producer curve?
The produced bytes counted here are essentially the record value, while each consumed record also carries key, topic, partition, offset and other metadata along with the value, so more bytes are consumed than produced.