kafkaConsumer.cpp
/** @file kafkaConsumer.cpp
* @note HangZhou Hikvision System Technology Co., Ltd. All Right Reserved.
* @brief kafka Consumer基類
* @author yuzhen([email protected])/lixiaogang5([email protected])
* @date 2018/4/18
* @note v1.0.0 Created
* @history
* @warning
*/
#include "kafkaConsumer.h"

#include <cerrno>

#include "KafkaApi.h"
#include "KafkaConsumerFaceStaticLib.h"
#include "ZookeeperBase.h"
/**@fn Instance
 * @brief Accessor for the process-wide KafkaConsumer singleton
 *        (function-local static, constructed on first use).
 * return reference to the single KafkaConsumer instance
 */
KafkaConsumer& KafkaConsumer::Instance()
{
    static KafkaConsumer s_kafkaConsumer;
    return s_kafkaConsumer;
}
/**@fn KafkaConsumer
 * @brief Constructor: zero the host/topic buffers and put the object into
 *        the "not initialised, not running" state. Broker address and topic
 *        are supplied later via Init().
 * @param[in] NONE
 * return NONE
 */
KafkaConsumer::KafkaConsumer()
    : m_rk(NULL)
    , m_bThreadExit(HPR_FALSE)
{
    // Zero-fill both string buffers (equivalent to initialising them with "").
    memset(m_szHost, 0x00, sizeof(m_szHost));
    memset(m_szTopic, 0x00, sizeof(m_szTopic));
}
/**@fn ~KafkaConsumer
 * @brief Destructor: releases all kafka resources by delegating to Uninit().
 * @param[in] NONE
 * @param[out] NONE
 * return NONE
 */
KafkaConsumer::~KafkaConsumer(HPR_VOID)
{
    // Uninit() sets the exit flag, waits for the consumer threads to
    // acknowledge it, then destroys the rd_kafka handle.
    Uninit();
}
/**@fn Init
 * @brief Initialise the kafka consumer: resolve the broker list through
 *        zookeeper, create the rd_kafka consumer handle, and discover how
 *        many partitions the topic has (stored in m_partitionsinfo).
 * @param[in] urls  zookeeper address list (single node or cluster)
 * @param[in] topic kafka topic name, e.g. HUMAN_INFO_TOPIC
 * return HPR_OK on success, HPR_ERROR on failure
 */
HPR_BOOL KafkaConsumer::Init(const string &urls, const string &topic)
{
    // NOTE(review): despite the HPR_BOOL return type, success is signalled
    // with HPR_OK (as in the original). Failure paths now consistently use
    // HPR_ERROR so that the cleanup test below ("HPR_OK != iRetVal") can
    // never mistake a failure for success.
    HPR_INT32 iRetVal = HPR_ERROR;
    HPR_INT8 errstr[512] = {0};
    rd_kafka_topic_conf_t *topic_conf = NULL;
    rd_kafka_topic_t *rkt = NULL;
    rd_kafka_conf_t *conf = NULL;
    const struct rd_kafka_metadata *metadata = NULL;
    rd_kafka_resp_err_t err_t;
    do
    {
        if (urls.empty() || topic.empty())
        {
            // %s, not %p: log the string contents, not the pointer values.
            SYS_ERROR("Init param is empty urls[%s] topic[%s]", urls.c_str(), topic.c_str());
            break;
        }
        ZookeeperBase::Instance().SetZooKeeperList(urls); // zookeeper standalone/cluster address list
        ZookeeperBase::Instance().Init();
        string kafkaurl = ZookeeperBase::Instance().GetBrokerList();
        HPR_Strncpy(m_szHost, kafkaurl.c_str(), sizeof(m_szHost));
        HPR_Strncpy(m_szTopic, topic.c_str(), sizeof(m_szTopic)); // e.g. HUMAN_INFO_TOPIC
        SYS_INFO("kafkaConsumer init start. m_szHost[%s] m_szTopic[%s] kafkaVersion[%s]", m_szHost, m_szTopic, rd_kafka_version_str());
        /* global kafka configuration */
        conf = rd_kafka_conf_new();
        rd_kafka_conf_set(conf, "offset.store.method", "broker", errstr, sizeof(errstr));
        /* topic configuration */
        topic_conf = rd_kafka_topic_conf_new();
        /* Create the consumer handle. rd_kafka_new() takes ownership of conf
         * only on success, so on failure conf must still be destroyed by us
         * (the old code leaked both conf and topic_conf here). */
        m_rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
        if (NULL == m_rk)
        {
            SYS_ERROR("rd_kafka_new err[%s]", errstr);
            break;
        }
        conf = NULL; /* now owned by m_rk */
        SYS_DEBUG("Kafka handle name[%s]", rd_kafka_name(m_rk));
        /* Create the topic handle. rd_kafka_topic_new() assumes ownership of
         * topic_conf whether it succeeds or fails. */
        rkt = rd_kafka_topic_new(m_rk, m_szTopic, topic_conf);
        topic_conf = NULL;
        if (NULL == rkt)
        {
            SYS_ERROR("rd_kafka_topic_new err.");
            break;
        }
        SYS_DEBUG("Topic name[%s]", rd_kafka_topic_name(rkt));
        /* register the bootstrap broker(s) with the handle */
        if (rd_kafka_brokers_add(m_rk, m_szHost) < 1)
        {
            SYS_ERROR("rd_kafka_brokers_add err.");
            break;
        }
        err_t = rd_kafka_metadata(m_rk, HPR_FALSE, rkt, &metadata, 5000);
        if (RD_KAFKA_RESP_ERR_NO_ERROR != err_t)
        {
            /* err_t already IS a resp_err code: log it directly. The old code
             * pushed it through rd_kafka_errno2err(), which decodes errno
             * values and therefore produced a bogus error code. */
            SYS_ERROR("rd_kafka_metadata err .errno[%d] errstr[%s]", err_t, rd_kafka_err2str(err_t));
            break;
        }
        /* Partition info is written to zookeeper asynchronously: poll for it,
         * but bounded — the old loop could spin forever if the topic never
         * appeared. */
        HPR_INT32 partitionNum = 0;
        string strkafkaPartitionPath = "/brokers/topics/" + topic;
        const HPR_INT32 iMaxPolls = 600; /* 600 * 100ms = 60s */
        for (HPR_INT32 iPoll = 0; iPoll < iMaxPolls; ++iPoll)
        {
            ZookeeperBase::Instance().GetpartionNum(strkafkaPartitionPath.c_str(), partitionNum);
            if (partitionNum > 0)
            {
                break;
            }
            HPR_Sleep(100);
        }
        if (partitionNum <= 0)
        {
            SYS_ERROR("no partition found for topic[%s] within 60s", topic.c_str());
            break;
        }
        SYS_INFO("partitions num[%d]", partitionNum);
        m_partitionsinfo.clear();
        for (HPR_INT32 i = 0; i < partitionNum; ++i)
        {
            SYS_INFO("partitions id[%d]", i);
            m_partitionsinfo[i] = HPR_FALSE; /* no consumer thread running yet */
        }
        iRetVal = HPR_OK;
    } while (0);
    /* unconditional cleanup of local handles */
    if (NULL != metadata)
    {
        rd_kafka_metadata_destroy(metadata);
        metadata = NULL;
    }
    if (NULL != topic_conf)
    {
        /* rd_kafka_new() failed before topic_conf was handed over */
        rd_kafka_topic_conf_destroy(topic_conf);
        topic_conf = NULL;
    }
    if (NULL != conf)
    {
        /* rd_kafka_new() failed: conf is still ours to free */
        rd_kafka_conf_destroy(conf);
        conf = NULL;
    }
    if (NULL != rkt)
    {
        /* only needed for metadata lookup; each consumer thread creates its
         * own topic handle in StartKafkaConsumerTask() */
        rd_kafka_topic_destroy(rkt);
        rkt = NULL;
    }
    if (HPR_OK != iRetVal)
    {
        /* roll back partially-initialised state (destroys m_rk as well) */
        Uninit();
        if (rd_kafka_wait_destroyed(2000))
        {
            SYS_ERROR("All kafka objects are now not destroyed.");
        }
    }
    SYS_INFO("KafkaConsumer Init end.");
    return iRetVal;
}
/**@fn Uninit
 * @brief Release kafka resources: raise the exit flag, wait until every
 *        per-partition consumer thread has cleared its running flag in
 *        m_partitionsinfo, then destroy the rd_kafka handle.
 * @param[in] NONE
 * @param[out] NONE
 * return HPR_OK
 */
HPR_BOOL KafkaConsumer::Uninit()
{
    SYS_INFO("KafkaConsumer Uninit start.");
    m_bThreadExit = HPR_TRUE; // signal every consumer thread to stop
    memset(m_szHost, 0x00, sizeof(m_szHost));
    memset(m_szTopic, 0x00, sizeof(m_szTopic));
    /* Poll until no partition still has a running consumer thread. */
    for (;;)
    {
        HPR_BOOL bAnyRunning = HPR_FALSE;
        for (map<int,int>::iterator iter = m_partitionsinfo.begin();
             iter != m_partitionsinfo.end(); ++iter)
        {
            if (HPR_TRUE == iter->second)
            {
                bAnyRunning = HPR_TRUE;
                break;
            }
        }
        if (HPR_FALSE == bAnyRunning)
        {
            break; // every thread has acknowledged the exit flag
        }
        HPR_Sleep(100);
        SYS_INFO("wait kafka_consumer_task exit");
    }
    if (NULL != m_rk)
    {
        rd_kafka_destroy(m_rk);
        m_rk = NULL;
    }
    SYS_INFO("KafkaConsumer Uninit end.");
    return HPR_OK;
}
/**@fn Start
 * @brief Start the kafka consumption service: spawn one detached consumer
 *        thread per partition recorded in m_partitionsinfo.
 * @param[in] NONE
 * @param[out] NONE
 * return HPR_TRUE if every thread was created, HPR_FALSE otherwise
 */
HPR_BOOL KafkaConsumer::Start()
{
    HPR_BOOL iRetVal = HPR_FALSE;
    HPR_BOOL iConsumerSuc = HPR_TRUE;
    SYS_INFO("Start kafka consumption.");
    do
    {
        if (HPR_TRUE == m_bThreadExit)
        {
            /* Uninit() already ran (or Init() never did): refuse to start */
            SYS_ERROR("Start kafka consumption err. m_bThreadExit [%d]", m_bThreadExit);
            break;
        }
        map<int,int>::iterator it;
        for (it = m_partitionsinfo.begin();
             it != m_partitionsinfo.end(); ++it)
        {
            /* heap bundle; ownership passes to the thread, which deletes it */
            Thread_Create_Param *param = new (std::nothrow) Thread_Create_Param;
            if (NULL == param)
            {
                SYS_ERROR("Thread_Create_Param object new err, try again.");
                continue;
            }
            param->pClass = (HPR_VOID *)this;
            /* The map node (and thus &it->first) outlives the detached thread
             * because this object is a process-wide singleton. */
            param->pParam = (HPR_VOID *)&it->first;
            if (HPR_ThreadDetached_Create(CycleKafkaConsumerThread, (HPR_VOID *)param, 8*1024*1024) == HPR_FALSE)
            {
                /* The thread never started, so nobody else will free param:
                 * reclaim it here (the old code leaked it). */
                delete param;
                it->second = HPR_FALSE;
                iConsumerSuc = HPR_FALSE;
                SYS_ERROR("HPR_ThreadDetached_Create kafka_consumer_thread id[%d] err", it->first);
                break;
            }
            SYS_INFO("kafka consumer thread created for partition[%d].", it->first);
        }
        if (HPR_TRUE == iConsumerSuc)
        {
            iRetVal = HPR_TRUE;
        }
    } while (0);
    SYS_INFO("End kafka consumption.");
    return iRetVal;
}
/**@fn Stop
 * @brief Stop the kafka consumption service.
 * @param[in] NONE
 * @param[out] NONE
 * return HPR_BOOL always HPR_TRUE
 * @note Currently a log-only placeholder: the consumer threads are actually
 *       stopped by Uninit() (which raises m_bThreadExit) — confirm whether
 *       callers expect Stop() itself to halt consumption.
 */
HPR_BOOL KafkaConsumer::Stop()
{
    SYS_INFO("KafkaConsumer stop.");
    return HPR_TRUE;
}
/**@fn CycleKafkaConsumerThread
 * @brief Detached-thread entry point: unpack the Thread_Create_Param bundle,
 *        run the per-partition consume loop, then free the bundle.
 * @param[in] param heap-allocated Thread_Create_Param (owned by this thread)
 * @param[out] NONE
 * return NULL always
 */
HPR_VOID* CALLBACK KafkaConsumer::CycleKafkaConsumerThread(HPR_VOID* param)
{
    if (NULL == param)
    {
        SYS_ERROR("CycleKafkaConsumerThread param is null param[%p]", param);
        return NULL;
    }
    Thread_Create_Param *pArgs = static_cast<Thread_Create_Param*>(param);
    KafkaConsumer *pSelf = static_cast<KafkaConsumer *>(pArgs->pClass);
    HPR_INT32 iPartitionId = *(static_cast<HPR_INT32 *>(pArgs->pParam));
    if (NULL != pSelf)
    {
        /* blocks until m_bThreadExit is raised */
        pSelf->StartKafkaConsumerTask(iPartitionId);
    }
    delete pArgs; /* this thread owns the bundle */
    return NULL;
}
/**@fn RealKafkaConsumerFunc
 * @brief Base-class consumption hook. Derived classes override this to do
 *        the real message consumption (see KafkaConsumerFaceStaticLib).
 * @param[in] rkt            kafka topic handle
 * @param[in] iPartitionsId  partition id being consumed
 * return HPR_VOID
 */
HPR_VOID KafkaConsumer::RealKafkaConsumerFunc(rd_kafka_topic_t *rkt, HPR_INT32 iPartitionsId)
{
    // Reaching here means the derived class did not override the hook.
    SYS_ERROR("RealKafkaConsumerFunc not support");
    // Throttle: the caller invokes this in a tight loop until exit is signalled.
    HPR_Sleep(1000);
}
/**@fn StartKafkaConsumerTask
 * @brief Per-partition consume loop run by each detached consumer thread:
 *        create a topic handle, start consumption on the partition, and pump
 *        RealKafkaConsumerFunc() until m_bThreadExit is raised.
 * @param[in] iPartitionsId topic partition id
 * @param[out] NONE
 * return HPR_VOID
 */
HPR_VOID KafkaConsumer::StartKafkaConsumerTask(HPR_INT32 iPartitionsId)
{
    rd_kafka_topic_conf_t *topic_conf = NULL;
    rd_kafka_topic_t *rkt = NULL;
    rd_kafka_resp_err_t err_t;
    map<int,int>::iterator it = m_partitionsinfo.find(iPartitionsId);
    if (m_partitionsinfo.end() != it)
    {
        it->second = HPR_TRUE; /* mark this partition's thread as running */
    }
    SYS_INFO("kafka_consumer_task Broker[%s] Topic[%s] partitionId[%d] start", m_szHost, m_szTopic, iPartitionsId);
    while (HPR_FALSE == m_bThreadExit)
    {
        /* topic configuration; rd_kafka_topic_new() takes ownership of it
         * whether it succeeds or fails */
        topic_conf = rd_kafka_topic_conf_new();
        rkt = rd_kafka_topic_new(m_rk, m_szTopic, topic_conf);
        topic_conf = NULL;
        if (NULL == rkt)
        {
            /* The old code fed a NULL rkt straight into
             * rd_kafka_consume_start(), crashing on topic-creation failure. */
            SYS_ERROR("rd_kafka_topic_new err. partitionId[%d]", iPartitionsId);
            HPR_Sleep(1000);
            continue;
        }
        if (rd_kafka_consume_start(rkt, iPartitionsId, RD_KAFKA_OFFSET_STORED) != 0)
        {
            /* On failure the API returns -1 and reports the cause via errno.
             * The old code decoded the -1 return value itself, which yields a
             * meaningless error code. Capture errno before any further calls
             * can clobber it. */
            err_t = rd_kafka_errno2err(errno);
            SYS_ERROR("rd_kafka_consume_start err .errno[%d] errstr[%s]", err_t, rd_kafka_err2str(err_t));
            rd_kafka_topic_destroy(rkt);
            rkt = NULL;
            HPR_Sleep(1000);
            continue;
        }
        /* consume until told to exit */
        while (HPR_FALSE == m_bThreadExit)
        {
            RealKafkaConsumerFunc(rkt, iPartitionsId);
        }
        rd_kafka_consume_stop(rkt, iPartitionsId);
        /* Destroy topic */
        rd_kafka_topic_destroy(rkt);
        rkt = NULL;
    }
    SYS_INFO("kafka_consumer_task Broker[%s] Topic[%s] partitionId[%d] exit", m_szHost, m_szTopic, iPartitionsId);
    it = m_partitionsinfo.find(iPartitionsId);
    if (m_partitionsinfo.end() != it)
    {
        it->second = HPR_FALSE; /* signal Uninit() that this thread is gone */
    }
    return;
}
/**@fn MetadataContainerPrint
 * @brief Dump the contents of a kafka metadata container — brokers, topics,
 *        partitions, replicas and ISRs — through the SYS_INFO log.
 * @param[in] topic    topic name, or NULL to label the dump "all topics"
 * @param[in] metadata metadata container from rd_kafka_metadata(); must be
 *                     non-NULL (not checked here — caller's responsibility)
 * return HPR_VOID
 */
HPR_VOID KafkaConsumer::MetadataContainerPrint(const HPR_INT8 *topic, const struct rd_kafka_metadata *metadata)
{
    HPR_INT32 i, j, k;
    // NOTE(review): "topic ? : ..." (binary ?:) is a GNU extension — works on
    // gcc/clang but is not portable to other compilers.
    SYS_INFO("Metadata for %s (from broker %d: %s):\n",
        topic ? : "all topics",
        metadata->orig_broker_id,
        metadata->orig_broker_name);
    /* Iterate brokers */
    SYS_INFO(" %i brokers:", metadata->broker_cnt);
    for (i = 0 ; i < metadata->broker_cnt ; i++)
        SYS_INFO(" broker %d at %s:%i",
            metadata->brokers[i].id,
            metadata->brokers[i].host,
            metadata->brokers[i].port);
    /* Iterate topics */
    SYS_INFO(" %i topics:", metadata->topic_cnt);
    for (i = 0 ; i < metadata->topic_cnt ; i++)
    {
        const struct rd_kafka_metadata_topic *t = &metadata->topics[i];
        SYS_INFO(" topic \"%s\" with %i partitions:",
            t->topic,
            t->partition_cnt);
        if (t->err)
        {
            SYS_INFO(" %s", rd_kafka_err2str(t->err));
            // leader election may still be in progress for a new topic
            if (t->err == RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE)
                SYS_INFO(" (try again)");
        }
        /* Iterate topic's partitions */
        for (j = 0 ; j < t->partition_cnt ; j++)
        {
            const struct rd_kafka_metadata_partition *p;
            p = &t->partitions[j];
            SYS_INFO(" partition %d, "
                "leader %d, replicas: ",
                p->id, p->leader);
            /* Iterate partition's replicas */
            for (k = 0 ; k < p->replica_cnt ; k++)
                SYS_INFO("%s%d", k > 0 ? ",":"", p->replicas[k]);
            /* Iterate partition's ISRs (in-sync replicas) */
            SYS_INFO(", isrs: ");
            for (k = 0 ; k < p->isr_cnt ; k++)
                SYS_INFO("%s%d", k > 0 ? ",":"", p->isrs[k]);
            if (p->err)
                SYS_INFO(", %s", rd_kafka_err2str(p->err));
        }
    }
}
kafkaConsumer.h
/** @file kafkaConsumer.h
 * @note HangZhou Hikvision System Technology Co., Ltd. All Right Reserved.
 * @brief kafka Consumer base class
* @author yuzhen([email protected])/lixiaogang5([email protected])
* @date 2018/4/18
* @note v1.0.0 Created
* @history
* @warning
*/
#ifndef _KAFKA_CONSUMER_H_
#define _KAFKA_CONSUMER_H_
#include <string.h>
#include "rdkafka.h"
#include "HPR_Types.h"
#include "HPR_Thread.h"
#include "HPR_Utils.h"
#include "HPR_String.h"
#include "LoadConf.h"
#include "CenterLog.h"
// Heap-allocated argument bundle handed to each detached consumer thread;
// the thread entry point (CycleKafkaConsumerThread) deletes it when done.
typedef struct
{
    HPR_VOID *pClass;  // KafkaConsumer instance ("this" of the creator)
    HPR_VOID *pParam;  // pointer to the partition id (HPR_INT32) to consume
}Thread_Create_Param;
// kafka consumer base class (singleton). Derived classes override
// RealKafkaConsumerFunc() to implement the actual message handling
// (see KafkaConsumerFaceStaticLib).
class KafkaConsumer
{
public:
    static KafkaConsumer& Instance();
    KafkaConsumer(HPR_VOID);
    // virtual: this class is used polymorphically (it has a virtual method
    // and KafkaConsumerFaceStaticLib derives from it); without a virtual
    // destructor, destroying a derived object through a KafkaConsumer*
    // would be undefined behaviour.
    virtual ~KafkaConsumer(HPR_VOID);
    // Resolve brokers via zookeeper, create the consumer handle, discover
    // the topic's partition count.
    HPR_BOOL Init(const std::string &urls, const std::string &topic);
    // Signal all consumer threads to exit, wait for them, free the handle.
    HPR_BOOL Uninit();
    // Spawn one detached consumer thread per partition.
    HPR_BOOL Start();
    HPR_BOOL Stop();
    // Detached-thread entry point; param is a heap Thread_Create_Param.
    static HPR_VOID* CALLBACK CycleKafkaConsumerThread(HPR_VOID* param);
    // Per-partition consume loop run by each consumer thread.
    HPR_VOID StartKafkaConsumerTask(HPR_INT32 iPartitionsId);
    // Consumption hook; overridden by derived classes.
    virtual HPR_VOID RealKafkaConsumerFunc(rd_kafka_topic_t *rkt, HPR_INT32 iPartitionsId);
private:
    HPR_VOID MetadataContainerPrint(const HPR_INT8 *topic, const struct rd_kafka_metadata *metadata);
private:
    HPR_INT8 m_szHost[100]; // kafka broker host:port list (e.g. 127.0.0.1:9092)
    HPR_INT8 m_szTopic[64]; // kafka topic name
    rd_kafka_t *m_rk;       // kafka handle: global configuration and shared state
    map <HPR_INT32, HPR_INT32> m_partitionsinfo; // map<partition id, thread-running flag>
    HPR_BOOL m_bThreadExit; // exit flag polled by all consumer threads
};
#endif
kafkaFaceStaticLib.cpp
/** @file kafkaFaceStaticLib.cpp
* @note HangZhou Hikvision System Technology Co., Ltd. All Right Reserved.
* @brief KafkaConsumerFaceSnap kafka數據消費類
* @author lixiaogang5([email protected])
* @date 2018/4/18
* @note v1.0.0 Created
* @history
* @warning
*/
#include "KafkaConsumerFaceStaticLib.h"
#include "CenterMsgDealRes.h"
#include "opcode.h"
/**@fn Instance
 * @brief Accessor for the process-wide KafkaConsumerFaceStaticLib singleton
 *        (function-local static, constructed on first use).
 * @param[in] NONE
 * return reference to the single KafkaConsumerFaceStaticLib instance
 */
KafkaConsumerFaceStaticLib& KafkaConsumerFaceStaticLib::Instance()
{
    static KafkaConsumerFaceStaticLib s_faceStaticLibConsumer;
    return s_faceStaticLibConsumer;
}
/**@fn KafkaConsumerFaceStaticLib
 * @brief Constructor; nothing to initialise beyond the KafkaConsumer base.
 * @param[in] NONE
 * @param[out] NONE
 * return NONE
 */
KafkaConsumerFaceStaticLib::KafkaConsumerFaceStaticLib()
{
}
/**@fn ~KafkaConsumerFaceStaticLib
 * @brief Destructor; base-class destructor performs the kafka cleanup.
 * @param[in] NONE
 * @param[out] NONE
 * return NONE
 */
KafkaConsumerFaceStaticLib:: ~KafkaConsumerFaceStaticLib ()
{
}
/**@fn RealKafkaConsumerFunc
 * @brief Consumption override: drain up to 1000ms worth of messages from the
 *        partition and dispatch each one through kafkaConsume_Cb (callback
 *        consumption is the most efficient of librdkafka's legacy consume APIs).
 * @param[in] rkt            kafka topic handle
 * @param[in] iPartitionsId  partition id
 * return HPR_VOID
 */
HPR_VOID KafkaConsumerFaceStaticLib::RealKafkaConsumerFunc(rd_kafka_topic_t *rkt, HPR_INT32 iPartitionsId)
{
    HPR_INT32 iRetVal = rd_kafka_consume_callback(rkt, iPartitionsId, 1000, kafkaConsume_Cb, NULL);
    if (-1 == iRetVal)
    {
        /* On failure librdkafka reports the cause via errno (or
         * rd_kafka_last_error() on newer versions). The old code decoded the
         * -1 return value with rd_kafka_errno2err(), which produced a
         * meaningless error code, so just report the raw failure here. */
        SYS_ERROR("rd_kafka_consume_callback err. ret[%d]", iRetVal);
    }
    return;
}
/**@fn kafkaConsume_Cb
 * @brief kafka consume callback (higher throughput than the other two legacy
 *        consume styles). Copies the message payload into a NUL-terminated
 *        heap buffer and forwards it to the center message handler.
 * @param[in] rkmessage kafka payload message
 * @param[in] opaque    user pointer (always NULL here; unused)
 * return HPR_VOID
 */
HPR_VOID KafkaConsumerFaceStaticLib::kafkaConsume_Cb(rd_kafka_message_t *rkmessage, HPR_VOID *opaque)
{
    do
    {
        if(NULL == rkmessage || NULL == rkmessage->payload || 0 == rkmessage->len)
        {
            //SYS_ERROR("kafkaConsume_Cb param err. rkmessage[%p] payload[%p] len[%lu]",rkmessage,rkmessage->payload,rkmessage->len);
            // An empty message body means no producer is currently writing to
            // this partition; the callback fires again as soon as data arrives.
            break;
        }
        /* actual payload parsing/dispatch below */
        //if(rd_kafka_message_errstr(rkmessage))
        //{
        // SYS_ERROR("KafkaConsume_Cb err[%s] ",rd_kafka_message_errstr(rkmessage));
        // break;
        //}
        // len + 1 and zero-fill so the payload is always NUL-terminated.
        char* kafkaMsg = new char[rkmessage->len + 1];
        memset(kafkaMsg, 0, rkmessage->len + 1);
        memcpy(kafkaMsg, (char*)rkmessage->payload, rkmessage->len);
        RecvMsg recvMsg;
        recvMsg.m_proType = KAFKA_TYPE;
        recvMsg.m_iMsgType = KAFKA_STATIC_HUMAN_BATCH_INSERT;
        recvMsg.m_pMsg = kafkaMsg;
        // NOTE(review): kafkaMsg is never freed here — presumably
        // OnRecvKafkaMsg() takes ownership of m_pMsg; verify against the
        // handler, otherwise this leaks one buffer per message.
        CenterMsgDealRes::Instance().OnRecvKafkaMsg(recvMsg);
    }while(0);
    return;
}