RocketMQ Source Code Analysis: Producer Startup

Reading Notes

Methods marked with /* */ comments in this article are analyzed in depth.

Main Text

When sending ordinary messages with RocketMQ, we usually configure the producer as follows:

<bean id="rocketmqProducer" class="org.apache.rocketmq.client.producer.DefaultMQProducer" init-method="start"
          destroy-method="shutdown">
    <property name="producerGroup" value="${rocketmq.producer.group}"/>
    <property name="namesrvAddr" value="${rocketmq.nameserver.address}"/>
</bean>
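
For readers not using Spring, a minimal plain-Java equivalent of the configuration above would look like the sketch below (the group name and NameServer address are placeholder values):

import org.apache.rocketmq.client.exception.MQClientException;
import org.apache.rocketmq.client.producer.DefaultMQProducer;

public class ProducerStartupDemo {
    public static void main(String[] args) throws MQClientException {
        DefaultMQProducer producer = new DefaultMQProducer("demo-producer-group");
        producer.setNamesrvAddr("127.0.0.1:9876");
        producer.start();      // same entry point as init-method="start"
        // ... send messages ...
        producer.shutdown();   // same as destroy-method="shutdown"
    }
}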

Notice that init-method is set to start, so the DefaultMQProducer.start method is invoked during bean initialization:
DefaultMQProducer:

public void start() throws MQClientException {
    this.setProducerGroup(withNamespace(this.producerGroup));
    /* producer startup */
    this.defaultMQProducerImpl.start();
    // TraceDispatcher is an interface for transmitting trace data asynchronously
    if (null != traceDispatcher) {
        try {
            traceDispatcher.start(this.getNamesrvAddr(), this.getAccessChannel());
        } catch (MQClientException e) {
            log.warn("trace dispatcher start failed ", e);
        }
    }
}
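
traceDispatcher is only non-null when message tracing was enabled at construction time. A minimal sketch, assuming the two-argument constructor with an enableMsgTrace flag provided by recent 4.x clients (verify against your client version):

// Enables the async trace dispatcher that is started in the code above
DefaultMQProducer tracedProducer = new DefaultMQProducer("demo-producer-group", true); // enableMsgTrace = true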

DefaultMQProducerImpl:

public void start() throws MQClientException {
    this.start(true);
}

DefaultMQProducerImpl:

public void start(final boolean startFactory) throws MQClientException {
    switch (this.serviceState) {
        case CREATE_JUST:
            this.serviceState = ServiceState.START_FAILED;
            // Check the configuration: mainly validate the format and length of the configured producerGroup and whether it conflicts with the default producer group
            this.checkConfig();
            if (!this.defaultMQProducer.getProducerGroup().equals(MixAll.CLIENT_INNER_PRODUCER_GROUP)) {
                // Change the instance name to the process ID
                this.defaultMQProducer.changeInstanceNameToPID();
            }
            /* get or create the client instance */
            this.mQClientFactory = MQClientManager.getInstance().getAndCreateMQClientInstance(this.defaultMQProducer, rpcHook, this.eventLoopGroup, this.eventExecutorGroup);
            // Register the producer: put the group-to-producer mapping into producerTable
            boolean registerOK = mQClientFactory.registerProducer(this.defaultMQProducer.getProducerGroup(), this);
            if (!registerOK) {
                this.serviceState = ServiceState.CREATE_JUST;
                throw new MQClientException("The producer group[" + this.defaultMQProducer.getProducerGroup()
                    + "] has been created before, specify another name please." + FAQUrl.suggestTodo(FAQUrl.GROUP_NAME_DUPLICATE_URL),
                    null);
            }
            this.topicPublishInfoTable.put(this.defaultMQProducer.getCreateTopicKey(), new TopicPublishInfo());
            if (startFactory) {
                /* start the client factory */
                mQClientFactory.start();
            }
            log.info("the producer [{}] start OK. sendMessageWithVIPChannel={}", this.defaultMQProducer.getProducerGroup(),
                this.defaultMQProducer.isSendMessageWithVIPChannel());
            // Mark the state as running
            this.serviceState = ServiceState.RUNNING;
            break;
        case RUNNING:
        case START_FAILED:
        case SHUTDOWN_ALREADY:
            throw new MQClientException("The producer service state not OK, maybe started once, "
                + this.serviceState
                + FAQUrl.suggestTodo(FAQUrl.CLIENT_SERVICE_NOT_OK),
                null);
        default:
            break;
    }
    // Send heartbeats to all brokers
    this.mQClientFactory.sendHeartbeatToAllBrokerWithLock();
}
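
The switch above only allows startup from the CREATE_JUST state; any other state throws. A quick sketch of what that means for callers (group name and address are placeholders):

DefaultMQProducer producer = new DefaultMQProducer("demo-producer-group");
producer.setNamesrvAddr("127.0.0.1:9876");
producer.start();       // CREATE_JUST -> RUNNING
try {
    producer.start();   // state is now RUNNING, so this second start throws
} catch (MQClientException e) {
    // "The producer service state not OK, maybe started once, RUNNING ..."
}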

MQClientManager:

public MQClientInstance getAndCreateMQClientInstance(final ClientConfig clientConfig, RPCHook rpcHook) {
    // Build the clientId
    String clientId = clientConfig.buildMQClientId();
    // Look up an existing MQClientInstance for this clientId
    MQClientInstance instance = this.factoryTable.get(clientId);
    if (null == instance) {
        // The constructor initializes the internal members of MQClientInstance
        instance =
            new MQClientInstance(clientConfig.cloneClientConfig(),
                this.factoryIndexGenerator.getAndIncrement(), clientId, rpcHook);
        // Save the mapping from clientId to the client instance
        MQClientInstance prev = this.factoryTable.putIfAbsent(clientId, instance);
        if (prev != null) {
            instance = prev;
            log.warn("Returned Previous MQClientInstance for clientId:[{}]", clientId);
        } else {
            log.info("Created new MQClientInstance for clientId:[{}]", clientId);
        }
    }
    return instance;
}
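
Note that the cache key is the clientId, which is by default the client IP plus "@" plus the instance name (the instance name has already been replaced by the process ID in changeInstanceNameToPID). A sketch of the consequence, assuming two producers in the same JVM with default settings:

// Both producers end up with the same clientId (same IP, same PID-based instance name),
// so the second start() reuses the MQClientInstance created by the first one via putIfAbsent.
DefaultMQProducer p1 = new DefaultMQProducer("demo-group-a");
DefaultMQProducer p2 = new DefaultMQProducer("demo-group-b");
p1.setNamesrvAddr("127.0.0.1:9876");
p2.setNamesrvAddr("127.0.0.1:9876");
p1.start();
p2.start();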

As for the MQ client startup flow, the client-side Producer and Consumer reuse the same flow, so the steps that follow apply to both Producers and Consumers.
MQClientInstance:

public void start() throws MQClientException {
    synchronized (this) {
        switch (this.serviceState) {
            case CREATE_JUST:
                this.serviceState = ServiceState.START_FAILED;
                // If no NameServer address was specified, fetch the address list from the remote address server
                if (null == this.clientConfig.getNamesrvAddr()) {
                    this.mQClientAPIImpl.fetchNameServerAddr();
                }
                /* start the Netty client */
                this.mQClientAPIImpl.start();
                /* start several scheduled tasks */
                this.startScheduledTask();
                // Start the message pulling service
                this.pullMessageService.start();
                // Start the rebalance service
                this.rebalanceService.start();
                // Start the internal default producer (startFactory is false, so the factory is not started again)
                this.defaultMQProducer.getDefaultMQProducerImpl().start(false);
                log.info("the client factory [{}] start OK", this.clientId);
                this.serviceState = ServiceState.RUNNING;
                break;
            case RUNNING:
                break;
            case SHUTDOWN_ALREADY:
                break;
            case START_FAILED:
                throw new MQClientException("The Factory object[" + this.getClientId() + "] has been created before, and failed.", null);
            default:
                break;
        }
    }
}

MQClientAPIImpl:

public void start() {
    this.remotingClient.start();
}

NettyRemotingClient:

public void start() {
    this.defaultEventExecutorGroup = new DefaultEventExecutorGroup(
        nettyClientConfig.getClientWorkerThreads(),
        new ThreadFactory() {
            private AtomicInteger threadIndex = new AtomicInteger(0);
            @Override
            public Thread newThread(Runnable r) {
                return new Thread(r, "NettyClientWorkerThread_" + this.threadIndex.incrementAndGet());
            }
        });
    Bootstrap handler = this.bootstrap.group(this.eventLoopGroupWorker).channel(NioSocketChannel.class)
        .option(ChannelOption.TCP_NODELAY, true)
        .option(ChannelOption.SO_KEEPALIVE, false)
        .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, nettyClientConfig.getConnectTimeoutMillis())
        .option(ChannelOption.SO_SNDBUF, nettyClientConfig.getClientSocketSndBufSize())
        .option(ChannelOption.SO_RCVBUF, nettyClientConfig.getClientSocketRcvBufSize())
        .handler(new ChannelInitializer<SocketChannel>() {
            @Override
            public void initChannel(SocketChannel ch) throws Exception {
                ChannelPipeline pipeline = ch.pipeline();
                if (nettyClientConfig.isUseTLS()) {
                    if (null != sslContext) {
                        pipeline.addFirst(defaultEventExecutorGroup, "sslHandler", sslContext.newHandler(ch.alloc()));
                        log.info("Prepend SSL handler");
                    } else {
                        log.warn("Connections are insecure as SSLContext is null!");
                    }
                }
                pipeline.addLast(
                    defaultEventExecutorGroup,
                    new NettyEncoder(),
                    new NettyDecoder(),
                    new IdleStateHandler(0, 0, nettyClientConfig.getClientChannelMaxIdleTimeSeconds()),
                    new NettyConnectManageHandler(),
                    // Register the request handler
                    new NettyClientHandler());
            }
        });
    this.timer.scheduleAtFixedRate(new TimerTask() {
        @Override
        public void run() {
            try {
                // Called periodically to scan the response table and expire timed-out requests
                NettyRemotingClient.this.scanResponseTable();
            } catch (Throwable e) {
                log.error("scanResponseTable exception", e);
            }
        }
    }, 1000 * 3, 1000);
    if (this.channelEventListener != null) {
        this.nettyEventExecutor.start();
    }
}
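
scanResponseTable is implemented in NettyRemotingAbstract; the general idea is to walk the table of in-flight requests and fail any entry whose timeout has elapsed, so that callers are not blocked forever. A simplified, self-contained sketch of that pattern (the InFlight type below is hypothetical, not a RocketMQ class):

import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.*;

final class ResponseTableScanner {
    // Hypothetical in-flight entry: a future plus its absolute deadline.
    static final class InFlight {
        final CompletableFuture<Object> future = new CompletableFuture<>();
        final long deadlineMillis;
        InFlight(long timeoutMillis) { this.deadlineMillis = System.currentTimeMillis() + timeoutMillis; }
    }

    private final ConcurrentMap<Integer, InFlight> responseTable = new ConcurrentHashMap<>();

    // Same idea as scanResponseTable(): remove timed-out entries and fail their futures.
    void scan() {
        Iterator<Map.Entry<Integer, InFlight>> it = responseTable.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry<Integer, InFlight> entry = it.next();
            if (entry.getValue().deadlineMillis <= System.currentTimeMillis()) {
                it.remove();
                entry.getValue().future.completeExceptionally(
                    new TimeoutException("request " + entry.getKey() + " timed out"));
            }
        }
    }
}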

MQClientInstance:

private void startScheduledTask() {
    // Periodically fetch the NameServer address
    if (null == this.clientConfig.getNamesrvAddr()) {
        this.scheduledExecutorService.scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                try {
                    MQClientInstance.this.mQClientAPIImpl.fetchNameServerAddr();
                } catch (Exception e) {
                    log.error("ScheduledTask fetchNameServerAddr exception", e);
                }
            }
        }, 1000 * 10, 1000 * 60 * 2, TimeUnit.MILLISECONDS);
    }
    // Periodically update topic route info: a GET_ROUTEINFO_BY_TOPIC request is sent to the NameServer to fetch the latest topic route info, which is then used to update the consumers' subscription info and the producers' publish info
    this.scheduledExecutorService.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            try {
                MQClientInstance.this.updateTopicRouteInfoFromNameServer();
            } catch (Exception e) {
                log.error("ScheduledTask updateTopicRouteInfoFromNameServer exception", e);
            }
        }
    }, 10, this.clientConfig.getPollNameServerInterval(), TimeUnit.MILLISECONDS);
    // Periodically clean up offline brokers and send heartbeats to all brokers
    // As before, the client's producer and consumer data are packed into the heartbeat payload and sent to the brokers via the HEART_BEAT command; on the broker side the processor that handles heartbeats is ClientManageProcessor
    this.scheduledExecutorService.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            try {
                MQClientInstance.this.cleanOfflineBroker();
                MQClientInstance.this.sendHeartbeatToAllBrokerWithLock();
            } catch (Exception e) {
                log.error("ScheduledTask sendHeartbeatToAllBroker exception", e);
            }
        }
    }, 1000, this.clientConfig.getHeartbeatBrokerInterval(), TimeUnit.MILLISECONDS);
    // Periodically persist consumer offsets; persistence happens in two ways: writing to a local file or updating the offsets on the broker side
    this.scheduledExecutorService.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            try {
                MQClientInstance.this.persistAllConsumerOffset();
            } catch (Exception e) {
                log.error("ScheduledTask persistAllConsumerOffset exception", e);
            }
        }
    }, 1000 * 10, this.clientConfig.getPersistConsumerOffsetInterval(), TimeUnit.MILLISECONDS);
    // Periodically adjust the thread pool: the strategy compares the total number of messages in the processing queues against a threshold to decide whether to grow or shrink the pool
    // However, the grow/shrink implementations are empty, so by default this scheduled task has no effect
    this.scheduledExecutorService.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            try {
                MQClientInstance.this.adjustThreadPool();
            } catch (Exception e) {
                log.error("ScheduledTask adjustThreadPool exception", e);
            }
        }
    }, 1, 1, TimeUnit.MINUTES);
}
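
The intervals of these tasks come from ClientConfig and can be tuned on the producer itself, since DefaultMQProducer extends ClientConfig. A sketch, assuming setter names that match the getters used above; the defaults noted in the comments are those of recent 4.x clients and should be verified against your version:

DefaultMQProducer producer = new DefaultMQProducer("demo-producer-group");
producer.setPollNameServerInterval(30 * 1000);        // topic route refresh, default 30s
producer.setHeartbeatBrokerInterval(30 * 1000);       // broker heartbeat / offline-broker cleanup, default 30s
producer.setPersistConsumerOffsetInterval(5 * 1000);  // consumer offset persistence (only used by consumers), default 5s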

At this point, the analysis of the Producer startup flow is complete.
