Fabric Source Code Analysis, Part 3: Startup Flow of the Orderer

I. Starting the Ordering Node

Originally I meant to cover the Peer and the Orderer together, but there turned out to be too much material, so they had to be split. In fact, once you understand the Peer's flow, you are most of the way to understanding the Orderer's. The difference is that, since the two serve different purposes, the services they start naturally differ, such as communication and database services; at the bottom layer, though, the way those services are constructed is much the same. Let's look at the relevant ordering-node source code.

II. Startup Flow

If the Peer's startup entry function seemed simple, the Orderer's is even simpler:
1. Startup

func main() {
	server.Main()
}

So there is no room to be lazy here, or there would be nothing left to analyze; we have to jump into the server.Main it calls:

func Main() {
	// kingpin is a command-line parsing tool, playing the role here that
	// cobra plays in the Peer
	fullCmd := kingpin.MustParse(app.Parse(os.Args[1:]))

	// "version" command
	if fullCmd == version.FullCommand() {
		fmt.Println(metadata.GetVersionInfo())
		return
	}

	// load the configuration data from the .yaml file
	conf, err := localconfig.Load()
	if err != nil {
		logger.Error("failed to parse config: ", err)
		os.Exit(1)
	}

	// initialize logging
	initializeLogging()
	// initialize the local MSP
	initializeLocalMsp(conf)

	// pretty-print the configuration
	prettyPrintStruct(conf)
	// start according to the parsed command and configuration
	Start(fullCmd, conf)
}
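
For reference, the app, start, and version values parsed above are package-level kingpin declarations in the same file. Below is a minimal, runnable demo of the same pattern; the command help strings are approximations rather than copies of the source:

package main

import (
	"fmt"
	"os"

	"gopkg.in/alecthomas/kingpin.v2"
)

var (
	app     = kingpin.New("orderer", "Hyperledger Fabric orderer node")
	start   = app.Command("start", "Start the orderer node").Default()
	version = app.Command("version", "Show version information")
)

func main() {
	// kingpin dispatches on the selected sub-command, just like Main() above
	switch kingpin.MustParse(app.Parse(os.Args[1:])) {
	case start.FullCommand():
		fmt.Println("starting...")
	case version.FullCommand():
		fmt.Println("v1.4.x")
	}
}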

The code still looks quite simple, and that is exactly when to raise your guard: when code keeps being this simple, it tempts your brain into assuming the analysis will be simple too. That cannot be the case here. The Orderer is one of the most important parts of the whole blockchain system; it is the component that actually produces blocks.
Logging initialization and pretty-printing are of no interest for now. Initializing the local MSP (including the signing certificates and so on) essentially means calling LoadLocalMsp in mgmt.go and then Setup; it will be analyzed in detail later together with the rest of the MSP, but a short sketch follows below.
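
A minimal sketch of what initializeLocalMsp boils down to, assuming the v1.4 msp/mgmt package (imported as mspmgmt); error handling follows the same panic-on-failure style as the rest of Main:

// load the local MSP from conf.General.LocalMSPDir with the configured BCCSP
err := mspmgmt.LoadLocalMsp(conf.General.LocalMSPDir, conf.General.BCCSP, conf.General.LocalMSPID)
if err != nil {
	logger.Panicf("Failed to initialize local MSP: %v", err)
}
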
That leaves the final function, Start, as the place with real substance. First, lay out the code:

func Start(cmd string, conf *localconfig.TopLevel) {
	// the command and the parsed config are passed in
	// extract and validate the bootstrap block (genesis block)
	bootstrapBlock := extractBootstrapBlock(conf)
	if err := ValidateBootstrapBlock(bootstrapBlock); err != nil {
		logger.Panicf("Failed validating bootstrap block: %v", err)
	}

	// process the operations configuration and its listen address
	opsSystem := newOperationsSystem(conf.Operations, conf.Metrics)
	err := opsSystem.Start() // start listening
	if err != nil {
		logger.Panicf("failed to initialize operations subsystem: %s", err)
	}
	defer opsSystem.Stop()
	metricsProvider := opsSystem.Provider

	// create the ledger factory
	lf, _ := createLedgerFactory(conf, metricsProvider)
	sysChanLastConfigBlock := extractSysChanLastConfig(lf, bootstrapBlock)
	clusterBootBlock := selectClusterBootBlock(bootstrapBlock, sysChanLastConfigBlock)
	// is the consensus type a cluster type?
	clusterType := isClusterType(clusterBootBlock)
	// create the local MSP signer instance -- an empty object
	signer := localmsp.NewSigner()

	// initialize the cluster client config: signer, private key, certificates, TLS, etc.
	clusterClientConfig := initializeClusterClientConfig(conf, clusterType, bootstrapBlock)
	clusterDialer := &cluster.PredicateDialer{
		ClientConfig: clusterClientConfig,
	}
	// create the replicator -- used within a cluster
	r := createReplicator(lf, bootstrapBlock, conf, clusterClientConfig.SecOpts, signer)
	// Only clusters that are equipped with a recent config block can replicate.
	if clusterType && conf.General.GenesisMethod == "file" {
		r.replicateIfNeeded(bootstrapBlock)
	}
	// as before, create an observer that reports logging statistics as metrics
	logObserver := floggingmetrics.NewObserver(metricsProvider)
	flogging.Global.SetObserver(logObserver)

	// use the configuration built above to initialize the gRPC server -- this
	// constructs the CA certificate components
	serverConfig := initializeServerConfig(conf, metricsProvider)
	// SecureOptions and KeepaliveOptions carry the TLS key pair, the client- and
	// server-side certificates, and the response/wait timeouts
	grpcServer := initializeGrpcServer(conf, serverConfig)
	// credential support
	caSupport := &comm.CredentialSupport{
		AppRootCAsByChain:           make(map[string]comm.CertificateBundle),
		OrdererRootCAsByChainAndOrg: make(comm.OrgRootCAs),
		ClientRootCAs:               serverConfig.SecOpts.ClientRootCAs,
	}

	clusterServerConfig := serverConfig
	clusterGRPCServer := grpcServer
	if clusterType {
		clusterServerConfig, clusterGRPCServer = configureClusterListener(conf, serverConfig, grpcServer, ioutil.ReadFile)
	}

	var servers = []*comm.GRPCServer{grpcServer}
	// If we have a separate gRPC server for the cluster, we need to update its TLS
	// CA certificate pool too.
	if clusterGRPCServer != grpcServer {
		servers = append(servers, clusterGRPCServer)
	}
	// callback invoked on TLS connection authentication; updates each channel's
	// TLS client CA certificates
	tlsCallback := func(bundle *channelconfig.Bundle) {
		// only need to do this if mutual TLS is required or if the orderer node is part of a cluster
		if grpcServer.MutualTLSRequired() || clusterType {
			logger.Debug("Executing callback to update root CAs")
			updateTrustedRoots(caSupport, bundle, servers...)
			if clusterType {
				updateClusterDialer(caSupport, clusterDialer, clusterClientConfig.SecOpts.ServerRootCAs)
			}
		}
	}

	// generate a new signature header
	sigHdr, err := signer.NewSignatureHeader()
	if err != nil {
		logger.Panicf("Failed creating a signature header: %v", err)
	}

	expirationLogger := flogging.MustGetLogger("certmonitor")
	crypto.TrackExpiration(
		serverConfig.SecOpts.UseTLS,
		serverConfig.SecOpts.Certificate,
		[][]byte{clusterClientConfig.SecOpts.Certificate},
		sigHdr.Creator,
		expirationLogger.Warnf, // This can be used to piggyback a metric event in the future
		time.Now(),
		time.AfterFunc)

	// initialize the multichannel registrar -- creates the data storage paths,
	// including the index database and the per-channel block data
	manager := initializeMultichannelRegistrar(clusterBootBlock, r, clusterDialer, clusterServerConfig, clusterGRPCServer, conf, signer, metricsProvider, opsSystem, lf, tlsCallback)
	// set the mutual-TLS flag
	mutualTLS := serverConfig.SecOpts.UseTLS && serverConfig.SecOpts.RequireClientCert
	expiration := conf.General.Authentication.NoExpirationChecks
	// create the Orderer's AtomicBroadcast service
	server := NewServer(manager, metricsProvider, &conf.Debug, conf.General.Authentication.TimeWindow, mutualTLS, expiration)

	logger.Infof("Starting %s", metadata.GetVersionInfo())
	go handleSignals(addPlatformSignals(map[os.Signal]func(){
		syscall.SIGTERM: func() {
			grpcServer.Stop()
			if clusterGRPCServer != grpcServer {
				clusterGRPCServer.Stop()
			}
		},
	}))

	if clusterGRPCServer != grpcServer {
		logger.Info("Starting cluster listener on", clusterGRPCServer.Address())
		// start the cluster's dedicated gRPC server
		go clusterGRPCServer.Start()
	}
	// initialize the profiling service and its listener; the Peer has similar code
	initializeProfilingService(conf)
	// register the ordering service with the gRPC server
	ab.RegisterAtomicBroadcastServer(grpcServer.Server(), server)
	logger.Info("Beginning to serve requests")
	// start the gRPC server and begin listening for Peer requests
	grpcServer.Start()
}

By now you should be immune to large blocks of code. This big block still hides quite a few smaller modules; next, let's continue with the ledger part:

// bootstrap block (genesis block)
func extractBootstrapBlock(conf *localconfig.TopLevel) *cb.Block {
	var bootstrapBlock *cb.Block

	// Select the bootstrapping mechanism
	switch conf.General.GenesisMethod {
	case "provisional":
		bootstrapBlock = encoder.New(genesisconfig.Load(conf.General.GenesisProfile)).GenesisBlockForChannel(conf.General.SystemChannel)
	case "file":
		bootstrapBlock = file.New(conf.General.GenesisFile).GenesisBlock()
	default:
		logger.Panic("Unknown genesis method:", conf.General.GenesisMethod)
	}

	return bootstrapBlock
}
func New(fileName string) bootstrap.Helper {
	return &fileBootstrapper{
		// the genesis block file, created with the configtxgen tool, gets parsed here
		GenesisBlockFile: fileName,
	}
}
type Block struct {
	Header               *BlockHeader   `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
	Data                 *BlockData     `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
	Metadata             *BlockMetadata `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}
func (b *fileBootstrapper) GenesisBlock() *cb.Block {
	bootstrapFile, fileErr := ioutil.ReadFile(b.GenesisBlockFile)
	if fileErr != nil {
		panic(errors.Errorf("unable to bootstrap orderer. Error reading genesis block file: %v", fileErr))
	}
	genesisBlock := &cb.Block{}
	unmarshallErr := proto.Unmarshal(bootstrapFile, genesisBlock)
	if unmarshallErr != nil {
		panic(errors.Errorf("unable to bootstrap orderer. Error unmarshalling genesis block: %v", unmarshallErr))

	}
	return genesisBlock
}
// ledger factory
func createLedgerFactory(conf *config.TopLevel, metricsProvider metrics.Provider) (blockledger.Factory, string) {
	var lf blockledger.Factory
	var ld string
	switch conf.General.LedgerType {
	case "file":
		ld = conf.FileLedger.Location
		if ld == "" {
			ld = createTempDir(conf.FileLedger.Prefix)
		}
		logger.Debug("Ledger dir:", ld)
		// create a new ledger object - each channel keeps its own ledger in its own sub-directory
		lf = fileledger.New(ld, metricsProvider)
		// The file-based ledger stores the blocks for each channel
		// in a fsblkstorage.ChainsDir sub-directory that we have
		// to create separately. Otherwise the call to the ledger
		// Factory's ChainIDs below will fail (dir won't exist).
		createSubDir(ld, fsblkstorage.ChainsDir)
	case "json":
		ld = conf.FileLedger.Location
		if ld == "" {
			ld = createTempDir(conf.FileLedger.Prefix)
		}
		logger.Debug("Ledger dir:", ld)
		lf = jsonledger.New(ld)
	case "ram":
		fallthrough
	default:
		// in-memory ledger
		lf = ramledger.New(int(conf.RAMLedger.HistorySize))
	}
	return lf, ld
}
func NewProvider(conf *Conf, indexConfig *blkstorage.IndexConfig, metricsProvider metrics.Provider) blkstorage.BlockStoreProvider {
	p := leveldbhelper.NewProvider(&leveldbhelper.Conf{DBPath: conf.getIndexDir()})
	// create stats instance at provider level and pass to newFsBlockStore
	stats := newStats(metricsProvider)
	return &FsBlockstoreProvider{conf, indexConfig, p, stats}
}
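
As an aside, the factory can be driven standalone. Here is a minimal sketch that opens an orderer ledger directory and lists channel heights; the v1.4 import paths are assumed and the data directory is made up:

package main

import (
	"fmt"

	fileledger "github.com/hyperledger/fabric/common/ledger/blockledger/file"
	"github.com/hyperledger/fabric/common/metrics/disabled"
)

func main() {
	// open the file-based ledger factory over an existing orderer data directory
	lf := fileledger.New("/var/hyperledger/production/orderer", &disabled.Provider{})
	for _, chainID := range lf.ChainIDs() {
		rw, err := lf.GetOrCreate(chainID)
		if err != nil {
			panic(err)
		}
		fmt.Println(chainID, "height:", rw.Height())
	}
}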

Next, look at the initialization of the multichannel registrar:

func initializeMultichannelRegistrar(
	bootstrapBlock *cb.Block,
	ri *replicationInitiator,
	clusterDialer *cluster.PredicateDialer,
	srvConf comm.ServerConfig,
	srv *comm.GRPCServer,
	conf *localconfig.TopLevel,
	signer crypto.LocalSigner,
	metricsProvider metrics.Provider,
	healthChecker healthChecker,
	lf blockledger.Factory,
	callbacks ...channelconfig.BundleActor,
) *multichannel.Registrar {
	// obtain the relevant ID from the genesis block
	genesisBlock := extractBootstrapBlock(conf)
	// Are we bootstrapping?
	if len(lf.ChainIDs()) == 0 {
		initializeBootstrapChannel(genesisBlock, lf)
	} else {
		logger.Info("Not bootstrapping because of existing channels")
	}

	consenters := make(map[string]consensus.Consenter)

	// pass in the ledger factory object, consensus information, and so on
	registrar := multichannel.NewRegistrar(*conf, lf, signer, metricsProvider, callbacks...)

	consenters["solo"] = solo.New()
	var kafkaMetrics *kafka.Metrics
	consenters["kafka"], kafkaMetrics = kafka.New(conf.Kafka, metricsProvider, healthChecker)
	// Note, we pass a 'nil' channel here, we could pass a channel that
	// closes if we wished to cleanup this routine on exit.
	go kafkaMetrics.PollGoMetricsUntilStop(time.Minute, nil)
	if isClusterType(bootstrapBlock) {
		// here is where Raft enters the picture
		initializeEtcdraftConsenter(consenters, conf, lf, clusterDialer, bootstrapBlock, ri, srvConf, srv, registrar, metricsProvider)
	}
	registrar.Initialize(consenters)
	return registrar
}
func initializeBootstrapChannel(genesisBlock *cb.Block, lf blockledger.Factory) {
	// this is where the channel ID is actually obtained
	chainID, err := utils.GetChainIDFromBlock(genesisBlock)
	if err != nil {
		logger.Fatal("Failed to parse channel ID from genesis block:", err)
	}
	// then fetch the ledger object for that channel
	gl, err := lf.GetOrCreate(chainID)
	if err != nil {
		logger.Fatal("Failed to create the system channel:", err)
	}

	// append the genesis block to the channel ledger obtained via the channel ID:
	// gl.Append(genesisBlock) => fsBlockStore.AddBlock(block *common.Block)
	//                         => blockfileMgr.addBlock(block *common.Block)
	// the genesis block is first serialized to a byte array with protobuf; its size
	// decides whether a new block file must be started (the 64MB limit mentioned
	// earlier); the block length and then the serialized bytes are appended to the
	// block file, the index database is updated, and the checkpoint and blockchain
	// info are refreshed
	if err := gl.Append(genesisBlock); err != nil {
		logger.Fatal("Could not write genesis block to ledger:", err)
	}
}
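
Since utils.GetChainIDFromBlock keeps coming up, here is a standalone sketch that reads a block file produced by configtxgen and prints its channel ID; the v1.4 proto import paths are assumed, matching the cb alias used above:

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/golang/protobuf/proto"
	cb "github.com/hyperledger/fabric/protos/common"
	"github.com/hyperledger/fabric/protos/utils"
)

func main() {
	// usage: inspectblock <path-to-genesis.block>
	raw, err := ioutil.ReadFile(os.Args[1])
	if err != nil {
		panic(err)
	}
	block := &cb.Block{}
	if err := proto.Unmarshal(raw, block); err != nil {
		panic(err)
	}
	chainID, err := utils.GetChainIDFromBlock(block)
	if err != nil {
		panic(err)
	}
	fmt.Printf("block %d of channel %q, %d transaction(s)\n",
		block.Header.Number, chainID, len(block.Data.Data))
}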

Notice that many initialization functions parse the genesis block to obtain the channel ID, which is interesting. The GetOrCreate interface has four implementations, one in each of the four directories under common/ledger/blockledger. It calls blkstorageProvider.OpenBlockStore, which in turn calls OpenBlockStore in common/ledger/blkstorage/fs_blockstore_provider.go. That is:

func (flf *fileLedgerFactory) GetOrCreate(chainID string) (blockledger.ReadWriter, error) {
	flf.mutex.Lock()
	defer flf.mutex.Unlock()

	key := chainID
	// check cache
	ledger, ok := flf.ledgers[key]
	if ok {
		return ledger, nil
	}
	// open fresh
	blockStore, err := flf.blkstorageProvider.OpenBlockStore(key)
	if err != nil {
		return nil, err
	}
	ledger = NewFileLedger(blockStore)
	flf.ledgers[key] = ledger // cache the ledger wrapping the fsBlockStore instance created below
	return ledger, nil
}
type DBHandle struct {
	dbName string
	db     *DB
}
func (p *FsBlockstoreProvider) OpenBlockStore(ledgerid string) (blkstorage.BlockStore, error) {
	indexStoreHandle := p.leveldbProvider.GetDBHandle(ledgerid)
	return newFsBlockStore(ledgerid, p.conf, p.indexConfig, indexStoreHandle, p.stats), nil
}
type fsBlockStore struct {
	id      string
	conf    *Conf
	// the block file manager that does the actual storage work (see blockfileMgr below)
	fileMgr *blockfileMgr
	stats   *ledgerStats
}
type blockfileMgr struct {
	// directory for this channel's chain
	rootDir           string
	// config contents: the orderer data directory and the max block file size (default 64MB)
	conf              *Conf
	// LevelDB handle obtained via the channel ID, i.e. the channel ID and LevelDB are bound together
	db                *leveldbhelper.DBHandle
	// blockchain index struct; implements the index interface
	index             index
	// block checkpoint information
	cpInfo            *checkpointInfo
	// condition variable guarding access to the checkpoint info
	cpInfoCond        *sync.Cond
	// actual path and handle of the current block file
	currentFileWriter *blockfileWriter
	// blockchain summary info: height, current hash, previous block's hash
	bcInfo            atomic.Value
}
func newFsBlockStore(id string, conf *Conf, indexConfig *blkstorage.IndexConfig,
	dbHandle *leveldbhelper.DBHandle, stats *stats) *fsBlockStore {
	// create the fileMgr
	fileMgr := newBlockfileMgr(id, conf, indexConfig, dbHandle)

	// create ledgerStats and initialize blockchain_height stat
	ledgerStats := stats.ledgerStats(id)
	// fetch the blockchain info object
	info := fileMgr.getBlockchainInfo()
	ledgerStats.updateBlockchainHeight(info.Height)

	return &fsBlockStore{id, conf, fileMgr, ledgerStats}
}
// newBlockfileMgr calls the function below to build the index database
func newBlockIndex(indexConfig *blkstorage.IndexConfig, db *leveldbhelper.DBHandle) (*blockIndex, error) {
	indexItems := indexConfig.AttrsToIndex
	logger.Debugf("newBlockIndex() - indexItems:[%s]", indexItems)
	indexItemsMap := make(map[blkstorage.IndexableAttr]bool)
	for _, indexItem := range indexItems {
		indexItemsMap[indexItem] = true
	}
	// This dependency is needed because the index 'IndexableAttrTxID' is used for detecting the duplicate txid
	// and the results are reused in the other two indexes. Ideally, all three indexes should be merged into one
	// for efficiency purpose - [FAB-10587]
	if (indexItemsMap[blkstorage.IndexableAttrTxValidationCode] || indexItemsMap[blkstorage.IndexableAttrBlockTxID]) &&
		!indexItemsMap[blkstorage.IndexableAttrTxID] {
		return nil, errors.Errorf("dependent index [%s] is not enabled for [%s] or [%s]",
			blkstorage.IndexableAttrTxID, blkstorage.IndexableAttrTxValidationCode, blkstorage.IndexableAttrBlockTxID)
	}
	return &blockIndex{indexItemsMap, db}, nil
}
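
The index store above rides on a single physical LevelDB instance. Below is a small sketch of how leveldbhelper namespaces it per ledger; the v1.4 package path is assumed, and the key-prefixing detail is from memory of the implementation:

package main

import (
	"github.com/hyperledger/fabric/common/ledger/util/leveldbhelper"
)

func main() {
	// one physical LevelDB under /tmp/index (made-up path)
	p := leveldbhelper.NewProvider(&leveldbhelper.Conf{DBPath: "/tmp/index"})
	defer p.Close()

	// each ledger ID gets a logical namespace; keys are stored with the
	// dbName as a prefix, so channels never collide inside the shared DB
	h := p.GetDBHandle("mychannel")
	if err := h.Put([]byte("someIndexKey"), []byte("someValue"), true); err != nil {
		panic(err)
	}
}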

As the code above shows, once you have the channel ID you can get the corresponding ledger, and from that build a ChainSupport; with a ChainSupport, the related objects can be stored in key-value form. Here is its definition:

type ChainSupport struct {
	// ledger resources object: holds the channel config resources (configResources)
	// and the block ledger object (FileLedger)
	*ledgerResources
	// filters channel messages; there are four by default:
	// EmptyRejectRule rejects empty messages
	// expirationRejectRule rejects messages whose signer identity certificate has expired
	// MaxBytesRule checks the maximum message size (default 98MB)
	// sigFilter checks that the message signature satisfies the channel's
	// ChannelWriters (/Channel/Writers) write-permission policy
	msgprocessor.Processor
	// builds new blocks and commits them to the block files; creates new channels
	// and updates channel configuration. At initialization it records the latest
	// block number (lastBlock), the channel config sequence (lastConfigSeq), the
	// latest config block number (lastConfigBlockNum), the multichannel Registrar
	// (for creating new application channels), and the chain support object of the
	// associated channel (for channel config updates)
	*BlockWriter
	// the consensus component orders transactions, commits them to the cached
	// message batch, and drives block cutting and channel management
	consensus.Chain
	// message cutter: reads the Orderer config of the given channel (consensus
	// type, batch timeout, max block bytes, channel limits such as channel count)
	// and, based on that config, creates the cutter (receiver type) that splits
	// the locally cached message batch by the block-cutting rules; the block
	// writer then assembles the batches into new blocks and commits them to the
	// ledger's block files
	cutter blockcutter.Receiver
	// local signer
	crypto.LocalSigner
}

chain := newChainSupport( // build the chain support object for an application channel
	r,
	ledgerResources,
	consenters,
	signer)
r.chains[chainID] = chain // register the chain support object with the multichannel registrar

chain.start() // start the chain support object

2. Broadcast Service
The broadcast service handles the following kinds of messages:
ordinary transactions, channel-creation messages, and channel-update messages. See the code below:

func NewServer(
	r *multichannel.Registrar,
	metricsProvider metrics.Provider,
	debug *localconfig.Debug,
	timeWindow time.Duration,
	mutualTLS bool,
	expirationCheckDisabled bool,
) ab.AtomicBroadcastServer {
	s := &server{
		dh: deliver.NewHandler(
			deliverSupport{Registrar: r},
			timeWindow,
			mutualTLS,
			deliver.NewMetrics(metricsProvider),
			expirationCheckDisabled,
		),
		bh: &broadcast.Handler{
			SupportRegistrar: broadcastSupport{Registrar: r},
			Metrics:          broadcast.NewMetrics(metricsProvider),
		},
		debug:     debug,
		Registrar: r,
	}
	return s
}
// receive client messages
func (s *server) Broadcast(srv ab.AtomicBroadcast_BroadcastServer) error {
	logger.Debugf("Starting new Broadcast handler")
	defer func() {
		if r := recover(); r != nil {
			logger.Criticalf("Broadcast client triggered panic: %s\n%s", r, debug.Stack())
		}
		logger.Debugf("Closing Broadcast stream")
	}()
	return s.bh.Handle(&broadcastMsgTracer{
		AtomicBroadcast_BroadcastServer: srv,
		msgTracer: msgTracer{
			debug:    s.debug,
			function: "Broadcast",
		},
	})
}
// handle messages - comparable to the message-processing hub in Bitcoin or EOS
func (bh *Handler) Handle(srv ab.AtomicBroadcast_BroadcastServer) error {
	addr := util.ExtractRemoteAddress(srv.Context())
	logger.Debugf("Starting new broadcast loop for %s", addr)
	for {
		msg, err := srv.Recv()
		if err == io.EOF {
			logger.Debugf("Received EOF from %s, hangup", addr)
			return nil
		}
		if err != nil {
			logger.Warningf("Error reading from %s: %s", addr, err)
			return err
		}

		// validate the message and hand it off for ordering
		resp := bh.ProcessMessage(msg, addr)
		err = srv.Send(resp)
		if resp.Status != cb.Status_SUCCESS {
			return err
		}

		if err != nil {
			logger.Warningf("Error sending to %s: %s", addr, err)
			return err
		}
	}

}
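
From the client's point of view, Broadcast is just a bidirectional gRPC stream. Here is a minimal client sketch; the endpoint is a placeholder, TLS is omitted, and a real call would send a properly signed transaction envelope instead of an empty one:

package main

import (
	"context"
	"fmt"

	cb "github.com/hyperledger/fabric/protos/common"
	ab "github.com/hyperledger/fabric/protos/orderer"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("orderer.example.com:7050", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	stream, err := ab.NewAtomicBroadcastClient(conn).Broadcast(context.Background())
	if err != nil {
		panic(err)
	}

	env := &cb.Envelope{} // placeholder; normally a signed transaction envelope
	if err := stream.Send(env); err != nil {
		panic(err)
	}
	resp, err := stream.Recv()
	if err != nil {
		panic(err)
	}
	fmt.Println("broadcast status:", resp.Status)
}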

3. Deliver Service
The deliver service includes the following parts:
receiving requests sent by clients and parsing the client messages. See the code below:

func (s *server) Deliver(srv ab.AtomicBroadcast_DeliverServer) error {
	logger.Debugf("Starting new Deliver handler")
	defer func() {
		if r := recover(); r != nil {
			logger.Criticalf("Deliver client triggered panic: %s\n%s", r, debug.Stack())
		}
		logger.Debugf("Closing Deliver stream")
	}()

	policyChecker := func(env *cb.Envelope, channelID string) error {
		chain := s.GetChain(channelID)
		if chain == nil {
			return errors.Errorf("channel %s not found", channelID)
		}
		// In maintenance mode, we typically require the signature of /Channel/Orderer/Readers.
		// This will block Deliver requests from peers (which normally satisfy /Channel/Readers).
		sf := msgprocessor.NewSigFilter(policies.ChannelReaders, policies.ChannelOrdererReaders, chain)
		return sf.Apply(env)
	}
	deliverServer := &deliver.Server{
		PolicyChecker: deliver.PolicyCheckerFunc(policyChecker),
		Receiver: &deliverMsgTracer{
			Receiver: srv,
			msgTracer: msgTracer{
				debug:    s.debug,
				function: "Deliver",
			},
		},
		ResponseSender: &responseSender{
			AtomicBroadcast_DeliverServer: srv,
		},
	}
	return s.dh.Handle(srv.Context(), deliverServer)
}
func (h *Handler) Handle(ctx context.Context, srv *Server) error {
	addr := util.ExtractRemoteAddress(ctx)
	logger.Debugf("Starting new deliver loop for %s", addr)
	h.Metrics.StreamsOpened.Add(1)
	defer h.Metrics.StreamsClosed.Add(1)
	for {
		logger.Debugf("Attempting to read seek info message from %s", addr)
		envelope, err := srv.Recv()
		if err == io.EOF {
			logger.Debugf("Received EOF from %s, hangup", addr)
			return nil
		}
		if err != nil {
			logger.Warningf("Error reading from %s: %s", addr, err)
			return err
		}

		// deliver the requested block data
		status, err := h.deliverBlocks(ctx, srv, envelope)
		if err != nil {
			return err
		}

		err = srv.SendStatusResponse(status)
		if status != cb.Status_SUCCESS {
			return err
		}
		if err != nil {
			logger.Warningf("Error sending to %s: %s", addr, err)
			return err
		}

		logger.Debugf("Waiting for new SeekInfo from %s", addr)
	}
}

By parsing the client's message, the service fetches the requested block data from the ledger and replies to the client; if a requested block has not been produced yet, the call blocks until it becomes available.
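
What a client actually sends on the Deliver stream is an envelope wrapping a SeekInfo. A sketch of the payload for "blocks 0 through 10, waiting for any that do not exist yet" follows; signing and envelope wrapping are omitted:

seekInfo := &ab.SeekInfo{
	Start: &ab.SeekPosition{Type: &ab.SeekPosition_Oldest{Oldest: &ab.SeekOldest{}}},
	Stop: &ab.SeekPosition{Type: &ab.SeekPosition_Specified{
		Specified: &ab.SeekSpecified{Number: 10},
	}},
	// BLOCK_UNTIL_READY is what produces the blocking behavior described above
	Behavior: ab.SeekInfo_BLOCK_UNTIL_READY,
}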

4. Consensus
This part covers starting the consensus service plus the related ordering and batch cutting. Look at the code:

// initializeMultichannelRegistrar above ended by calling registrar.Initialize(consenters)
func (r *Registrar) Initialize(consenters map[string]consensus.Consenter) {
	r.consenters = consenters
	existingChains := r.ledgerFactory.ChainIDs()

	for _, chainID := range existingChains {
		rl, err := r.ledgerFactory.GetOrCreate(chainID)
		if err != nil {
			logger.Panicf("Ledger factory reported chainID %s but could not retrieve it: %s", chainID, err)
		}
		configTx := configTx(rl)
		if configTx == nil {
			logger.Panic("Programming error, configTx should never be nil here")
		}
		ledgerResources := r.newLedgerResources(configTx)
		chainID := ledgerResources.ConfigtxValidator().ChainID()

		if _, ok := ledgerResources.ConsortiumsConfig(); ok {
			if r.systemChannelID != "" {
				logger.Panicf("There appear to be two system chains %s and %s", r.systemChannelID, chainID)
			}

			chain := newChainSupport(
				r,
				ledgerResources,
				r.consenters,
				r.signer,
				r.blockcutterMetrics,
			)
			r.templator = msgprocessor.NewDefaultTemplator(chain)
			chain.Processor = msgprocessor.NewSystemChannel(chain, r.templator, msgprocessor.CreateSystemChannelFilters(r, chain, r.config))

			// Retrieve genesis block to log its hash. See FAB-5450 for the purpose
			iter, pos := rl.Iterator(&ab.SeekPosition{Type: &ab.SeekPosition_Oldest{Oldest: &ab.SeekOldest{}}})
			defer iter.Close()
			if pos != uint64(0) {
				logger.Panicf("Error iterating over system channel: '%s', expected position 0, got %d", chainID, pos)
			}
			genesisBlock, status := iter.Next()
			if status != cb.Status_SUCCESS {
				logger.Panicf("Error reading genesis block of system channel '%s'", chainID)
			}
			logger.Infof("Starting system channel '%s' with genesis block hash %x and orderer type %s",
				chainID, genesisBlock.Header.Hash(), chain.SharedConfig().ConsensusType())

			r.chains[chainID] = chain
			r.systemChannelID = chainID
			r.systemChannel = chain
			// We delay starting this chain, as it might try to copy and replace the chains map via newChain before the map is fully built
			defer chain.start()
		} else {
			logger.Debugf("Starting chain: %s", chainID)
			chain := newChainSupport(
				r,
				ledgerResources,
				r.consenters,
				r.signer,
				r.blockcutterMetrics,
			)
			r.chains[chainID] = chain
			// start consensus - there are several consenter implementations, all under
			// orderer/consensus, and start launches the matching one
			chain.start()
		}

	}
......
}
func (cs *ChainSupport) start() {
	cs.Chain.Start()
}
func (chain *chainImpl) Start() {
	// the related services are actually started here
	go startThread(chain)
}
func startThread(chain *chainImpl) {
	var err error

	// Create topic if it does not exist (requires Kafka v0.10.1.0)
	err = setupTopicForChannel(chain.consenter.retryOptions(), chain.haltChan, chain.SharedConfig().KafkaBrokers(), chain.consenter.brokerConfig(), chain.consenter.topicDetail(), chain.channel)
	if err != nil {
		// log for now and fallback to auto create topics setting for broker
		logger.Infof("[channel: %s]: failed to create Kafka topic = %s", chain.channel.topic(), err)
	}

......
	chain.processMessagesToBlocks() // Keep up to date with the channel
}
// Raft
func initializeEtcdraftConsenter(
	consenters map[string]consensus.Consenter,
	conf *localconfig.TopLevel,
	lf blockledger.Factory,
	clusterDialer *cluster.PredicateDialer,
	bootstrapBlock *cb.Block,
	ri *replicationInitiator,
	srvConf comm.ServerConfig,
	srv *comm.GRPCServer,
	registrar *multichannel.Registrar,
	metricsProvider metrics.Provider,
) {
	replicationRefreshInterval := conf.General.Cluster.ReplicationBackgroundRefreshInterval
	if replicationRefreshInterval == 0 {
		replicationRefreshInterval = defaultReplicationBackgroundRefreshInterval
	}

	systemChannelName, err := utils.GetChainIDFromBlock(bootstrapBlock)
	if err != nil {
		ri.logger.Panicf("Failed extracting system channel name from bootstrap block: %v", err)
	}
	systemLedger, err := lf.GetOrCreate(systemChannelName)
	if err != nil {
		ri.logger.Panicf("Failed obtaining system channel (%s) ledger: %v", systemChannelName, err)
	}
	getConfigBlock := func() *cb.Block {
		return multichannel.ConfigBlock(systemLedger)
	}

	exponentialSleep := exponentialDurationSeries(replicationBackgroundInitialRefreshInterval, replicationRefreshInterval)
	ticker := newTicker(exponentialSleep)

	icr := &inactiveChainReplicator{
		logger:                            logger,
		scheduleChan:                      ticker.C,
		quitChan:                          make(chan struct{}),
		replicator:                        ri,
		chains2CreationCallbacks:          make(map[string]chainCreation),
		retrieveLastSysChannelConfigBlock: getConfigBlock,
		registerChain:                     ri.registerChain,
	}

	// Use the inactiveChainReplicator as a channel lister, since it has knowledge
	// of all inactive chains.
	// This is to prevent us pulling the entire system chain when attempting to enumerate
	// the channels in the system.
	ri.channelLister = icr

	go icr.run()
	raftConsenter := etcdraft.New(clusterDialer, conf, srvConf, srv, registrar, icr, metricsProvider)
	consenters["etcdraft"] = raftConsenter
}

Below are the ordering and batch-cutting parts:

func (r *receiver) Ordered(msg *cb.Envelope) (messageBatches [][]*cb.Envelope, pending bool) {
	if len(r.pendingBatch) == 0 {
		// We are beginning a new batch, mark the time
		r.PendingBatchStartTime = time.Now()
	}

	ordererConfig, ok := r.sharedConfigFetcher.OrdererConfig()
	if !ok {
		logger.Panicf("Could not retrieve orderer config to query batch parameters, block cutting is not possible")
	}

	batchSize := ordererConfig.BatchSize()

	messageSizeBytes := messageSizeBytes(msg)
	if messageSizeBytes > batchSize.PreferredMaxBytes {
		logger.Debugf("The current message, with %v bytes, is larger than the preferred batch size of %v bytes and will be isolated.", messageSizeBytes, batchSize.PreferredMaxBytes)

		// cut pending batch, if it has any messages
		if len(r.pendingBatch) > 0 {
			messageBatch := r.Cut()
			messageBatches = append(messageBatches, messageBatch)
		}

		// create new batch with single message
		messageBatches = append(messageBatches, []*cb.Envelope{msg})

		// Record that this batch took no time to fill
		r.Metrics.BlockFillDuration.With("channel", r.ChannelID).Observe(0)

		return
	}

	messageWillOverflowBatchSizeBytes := r.pendingBatchSizeBytes+messageSizeBytes > batchSize.PreferredMaxBytes

	if messageWillOverflowBatchSizeBytes {
		logger.Debugf("The current message, with %v bytes, will overflow the pending batch of %v bytes.", messageSizeBytes, r.pendingBatchSizeBytes)
		logger.Debugf("Pending batch would overflow if current message is added, cutting batch now.")
		messageBatch := r.Cut()
		r.PendingBatchStartTime = time.Now()
		messageBatches = append(messageBatches, messageBatch)
	}

	logger.Debugf("Enqueuing message into batch")
	r.pendingBatch = append(r.pendingBatch, msg)
	r.pendingBatchSizeBytes += messageSizeBytes
	pending = true

	if uint32(len(r.pendingBatch)) >= batchSize.MaxMessageCount {
		logger.Debugf("Batch size met, cutting batch")
		messageBatch := r.Cut()
		messageBatches = append(messageBatches, messageBatch)
		pending = false
	}

	return
}

// Cut returns the current batch and starts a new one
func (r *receiver) Cut() []*cb.Envelope {
	r.Metrics.BlockFillDuration.With("channel", r.ChannelID).Observe(time.Since(r.PendingBatchStartTime).Seconds())
	r.PendingBatchStartTime = time.Time{}
	batch := r.pendingBatch
	r.pendingBatch = nil
	r.pendingBatchSizeBytes = 0
	return batch
}
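
To make the three cut conditions concrete, here is a toy, self-contained re-implementation of the rules in Ordered() above, driven by message sizes alone; the limits and sizes are made-up numbers, and the real receiver of course works on *cb.Envelope plus the channel's BatchSize config:

package main

import "fmt"

func main() {
	const preferredMaxBytes, maxMessageCount = 100, 3
	var pendingCount, pendingBytes, blockNum int

	cut := func(reason string) {
		blockNum++
		fmt.Printf("block %d cut (%s): %d msg(s), %d bytes\n",
			blockNum, reason, pendingCount, pendingBytes)
		pendingCount, pendingBytes = 0, 0
	}

	for _, size := range []int{40, 40, 150, 30, 30, 30, 90} {
		// rule 1: an oversized message is isolated in a block of its own
		if size > preferredMaxBytes {
			if pendingCount > 0 {
				cut("flush pending before oversized message")
			}
			pendingCount, pendingBytes = 1, size
			cut("oversized message isolated")
			continue
		}
		// rule 2: cut first if adding this message would overflow the byte limit
		if pendingBytes+size > preferredMaxBytes {
			cut("byte-size overflow")
		}
		pendingCount++
		pendingBytes += size
		// rule 3: cut as soon as the message-count limit is reached
		if pendingCount >= maxMessageCount {
			cut("max message count reached")
		}
	}
	// anything still pending here stays queued, just like the real cutter,
	// until the consenter's batch timeout fires
}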

That is truly a lot of code, and it deserves careful analysis.

III. Summary

The Orderer node is somewhat more complex than the Peer, but it largely follows the same playbook. The trouble is that there are many details, and some of the finer points are easy to get slightly wrong; they still need more careful scrutiny.
