Fabric Source Code Analysis, Part 8: The Committer Node

I. The Committer Node

A hallmark of blockchains is distributed bookkeeping: every node carries an identical copy of the data. In a consortium chain, nodes are configured differently, so some nodes may lack the committer role, or may not need a complete ledger. This article therefore analyzes the source code behind the committer role. Committing is essentially a storage process: the relevant databases are written or updated as the situation requires.
Committing consists of two main parts, transaction validation and ledger commit. The storage layer and the concrete block data structures will be analyzed in dedicated articles later.

II. Functional Overview

In Fabric, once the ordering node has cut a block, the message is delivered to the leader peer, which takes care of validation and of distributing and committing the block. Transactions carry both public and private data, and the committing peer must validate them before formally committing to the ledger; in Fabric the ordering node does not validate transactions. Validation covers format, signatures, and so on, and the VSCC (validation system chaincode) is invoked to check the legality of each transaction and the satisfaction of its endorsement policy. The simulated read-write sets then go through the MVCC version check. Finally the data is committed to storage: index information is built and saved to the index database, the state database is updated, valid data is written to the history database, and the related private data store is purged. Let's first look at the data structures of the two modules, transaction validation and commit (core/committer):
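
Before the structures, the stages just described can be condensed into a roadmap sketch. This is hand-written pseudo-flow, not Fabric source; it simply names, in order, the real calls analyzed in the rest of this article (the coordinator type and its fields appear later, in gossip/privdata/coordinator.go):

func commitFlow(c *coordinator, block *common.Block) error {
	// 1. Validation: format and signature checks plus the VSCC call, via the TxValidator
	if err := c.Validator.Validate(block); err != nil {
		return err
	}
	// 2. Assemble the public block together with its private data, pulling any
	//    missing private rwsets from the transient store or from remote peers
	blockAndPvtData := &ledger.BlockAndPvtData{Block: block}
	// 3. MVCC check over the rwsets, then the atomic commit to the block store,
	//    state DB and history DB; finally the transient store is purged
	return c.CommitWithPvtData(blockAndPvtData, &ledger.CommitOptions{})
}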

The committer part:


//The committer part
type Committer interface {

	// CommitWithPvtData block and private data into the ledger
  // i.e. writes the data into the ledger
	CommitWithPvtData(blockAndPvtData *ledger.BlockAndPvtData, commitOpts *ledger.CommitOptions) error

	// GetPvtDataAndBlockByNum retrieves block with private data with given
	// sequence number (i.e. retrieves the block and its private data by block number)
	GetPvtDataAndBlockByNum(seqNum uint64) (*ledger.BlockAndPvtData, error)

	// GetPvtDataByNum returns a slice of the private data from the ledger
	// for given block and based on the filter which indicates a map of
	// collections and namespaces of private data to retrieve
  // i.e. returns the private data slice of the given block; the filter maps namespaces to the collections of private data to retrieve
	GetPvtDataByNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error)

	// Get recent block sequence number
  // i.e. returns the current block height of the ledger
	LedgerHeight() (uint64, error)

	// DoesPvtDataInfoExistInLedger returns true if the ledger has pvtdata info
	// about a given block number.
  // i.e. reports whether private data info exists for the given block number
	DoesPvtDataInfoExistInLedger(blockNum uint64) (bool, error)

	// Gets blocks with sequence numbers provided in the slice
  // i.e. fetches blocks by sequence number
	GetBlocks(blockSeqs []uint64) []*common.Block

	// GetConfigHistoryRetriever returns the ConfigHistoryRetriever
  // i.e. returns the retriever for historical channel configuration
	GetConfigHistoryRetriever() (ledger.ConfigHistoryRetriever, error)

	// CommitPvtDataOfOldBlocks commits the private data corresponding to already committed block
	// If hashes for some of the private data supplied in this function does not match
	// the corresponding hash present in the block, the unmatched private data is not
	// committed and instead the mismatch information is returned back
  // i.e. commits private data of already committed blocks, checking whether the private data hashes match; mismatches are returned
	CommitPvtDataOfOldBlocks(blockPvtData []*ledger.BlockPvtData) ([]*ledger.PvtdataHashMismatch, error)

	// GetMissingPvtDataTracker return the MissingPvtDataTracker
  // i.e. returns the tracker for missing private data
	GetMissingPvtDataTracker() (ledger.MissingPvtDataTracker, error)

	// Closes committing service
	Close()
}
//The abstracted ledger interface and its operations
type PeerLedgerSupport interface {
	GetPvtDataAndBlockByNum(blockNum uint64, filter ledger.PvtNsCollFilter) (*ledger.BlockAndPvtData, error)

	GetPvtDataByNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error)

	CommitWithPvtData(blockAndPvtdata *ledger.BlockAndPvtData, commitOpts *ledger.CommitOptions) error

	CommitPvtDataOfOldBlocks(blockPvtData []*ledger.BlockPvtData) ([]*ledger.PvtdataHashMismatch, error)

	GetBlockchainInfo() (*common.BlockchainInfo, error)

	DoesPvtDataInfoExist(blockNum uint64) (bool, error)

	GetBlockByNumber(blockNumber uint64) (*common.Block, error)

	GetConfigHistoryRetriever() (ledger.ConfigHistoryRetriever, error)

	GetMissingPvtDataTracker() (ledger.MissingPvtDataTracker, error)

	Close()
}

// LedgerCommitter is the implementation of  Committer interface
// it keeps the reference to the ledger to commit blocks and retrieve
// chain information
//The implementation of the Committer interface
type LedgerCommitter struct {
	PeerLedgerSupport
	eventer ConfigBlockEventer
}
// ConfigBlockEventer callback function proto type to define action
// upon arrival on new configuration update block
//i.e. invoked when a configuration update block arrives
type ConfigBlockEventer func(block *common.Block) error
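
A minimal usage sketch of the Committer interface above. The setup here is assumed: c would be built elsewhere (e.g. by committer.NewLedgerCommitterReactive, shown in section III), and blockAndPvtData assembled by the coordinator:

func commitNext(c committer.Committer, blockAndPvtData *ledger.BlockAndPvtData) error {
	// The current ledger height equals the next expected block number
	height, err := c.LedgerHeight()
	if err != nil {
		return err
	}
	if n := blockAndPvtData.Block.Header.Number; n != height {
		return fmt.Errorf("expected block %d, got %d", height, n)
	}
	return c.CommitWithPvtData(blockAndPvtData, &ledger.CommitOptions{})
}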

The validator part:


// Validator interface which defines API to validate block transactions
// and return the bit array mask indicating invalid transactions which
// didn't pass validation.
type Validator interface {
	Validate(block *common.Block) error
}

// private interface to decouple tx validator
// and vscc execution, in order to increase
// testability of TxValidator
type vsccValidator interface {
	VSCCValidateTx(seq int, payload *common.Payload, envBytes []byte, block *common.Block) (error, peer.TxValidationCode)
}

// implementation of Validator interface, keeps
// reference to the ledger to enable tx simulation
// and execution of vscc
type TxValidator struct {
	ChainID string
	Support Support
	Vscc    vsccValidator
}
// VsccValidatorImpl is the implementation used to call
// the vscc chaincode and validate block transactions
type VsccValidatorImpl struct {
	chainID         string
	support         Support
	sccprovider     sysccprovider.SystemChaincodeProvider
	pluginValidator *PluginValidator
}
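
The validator records its verdicts as one validation-code byte per transaction, stored in the block metadata under BlockMetadataIndex_TRANSACTIONS_FILTER; this filter reappears throughout the commit path below. A small self-contained example using the real helpers from core/ledger/util:

package main

import (
	"fmt"

	"github.com/hyperledger/fabric/core/ledger/util"
	"github.com/hyperledger/fabric/protos/peer"
)

func main() {
	// One byte per transaction in the block; all start as NOT_VALIDATED
	flags := util.NewTxValidationFlags(3)
	flags.SetFlag(0, peer.TxValidationCode_VALID)
	flags.SetFlag(1, peer.TxValidationCode_MVCC_READ_CONFLICT)
	flags.SetFlag(2, peer.TxValidationCode_VALID)
	// Downstream code calls IsInvalid(i) to skip transactions,
	// exactly as preprocessProtoBlock does later in this article
	fmt.Println(flags.IsInvalid(1), flags.Flag(1)) // true MVCC_READ_CONFLICT
}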

III. Startup

First, let's step back and look at the overall flow of the commit module. The source code follows.
1. Entry (peer/start.go)

//core/scc/cscc/configure.go
//invoke()->InvokeNoShim()->joinChain
func joinChain(chainID string, block *common.Block, ccp ccprovider.ChaincodeProvider, sccp sysccprovider.SystemChaincodeProvider) pb.Response {
	if err := peer.CreateChainFromBlock(block, ccp, sccp); err != nil {
		return shim.Error(err.Error())
	}

	peer.InitChain(chainID)

	return shim.Success(nil)
}
//peer.go
// CreateChainFromBlock creates a new chain from config block
func CreateChainFromBlock(cb *common.Block, ccp ccprovider.ChaincodeProvider, sccp sysccprovider.SystemChaincodeProvider) error {
	cid, err := utils.GetChainIDFromBlock(cb)
	if err != nil {
		return err
	}

	var l ledger.PeerLedger
	if l, err = ledgermgmt.CreateLedger(cb); err != nil {
		return errors.WithMessage(err, "cannot create ledger from genesis block")
	}

  // Start the related services
	return createChain(cid, l, cb, ccp, sccp, pluginMapper)
}

2. Creating the functional components

// createChain creates a new chain object and insert it into the chains
func createChain(
	cid string,
	ledger ledger.PeerLedger,
	cb *common.Block,
	ccp ccprovider.ChaincodeProvider,
	sccp sysccprovider.SystemChaincodeProvider,
	pm txvalidator.PluginMapper,
) error {

	chanConf, err := retrievePersistedChannelConfig(ledger)
	if err != nil {
		return err
	}

	......

  // Build the vcs object, essentially the chainSupport plus a semaphore
	vcs := struct {
		*chainSupport
		*semaphore.Weighted
	}{cs, validationWorkersSemaphore}
  // Create the transaction validator
	validator := txvalidator.NewTxValidator(cid, vcs, sccp, pm)
  // Create the ledger committer
	c := committer.NewLedgerCommitterReactive(ledger, func(block *common.Block) error {
    // Get the chain ID, through which further resources can be looked up
		chainID, err := utils.GetChainIDFromBlock(block)
		if err != nil {
			return err
		}

    // Set the current config block for this chain -- as mentioned several times before, the channel can be looked up by ID
		return SetCurrConfigBlock(block, chainID)
	})

  // Get the ordering service configuration
	oc, ok := bundle.OrdererConfig()
	if !ok {
		return errors.New("no orderer config in bundle")
	}

  // Collect the orderer organizations' MSP IDs and endpoints
	ordererAddressesByOrg := make(map[string][]string)
	var ordererOrganizations []string
	for _, ordererOrg := range oc.Organizations() {
		ordererOrganizations = append(ordererOrganizations, ordererOrg.MSPID())
		if len(ordererOrg.Endpoints()) == 0 {
			continue
		}
		ordererAddressesByOrg[ordererOrg.MSPID()] = ordererOrg.Endpoints()
	}

	ordererAddresses := bundle.ChannelConfig().OrdererAddresses()
	if len(ordererAddresses) == 0 && len(ordererAddressesByOrg) == 0 {
		return errors.New("no ordering service endpoint provided in configuration block")
	}

	ordererAddressOverrides, err := GetOrdererAddressOverrides()
	if err != nil {
		return errors.Errorf("failed to get override addresses: %s", err)
	}

	// TODO: does someone need to call Close() on the transientStoreFactory at shutdown of the peer?
  // Open the transient store for private data
	store, err := TransientStoreFactory.OpenStore(bundle.ConfigtxValidator().ChainID())
	if err != nil {
		return errors.Wrapf(err, "[channel %s] failed opening transient store", bundle.ConfigtxValidator().ChainID())
	}

	csStoreSupport := &CollectionSupport{
		PeerLedger: ledger,
	}
	simpleCollectionStore := privdata.NewSimpleCollectionStore(csStoreSupport)

	oac := service.OrdererAddressConfig{
		Addresses:        ordererAddresses,
		AddressesByOrg:   ordererAddressesByOrg,
		Organizations:    ordererOrganizations,
		AddressOverrides: ordererAddressOverrides,
	}

  // Initialize the gossip module on this channel: a leader peer pulls data from the ordering node, other peers sync from peers in their organization
	service.GetGossipService().InitializeChannel(bundle.ConfigtxValidator().ChainID(), oac, service.Support{
		Validator:            validator,
		Committer:            c,
		Store:                store,
		Cs:                   simpleCollectionStore,
		IdDeserializeFactory: csStoreSupport,
		CapabilityProvider:   cp,
	})

	chains.Lock()
	defer chains.Unlock()
	chains.list[cid] = &chain{
		cs:        cs,
		cb:        cb,
		committer: c,
	}

	return nil
}

3. Initialization

// InitializeChannel allocates the state provider and should be invoked once per channel per execution
func (g *gossipServiceImpl) InitializeChannel(chainID string, oac OrdererAddressConfig, support Support) {
	g.lock.Lock()
	defer g.lock.Unlock()
	// Initialize new state provider for given committer
	logger.Debug("Creating state provider for chainID", chainID)
	servicesAdapter := &state.ServicesMediator{GossipAdapter: g, MCSAdapter: g.mcs}

	// Embed transient store and committer APIs to fulfill
	// DataStore interface to capture ability of retrieving
	// private data
	storeSupport := &DataStoreSupport{
		TransientStore: support.Store,
		Committer:      support.Committer,
	}
	// Initialize private data fetcher
	dataRetriever := privdata2.NewDataRetriever(storeSupport)
	collectionAccessFactory := privdata2.NewCollectionAccessFactory(support.IdDeserializeFactory)
	fetcher := privdata2.NewPuller(g.metrics.PrivdataMetrics, support.Cs, g.gossipSvc, dataRetriever,
		collectionAccessFactory, chainID, privdata2.GetBtlPullMargin())

	coordinatorConfig := privdata2.CoordinatorConfig{
		TransientBlockRetention:        privdata2.GetTransientBlockRetention(),
		PullRetryThreshold:             viper.GetDuration("peer.gossip.pvtData.pullRetryThreshold"),
		SkipPullingInvalidTransactions: viper.GetBool("peer.gossip.pvtData.skipPullingInvalidTransactionsDuringCommit"),
	}

	coordinator := privdata2.NewCoordinator(privdata2.Support{
		ChainID:            chainID,
		CollectionStore:    support.Cs,
		Validator:          support.Validator,
		TransientStore:     support.Store,
		Committer:          support.Committer,
		Fetcher:            fetcher,
		CapabilityProvider: support.CapabilityProvider,
	}, g.createSelfSignedData(), g.metrics.PrivdataMetrics, coordinatorConfig)

	reconcilerConfig := privdata2.GetReconcilerConfig()
	var reconciler privdata2.PvtDataReconciler

	if reconcilerConfig.IsEnabled {
		reconciler = privdata2.NewReconciler(chainID, g.metrics.PrivdataMetrics,
			support.Committer, fetcher, reconcilerConfig)
	} else {
		reconciler = &privdata2.NoOpReconciler{}
	}

	pushAckTimeout := viper.GetDuration("peer.gossip.pvtData.pushAckTimeout")
  // Handler for private data processing
	g.privateHandlers[chainID] = privateHandler{
		support:     support,
		coordinator: coordinator,
		distributor: privdata2.NewDistributor(chainID, g, collectionAccessFactory, g.metrics.PrivdataMetrics, pushAckTimeout),
		reconciler:  reconciler,
	}
	g.privateHandlers[chainID].reconciler.Start()

  // Create the state module for this channel, responsible for block synchronization -- goroutines are started here to process block data
	g.chains[chainID] = state.NewGossipStateProvider(chainID, servicesAdapter, coordinator,
		g.metrics.StateMetrics, getStateConfiguration())
	if g.deliveryService[chainID] == nil {
		var err error
    // If there is no connection to the ordering service yet, create the gRPC delivery client
		g.deliveryService[chainID], err = g.deliveryFactory.Service(g, oac, g.mcs)
		if err != nil {
			logger.Warningf("Cannot create delivery client, due to %+v", errors.WithStack(err))
		}
	}

	// Delivery service might be nil only if it was not able to get connected
	// to the ordering service
	if g.deliveryService[chainID] != nil {
		// Parameters:
		//              - peer.gossip.useLeaderElection
		//              - peer.gossip.orgLeader
		//
		// are mutual exclusive, setting both to true is not defined, hence
		// peer will panic and terminate
		leaderElection := viper.GetBool("peer.gossip.useLeaderElection")
		isStaticOrgLeader := viper.GetBool("peer.gossip.orgLeader")

		if leaderElection && isStaticOrgLeader {
			logger.Panic("Setting both orgLeader and useLeaderElection to true isn't supported, aborting execution")
		}

    // Dynamic leader election
		if leaderElection {
      // This peer may be elected the org leader responsible for block synchronization
			logger.Debug("Delivery uses dynamic leader election mechanism, channel", chainID)
			g.leaderElection[chainID] = g.newLeaderElectionComponent(chainID, g.onStatusChangeFactory(chainID,
				support.Committer), g.metrics.ElectionMetrics)
    // Statically configured leader
		} else if isStaticOrgLeader {
			logger.Debug("This peer is configured to connect to ordering service for blocks delivery, channel", chainID)
      // Pull blocks from the ordering node and disseminate them to reachable peers
      g.deliveryService[chainID].StartDeliverForChannel(chainID, support.Committer, func() {})
		} else {
			logger.Debug("This peer is not configured to connect to ordering service for blocks delivery, channel", chainID)
		}
	} else {
		logger.Warning("Delivery client is down won't be able to pull blocks for chain", chainID)
	}

}

This function registers the coordinator module with the gossip service. The coordinator orchestrates the handling of private data (additions, deletions, and remote fetching) and ties the private data handler interface, privateHandlers, to the channel. The privateHandlers map holds the per-channel handlers for private data, covering both distribution and persistence; the chains map (map[string]state.GossipStateProvider) handles the Msg and State processing.

IV. Commit

Block synchronization requires communicating with the ordering service or with the leader, hence the two modes mentioned above. Now let's trace the flow of block data: the network listening service receives a DataMsg and goroutines are started to process the data. The code flow:

//gossip/state/state.go
// NewGossipStateProvider creates state provider with coordinator instance
// to orchestrate arrival of private rwsets and blocks before committing them into the ledger.
func NewGossipStateProvider(chainID string, services *ServicesMediator, ledger ledgerResources, stateMetrics *metrics.StateMetrics, config *Configuration) GossipStateProvider {

	gossipChan, _ := services.Accept(func(message interface{}) bool {
		// Get only data messages
		return message.(*proto.GossipMessage).IsDataMsg() &&
			bytes.Equal(message.(*proto.GossipMessage).Channel, []byte(chainID))
	}, false)

	remoteStateMsgFilter := func(message interface{}) bool {
		receivedMsg := message.(proto.ReceivedMessage)
		msg := receivedMsg.GetGossipMessage()
		if !(msg.IsRemoteStateMessage() || msg.GetPrivateData() != nil) {
			return false
		}
		// Ensure we deal only with messages that belong to this channel
		if !bytes.Equal(msg.Channel, []byte(chainID)) {
			return false
		}
		connInfo := receivedMsg.GetConnectionInfo()
		authErr := services.VerifyByChannel(msg.Channel, connInfo.Identity, connInfo.Auth.Signature, connInfo.Auth.SignedData)
		if authErr != nil {
			logger.Warning("Got unauthorized request from", string(connInfo.Identity))
			return false
		}
		return true
	}

	// Filter message which are only relevant for nodeMetastate transfer
	_, commChan := services.Accept(remoteStateMsgFilter, true)

	height, err := ledger.LedgerHeight()
	if height == 0 {
		// Panic here since this is an indication of invalid situation which should not happen in normal
		// code path.
		logger.Panic("Committer height cannot be zero, ledger should include at least one block (genesis).")
	}

	if err != nil {
		logger.Error("Could not read ledger info to obtain current ledger height due to: ", errors.WithStack(err))
		// Exiting as without ledger it will be impossible
		// to deliver new blocks
		return nil
	}

	s := &GossipStateProviderImpl{
		// MessageCryptoService
		mediator: services,

		// Chain ID
		chainID: chainID,

		// Channel to read new messages from
		gossipChan: gossipChan,

		// Channel to read direct messages from other peers
		commChan: commChan,

		// Create a queue for payloads, wrapped in a metrics buffer
		payloads: &metricsBuffer{
			PayloadsBuffer: NewPayloadsBuffer(height),
			sizeMetrics:    stateMetrics.PayloadBufferSize,
			chainID:        chainID,
		},

		ledger: ledger,

		stateResponseCh: make(chan proto.ReceivedMessage, config.ChannelBufferSize),

		stateRequestCh: make(chan proto.ReceivedMessage, config.ChannelBufferSize),

		stopCh: make(chan struct{}, 1),

		stateTransferActive: 0,

		once: sync.Once{},

		requestValidator: &stateRequestValidator{},

		config: config,

		stateMetrics: stateMetrics,
	}

	logger.Infof("Updating metadata information for channel %s, "+
		"current ledger sequence is at = %d, next expected block is = %d", chainID, height-1, s.payloads.Next())
	logger.Debug("Updating gossip ledger height to", height)
	services.UpdateLedgerHeight(height, common2.ChainID(s.chainID))

	s.done.Add(4)

	// Listen for incoming communication
	go s.listen()
	// Deliver in order messages into the incoming channel
  // This is the key part
	go s.deliverPayloads()
	if s.config.EnableStateTransfer {
		// Execute anti entropy to fill missing gaps
		go s.antiEntropy()
	}
	// Taking care of state request messages
	go s.processStateRequests()

	return s
}
func (s *GossipStateProviderImpl) deliverPayloads() {
	defer s.done.Done()

	for {
		select {
		// Wait for notification that next seq has arrived
		case <-s.payloads.Ready():
			logger.Debugf("[%s] Ready to transfer payloads (blocks) to the ledger, next block number is = [%d]", s.chainID, s.payloads.Next())
			// Collect all subsequent payloads
			for payload := s.payloads.Pop(); payload != nil; payload = s.payloads.Pop() {
				rawBlock := &common.Block{}
				if err := pb.Unmarshal(payload.Data, rawBlock); err != nil {
					logger.Errorf("Error getting block with seqNum = %d due to (%+v)...dropping block", payload.SeqNum, errors.WithStack(err))
					continue
				}
				if rawBlock.Data == nil || rawBlock.Header == nil {
					logger.Errorf("Block with claimed sequence %d has no header (%v) or data (%v)",
						payload.SeqNum, rawBlock.Header, rawBlock.Data)
					continue
				}
				logger.Debugf("[%s] Transferring block [%d] with %d transaction(s) to the ledger", s.chainID, payload.SeqNum, len(rawBlock.Data.Data))

				// Read all private data into slice
				var p util.PvtDataCollections
				if payload.PrivateData != nil {
					err := p.Unmarshal(payload.PrivateData)
					if err != nil {
						logger.Errorf("Wasn't able to unmarshal private data for block seqNum = %d due to (%+v)...dropping block", payload.SeqNum, errors.WithStack(err))
						continue
					}
				}
        // The block data is committed here
				if err := s.commitBlock(rawBlock, p); err != nil {
					if executionErr, isExecutionErr := err.(*vsccErrors.VSCCExecutionFailureError); isExecutionErr {
						logger.Errorf("Failed executing VSCC due to %v. Aborting chain processing", executionErr)
						return
					}
					logger.Panicf("Cannot commit block to the ledger due to %+v", errors.WithStack(err))
				}
			}
		case <-s.stopCh:
			s.stopCh <- struct{}{}
			logger.Debug("State provider has been stopped, finishing to push new blocks.")
			return
		}
	}
}
func (s *GossipStateProviderImpl) commitBlock(block *common.Block, pvtData util.PvtDataCollections) error {

	t1 := time.Now()

	// Commit block with available private transactions
	if err := s.ledger.StoreBlock(block, pvtData); err != nil {
		logger.Errorf("Got error while committing(%+v)", errors.WithStack(err))
		return err
	}

	sinceT1 := time.Since(t1)
	s.stateMetrics.CommitDuration.With("channel", s.chainID).Observe(sinceT1.Seconds())

	// Update ledger height
	s.mediator.UpdateLedgerHeight(block.Header.Number+1, common2.ChainID(s.chainID))
	logger.Debugf("[%s] Committed block [%d] with %d transaction(s)",
		s.chainID, block.Header.Number, len(block.Data.Data))

	s.stateMetrics.Height.With("channel", s.chainID).Set(float64(block.Header.Number + 1))

	return nil
}

Finally we arrive at the code that actually stores the data:

//gossip/privdata/coordinator.go
// StoreBlock stores block with private data into the ledger
func (c *coordinator) StoreBlock(block *common.Block, privateDataSets util.PvtDataCollections) error {
  // Validate the block data and header
	if block.Data == nil {
		return errors.New("Block data is empty")
	}
	if block.Header == nil {
		return errors.New("Block header is nil")
	}

	logger.Infof("[%s] Received block [%d] from buffer", c.ChainID, block.Header.Number)

	logger.Debugf("[%s] Validating block [%d]", c.ChainID, block.Header.Number)

  // Validate the transactions -- the VSCC invocation happens in here too
	validationStart := time.Now()
	err := c.Validator.Validate(block)
	c.reportValidationDuration(time.Since(validationStart))
	if err != nil {
		logger.Errorf("Validation failed: %+v", err)
		return err
	}

  // Container for the block's public data and private data
	blockAndPvtData := &ledger.BlockAndPvtData{
		Block:          block,
		PvtData:        make(ledger.TxPvtDataMap),
		MissingPvtData: make(ledger.TxMissingPvtDataMap),
	}

  // Check whether private data info for this block already exists in the ledger
	exist, err := c.DoesPvtDataInfoExistInLedger(block.Header.Number)
	if err != nil {
		return err
	}

  // If it exists, handle it directly
	if exist {
		commitOpts := &ledger.CommitOptions{FetchPvtDataFromLedger: true}
    // Commit, fetching the private data from the ledger
		return c.CommitWithPvtData(blockAndPvtData, commitOpts)
	}

  // Compute the private data read-write sets we already own
	listMissingStart := time.Now()
	ownedRWsets, err := computeOwnedRWsets(block, privateDataSets)
	if err != nil {
		logger.Warning("Failed computing owned RWSets", err)
		return err
	}

  // Determine which private data the block still misses
	privateInfo, err := c.listMissingPrivateData(block, ownedRWsets)
	if err != nil {
		logger.Warning(err)
		return err
	}

	// if the peer is configured to not pull private rwset of invalid
	// transaction during block commit, we need to delete those
	// missing entries from the missingKeys list (to be used for pulling rwset
	// from other peers). Instead add them to the block's private data
	// missing list so that the private data reconciler can pull them later.
	if c.skipPullingInvalidTransactions {
		txsFilter := txValidationFlags(block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER])
		for missingRWS := range privateInfo.missingKeys {
			if txsFilter[missingRWS.seqInBlock] != uint8(peer.TxValidationCode_VALID) {
				blockAndPvtData.MissingPvtData.Add(missingRWS.seqInBlock, missingRWS.namespace, missingRWS.collection, true)
				delete(privateInfo.missingKeys, missingRWS)
			}
		}
	}

	c.reportListMissingPrivateDataDuration(time.Since(listMissingStart))

	retryThresh := c.pullRetryThreshold
	var bFetchFromPeers bool // defaults to false
	if len(privateInfo.missingKeys) == 0 {
		logger.Debugf("[%s] No missing collection private write sets to fetch from remote peers", c.ChainID)
	} else {
		bFetchFromPeers = true
		logger.Debugf("[%s] Could not find all collection private write sets in local peer transient store for block [%d].", c.ChainID, block.Header.Number)
		logger.Debugf("[%s] Fetching %d collection private write sets from remote peers for a maximum duration of %s", c.ChainID, len(privateInfo.missingKeys), retryThresh)
	}
	startPull := time.Now()
	limit := startPull.Add(retryThresh)
	for len(privateInfo.missingKeys) > 0 && time.Now().Before(limit) {
		c.fetchFromPeers(block.Header.Number, ownedRWsets, privateInfo)
		// If succeeded to fetch everything, no need to sleep before
		// retry
		if len(privateInfo.missingKeys) == 0 {
			break
		}
		time.Sleep(pullRetrySleepInterval)
	}
	elapsedPull := int64(time.Since(startPull) / time.Millisecond) // duration in ms

	c.reportFetchDuration(time.Since(startPull))

	// Only log results if we actually attempted to fetch
	if bFetchFromPeers {
		if len(privateInfo.missingKeys) == 0 {
			logger.Infof("[%s] Fetched all missing collection private write sets from remote peers for block [%d] (%dms)", c.ChainID, block.Header.Number, elapsedPull)
		} else {
			logger.Warningf("[%s] Could not fetch all missing collection private write sets from remote peers. Will commit block [%d] with missing private write sets:[%v]",
				c.ChainID, block.Header.Number, privateInfo.missingKeys)
		}
	}

	// populate the private RWSets passed to the ledger
	for seqInBlock, nsRWS := range ownedRWsets.bySeqsInBlock() {
		rwsets := nsRWS.toRWSet()
		logger.Debugf("[%s] Added %d namespace private write sets for block [%d], tran [%d]", c.ChainID, len(rwsets.NsPvtRwset), block.Header.Number, seqInBlock)
		blockAndPvtData.PvtData[seqInBlock] = &ledger.TxPvtData{
			SeqInBlock: seqInBlock,
			WriteSet:   rwsets,
		}
	}

	// populate missing RWSets to be passed to the ledger
	for missingRWS := range privateInfo.missingKeys {
		blockAndPvtData.MissingPvtData.Add(missingRWS.seqInBlock, missingRWS.namespace, missingRWS.collection, true)
	}

	// populate missing RWSets for ineligible collections to be passed to the ledger
	for _, missingRWS := range privateInfo.missingRWSButIneligible {
		blockAndPvtData.MissingPvtData.Add(missingRWS.seqInBlock, missingRWS.namespace, missingRWS.collection, false)
	}

	// commit block and private data
	commitStart := time.Now()
	err = c.CommitWithPvtData(blockAndPvtData, &ledger.CommitOptions{})
	c.reportCommitDuration(time.Since(commitStart))
	if err != nil {
		return errors.Wrap(err, "commit failed")
	}

	purgeStart := time.Now()

	if len(blockAndPvtData.PvtData) > 0 {
		// Finally, purge all transactions in block - valid or not valid.
    // Purge the block's transactions from the transient store
		if err := c.PurgeByTxids(privateInfo.txns); err != nil {
			logger.Error("Purging transactions", privateInfo.txns, "failed:", err)
		}
	}

  // Purge private rwsets below the given height from the transient store
	seq := block.Header.Number
	if seq%c.transientBlockRetention == 0 && seq > c.transientBlockRetention {
		err := c.PurgeByHeight(seq - c.transientBlockRetention)
		if err != nil {
			logger.Error("Failed purging data from transient store at block", seq, ":", err)
		}
	}

	c.reportPurgeDuration(time.Since(purgeStart))

	return nil
}
// listMissingPrivateData identifies missing private write sets and attempts to retrieve them from local transient store
func (c *coordinator) listMissingPrivateData(block *common.Block, ownedRWsets map[rwSetKey][]byte) (*privateDataInfo, error) {
  // Validate the block structure
	if block.Metadata == nil || len(block.Metadata.Metadata) <= int(common.BlockMetadataIndex_TRANSACTIONS_FILTER) {
		return nil, errors.New("Block.Metadata is nil or Block.Metadata lacks a Tx filter bitmap")
	}
	txsFilter := txValidationFlags(block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER])
	if len(txsFilter) != len(block.Data.Data) {
		return nil, errors.Errorf("Block data size(%d) is different from Tx filter size(%d)", len(block.Data.Data), len(txsFilter))
	}

	sources := make(map[rwSetKey][]*peer.Endorsement)
	privateRWsetsInBlock := make(map[rwSetKey]struct{})
	missing := make(rwSetKeysByTxIDs)
	data := blockData(block.Data.Data)
	bi := &transactionInspector{
		sources:              sources,
		missingKeys:          missing,
		ownedRWsets:          ownedRWsets,
		privateRWsetsInBlock: privateRWsetsInBlock,
		coordinator:          c,
	}
	storePvtDataOfInvalidTx := c.Support.CapabilityProvider.Capabilities().StorePvtDataOfInvalidTx()
  // Walk the transactions and record the keys of missing private data
	txList, err := data.forEachTxn(storePvtDataOfInvalidTx, txsFilter, bi.inspectTransaction)
	if err != nil {
		return nil, err
	}

	privateInfo := &privateDataInfo{
		sources:                 sources,
		missingKeysByTxIDs:      missing,
		txns:                    txList,
		missingRWSButIneligible: bi.missingRWSButIneligible,
	}

	logger.Debug("Retrieving private write sets for", len(privateInfo.missingKeysByTxIDs), "transactions from transient store")

	// Put into ownedRWsets RW sets that are missing and found in the transient store
  // Fetch the missing private rwsets found in the transient store into ownedRWsets
	c.fetchMissingFromTransientStore(privateInfo.missingKeysByTxIDs, ownedRWsets)

	// In the end, iterate over the ownedRWsets, and if the key doesn't exist in
	// the privateRWsetsInBlock - delete it from the ownedRWsets
  // Drop rwsets not referenced by the block, i.e. filter the data
	for k := range ownedRWsets {
		if _, exists := privateRWsetsInBlock[k]; !exists {
			logger.Warning("Removed", k.namespace, k.collection, "hash", k.hash, "from the data passed to the ledger")
			delete(ownedRWsets, k)
		}
	}

	privateInfo.missingKeys = privateInfo.missingKeysByTxIDs.flatten()
	// Remove all keys we already own
	privateInfo.missingKeys.exclude(func(key rwSetKey) bool {
		_, exists := ownedRWsets[key]
		return exists
	})

	return privateInfo, nil
}

The above covers the commit path at the coordinator level. Now let's analyze how the block and the private data are actually written and committed. CommitWithPvtData on the ledger committer is where the committer's real work starts:

//core/committer/committer_impl.go
// CommitWithPvtData commits blocks atomically with private data
func (lc *LedgerCommitter) CommitWithPvtData(blockAndPvtData *ledger.BlockAndPvtData, commitOpts *ledger.CommitOptions) error {
	// Do validation and whatever needed before
	// committing new block
	// Pre-process the block before committing
	if err := lc.preCommit(blockAndPvtData.Block); err != nil {
		return err
	}

	// Committing new block
	if err := lc.PeerLedgerSupport.CommitWithPvtData(blockAndPvtData, commitOpts); err != nil {
		return err
	}

	return nil
}
// preCommit takes care to validate the block and update based on its
// content: if this is a config block, lc.eventer is executed to propagate the update to the relevant channel
func (lc *LedgerCommitter) preCommit(block *common.Block) error {
	// Updating CSCC with new configuration block
	if utils.IsConfigBlock(block) {
		logger.Debug("Received configuration update, calling CSCC ConfigUpdate")
		if err := lc.eventer(block); err != nil {
			return errors.WithMessage(err, "could not update CSCC with new configuration update")
		}
	}
	return nil
}
//core/ledger/kvledger/kv_ledger.go
// CommitWithPvtData commits the block and the corresponding pvt data in an atomic operation
func (l *kvLedger) CommitWithPvtData(pvtdataAndBlock *ledger.BlockAndPvtData, commitOpts *ledger.CommitOptions) error {
	var err error
	block := pvtdataAndBlock.Block
	blockNo := pvtdataAndBlock.Block.Header.Number

	startBlockProcessing := time.Now()
	if commitOpts.FetchPvtDataFromLedger {
		// when we reach here, it means that the pvtdata store has the
		// pvtdata associated with this block but the stateDB might not
		// have it. During the commit of this block, no update would
		// happen in the pvtdata store as it already has the required data.

		// if there is any missing pvtData, reconciler will fetch them
		// and update both the pvtdataStore and stateDB. Hence, we can
		// fetch what is available in the pvtDataStore. If any or
		// all of the pvtdata associated with the block got expired
		// and no longer available in pvtdataStore, eventually these
		// pvtdata would get expired in the stateDB as well (though it
		// would miss the pvtData until then)
		// The reconciler keeps the pvtdata store and the stateDB consistent, including updates and modifications; here we simply read what the pvtdata store already holds and write it into the relevant databases
		txPvtData, err := l.blockStore.GetPvtDataByNum(blockNo, nil)
		if err != nil {
			return err
		}
		pvtdataAndBlock.PvtData = convertTxPvtDataArrayToMap(txPvtData)
	}

	logger.Debugf("[%s] Validating state for block [%d]", l.ledgerID, blockNo)
	// Use the validator on the tx manager to run the MVCC check over the read-write sets (validity only): versions are validated and marked, and the validated update batch is obtained. The batch contains only writes, i.e. puts and deletes.
	txstatsInfo, updateBatchBytes, err := l.txtmgmt.ValidateAndPrepare(pvtdataAndBlock, true)
	if err != nil {
		return err
	}
	elapsedBlockProcessing := time.Since(startBlockProcessing)

	startBlockstorageAndPvtdataCommit := time.Now()
	logger.Debugf("[%s] Adding CommitHash to the block [%d]", l.ledgerID, blockNo)
	// we need to ensure that only after a genesis block, commitHash is computed
	// and added to the block. In other words, only after joining a new channel
	// or peer reset, the commitHash would be added to the block
	if block.Header.Number == 1 || l.commitHash != nil {
		l.addBlockCommitHash(pvtdataAndBlock.Block, updateBatchBytes)
	}

	logger.Debugf("[%s] Committing block [%d] to storage", l.ledgerID, blockNo)
	l.blockAPIsRWLock.Lock()
	defer l.blockAPIsRWLock.Unlock()
	// Another same-named method, reached through the embedded interface
	if err = l.blockStore.CommitWithPvtData(pvtdataAndBlock); err != nil {
		return err
	}
	elapsedBlockstorageAndPvtdataCommit := time.Since(startBlockstorageAndPvtdataCommit)

	startCommitState := time.Now()
	logger.Debugf("[%s] Committing block [%d] transactions to state database", l.ledgerID, blockNo)
	// Commit the valid data to the state database
	if err = l.txtmgmt.Commit(); err != nil {
		panic(errors.WithMessage(err, "error during commit to txmgr"))
	}
	elapsedCommitState := time.Since(startCommitState)

	// History database could be written in parallel with state and/or async as a future optimization,
	// although it has not been a bottleneck...no need to clutter the log with elapsed duration.
	if ledgerconfig.IsHistoryDBEnabled() {
		logger.Debugf("[%s] Committing block [%d] transactions to history database", l.ledgerID, blockNo)
		// Commit the block's writes to the history database
		if err := l.historyDB.Commit(block); err != nil {
			panic(errors.WithMessage(err, "Error during commit to history db"))
		}
	}

	logger.Infof("[%s] Committed block [%d] with %d transaction(s) in %dms (state_validation=%dms block_and_pvtdata_commit=%dms state_commit=%dms)"+
		" commitHash=[%x]",
		l.ledgerID, block.Header.Number, len(block.Data.Data),
		time.Since(startBlockProcessing)/time.Millisecond,
		elapsedBlockProcessing/time.Millisecond,
		elapsedBlockstorageAndPvtdataCommit/time.Millisecond,
		elapsedCommitState/time.Millisecond,
		l.commitHash,
	)
	// Update the block commit statistics
	l.updateBlockStats(
		elapsedBlockProcessing,
		elapsedBlockstorageAndPvtdataCommit,
		elapsedCommitState,
		txstatsInfo,
	)
	return nil
}
//core/ledger/kvledger/txmgmt/txmgr/lockbasedtxmgr/lockbased_txmgr.go
// ValidateAndPrepare implements method in interface `txmgmt.TxMgr`
func (txmgr *LockBasedTxMgr) ValidateAndPrepare(blockAndPvtdata *ledger.BlockAndPvtData, doMVCCValidation bool) (
	[]*txmgr.TxStatInfo, []byte, error,
) {
	// Among ValidateAndPrepare(), PrepareExpiringKeys(), and
	// RemoveStaleAndCommitPvtDataOfOldBlocks(), we can allow only one
	// function to execute at a time. The reason is that each function calls
	// LoadCommittedVersions() which would clear the existing entries in the
	// transient buffer and load new entries (such a transient buffer is not
	// applicable for the golevelDB). As a result, these three functions can
	// interleave and nullify the optimization provided by the bulk read API.
	// Once the ledger cache (FAB-103) is introduced and existing
	// LoadCommittedVersions() is refactored to return a map, we can allow
	// these three functions to execute parallely.
	logger.Debugf("Waiting for purge mgr to finish the background job of computing expirying keys for the block")
	txmgr.pvtdataPurgeMgr.WaitForPrepareToFinish()
	txmgr.oldBlockCommit.Lock()
	defer txmgr.oldBlockCommit.Unlock()
	logger.Debug("lock acquired on oldBlockCommit for validating read set version against the committed version")

	block := blockAndPvtdata.Block
	logger.Debugf("Validating new block with num trans = [%d]", len(block.Data.Data))
	// Run the validator and obtain the update batch
	batch, txstatsInfo, err := txmgr.validator.ValidateAndPrepareBatch(blockAndPvtdata, doMVCCValidation)
	if err != nil {
		txmgr.reset()
		return nil, nil, err
	}
	txmgr.current = &current{block: block, batch: batch}
	// Invoke the tx manager's namespace listeners to dispatch to the registered handlers
	if err := txmgr.invokeNamespaceListeners(); err != nil {
		txmgr.reset()
		return nil, nil, err
	}

	updateBytesBuilder := &privacyenabledstate.UpdatesBytesBuilder{}
	// Produce the deterministic update bytes
	updateBytes, err := updateBytesBuilder.DeterministicBytesForPubAndHashUpdates(batch)
	return txstatsInfo, updateBytes, err
}
//core/ledger/kvledger/txmgmt/validator/valimpl/default_impl.go
// ValidateAndPrepareBatch implements the function in interface validator.Validator
func (impl *DefaultImpl) ValidateAndPrepareBatch(blockAndPvtdata *ledger.BlockAndPvtData,
	doMVCCValidation bool) (*privacyenabledstate.UpdateBatch, []*txmgr.TxStatInfo, error) {
	block := blockAndPvtdata.Block
	logger.Debugf("ValidateAndPrepareBatch() for block number = [%d]", block.Header.Number)
	var internalBlock *internal.Block
	var txsStatInfo []*txmgr.TxStatInfo
	var pubAndHashUpdates *internal.PubAndHashUpdates
	var pvtUpdates *privacyenabledstate.PvtUpdateBatch
	var err error

	logger.Debug("preprocessing ProtoBlock...")
	// Parse the common.Block object and convert it into an internal.Block
	if internalBlock, txsStatInfo, err = preprocessProtoBlock(impl.txmgr, impl.db.ValidateKeyValue, block, doMVCCValidation); err != nil {
		return nil, nil, err
	}

	if pubAndHashUpdates, err = impl.internalValidator.ValidateAndPrepareBatch(internalBlock, doMVCCValidation); err != nil {
		return nil, nil, err
	}
	logger.Debug("validating rwset...")
	if pvtUpdates, err = validateAndPreparePvtBatch(internalBlock, impl.db, pubAndHashUpdates, blockAndPvtdata.PvtData); err != nil {
		return nil, nil, err
	}
	logger.Debug("postprocessing ProtoBlock...")
	// Update the block metadata with the validation results
	postprocessProtoBlock(block, internalBlock)
	logger.Debug("ValidateAndPrepareBatch() complete")

	txsFilter := util.TxValidationFlags(block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER])
	for i := range txsFilter {
		txsStatInfo[i].ValidationCode = txsFilter.Flag(i)
	}
	return &privacyenabledstate.UpdateBatch{
		PubUpdates:  pubAndHashUpdates.PubUpdates,
		HashUpdates: pubAndHashUpdates.HashUpdates,
		PvtUpdates:  pvtUpdates,
	}, txsStatInfo, nil
}
//core/ledger/kvledger/txmgmt/validator/valimpl/helper.go
// preprocessProtoBlock parses the proto instance of block into 'Block' structure.
// The returned 'Block' structure contains only transactions that are endorser transactions and are not already marked as invalid
func preprocessProtoBlock(txMgr txmgr.TxMgr,
	validateKVFunc func(key string, value []byte) error,
	block *common.Block, doMVCCValidation bool,
) (*internal.Block, []*txmgr.TxStatInfo, error) {
	b := &internal.Block{Num: block.Header.Number}
	txsStatInfo := []*txmgr.TxStatInfo{}
	// Committer validator has already set validation flags based on well formed tran checks
	// Get the list of transaction validation flags
	txsFilter := util.TxValidationFlags(block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER])
	for txIndex, envBytes := range block.Data.Data {
		var env *common.Envelope
		var chdr *common.ChannelHeader
		var payload *common.Payload
		var err error
		txStatInfo := &txmgr.TxStatInfo{TxType: -1}
		// Append this transaction's stat info
		txsStatInfo = append(txsStatInfo, txStatInfo)
		// Unmarshal the byte stream into an Envelope
		if env, err = utils.GetEnvelopeFromBlock(envBytes); err == nil {
      // Extract the payload
			if payload, err = utils.GetPayload(env); err == nil {
				// Unmarshal the channel header
				chdr, err = utils.UnmarshalChannelHeader(payload.Header.ChannelHeader)
			}
		}

		// Check whether the transaction was already marked invalid
		if txsFilter.IsInvalid(txIndex) {
			// Skipping invalid transaction
			logger.Warningf("Channel [%s]: Block [%d] Transaction index [%d] TxId [%s]"+
				" marked as invalid by committer. Reason code [%s]",
				chdr.GetChannelId(), block.Header.Number, txIndex, chdr.GetTxId(),
				txsFilter.Flag(txIndex).String())
			// Skip invalid transactions
			continue
		}
		if err != nil {
			return nil, nil, err
		}

    // Obtain the transaction's read-write set and handle it according to the transaction type
		var txRWSet *rwsetutil.TxRwSet
		// Get the transaction type
		txType := common.HeaderType(chdr.Type)
		logger.Debugf("txType=%s", txType)
		txStatInfo.TxType = txType
		// A regular endorser transaction
		if txType == common.HeaderType_ENDORSER_TRANSACTION {
			// extract actions from the envelope message
			// Extract the action from the envelope, i.e. the chaincode action bound to the proposal
			respPayload, err := utils.GetActionFromEnvelope(envBytes)
			if err != nil {
				txsFilter.SetFlag(txIndex, peer.TxValidationCode_NIL_TXACTION)
				continue
			}
			txStatInfo.ChaincodeID = respPayload.ChaincodeId
			txRWSet = &rwsetutil.TxRwSet{}

			// Parse the simulation results into a read-write set
			if err = txRWSet.FromProtoBytes(respPayload.Results); err != nil {
				txsFilter.SetFlag(txIndex, peer.TxValidationCode_INVALID_OTHER_REASON)
				// Skip the broken transaction
				continue
			}
		} else {
			// Handle a non-endorser transaction
			rwsetProto, err := processNonEndorserTx(env, chdr.TxId, txType, txMgr, !doMVCCValidation)
			if _, ok := err.(*customtx.InvalidTxError); ok {
				txsFilter.SetFlag(txIndex, peer.TxValidationCode_INVALID_OTHER_REASON)
				continue
			}
			if err != nil {
				return nil, nil, err
			}
			if rwsetProto != nil {
				// Convert the proto rwset into a TxRwSet
				if txRWSet, err = rwsetutil.TxRwSetFromProtoMsg(rwsetProto); err != nil {
					return nil, nil, err
				}
			}
		}
		if txRWSet != nil {
			txStatInfo.NumCollections = txRWSet.NumCollections()
			if err := validateWriteset(txRWSet, validateKVFunc); err != nil {
				logger.Warningf("Channel [%s]: Block [%d] Transaction index [%d] TxId [%s]"+
					" marked as invalid. Reason code [%s]",
					chdr.GetChannelId(), block.Header.Number, txIndex, chdr.GetTxId(), peer.TxValidationCode_INVALID_WRITESET)
				txsFilter.SetFlag(txIndex, peer.TxValidationCode_INVALID_WRITESET)
				continue
			}

			// Append the transaction to the block's list
			b.Txs = append(b.Txs, &internal.Transaction{IndexInBlock: txIndex, ID: chdr.TxId, RWSet: txRWSet})
		}
	}
	return b, txsStatInfo, nil
}
//helper.go
func processNonEndorserTx(txEnv *common.Envelope, txid string, txType common.HeaderType, txmgr txmgr.TxMgr, synchingState bool) (*rwset.TxReadWriteSet, error) {
	logger.Debugf("Performing custom processing for transaction [txid=%s], [txType=%s]", txid, txType)
	processor := customtx.GetProcessor(txType)
	logger.Debugf("Processor for custom tx processing:%#v", processor)
	if processor == nil {
		return nil, nil
	}

	var err error
	var sim ledger.TxSimulator
	var simRes *ledger.TxSimulationResults
	if sim, err = txmgr.NewTxSimulator(txid); err != nil {
		return nil, err
	}
	defer sim.Done()
	if err = processor.GenerateSimulationResults(txEnv, sim, synchingState); err != nil {
		return nil, err
	}
	if simRes, err = sim.GetTxSimulationResults(); err != nil {
		return nil, err
	}
	return simRes.PubSimulationResults, nil
}
//core/peer/configtx_processor.go
func (tp *configtxProcessor) GenerateSimulationResults(txEnv *common.Envelope, simulator ledger.TxSimulator, initializingLedger bool) error {
	payload := utils.UnmarshalPayloadOrPanic(txEnv.Payload)
	channelHdr := utils.UnmarshalChannelHeaderOrPanic(payload.Header.ChannelHeader)
	txType := common.HeaderType(channelHdr.GetType())

	switch txType {
	case common.HeaderType_CONFIG:
		peerLogger.Debugf("Processing CONFIG")
		return processChannelConfigTx(txEnv, simulator)

	default:
		return fmt.Errorf("tx type [%s] is not expected", txType)
	}
}
func processChannelConfigTx(txEnv *common.Envelope, simulator ledger.TxSimulator) error {
	configEnvelope := &common.ConfigEnvelope{}
	if _, err := utils.UnmarshalEnvelopeOfType(txEnv, common.HeaderType_CONFIG, configEnvelope); err != nil {
		return err
	}
	channelConfig := configEnvelope.Config

  // Persist the channel config into the tx simulator's write set
	if err := persistConf(simulator, channelConfigKey, channelConfig); err != nil {
		return err
	}

	peerLogger.Debugf("channelConfig=%s", channelConfig)
	if channelConfig == nil {
		return fmt.Errorf("Channel config found nil")
	}

	return nil
}

func persistConf(simulator ledger.TxSimulator, key string, config *common.Config) error {
	serializedConfig, err := serialize(config)
	if err != nil {
		return err
	}
	return simulator.SetState(peerNamespace, key, serializedConfig)
}
// SetState implements method in interface `ledger.TxSimulator`
func (s *lockBasedTxSimulator) SetState(ns string, key string, value []byte) error {
	if err := s.checkWritePrecondition(key, value); err != nil {
		return err
	}
	s.rwsetBuilder.AddToWriteSet(ns, key, value)
	return nil
}
// AddToWriteSet adds a key and value to the write-set
func (b *RWSetBuilder) AddToWriteSet(ns string, key string, value []byte) {
	nsPubRwBuilder := b.getOrCreateNsPubRwBuilder(ns)
	nsPubRwBuilder.writeMap[key] = newKVWrite(key, value)
}
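
Putting those pieces together: for a CONFIG transaction the simulation boils down to a write set with a single key. A small illustration with the rwsetutil builder; note the namespace and key literals are placeholders of mine (the real values are the unexported peerNamespace and channelConfigKey constants in core/peer):

package main

import (
	"fmt"

	"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwsetutil"
)

func main() {
	b := rwsetutil.NewRWSetBuilder()
	// The equivalent of persistConf: one write, no reads
	b.AddToWriteSet("", "CHANNEL_CONFIG_KEY", []byte("serialized common.Config"))
	simRes, err := b.GetTxSimulationResults()
	if err != nil {
		panic(err)
	}
	// A single namespace carrying a single KV write
	fmt.Println(len(simRes.PubSimulationResults.NsRwset))
}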

Next comes the MVCC check and batch preparation: ValidateAndPrepareBatch (core/ledger/kvledger/txmgmt/validator/valimpl/default_impl.go) calls ValidateAndPrepareBatch (core/ledger/kvledger/txmgmt/validator/statebasedval/state_based_validator.go). The code:

// ValidateAndPrepareBatch implements method in Validator interface
func (v *Validator) ValidateAndPrepareBatch(block *internal.Block, doMVCCValidation bool) (*internal.PubAndHashUpdates, error) {
	// Check whether statedb implements BulkOptimizable interface. For now,
	// only CouchDB implements BulkOptimizable to reduce the number of REST
	// API calls from peer to CouchDB instance.
	// If the statedb supports bulk optimization, preload the committed versions of the read sets
	if v.db.IsBulkOptimizable() {
		err := v.preLoadCommittedVersionOfRSet(block)
		if err != nil {
			return nil, err
		}
	}

  // Create the batch of public updates and private-data-hash updates -- it accumulates the write sets of all transactions that pass validation
	updates := internal.NewPubAndHashUpdates()
	for _, tx := range block.Txs {
		var validationCode peer.TxValidationCode
		var err error
		// Validate the endorser transaction
		if validationCode, err = v.validateEndorserTX(tx.RWSet, doMVCCValidation, updates); err != nil {
			return nil, err
		}

		tx.ValidationCode = validationCode
		if validationCode == peer.TxValidationCode_VALID {
			logger.Debugf("Block [%d] Transaction index [%d] TxId [%s] marked as valid by state validator", block.Num, tx.IndexInBlock, tx.ID)
			committingTxHeight := version.NewHeight(block.Num, uint64(tx.IndexInBlock))
			updates.ApplyWriteSet(tx.RWSet, committingTxHeight, v.db)
		} else {
			logger.Warningf("Block [%d] Transaction index [%d] TxId [%s] marked as invalid by state validator. Reason code [%s]",
				block.Num, tx.IndexInBlock, tx.ID, validationCode.String())
		}
	}
	return updates, nil
}
// validateEndorserTX validates endorser transaction
func (v *Validator) validateEndorserTX(
	txRWSet *rwsetutil.TxRwSet,
	doMVCCValidation bool,
	updates *internal.PubAndHashUpdates) (peer.TxValidationCode, error) {

	var validationCode = peer.TxValidationCode_VALID
	var err error
	//mvccvalidation, may invalidate transaction
	if doMVCCValidation {
		validationCode, err = v.validateTx(txRWSet, updates)
	}
	return validationCode, err
}
func (v *Validator) validateTx(txRWSet *rwsetutil.TxRwSet, updates *internal.PubAndHashUpdates) (peer.TxValidationCode, error) {
	// Uncomment the following only for local debugging. Don't want to print data in the logs in production
	//logger.Debugf("validateTx - validating txRWSet: %s", spew.Sdump(txRWSet))
	for _, nsRWSet := range txRWSet.NsRwSets {
		ns := nsRWSet.NameSpace
		// Validate public reads, i.e. the individual key read set
		if valid, err := v.validateReadSet(ns, nsRWSet.KvRwSet.Reads, updates.PubUpdates); !valid || err != nil {
			if err != nil {
				return peer.TxValidationCode(-1), err
			}
			return peer.TxValidationCode_MVCC_READ_CONFLICT, nil
		}
		// Validate range queries for phantom items, i.e. the range read set
		if valid, err := v.validateRangeQueries(ns, nsRWSet.KvRwSet.RangeQueriesInfo, updates.PubUpdates); !valid || err != nil {
			if err != nil {
				return peer.TxValidationCode(-1), err
			}
			return peer.TxValidationCode_PHANTOM_READ_CONFLICT, nil
		}
		// Validate hashes for private reads, i.e. the private data read set
		if valid, err := v.validateNsHashedReadSets(ns, nsRWSet.CollHashedRwSets, updates.HashUpdates); !valid || err != nil {
			if err != nil {
				return peer.TxValidationCode(-1), err
			}
			return peer.TxValidationCode_MVCC_READ_CONFLICT, nil
		}
	}
	return peer.TxValidationCode_VALID, nil
}
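
To make the MVCC rule concrete: a read is valid only if the version recorded in the read set at simulation time still equals the committed version at validation time. A tiny self-contained illustration using the version package referenced above:

package main

import (
	"fmt"

	"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/version"
)

func main() {
	// Version captured in the transaction's read set during simulation
	readVersion := version.NewHeight(8, 2) // key last written by block 8, tx 2
	// Version found in the state DB when the block is validated
	committedVersion := version.NewHeight(9, 0) // rewritten by block 9, tx 0 in the meantime
	// validateKVRead would mark this transaction MVCC_READ_CONFLICT
	fmt.Println(version.AreSame(committedVersion, readVersion)) // false
}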

Below is the source for the three kinds of checks: single key, range query, and private data:

func (v *Validator) validateReadSet(ns string, kvReads []*kvrwset.KVRead, updates *privacyenabledstate.PubUpdateBatch) (bool, error) {
	for _, kvRead := range kvReads {
		if valid, err := v.validateKVRead(ns, kvRead, updates); !valid || err != nil {
			return valid, err
		}
	}
	return true, nil
}
// validateKVRead performs mvcc check for a key read during transaction simulation.
// i.e., it checks whether a key/version combination is already updated in the statedb (by an already committed block)
// or in the updates (by a preceding valid transaction in the current block)
func (v *Validator) validateKVRead(ns string, kvRead *kvrwset.KVRead, updates *privacyenabledstate.PubUpdateBatch) (bool, error) {
  // First check the in-flight update batch: a key already written by a preceding valid tx in this block invalidates the read
	if updates.Exists(ns, kvRead.Key) {
		return false, nil
	}

	// Fetch the committed version from the state database
	committedVersion, err := v.db.GetVersion(ns, kvRead.Key)
	if err != nil {
		return false, err
	}

	logger.Debugf("Comparing versions for key [%s]: committed version=%#v and read version=%#v",
		kvRead.Key, committedVersion, rwsetutil.NewVersion(kvRead.Version))

	// Compare the versions for consistency;
	// AreSame reports whether the two versions are identical
	if !version.AreSame(committedVersion, rwsetutil.NewVersion(kvRead.Version)) {
		logger.Debugf("Version mismatch for key [%s:%s]. Committed version = [%#v], Version in readSet [%#v]",
			ns, kvRead.Key, committedVersion, kvRead.Version)
		return false, nil
	}
	return true, nil
}
func (v *Validator) validateRangeQueries(ns string, rangeQueriesInfo []*kvrwset.RangeQueryInfo, updates *privacyenabledstate.PubUpdateBatch) (bool, error) {
	for _, rqi := range rangeQueriesInfo {
		if valid, err := v.validateRangeQuery(ns, rqi, updates); !valid || err != nil {
			return valid, err
		}
	}
	return true, nil
}

// validateRangeQuery performs a phantom read check i.e., it
// checks whether the results of the range query are still the same when executed on the
// statedb (latest state as of last committed block) + updates (prepared by the writes of preceding valid transactions
// in the current block and yet to be committed as part of group commit at the end of the validation of the block)
func (v *Validator) validateRangeQuery(ns string, rangeQueryInfo *kvrwset.RangeQueryInfo, updates *privacyenabledstate.PubUpdateBatch) (bool, error) {
	logger.Debugf("validateRangeQuery: ns=%s, rangeQueryInfo=%s", ns, rangeQueryInfo)

	// If during simulation, the caller had not exhausted the iterator so
	// rangeQueryInfo.EndKey is not actual endKey given by the caller in the range query
	// but rather it is the last key seen by the caller and hence the combinedItr should include the endKey in the results.
	includeEndKey := !rangeQueryInfo.ItrExhausted

  // Create a combined iterator over the statedb and the update batch
	combinedItr, err := newCombinedIterator(v.db, updates.UpdateBatch,
		ns, rangeQueryInfo.StartKey, rangeQueryInfo.EndKey, includeEndKey)
	if err != nil {
		return false, err
	}
	defer combinedItr.Close()
	var validator rangeQueryValidator
	// Fetch and check the Merkle-tree summary if present
	if rangeQueryInfo.GetReadsMerkleHashes() != nil {
		logger.Debug(`Hashing results are present in the range query info hence, initiating hashing based validation`)
		validator = &rangeQueryHashValidator{}
	} else {
		logger.Debug(`Hashing results are not present in the range query info hence, initiating raw KVReads based validation`)
		validator = &rangeQueryResultsValidator{}
	}
	// Initialize the range query validator
	validator.init(rangeQueryInfo, combinedItr)
	// Validate the results
	return validator.validate()
}
func (m *RangeQueryInfo) GetReadsMerkleHashes() *QueryReadsMerkleSummary {
	if x, ok := m.GetReadsInfo().(*RangeQueryInfo_ReadsMerkleHashes); ok {
		return x.ReadsMerkleHashes
	}
	return nil
}
//Only one of the interface's two implementations is shown here; see the source for the other
func (v *rangeQueryResultsValidator) validate() (bool, error) {
	rqResults := v.rqInfo.GetRawReads().GetKvReads()
	itr := v.itr
	var result statedb.QueryResult
	var err error
	if result, err = itr.Next(); err != nil {
		return false, err
	}
	if len(rqResults) == 0 {
		return result == nil, nil
	}
	for i := 0; i < len(rqResults); i++ {
		kvRead := rqResults[i]
		logger.Debugf("comparing kvRead=[%#v] to queryResponse=[%#v]", kvRead, result)
		if result == nil {
			logger.Debugf("Query response nil. Key [%s] got deleted", kvRead.Key)
			return false, nil
		}
		versionedKV := result.(*statedb.VersionedKV)
		if versionedKV.Key != kvRead.Key {
			logger.Debugf("key name mismatch: Key in rwset = [%s], key in query results = [%s]", kvRead.Key, versionedKV.Key)
			return false, nil
		}
		if !version.AreSame(versionedKV.Version, convertToVersionHeight(kvRead.Version)) {
			logger.Debugf(`Version mismatch for key [%s]: Version in rwset = [%#v], latest version = [%#v]`,
				versionedKV.Key, versionedKV.Version, kvRead.Version)
			return false, nil
		}
		if result, err = itr.Next(); err != nil {
			return false, err
		}
	}
	if result != nil {
		// iterator is not exhausted - which means that there are extra results in the given range
		logger.Debugf("Extra result = [%#v]", result)
		return false, nil
	}
	return true, nil
}
func (v *Validator) validateNsHashedReadSets(ns string, collHashedRWSets []*rwsetutil.CollHashedRwSet,
	updates *privacyenabledstate.HashedUpdateBatch) (bool, error) {
	for _, collHashedRWSet := range collHashedRWSets {
		if valid, err := v.validateCollHashedReadSet(ns, collHashedRWSet.CollectionName, collHashedRWSet.HashedRwSet.HashedReads, updates); !valid || err != nil {
			return valid, err
		}
	}
	return true, nil
}

func (v *Validator) validateCollHashedReadSet(ns, coll string, kvReadHashes []*kvrwset.KVReadHash,
	updates *privacyenabledstate.HashedUpdateBatch) (bool, error) {
	for _, kvReadHash := range kvReadHashes {
		if valid, err := v.validateKVReadHash(ns, coll, kvReadHash, updates); !valid || err != nil {
			return valid, err
		}
	}
	return true, nil
}

Next comes the private data, handled in validateAndPreparePvtBatch, which ValidateAndPrepareBatch calls:

// validateAndPreparePvtBatch pulls out the private write-set for the transactions that are marked as valid
// by the internal public data validator. Finally, it validates (if not already self-endorsed) the pvt rwset against the
// corresponding hash present in the public rwset
func validateAndPreparePvtBatch(block *internal.Block, db privacyenabledstate.DB,
	pubAndHashUpdates *internal.PubAndHashUpdates, pvtdata map[uint64]*ledger.TxPvtData) (*privacyenabledstate.PvtUpdateBatch, error) {
	pvtUpdates := privacyenabledstate.NewPvtUpdateBatch()
	// Iterate over the block, skipping invalid transactions
	metadataUpdates := metadataUpdates{}
	for _, tx := range block.Txs {
		if tx.ValidationCode != peer.TxValidationCode_VALID {
			continue
		}

		// Skip transactions without private writes
		if !tx.ContainsPvtWrites() {
			continue
		}

		// Fetch the private data supplied for this transaction
		txPvtdata := pvtdata[uint64(tx.IndexInBlock)]
		if txPvtdata == nil {
			continue
		}

		// Check whether the private data requires validation; true by default
		if requiresPvtdataValidation(txPvtdata) {
      // Validate the private data
			if err := validatePvtdata(tx, txPvtdata); err != nil {
				return nil, err
			}
		}
		var pvtRWSet *rwsetutil.TxPvtRwSet
		var err error

		// Parse the private write set
		if pvtRWSet, err = rwsetutil.TxPvtRwSetFromProtoMsg(txPvtdata.WriteSet); err != nil {
			return nil, err
		}
		// Add the private rwset to the private update batch
		addPvtRWSetToPvtUpdateBatch(pvtRWSet, pvtUpdates, version.NewHeight(block.Num, uint64(tx.IndexInBlock)))
		// Record the private metadata updates
		addEntriesToMetadataUpdates(metadataUpdates, pvtRWSet)
	}
	if err := incrementPvtdataVersionIfNeeded(metadataUpdates, pvtUpdates, pubAndHashUpdates, db); err != nil {
		return nil, err
	}
	return pvtUpdates, nil
}
// validPvtdata returns true if hashes of all the collections writeset present in the pvt data
// match with the corresponding hashes present in the public read-write set
func validatePvtdata(tx *internal.Transaction, pvtdata *ledger.TxPvtData) error {
	if pvtdata.WriteSet == nil {
		return nil
	}

	for _, nsPvtdata := range pvtdata.WriteSet.NsPvtRwset {
		for _, collPvtdata := range nsPvtdata.CollectionPvtRwset {
			// Compute the hash over the raw private rwset
			collPvtdataHash := util.ComputeHash(collPvtdata.Rwset)
			// Retrieve the corresponding hash from the public data in the transaction
			hashInPubdata := tx.RetrieveHash(nsPvtdata.Namespace, collPvtdata.CollectionName)
			if !bytes.Equal(collPvtdataHash, hashInPubdata) {
				return &validator.ErrPvtdataHashMissmatch{
					Msg: fmt.Sprintf(`Hash of pvt data for collection [%s:%s] does not match with the corresponding hash in the public data.
					public hash = [%#v], pvt data hash = [%#v]`, nsPvtdata.Namespace, collPvtdata.CollectionName, hashInPubdata, collPvtdataHash),
				}
			}
		}
	}
	return nil
}
func TxPvtRwSetFromProtoMsg(protoMsg *rwset.TxPvtReadWriteSet) (*TxPvtRwSet, error) {
	txPvtRwset := &TxPvtRwSet{}
	var nsPvtRwSet *NsPvtRwSet
	var err error
	for _, nsRwSetProtoMsg := range protoMsg.NsPvtRwset {
		if nsPvtRwSet, err = nsPvtRwSetFromProtoMsg(nsRwSetProtoMsg); err != nil {
			return nil, err
		}
		txPvtRwset.NsPvtRwSet = append(txPvtRwset.NsPvtRwSet, nsPvtRwSet)
	}
	return txPvtRwset, nil
}
func addPvtRWSetToPvtUpdateBatch(pvtRWSet *rwsetutil.TxPvtRwSet, pvtUpdateBatch *privacyenabledstate.PvtUpdateBatch, ver *version.Height) {
	for _, ns := range pvtRWSet.NsPvtRwSet {
		for _, coll := range ns.CollPvtRwSets {
			for _, kvwrite := range coll.KvRwSet.Writes {
				if !kvwrite.IsDelete {
					pvtUpdateBatch.Put(ns.NameSpace, coll.CollectionName, kvwrite.Key, kvwrite.Value, ver)
				} else {
					pvtUpdateBatch.Delete(ns.NameSpace, coll.CollectionName, kvwrite.Key, ver)
				}
			}
		}
	}
}
//This corresponds to validateCollHashedReadSet shown earlier
func addEntriesToMetadataUpdates(metadataUpdates metadataUpdates, pvtRWSet *rwsetutil.TxPvtRwSet) {
	for _, ns := range pvtRWSet.NsPvtRwSet {
		for _, coll := range ns.CollPvtRwSets {
			for _, metadataWrite := range coll.KvRwSet.MetadataWrites {
				ns, coll, key := ns.NameSpace, coll.CollectionName, metadataWrite.Key
				metadataUpdates[collKey{ns, coll, key}] = true
			}
		}
	}
}
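
The hash check in validatePvtdata deserves a standalone illustration: only the hash of a collection's rwset is carried on-chain in the endorsed public data, and the committer recomputes it over the raw private bytes. A minimal sketch assuming SHA-256, Fabric's default hash (the real call goes through util.ComputeHash):

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

func main() {
	// The raw private collection rwset, distributed off-chain via gossip
	collPvtRwset := []byte("serialized collection rwset")
	collPvtdataHash := sha256.Sum256(collPvtRwset)
	// The endorsed public rwset carries only the hash; pretend we read it back
	hashInPubdata := collPvtdataHash[:]
	// A mismatch here would raise ErrPvtdataHashMissmatch and reject the private data
	fmt.Println(bytes.Equal(collPvtdataHash[:], hashInPubdata)) // true
}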

Finally the block metadata is updated (core/ledger/kvledger/txmgmt/validator/valimpl/helper.go):

//core/ledger/kvledger/txmgmt/validator/valimpl
func (impl *DefaultImpl) ValidateAndPrepareBatch(...) {
	......
	logger.Debug("postprocessing ProtoBlock...")
	// Update the block metadata with the validation results
	postprocessProtoBlock(block, internalBlock)
	logger.Debug("ValidateAndPrepareBatch() complete")
	......
}
// postprocessProtoBlock updates the proto block's validation flags (in metadata) by the results of validation process
func postprocessProtoBlock(block *common.Block, validatedBlock *internal.Block) {
	txsFilter := util.TxValidationFlags(block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER])
	for _, tx := range validatedBlock.Txs {
		txsFilter.SetFlag(tx.IndexInBlock, tx.ValidationCode)
	}
	block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER] = txsFilter
}

Now the commit proper can proceed. Back in kvLedger's CommitWithPvtData, it calls the same-named function below:


//core/ledger/ledgerstorage/store.go
// CommitWithPvtData commits the block and the corresponding pvt data in an atomic operation
func (s *Store) CommitWithPvtData(blockAndPvtdata *ledger.BlockAndPvtData) error {
	blockNum := blockAndPvtdata.Block.Header.Number
	s.rwlock.Lock()
	defer s.rwlock.Unlock()

	pvtBlkStoreHt, err := s.pvtdataStore.LastCommittedBlockHeight()
	if err != nil {
		return err
	}

	writtenToPvtStore := false
	if pvtBlkStoreHt < blockNum+1 { // The pvt data store sanity check does not allow rewriting the pvt data.
		// when re-processing blocks (rejoin the channel or re-fetching last few block),
		// skip the pvt data commit to the pvtdata blockstore
		logger.Debugf("Writing block [%d] to pvt block store", blockNum)
		// If a state fork occurs during a regular block commit,
		// we have a mechanism to drop all blocks followed by refetching of blocks
		// and re-processing them. In the current way of doing this, we only drop
		// the block files (and related artifacts) but we do not drop/overwrite the
		// pvtdatastorage as it might lead to data loss.
		// During block reprocessing, as there is a possibility of an invalid pvtdata
		// transaction to become valid, we store the pvtdata of invalid transactions
		// too in the pvtdataStore as we do for the publicdata in the case of blockStore.
		//handles state forks / block re-processing
		pvtData, missingPvtData := constructPvtDataAndMissingData(blockAndPvtdata)
		//stage the block's private data into the pvt data store
		if err := s.pvtdataStore.Prepare(blockAndPvtdata.Block.Header.Number, pvtData, missingPvtData); err != nil {
			return err
		}
		writtenToPvtStore = true
	} else {
		logger.Debugf("Skipping writing block [%d] to pvt block store as the store height is [%d]", blockNum, pvtBlkStoreHt)
	}

	//append the new block; on failure roll back the staged private data
	if err := s.AddBlock(blockAndPvtdata.Block); err != nil {
		s.pvtdataStore.Rollback()
		return err
	}

	if pvtBlkStoreHt == blockNum+1 {
		// we reach here only when the pvtdataStore was ahead
		// of blockStore during the store opening time (would
		// occur after a peer rollback/reset).
		//handles the case where a prior peer reset/rollback left the pvt store ahead
		s.isPvtstoreAheadOfBlockstore.Store(false)
	}

	if writtenToPvtStore {
		//confirm the staged private data
		return s.pvtdataStore.Commit()
	}
	return nil
}
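Note the ordering: the private data is staged first (reversible), the block is appended second (irreversible), and only then is the staging confirmed. A schematic sketch of this mini two-phase commit; the interfaces are illustrative stand-ins, not Fabric's actual types:

package ledgerstore

type block struct{ Num uint64 }
type pvtData struct{} // per-tx private write sets, elided

type pvtStore interface {
	Prepare(blockNum uint64, pd pvtData) error // stage, reversible
	Commit() error                             // make the staged data durable
	Rollback() error                           // discard the staged data
}

type blockStore interface {
	AddBlock(b block) error // irreversible append to the block files
}

// commitBlockAndPvtData mirrors the ordering in Store.CommitWithPvtData.
func commitBlockAndPvtData(pvt pvtStore, blocks blockStore, b block, pd pvtData) error {
	if err := pvt.Prepare(b.Num, pd); err != nil {
		return err
	}
	if err := blocks.AddBlock(b); err != nil {
		pvt.Rollback() // the block append failed, so drop the staged pvt data
		return err
	}
	return pvt.Commit() // both stores now agree on the new height
}
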
// Prepare implements the function in the interface `Store`
func (s *store) Prepare(blockNum uint64, pvtData []*ledger.TxPvtData, missingPvtData ledger.TxMissingPvtDataMap) error {
	if s.batchPending {
		return &ErrIllegalCall{`A pending batch exists as a result of the last invoke to "Prepare" call.
			 Invoke "Commit" or "Rollback" on the pending batch before invoking "Prepare" function`}
	}
	//compute the next expected block number
	expectedBlockNum := s.nextBlockNum()
	//sanity-check the incoming block number
	if expectedBlockNum != blockNum {
		return &ErrIllegalArgs{fmt.Sprintf("Expected block number=%d, received block number=%d", expectedBlockNum, blockNum)}
	}

  //create the update batch
	batch := leveldbhelper.NewUpdateBatch()
	var err error
	var keyBytes, valBytes []byte

  //prepare the entries to be stored
	storeEntries, err := prepareStoreEntries(blockNum, pvtData, s.btlPolicy, missingPvtData)
	if err != nil {
		return err
	}

  //encode and add the data entries
	for _, dataEntry := range storeEntries.dataEntries {
		keyBytes = encodeDataKey(dataEntry.key)
		if valBytes, err = encodeDataValue(dataEntry.value); err != nil {
			return err
		}
		batch.Put(keyBytes, valBytes)
	}

  //encode and add expiry entries, used later to purge expired data
	for _, expiryEntry := range storeEntries.expiryEntries {
		keyBytes = encodeExpiryKey(expiryEntry.key)
		if valBytes, err = encodeExpiryValue(expiryEntry.value); err != nil {
			return err
		}
		batch.Put(keyBytes, valBytes)
	}

	for missingDataKey, missingDataValue := range storeEntries.missingDataEntries {
		keyBytes = encodeMissingDataKey(&missingDataKey)
		if valBytes, err = encodeMissingDataValue(missingDataValue); err != nil {
			return err
		}
		batch.Put(keyBytes, valBytes)
	}

  //write the batch atomically, marking a pending commit
	batch.Put(pendingCommitKey, emptyValue)
	if err := s.db.WriteBatch(batch, true); err != nil {
		return err
	}
	s.batchPending = true
	logger.Debugf("Saved %d private data write sets for block [%d]", len(pvtData), blockNum)
	return nil
}
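Prepare (and the Rollback shown below) depend on the data keys sorting by block number inside LevelDB, so that datakeyRange(blkNum) covers a whole block with one range scan. A hedged sketch of an order-preserving key layout; Fabric itself uses a variable-length order-preserving encoding (EncodeOrderPreservingVarUint64), and the 'd' prefix here is purely illustrative:

package pvtdatastorage

import "encoding/binary"

// encodeDataKeySketch builds a key whose lexicographic order equals the
// numeric order of (blockNum, txNum): fixed-width big-endian numbers sort
// the same way as their values, so all keys of one block are contiguous.
func encodeDataKeySketch(blockNum, txNum uint64, ns, coll string) []byte {
	k := []byte{'d'} // data-entry prefix (illustrative)
	num := make([]byte, 8)
	binary.BigEndian.PutUint64(num, blockNum)
	k = append(k, num...)
	binary.BigEndian.PutUint64(num, txNum)
	k = append(k, num...)
	k = append(k, ns...)
	k = append(k, 0x00) // separator between namespace and collection
	k = append(k, coll...)
	return k
}
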
// AddBlock adds a new block
//Commits the block to the block data files and saves a checkpoint, updates the index database,
//then broadcasts on the sync condition variable to announce that a new block was committed to the ledger,
//and finally calls mgr.updateBlockchainInfo to refresh chain info such as the height and current block hash.
func (store *fsBlockStore) AddBlock(block *common.Block) error {
	// track elapsed time to collect block commit time
	startBlockCommit := time.Now()
	result := store.fileMgr.addBlock(block)
	elapsedBlockCommit := time.Since(startBlockCommit)

	store.updateBlockStats(block.Header.Number, elapsedBlockCommit)

	return result
}
// Commit implements the function in the interface `Store`
func (s *store) Commit() error {
	if !s.batchPending {
		return &ErrIllegalCall{"No pending batch to commit"}
	}
	//determine the block number being committed and build the batch that finalizes it
	committingBlockNum := s.nextBlockNum()
	logger.Debugf("Committing private data for block [%d]", committingBlockNum)
	batch := leveldbhelper.NewUpdateBatch()
	batch.Delete(pendingCommitKey)
	batch.Put(lastCommittedBlkkey, encodeLastCommittedBlockVal(committingBlockNum))
	if err := s.db.WriteBatch(batch, true); err != nil {
		return err
	}
	s.batchPending = false
	s.isEmpty = false
	atomic.StoreUint64(&s.lastCommittedBlock, committingBlockNum)
	logger.Debugf("Committed private data for block [%d]", committingBlockNum)
	//purge expired private data if a purge is scheduled at this height
	s.performPurgeIfScheduled(committingBlockNum)
	return nil
}
//Rollback is invoked when AddBlock fails
func (s *store) Rollback() error {
	if !s.batchPending {
		return &ErrIllegalCall{"No pending batch to rollback"}
	}
	blkNum := s.nextBlockNum()
	batch := leveldbhelper.NewUpdateBatch()
	itr := s.db.GetIterator(datakeyRange(blkNum))
	for itr.Next() {
		batch.Delete(itr.Key())
	}
	itr.Release()
	itr = s.db.GetIterator(eligibleMissingdatakeyRange(blkNum))
	for itr.Next() {
		batch.Delete(itr.Key())
	}
	itr.Release()
	batch.Delete(pendingCommitKey)
	if err := s.db.WriteBatch(batch, true); err != nil {
		return err
	}
	s.batchPending = false
	return nil
}

Finally, the Commit:

//core\ledger\kvledger\txmgmt\txmgr\lockbasedtxmgr/lockbased_txmgr.go
// Commit implements method in interface `txmgmt.TxMgr`
func (txmgr *LockBasedTxMgr) Commit() error {
	// we need to acquire a lock on oldBlockCommit. The following are the two reasons:
	// (1) the DeleteExpiredAndUpdateBookkeeping() would perform incorrect operation if
	//        toPurgeList is updated by RemoveStaleAndCommitPvtDataOfOldBlocks().
	// (2) RemoveStaleAndCommitPvtDataOfOldBlocks computes the update
	//     batch based on the current state and if we allow regular block commits at the
	//     same time, the former may overwrite the newer versions of the data and we may
	//     end up with an incorrect update batch.
  	//serialize with commits of old-block private data
	txmgr.oldBlockCommit.Lock()
	defer txmgr.oldBlockCommit.Unlock()
	logger.Debug("lock acquired on oldBlockCommit for committing regular updates to state database")

	// When using the purge manager for the first block commit after peer start, the asynchronous function
	// 'PrepareForExpiringKeys' is invoked in-line. However, for the subsequent blocks commits, this function is invoked
	// in advance for the next block
	if !txmgr.pvtdataPurgeMgr.usedOnce {
		txmgr.pvtdataPurgeMgr.PrepareForExpiringKeys(txmgr.current.blockNum())
		txmgr.pvtdataPurgeMgr.usedOnce = true
	}
	defer func() {
		txmgr.pvtdataPurgeMgr.PrepareForExpiringKeys(txmgr.current.blockNum() + 1)
		logger.Debugf("launched the background routine for preparing keys to purge with the next block")
		txmgr.reset()
	}()

	logger.Debugf("Committing updates to state database")
	if txmgr.current == nil {
		panic("validateAndPrepare() method should have been called before calling commit()")
	}

	if err := txmgr.pvtdataPurgeMgr.DeleteExpiredAndUpdateBookkeeping(
		txmgr.current.batch.PvtUpdates, txmgr.current.batch.HashUpdates); err != nil {
		return err
	}

  //compute the commit height of this block
	commitHeight := version.NewHeight(txmgr.current.blockNum(), txmgr.current.maxTxNumber())
	txmgr.commitRWLock.Lock()
	logger.Debugf("Write lock acquired for committing updates to state database")

	//apply the batched updates
	if err := txmgr.db.ApplyPrivacyAwareUpdates(txmgr.current.batch, commitHeight); err != nil {
		txmgr.commitRWLock.Unlock()
		return err
	}
	txmgr.commitRWLock.Unlock()
	// only while holding a lock on oldBlockCommit, we should clear the cache as the
	// cache is being used by the old pvtData committer to load the version of
	// hashedKeys. Also, note that the PrepareForExpiringKeys uses the cache.
	txmgr.clearCache()
	logger.Debugf("Updates committed to state database and the write lock is released")

	// purge manager should be called (in this call the purge mgr removes the expiry entries from schedules) after committing to statedb
	//remove the processed expiry entries from the purge schedule
	if err := txmgr.pvtdataPurgeMgr.BlockCommitDone(); err != nil {
		return err
	}
	// In the case of error state listeners will not receive this call - instead a peer panic is caused by the ledger upon receiving
	// an error from this function
	//notify the state listeners
	txmgr.updateStateListeners()
	return nil
}

// ApplyPrivacyAwareUpdates implements corresponding function in interface DB
//merges the public, private and hashed updates and applies them through the underlying DB
func (s *CommonStorageDB) ApplyPrivacyAwareUpdates(updates *UpdateBatch, height *version.Height) error {
	// combinedUpdates includes both updates to public db and private db, which are partitioned by a separate namespace
	combinedUpdates := updates.PubUpdates
	addPvtUpdates(combinedUpdates, updates.PvtUpdates)
	addHashedUpdates(combinedUpdates, updates.HashUpdates, !s.BytesKeySupported())
	s.metadataHint.setMetadataUsedFlag(updates)
	return s.VersionedDB.ApplyUpdates(combinedUpdates.UpdateBatch, height)
}
//fold the private updates into the public batch under a derived namespace
func addPvtUpdates(pubUpdateBatch *PubUpdateBatch, pvtUpdateBatch *PvtUpdateBatch) {
	for ns, nsBatch := range pvtUpdateBatch.UpdateMap {
		for _, coll := range nsBatch.GetCollectionNames() {
			for key, vv := range nsBatch.GetUpdates(coll) {
				pubUpdateBatch.Update(derivePvtDataNs(ns, coll), key, vv)
			}
		}
	}
}
func addHashedUpdates(pubUpdateBatch *PubUpdateBatch, hashedUpdateBatch *HashedUpdateBatch, base64Key bool) {
	for ns, nsBatch := range hashedUpdateBatch.UpdateMap {
		for _, coll := range nsBatch.GetCollectionNames() {
			for key, vv := range nsBatch.GetUpdates(coll) {
				if base64Key {
					key = base64.StdEncoding.EncodeToString([]byte(key))
				}
				pubUpdateBatch.Update(deriveHashedDataNs(ns, coll), key, vv)
			}
		}
	}
}

//below is the LevelDB implementation
// ApplyUpdates implements method in VersionedDB interface
func (vdb *versionedDB) ApplyUpdates(batch *statedb.UpdateBatch, height *version.Height) error {
	dbBatch := leveldbhelper.NewUpdateBatch()
	namespaces := batch.GetUpdatedNamespaces()
	for _, ns := range namespaces {
		updates := batch.GetUpdates(ns)
		for k, vv := range updates {
			compositeKey := constructCompositeKey(ns, k)
			logger.Debugf("Channel [%s]: Applying key(string)=[%s] key(bytes)=[%#v]", vdb.dbName, string(compositeKey), compositeKey)

			if vv.Value == nil {
				dbBatch.Delete(compositeKey)
			} else {
				encodedVal, err := encodeValue(vv)
				if err != nil {
					return err
				}
				dbBatch.Put(compositeKey, encodedVal)
			}
		}
	}
	// Record a savepoint at a given height
	// If a given height is nil, it denotes that we are committing pvt data of old blocks.
	// In this case, we should not store a savepoint for recovery. The lastUpdatedOldBlockList
	// in the pvtstore acts as a savepoint for pvt data.
	if height != nil {
		dbBatch.Put(savePointKey, height.ToBytes())
	}
	// Setting sync to true as a precaution, false may be an ok optimization after further testing.
	if err := vdb.db.WriteBatch(dbBatch, true); err != nil {
		return err
	}
	return nil
}
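constructCompositeKey is what keeps different chaincodes' state apart in a single LevelDB instance. A small sketch of the namespace~key layout; the 0x00 separator matches our reading of the stateleveldb source and should be treated as an assumption:

package stateleveldb

import "bytes"

// constructCompositeKeySketch joins namespace and key with a 0x00 separator
// so that keys from different chaincode namespaces can never collide.
func constructCompositeKeySketch(ns, key string) []byte {
	return append(append([]byte(ns), 0x00), []byte(key)...)
}

// splitCompositeKeySketch reverses the construction (assumes a well-formed key).
func splitCompositeKeySketch(composite []byte) (ns, key string) {
	parts := bytes.SplitN(composite, []byte{0x00}, 2)
	return string(parts[0]), string(parts[1])
}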

Last comes the update of the history database (again only the LevelDB implementation is shown; see the source for the CouchDB case):

func (l *kvLedger) CommitWithPvtData(pvtdataAndBlock *ledger.BlockAndPvtData, commitOpts *ledger.CommitOptions) error {
	......
	if err := l.historyDB.Commit(block);
	......
}
//core\ledger\kvledger\history\historydb\historyleveldb/historyleveldb.go
// Commit implements method in HistoryDB interface
func (historyDB *historyDB) Commit(block *common.Block) error {

  //get the block number
	blockNo := block.Header.Number
	//Set the starting tranNo to 0
	var tranNo uint64

  //create the update batch
	dbBatch := leveldbhelper.NewUpdateBatch()

	logger.Debugf("Channel [%s]: Updating history database for blockNo [%v] with [%d] transactions",
		historyDB.dbName, blockNo, len(block.Data.Data))

	// Get the invalidation byte array for the block
	//i.e. the list of per-transaction validation codes
	txsFilter := util.TxValidationFlags(block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER])

	// write each tran's write set to history db
	//iterate over the block's transactions, filtering out invalid ones
	for _, envBytes := range block.Data.Data {

		// If the tran is marked as invalid, skip it
		if txsFilter.IsInvalid(int(tranNo)) {
			logger.Debugf("Channel [%s]: Skipping history write for invalid transaction number %d",
				historyDB.dbName, tranNo)
			tranNo++
			continue
		}
    //parse the transaction's Envelope object
		env, err := putils.GetEnvelopeFromBlock(envBytes)
		if err != nil {
			return err
		}

    //extract the message payload
		payload, err := putils.GetPayload(env)
		if err != nil {
			return err
		}

    //unmarshal the channel header
		chdr, err := putils.UnmarshalChannelHeader(payload.Header.ChannelHeader)
		if err != nil {
			return err
		}

		if common.HeaderType(chdr.Type) == common.HeaderType_ENDORSER_TRANSACTION {

			// extract actions from the envelope message
			respPayload, err := putils.GetActionFromEnvelope(envBytes)
			if err != nil {
				return err
			}

			//preparation for extracting RWSet from transaction
			txRWSet := &rwsetutil.TxRwSet{}

			// Get the Result from the Action and then Unmarshal
			// it into a TxReadWriteSet using custom unmarshalling
			//deserialize the proto bytes into a read-write set
			if err = txRWSet.FromProtoBytes(respPayload.Results); err != nil {
				return err
			}
			// for each transaction, loop through the namespaces and writesets
			// and add a history record for each write
			//iterate over the write sets
			for _, nsRWSet := range txRWSet.NsRwSets {
				ns := nsRWSet.NameSpace

				for _, kvWrite := range nsRWSet.KvRwSet.Writes {
					writeKey := kvWrite.Key

					//composite key for history records is in the form ns~key~blockNo~tranNo
					//build the history record key in the form ns~key~blockNo~tranNo
					compositeHistoryKey := historydb.ConstructCompositeHistoryKey(ns, writeKey, blockNo, tranNo)

					// No value is required, write an empty byte array (emptyValue) since Put() of nil is not allowed
					//write an empty value
					dbBatch.Put(compositeHistoryKey, emptyValue)
				}
			}

		} else {
			logger.Debugf("Skipping transaction [%d] since it is not an endorsement transaction\n", tranNo)
		}
		tranNo++
	}

	// add savepoint for recovery purpose
	//create a new height and record it as a savepoint for recovery
	height := version.NewHeight(blockNo, tranNo)
	dbBatch.Put(savePointKey, height.ToBytes())

	// write the block's history records and savepoint to LevelDB
	// Setting sync to true as a precaution, false may be an ok optimization after further testing.
	//flush to the history database
	if err := historyDB.db.WriteBatch(dbBatch, true); err != nil {
		return err
	}

	logger.Debugf("Channel [%s]: Updates committed to history database for blockNo [%v]", historyDB.dbName, blockNo)
	return nil
}
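The composite history key is what makes "history of key K" a cheap range scan: every committed write to K lands under the same ns~key~ prefix, ordered by block and transaction number. A hedged sketch of that layout; fixed-width big-endian numbers stand in for Fabric's order-preserving var-uint encoding, and the 0x00 separators are assumptions:

package historyleveldb

import "encoding/binary"

// historyKeySketch lays out ns~key~blockNo~tranNo. Reading the history of a
// key is then one LevelDB range scan over the ns~key~ prefix, in block order.
func historyKeySketch(ns, key string, blockNo, tranNo uint64) []byte {
	k := append([]byte(ns), 0x00)
	k = append(k, key...)
	k = append(k, 0x00)
	num := make([]byte, 8)
	binary.BigEndian.PutUint64(num, blockNo)
	k = append(k, num...)
	binary.BigEndian.PutUint64(num, tranNo)
	return append(k, num...)
}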

With that, the write to the databases has finally succeeded.

五、Validation

The commit walkthrough above showed that validation is invoked along the way, but we did not expand on it there; let's do that now, starting with
the Validate() method (core/committer/txvalidator/validator.go):


func (v *TxValidator) Validate(block *common.Block) error {
	var err error
	var errPos int

	startValidation := time.Now() // timer to log Validate block duration
	logger.Debugf("[%s] START Block Validation for block [%d]", v.ChainID, block.Header.Number)

	// Initialize trans as valid here, then set invalidation reason code upon invalidation below
  //initialize the per-transaction flags; validation results are recorded below
	txsfltr := ledgerUtil.NewTxValidationFlags(len(block.Data.Data))
	// txsChaincodeNames records all the invoked chaincodes by tx in a block
	txsChaincodeNames := make(map[int]*sysccprovider.ChaincodeInstance)
	// upgradedChaincodes records all the chaincodes that are upgraded in a block
	txsUpgradedChaincodes := make(map[int]*sysccprovider.ChaincodeInstance)
	// array of txids
	txidArray := make([]string, len(block.Data.Data))

  //spawn goroutines to validate the block's transactions in parallel
	results := make(chan *blockValidationResult)
	go func() {
		for tIdx, d := range block.Data.Data {
			// ensure that we don't have too many concurrent validation workers
      //bound the number of concurrent validation workers
			v.Support.Acquire(context.Background(), 1)

			go func(index int, data []byte) {
				defer v.Support.Release(1)
        //validate one transaction
				v.validateTx(&blockValidationRequest{
					d:     data,
					block: block,
					tIdx:  index,
				}, results)
			}(tIdx, d)
		}
	}()

	logger.Debugf("expecting %d block validation responses", len(block.Data.Data))

	// now we read responses in the order in which they come back
	for i := 0; i < len(block.Data.Data); i++ {
		res := <-results

		if res.err != nil {
			// if there is an error, we buffer its value, wait for
			// all workers to complete validation and then return
			// the error from the first tx in this block that returned an error
			logger.Debugf("got terminal error %s for idx %d", res.err, res.tIdx)

			if err == nil || res.tIdx < errPos {
				err = res.err
				errPos = res.tIdx
			}
		} else {
			// if there was no error, we set the txsfltr and we set the
			// txsChaincodeNames and txsUpgradedChaincodes maps
			logger.Debugf("got result for idx %d, code %d", res.tIdx, res.validationCode)

			txsfltr.SetFlag(res.tIdx, res.validationCode)

			//transaction is valid
			if res.validationCode == peer.TxValidationCode_VALID {
				if res.txsChaincodeName != nil {
					//record the invoked chaincode
					txsChaincodeNames[res.tIdx] = res.txsChaincodeName
				}
				//record a chaincode upgrade, if any
				if res.txsUpgradedChaincode != nil {
					txsUpgradedChaincodes[res.tIdx] = res.txsUpgradedChaincode
				}
				txidArray[res.tIdx] = res.txid
			}
		}
	}

	// if we're here, all workers have completed the validation.
	// If there was an error we return the error from the first
	// tx in this block that returned an error
	if err != nil {
		return err
	}

	// if we operate with this capability, we mark invalid any transaction that has a txid
	// which is equal to that of a previous tx in this block
  //mark duplicate txids within the block as invalid (guards against replayed/double-spent txs)
	if v.Support.Capabilities().ForbidDuplicateTXIdInBlock() {
		markTXIdDuplicates(txidArray, txsfltr)
	}

	// if we're here, all workers have completed validation and
	// no error was reported; we set the tx filter and return
	// success
  //invalidate transactions affected by chaincode upgrades within this block
	v.invalidTXsForUpgradeCC(txsChaincodeNames, txsUpgradedChaincodes, txsfltr)

	// make sure no transaction has skipped validation
  //make sure no transaction skipped validation
	err = v.allValidated(txsfltr, block)
	if err != nil {
		return err
	}

	// Initialize metadata structure
	utils.InitBlockMetadata(block)

  //store the transaction filter into the block metadata
	block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER] = txsfltr

	elapsedValidation := time.Since(startValidation) / time.Millisecond // duration in ms
	logger.Infof("[%s] Validated block [%d] in %dms", v.ChainID, block.Header.Number, elapsedValidation)

	return nil
}

A few words about this function: it performs block validation, and the transactions inside a block are validated in parallel. The flow is: the committer spawns one goroutine per transaction to run validateTx, with a semaphore capping the number of concurrent validation goroutines; each goroutine validates its transaction and writes the outcome to the results channel; the committer then reads the results off the channel in completion order.
Two points to note:
1. Results arrive out of order, so each one carries its transaction index; the committer uses that index to restore block order before acting on the outcomes.
2. Parallel validation is only sound because validation does not mutate shared system state; anything state-changing would force sequential processing, so each transaction's validation must remain independent (see the sketch below).
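
A minimal sketch of this fan-out/fan-in pattern, using golang.org/x/sync/semaphore the way v.Support.Acquire/Release is used above (the validation body is a stub; real code would run the format, signature, and VSCC checks there):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

type result struct {
	idx  int
	code int // stands in for peer.TxValidationCode
}

func main() {
	txs := [][]byte{[]byte("tx0"), []byte("tx1"), []byte("tx2")}
	sem := semaphore.NewWeighted(2) // at most 2 concurrent validators
	results := make(chan result)

	go func() {
		for i, tx := range txs {
			sem.Acquire(context.Background(), 1)
			go func(idx int, data []byte) {
				defer sem.Release(1)
				// validateTx would run its checks on data here.
				results <- result{idx: idx, code: 0}
			}(i, tx)
		}
	}()

	flags := make([]int, len(txs))
	for range txs { // fan-in: results arrive in completion order
		r := <-results
		flags[r.idx] = r.code // block order restored via the index, not arrival order
	}
	fmt.Println(flags)
}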

Next, the transaction-level validation. validateTx takes the block, the transaction's index and raw bytes, validates that transaction, and pushes the result into the results channel:


//core/committer/txvalidator/validator.go
func (v *TxValidator) validateTx(req *blockValidationRequest, results chan<- *blockValidationResult) {
	//the block
	block := req.block
	//raw transaction bytes
	d := req.d
	//index of the tx within the block
	tIdx := req.tIdx
	//transaction ID
	txID := ""

	if d == nil {
		results <- &blockValidationResult{
			tIdx: tIdx,
		}
		return
	}

  //parse the Envelope object out of the transaction bytes
	if env, err := utils.GetEnvelopeFromBlock(d); err != nil {
		logger.Warningf("Error getting tx from block: %+v", err)
		results <- &blockValidationResult{
			tIdx:           tIdx,
			validationCode: peer.TxValidationCode_INVALID_OTHER_REASON,
		}
		return
	} else if env != nil {
		// validate the transaction: here we check that the transaction
		// is properly formed, properly signed and that the security
		// chain binding proposal to endorsements to tx holds. We do
		// NOT check the validity of endorsements, though. That's a
		// job for VSCC below
		logger.Debugf("[%s] validateTx starts for block %p env %p txn %d", v.ChainID, block, env, tIdx)
		defer logger.Debugf("[%s] validateTx completes for block %p env %p txn %d", v.ChainID, block, env, tIdx)
		//payload
		var payload *common.Payload
		var err error
		//validation result
		var txResult peer.TxValidationCode
		//chaincode instance invoked by the transaction
		var txsChaincodeName *sysccprovider.ChaincodeInstance
		//chaincode instance upgraded by the transaction
		var txsUpgradedChaincode *sysccprovider.ChaincodeInstance

    //verify the transaction's format, signature, and data consistency (tamper-evidence)
		if payload, txResult = validation.ValidateTransaction(env, v.Support.Capabilities()); txResult != peer.TxValidationCode_VALID {
			logger.Errorf("Invalid transaction with index %d", tIdx)
			results <- &blockValidationResult{
				tIdx:           tIdx,
				validationCode: txResult,
			}
			return
		}

    //unmarshal the header to obtain the channel ID
		chdr, err := utils.UnmarshalChannelHeader(payload.Header.ChannelHeader)
		if err != nil {
			logger.Warningf("Could not unmarshal channel header, err %s, skipping", err)
			results <- &blockValidationResult{
				tIdx:           tIdx,
				validationCode: peer.TxValidationCode_INVALID_OTHER_REASON,
			}
			return
		}

		channel := chdr.ChannelId
		logger.Debugf("Transaction is for channel %s", channel)

		if !v.chainExists(channel) {
			logger.Errorf("Dropping transaction for non-existent channel %s", channel)
			results <- &blockValidationResult{
				tIdx:           tIdx,
				validationCode: peer.TxValidationCode_TARGET_CHAIN_NOT_FOUND,
			}
			return
		}

    //an endorsed application transaction
		if common.HeaderType(chdr.Type) == common.HeaderType_ENDORSER_TRANSACTION {

			txID = chdr.TxId

			// Check for duplicate transactions to prevent replay attacks
			erroneousResultEntry := v.checkTxIdDupsLedger(tIdx, chdr, v.Support.Ledger())
			if erroneousResultEntry != nil {
				results <- erroneousResultEntry
				return
			}

			// Validate tx with vscc and policy
			//VSCC validation: checks the transaction against the endorsement policy
			logger.Debug("Validating transaction vscc tx validate")
			err, cde := v.Vscc.VSCCValidateTx(tIdx, payload, d, block)
			if err != nil {
				logger.Errorf("VSCCValidateTx for transaction txId = %s returned error: %s", txID, err)
				switch err.(type) {
				case *commonerrors.VSCCExecutionFailureError:
					results <- &blockValidationResult{
						tIdx: tIdx,
						err:  err,
					}
					return
				case *commonerrors.VSCCInfoLookupFailureError:
					results <- &blockValidationResult{
						tIdx: tIdx,
						err:  err,
					}
					return
				default:
					results <- &blockValidationResult{
						tIdx:           tIdx,
						validationCode: cde,
					}
					return
				}
			}

      //get the chaincode instances referenced by the transaction
			invokeCC, upgradeCC, err := v.getTxCCInstance(payload)
			if err != nil {
				logger.Errorf("Get chaincode instance from transaction txId = %s returned error: %+v", txID, err)
				results <- &blockValidationResult{
					tIdx:           tIdx,
					validationCode: peer.TxValidationCode_INVALID_OTHER_REASON,
				}
				return
			}
			txsChaincodeName = invokeCC
			if upgradeCC != nil {
				logger.Infof("Find chaincode upgrade transaction for chaincode %s on channel %s with new version %s", upgradeCC.ChaincodeName, upgradeCC.ChainID, upgradeCC.ChaincodeVersion)
				txsUpgradedChaincode = upgradeCC
			}
			// FAB-12971 comment out below block before v1.4 cut. Will uncomment after v1.4.
			/*
				} else if common.HeaderType(chdr.Type) == common.HeaderType_TOKEN_TRANSACTION {

					txID = chdr.TxId
					if !v.Support.Capabilities().FabToken() {
						logger.Errorf("FabToken capability is not enabled. Unsupported transaction type [%s] in block [%d] transaction [%d]",
							common.HeaderType(chdr.Type), block.Header.Number, tIdx)
						results <- &blockValidationResult{
							tIdx:           tIdx,
							validationCode: peer.TxValidationCode_UNSUPPORTED_TX_PAYLOAD,
						}
						return
					}

					// Check if there is a duplicate of such transaction in the ledger and
					// obtain the corresponding result that acknowledges the error type
					erroneousResultEntry := v.checkTxIdDupsLedger(tIdx, chdr, v.Support.Ledger())
					if erroneousResultEntry != nil {
						results <- erroneousResultEntry
						return
					}

					// Set the namespace of the invocation field
					txsChaincodeName = &sysccprovider.ChaincodeInstance{
						ChainID:          channel,
						ChaincodeName:    "Token",
						ChaincodeVersion: ""}
			*/
      //channel configuration transaction
		} else if common.HeaderType(chdr.Type) == common.HeaderType_CONFIG {
      //unmarshal the config envelope carried by the transaction
			configEnvelope, err := configtx.UnmarshalConfigEnvelope(payload.Data)
			if err != nil {
				err = errors.WithMessage(err, "error unmarshalling config which passed initial validity checks")
				logger.Criticalf("%+v", err)
				results <- &blockValidationResult{
					tIdx: tIdx,
					err:  err,
				}
				return
			}

      //apply the config update
			if err := v.Support.Apply(configEnvelope); err != nil {
				err = errors.WithMessage(err, "error validating config which passed initial validity checks")
				logger.Criticalf("%+v", err)
				results <- &blockValidationResult{
					tIdx: tIdx,
					err:  err,
				}
				return
			}
			logger.Debugf("config transaction received for chain %s", channel)
		} else {
			logger.Warningf("Unknown transaction type [%s] in block number [%d] transaction index [%d]",
				common.HeaderType(chdr.Type), block.Header.Number, tIdx)
			results <- &blockValidationResult{
				tIdx:           tIdx,
				validationCode: peer.TxValidationCode_UNKNOWN_TX_TYPE,
			}
			return
		}

		if _, err := proto.Marshal(env); err != nil {
			logger.Warningf("Cannot marshal transaction: %s", err)
			results <- &blockValidationResult{
				tIdx:           tIdx,
				validationCode: peer.TxValidationCode_MARSHAL_TX_ERROR,
			}
			return
		}
		// Succeeded to pass down here, transaction is valid
    //push the successful result into the channel
		results <- &blockValidationResult{
			tIdx:                 tIdx,
			txsChaincodeName:     txsChaincodeName,
			txsUpgradedChaincode: txsUpgradedChaincode,
			validationCode:       peer.TxValidationCode_VALID,
			txid:                 txID,
		}
		return
	} else {
		logger.Warning("Nil tx from block")
		results <- &blockValidationResult{
			tIdx:           tIdx,
			validationCode: peer.TxValidationCode_NIL_ENVELOPE,
		}
		return
	}
}

The GetEnvelopeFromBlock function involves the Envelope struct:

// Envelope wraps a Payload with a signature so that the message may be authenticated
type Envelope struct {
	// A marshaled Payload
	Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
	// A signature by the creator specified in the Payload header
	Signature            []byte   `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

It wraps the payload of a transaction carried in the block data together with the creator's signature.
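
The unwrap chain GetEnvelopeFromBlock → GetPayload → UnmarshalChannelHeader is essentially three nested proto.Unmarshal calls over byte fields. A hedged sketch against the Fabric 1.4-era protos (error handling simplified; a robust version would also nil-check payload.Header):

package example

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/hyperledger/fabric/protos/common"
)

// unwrapTx decodes one entry of block.Data.Data down to its ChannelHeader.
func unwrapTx(envBytes []byte) (*common.ChannelHeader, error) {
	env := &common.Envelope{} // signed wrapper
	if err := proto.Unmarshal(envBytes, env); err != nil {
		return nil, fmt.Errorf("bad envelope: %v", err)
	}
	payload := &common.Payload{} // header + data
	if err := proto.Unmarshal(env.Payload, payload); err != nil {
		return nil, fmt.Errorf("bad payload: %v", err)
	}
	chdr := &common.ChannelHeader{} // type, channel, txid, ...
	if err := proto.Unmarshal(payload.Header.ChannelHeader, chdr); err != nil {
		return nil, fmt.Errorf("bad channel header: %v", err)
	}
	return chdr, nil
}
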
Moving on to the inner call, ValidateTransaction: it further checks the transaction's format, authenticity, and integrity, including its signature.

//core/common/validation/msgvalidation.go
// ValidateTransaction checks that the transaction envelope is properly formed
func ValidateTransaction(e *common.Envelope, c channelconfig.ApplicationCapabilities) (*common.Payload, pb.TxValidationCode) {
	putilsLogger.Debugf("ValidateTransactionEnvelope starts for envelope %p", e)

	// check for nil argument
	if e == nil {
		putilsLogger.Errorf("Error: nil envelope")
		return nil, pb.TxValidationCode_NIL_ENVELOPE
	}

	// get the payload from the envelope
	payload, err := utils.GetPayload(e)
	if err != nil {
		putilsLogger.Errorf("GetPayload returns err %s", err)
		return nil, pb.TxValidationCode_BAD_PAYLOAD
	}

	putilsLogger.Debugf("Header is %s", payload.Header)

	// validate the header
	chdr, shdr, err := validateCommonHeader(payload.Header)
	if err != nil {
		putilsLogger.Errorf("validateCommonHeader returns err %s", err)
		return nil, pb.TxValidationCode_BAD_COMMON_HEADER
	}

	// validate the signature in the envelope
	err = checkSignatureFromCreator(shdr.Creator, e.Signature, e.Payload, chdr.ChannelId)
	if err != nil {
		putilsLogger.Errorf("checkSignatureFromCreator returns err %s", err)
		return nil, pb.TxValidationCode_BAD_CREATOR_SIGNATURE
	}

	// TODO: ensure that creator can transact with us (some ACLs?) which set of APIs is supposed to give us this info?

	// continue the validation in a way that depends on the type specified in the header
	//dispatch on the message type carried in the header
	switch common.HeaderType(chdr.Type) {
		//ordinary endorsed transaction
	case common.HeaderType_ENDORSER_TRANSACTION:
		// Verify that the transaction ID has been computed properly.
		// This check is needed to ensure that the lookup into the ledger
		// for the same TxID catches duplicates.
		//check the TxID derivation so duplicate lookups are meaningful
		err = utils.CheckTxID(
			chdr.TxId,
			shdr.Nonce,
			shdr.Creator)

		if err != nil {
			putilsLogger.Errorf("CheckTxID returns err %s", err)
			return nil, pb.TxValidationCode_BAD_PROPOSAL_TXID
		}

    //validate the endorser transaction's data
		err = validateEndorserTransaction(payload.Data, payload.Header)
		putilsLogger.Debugf("ValidateTransactionEnvelope returns err %s", err)

		if err != nil {
			putilsLogger.Errorf("validateEndorserTransaction returns err %s", err)
			return payload, pb.TxValidationCode_INVALID_ENDORSER_TRANSACTION
		} else {
			return payload, pb.TxValidationCode_VALID
		}
		//config message
	case common.HeaderType_CONFIG:
		// Config transactions have signatures inside which will be validated, especially at genesis there may be no creator or
		// signature on the outermost envelope
    //validate the config transaction's data
		err = validateConfigTransaction(payload.Data, payload.Header)

		if err != nil {
			putilsLogger.Errorf("validateConfigTransaction returns err %s", err)
			return payload, pb.TxValidationCode_INVALID_CONFIG_TRANSACTION
		} else {
			return payload, pb.TxValidationCode_VALID
		}
		//token transaction, handled much like an ordinary one
	case common.HeaderType_TOKEN_TRANSACTION:
		// Verify that the transaction ID has been computed properly.
		// This check is needed to ensure that the lookup into the ledger
		// for the same TxID catches duplicates.
		err = utils.CheckTxID(
			chdr.TxId,
			shdr.Nonce,
			shdr.Creator)

		if err != nil {
			putilsLogger.Errorf("CheckTxID returns err %s", err)
			return nil, pb.TxValidationCode_BAD_PROPOSAL_TXID
		}

		return payload, pb.TxValidationCode_VALID
	default:
		return nil, pb.TxValidationCode_UNSUPPORTED_TX_PAYLOAD
	}
}
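One detail worth calling out is CheckTxID: the transaction ID is not free-form but derived from the nonce and the creator, which is what makes the ledger's duplicate-TxID lookup meaningful. A hedged sketch of the derivation, hex(SHA-256(nonce || creator)), per our reading of utils.ComputeTxID:

package example

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// checkTxIDSketch verifies that txID == hex(SHA-256(nonce || creator)),
// binding the ID to the client's nonce and identity.
func checkTxIDSketch(txID string, nonce, creator []byte) error {
	h := sha256.New()
	h.Write(nonce)
	h.Write(creator)
	expected := hex.EncodeToString(h.Sum(nil))
	if txID != expected {
		return fmt.Errorf("invalid txID %s (expected %s)", txID, expected)
	}
	return nil
}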

Once the generic transaction checks pass, the endorsement policy is checked via the validation system chaincode, i.e. v.Vscc.VSCCValidateTx:

// VSCCValidateTx executes vscc validation for transaction
func (v *VsccValidatorImpl) VSCCValidateTx(seq int, payload *common.Payload, envBytes []byte, block *common.Block) (error, peer.TxValidationCode) {
	chainID := v.chainID
	logger.Debugf("[%s] VSCCValidateTx starts for bytes %p", chainID, envBytes)

	// get header extensions so we have the chaincode ID
	//fetch the header extension so we can get the chaincode ID
	hdrExt, err := utils.GetChaincodeHeaderExtension(payload.Header)
	if err != nil {
		return err, peer.TxValidationCode_BAD_HEADER_EXTENSION
	}

	// get channel header
	chdr, err := utils.UnmarshalChannelHeader(payload.Header.ChannelHeader)
	if err != nil {
		return err, peer.TxValidationCode_BAD_CHANNEL_HEADER
	}

	/* obtain the list of namespaces we're writing stuff to;
	   at first, we establish a few facts about this invocation:
	   1) which namespaces does it write to?
	   2) does it write to LSCC's namespace?
	   3) does it write to any cc that cannot be invoked? */
	//flag: does the tx write to LSCC's namespace
	writesToLSCC := false
	//flag: does the tx write to a non-invokable system chaincode
	writesToNonInvokableSCC := false
	//extract the chaincode Action from the envelope
	respPayload, err := utils.GetActionFromEnvelope(envBytes)
	if err != nil {
		return errors.WithMessage(err, "GetActionFromEnvelope failed"), peer.TxValidationCode_BAD_RESPONSE_PAYLOAD
	}
	//build the transaction read-write set and populate it from the proto bytes
	txRWSet := &rwsetutil.TxRwSet{}
	if err = txRWSet.FromProtoBytes(respPayload.Results); err != nil {
		return errors.WithMessage(err, "txRWSet.FromProtoBytes failed"), peer.TxValidationCode_BAD_RWSET
	}

	// Verify the header extension and response payload contain the ChaincodeId
	//verify the chaincode IDs in the header extension and the payload
	if hdrExt.ChaincodeId == nil {
		return errors.New("nil ChaincodeId in header extension"), peer.TxValidationCode_INVALID_OTHER_REASON
	}

	if respPayload.ChaincodeId == nil {
		return errors.New("nil ChaincodeId in ChaincodeAction"), peer.TxValidationCode_INVALID_OTHER_REASON
	}

	// get name and version of the cc we invoked
	//name and version of the invoked chaincode
	ccID := hdrExt.ChaincodeId.Name
	ccVer := respPayload.ChaincodeId.Version

	// sanity check on ccID
	if ccID == "" {
		err = errors.New("invalid chaincode ID")
		logger.Errorf("%+v", err)
		return err, peer.TxValidationCode_INVALID_OTHER_REASON
	}
	if ccID != respPayload.ChaincodeId.Name {
		err = errors.Errorf("inconsistent ccid info (%s/%s)", ccID, respPayload.ChaincodeId.Name)
		logger.Errorf("%+v", err)
		return err, peer.TxValidationCode_INVALID_OTHER_REASON
	}
	// sanity check on ccver
	if ccVer == "" {
		err = errors.New("invalid chaincode version")
		logger.Errorf("%+v", err)
		return err, peer.TxValidationCode_INVALID_OTHER_REASON
	}

	var wrNamespace []string
	alwaysEnforceOriginalNamespace := v.support.Capabilities().V1_2Validation()
	if alwaysEnforceOriginalNamespace {
		wrNamespace = append(wrNamespace, ccID)
		if respPayload.Events != nil {
			ccEvent := &peer.ChaincodeEvent{}
			if err = proto.Unmarshal(respPayload.Events, ccEvent); err != nil {
				return errors.Wrapf(err, "invalid chaincode event"), peer.TxValidationCode_INVALID_OTHER_REASON
			}
			if ccEvent.ChaincodeId != ccID {
				return errors.Errorf("chaincode event chaincode id does not match chaincode action chaincode id"), peer.TxValidationCode_INVALID_OTHER_REASON
			}
		}
	}

	namespaces := make(map[string]struct{})
	//iterate over the read-write sets
	for _, ns := range txRWSet.NsRwSets {
		// check to make sure there is no duplicate namespace in txRWSet
		//reject duplicate namespaces within one txRWSet
		if _, ok := namespaces[ns.NameSpace]; ok {
			return errors.Errorf("duplicate namespace '%s' in txRWSet", ns.NameSpace),
				peer.TxValidationCode_ILLEGAL_WRITESET
		}
		namespaces[ns.NameSpace] = struct{}{}

		if !v.txWritesToNamespace(ns) {
			continue
		}

		// Check to make sure we did not already populate this chaincode
		// name to avoid checking the same namespace twice
		if ns.NameSpace != ccID || !alwaysEnforceOriginalNamespace {
			//record the written namespace
			wrNamespace = append(wrNamespace, ns.NameSpace)
		}

    //check whether the lscc namespace is written
		if !writesToLSCC && ns.NameSpace == "lscc" {
			//set the LSCC write flag
			writesToLSCC = true
		}

    //flag writes to system chaincodes that cannot be invoked chaincode-to-chaincode
		if !writesToNonInvokableSCC && v.sccprovider.IsSysCCAndNotInvokableCC2CC(ns.NameSpace) {
			writesToNonInvokableSCC = true
		}
    //flag writes to system chaincodes that cannot be invoked externally
		if !writesToNonInvokableSCC && v.sccprovider.IsSysCCAndNotInvokableExternal(ns.NameSpace) {
			writesToNonInvokableSCC = true
		}
	}

	// we've gathered all the info required to proceed to validation;
	// validation will behave differently depending on the type of
	// chaincode (system vs. application)
  //application (user) chaincode
	if !v.sccprovider.IsSysCC(ccID) {
		// if we're here, we know this is an invocation of an application chaincode;
		// first of all, we make sure that:
		// 1) we don't write to LSCC - an application chaincode is free to invoke LSCC
		//    for instance to get information about itself or another chaincode; however
		//    these legitimate invocations only read from LSCC's namespace; currently
		//    only two functions of LSCC write to its namespace: deploy and upgrade and
		//    neither should be used by an application chaincode
		//an application chaincode must not write to LSCC's namespace
		if writesToLSCC {
			return errors.Errorf("chaincode %s attempted to write to the namespace of LSCC", ccID),
				peer.TxValidationCode_ILLEGAL_WRITESET
		}
		// 2) we don't write to the namespace of a chaincode that we cannot invoke - if
		//    the chaincode cannot be invoked in the first place, there's no legitimate
		//    way in which a transaction has a write set that writes to it; additionally
		//    we don't have any means of verifying whether the transaction had the rights
		//    to perform that write operation because in v1, system chaincodes do not have
		//    any endorsement policies to speak of. So if the chaincode can't be invoked
		//    it can't be written to by an invocation of an application chaincode
    //nor to the namespace of a chaincode it cannot invoke
		if writesToNonInvokableSCC {
			return errors.Errorf("chaincode %s attempted to write to the namespace of a system chaincode that cannot be invoked", ccID),
				peer.TxValidationCode_ILLEGAL_WRITESET
		}

		// validate *EACH* read write set according to its chaincode's endorsement policy
		//validate each written namespace against its chaincode's endorsement policy
		for _, ns := range wrNamespace {
			// Get latest chaincode version, vscc and validate policy
			//fetch the chaincode's latest version, its VSCC, and its endorsement policy
			txcc, vscc, policy, err := v.GetInfoForValidate(chdr, ns)
			if err != nil {
				logger.Errorf("GetInfoForValidate for txId = %s returned error: %+v", chdr.TxId, err)
				return err, peer.TxValidationCode_INVALID_OTHER_REASON
			}

			// if the namespace corresponds to the cc that was originally
			// invoked, we check that the version of the cc that was
			// invoked corresponds to the version that lscc has returned
			if ns == ccID && txcc.ChaincodeVersion != ccVer {
				err = errors.Errorf("chaincode %s:%s/%s didn't match %s:%s/%s in lscc", ccID, ccVer, chdr.ChannelId, txcc.ChaincodeName, txcc.ChaincodeVersion, chdr.ChannelId)
				logger.Errorf("%+v", err)
				return err, peer.TxValidationCode_EXPIRED_CHAINCODE
			}

			// do VSCC validation
			ctx := &Context{
				Seq:       seq,                //tx sequence number within the block
				Envelope:  envBytes,           //raw transaction bytes
				Block:     block,              //the block
				TxID:      chdr.TxId,          //transaction ID
				Channel:   chdr.ChannelId,     //channel ID
				Namespace: ns,                 //namespace being validated
				Policy:    policy,             //endorsement policy
				VSCCName:  vscc.ChaincodeName, //VSCC chaincode name
			}

			//the actual validation entry point
			if err = v.VSCCValidateTxForCC(ctx); err != nil {
				switch err.(type) {
				case *commonerrors.VSCCEndorsementPolicyError:
					return err, peer.TxValidationCode_ENDORSEMENT_POLICY_FAILURE
				default:
					return err, peer.TxValidationCode_INVALID_OTHER_REASON
				}
			}
		}
	} else {
		// make sure that we can invoke this system chaincode - if the chaincode
		// cannot be invoked through a proposal to this peer, we have to drop the
		// transaction; if we didn't, we wouldn't know how to decide whether it's
		// valid or not because in v1, system chaincodes have no endorsement policy
		//reject invocations of system chaincodes that are not externally invokable
		if v.sccprovider.IsSysCCAndNotInvokableExternal(ccID) {
			return errors.Errorf("committing an invocation of cc %s is illegal", ccID),
				peer.TxValidationCode_ILLEGAL_WRITESET
		}

		// Get latest chaincode version, vscc and validate policy
		//fetch the latest VSCC and endorsement policy
		_, vscc, policy, err := v.GetInfoForValidate(chdr, ccID)
		if err != nil {
			logger.Errorf("GetInfoForValidate for txId = %s returned error: %+v", chdr.TxId, err)
			return err, peer.TxValidationCode_INVALID_OTHER_REASON
		}

		// validate the transaction as an invocation of this system chaincode;
		// vscc will have to do custom validation for this system chaincode
		// currently, VSCC does custom validation for LSCC only; if an hlf
		// user creates a new system chaincode which is invokable from the outside
		// they have to modify VSCC to provide appropriate validation
		ctx := &Context{
			Seq:       seq,
			Envelope:  envBytes,
			Block:     block,
			TxID:      chdr.TxId,
			Channel:   chdr.ChannelId,
			Namespace: ccID,
			Policy:    policy,
			VSCCName:  vscc.ChaincodeName,
		}
		if err = v.VSCCValidateTxForCC(ctx); err != nil {
			switch err.(type) {
			case *commonerrors.VSCCEndorsementPolicyError:
				return err, peer.TxValidationCode_ENDORSEMENT_POLICY_FAILURE
			default:
				return err, peer.TxValidationCode_INVALID_OTHER_REASON
			}
		}
	}
	logger.Debugf("[%s] VSCCValidateTx completes env bytes %p", chainID, envBytes)
	return nil, peer.TxValidationCode_VALID
}

It is a long stretch of code, but read line by line there is nothing mysterious; the details are annotated inline. The one call to focus on is VSCCValidateTxForCC, analyzed next:

func (v *VsccValidatorImpl) VSCCValidateTxForCC(ctx *Context) error {
	logger.Debug("Validating", ctx, "with plugin")
	//obtain the validation plugin and pass it the context for validation
	err := v.pluginValidator.ValidateWithPlugin(ctx)
	if err == nil {
		return nil
	}
	// If the error is a pluggable validation execution error, cast it to the common errors ExecutionFailureError.
	if e, isExecutionError := err.(*validation.ExecutionFailureError); isExecutionError {
		return &commonerrors.VSCCExecutionFailureError{Err: e}
	}
	// Else, treat it as an endorsement error.
	return &commonerrors.VSCCEndorsementPolicyError{Err: err}
}
func (pv *PluginValidator) ValidateWithPlugin(ctx *Context) error {
	//get or lazily create the plugin
	plugin, err := pv.getOrCreatePlugin(ctx)
	if err != nil {
		return &validation.ExecutionFailureError{
			Reason: fmt.Sprintf("plugin with name %s couldn't be used: %v", ctx.VSCCName, err),
		}
	}
	//run the validation
	err = plugin.Validate(ctx.Block, ctx.Namespace, ctx.Seq, 0, SerializedPolicy(ctx.Policy))
	validityStatus := "valid"
	if err != nil {
		validityStatus = fmt.Sprintf("invalid: %v", err)
	}
	logger.Debug("Transaction", ctx.TxID, "appears to be", validityStatus)
	return err
}
//core/handlers/validation/builtin/default_validation.go/Validate()
func (v *DefaultValidation) Validate(block *common.Block, namespace string, txPosition int, actionPosition int, contextData ...validation.ContextDatum) error {
	if len(contextData) == 0 {
		logger.Panicf("Expected to receive policy bytes in context data")
	}

  //extract the serialized policy from the context data
	serializedPolicy, isSerializedPolicy := contextData[0].(SerializedPolicy)
	if !isSerializedPolicy {
		logger.Panicf("Expected to receive a serialized policy in the first context data")
	}
	if block == nil || block.Data == nil {
		return errors.New("empty block")
	}
	if txPosition >= len(block.Data.Data) {
		return errors.Errorf("block has only %d transactions, but requested tx at position %d", len(block.Data.Data), txPosition)
	}
	if block.Header == nil {
		return errors.Errorf("no block header")
	}

	var err error
	//dispatch to the validator matching the channel's capability level
	switch {
	case v.Capabilities.V1_3Validation():
		err = v.TxValidatorV1_3.Validate(block, namespace, txPosition, actionPosition, serializedPolicy.Bytes())

	case v.Capabilities.V1_2Validation():
		fallthrough

	default:
		err = v.TxValidatorV1_2.Validate(block, namespace, txPosition, actionPosition, serializedPolicy.Bytes())
	}

	logger.Debugf("block %d, namespace: %s, tx %d validation results is: %v", block.Header.Number, namespace, txPosition, err)
	return convertErrorTypeOrPanic(err)
}
//v1.2 implementation
func (vscc *Validator) Validate(
	block *common.Block,
	namespace string,
	txPosition int,
	actionPosition int,
	policyBytes []byte,
) commonerrors.TxValidationError {
	// get the envelope...
	env, err := utils.GetEnvelopeFromBlock(block.Data.Data[txPosition])
	if err != nil {
		logger.Errorf("VSCC error: GetEnvelope failed, err %s", err)
		return policyErr(err)
	}

	// ...and the payload...
	payl, err := utils.GetPayload(env)
	if err != nil {
		logger.Errorf("VSCC error: GetPayload failed, err %s", err)
		return policyErr(err)
	}

	chdr, err := utils.UnmarshalChannelHeader(payl.Header.ChannelHeader)
	if err != nil {
		return policyErr(err)
	}

	// validate the payload type
	if common.HeaderType(chdr.Type) != common.HeaderType_ENDORSER_TRANSACTION {
		logger.Errorf("Only Endorser Transactions are supported, provided type %d", chdr.Type)
		return policyErr(fmt.Errorf("Only Endorser Transactions are supported, provided type %d", chdr.Type))
	}

	// ...and the transaction...
	tx, err := utils.GetTransaction(payl.Data)
	if err != nil {
		logger.Errorf("VSCC error: GetTransaction failed, err %s", err)
		return policyErr(err)
	}

  //extract the chaincode Action payload
	cap, err := utils.GetChaincodeActionPayload(tx.Actions[actionPosition].Payload)
	if err != nil {
		logger.Errorf("VSCC error: GetChaincodeActionPayload failed, err %s", err)
		return policyErr(err)
	}
  //deduplicate endorser identities
	signatureSet, err := vscc.deduplicateIdentity(cap)
	if err != nil {
		return policyErr(err)
	}

	// evaluate the signature set against the policy
	//evaluate the endorsement policy
	err = vscc.policyEvaluator.Evaluate(policyBytes, signatureSet)
	if err != nil {
		logger.Warningf("Endorsement policy failure for transaction txid=%s, err: %s", chdr.GetTxId(), err.Error())
		if len(signatureSet) < len(cap.Action.Endorsements) {
			// Warning: duplicated identities exist, endorsement failure might be cause by this reason
			return policyErr(errors.New(DUPLICATED_IDENTITY_ERROR))
		}
		return policyErr(fmt.Errorf("VSCC error: endorsement policy failure, err: %s", err))
	}

	// do some extra validation that is specific to lscc
	//extra validation specific to the lscc namespace
	if namespace == "lscc" {
		logger.Debugf("VSCC info: doing special validation for LSCC")
		err := vscc.ValidateLSCCInvocation(chdr.ChannelId, env, cap, payl, vscc.capabilities)
		if err != nil {
			logger.Errorf("VSCC error: ValidateLSCCInvocation failed, err %s", err)
			return err
		}
	}

	return nil
}
//v1.3 implementation
func (vscc *Validator) Validate(
	block *common.Block,
	namespace string,
	txPosition int,
	actionPosition int,
	policyBytes []byte,
) commonerrors.TxValidationError {
	vscc.stateBasedValidator.PreValidate(uint64(txPosition), block)

	va, err := vscc.extractValidationArtifacts(block, txPosition, actionPosition)
	if err != nil {
		vscc.stateBasedValidator.PostValidate(namespace, block.Header.Number, uint64(txPosition), err)
		return policyErr(err)
	}

	txverr := vscc.stateBasedValidator.Validate(
		namespace,
		block.Header.Number,
		uint64(txPosition),
		va.rwset,
		va.prp,
		policyBytes,
		va.endorsements,
	)
	if txverr != nil {
		logger.Errorf("VSCC error: stateBasedValidator.Validate failed, err %s", txverr)
		vscc.stateBasedValidator.PostValidate(namespace, block.Header.Number, uint64(txPosition), txverr)
		return txverr
	}

	// do some extra validation that is specific to lscc
	if namespace == "lscc" {
		logger.Debugf("VSCC info: doing special validation for LSCC")
		err := vscc.ValidateLSCCInvocation(va.chdr.ChannelId, va.env, va.cap, va.payl, vscc.capabilities)
		if err != nil {
			logger.Errorf("VSCC error: ValidateLSCCInvocation failed, err %s", err)
			vscc.stateBasedValidator.PostValidate(namespace, block.Header.Number, uint64(txPosition), err)
			return err
		}
	}

	vscc.stateBasedValidator.PostValidate(namespace, block.Header.Number, uint64(txPosition), nil)
	return nil
}

The v1.3 path is visibly shorter than the v1.2 one, but the real difference is in how validation works: v1.3 supports key-level endorsement policies and ultimately calls KeyLevelValidator.Validate(). Since v1.2, the VSCC system chaincode has been packaged as a plugin; at peer startup the handlers.validators.vscc entry in core.yaml is read (defaulting to DefaultValidation), and plugin.Validate() is ultimately invoked. We continue along the v1.2 path:

// NewPolicy creates a new policy based on the policy bytes
func (pr *provider) NewPolicy(data []byte) (policies.Policy, proto.Message, error) {
	sigPolicy := &cb.SignaturePolicyEnvelope{}
	if err := proto.Unmarshal(data, sigPolicy); err != nil {
		return nil, nil, fmt.Errorf("Error unmarshaling to SignaturePolicy: %s", err)
	}

	if sigPolicy.Version != 0 {
		return nil, nil, fmt.Errorf("This evaluator only understands messages of version 0, but version was %d", sigPolicy.Version)
	}

	compiled, err := compile(sigPolicy.Rule, sigPolicy.Identities, pr.deserializer)
	if err != nil {
		return nil, nil, err
	}

	return &policy{
		evaluator:    compiled,
		deserializer: pr.deserializer,
	}, sigPolicy, nil

}
//common/cauthdsl/cauthdsl.go
//the policy evaluator is generated by the function below
// compile recursively builds a go evaluatable function corresponding to the policy specified, remember to call deduplicate on identities before
// passing them to this function for evaluation
func compile(policy *cb.SignaturePolicy, identities []*mb.MSPPrincipal, deserializer msp.IdentityDeserializer) (func([]IdentityAndSignature, []bool) bool, error) {
	if policy == nil {
		return nil, fmt.Errorf("Empty policy element")
	}

	switch t := policy.Type.(type) {
		//NOutOf policy: recursively compile the sub-policies down to SignedBy leaves --- at least N entities must have endorsed
	case *cb.SignaturePolicy_NOutOf_:
		policies := make([]func([]IdentityAndSignature, []bool) bool, len(t.NOutOf.Rules))
		for i, policy := range t.NOutOf.Rules {
			compiledPolicy, err := compile(policy, identities, deserializer)
			if err != nil {
				return nil, err
			}
			policies[i] = compiledPolicy

		}
		return func(signedData []IdentityAndSignature, used []bool) bool {
			grepKey := time.Now().UnixNano()
			cauthdslLogger.Debugf("%p gate %d evaluation starts", signedData, grepKey)
			verified := int32(0)
			_used := make([]bool, len(used))
			//iterate over the compiled sub-policies
			for _, policy := range policies {
				copy(_used, used)
				//check whether the signatures satisfy this sub-policy
				if policy(signedData, _used) {
					//one more sub-policy satisfied
					verified++
					//persist the identity-usage marks only when the sub-policy passed, so a failing sub-policy leaves used untouched
					copy(used, _used)
				}
			}

      //check whether enough sub-policies were satisfied
			if verified >= t.NOutOf.N {
				cauthdslLogger.Debugf("%p gate %d evaluation succeeds", signedData, grepKey)
			} else {
				cauthdslLogger.Debugf("%p gate %d evaluation fails", signedData, grepKey)
			}

      //satisfied iff at least N sub-policies passed
			return verified >= t.NOutOf.N
		}, nil
		//SignedBy policy: a single signature that must match the specified MSP principal
	case *cb.SignaturePolicy_SignedBy:
		//bounds-check the identity index
		if t.SignedBy < 0 || t.SignedBy >= int32(len(identities)) {
			return nil, fmt.Errorf("identity index out of range, requested %v, but identities length is %d", t.SignedBy, len(identities))
		}
		//the MSPPrincipal instance this leaf requires
		signedByID := identities[t.SignedBy]
		return func(signedData []IdentityAndSignature, used []bool) bool {
			cauthdslLogger.Debugf("%p signed by %d principal evaluation starts (used %v)", signedData, t.SignedBy, used)
			//walk the signatures, skipping identities that were already consumed
			for i, sd := range signedData {
				if used[i] {
					cauthdslLogger.Debugf("%p skipping identity %d because it has already been used", signedData, i)
					continue
				}
				if cauthdslLogger.IsEnabledFor(zapcore.DebugLevel) {
					// Unlike most places, this is a huge print statement, and worth checking log level before creating garbage
					cauthdslLogger.Debugf("%p processing identity %d with bytes of %x", signedData, i, sd.Identity)
				}
				identity, err := sd.Identity()
				if err != nil {
					cauthdslLogger.Errorf("Principal deserialization failure (%s) for identity %d", err, i)
					continue
				}
				//does the identity satisfy the principal
				err = identity.SatisfiesPrincipal(signedByID)
				if err != nil {
					cauthdslLogger.Debugf("%p identity %d does not satisfy principal: %s", signedData, i, err)
					continue
				}
				cauthdslLogger.Debugf("%p principal matched by identity %d", signedData, i)
				//the principal matched; now verify that the signature itself is genuine
				err = sd.Verify()
				if err != nil {
					cauthdslLogger.Debugf("%p signature for identity %d is invalid: %s", signedData, i, err)
					continue
				}
				cauthdslLogger.Debugf("%p principal evaluation succeeds for identity %d", signedData, i)
				used[i] = true
				return true
			}
			cauthdslLogger.Debugf("%p principal evaluation fails", signedData)
			return false
		}, nil
	default:
		return nil, fmt.Errorf("Unknown type: %T:%v", t, t)
	}
}
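
The recursion is easier to see on a toy version that drops identities, signatures, and the used bookkeeping, keeping only the gate logic (all names here are ours, not Fabric's):

package main

import "fmt"

// evaluator is the compiled form of one policy node.
type evaluator func(signers map[string]bool) bool

// signedBy compiles a leaf: satisfied if the named signer is present.
func signedBy(name string) evaluator {
	return func(signers map[string]bool) bool { return signers[name] }
}

// nOutOf compiles an inner node: satisfied if at least n children pass,
// the same counting loop as the SignaturePolicy_NOutOf_ branch above.
func nOutOf(n int, subs ...evaluator) evaluator {
	return func(signers map[string]bool) bool {
		verified := 0
		for _, sub := range subs {
			if sub(signers) {
				verified++
			}
		}
		return verified >= n
	}
}

func main() {
	// AND(Org1, OR(Org2, Org3)) == 2-of(Org1, 1-of(Org2, Org3))
	policy := nOutOf(2, signedBy("Org1MSP"), nOutOf(1, signedBy("Org2MSP"), signedBy("Org3MSP")))
	fmt.Println(policy(map[string]bool{"Org1MSP": true, "Org3MSP": true})) // true
	fmt.Println(policy(map[string]bool{"Org3MSP": true}))                  // false
}

Note that the real compile additionally threads the used slice through the leaves so one signature cannot satisfy two principals, which is exactly what the copy(_used, used)/copy(used, _used) dance above implements.
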
//core/committer/txvalidator/plugin_validator.go
func (id *PolicyEvaluator) Evaluate(policyBytes []byte, signatureSet []*common.SignedData) error {
	pp := cauthdsl.NewPolicyProvider(id.IdentityDeserializer)
	policy, _, err := pp.NewPolicy(policyBytes)
	if err != nil {
		return err
	}
	return policy.Evaluate(signatureSet)
}
//there are many policy implementations; this Evaluate is the cauthdsl-backed one (common/cauthdsl/policy.go)
// Evaluate takes a set of SignedData and evaluates whether this set of signatures satisfies the policy
func (p *policy) Evaluate(signatureSet []*cb.SignedData) error {
	if p == nil {
		return fmt.Errorf("No such policy")
	}
	idAndS := make([]IdentityAndSignature, len(signatureSet))
	for i, sd := range signatureSet {
		idAndS[i] = &deserializeAndVerify{
			signedData:   sd,
			deserializer: p.deserializer,
		}
	}

	ok := p.evaluator(deduplicate(idAndS), make([]bool, len(signatureSet)))
	if !ok {
		return errors.New("signature set did not satisfy policy")
	}
	return nil
}

The SatisfiesPrincipal check ultimately lands in satisfiesPrincipalInternalPreV13 of the bccspmsp implementation in the msp package:

func (msp *bccspmsp) satisfiesPrincipalInternalPreV13(id Identity, principal *m.MSPPrincipal) error {
	switch principal.PrincipalClassification {
	// in this case, we have to check whether the
	// identity has a role in the msp - member or admin
	case m.MSPPrincipal_ROLE:
		// Principal contains the msp role
		mspRole := &m.MSPRole{}
		err := proto.Unmarshal(principal.Principal, mspRole)
		if err != nil {
			return errors.Wrap(err, "could not unmarshal MSPRole from principal")
		}

		// at first, we check whether the MSP
		// identifier is the same as that of the identity
		if mspRole.MspIdentifier != msp.name {
			return errors.Errorf("the identity is a member of a different MSP (expected %s, got %s)", mspRole.MspIdentifier, id.GetMSPIdentifier())
		}

		// now we validate the different msp roles
		switch mspRole.Role {
		case m.MSPRole_MEMBER:
	......
	case m.MSPPrincipal_IDENTITY:
......

		return errors.New("The identities do not match")
	case m.MSPPrincipal_ORGANIZATION_UNIT:
......
		// we then check if the identity is valid with this MSP
		// and fail if it is not
		err = msp.Validate(id)
		if err != nil {
			return err
		}

		// now we check whether any of this identity's OUs match the requested one
		for _, ou := range id.GetOrganizationalUnits() {
......
	default:
		return errors.Errorf("invalid principal type %d", int32(principal.PrincipalClassification))
	}
}

We will not expand on that here; it belongs with the crypto module analysis. Finally, the transaction validation path calls:

func (vscc *Validator) ValidateLSCCInvocation(
	chid string,
	env *common.Envelope,
	cap *pb.ChaincodeActionPayload,
	payl *common.Payload,
	ac channelconfig.ApplicationCapabilities,
) commonerrors.TxValidationError {
	//validate the legality of the arguments
	cpp, err := utils.GetChaincodeProposalPayload(cap.ChaincodeProposalPayload)
	if err != nil {
		logger.Errorf("VSCC error: GetChaincodeProposalPayload failed, err %s", err)
		return policyErr(err)
	}

	cis := &pb.ChaincodeInvocationSpec{}
	err = proto.Unmarshal(cpp.Input, cis)
	if err != nil {
		logger.Errorf("VSCC error: Unmarshal ChaincodeInvocationSpec failed, err %s", err)
		return policyErr(err)
	}

	if cis.ChaincodeSpec == nil ||
		cis.ChaincodeSpec.Input == nil ||
		cis.ChaincodeSpec.Input.Args == nil {
		logger.Errorf("VSCC error: committing invalid vscc invocation")
		return policyErr(fmt.Errorf("malformed chaincode invocation spec"))
	}

	lsccFunc := string(cis.ChaincodeSpec.Input.Args[0])
	lsccArgs := cis.ChaincodeSpec.Input.Args[1:]

	logger.Debugf("VSCC info: ValidateLSCCInvocation acting on %s %#v", lsccFunc, lsccArgs)

  //handle DEPLOY and UPGRADE
	switch lsccFunc {
	case lscc.UPGRADE, lscc.DEPLOY:
		logger.Debugf("VSCC info: validating invocation of lscc function %s on arguments %#v", lsccFunc, lsccArgs)

		if len(lsccArgs) < 2 {
			return policyErr(fmt.Errorf("Wrong number of arguments for invocation lscc(%s): expected at least 2, received %d", lsccFunc, len(lsccArgs)))
		}

		if (!ac.PrivateChannelData() && len(lsccArgs) > 5) ||
			(ac.PrivateChannelData() && len(lsccArgs) > 6) {
			return policyErr(fmt.Errorf("Wrong number of arguments for invocation lscc(%s): received %d", lsccFunc, len(lsccArgs)))
		}

		cdsArgs, err := utils.GetChaincodeDeploymentSpec(lsccArgs[1], platforms.NewRegistry(
			// XXX We should definitely _not_ have this external dependency in VSCC
			// as adding a platform could cause non-determinism.  This is yet another
			// reason why all of this custom LSCC validation at commit time has no
			// long term hope of staying deterministic and needs to be removed.
			&golang.Platform{},
			&node.Platform{},
			&java.Platform{},
			&car.Platform{},
		))

		if err != nil {
			return policyErr(fmt.Errorf("GetChaincodeDeploymentSpec error %s", err))
		}

		if cdsArgs == nil || cdsArgs.ChaincodeSpec == nil || cdsArgs.ChaincodeSpec.ChaincodeId == nil ||
			cap.Action == nil || cap.Action.ProposalResponsePayload == nil {
			return policyErr(fmt.Errorf("VSCC error: invocation of lscc(%s) does not have appropriate arguments", lsccFunc))
		}

		// validate chaincode name
		ccName := cdsArgs.ChaincodeSpec.ChaincodeId.Name
		// it must comply with the lscc.ChaincodeNameRegExp
		if !lscc.ChaincodeNameRegExp.MatchString(ccName) {
			return policyErr(errors.Errorf("invalid chaincode name '%s'", ccName))
		}
		// it can't match the name of one of the system chaincodes
		if _, in := systemChaincodeNames[ccName]; in {
			return policyErr(errors.Errorf("chaincode name '%s' is reserved for system chaincodes", ccName))
		}

		// validate chaincode version
		ccVersion := cdsArgs.ChaincodeSpec.ChaincodeId.Version
		// it must comply with the lscc.ChaincodeVersionRegExp
		if !lscc.ChaincodeVersionRegExp.MatchString(ccVersion) {
			return policyErr(errors.Errorf("invalid chaincode version '%s'", ccVersion))
		}

		// get the rwset
		pRespPayload, err := utils.GetProposalResponsePayload(cap.Action.ProposalResponsePayload)
		if err != nil {
			return policyErr(fmt.Errorf("GetProposalResponsePayload error %s", err))
		}
		if pRespPayload.Extension == nil {
			return policyErr(fmt.Errorf("nil pRespPayload.Extension"))
		}
		respPayload, err := utils.GetChaincodeAction(pRespPayload.Extension)
		if err != nil {
			return policyErr(fmt.Errorf("GetChaincodeAction error %s", err))
		}
		txRWSet := &rwsetutil.TxRwSet{}
		if err = txRWSet.FromProtoBytes(respPayload.Results); err != nil {
			return policyErr(fmt.Errorf("txRWSet.FromProtoBytes error %s", err))
		}

		// extract the rwset for lscc
		var lsccrwset *kvrwset.KVRWSet
		for _, ns := range txRWSet.NsRwSets {
			logger.Debugf("Namespace %s", ns.NameSpace)
			if ns.NameSpace == "lscc" {
				lsccrwset = ns.KvRwSet
				break
			}
		}

		// retrieve from the ledger the entry for the chaincode at hand
		cdLedger, ccExistsOnLedger, err := vscc.getInstantiatedCC(chid, cdsArgs.ChaincodeSpec.ChaincodeId.Name)
		if err != nil {
			return &commonerrors.VSCCExecutionFailureError{Err: err}
		}

		/******************************************/
		/* security check 0 - validation of rwset */
		/******************************************/
		// there has to be a write-set
		if lsccrwset == nil {
			return policyErr(fmt.Errorf("No read write set for lscc was found"))
		}
		// there must be at least one write
		if len(lsccrwset.Writes) < 1 {
			return policyErr(fmt.Errorf("LSCC must issue at least one single putState upon deploy/upgrade"))
		}
		// the first key name must be the chaincode id provided in the deployment spec
		if lsccrwset.Writes[0].Key != cdsArgs.ChaincodeSpec.ChaincodeId.Name {
			return policyErr(fmt.Errorf("expected key %s, found %s", cdsArgs.ChaincodeSpec.ChaincodeId.Name, lsccrwset.Writes[0].Key))
		}
		// the value must be a ChaincodeData struct
		cdRWSet := &ccprovider.ChaincodeData{}
		err = proto.Unmarshal(lsccrwset.Writes[0].Value, cdRWSet)
		if err != nil {
			return policyErr(fmt.Errorf("unmarhsalling of ChaincodeData failed, error %s", err))
		}
		// the chaincode name in the lsccwriteset must match the chaincode name in the deployment spec
		if cdRWSet.Name != cdsArgs.ChaincodeSpec.ChaincodeId.Name {
			return policyErr(fmt.Errorf("expected cc name %s, found %s", cdsArgs.ChaincodeSpec.ChaincodeId.Name, cdRWSet.Name))
		}
		// the chaincode version in the lsccwriteset must match the chaincode version in the deployment spec
		if cdRWSet.Version != cdsArgs.ChaincodeSpec.ChaincodeId.Version {
			return policyErr(fmt.Errorf("expected cc version %s, found %s", cdsArgs.ChaincodeSpec.ChaincodeId.Version, cdRWSet.Version))
		}
		// it must only write to 2 namespaces: LSCC's and the cc that we are deploying/upgrading
		for _, ns := range txRWSet.NsRwSets {
			if ns.NameSpace != "lscc" && ns.NameSpace != cdRWSet.Name && len(ns.KvRwSet.Writes) > 0 {
				return policyErr(fmt.Errorf("LSCC invocation is attempting to write to namespace %s", ns.NameSpace))
			}
		}

		logger.Debugf("Validating %s for cc %s version %s", lsccFunc, cdRWSet.Name, cdRWSet.Version)

		switch lsccFunc {
		case lscc.DEPLOY:

			/******************************************************************/
			/* security check 1 - cc not in the LCCC table of instantiated cc */
			/******************************************************************/
			if ccExistsOnLedger {
				return policyErr(fmt.Errorf("Chaincode %s is already instantiated", cdsArgs.ChaincodeSpec.ChaincodeId.Name))
			}

			/****************************************************************************/
			/* security check 2 - validation of rwset (and of collections if enabled) */
			/****************************************************************************/
			if ac.PrivateChannelData() {
				// do extra validation for collections
				err := vscc.validateRWSetAndCollection(lsccrwset, cdRWSet, lsccArgs, lsccFunc, ac, chid)
				if err != nil {
					return err
				}
			} else {
				// there can only be a single ledger write
				if len(lsccrwset.Writes) != 1 {
					return policyErr(fmt.Errorf("LSCC can only issue a single putState upon deploy"))
				}
			}

			/*****************************************************/
			/* security check 3 - check the instantiation policy */
			/*****************************************************/
			pol := cdRWSet.InstantiationPolicy
			if pol == nil {
				return policyErr(fmt.Errorf("no instantiation policy was specified"))
			}
			// FIXME: could we actually pull the cds package from the
			// file system to verify whether the policy that is specified
			// here is the same as the one on disk?
			// PROS: we prevent attacks where the policy is replaced
			// CONS: this would be a point of non-determinism
			err := vscc.checkInstantiationPolicy(chid, env, pol, payl)
			if err != nil {
				return err
			}

		case lscc.UPGRADE:
			/**************************************************************/
			/* security check 1 - cc in the LCCC table of instantiated cc */
			/**************************************************************/
			if !ccExistsOnLedger {
				return policyErr(fmt.Errorf("Upgrading non-existent chaincode %s", cdsArgs.ChaincodeSpec.ChaincodeId.Name))
			}

			/**********************************************************/
			/* security check 2 - existing cc's version was different */
			/**********************************************************/
			if cdLedger.Version == cdsArgs.ChaincodeSpec.ChaincodeId.Version {
				return policyErr(fmt.Errorf("Existing version of the cc on the ledger (%s) should be different from the upgraded one", cdsArgs.ChaincodeSpec.ChaincodeId.Version))
			}

			/****************************************************************************/
		/* security check 3 - validation of rwset (and of collections if enabled) */
			/****************************************************************************/
			// Only in v1.2, a collection can be updated during a chaincode upgrade
			if ac.V1_2Validation() {
				// do extra validation for collections
				err := vscc.validateRWSetAndCollection(lsccrwset, cdRWSet, lsccArgs, lsccFunc, ac, chid)
				if err != nil {
					return err
				}
			} else {
				// there can only be a single ledger write
				if len(lsccrwset.Writes) != 1 {
					return policyErr(fmt.Errorf("LSCC can only issue a single putState upon upgrade"))
				}
			}

			/*****************************************************/
			/* security check 4 - check the instantiation policy */
			/*****************************************************/
			pol := cdLedger.InstantiationPolicy
			if pol == nil {
				return policyErr(fmt.Errorf("No instantiation policy was specified"))
			}
			// FIXME: could we actually pull the cds package from the
			// file system to verify whether the policy that is specified
			// here is the same as the one on disk?
			// PROS: we prevent attacks where the policy is replaced
			// CONS: this would be a point of non-determinism
			err := vscc.checkInstantiationPolicy(chid, env, pol, payl)
			if err != nil {
				return err
			}

			/******************************************************************/
			/* security check 5 - check the instantiation policy in the rwset */
			/******************************************************************/
			if ac.V1_1Validation() {
				polNew := cdRWSet.InstantiationPolicy
				if polNew == nil {
					return policyErr(fmt.Errorf("No instantiation policy was specified"))
				}

				// no point in checking it again if they are the same policy
				if !bytes.Equal(polNew, pol) {
					err = vscc.checkInstantiationPolicy(chid, env, polNew, payl)
					if err != nil {
						return err
					}
				}
			}
		}

		// all is good!
		return nil
	default:
		return policyErr(fmt.Errorf("VSCC error: committing an invocation of function %s of lscc is invalid", lsccFunc))
	}
}
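
The policyErr helper used throughout the function is not shown in this excerpt. Judging from the error types above (note the contrasting VSCCExecutionFailureError returned when getInstantiatedCC fails), it is presumably a thin wrapper along these lines:

// presumably defined next to the validator: wraps a validation failure as an
// endorsement-policy error so that callers can distinguish policy violations
// from execution failures (VSCCExecutionFailureError)
func policyErr(err error) *commonerrors.VSCCEndorsementPolicyError {
	return &commonerrors.VSCCEndorsementPolicyError{Err: err}
}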

To summarize: a validation policy is first compiled, and the set of evaluation functions produced for each policy type is handed back to the caller, which uses that set to check the transaction's endorsements against the policy. The BCCSP-based MSP implementation's satisfiesPrincipalInternalPreV13() is then invoked to check each principal by role, identity, and organizational unit: whether the signer belongs to the same MSP, whether its certificate is valid, whether it is an Admin where the principal requires one, whether the identity certificate is consistent, and whether the organizational-unit information matches.
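
A rough, self-contained illustration of those checks follows; the types and field names are simplified stand-ins of my own, not Fabric's actual MSP types, so treat this as a sketch of the logic rather than the real implementation:

package main

import (
	"errors"
	"fmt"
)

// Role mirrors the MEMBER/ADMIN distinction in an MSPRole principal.
type Role int

const (
	Member Role = iota
	Admin
)

// Identity is a simplified stand-in for an MSP identity.
type Identity struct {
	MSPID     string // MSP the certificate was issued under
	CertValid bool   // result of certificate-chain validation
	IsAdmin   bool   // listed among the MSP's admin certificates
}

// Principal is a simplified stand-in for an MSPRole principal.
type Principal struct {
	MSPID string
	Role  Role
}

// satisfiesPrincipal sketches the pre-v1.3 checks: same MSP, valid
// certificate, and admin membership when the principal demands ADMIN.
func satisfiesPrincipal(id Identity, p Principal) error {
	if id.MSPID != p.MSPID {
		return fmt.Errorf("identity MSP %q does not match principal MSP %q", id.MSPID, p.MSPID)
	}
	if !id.CertValid {
		return errors.New("identity certificate failed validation")
	}
	if p.Role == Admin && !id.IsAdmin {
		return errors.New("identity is not an admin of the MSP")
	}
	return nil
}

func main() {
	id := Identity{MSPID: "Org1MSP", CertValid: true}
	fmt.Println(satisfiesPrincipal(id, Principal{MSPID: "Org1MSP", Role: Member})) // <nil>
	fmt.Println(satisfiesPrincipal(id, Principal{MSPID: "Org1MSP", Role: Admin}))  // error: not an admin
}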

VI. Summary

Block commit is the result of repeated validation and simulated execution, and once it completes, listeners waiting on the commit-height condition variable are notified of the update. The whole flow is intricate and detailed; do not skip a single line of code. I learned that the hard way during this analysis. Once the block's transaction data has been validated and has passed the VSCC and MVCC checks, it can be committed to the block data files: the block index database is updated, private data is saved to the private data store, valid transaction data is synchronized to the state database, and finally the endorsed valid transactions are written to the history database. Read the code carefully: before the source code, there are no secrets.
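
To tie the pieces together, here is a minimal sketch of that final step from the caller's side, assuming a LedgerCommitter (core/committer) and a *ledger.BlockAndPvtData that the layers above have already assembled and validated; error handling and event wiring are trimmed:

package example

import (
	"github.com/hyperledger/fabric/core/committer"
	"github.com/hyperledger/fabric/core/ledger"
)

// commitValidated hands a validated block (plus its private data) to the
// committer; CommitWithPvtData appends the block to the block store, updates
// the index and state databases, and stores the private data.
func commitValidated(lc *committer.LedgerCommitter, data *ledger.BlockAndPvtData) error {
	return lc.CommitWithPvtData(data, &ledger.CommitOptions{})
}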