Ethereum Source Code: The Downloader

Downloader Source Code Analysis

1. Introduction

The downloader is Ethereum's main synchronisation module. Every 10s it picks, from its connected peers, the one with the highest totalDifficulty and syncs with it; a sync is also triggered whenever a new peer joins.
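As a minimal sketch, assuming a simplified peerInfo type rather than the real eth.peerSet, "best peer" here just means the peer advertising the largest total difficulty:

package main

import (
	"fmt"
	"math/big"
)

// peerInfo is a simplified stand-in for the data a peer advertises at handshake.
type peerInfo struct {
	id string
	td *big.Int // advertised total difficulty of the peer's head block
}

// bestPeer returns the peer advertising the highest total difficulty,
// mirroring the idea behind pm.peers.BestPeer().
func bestPeer(peers []peerInfo) *peerInfo {
	var best *peerInfo
	for i := range peers {
		if best == nil || peers[i].td.Cmp(best.td) > 0 {
			best = &peers[i]
		}
	}
	return best
}

func main() {
	peers := []peerInfo{
		{"a", big.NewInt(1000)},
		{"b", big.NewInt(1200)},
		{"c", big.NewInt(900)},
	}
	fmt.Println("best peer:", bestPeer(peers).id) // prints: best peer: b
}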

2. Source Code Analysis

The most frequent user of the downloader module in a node is the ProtocolManager's sync loop pm.syncer(), which forces a synchronisation cycle every 10s and also runs one whenever a new peer connects.

// syncer is responsible for periodically synchronising with the network, both
// downloading hashes and blocks as well as handling the announcement handler.
func (pm *ProtocolManager) syncer() {
	// Start and ensure cleanup of sync mechanisms
	pm.fetcher.Start()
	defer pm.fetcher.Stop()
	defer pm.downloader.Terminate()
	// forceSyncCycle = 10s
	// Wait for different events to fire synchronisation operations
	forceSync := time.NewTicker(forceSyncCycle)
	defer forceSync.Stop()

	for {
		select {
		case <-pm.newPeerCh:
			// Make sure we have peers to select from, then sync
			if pm.peers.Len() < minDesiredPeerCount {
				break
			}
			go pm.synchronise(pm.peers.BestPeer())

		case <-forceSync.C:
			// Force a sync even if not enough peers are present
			go pm.synchronise(pm.peers.BestPeer())

		case <-pm.noMorePeers:
			return
		}
	}
}

The above is the syncer() method; it in turn calls pm.synchronise(pm.peers.BestPeer()).

// synchronise tries to sync up our local block chain with a remote peer.
func (pm *ProtocolManager) synchronise(peer *peer) {
	// Short circuit if no peers are available
	if peer == nil {
		return
	}
	// Make sure the peer's TD is higher than our own
	currentBlock := pm.blockchain.CurrentBlock()
	td := pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())

	pHead, pTd := peer.Head()
	if pTd.Cmp(td) <= 0 {
		return
	}
	// Otherwise try to sync with the downloader
	mode := downloader.FullSync
	if atomic.LoadUint32(&pm.fastSync) == 1 {
		// Fast sync was explicitly requested, and explicitly granted
		mode = downloader.FastSync
	} else if currentBlock.NumberU64() == 0 && pm.blockchain.CurrentFastBlock().NumberU64() > 0 {
		// The database seems empty as the current block is the genesis. Yet the fast
		// block is ahead, so fast sync was enabled for this node at a certain point.
		// The only scenario where this can happen is if the user manually (or via a
		// bad block) rolled back a fast sync node below the sync point. In this case
		// however it's safe to reenable fast sync.
		atomic.StoreUint32(&pm.fastSync, 1)
		mode = downloader.FastSync
	}
	// In fast sync, if the TD of our current fast block is already at least the remote peer's, return right away
	if mode == downloader.FastSync {
		// Make sure the peer's total difficulty we are synchronizing is higher.
		if pm.blockchain.GetTdByHash(pm.blockchain.CurrentFastBlock().Hash()).Cmp(pTd) >= 0 {
			return
		}
	}

	// Run the sync cycle, and disable fast sync if we've went past the pivot block
	if err := pm.downloader.Synchronise(peer.id, pHead, pTd, mode); err != nil {
		return
	}
	if atomic.LoadUint32(&pm.fastSync) == 1 {
		log.Info("Fast sync complete, auto disabling")
		atomic.StoreUint32(&pm.fastSync, 0)
	}
	atomic.StoreUint32(&pm.acceptTxs, 1) // Mark initial sync done
	if head := pm.blockchain.CurrentBlock(); head.NumberU64() > 0 {
		// We've completed a sync cycle, notify all peers of new state. This path is
		// essential in star-topology networks where a gateway node needs to notify
		// all its out-of-date peers of the availability of a new block. This failure
		// scenario will most often crop up in private and hackathon networks with
		// degenerate connectivity, but it should be healthy for the mainnet too to
		// more reliably update peers or the local TD state.
		go pm.BroadcastBlock(head, false)
	}
}

pm.synchronise() reads the local chain's TD; if the remote peer's TD is not higher, the method simply returns. Otherwise the remote peer has more data than we do, so after choosing the sync mode it calls pm.downloader.Synchronise(peer.id, pHead, pTd, mode), which is the real entry point into the downloader module.

// Synchronise tries to sync up our local block chain with a remote peer, both
// adding various sanity checks as well as wrapping it with various log entries.
func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
	err := d.synchronise(id, head, td, mode)
	..................................
}
// synchronise will select the peer and use it for synchronising. If an empty string is given
// it will use the best peer possible and synchronize if its TD is higher than our own. If any of the
// checks fail an error will be returned. This method is synchronous
func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error {
	// Mock out the synchronisation if testing
	if d.synchroniseMock != nil {
		return d.synchroniseMock(id, hash)
	}
	// Make sure only one goroutine is ever allowed past this point at once
	if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
		return errBusy
	}
	defer atomic.StoreInt32(&d.synchronising, 0)

	// Post a user notification of the sync (only once per session)
	if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
		log.Info("Block synchronisation started")
	}
	// Reset the queue, peer set and wake channels to clean any internal leftover state
	d.queue.Reset()
	// iterate over the peerSet and reset every peer
	d.peers.Reset()
	// drain any leftover data from the wake and delivery channels --------------
	for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
		select {
		case <-ch:
		default:
		}
	}
	for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh} {
		for empty := false; !empty; {
			select {
			case <-ch:
			default:
				empty = true
			}
		}
	}
	for empty := false; !empty; {
		select {
		case <-d.headerProcCh:
		default:
			empty = true
		}
	}
	// channels drained ---------------------------------------------------------
	// Create cancel channel for aborting mid-flight and mark the master peer
	d.cancelLock.Lock()
	d.cancelCh = make(chan struct{})
	d.cancelPeer = id
	d.cancelLock.Unlock()

	defer d.Cancel() // No matter what, we can't leave the cancel channel open

	// Set the requested sync mode, unless it's forbidden
	d.mode = mode

	// Retrieve the origin peer and initiate the downloading process
	p := d.peers.Peer(id)
	if p == nil {
		return errUnknownPeer
	}
	return d.syncWithPeer(p, hash, td)
}

The method above belongs to the downloader module and performs the preparation work for a sync, mainly:

1. Flip the sync state with atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) to guarantee that only one sync goroutine runs at a time

2. Call d.queue.Reset() and d.peers.Reset() to clear leftover internal state, and drain the wake and delivery channels so this sync starts clean (the drain idiom is sketched right after this list)

3. Finally call d.syncWithPeer(p, hash, td) to synchronise with the remote peer
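The channel clearing in synchronise() uses Go's non-blocking select-with-default idiom; here is a minimal standalone sketch of the same pattern:

package main

import "fmt"

// drain removes any buffered values from ch without ever blocking,
// which is what synchronise() does for the wake and data channels.
func drain(ch chan int) {
	for {
		select {
		case v := <-ch:
			fmt.Println("discarded leftover value:", v)
		default:
			return // channel empty, nothing left to do
		}
	}
}

func main() {
	ch := make(chan int, 3)
	ch <- 1
	ch <- 2
	drain(ch)
	fmt.Println("remaining:", len(ch)) // remaining: 0
}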

// syncWithPeer starts a block synchronization based on the hash chain from the
// specified peer and head hash.
func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) {
	// Post a StartEvent so that other modules (e.g. the miner) pause their work
	d.mux.Post(StartEvent{})
	defer func() {
		// reset on error
		if err != nil {
			d.mux.Post(FailedEvent{err})
		} else {
			d.mux.Post(DoneEvent{})
		}
	}()
	if p.version < 62 {
		return errTooOld
	}

	log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", d.mode)
	defer func(start time.Time) {
		log.Debug("Synchronisation terminated", "elapsed", time.Since(start))
	}(time.Now())
	// Ask the remote peer for the header of its current head block
	// Look up the sync boundaries: the common ancestor and the target block
	latest, err := d.fetchHeight(p)
	if err != nil {
		return err
	}
	// the remote peer's chain height
	height := latest.Number.Uint64()
	// find the common ancestor of the two chains
	origin, err := d.findAncestor(p, latest)
	if err != nil {
		return err
	}
	d.syncStatsLock.Lock()
	if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
		d.syncStatsChainOrigin = origin
	}
	d.syncStatsChainHeight = height
	d.syncStatsLock.Unlock()

	// Ensure our origin point is below any fast sync pivot point
	// fsMinFullBlocks = 64
	pivot := uint64(0)
	if d.mode == FastSync {
		if height <= uint64(fsMinFullBlocks) {
			origin = 0
		} else {
			pivot = height - uint64(fsMinFullBlocks)
			if pivot <= origin {
				origin = pivot - 1
			}
		}
	}
	d.committed = 1
	if d.mode == FastSync && pivot != 0 {
		d.committed = 0
	}
	// Initiate the sync using a concurrent header and content retrieval algorithm
	d.queue.Prepare(origin+1, d.mode)
	if d.syncInitHook != nil {
		d.syncInitHook(origin, height)
	}

	fetchers := []func() error{
		func() error { return d.fetchHeaders(p, origin+1, pivot) }, // Headers are always retrieved
		func() error { return d.fetchBodies(origin + 1) },          // Bodies are retrieved during normal and fast sync
		func() error { return d.fetchReceipts(origin + 1) },        // Receipts are retrieved during fast sync
		func() error { return d.processHeaders(origin+1, pivot, td) },
	}
	if d.mode == FastSync {
		fetchers = append(fetchers, func() error { return d.processFastSyncContent(latest) })
	} else if d.mode == FullSync {
		fetchers = append(fetchers, d.processFullSyncContent)
	}
	return d.spawnSync(fetchers)
}

The method above performs the following main steps:

1. Post a StartEvent so that related activities (such as mining) pause

2. Call d.findAncestor(p, latest) to find the common ancestor of the local and remote chains

3. Launch the fetchers [d.fetchHeaders(p, origin+1, pivot), d.fetchBodies(origin + 1), d.fetchReceipts(origin + 1), d.processHeaders(origin+1, pivot, td)] plus d.processFastSyncContent(latest) or d.processFullSyncContent; the flow between these methods is shown below:

(Figure: flow of the header/body/receipt fetchers and the processing goroutines)
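The last line of syncWithPeer hands these fetchers to d.spawnSync. Conceptually, spawnSync runs each fetcher in its own goroutine and reports the first error; below is a simplified sketch of that pattern (not the real implementation, which also closes the queue and coordinates cancellation):

package main

import (
	"errors"
	"fmt"
	"sync"
)

// runFetchers starts each fetcher in its own goroutine, waits for all of
// them to finish, and reports the first error that occurred.
func runFetchers(fetchers []func() error) error {
	errc := make(chan error, len(fetchers))
	var wg sync.WaitGroup
	for _, fn := range fetchers {
		wg.Add(1)
		go func(fn func() error) {
			defer wg.Done()
			errc <- fn()
		}(fn)
	}
	wg.Wait()
	close(errc)

	for err := range errc {
		if err != nil {
			return err // first failure wins
		}
	}
	return nil
}

func main() {
	err := runFetchers([]func() error{
		func() error { return nil },
		func() error { return errors.New("body fetch failed") },
	})
	fmt.Println(err) // body fetch failed
}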

The sections below walk through d.fetchHeaders(p, origin+1, pivot), d.fetchBodies(origin + 1), d.fetchReceipts(origin + 1), d.processHeaders(origin+1, pivot, td) and d.processFastSyncContent(latest) / d.processFullSyncContent one by one.

d.fetchHeaders

First, look at how the getHeaders helper is defined:

getHeaders := func(from uint64) {
	request = time.Now()
	ttl = d.requestTTL()
	timeout.Reset(ttl)
	if skeleton {
		p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from)
		// MaxSkeletonSize = 128 MaxHeaderFetch  = 192
		go p.peer.RequestHeadersByNumber(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
	} else {
		p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from)
		go p.peer.RequestHeadersByNumber(from, MaxHeaderFetch, 0, false)
	}
}
// Start pulling the header chain skeleton until all is done
getHeaders(from)

getHeaders can request headers in two ways. When skeleton is true (the default), it first requests a header skeleton: starting at block from+191, it asks for one header every 192 blocks, up to 128 headers in total. When skeleton is false, it fetches 192 consecutive headers starting at from.
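A quick worked example of those skeleton request parameters (MaxHeaderFetch = 192, MaxSkeletonSize = 128): the requested skeleton header numbers start at from+191 and are spaced 192 apart, so one round covers 128*192 = 24576 headers. A small sketch that just reproduces the arithmetic:

package main

import "fmt"

const (
	MaxHeaderFetch  = 192 // headers per full batch
	MaxSkeletonSize = 128 // skeleton headers per request
)

// skeletonNumbers lists the block numbers requested by
// RequestHeadersByNumber(from+MaxHeaderFetch-1, MaxSkeletonSize, MaxHeaderFetch-1, false).
func skeletonNumbers(from uint64) []uint64 {
	nums := make([]uint64, 0, MaxSkeletonSize)
	for i := 0; i < MaxSkeletonSize; i++ {
		// first number is from+191, then one header every 192 blocks (skip = 191)
		nums = append(nums, from+uint64(MaxHeaderFetch)-1+uint64(i*MaxHeaderFetch))
	}
	return nums
}

func main() {
	nums := skeletonNumbers(1)
	fmt.Println(nums[0], nums[1], nums[len(nums)-1]) // 192 384 24576
}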

for {
	select {
	case <-d.cancelCh:
		return errCancelHeaderFetch

	case packet := <-d.headerCh:
		// Make sure the active peer is giving us the skeleton headers
		if packet.PeerId() != p.id {
			log.Debug("Received skeleton from incorrect peer", "peer", packet.PeerId())
			break
		}
		headerReqTimer.UpdateSince(request)
		timeout.Stop()

		// If the skeleton's finished, pull any remaining head headers directly from the origin
		if packet.Items() == 0 && skeleton {
			skeleton = false
			getHeaders(from)
			continue
		}
		// If no more headers are inbound, notify the content fetchers and return
		if packet.Items() == 0 {
			// Don't abort header fetches while the pivot is downloading
			if atomic.LoadInt32(&d.committed) == 0 && pivot <= from {
				p.log.Debug("No headers, waiting for pivot commit")
				select {
				//fsHeaderContCheck      = 3 * time.Second
				case <-time.After(fsHeaderContCheck):
					getHeaders(from)
					continue
				case <-d.cancelCh:
					return errCancelHeaderFetch
				}
			}
			// Pivot done (or not in fast sync) and no more headers, terminate the process
			p.log.Debug("No more headers available")
			select {
			case d.headerProcCh <- nil:
				return nil
			case <-d.cancelCh:
				return errCancelHeaderFetch
			}
		}
		headers := packet.(*headerPack).headers

		// If we received a skeleton batch, resolve internals concurrently
		if skeleton {
			filled, proced, err := d.fillHeaderSkeleton(from, headers)
			if err != nil {
				p.log.Debug("Skeleton chain invalid", "err", err)
				return errInvalidChain
			}
			headers = filled[proced:]
			from += uint64(proced)
		} 
        .................................................
		// Insert all the new headers and fetch the next batch
		if len(headers) > 0 {
			p.log.Trace("Scheduling new headers", "count", len(headers), "from", from)
			select {
			case d.headerProcCh <- headers:
			case <-d.cancelCh:
				return errCancelHeaderFetch
			}
			from += uint64(len(headers))
			getHeaders(from)
		}
        ................................................
}

The loop above handles the results of getHeaders; the main logic is:

1. If packet.Items() == 0 while skeleton is still true, the skeleton phase is complete: skeleton is set to false and the remaining headers are fetched sequentially;
2. If a skeleton batch was received, d.fillHeaderSkeleton(from, headers) downloads headers from other peers to fill in the gaps;
3. Once filled, the headers are written to the headerProcCh channel (consumed by processHeaders below, and sketched right after this list), from is advanced accordingly, and the next batch is requested.
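The hand-off from fetchHeaders to processHeaders is an ordinary producer/consumer channel, with a nil batch signalling completion, just as in the real code. A stripped-down sketch of that pattern with a simplified header type:

package main

import "fmt"

type header struct{ number uint64 }

func main() {
	headerProcCh := make(chan []*header)

	// producer: sends batches of headers, then nil to signal completion
	go func() {
		for from := uint64(1); from <= 4; from += 2 {
			headerProcCh <- []*header{{from}, {from + 1}}
		}
		headerProcCh <- nil
	}()

	// consumer: processes batches until it receives nil
	for {
		batch := <-headerProcCh
		if batch == nil {
			fmt.Println("no more headers, terminating")
			return
		}
		fmt.Println("processing", len(batch), "headers starting at", batch[0].number)
	}
}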

func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error {
    	..............................................
	case headers := <-d.headerProcCh:
		// Terminate header processing if we synced up
		if len(headers) == 0 {
			// Notify everyone that headers are fully processed
			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
				select {
				case ch <- false:
				case <-d.cancelCh:
				}
			}
			// If no headers were retrieved at all, the peer violated its TD promise that it had a
			// better chain compared to ours. The only exception is if its promised blocks were
			// already imported by other means (e.g. fetcher):
			//
			// R <remote peer>, L <local node>: Both at block 10
			// R: Mine block 11, and propagate it to L
			// L: Queue block 11 for import
			// L: Notice that R's head and TD increased compared to ours, start sync
			// L: Import of block 11 finishes
			// L: Sync begins, and finds common ancestor at 11
			// L: Request new headers up from 11 (R's TD was higher, it must have something)
			// R: Nothing to give
			if d.mode != LightSync {
				head := d.blockchain.CurrentBlock()
				if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 {
					return errStallingPeer
				}
			}
			// If fast or light syncing, ensure promised headers are indeed delivered. This is
			// needed to detect scenarios where an attacker feeds a bad pivot and then bails out
			// of delivering the post-pivot blocks that would flag the invalid content.
			//
			// This check cannot be executed "as is" for full imports, since blocks may still be
			// queued for processing when the header download completes. However, as long as the
			// peer gave us something useful, we're already happy/progressed (above check).
			if d.mode == FastSync || d.mode == LightSync {
				head := d.lightchain.CurrentHeader()
				if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 {
					return errStallingPeer
				}
			}
			// Disable any rollback and return
			rollback = nil
			return nil
		}
		// Otherwise split the chunk of headers into batches and process them
		gotHeaders = true

		for len(headers) > 0 {
			// Terminate if something failed in between processing chunks
			select {
			case <-d.cancelCh:
				return errCancelHeaderProcessing
			default:
			}
			// Select the next chunk of headers to import
			limit := maxHeadersProcess
			if limit > len(headers) {
				limit = len(headers)
			}
			chunk := headers[:limit]

			// In case of header only syncing, validate the chunk immediately
			if d.mode == FastSync || d.mode == LightSync {
				// Collect the yet unknown headers to mark them as uncertain
				unknown := make([]*types.Header, 0, len(headers))
				for _, header := range chunk {
					if !d.lightchain.HasHeader(header.Hash(), header.Number.Uint64()) {
						unknown = append(unknown, header)
					}
				}
				// If we're importing pure headers, verify based on their recentness
				frequency := fsHeaderCheckFrequency
				if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
					frequency = 1
				}
				if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {
					// If some headers were inserted, add them too to the rollback list
					if n > 0 {
						rollback = append(rollback, chunk[:n]...)
					}
					log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "err", err)
					return errInvalidChain
				}
				// All verifications passed, store newly found uncertain headers
				rollback = append(rollback, unknown...)
				if len(rollback) > fsHeaderSafetyNet {
					rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...)
				}
			}
			// Unless we're doing light chains, schedule the headers for associated content retrieval
			if d.mode == FullSync || d.mode == FastSync {
				// If we've reached the allowed number of pending headers, stall a bit
				for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
					select {
					case <-d.cancelCh:
						return errCancelHeaderProcessing
					case <-time.After(time.Second):
					}
				}
				// Otherwise insert the headers for content retrieval
				inserts := d.queue.Schedule(chunk, origin)
				if len(inserts) != len(chunk) {
					log.Debug("Stale headers")
					return errBadPeer
				}
			}
			headers = headers[limit:]
			origin += uint64(limit)
		}
		............................................
		}
	}
}

The other end of the headerProcCh channel is in processHeaders(), which pulls batches of headers off the channel and processes them:
1. In fast or light sync mode, headers are verified and written to the database in chunks via lightchain.InsertHeaderChain() (the batching loop is sketched below);
2. In fast or full sync mode, d.queue.Schedule(chunk, origin) then populates blockTaskPool/blockTaskQueue and (fast sync only) receiptTaskPool/receiptTaskQueue, which the body and receipt fetchers consume next.
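The chunking inside processHeaders is plain slice arithmetic. A minimal standalone sketch of the same batching loop (the batch size here is just an illustrative value, not the real maxHeadersProcess constant):

package main

import "fmt"

// processInChunks walks a slice of header numbers in fixed-size batches,
// mirroring the `limit := maxHeadersProcess` loop in processHeaders.
func processInChunks(headers []uint64, maxPerChunk int) {
	origin := uint64(1)
	for len(headers) > 0 {
		limit := maxPerChunk
		if limit > len(headers) {
			limit = len(headers)
		}
		chunk := headers[:limit]
		fmt.Printf("importing %d headers starting at %d\n", len(chunk), origin)

		headers = headers[limit:]
		origin += uint64(limit)
	}
}

func main() {
	headers := make([]uint64, 10)
	for i := range headers {
		headers[i] = uint64(i + 1)
	}
	processInChunks(headers, 4) // batches of 4, 4, 2
}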

d.fetchBodies

// fetchBodies iteratively downloads the scheduled block bodies, taking any
// available peers, reserving a chunk(塊) of blocks for each, waiting for delivery
// and also periodically checking for timeouts.
func (d *Downloader) fetchBodies(from uint64) error {
	log.Debug("Downloading block bodies", "origin", from)

	var (
		deliver = func(packet dataPack) (int, error) {
			pack := packet.(*bodyPack)
			return d.queue.DeliverBodies(pack.peerID, pack.transactions, pack.uncles)
		}
		expire   = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) }
		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) }
		capacity = func(p *peerConnection) int { return p.BlockCapacity(d.requestRTT()) }
		setIdle  = func(p *peerConnection, accepted int) { p.SetBodiesIdle(accepted) }
	)
	err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire,
		d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies,
		d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies")

	log.Debug("Block body download terminated", "err", err)
	return err
}

fetchBodies mainly delegates to fetchParts() (see the sketch after this list):
1. ReserveBodies() reserves the pending bodies from the task pool scheduled earlier;
2. fetch, i.e. FetchBodies here, requests them from the peer by sending a GetBlockBodiesMsg message;
3. The remote peer answers with a BlockBodiesMsg, which is written into the bodyCh channel;
4. When data arrives on bodyCh, the deliver function writes the Transactions and Uncles into the resultCache.
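fetchParts itself is a long generic loop shared by bodies and receipts. Stripped to its essence it is a reserve → fetch → deliver cycle; the sketch below uses illustrative names and types, not the real queue API:

package main

import "fmt"

// task is a stand-in for a scheduled block body download.
type task struct{ number uint64 }

// reserve hands out up to capacity pending tasks, like queue.ReserveBodies does.
func reserve(pending *[]task, capacity int) []task {
	if capacity > len(*pending) {
		capacity = len(*pending)
	}
	chunk := (*pending)[:capacity]
	*pending = (*pending)[capacity:]
	return chunk
}

func main() {
	pending := []task{{1}, {2}, {3}, {4}, {5}}
	results := map[uint64]string{}

	for len(pending) > 0 {
		// 1. reserve a chunk of work for an idle peer
		chunk := reserve(&pending, 2)
		// 2. "fetch" it from the peer (network round-trip elided)
		// 3. deliver the response into the result cache
		for _, t := range chunk {
			results[t.number] = "body"
		}
		fmt.Println("delivered", len(chunk), "bodies,", len(pending), "still pending")
	}
}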

d.fetchReceipts

// fetchReceipts iteratively downloads the scheduled block receipts, taking any
// available peers, reserving a chunk of receipts for each, waiting for delivery
// and also periodically checking for timeouts.
func (d *Downloader) fetchReceipts(from uint64) error {
	log.Debug("Downloading transaction receipts", "origin", from)

	var (
		deliver = func(packet dataPack) (int, error) {
			pack := packet.(*receiptPack)
			return d.queue.DeliverReceipts(pack.peerID, pack.receipts)
		}
		expire   = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) }
		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) }
		capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.requestRTT()) }
		setIdle  = func(p *peerConnection, accepted int) { p.SetReceiptsIdle(accepted) }
	)
	err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire,
		d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts,
		d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts")

	log.Debug("Transaction receipt download terminated", "err", err)
	return err
}

fetchReceipts mirrors fetchBodies and also delegates to fetchParts():
1. ReserveReceipts() takes the receipts to be synced out of the receiptTaskPool;
2. fetch, i.e. FetchReceipts here, requests them from the peer by sending a GetReceiptsMsg message;
3. The remote peer answers with a ReceiptsMsg, which is written into the receiptCh channel;
4. When data arrives on receiptCh, the deliver function writes the Receipts into the resultCache.

func (d *Downloader) processFullSyncContent() error {
	for {
		results := d.queue.Results(true)
		if len(results) == 0 {
			return nil
		}
		if d.chainInsertHook != nil {
			d.chainInsertHook(results)
		}
		if err := d.importBlockResults(results); err != nil {
			return err
		}
	}
}

processFullSyncContent handles the downloaded data in full sync mode. Because receipts are not cached in the resultCache in this mode, the logic is simple: take the bodies out of the cache, execute the transactions to regenerate the state, and write the blocks into the chain.
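As a rough sketch of that import step, with stand-in types instead of the real fetchResult and blockchain (in geth the block is rebuilt from the cached header, transactions and uncles and handed to the chain in one batch):

package main

import "fmt"

// fetchResult is a stand-in for the downloader's cached result entry.
type fetchResult struct {
	Number       uint64
	Transactions int // simplified: just a count instead of real objects
	Uncles       int
}

// importBlockResults mimics the full sync import: assemble blocks from the
// cached results and insert them into the chain in one batch.
func importBlockResults(results []fetchResult) error {
	if len(results) == 0 {
		return nil
	}
	blocks := make([]uint64, len(results))
	for i, r := range results {
		// in geth this is where the header, transactions and uncles are stitched back into a block
		blocks[i] = r.Number
	}
	fmt.Printf("inserting %d blocks [%d..%d] into the chain\n",
		len(blocks), blocks[0], blocks[len(blocks)-1])
	return nil
}

func main() {
	_ = importBlockResults([]fetchResult{{Number: 100}, {Number: 101}, {Number: 102}})
}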

func (d *Downloader) processFastSyncContent(latest *types.Header) error {
	// Start syncing state of the reported head block. This should get us most of
	// the state of the pivot block.
	stateSync := d.syncState(latest.Root)
	defer stateSync.Cancel()
	go func() {
		if err := stateSync.Wait(); err != nil && err != errCancelStateFetch {
			d.queue.Close() // wake up Results
		}
	}()
	// Figure out the ideal pivot block. Note, that this goalpost may move if the
	// sync takes long enough for the chain head to move significantly.
	pivot := uint64(0)
	if height := latest.Number.Uint64(); height > uint64(fsMinFullBlocks) {
		pivot = height - uint64(fsMinFullBlocks)
	}
	// To cater for moving pivot points, track the pivot block and subsequently
	// accumulated download results separately.
	var (
		oldPivot *fetchResult   // Locked in pivot block, might change eventually
		oldTail  []*fetchResult // Downloaded content after the pivot
	)
	for {
		// Wait for the next batch of downloaded data to be available, and if the pivot
		// block became stale, move the goalpost
		results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness
		if len(results) == 0 {
			// If pivot sync is done, stop
			if oldPivot == nil {
				return stateSync.Cancel()
			}
			// If sync failed, stop
			select {
			case <-d.cancelCh:
				return stateSync.Cancel()
			default:
			}
		}
		if d.chainInsertHook != nil {
			d.chainInsertHook(results)
		}
		if oldPivot != nil {
			results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
		}
		// Split around the pivot block and process the two sides via fast/full sync
		if atomic.LoadInt32(&d.committed) == 0 {
			latest = results[len(results)-1].Header
			if height := latest.Number.Uint64(); height > pivot+2*uint64(fsMinFullBlocks) {
				log.Warn("Pivot became stale, moving", "old", pivot, "new", height-uint64(fsMinFullBlocks))
				pivot = height - uint64(fsMinFullBlocks)
			}
		}
		P, beforeP, afterP := splitAroundPivot(pivot, results)
		if err := d.commitFastSyncData(beforeP, stateSync); err != nil {
			return err
		}
		if P != nil {
			// If new pivot block found, cancel old state retrieval and restart
			if oldPivot != P {
				stateSync.Cancel()

				stateSync = d.syncState(P.Header.Root)
				defer stateSync.Cancel()
				go func() {
					if err := stateSync.Wait(); err != nil && err != errCancelStateFetch {
						d.queue.Close() // wake up Results
					}
				}()
				oldPivot = P
			}
			// Wait for completion, occasionally checking for pivot staleness
			select {
			case <-stateSync.done:
				if stateSync.err != nil {
					return stateSync.err
				}
				if err := d.commitPivotBlock(P); err != nil {
					return err
				}
				oldPivot = nil

			case <-time.After(time.Second):
				oldTail = afterP
				continue
			}
		}
		// Fast sync done, pivot commit done, full import
		if err := d.importBlockResults(afterP); err != nil {
			return err
		}
	}
}

processFastSyncContent handles the downloaded data in fast sync mode. Since Receipts, Transactions and Uncles are all in the resultCache, there is the extra work of downloading receipts and the state and verifying them before writing to the chain:

1. Download the state of the latest reported block, d.syncState(latest.Root);

2. Take the downloaded results out of the cache;

3. Set pivot to latestHeight - 64 and call splitAroundPivot() to split the results around the pivot into three parts: beforeP, P and afterP (see the sketch after this list);

4. For beforeP, call commitFastSyncData to write both bodies and receipts into the chain;

5. For P, update the state to that of the pivot block, insert the pivot's result (body and receipts) into the local chain via commitPivotBlock, and call FastSyncCommitHead to record the pivot's hash in the downloader as the last fast-synced block;

6. For afterP, call d.importBlockResults to insert the bodies into the chain without receipts. These are the last 64 blocks, so at this point the database holds only their headers and bodies; their receipts and state are produced by full, block-by-block processing at the end.
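Under the simplifying assumption that a result only carries its block number, splitAroundPivot can be sketched as:

package main

import "fmt"

// result is a stand-in for the downloader's fetchResult.
type result struct{ number uint64 }

// splitAroundPivot partitions results into those strictly before the pivot,
// the pivot itself, and those strictly after it, as the fast sync path does.
func splitAroundPivot(pivot uint64, results []result) (p *result, before, after []result) {
	for i, r := range results {
		if r.number < pivot {
			before = append(before, r)
		} else if r.number == pivot {
			p = &results[i]
		} else {
			after = append(after, r)
		}
	}
	return p, before, after
}

func main() {
	results := []result{{98}, {99}, {100}, {101}, {102}}
	p, before, after := splitAroundPivot(100, results)
	fmt.Println(len(before), p.number, len(after)) // 2 100 2
}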

That covers the main logic of the downloader; there is still plenty of detail in the code that is worth reading through on your own.
