The three levels of Linux physical memory description

Linux describes physical memory with three levels of structs:

pglist_data	//describes a memory node

zone	//describes a zone within a node: Normal, DMA, or HighMem

page	//describes one page frame, usually 4KB

The precise meaning of each structure member is explained in the comments in the code below; where the original English comments are already clear, refer to them directly.
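Before diving into the definitions, here is a minimal sketch of how the three levels nest (an illustration only, assuming a NUMA kernel of this era; for_each_online_node(), NODE_DATA(), pfn_valid() and pfn_to_page() are the real kernel helpers):

int nid;
for_each_online_node(nid) {				/* level 1: nodes */
	pg_data_t *pgdat = NODE_DATA(nid);
	int i;

	for (i = 0; i < pgdat->nr_zones; i++) {		/* level 2: zones */
		struct zone *zone = &pgdat->node_zones[i];
		unsigned long pfn;

		for (pfn = zone->zone_start_pfn;
		     pfn < zone->zone_start_pfn + zone->spanned_pages;
		     pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))	/* spanned_pages may include holes */
				continue;
			page = pfn_to_page(pfn);	/* level 3: pages */
			/* inspect page->flags, page->_count, ... */
		}
	}
}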

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];//the memory zones belonging to this node
	struct zonelist node_zonelists[MAX_ZONELISTS];//fallback zone lists: an ordered list of zones across all nodes, used when this node cannot satisfy an allocation; in fact, unless GFP_THISNODE is specified, every allocation walks this zonelist, preferring Highmem > Normal > DMA
	/* number of populated zones in this node */
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;//pointer to the page descriptor of this node's first page (see the sketch after this struct)
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;//used by the bootmem allocator to manage memory during early boot; its member node_bootmem_map points to a bitmap in which each bit records whether a page is already in use
#endif

	unsigned long node_start_pfn;//page frame number of this node's first page, i.e. the node's index into the global mem_map
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;//wait queue of this node's page-out daemon (kswapd), used when pages of this node need to be swapped out
	struct task_struct *kswapd;//the daemon responsible for swapping out this node's pages
	int kswapd_max_order;//largest allocation order that kswapd should reclaim contiguous pages for (set when kswapd is woken; not a page count)
} pg_data_t;
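Under CONFIG_FLAT_NODE_MEM_MAP, the node_mem_map/node_start_pfn relationship noted in the comments above reduces to pointer arithmetic. A hypothetical helper (the name is ours) mirroring what the DISCONTIGMEM variant of pfn_to_page() effectively computes:

/* Hypothetical helper: page descriptor of a pfn known to lie within
 * pgdat. Mirrors the DISCONTIGMEM pfn_to_page() arithmetic. */
static inline struct page *node_pfn_to_page(pg_data_t *pgdat, unsigned long pfn)
{
	return pgdat->node_mem_map + (pfn - pgdat->node_start_pfn);
}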
struct zone {
	/* Fields commonly accessed by the page allocator */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];//the zone's three watermarks: min, low and high

#ifdef CONFIG_NUMA
	int node;//the node this zone belongs to
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;//zone reclaim is started once the number of unmapped pages exceeds this value
	unsigned long		min_slab_pages;//when the number of reclaimable slab pages in this zone exceeds this value, cached slab pages are reclaimed
#endif
	struct per_cpu_pageset __percpu *pageset;//per-CPU page cache: lists of single pages, used to satisfy single-page (order-0) allocations.
	//Because each CPU has its own pageset, no lock is needed, pages are not bounced between CPUs' caches,
	//and the zone is not fragmented into many small blocks. per_cpu_pages has three members, count, high and batch:
	//the number of cached pages, the upper limit, and the refill/drain quantum. When count exceeds high,
	//batch pages are released back to the buddy system; when no cached page is available, batch pages are taken
	//from the buddy system into the cache. high and batch are computed by zone_batchsize() and setup_pageset():
	//for a zone larger than about 512MB, batch=32 and high=6*32=192; for a smaller zone, batch=present_pages/1024/4,
	//so high is roughly 0.15% of the zone's total memory (see the sketch after this struct)
	/*
	 * free areas of different sizes
	 */
	spinlock_t		lock;//spinlock protecting free_area

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;//protects spanned_pages/present_pages; without memory hotplug these values never change, so a seqlock suffices; both fields are described below
#endif
	struct free_area	free_area[MAX_ORDER];//the buddy allocator's MAX_ORDER (11) free lists; list n holds blocks of 2^n pages, n from 0 to 10 (see the sketch after this struct)

	ZONE_PADDING(_pad1_)//padding so that the following fields start on a new cache line

	/* Fields commonly accessed by the page reclaim scanner */
	//Page reclaim in Linux is based on the LRU (least recently used) algorithm. LRU relies on the
	//observation that pages used frequently in the recent past are likely to be accessed again soon;
	//conversely, pages that have not been touched for a long time are unlikely to be needed in the near
	//future, which makes them the best candidates for eviction when physical memory runs short. The basic
	//idea is to track, per physical page, how recently it was used (Linux approximates this with the
	//active/inactive lists below rather than a true per-page counter).
	spinlock_t		lru_lock;	
	struct zone_lru {
		struct list_head list;
	} lru[NR_LRU_LISTS];

	struct zone_reclaim_stat reclaim_stat;

	unsigned long		pages_scanned;	   /* since last reclaim */
	unsigned long		flags;		   /* zone flags, see below */

	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];

	/*
	 * prev_priority holds the scanning priority for this zone.  It is
	 * defined as the scanning priority at which we achieved our reclaim
	 * target at the previous try_to_free_pages() or balance_pgdat()
	 * invocation.
	 *
	 * We use prev_priority as a measure of how much stress page reclaim is
	 * under - it drives the swappiness decision: whether to unmap mapped
	 * pages.
	 *
	 * Access to this field is quite racy even on uniprocessor.  But
	 * it is expected to average out OK.
	 */
	int prev_priority;

	/*
	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
	 * this zone's LRU.  Maintained by the pageout code.
	 */
	unsigned int inactive_ratio;


	ZONE_PADDING(_pad2_)
	/* Rarely used or read-mostly fields */

	/*
	 * wait_table		-- the array holding the hash table
	 * wait_table_hash_nr_entries	-- the size of the hash table array
	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people
	 * waiting for a page to become available and make them
	 * runnable again when possible. The trouble is that this
	 * consumes a lot of space, especially when so few things
	 * wait on pages at a given time. So instead of using
	 * per-page waitqueues, we use a waitqueue hash table.
	 *
	 * The bucket discipline is to sleep on the same queue when
	 * colliding and wake all in that wait queue when removing.
	 * When something wakes, it must check to be sure its page is
	 * truly available, a la thundering herd. The cost of a
	 * collision is great, but given the expected load of the
	 * table, they should be so rare as to be outweighed by the
	 * benefits from the saved space.
	 *
	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
	 * primary users of these fields, and in mm/page_alloc.c
	 * free_area_init_core() performs the initialization of them.
	 */
	wait_queue_head_t	* wait_table;
	unsigned long		wait_table_hash_nr_entries;
	unsigned long		wait_table_bits;

	/*
	 * Discontig memory support fields.
	 */
	struct pglist_data	*zone_pgdat;//the node this zone belongs to
	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;//frame number of the zone's first page, i.e. its offset into the global mem_map

	/*
	 * zone_start_pfn, spanned_pages and present_pages are all
	 * protected by span_seqlock.  It is a seqlock because it has
	 * to be read outside of zone->lock, and it is done in the main
	 * allocator path.  But, it is written quite infrequently.
	 *
	 * The lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 */
	unsigned long		spanned_pages;	/* total size, including holes */
	unsigned long		present_pages;	/* amount of memory (excluding holes) */

	/*
	 * rarely used fields:
	 */
	const char		*name;
} ____cacheline_internodealigned_in_smp;
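The high/batch arithmetic mentioned in the pageset comment above can be sketched as follows, simplified from zone_batchsize()/setup_pageset() in mm/page_alloc.c (assuming 4KB pages; the real zone_batchsize() also rounds batch down to one less than a power of two, so a large zone actually ends up with 31 rather than 32):

/* Simplified sketch of how the per-CPU pageset limits are derived;
 * PAGE_SIZE assumed to be 4KB, final rounding omitted. */
static int sketch_zone_batchsize(unsigned long present_pages)
{
	int batch = present_pages / 1024;	/* ~0.1% of the zone */

	if (batch * 4096 > 512 * 1024)		/* cap at 512KB worth of pages */
		batch = (512 * 1024) / 4096;	/* = 128 */
	batch /= 4;				/* zones > ~512MB: 128/4 = 32 */
	if (batch < 1)
		batch = 1;
	return batch;
}
/* setup_pageset() then sets pcp->high = 6 * batch: ~192 for a large zone,
 * and 6 * present_pages/4096, roughly 0.15% of the zone, for a small one. */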
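Likewise, the free_area comment implies a simple identity: free_area[order].nr_free counts free blocks of 2^order pages each, so a zone's free pages can be totalled as below (a sketch; the caller would need zone->lock, and the kernel instead maintains a running NR_FREE_PAGES counter in vm_stat):

/* Sketch: total free pages = sum over all orders of nr_free << order.
 * Caller should hold zone->lock to get a consistent snapshot. */
static unsigned long sketch_zone_free_pages(struct zone *zone)
{
	unsigned long total = 0;
	int order;

	for (order = 0; order < MAX_ORDER; order++)
		total += zone->free_area[order].nr_free << order;
	return total;
}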
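Finally, the wait_table machinery described in the long comment inside struct zone boils down to a hash lookup; this is essentially page_waitqueue() from mm/filemap.c (hash_ptr() comes from <linux/hash.h>):

/* Pick the shared wait queue for a page by hashing its descriptor
 * address into one of the zone's 2^wait_table_bits queues. */
static wait_queue_head_t *sketch_page_waitqueue(struct page *page)
{
	struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}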



/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 */
struct page {
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	atomic_t _count;		/* Usage count, see below. */
	union {
		atomic_t _mapcount;	/* Count of ptes mapped in mms,
					 * to show when page is mapped
					 * & limit reverse map searches.
					 */
		struct {		/* SLUB */
			u16 inuse;
			u16 objects;
		};
	};
	union {
	    struct {
		unsigned long private;		/* Mapping-private opaque data:
					 	 * usually used for buffer_heads
						 * if PagePrivate set; used for
						 * swp_entry_t if PageSwapCache;
						 * indicates order in the buddy
						 * system if PG_buddy is set.
						 */
		struct address_space *mapping;	/* If low bit clear, points to
						 * inode address_space, or NULL.
						 * If page mapped as anonymous
						 * memory, low bit is set, and
						 * it points to anon_vma object:
						 * see PAGE_MAPPING_ANON below.
						 */
	    };
#if USE_SPLIT_PTLOCKS
	    spinlock_t ptl;
#endif
	    struct kmem_cache *slab;	/* SLUB: Pointer to slab */
	    struct page *first_page;	/* Compound tail pages: if this page
					 * is part of a compound/buddy block
					 * and is not its head page, this
					 * points to the head (first) page. */
	};
	union {
		pgoff_t index;		/* Our offset within mapping: for a
					 * file-mapped page, this page's
					 * offset within the file. */
		void *freelist;		/* SLUB: freelist req. slab lock */
	};
	struct list_head lru;		/* Pageout list, eg. active_list
					 * protected by zone->lru_lock !
					 */
	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
	unsigned long debug_flags;	/* Use atomic bitops on this */
#endif

#ifdef CONFIG_KMEMCHECK
	/*
	 * kmemcheck wants to track the status of each byte in a page; this
	 * is a pointer to such a status block. NULL if not tracked.
	 */
	void *shadow;
#endif
};
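The low-bit convention on page->mapping described above is worth spelling out. These sketches (the sketch_ names are ours) mirror the kernel's PageAnon() test and the corresponding anon_vma extraction done by the rmap code; PAGE_MAPPING_ANON is the real flag from include/linux/mm.h:

/* Bit 0 of page->mapping set means the pointer is really an anon_vma,
 * not an address_space. */
#define PAGE_MAPPING_ANON	1

static inline int sketch_PageAnon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static inline struct anon_vma *sketch_page_anon_vma(struct page *page)
{
	if (!sketch_PageAnon(page))
		return NULL;
	return (struct anon_vma *)((unsigned long)page->mapping &
				   ~PAGE_MAPPING_ANON);
}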

