Linux Slab Allocator (6): Creating and Destroying Slabs

My knowledge here is limited, so please point out anything described inaccurately. When reposting, please credit the source: http://blog.csdn.net/vanbreaker/article/details/7673372

The slab allocator creates a new slab for a cache when the following two conditions are both met:

1. An object allocation is requested, but the local (per-CPU) cache has no free objects left and needs to be refilled.

2. The lists maintained by kmem_list3 contain no slabs at all, or every slab sits on the full list.

When this happens, cache_grow() is called to create a new slab and enlarge the cache's capacity.

 

The code flow of cache_grow() is shown below:

static int cache_grow(struct kmem_cache *cachep,
		gfp_t flags, int nodeid, void *objp)
{
	struct slab *slabp;
	size_t offset;
	gfp_t local_flags;
	struct kmem_list3 *l3;

	/*
	 * Be lazy and only check for valid flags here,  keeping it out of the
	 * critical path in kmem_cache_alloc().
	 */
	BUG_ON(flags & GFP_SLAB_BUG_MASK);
	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);

	/* Take the l3 list lock to change the colour_next on this node */
	check_irq_off();
	l3 = cachep->nodelists[nodeid];
	spin_lock(&l3->list_lock);

	/* Get colour for the slab, and calc the next value. */
	/* determine the colour index for the slab being created */
	offset = l3->colour_next;
	/* advance the colour index for the next slab */
	l3->colour_next++;
	/* the colour index wraps around once it reaches the number of colours */
	if (l3->colour_next >= cachep->colour)
		l3->colour_next = 0;
	spin_unlock(&l3->list_lock);

	/* convert the colour index into the slab's colour (a byte offset) */
	offset *= cachep->colour_off;

	if (local_flags & __GFP_WAIT)
		local_irq_enable();

	/*
	 * The test for missing atomic flag is performed here, rather than
	 * the more obvious place, simply to reduce the critical path length
	 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
	 * will eventually be caught here (where it matters).
	 */
	kmem_flagcheck(cachep, flags);

	/*
	 * Get mem for the objs.  Attempt to allocate a physical page from
	 * 'nodeid'.
	 */
	if (!objp)
		/* allocate page frames from the buddy system: this is the
		   slab allocator's interface to the buddy system */
		objp = kmem_getpages(cachep, local_flags, nodeid);
	if (!objp)
		goto failed;

	/* Get slab management. */
	/* allocate the slab management area */
	slabp = alloc_slabmgmt(cachep, objp, offset,
			local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
	if (!slabp)
		goto opps1;

	/* map the pages to the slab and the cache, so that the slab and
	   cache descriptors can be located quickly from an object */
	slab_map_pages(cachep, slabp, objp);

	/* initialize the objects */
	cache_init_objs(cachep, slabp);

	if (local_flags & __GFP_WAIT)
		local_irq_disable();
	check_irq_off();
	spin_lock(&l3->list_lock);

	/* Make slab active. */
	/* add the newly created slab to the free list */
	list_add_tail(&slabp->list, &(l3->slabs_free));
	STATS_INC_GROWN(cachep);
	l3->free_objects += cachep->num;
	spin_unlock(&l3->list_lock);
	return 1;
opps1:
	kmem_freepages(cachep, objp);
failed:
	if (local_flags & __GFP_WAIT)
		local_irq_disable();
	return 0;
}
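
To make the colouring step above concrete, here is a minimal user-space sketch of the same logic (not kernel code: the toy_cache/toy_node types and the values 3 and 64 are invented for illustration). Successive slabs get offsets 0, 64, 128, 0, ..., so their first objects land on different L1 cache lines:

#include <stdio.h>

/* Hypothetical, simplified stand-ins for the kernel structures. */
struct toy_cache {
	unsigned int colour;		/* number of distinct colours */
	unsigned int colour_off;	/* offset step, typically the L1 line size */
};

struct toy_node {
	unsigned int colour_next;	/* colour index for the next slab */
};

/* Mirrors the colouring logic at the top of cache_grow(). */
static size_t next_colour_offset(struct toy_cache *cachep, struct toy_node *l3)
{
	size_t offset = l3->colour_next;	/* colour index for this slab */
	l3->colour_next++;			/* advance for the next slab */
	if (l3->colour_next >= cachep->colour)
		l3->colour_next = 0;		/* wrap around */
	return offset * cachep->colour_off;	/* index -> byte offset */
}

int main(void)
{
	struct toy_cache cache = { .colour = 3, .colour_off = 64 };
	struct toy_node node = { .colour_next = 0 };

	for (int i = 0; i < 6; i++)
		printf("slab %d: objects start at offset %zu\n",
		       i, next_colour_offset(&cache, &node));
	return 0;	/* prints offsets 0, 64, 128, 0, 64, 128 */
}

This is the whole point of slab colouring: first objects that would otherwise always begin at the same page-relative address are spread across different cache lines, reducing conflict misses.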

 

Helper functions:

Allocating page frames for the slab:

static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
	struct page *page;
	int nr_pages;
	int i;

#ifndef CONFIG_MMU
	/*
	 * Nommu uses slab's for process anonymous memory allocations, and thus
	 * requires __GFP_COMP to properly refcount higher order allocations
	 */
	flags |= __GFP_COMP;
#endif

	flags |= cachep->gfpflags;
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		flags |= __GFP_RECLAIMABLE;

	/* allocate 2^gfporder contiguous pages from the specified node */
	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
	if (!page)
		return NULL;

	nr_pages = (1 << cachep->gfporder);
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		add_zone_page_state(page_zone(page),
			NR_SLAB_RECLAIMABLE, nr_pages);
	else
		add_zone_page_state(page_zone(page),
			NR_SLAB_UNRECLAIMABLE, nr_pages);
	for (i = 0; i < nr_pages; i++)
		__SetPageSlab(page + i);

	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);

		if (cachep->ctor)
			kmemcheck_mark_uninitialized_pages(page, nr_pages);
		else
			kmemcheck_mark_unallocated_pages(page, nr_pages);
	}

	/* return the virtual address of the first page */
	return page_address(page);
}
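
As a rough user-space analogue of the allocation core of kmem_getpages() (assumptions: mmap() stands in for the buddy system; nodeid, the page flags and the NR_SLAB_* statistics have no counterpart here), allocating 2^gfporder contiguous pages might look like this:

#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

/* User-space sketch of the allocation core of kmem_getpages():
 * obtain 2^order contiguous pages and return their start address.
 * mmap() plays the role of the buddy system here. */
static void *toy_getpages(unsigned int order)
{
	size_t nr_pages = 1UL << order;		/* same as (1 << cachep->gfporder) */
	size_t bytes = nr_pages * (size_t)sysconf(_SC_PAGESIZE);
	void *addr = mmap(NULL, bytes, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	return addr == MAP_FAILED ? NULL : addr;
}

int main(void)
{
	void *slab_mem = toy_getpages(2);	/* 2^2 = 4 contiguous pages */

	if (!slab_mem)
		return 1;
	printf("got 4 pages starting at %p\n", slab_mem);
	return 0;
}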



Allocating space for the slab management area:

static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
				   int colour_off, gfp_t local_flags,
				   int nodeid)
{
	struct slab *slabp;

	/* if the slab management area lives off-slab, allocate it from the
	   designated slabp_cache */
	if (OFF_SLAB(cachep)) {
		/* Slab management obj is off-slab. */
		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
					      local_flags, nodeid);
		if (!slabp)
			return NULL;
		/*
		 * If the first object in the slab is leaked (it's allocated
		 * but no one has a reference to it), we want to make sure
		 * kmemleak does not treat the ->s_mem pointer as a reference
		 * to the object. Otherwise we will not report the leak.
		 */
		kmemleak_scan_area(slabp, offsetof(struct slab, list),
				   sizeof(struct list_head), local_flags);
	} else {
		/* the management area is on-slab: it starts at the colour
		   offset from the beginning of the slab */
		slabp = objp + colour_off;
		colour_off += cachep->slab_size;
	}
	slabp->inuse = 0;		  /* all objects are free */
	slabp->colouroff = colour_off;	  /* offset of the first object */
	slabp->s_mem = objp + colour_off; /* address of the first object */
	slabp->nodeid = nodeid;		  /* record the NUMA node */
	slabp->free = 0;		  /* the next free object is the first one, at s_mem */
	return slabp;
}
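
A simplified sketch of the on-slab layout arithmetic may help (assumptions: a 4096-byte slab, a 64-byte colour offset, and sizeof(struct toy_slab) standing in for the aligned cachep->slab_size, which in the kernel also covers the bufctl array):

#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-in for struct slab; the fields mirror the ones that
 * alloc_slabmgmt() fills in. */
struct toy_slab {
	unsigned int inuse;	/* objects in use */
	size_t colouroff;	/* offset of the first object */
	void *s_mem;		/* address of the first object */
	unsigned int free;	/* index of the next free object */
};

int main(void)
{
	size_t colour_off = 64;			/* this slab's colour */
	size_t mgmt_size = sizeof(struct toy_slab);
	char *objp = malloc(4096);		/* pretend: the slab's page frames */

	/* On-slab case: the management area sits at the colour offset,
	 * and the objects start right behind it. */
	struct toy_slab *slabp = (struct toy_slab *)(objp + colour_off);
	size_t first_obj_off = colour_off + mgmt_size;

	slabp->inuse = 0;
	slabp->colouroff = first_obj_off;
	slabp->s_mem = objp + first_obj_off;
	slabp->free = 0;

	printf("management area at +%zu, first object at +%zu\n",
	       colour_off, first_obj_off);
	free(objp);
	return 0;
}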


 

The lru field of the page descriptor is used to map each page frame to its slab descriptor and cache descriptor, so that both can be located quickly from an object's address: in effect, lru.next is pointed at the cache descriptor and lru.prev at the slab descriptor.

static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
			   void *addr)
{
	int nr_pages;
	struct page *page;

	page = virt_to_page(addr);

	nr_pages = 1;
	if (likely(!PageCompound(page)))
		nr_pages <<= cache->gfporder;	/* number of page frames given to the slab */

	do {
		page_set_cache(page, cache);	/* map the page to the cache */
		page_set_slab(page, slab);	/* map the page to the slab */
		page++;
	} while (--nr_pages);
}

 

static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
{
	page->lru.next = (struct list_head *)cache;
}


 

static inline void page_set_slab(struct page *page, struct slab *slab)
{
	page->lru.prev = (struct list_head *)slab;
}
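
This pointer-stashing trick works because pages owned by a slab are never on any LRU list, so the two pointers inside page->lru are free to carry the back-pointers instead. A self-contained user-space sketch (toy_page, toy_cache and toy_slab are invented stand-ins for the kernel structures):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

struct toy_page  { struct list_head lru; };	/* stand-in for struct page */
struct toy_cache { const char *name; };
struct toy_slab  { unsigned int inuse; };

/* While a page frame belongs to a slab it is on no LRU list, so its two
 * list pointers can carry the back-pointers instead. */
static void toy_set_cache(struct toy_page *p, struct toy_cache *c)
{
	p->lru.next = (struct list_head *)c;
}

static void toy_set_slab(struct toy_page *p, struct toy_slab *s)
{
	p->lru.prev = (struct list_head *)s;
}

static struct toy_cache *toy_get_cache(struct toy_page *p)
{
	return (struct toy_cache *)p->lru.next;
}

static struct toy_slab *toy_get_slab(struct toy_page *p)
{
	return (struct toy_slab *)p->lru.prev;
}

int main(void)
{
	struct toy_cache cache = { .name = "toy-cache" };
	struct toy_slab slab = { .inuse = 0 };
	struct toy_page page;

	toy_set_cache(&page, &cache);
	toy_set_slab(&page, &slab);

	/* Given only the page, both descriptors come back in O(1). */
	printf("cache=%s, slab inuse=%u\n",
	       toy_get_cache(&page)->name, toy_get_slab(&page)->inuse);
	return 0;
}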

 

Initializing the objects:

static void cache_init_objs(struct kmem_cache *cachep,
			    struct slab *slabp)
{
	int i;

	for (i = 0; i < cachep->num; i++) {
		/* get the i-th object */
		void *objp = index_to_obj(cachep, slabp, i);
#if DEBUG	/* debug-related operations */
		/* need to poison the objs? */
		if (cachep->flags & SLAB_POISON)
			poison_obj(cachep, objp, POISON_FREE);
		if (cachep->flags & SLAB_STORE_USER)
			*dbg_userword(cachep, objp) = NULL;

		if (cachep->flags & SLAB_RED_ZONE) {
			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
		}
		/*
		 * Constructors are not allowed to allocate memory from the same
		 * cache which they are a constructor for.  Otherwise, deadlock.
		 * They must also be threaded.
		 */
		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
			cachep->ctor(objp + obj_offset(cachep));

		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "constructor overwrote the"
					   " end of an object");
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "constructor overwrote the"
					   " start of an object");
		}
		if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
			    OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
			kernel_map_pages(virt_to_page(objp),
					 cachep->buffer_size / PAGE_SIZE, 0);
#else
		if (cachep->ctor)	/* initialize the object with its constructor */
			cachep->ctor(objp);
#endif
		slab_bufctl(slabp)[i] = i + 1;	/* the next free object is the adjacent one after this */
	}
	slab_bufctl(slabp)[i - 1] = BUFCTL_END;
}
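
The slab_bufctl() array that the last two lines initialize is an index-based free list: entry i holds the index of the free object that follows object i, and slabp->free holds the head. A user-space sketch of how the chain is built and then consumed on allocation (BUFCTL_END mirrors the sentinel in slab.c):

#include <stdio.h>

#define NUM_OBJS	4
#define BUFCTL_END	((unsigned int)~0U)	/* end-of-list sentinel, as in slab.c */

int main(void)
{
	unsigned int bufctl[NUM_OBJS];
	unsigned int free_idx = 0;	/* plays the role of slabp->free */

	/* The same chaining as the loop in cache_init_objs(): each entry
	 * points at the next adjacent object, the last one terminates. */
	for (unsigned int i = 0; i < NUM_OBJS; i++)
		bufctl[i] = i + 1;
	bufctl[NUM_OBJS - 1] = BUFCTL_END;

	/* Allocate every object by popping the head of the index list. */
	while (free_idx != BUFCTL_END) {
		printf("allocated object %u\n", free_idx);
		free_idx = bufctl[free_idx];	/* advance slabp->free */
	}
	return 0;
}

This is essentially what the allocation path does on each request: it hands out the object at index slabp->free and advances slabp->free to slab_bufctl(slabp)[slabp->free].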


 

Destroying a slab amounts to freeing the space occupied by the slab management area and the objects:

static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
{   
	/* subtract the colouring offset from the address of the first
	   object to recover the starting address of the slab */
	void *addr = slabp->s_mem - slabp->colouroff;

	slab_destroy_debugcheck(cachep, slabp);

	/* if the cache was created with SLAB_DESTROY_BY_RCU, the memory must
	   not be freed immediately: call_rcu() defers the actual freeing
	   until an RCU grace period has elapsed, so that concurrent lock-free
	   readers can still safely dereference objects from this slab */
	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
		struct slab_rcu *slab_rcu;

		slab_rcu = (struct slab_rcu *)slabp;
		slab_rcu->cachep = cachep;
		slab_rcu->addr = addr;
		call_rcu(&slab_rcu->head, kmem_rcu_free);
	} else {
		/* release the page frames occupied by the slab back to the buddy system */
		kmem_freepages(cachep, addr);
		/* if the slab management area is off-slab, free the management
		   object back to the cache it came from */
		if (OFF_SLAB(cachep))
			kmem_cache_free(cachep->slabp_cache, slabp);
	}
}
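
The address computation at the top of slab_destroy() just inverts what alloc_slabmgmt() set up; a quick sketch with invented values (a 64-byte colour offset plus a 32-byte management area):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the two fields that slab_destroy() relies on. */
struct toy_slab {
	size_t colouroff;	/* offset of the first object from the slab start */
	char *s_mem;		/* address of the first object */
};

int main(void)
{
	char *pages = malloc(4096);	/* pretend: the slab's page frames */
	struct toy_slab slab = {
		.colouroff = 64 + 32,	/* colour offset + management area size */
		.s_mem = pages + 64 + 32,
	};

	/* Same computation as in slab_destroy(): the first object's address
	 * minus the colouring offset recovers the start of the pages, which
	 * is what must be handed back to the buddy system. */
	void *addr = slab.s_mem - slab.colouroff;

	printf("recovered %p, pages start at %p\n", addr, (void *)pages);
	free(pages);
	return 0;
}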


 
