ptmalloc Heap Data Structures

Overview

Since glibc 2.3.x, ptmalloc2 has been integrated into glibc.

 

You can download the glibc source to read the ptmalloc code:

http://ftp.gnu.org/gnu/glibc/

 

Check the glibc version:

millionsky@ubuntu-16:~/tmp$ ldd --version

ldd (Ubuntu GLIBC 2.23-0ubuntu9) 2.23

 

The main reference here is:

https://ctf-wiki.github.io/ctf-wiki/pwn/heap

 

The glibc source referenced in this article is glibc-2.25.tar.xz.

ptmalloc Heap Data Structures

2.1 Chunk

2.1.1 Malloc_chunk

struct malloc_chunk {

 

INTERNAL_SIZE_T mchunk_prev_size; /* Size of previous chunk (if free). */

INTERNAL_SIZE_T mchunk_size; /* Size in bytes, including overhead. */

 

struct malloc_chunk *fd; /* double links -- used only if free. */

struct malloc_chunk *bk;

 

/* Only used for large blocks: pointer to next larger size. */

struct malloc_chunk *fd_nextsize; /* double links -- used only if free. */

struct malloc_chunk *bk_nextsize;

};
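To make the layout concrete, here is a small stand-alone sketch (my own illustration, not glibc code) that copies the struct above and prints the field offsets on a typical 64-bit build:

#include <stdio.h>
#include <stddef.h>

typedef size_t INTERNAL_SIZE_T;

struct malloc_chunk {
    INTERNAL_SIZE_T mchunk_prev_size;   /* only valid if the previous chunk is free */
    INTERNAL_SIZE_T mchunk_size;        /* size of this chunk, plus the A|M|P bits  */
    struct malloc_chunk *fd;            /* free-list links, only used when free     */
    struct malloc_chunk *bk;
    struct malloc_chunk *fd_nextsize;   /* only used for free large chunks          */
    struct malloc_chunk *bk_nextsize;
};

int main(void)
{
    /* Expected on x86-64: 0, 8, 16, 24, 32, 40 */
    printf("mchunk_prev_size at %zu\n", offsetof(struct malloc_chunk, mchunk_prev_size));
    printf("mchunk_size      at %zu\n", offsetof(struct malloc_chunk, mchunk_size));
    printf("fd               at %zu\n", offsetof(struct malloc_chunk, fd));
    printf("bk               at %zu\n", offsetof(struct malloc_chunk, bk));
    printf("fd_nextsize      at %zu\n", offsetof(struct malloc_chunk, fd_nextsize));
    printf("bk_nextsize      at %zu\n", offsetof(struct malloc_chunk, bk_nextsize));
    return 0;
}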

2.1.2 Size definitions

#ifndef INTERNAL_SIZE_T

# define INTERNAL_SIZE_T size_t

#endif

 

/* The corresponding word size. */

#define SIZE_SZ (sizeof (INTERNAL_SIZE_T))

 

- INTERNAL_SIZE_T is size_t, i.e. a 32-/64-bit unsigned integer depending on the platform.

- SIZE_SZ is sizeof (INTERNAL_SIZE_T), i.e. 4/8 bytes on 32-/64-bit platforms.

2.1.3 Chunk alignment

/* MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks. It

must be a power of two at least 2 * SIZE_SZ, even on machines for

which smaller alignments would suffice. It may be defined as larger

than this though. Note however that code and data structures are

optimized for the case of 8-byte alignment. */

#ifndef MALLOC_ALIGNMENT

# define MALLOC_ALIGNMENT (2 * SIZE_SZ < __alignof__ (long double) \

? __alignof__ (long double) : 2 * SIZE_SZ)

#endif

 

/* The corresponding bit mask value. */

#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)

 

- MALLOC_ALIGNMENT: the alignment in bytes, 8/16.

Note: if size_t is 4 bytes on a 64-bit platform, 2 * SIZE_SZ is only 8 while __alignof__ (long double) is 16, so the alignment is still 16.

- MALLOC_ALIGN_MASK: the alignment mask, 0x7/0xf.

 

- Checking whether the memory handed to the user is properly aligned:

aligned_OK(m) checks whether m is aligned;

misaligned_chunk(p) returns the misaligned low bits of the chunk's user pointer (non-zero means the chunk is misaligned);

/* Check if m has acceptable alignment */

 

#define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)

 

#define misaligned_chunk(p) \

((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \

& MALLOC_ALIGN_MASK)

2.1.4 Minimum chunk

- The minimum chunk size: 16/32 bytes.

The minimal chunk only needs the first four fields (prev_size, size, fd, bk);

/* The smallest possible chunk */

#define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))

 

Note: if size_t is 4 bytes on a 64-bit platform, the minimum chunk is 24 bytes (two 4-byte size fields plus two 8-byte pointers).

 

Minimum malloc size:

The minimum malloc size is the minimum chunk rounded up to the alignment, i.e. 16/32 bytes.

 

Note: if size_t is 4 bytes on a 64-bit platform, MINSIZE is 32. In that case MIN_CHUNK_SIZE (24) and MINSIZE differ.

 

/* The smallest size we can malloc is an aligned minimal chunk */

 

#define MINSIZE \

(unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))

2.1.5 Malloc header<-->user pointer

The first two fields of an allocated chunk are called the chunk header (malloc header); what follows is the user data. Converting between the two only requires stepping over the chunk header, i.e. 2 * SIZE_SZ = 8/16 bytes;

 

/* conversion from malloc headers to user pointers, and back */

 

#define chunk2mem(p) ((void*)((char*)(p) + 2*SIZE_SZ))

#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
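As a quick illustration, the following stand-alone sketch (not glibc internals; it only assumes the usual glibc chunk layout on the host) walks back from a pointer returned by malloc to its chunk header, exactly as mem2chunk does, and reads the size field:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define SIZE_SZ (sizeof(size_t))

int main(void)
{
    void *mem = malloc(24);                           /* user pointer             */
    uint8_t *chunk = (uint8_t *)mem - 2 * SIZE_SZ;    /* mem2chunk(mem)           */
    size_t size_field = *(size_t *)(chunk + SIZE_SZ); /* mchunk_size (with flags) */
    printf("user ptr %p, chunk %p, size field %#zx\n",
           mem, (void *)chunk, size_field);
    free(mem);
    return 0;
}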

 

2.1.6 Request size checking and conversion

- REQUEST_OUT_OF_RANGE: checks whether the requested size is out of range.

Since MINSIZE is 16/32,

the largest request must stay below 0xFFFFFFE0 / 0xFFFFFFFFFFFFFFC0.

To simplify other code, the bound is deliberately set low enough that even adding MINSIZE will not wrap around zero.

/*

Check if a request is so large that it would wrap around zero when

padded and aligned. To simplify some other code, the bound is made

low enough so that adding MINSIZE will also not wrap around zero.

*/

 

#define REQUEST_OUT_OF_RANGE(req) \

((unsigned long) (req) >=            \

(unsigned long) (INTERNAL_SIZE_T) (-2 * MINSIZE))

- request2size

request2size converts the user-requested size into the aligned chunk size.

MALLOC_ALIGNMENT is at least 2 * SIZE_SZ. Only SIZE_SZ (the size field) is added before aligning, because the prev_size field of the next chunk can hold the tail of this chunk's user data.

 

/* pad request bytes into a usable size -- internal version */

 

#define request2size(req) \

(((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \

MINSIZE : \

((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)

 

- checked_request2size: checks the range first, then performs the size conversion.

/* Same, except also perform argument check */

 

#define checked_request2size(req, sz) \

if (REQUEST_OUT_OF_RANGE (req)) {          \

__set_errno (ENOMEM);            \

return 0;                \

}                  \

(sz) = request2size (req);
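The following stand-alone sketch (my own, assuming a typical 64-bit layout with SIZE_SZ = 8 and MALLOC_ALIGNMENT = 16) reproduces the macros above to show how a few request sizes map to chunk sizes:

#include <stdio.h>
#include <stddef.h>

#define SIZE_SZ            8UL
#define MALLOC_ALIGNMENT   (2 * SIZE_SZ)
#define MALLOC_ALIGN_MASK  (MALLOC_ALIGNMENT - 1)
#define MIN_CHUNK_SIZE     (4 * SIZE_SZ)      /* prev_size, size, fd, bk */
#define MINSIZE            ((MIN_CHUNK_SIZE + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)

#define request2size(req)                                   \
    (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ?      \
     MINSIZE :                                              \
     (((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))

int main(void)
{
    size_t reqs[] = { 0, 8, 24, 25, 100 };
    /* Expected chunk sizes: 32, 32, 32, 48, 112 */
    for (size_t i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++)
        printf("malloc(%zu) -> chunk size %zu\n",
               reqs[i], (size_t)request2size(reqs[i]));
    return 0;
}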

 


2.1.7 Flag bits

The P flag (PREV_INUSE):

/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */

#define PREV_INUSE 0x1

 

/* extract inuse bit of previous chunk */

#define prev_inuse(p) ((p)->mchunk_size & PREV_INUSE)

 

The M flag (IS_MMAPPED):

/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */

#define IS_MMAPPED 0x2

 

/* check for mmap()'ed chunk */

#define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED)

 

The A flag (NON_MAIN_ARENA):

/* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained

from a non-main arena. This is only set immediately before handing

the chunk to the user, if necessary. */

#define NON_MAIN_ARENA 0x4

 

/* Check for chunk from main arena. */

#define chunk_main_arena(p) (((p)->mchunk_size & NON_MAIN_ARENA) == 0)

 

/* Mark a chunk as not being on the main arena. */

#define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)

 

SIZE_BITS: the bits that must be masked off when extracting the size:

/*

Bits to mask off when extracting size

 

Note: IS_MMAPPED is intentionally not masked off from size field in

macros for which mmapped chunks should never be seen. This should

cause helpful core dumps to occur if it is tried by accident by

people extending or adapting this malloc.

*/

#define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
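Because chunk sizes are always a multiple of 8/16, the low three bits of mchunk_size are free to hold the A, M and P flags. A small stand-alone sketch (illustrative values only, not a real heap) of pulling them apart:

#include <stdio.h>

#define PREV_INUSE     0x1
#define IS_MMAPPED     0x2
#define NON_MAIN_ARENA 0x4
#define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)

int main(void)
{
    unsigned long mchunk_size = 0x91;   /* hypothetical header: size 0x90, P bit set */
    printf("raw size field : %#lx\n", mchunk_size);
    printf("chunk size     : %#lx\n", mchunk_size & ~(unsigned long)SIZE_BITS);
    printf("prev_inuse     : %d\n", (int)(mchunk_size & PREV_INUSE));
    printf("is_mmapped     : %d\n", (mchunk_size & IS_MMAPPED) != 0);
    printf("non_main_arena : %d\n", (mchunk_size & NON_MAIN_ARENA) != 0);
    return 0;
}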

 

Getting/setting/clearing the P flag of the next chunk (i.e. whether this chunk is in use):

/* extract p's inuse bit */

#define inuse(p)               \

((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE)

 

/* set/clear chunk as being inuse without otherwise disturbing */

#define set_inuse(p)               \

((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE

 

#define clear_inuse(p)               \

((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE)

 

Getting/setting/clearing the P flag of the chunk at a given offset:

/* check/set/clear inuse bits in known places */

#define inuse_bit_at_offset(p, s)          \

(((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE)

 

#define set_inuse_bit_at_offset(p, s)          \

(((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE)

 

#define clear_inuse_bit_at_offset(p, s)          \

(((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE))

2.1.8 Chunk size macros

Getting the chunk size:

/* Get size, ignoring use bits */

#define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))

 

/* Like chunksize, but do not mask SIZE_BITS. */

#define chunksize_nomask(p) ((p)->mchunk_size)

 

Setting the chunk size:

/* Set size at head, without disturbing its use bit */

#define set_head_size(p, s) ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))

 

/* Set size/use field */

#define set_head(p, s) ((p)->mchunk_size = (s))

/* Set size at footer (only when chunk is not in use) */

#define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))

2.1.9 Getting the next physical chunk

/* Ptr to next physical malloc_chunk. */

#define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))
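A stand-alone sketch of walking physically adjacent chunks the way next_chunk does, by adding the masked size field to the chunk address. It assumes the three allocations are served contiguously from the top chunk, which is typical for a fresh glibc heap:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define SIZE_SZ   (sizeof(size_t))
#define SIZE_BITS 0x7UL

int main(void)
{
    void *a = malloc(24);
    void *b = malloc(40);
    void *c = malloc(56);

    uint8_t *p = (uint8_t *)a - 2 * SIZE_SZ;            /* mem2chunk(a) */
    for (int i = 0; i < 3; i++) {
        size_t sz = *(size_t *)(p + SIZE_SZ) & ~SIZE_BITS;
        printf("chunk %d at %p, size %#zx\n", i, (void *)p, sz);
        p += sz;                                        /* next_chunk   */
    }
    free(a); free(b); free(c);
    return 0;
}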

2.1.10 Getting the previous chunk's information

/* Size of the chunk below P. Only valid if prev_inuse (P). */

#define prev_size(p) ((p)->mchunk_prev_size)

 

/* Set the size of the chunk below P. Only valid if prev_inuse (P). */

#define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))

 

/* Ptr to previous physical malloc_chunk. Only valid if prev_inuse (P). */

#define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))

2.1.11 Getting the chunk at a given offset

/* Treat space at ptr + offset as a chunk */

#define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))

2.1.12 Top chunk

On the first call, the unsorted bin is conveniently used as a dummy top chunk:

 

/* Conveniently, the unsorted bin can be used as dummy top on first call */

#define initial_top(M) (unsorted_chunks (M))

2.2 Bins

2.2.1 The bins array

#define NBINS 128

typedef struct malloc_chunk* mchunkptr;

 

struct malloc_state

{

/* Normal bins packed as described above */

mchunkptr bins[NBINS * 2 - 2];

2.2.2 Locating a bin

- bin_at(m, i): given a bin index, returns the address of the corresponding bin header in the bins array.

Bin index i maps to bins array index 2 * (i - 1):

Bin index   bins[] index
0           does not exist
1           bins[0]  (unsorted bin)
2           bins[2]
3           bins[4]
...         ...
126         bins[250]
127         bins[252]

- next_bin(b): returns the bin header following the given bin header.

 

typedef struct malloc_chunk *mbinptr;

 

/* addressing -- note that bin_at(0) does not exist */

#define bin_at(m, i) \

(mbinptr) (((char *) &((m)->bins[((i) - 1) * 2]))      \

- offsetof (struct malloc_chunk, fd))

 

/* analog of ++bin */

#define next_bin(b) ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))

 

/* Reminders about list directionality within bins */

#define first(b) ((b)->fd)

#define last(b) ((b)->bk)
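The subtraction of offsetof (struct malloc_chunk, fd) means bin_at returns a "fake" chunk whose fd and bk fields overlay bins[2*(i-1)] and bins[2*(i-1)+1], so a bin header can be handled like a chunk on its own list. A stand-alone sketch (not glibc code) that checks this overlay:

#include <stdio.h>
#include <stddef.h>

struct malloc_chunk {
    size_t mchunk_prev_size, mchunk_size;
    struct malloc_chunk *fd, *bk, *fd_nextsize, *bk_nextsize;
};
typedef struct malloc_chunk *mchunkptr;
typedef struct malloc_chunk *mbinptr;

#define NBINS 128
static mchunkptr bins[NBINS * 2 - 2];

#define bin_at(i) \
    ((mbinptr)(((char *)&bins[((i) - 1) * 2]) - offsetof(struct malloc_chunk, fd)))

int main(void)
{
    int i = 1;                                   /* bin 1 is the unsorted bin */
    mbinptr b = bin_at(i);
    printf("&b->fd == &bins[%d]? %d\n", 2 * i - 2, (void *)&b->fd == (void *)&bins[2 * i - 2]);
    printf("&b->bk == &bins[%d]? %d\n", 2 * i - 1, (void *)&b->bk == (void *)&bins[2 * i - 1]);
    return 0;
}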

 

- bin_index(sz): dispatches to smallbin_index or largebin_index depending on the size:

#define bin_index(sz) \

((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))

2.3 Fast bin

2.3.1 fastbinsY

- MAX_FAST_SIZE: 80/160 bytes, the largest fastbin request size.

After request2size (adding the header overhead and aligning), the corresponding chunk size is 88/176 bytes;

- fastbin_index(sz): takes a chunk size and returns the fast bin index.

Correspondence between chunk size and fast bin index:

Fast bin index   Chunk size (32-bit)   Chunk size (64-bit)
0                16                    32
1                24                    48
2                32                    64
3                40                    80
4                48                    96
5                56                    112
6                64                    128
7                72                    144
8                80                    160
9                88                    176

 

- NFASTBINS: 10.

 

/* The maximum fastbin request size we support */

#define MAX_FAST_SIZE (80 * SIZE_SZ / 4)

 

/* offset 2 to use otherwise unindexable first 2 bins */

#define fastbin_index(sz) \

((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)

 

#define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)

 

typedef struct malloc_chunk *mfastbinptr;

#define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])

struct malloc_state

{

/* Fastbins */

mfastbinptr fastbinsY[NFASTBINS];

}
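A stand-alone sketch (not glibc code, assuming a 64-bit build) that evaluates fastbin_index on the chunk sizes listed in the table above:

#include <stdio.h>

#define SIZE_SZ 8UL   /* assume a 64-bit build */
#define fastbin_index(sz) ((((unsigned int)(sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)

int main(void)
{
    for (unsigned int sz = 32; sz <= 176; sz += 16)   /* 64-bit fast chunk sizes */
        printf("chunk size %3u -> fastbin index %u\n", sz, fastbin_index(sz));
    return 0;
}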

2.3.2 DEFAULT_MXFAST

- DEFAULT_MXFAST: 64/128 bytes, the default maximum fast chunk request size (user data).

 

#ifndef DEFAULT_MXFAST

#define DEFAULT_MXFAST (64 * SIZE_SZ / 4)

#endif

 

- MAX_FAST_SIZE: 80/160 bytes, the largest fast chunk request size (user data) the implementation supports.

#define MAX_FAST_SIZE (80 * SIZE_SZ / 4)

 

- malloc_init_state initializes the maximum fast chunk size (user data) to 64/128 by calling set_max_fast (DEFAULT_MXFAST):

/*

Set value of max_fast.

Use impossibly small value if 0.

Precondition: there are no existing fastbin chunks.

Setting the value clears fastchunk bit but preserves noncontiguous bit.

*/

 

#define set_max_fast(s) \

global_max_fast = (((s) == 0)            \

? SMALLBIN_WIDTH : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))

#define get_max_fast() global_max_fast

 

static void

malloc_init_state (mstate av)

{

if (av == &main_arena)

set_max_fast (DEFAULT_MXFAST);

av->flags |= FASTCHUNKS_BIT;

//......

}

2.3.3 FASTCHUNKS_BIT

FASTCHUNKS_BIT indicates that there may be fastbin chunks present.

It is set whenever a chunk is put into a fastbin;

it is only cleared in malloc_consolidate;

the truth value is inverted (0 means "may have fast chunks"), so have_fastchunks() is true right after startup, which simplifies initialization checks.

Getting/clearing/setting the fast chunks bit:

#define FASTCHUNKS_BIT (1U)

 

#define have_fastchunks(M) (((M)->flags & FASTCHUNKS_BIT) == 0)

#define clear_fastchunks(M) catomic_or (&(M)->flags, FASTCHUNKS_BIT)

#define set_fastchunks(M) catomic_and (&(M)->flags, ~FASTCHUNKS_BIT)

 

struct malloc_state

{

/* Flags (formerly in max_fast). */

int flags;

}

 

2.3.4 NONCONTIGUOUS_BIT

NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous regions;

the initial value comes from MORECORE_CONTIGUOUS and is changed dynamically if mmap is used in place of sbrk.

// For the main arena, MORECORE is really sbrk(), which by default returns contiguous virtual address space.
// Non-main arenas allocate large blocks of virtual memory with mmap() and then carve them up to emulate the main arena's behaviour.
// Since mmap-ed regions are not guaranteed to be virtually contiguous by default, non-main arenas allocate non-contiguous virtual address space by default.

 

Getting/setting/clearing:

#define NONCONTIGUOUS_BIT (2U)

 

#define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)

#define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)

#define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)

#define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)

 

Initial value:

#ifndef MORECORE_CONTIGUOUS

#define MORECORE_CONTIGUOUS 1

#endif

 

MORECORE:

MORECORE is the name of the function used to obtain memory from the system; it defaults to sbrk:

/* Definition for getting more memory from the OS. */

#define MORECORE (*__morecore)

#define MORECORE_FAILURE 0

void * __default_morecore (ptrdiff_t);

void *(*__morecore)(ptrdiff_t) = __default_morecore;

 

 

/* Allocate INCREMENT more bytes of data space,

and return the start of data space, or NULL on errors.

If INCREMENT is negative, shrink data space. */

void *

__default_morecore (ptrdiff_t increment)

{

void *result = (void *) __sbrk (increment);

if (result == (void *) -1)

return NULL;

 

return result;

}

 

2.3.5 ARENA_CORRUPTION_BIT

ARENA_CORRUPTION_BIT indicates that memory corruption was detected in the arena;

such an arena is no longer used to allocate chunks;

chunks allocated in the arena before the corruption was detected are not freed.

Checking/setting:

/* ARENA_CORRUPTION_BIT is set if a memory corruption was detected on the

arena. Such an arena is no longer used to allocate chunks. Chunks

allocated in that arena before detecting corruption are not freed. */

 

#define ARENA_CORRUPTION_BIT (4U)

#define arena_is_corrupt(A) (((A)->flags & ARENA_CORRUPTION_BIT))

#define set_arena_corrupt(A)  ((A)->flags |= ARENA_CORRUPTION_BIT)

 

2.3.6 FASTBIN_CONSOLIDATION_THRESHOLD

When a freed chunk, after coalescing with its adjacent free chunks, is larger than FASTBIN_CONSOLIDATION_THRESHOLD, memory fragmentation is likely significant, so all chunks in the fast bins are consolidated to reduce its impact on the system.

/*

FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()

that triggers automatic consolidation of possibly-surrounding

fastbin chunks. This is a heuristic, so the exact value should not

matter too much. It is defined at half the default trim threshold as a

compromise heuristic to only attempt consolidation if it is likely

to lead to trimming. However, it is not dynamically tunable, since

consolidation reduces fragmentation surrounding large chunks even

if trimming is not used.

*/

 

#define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)

2.4 Small bin

- SMALLBIN_WIDTH: 8/16.

- SMALLBIN_CORRECTION: whether the small bin index needs correcting (1 when MALLOC_ALIGNMENT > 2 * SIZE_SZ, otherwise 0).

- MIN_LARGE_SIZE: the smallest large chunk size, i.e. the first size beyond the largest small chunk.

The largest small chunk is the one in bin 63, of size 504/1008;

MIN_LARGE_SIZE is 64 * 8 / 64 * 16, i.e. 512/1024.

- in_smallbin_range(sz): checks whether a chunk size falls within the small bin range.

- smallbin_index(sz): returns the small bin index for a given chunk size.

Small bin index   Chunk size (32-bit)   Chunk size (64-bit)
2                 16                    32
3                 24                    48
4                 32                    64
...               ...                   ...
63                504                   1008

 

#define NSMALLBINS 64

#define SMALLBIN_WIDTH MALLOC_ALIGNMENT

#define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > 2 * SIZE_SZ)

#define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)

 

#define in_smallbin_range(sz) \

((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)

 

#define smallbin_index(sz) \

((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\

+ SMALLBIN_CORRECTION)
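A stand-alone sketch (assuming the common 64-bit layout: SMALLBIN_WIDTH = 16, SMALLBIN_CORRECTION = 0) showing in_smallbin_range and smallbin_index on a few chunk sizes:

#include <stdio.h>

#define NSMALLBINS          64
#define SMALLBIN_WIDTH      16
#define SMALLBIN_CORRECTION 0
#define MIN_LARGE_SIZE      ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)

#define in_smallbin_range(sz) ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
#define smallbin_index(sz)    ((((unsigned)(sz)) >> 4) + SMALLBIN_CORRECTION)

int main(void)
{
    unsigned long sizes[] = { 32, 48, 512, 1008, 1024 };
    /* Expected: indices 2, 3, 32, 63; 1024 is no longer a small chunk */
    for (int i = 0; i < 5; i++)
        printf("size %4lu: small? %d, index %u\n", sizes[i],
               (int)in_smallbin_range(sizes[i]),
               in_smallbin_range(sizes[i]) ? smallbin_index(sizes[i]) : 0u);
    return 0;
}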

 

2.5 Large bin

The large bins comprise 63 bins in total. Chunks within one bin are not all the same size; each bin covers a range of sizes. These 63 bins are divided into 6 groups, and within each group the spacing between consecutive bins is constant;

 

Large bins are spaced approximately logarithmically; the deviations in bin_index are for speed:

64 bins of size 8                   // 64 bins spaced 8 bytes apart: the small bins
32 bins of size 64                  // large bins, as follows

16 bins of size 512

8 bins of size 4096

4 bins of size 32768

2 bins of size 262144

1 bin of size what's left

 

Bin indices and chunk size spacing (32-bit):

Bin count   Bin index   Chunk size (32-bit)
31          64 - 94     spacing 64:     bin 64 = [512, 576), ..., bin 94 = [2432, 2496)
17          95 - 111    spacing 512:    bin 95 = [2496, 2560), ..., bin 111 = [10240, 10752)
9           112 - 120   spacing 4096:   bin 112 = [10752, 12288), ..., bin 120 = [40960, 45056)
3           121 - 123   spacing 32768:  bin 121 = [65536, 98304), ..., bin 123 = [131072, 163840)
                        (bin 120 also absorbs [45056, 65536))
2           124 - 125   spacing 262144: bin 124 = [163840, 262144), bin 125 = [262144, 524288)
1           126         everything else: [524288, +inf)

 

largebin_index_32: takes a chunk size and returns the large bin index (32-bit, 8-byte alignment):

#define largebin_index_32(sz) \

(((((unsigned long) (sz)) >> 6) <= 38) ? 56 + (((unsigned long) (sz)) >> 6) :\

((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\

((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\

((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\

((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\

126)

 

largebin_index: selects the right variant based on word size and alignment:

#define largebin_index(sz) \

(SIZE_SZ == 8 ? largebin_index_64 (sz) \

: MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \

: largebin_index_32 (sz))
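A stand-alone sketch (not glibc code) that evaluates largebin_index_32 on a few chunk sizes; the results line up with the 32-bit table above:

#include <stdio.h>

#define largebin_index_32(sz)                                                  \
  (((((unsigned long)(sz)) >> 6)  <= 38) ? 56  + (((unsigned long)(sz)) >> 6) :\
   ((((unsigned long)(sz)) >> 9)  <= 20) ? 91  + (((unsigned long)(sz)) >> 9) :\
   ((((unsigned long)(sz)) >> 12) <= 10) ? 110 + (((unsigned long)(sz)) >> 12):\
   ((((unsigned long)(sz)) >> 15) <= 4)  ? 119 + (((unsigned long)(sz)) >> 15):\
   ((((unsigned long)(sz)) >> 18) <= 2)  ? 124 + (((unsigned long)(sz)) >> 18):\
   126)

int main(void)
{
    unsigned long sizes[] = { 512, 1280, 3000, 20000, 100000, 1000000 };
    /* Expected indices: 64, 76, 96, 114, 122, 126 */
    for (int i = 0; i < 6; i++)
        printf("chunk size %7lu -> large bin index %lu\n",
               sizes[i], (unsigned long)largebin_index_32(sizes[i]));
    return 0;
}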

2.6 unsorted bin

The unsorted bin can be viewed as a buffer where freed chunks wait before being sorted back into their proper bins.

 

/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */

#define unsorted_chunks(M) (bin_at (M, 1))

2.7 Binmap

Each bit of the binmap marks whether the corresponding bin is empty:

0 - the bin is empty

1 - the bin is non-empty

 

To help compensate for the large number of bins, an index structure is used for bin-by-bin searching;

binmap is a bit vector recording which bins are definitely empty, so they can be skipped during traversal;

when a bin becomes empty its bit is not cleared immediately, but only when malloc notices the bin is empty during traversal.

 

- BINMAPSHIFT: the number of bits needed to index within one map word; 5 here.

- BITSPERMAP: each map word has 32 bits.

- BINMAPSIZE: the number of map words, 4. There are 128 bins in total, requiring 128 bits; at 32 bits per map word, 4 map words are needed.

- idx2block(i): converts a bin index into an index into the binmap array;

since one map word covers 32 bins, the bin index is divided by 32.

- idx2bit(i): converts a bin index into a bit mask within its map word;

the index is first taken modulo 32, then shifted to produce the mask.

- mark_bin(m, i): sets the binmap bit for bin i.

- unmark_bin(m, i): clears the binmap bit for bin i.

- get_binmap(m, i): reads the binmap bit for bin i.

 

struct malloc_state

{

/* Bitmap of bins */

unsigned int binmap[BINMAPSIZE];

}

/*

Binmap

 

To help compensate for the large number of bins, a one-level index

structure is used for bin-by-bin searching. `binmap' is a

bitvector recording whether bins are definitely empty so they can

be skipped over during during traversals. The bits are NOT always

cleared as soon as bins are empty, but instead only

when they are noticed to be empty during traversal in malloc.

*/

 

/* Conservatively use 32 bits per map word, even if on 64bit system */

#define BINMAPSHIFT 5

#define BITSPERMAP (1U << BINMAPSHIFT)

#define BINMAPSIZE (NBINS / BITSPERMAP)

 

#define idx2block(i) ((i) >> BINMAPSHIFT)

#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))

 

#define mark_bin(m, i) ((m)->binmap[idx2block (i)] |= idx2bit (i))

#define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))

#define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i))
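A stand-alone sketch (not glibc code) showing how the binmap macros map a bin index to a map word and a bit, and how mark_bin / get_binmap / unmark_bin act on it:

#include <stdio.h>

#define NBINS        128
#define BINMAPSHIFT  5
#define BITSPERMAP   (1U << BINMAPSHIFT)
#define BINMAPSIZE   (NBINS / BITSPERMAP)

#define idx2block(i) ((i) >> BINMAPSHIFT)
#define idx2bit(i)   ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))

int main(void)
{
    unsigned int binmap[BINMAPSIZE] = { 0 };
    int i = 70;                                  /* hypothetical non-empty bin */
    binmap[idx2block(i)] |= idx2bit(i);          /* mark_bin(m, i)   */
    printf("bin %d -> block %d, bit mask %#x\n", i, idx2block(i), idx2bit(i));
    printf("get_binmap: %s\n",
           (binmap[idx2block(i)] & idx2bit(i)) ? "non-empty" : "empty");
    binmap[idx2block(i)] &= ~idx2bit(i);         /* unmark_bin(m, i) */
    return 0;
}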

Multi-threaded Data Structures

3.1 Heap size

- DEFAULT_MMAP_THRESHOLD_MIN: the minimum mmap threshold, 128 KB.

- DEFAULT_MMAP_THRESHOLD_MAX: the maximum mmap threshold, 512 KB on 32-bit and 4 MB * sizeof(long) = 32 MB on 64-bit.

- DEFAULT_MMAP_THRESHOLD: 128 KB.

#ifndef DEFAULT_MMAP_THRESHOLD_MIN

#define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)

#endif

 

#ifndef DEFAULT_MMAP_THRESHOLD_MAX

/* For 32-bit platforms we cannot increase the maximum mmap

threshold much because it is also the minimum value for the

maximum heap size and its alignment. Going above 512k (i.e., 1M

for new heaps) wastes too much address space. */

# if __WORDSIZE == 32

# define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)

# else

# define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))

# endif

#endif

 

#ifndef DEFAULT_MMAP_THRESHOLD

#define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN

#endif

 

- HEAP_MIN_SIZE: 32 KB.

- HEAP_MAX_SIZE: 2 * DEFAULT_MMAP_THRESHOLD_MAX, i.e. 1 MB on 32-bit and 64 MB on 64-bit.

/* Compile-time constants. */

 

#define HEAP_MIN_SIZE (32 * 1024)

#ifndef HEAP_MAX_SIZE

# ifdef DEFAULT_MMAP_THRESHOLD_MAX

# define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)

# else

# define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */

# endif

#endif

3.2 Heap Header(heap_info)

ar_ptr: the arena this heap belongs to.

prev: the previous heap.

size: the current size of this heap in bytes.

mprotect_size: the size in bytes that has been mprotected PROT_READ|PROT_WRITE.

pad: padding, normally 0 bytes.

The padding makes sizeof (heap_info) + 2 * SIZE_SZ a multiple of MALLOC_ALIGNMENT.

heap_info has two pointers and two size_t fields (4 * SIZE_SZ); together with the 2 * SIZE_SZ chunk header that is 6 * SIZE_SZ, hence the pad size -6 * SIZE_SZ & MALLOC_ALIGN_MASK.

Why must sizeof (heap_info) + 2 * SIZE_SZ be aligned? The first chunk of a heap is placed immediately after the heap_info header, and the user pointer handed out for it lies 2 * SIZE_SZ past the chunk start, so this sum must be a multiple of MALLOC_ALIGNMENT for user pointers to stay aligned.

 

/* A heap is a single contiguous memory region holding (coalesceable)

malloc_chunks. It is allocated with mmap() and always starts at an

address aligned to HEAP_MAX_SIZE. */

 

typedef struct _heap_info

{

mstate ar_ptr; /* Arena for this heap. */

struct _heap_info *prev; /* Previous heap. */

size_t size; /* Current size in bytes. */

size_t mprotect_size; /* Size in bytes that has been mprotected

PROT_READ|PROT_WRITE. */

/* Make sure the following data is properly aligned, particularly

that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of

MALLOC_ALIGNMENT. */

char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];

} heap_info;
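A small arithmetic check (assuming the standard 64-bit layout: SIZE_SZ = 8, MALLOC_ALIGNMENT = 16) that the pad expression really makes sizeof (heap_info) + 2 * SIZE_SZ a multiple of MALLOC_ALIGNMENT:

#include <stdio.h>

#define SIZE_SZ           8UL
#define MALLOC_ALIGNMENT  (2 * SIZE_SZ)
#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)

int main(void)
{
    unsigned long fields = 2 * SIZE_SZ    /* ar_ptr, prev        */
                         + 2 * SIZE_SZ;   /* size, mprotect_size */
    unsigned long pad    = -6 * SIZE_SZ & MALLOC_ALIGN_MASK;    /* 0 on this layout   */
    unsigned long total  = fields + pad;                        /* sizeof (heap_info) */
    printf("pad = %lu, (sizeof(heap_info) + 2*SIZE_SZ) %% MALLOC_ALIGNMENT = %lu\n",
           pad, (total + 2 * SIZE_SZ) % MALLOC_ALIGNMENT);
    return 0;
}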

 

3.3 Arena header(malloc_state)

Note that the main arena's malloc_state is not part of any heap segment; it is a global variable stored in the data segment of libc.so.

 

struct malloc_state;

typedef struct malloc_state *mstate;

struct malloc_state

{

/* Serialize access. */

__libc_lock_define (, mutex);

 

/* Flags (formerly in max_fast). */

int flags;

/* Fastbins */

mfastbinptr fastbinsY[NFASTBINS];

/* Base of the topmost chunk -- not otherwise kept in a bin */

mchunkptr top;

/* The remainder from the most recent split of a small request */

mchunkptr last_remainder;

/* Normal bins packed as described above */

mchunkptr bins[NBINS * 2 - 2];

/* Bitmap of bins */

unsigned int binmap[BINMAPSIZE];

/* Linked list */

struct malloc_state *next;

/* Linked list for free arenas. Access to this field is serialized

by free_list_lock in arena.c. */

struct malloc_state *next_free;

/* Number of threads attached to this arena. 0 if the arena is on

the free list. Access to this field is serialized by

free_list_lock in arena.c. */

INTERNAL_SIZE_T attached_threads;

/* Memory allocated from the system in this arena. */

INTERNAL_SIZE_T system_mem;

INTERNAL_SIZE_T max_system_mem;

};

 

- __libc_lock_define (, mutex);

This mutex serializes access to the same arena by multiple threads.

Curiously, the definition of this macro in libc-lock.h is empty; the real definition comes from the NPTL sysdeps version of the header, which expands it to an actual lock variable.

#define __libc_lock_define(CLASS,NAME)

- flags: various flag bits:

#define FASTCHUNKS_BIT (1U)

#define NONCONTIGUOUS_BIT (2U)

#define ARENA_CORRUPTION_BIT (4U)

- fastbinsY: the heads of the fastbin lists.

- top: pointer to the top chunk.

- last_remainder: the last remainder chunk, left over from the most recent split of a small request.

- bins: the list heads of the unsorted bin, small bins and large bins.

- binmap: records which bins are empty so they can be skipped during searching.

- next: the next arena header; arenas form a singly linked list.

- next_free: linked list of free arenas; access is serialized by free_list_lock.

A free arena is an arena with no attached threads.

- attached_threads: the number of threads attached to this arena; 0 if the arena is on the free list.

- system_mem: memory currently allocated from the system in this arena.

- max_system_mem: the maximum memory ever allocated from the system in this arena.

3.4 main_arena

The main thread's arena; it is a static variable:

static struct malloc_state main_arena =

{

.mutex = _LIBC_LOCK_INITIALIZER,

.next = &main_arena,

.attached_threads = 1

};

3.5 arena_get

arena_get: acquires an arena and locks the corresponding mutex.

It first tries the arena this thread locked successfully most recently; this is the common case.

Otherwise it walks the arena list and, if no arena is available, creates a new one.

 

static __thread mstate thread_arena attribute_tls_model_ie;

 

#define arena_get(ptr, size) do { \

ptr = thread_arena;            \

arena_lock (ptr, size);            \

} while (0)

 

#define arena_lock(ptr, size) do {           \

if (ptr && !arena_is_corrupt (ptr))        \

__libc_lock_lock (ptr->mutex);           \

else                 \

ptr = arena_get2 ((size), NULL);         \

} while (0)

References

1. https://ctf-wiki.github.io/ctf-wiki/pwn/heap/heap_structure/

2. https://ctf-wiki.github.io/ctf-wiki/pwn/heap/heap_implementation_details/

