  victim = _int_malloc (ar_ptr, bytes);   /* call _int_malloc to do the actual allocation */
  /* Retry with another arena only if we were able to find a usable arena
     before.  */
  if (!victim && ar_ptr != NULL)
    {
      LIBC_PROBE (memory_malloc_retry, 1, bytes);
      ar_ptr = arena_get_retry (ar_ptr, bytes);
      victim = _int_malloc (ar_ptr, bytes);   /* on failure, look for another arena and retry */
    }

  if (ar_ptr != NULL)
    (void) mutex_unlock (&ar_ptr->mutex);   /* allocation done, release the arena lock */
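For completeness, in glibc 2.23 __libc_malloc then ends with a sanity assertion before returning the user pointer. A sketch from memory, so check it against your glibc version:

  /* Sketch of the tail of __libc_malloc (glibc 2.23 style; details differ
     across releases).  The returned chunk must either be mmapped or belong
     to the arena we just used.  */
  assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
          ar_ptr == arena_for_chunk (mem2chunk (victim)));
  return victim;
}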
  /*
     Convert request size to internal form by adding SIZE_SZ bytes
     overhead plus possibly more to obtain necessary alignment and/or
     to obtain a size of at least MINSIZE, the smallest allocatable
     size. Also, checked_request2size traps (returning 0) request sizes
     that are so large that they wrap around zero when padded and
     aligned.
   */
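The conversion this comment describes is performed by the request2size / checked_request2size macros. A sketch of their glibc 2.23 form, quoted from memory, so verify against your source tree:

/* Sketch of the size-conversion macros (glibc 2.23 style; verify against
   your glibc version).  */
#define request2size(req)                                         \
  (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)  ?             \
   MINSIZE :                                                      \
   ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)

#define checked_request2size(req, sz)                             \
  if (REQUEST_OUT_OF_RANGE (req)) {                               \
      __set_errno (ENOMEM);                                       \
      return 0;                                                   \
    }                                                             \
  (sz) = request2size (req);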
  /*
     If the size qualifies as a fastbin, first check corresponding bin.
     This code is safe to execute even if av is not yet initialized, so we
     can try it without checking, which saves some time on this fast path.
   */
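In glibc 2.23 this comment is followed by the fast bin path itself, which atomically pops the first chunk of the matching fast bin and checks that its size maps back to the same bin index. A sketch from memory, so double-check the exact source:

  /* Sketch of the fast bin path of _int_malloc (glibc 2.23 style;
     verify against your glibc source).  */
  if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
    {
      idx = fastbin_index (nb);
      mfastbinptr *fb = &fastbin (av, idx);
      mchunkptr pp = *fb;
      /* atomically pop the first chunk of the bin */
      do
        {
          victim = pp;
          if (victim == NULL)
            break;
        }
      while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim))
             != victim);
      if (victim != 0)
        {
          /* the size of the chunk must map back to the same fast bin index */
          if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
            {
              errstr = "malloc(): memory corruption (fast)";
              goto errout;
            }
          check_remalloced_chunk (av, victim, nb);
          void *p = chunk2mem (victim);
          alloc_perturb (p, bytes);
          return p;
        }
    }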
  /*
     If a small request, check regular bin.  Since these "smallbins"
     hold one size each, no searching within bins is necessary.
     (For a large request, we need to wait until unsorted chunks are
     processed to find best fit. But for small ones, fits are exact
     anyway, so we can check now, which is faster.)
   */
  if (in_smallbin_range (nb))
    {
      /* get the index of the matching small bin */
      idx = smallbin_index (nb);
      /* get the header of that small bin */
      bin = bin_at (av, idx);

      /* victim = last(bin) takes the last chunk in the small bin.
         If victim == bin, the bin is empty; otherwise there are two cases.  */
      if ((victim = last (bin)) != bin)
        {
          /* Case 1: the small bins have not been initialized yet.  */
          if (victim == 0) /* initialization check */
            /* initialize, consolidating the chunks in the fast bins */
            malloc_consolidate (av);
          /* Case 2: the small bin contains a free chunk.  */
          else
            {
              /* the second-to-last chunk in the small bin */
              bck = victim->bk;
              /* check that bck->fd really is victim, to defeat forged lists */
              if (__glibc_unlikely (bck->fd != victim))
                {
                  errstr = "malloc(): smallbin double linked list corrupted";
                  goto errout;
                }
              /* set the inuse bit corresponding to victim */
              set_inuse_bit_at_offset (victim, nb);
              /* fix the small bin list: take the last chunk out of the bin */
              bin->bk = bck;
              bck->fd = bin;
              /* if this is not the main arena, set the corresponding flag */
              if (av != &main_arena)
                set_non_main_arena (victim);
              /* thorough check; a no-op outside of debug builds */
              check_malloced_chunk (av, victim, nb);
              /* convert the chunk pointer to the corresponding mem pointer */
              void *p = chunk2mem (victim);
              /* if perturb_byte is set, fill the returned memory with perturb_byte ^ 0xff */
              alloc_perturb (p, bytes);
              return p;
            }
        }
    }
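For reference, the small bin helpers used above look roughly as follows in glibc 2.23 (quoted from memory, so verify against your source). Note that bin_at() treats the pair of bin pointers as the fd/bk fields of a fake chunk header, so last(bin) is simply bin->bk:

/* Sketch of the small bin helper macros (glibc 2.23 style; verify against
   your glibc source).  */
#define NSMALLBINS          64
#define SMALLBIN_WIDTH      MALLOC_ALIGNMENT
#define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > 2 * SIZE_SZ)
#define MIN_LARGE_SIZE      ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)

#define in_smallbin_range(sz)  \
  ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)

#define smallbin_index(sz)                                                    \
  ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3)) \
   + SMALLBIN_CORRECTION)

/* bins[] stores only fd/bk pairs; bin_at() backs the pointer up so the pair
   can be addressed as if it were a chunk header.  */
#define bin_at(m, i) \
  (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2]))                          \
             - offsetof (struct malloc_chunk, fd))

#define last(b)  ((b)->bk)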
The large bin part of _int_malloc
When no chunk in the fast bins or small bins can satisfy the requested size, malloc turns to the large bins. However, it does not scan the corresponding large bin right away. Instead it first calls malloc_consolidate (see the malloc_state related functions) to process the chunks in the fast bins: chunks that can be merged with their neighbours are coalesced first and then placed into the unsorted bin, and chunks that cannot be merged go into the unsorted bin directly.
  /*
     If this is a large request, consolidate fastbins before continuing.
     While it might look excessive to kill all fastbins before
     even seeing if there is space available, this avoids
     fragmentation problems normally associated with fastbins.
     Also, in practice, programs tend to have runs of either small or
     large requests, but less often mixtures, so consolidation is not
     invoked all that often in most programs. And the programs that
     it is called frequently in otherwise tend to fragment.
   */
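In glibc 2.23 this comment is followed by only a few lines: the large bin index is computed, and the fast bins are consolidated if any fast chunks exist. Roughly:

  /* Sketch of the code following the comment above (glibc 2.23 style;
     verify against your glibc source).  This is the else branch of
     if (in_smallbin_range (nb)).  */
  else
    {
      idx = largebin_index (nb);
      if (have_fastchunks (av))
        malloc_consolidate (av);
    }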
static void
_int_free (mstate av, mchunkptr p, int have_lock)
{
  INTERNAL_SIZE_T size;        /* its size */
  mfastbinptr *fb;             /* associated fastbin */
  mchunkptr nextchunk;         /* next contiguous chunk */
  INTERNAL_SIZE_T nextsize;    /* its size */
  int nextinuse;               /* true if nextchunk is used */
  INTERNAL_SIZE_T prevsize;    /* size of previous contiguous chunk */
  mchunkptr bck;               /* misc temp for linking */
  mchunkptr fwd;               /* misc temp for linking */

  const char *errstr = NULL;
  int locked = 0;
  size = chunksize (p);   /* set up the variables we need */

  /* Little security check which won't hurt performance: the
     allocator never wrapps around at the end of the address space.
     Therefore we can exclude some size values which might appear
     here by accident or by "design" from some intruder.  */
  /* the pointer must not point to an illegal address */
  /* the pointer must be aligned to 2*SIZE_SZ */
  if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
      || __builtin_expect (misaligned_chunk (p), 0))
    {
      errstr = "free(): invalid pointer";
    errout:
      if (!have_lock && locked)
        __libc_lock_unlock (av->mutex);
      malloc_printerr (check_action, errstr, chunk2mem (p), av);
      return;
    }
  /* We know that each chunk is at least MINSIZE bytes in size or a
     multiple of MALLOC_ALIGNMENT.  */
  /* the size is smaller than the minimum chunk size, or is not a multiple of MALLOC_ALIGNMENT */
  if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
    {
      errstr = "free(): invalid size";
      goto errout;
    }
  /* check that the chunk is marked in use; a no-op outside of debug builds */
  check_inuse_chunk (av, p);
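Between these checks and the atomic fastbin link quoted next, _int_free decides whether the chunk is fastbin-sized, validates the next chunk's size field, and selects the fast bin fb. A simplified sketch (glibc 2.23 style; the lock-and-retry handling around the next-size check is omitted here), with the code below continuing inside this if block:

  /* Simplified sketch of the code between the checks above and the atomic
     fastbin link below (glibc 2.23 style; verify against your glibc
     source).  */
  if ((unsigned long) (size) <= (unsigned long) (get_max_fast ())
#if TRIM_FASTBINS
      /* If TRIM_FASTBINS is set, do not place chunks bordering top into
         fastbins.  */
      && (chunk_at_offset (p, size) != av->top)
#endif
      )
    {
      /* the next chunk's size field must be sane: larger than 2*SIZE_SZ and
         smaller than av->system_mem */
      if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
          || __builtin_expect (chunksize (chunk_at_offset (p, size))
                               >= av->system_mem, 0))
        {
          errstr = "free(): invalid next size (fast)";
          goto errout;
        }

      free_perturb (chunk2mem (p), size - 2 * SIZE_SZ);

      set_fastchunks (av);
      unsigned int idx = fastbin_index (size);
      fb = &fastbin (av, idx);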
      /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
      /* old is the fd of the matching fastbinY entry, i.e. the address of the
         first chunk in that fast bin */
      mchunkptr old = *fb, old2;
      unsigned int old_idx = ~0u;
      do
        {
          /* Check that the top of the bin is not the record we are going to
             add: the first chunk of the fast bin must not equal p, the chunk
             being freed.  */
          if (__builtin_expect (old == p, 0))
            {
              errstr = "double free or corruption (fasttop)";
              goto errout;
            }
          /* record the fastbin index of the current first chunk of the bin */
          if (have_lock && old != NULL)
            old_idx = fastbin_index (chunksize (old));
          /* make p's fd point to the current top chunk of the fast bin */
          p->fd = old2 = old;
        }
      while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
      /* catomic_compare_and_exchange_val_rel: if *fb equals old2, store p into
         *fb and return old2; *fb = p makes the fast bin head point to p (the
         newly freed chunk) */
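old_idx is recorded precisely so that, once the CAS loop has finished, the size of the previous top chunk can be compared against the bin that p was pushed onto. In glibc 2.23 the check looks roughly like this:

      /* Sketch of the check that follows the CAS loop (glibc 2.23 style;
         verify against your glibc source).  */
      /* Check that size of fastbin chunk at the top is the same as size of
         the chunk that we are adding.  We can dereference OLD only if we
         have the lock, otherwise it might have already been allocated
         again.  */
      if (have_lock && old != NULL
          && __builtin_expect (old_idx != idx, 0))
        {
          errstr = "invalid fastbin entry (free)";
          goto errout;
        }
    }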