libc-2.23——ptmalloc源码分析

写了几十道堆题了,但是在写堆题的时候,还是有很多不懂,所以打算自己逆一逆,来搞清楚大部分安全的条件

学习步骤大概是这样的:先看glibc堆内存管理,再自己逆一遍,顺带一说,准备开始完结xp的学习和linux网络编程了

我这里借用了以前逆向fopen的函数

结构体介绍

struct malloc_state
{
  /* Serialize access.  */
  mutex_t mutex;

  /* Flags (formerly in max_fast).  */
  int flags;

  /* Fastbins */
  mfastbinptr fastbinsY[NFASTBINS];

  /* Base of the topmost chunk -- not otherwise kept in a bin */
  mchunkptr top;

  /* The remainder from the most recent split of a small request */
  mchunkptr last_remainder;

  /* Normal bins packed as described above */
  mchunkptr bins[NBINS * 2 - 2];

  /* Bitmap of bins */
  unsigned int binmap[BINMAPSIZE];

  /* Linked list */
  struct malloc_state *next;

  /* Linked list for free arenas.  Access to this field is serialized
     by free_list_lock in arena.c.  */
  struct malloc_state *next_free;

  /* Number of threads attached to this arena.  0 if the arena is on
     the free list.  Access to this field is serialized by
     free_list_lock in arena.c.  */
  INTERNAL_SIZE_T attached_threads;

  /* Memory allocated from the system in this arena.  */
  INTERNAL_SIZE_T system_mem;
  INTERNAL_SIZE_T max_system_mem;
};

宏的定义

#define checked_request2size(req, sz)                             
  if (REQUEST_OUT_OF_RANGE (req)) {                          
      __set_errno (ENOMEM);                              
      return 0;                                      
    }                                          
  (sz) = request2size (req);
#define REQUEST_OUT_OF_RANGE(req)                                 
  ((unsigned long) (req) >=                              
   (unsigned long) (INTERNAL_SIZE_T) (-2 * MINSIZE))

宏 atomic_forced_read(x),这一段宏,我通过查看汇编代码来进行了解了,意思就相当于*ptr一样,不过是原子操作

#ifndef atomic_forced_read
# define atomic_forced_read(x) 
  ({ __typeof (x) __x; __asm ("" : "=r" (__x) : "0" (x)); __x; })
#endif

测试源码

#include<stdio.h>

int main()
{
    FILE*fp=fopen("test","wb");
    char *ptr=malloc(0x20);
    return 0;
}

流程分析

函数调用的malloc对应的其实是__libc_malloc函数

__libc_malloc

void *
__libc_malloc (size_t bytes)
{
  mstate ar_ptr;
  void *victim;

  void *(*hook) (size_t, const void *)
    = atomic_forced_read (__malloc_hook);
  if (__builtin_expect (hook != NULL, 0))
    return (*hook)(bytes, RETURN_ADDRESS (0));

  arena_get (ar_ptr, bytes);

  victim = _int_malloc (ar_ptr, bytes);
  /* Retry with another arena only if we were able to find a usable arena
     before.  */
  if (!victim && ar_ptr != NULL)
    {
      LIBC_PROBE (memory_malloc_retry, 1, bytes);
      ar_ptr = arena_get_retry (ar_ptr, bytes);
      victim = _int_malloc (ar_ptr, bytes);
    }

  if (ar_ptr != NULL)
    (void) mutex_unlock (&ar_ptr->mutex);

  assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
          ar_ptr == arena_for_chunk (mem2chunk (victim)));
  return victim;
}
libc_hidden_def (__libc_malloc)

函数会检查hook的值是否为空,如果不为空则去执行__malloc_hook然后返回,否则就会调用_int_malloc

static void *
_int_malloc (mstate av, size_t bytes)
{
  INTERNAL_SIZE_T nb;               /* normalized request size */
  unsigned int idx;                 /* associated bin index */
  mbinptr bin;                      /* associated bin */

  mchunkptr victim;                 /* inspected/selected chunk */
  INTERNAL_SIZE_T size;             /* its size */
  int victim_index;                 /* its bin index */

  mchunkptr remainder;              /* remainder from a split */
  unsigned long remainder_size;     /* its size */

  unsigned int block;               /* bit map traverser */
  unsigned int bit;                 /* bit map traverser */
  unsigned int map;                 /* current word of binmap */

  mchunkptr fwd;                    /* misc temp for linking */
  mchunkptr bck;                    /* misc temp for linking */

  const char *errstr = NULL;

  /*
     Convert request size to internal form by adding SIZE_SZ bytes
     overhead plus possibly more to obtain necessary alignment and/or
     to obtain a size of at least MINSIZE, the smallest allocatable
     size. Also, checked_request2size traps (returning 0) request sizes
     that are so large that they wrap around zero when padded and
     aligned.
   */

  checked_request2size (bytes, nb);  //将请求的字节对齐,并满足chunk的对齐要求

  /* There are no usable arenas.  Fall back to sysmalloc to get a chunk from
     mmap.  */
  if (__glibc_unlikely (av == NULL))  //没有可用的竞技场(说明还没有初始化)
    {
      void *p = sysmalloc (nb, av);  //调用sysmalloc系统调用,来初始化(后面会分析的)
      if (p != NULL)
    alloc_perturb (p, bytes);
      return p;
    }

  /*
     If the size qualifies as a fastbin, first check corresponding bin.
     This code is safe to execute even if av is not yet initialized, so we
     can try it without checking, which saves some time on this fast path.
   */

  if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))//nb(转换后的字节大小)小于fastbin里最大的size
    {
      idx = fastbin_index (nb);      //通过size获取对应的下表
      mfastbinptr *fb = &fastbin (av, idx);  //从下标中,获取头部
      mchunkptr pp = *fb;            //获取fastbin的第一个chunk
      do
        {
          victim = pp;        
          if (victim == NULL)      //如果该对应大小没有chunk,就退出,否则
            break;
        }
      while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim))//相当于把头部取出来,然后再把fastbin给pp,把指针fd赋值给fb,并比较取出来的pp是否与victim是否为同一个
             != victim);
      if (victim != 0)  //如果当前victim不为空
        {
          if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))  //验证当前的victim的大小所对应的下表是否与申请的chunk的大小一样(所以我们再修改chunk的size时候需要把这个修改成需要的size)
            {
              errstr = "malloc(): memory corruption (fast)";        //不一样报错
            errout:
              malloc_printerr (check_action, errstr, chunk2mem (victim), av);
              return NULL;
            }
          check_remalloced_chunk (av, victim, nb);  //没看懂
          void *p = chunk2mem (victim);        //将chunk转换为内存指针
          alloc_perturb (p, bytes);          //这里也没懂,不过看函数名,也许是分配一个或者说设置些标志???找不到原函数
          return p;
        }
    }

  /*
     If a small request, check regular bin.  Since these "smallbins"
     hold one size each, no searching within bins is necessary.
     (For a large request, we need to wait until unsorted chunks are
     processed to find best fit. But for small ones, fits are exact
     anyway, so we can check now, which is faster.)
   */

  if (in_smallbin_range (nb))        //再fastbin中找不到,就来small bin中查找
    {
      idx = smallbin_index (nb);      //跟fasbin上面的一样
      bin = bin_at (av, idx);

      if ((victim = last (bin)) != bin)  //让victim为最后一个chunk,也就是说small bin是先进先出
        {
          if (victim == 0) /* initialization check */
            malloc_consolidate (av);    //small_bin为空,就会调用该函数进行fastbin合并(在后面一篇会介绍,这篇篇幅太长了)
          else
            {
              bck = victim->bk; 
    if (__glibc_unlikely (bck->fd != victim))//查看victim的上一个chunk的fd(下一个)是否等于victim(安全检查),没有检查victim的fd指针,所以说在unlink的时候,没必要管这里
                {
                  errstr = "malloc(): smallbin double linked list corrupted";
                  goto errout;
                }
              set_inuse_bit_at_offset (victim, nb);  //将victim的后面一个chunk的inuse位置1
              bin->bk = bck;      
              bck->fd = bin;

              if (av != &main_arena)      //设置main_arena标志
                victim->size |= NON_MAIN_ARENA;
              check_malloced_chunk (av, victim, nb);  //
              void *p = chunk2mem (victim);
              alloc_perturb (p, bytes);
              return p;
            }
        }
    }

  /*
     If this is a large request, consolidate fastbins before continuing.
     While it might look excessive to kill all fastbins before
     even seeing if there is space available, this avoids
     fragmentation problems normally associated with fastbins.
     Also, in practice, programs tend to have runs of either small or
     large requests, but less often mixtures, so consolidation is not
     invoked all that often in most programs. And the programs that
     it is called frequently in otherwise tend to fragment.
   */

  else
    {
      idx = largebin_index (nb);    //合并所有的物理相邻的chunk
      if (have_fastchunks (av))
        malloc_consolidate (av);
    }

  /*
     Process recently freed or remaindered chunks, taking one only if
     it is exact fit, or, if this a small request, the chunk is remainder from
     the most recent non-exact fit.  Place other traversed chunks in
     bins.  Note that this step is the only place in any routine where
     chunks are placed in bins.

     The outer loop here is needed because we might not realize until
     near the end of malloc that we should have consolidated, so must
     do so and retry. This happens at most once, and only when we would
     otherwise need to expand memory to service a "small" request.
   */

  for (;; )              //进入大循环
    {
      int iters = 0;
      while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))  //unsorted bin不为空
        {
          bck = victim->bk;
          if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)        //检查chunk的size是否有溢出
              || __builtin_expect (victim->size > av->system_mem, 0))
            malloc_printerr (check_action, "malloc(): memory corruption",
                             chunk2mem (victim), av);
          size = chunksize (victim);                        //获取victim的size大小

          /*
             If a small request, try to use last remainder if it is the
             only chunk in unsorted bin.  This helps promote locality for
             runs of consecutive small requests. This is the only
             exception to best-fit, and applies only when there is
             no exact fit for a small chunk.
           */

          if (in_smallbin_range (nb) &&          //是否在small bin范围中
              bck == unsorted_chunks (av) &&        //取出unsorted bin是唯一chunk并且是否为last_remainder
              victim == av->last_remainder &&      //
              (unsigned long) (size) > (unsigned long) (nb + MINSIZE))  //last_remainder是否足够满足用户的大小
            {
              /* split and reattach remainder */
              remainder_size = size - nb;        //从small bin中切割chunk出来
              remainder = chunk_at_offset (victim, nb);
              unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;  //并将remainder挂入unsorted bin中
              av->last_remainder = remainder;
              remainder->bk = remainder->fd = unsorted_chunks (av);    //让remainder的bk与fd指针指向av存储unsorted的位置
              if (!in_smallbin_range (remainder_size))    //查看remainder_size是否还满足在unsorted bin中
                {
                  remainder->fd_nextsize = NULL;
                  remainder->bk_nextsize = NULL;
                }

              set_head (victim, nb | PREV_INUSE |          //设置chunk的head头部
                        (av != &main_arena ? NON_MAIN_ARENA : 0));
              set_head (remainder, remainder_size | PREV_INUSE);
              set_foot (remainder, remainder_size);        //设置remainder的pre_size

              check_malloced_chunk (av, victim, nb);
              void *p = chunk2mem (victim);
              alloc_perturb (p, bytes);
              return p;
            }

          /* remove from unsorted list */
          unsorted_chunks (av)->bk = bck;        //将victim脱离链表
          bck->fd = unsorted_chunks (av);

          /* Take now instead of binning if exact fit */

          if (size == nb)
            {
              set_inuse_bit_at_offset (victim, size);  //如果unsorted bin正好等于所请求的chunk大小,则直接分配
              if (av != &main_arena)
                victim->size |= NON_MAIN_ARENA;
              check_malloced_chunk (av, victim, nb);
              void *p = chunk2mem (victim);
              alloc_perturb (p, bytes);
              return p;
            }

          /* place chunk in bin */

          if (in_smallbin_range (size))      //如果unsorted bin里的chunk大小正好再small bin范围内,则将其放入small bin中
            {
              victim_index = smallbin_index (size);
              bck = bin_at (av, victim_index);
              fwd = bck->fd;
            }
          else
            {
              victim_index = largebin_index (size);    //否则大小就在large bin中,并将其插入链表
              bck = bin_at (av, victim_index);
              fwd = bck->fd;

              /* maintain large bins in sorted order */
              if (fwd != bck)
                {
                  /* Or with inuse bit to speed comparisons */
                  size |= PREV_INUSE;
                  /* if smaller than smallest, bypass loop below */
                  assert ((bck->bk->size & NON_MAIN_ARENA) == 0);  //如果bck->bk不是main_arena的话发出断言
                  if ((unsigned long) (size) < (unsigned long) (bck->bk->size))  //将其按size大小顺序排好
                    {
                      fwd = bck;
                      bck = bck->bk;

                      victim->fd_nextsize = fwd->fd;
                      victim->bk_nextsize = fwd->fd->bk_nextsize;
                      fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
                    }
                  else
                    {
                      assert ((fwd->size & NON_MAIN_ARENA) == 0);
                      while ((unsigned long) size < fwd->size)
                        {
                          fwd = fwd->fd_nextsize;
                          assert ((fwd->size & NON_MAIN_ARENA) == 0);
                        }

                      if ((unsigned long) size == (unsigned long) fwd->size)
                        /* Always insert in the second position.  */
                        fwd = fwd->fd;
                      else
                        {
                          victim->fd_nextsize = fwd;
                          victim->bk_nextsize = fwd->bk_nextsize;
                          fwd->bk_nextsize = victim;
                          victim->bk_nextsize->fd_nextsize = victim;
                        }
                      bck = fwd->bk;
                    }
                }
              else
                victim->fd_nextsize = victim->bk_nextsize = victim;
            }

          mark_bin (av, victim_index);
          victim->bk = bck;
          victim->fd = fwd;
          fwd->bk = victim;
          bck->fd = victim;

#define MAX_ITERS       10000
          if (++iters >= MAX_ITERS)
            break;
        }

      /*
         If a large request, scan through the chunks of current bin in
         sorted order to find smallest that fits.  Use the skip list for this.
       */

      if (!in_smallbin_range (nb))
        {
          bin = bin_at (av, idx);

          /* skip scan if empty or largest chunk is too small */
          if ((victim = first (bin)) != bin &&
              (unsigned long) (victim->size) >= (unsigned long) (nb))    //当前的large bin链表不为空并且victim的chunk大小要大于用户所申请的大小
            {
              victim = victim->bk_nextsize;                    
              while (((unsigned long) (size = chunksize (victim)) <    //再链表中找到一个比所请求的chunk要小的chunk
                      (unsigned long) (nb)))
                victim = victim->bk_nextsize;

              /* Avoid removing the first entry for a size so that the skip
                 list does not have to be rerouted.  */
              if (victim != last (bin) && victim->size == victim->fd->size)//当前的victim不等于bin的最后一个,并且victim的大小与fd指针所指的chunk大小相等就将victim指向下一个(这里好像是一种优化的操作,可以减少其他chunk的一些指针修改)
                victim = victim->fd;

              remainder_size = size - nb;
              unlink (av, victim, bck, fwd);  //将victim从链表中unlink出来(代码我会放在复习unlink的文章上说的)

              /* Exhaust */
              if (remainder_size < MINSIZE)    //如果剩下的size小于最小的size,就全部用完
                {
                  set_inuse_bit_at_offset (victim, size);
                  if (av != &main_arena)      //如果当前av部署main_arena
                    victim->size |= NON_MAIN_ARENA;
                }
              /* Split */
              else
                {
                  remainder = chunk_at_offset (victim, nb);    //将剩下的chunk的地址给mainder
                  /* We cannot assume the unsorted list is empty and therefore
                     have to perform a complete insert here.  */
                  bck = unsorted_chunks (av);            //将剩下的chunk放入unsorted bin中
                  fwd = bck->fd;
      if (__glibc_unlikely (fwd->bk != bck))
                    {
                      errstr = "malloc(): corrupted unsorted chunks";
                      goto errout;
                    }
                  remainder->bk = bck;
                  remainder->fd = fwd;
                  bck->fd = remainder;
                  fwd->bk = remainder;
if (!in_smallbin_range (remainder_size))  //如果不属于small bin
                    {
                      remainder->fd_nextsize = NULL;
                      remainder->bk_nextsize = NULL;
                    }
                  set_head (victim, nb | PREV_INUSE |
                            (av != &main_arena ? NON_MAIN_ARENA : 0));
                  set_head (remainder, remainder_size | PREV_INUSE);
                  set_foot (remainder, remainder_size);
                }
              check_malloced_chunk (av, victim, nb);
              void *p = chunk2mem (victim);
              alloc_perturb (p, bytes);
              return p;
            }
        }

      /*
         Search for a chunk by scanning bins, starting with next largest
         bin. This search is strictly by best-fit; i.e., the smallest
         (with ties going to approximately the least recently used) chunk
         that fits is selected.

         The bitmap avoids needing to check that most blocks are nonempty.
         The particular case of skipping all bins during warm-up phases
         when no chunks have been returned yet is faster than it might look.
       */

      ++idx;
      bin = bin_at (av, idx);
      block = idx2block (idx);
      map = av->binmap[block];
      bit = idx2bit (idx);

      for (;; )
        {
          /* Skip rest of block if there are no more set bits in this block.  */
          if (bit > map || bit == 0)
            {
              do
                {
                  if (++block >= BINMAPSIZE) /* out of bins */
                    goto use_top;
                }
              while ((map = av->binmap[block]) == 0);

              bin = bin_at (av, (block << BINMAPSHIFT));
              bit = 1;
            }

          /* Advance to bin with set bit. There must be one. */
          while ((bit & map) == 0)
            {
              bin = next_bin (bin);
              bit <<= 1;
              assert (bit != 0);
            }

          /* Inspect the bin. It is likely to be non-empty */
          victim = last (bin);

          /*  If a false alarm (empty bin), clear the bit. */
          if (victim == bin)
            {
              av->binmap[block] = map &= ~bit; /* Write through */
              bin = next_bin (bin);
              bit <<= 1;
            }

          else
            {
              size = chunksize (victim);

              /*  We know the first chunk in this bin is big enough to use. */
              assert ((unsigned long) (size) >= (unsigned long) (nb));

              remainder_size = size - nb;

              /* unlink */
              unlink (av, victim, bck, fwd);

              /* Exhaust */
              if (remainder_size < MINSIZE)
                {
                  set_inuse_bit_at_offset (victim, size);
                  if (av != &main_arena)
                    victim->size |= NON_MAIN_ARENA;
                }

              /* Split */
              else
                {
                  remainder = chunk_at_offset (victim, nb);

                  /* We cannot assume the unsorted list is empty and therefore
                     have to perform a complete insert here.  */
                  bck = unsorted_chunks (av);
                  fwd = bck->fd;
      if (__glibc_unlikely (fwd->bk != bck))
                    {
                      errstr = "malloc(): corrupted unsorted chunks 2";
                      goto errout;
                    }
                  remainder->bk = bck;
                  remainder->fd = fwd;
                  bck->fd = remainder;
                  fwd->bk = remainder;

                  /* advertise as last remainder */
                  if (in_smallbin_range (nb))//如果属于small bin
                    av->last_remainder = remainder;
                  if (!in_smallbin_range (remainder_size))
                    {
                      remainder->fd_nextsize = NULL;
                      remainder->bk_nextsize = NULL;
                    }
                  set_head (victim, nb | PREV_INUSE |
                            (av != &main_arena ? NON_MAIN_ARENA : 0));
                  set_head (remainder, remainder_size | PREV_INUSE);
                  set_foot (remainder, remainder_size);
                }
              check_malloced_chunk (av, victim, nb);
              void *p = chunk2mem (victim);
              alloc_perturb (p, bytes);
              return p;
            }
        }

    use_top:
      /*
         If large enough, split off the chunk bordering the end of memory
         (held in av->top). Note that this is in accord with the best-fit
         search rule.  In effect, av->top is treated as larger (and thus
         less well fitting) than any other available chunk since it can
         be extended to be as large as necessary (up to system
         limitations).

         We require that av->top always exists (i.e., has size >=
         MINSIZE) after initialization, so if it would otherwise be
         exhausted by current request, it is replenished. (The main
         reason for ensuring it exists is that we may need MINSIZE space
         to put in fenceposts in sysmalloc.)
       */

      victim = av->top;
      size = chunksize (victim);

      if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
        {
          remainder_size = size - nb;
          remainder = chunk_at_offset (victim, nb);
          av->top = remainder;
          set_head (victim, nb | PREV_INUSE |
                    (av != &main_arena ? NON_MAIN_ARENA : 0));
          set_head (remainder, remainder_size | PREV_INUSE);

          check_malloced_chunk (av, victim, nb);
          void *p = chunk2mem (victim);
          alloc_perturb (p, bytes);
          return p;
        }

      /* When we are using atomic ops to free fast chunks we can get
         here for all block sizes.  */
      else if (have_fastchunks (av))
        {
          malloc_consolidate (av);
          /* restore original bin index */
          if (in_smallbin_range (nb))
            idx = smallbin_index (nb);
          else
            idx = largebin_index (nb);
        }

      /*
         Otherwise, relay to handle system-dependent cases
       */
      else
        {
          void *p = sysmalloc (nb, av);
          if (p != NULL)
            alloc_perturb (p, bytes);
          return p;
        }
    }
}

还剩下一部分large size,我准备明天把large size和free的函数都逆完,并且在这周结束之前,逆完libc-2.23,libc-2.27的堆和文件相关操作的函数

关于mmap的先摸了

小总结

申请chunk的流程:

fastbin

  1. 首先程序会检查__malloc_hook是否存在,如果存在就执行__malloc_hook然后返回。
  2. __malloc_hook如果为空,就会检查av是否进行了初始化,如果为空,说明没有可用的heap,便会调用sysmalloc向系统申请空间
  3. 如果已经初始化过了就会检查所申请的chunk是否为fast bin范围内,然后取出头部的第一个chunk,验证当前的victim是否为空
  4. 如果不为空,查看当前的victim的size大小所对应的idx是否跟idx相对应(安全检查,防止人修改chunk的size域)否则会报错
  5. 接着将分配出来的victim给返回回来

 small bin

  1. 如果fast bin为空,或者说fast bin没有满足条件的chunk,那么就会验证请求的size是否满足small bin
  2. 满足的话,就会先取出small bin的最后一个也就是bin->bk,然后检查取出来的victim是否为空,如果是空的就会触发malloc_consolidate来合并fastbin中的chunk
  3. 如果不为空,就会验证victim的bk指针(bk所指的chunk我们称作bck),程序会验证bck的fd是否等于victim,如果不等于就会报错(如果我们修改small bin的bk指针,那么我们一定要让修改后的chunk的fd指针指向自己)
  4. 然后将选中的victim取出来

unsorted bin

  1. 如果请求的不满足任何一个small bin和fast bin,就会进入一个循环
  2. 该循环首先会检查unsorted bin是否为空,如果不为空,就会取出victim的bk所指的chunk(bck),并且检查当前的victim的size是否大于最小chunk,是否大于system_mem(一般不超过132K)
  3. 然后取出victim的size,然后检查我们所申请的chunk是否在small bin范围内,victim是否为unsorted bin中的唯一一块同时还是last_remainder,并且申请chunk后,大小还要大于最小chunk
  4. 此时会将remainder_size进行切割,然后查看remain_size是否满足small bin中不满足将其nextsize系列的指针置空,并且设置pre_size为自己本身大小然后返回
  5. 但如果都不符合上面的条件,就会初始取出一个属于unsorted bin中chunk,如果大小正好适合,就直接分配
  6. 如果不属于就查看该unsorted bin的size是否属于small bin中,是则放入small bin
  7. 不是则放入large bin中,并查看属于哪个bin
  8. 然后将size按顺序排好,以 fd_nextsize 递减排序
  9. 排好后,判断当前要插入的chunk要小于最小的chunk的话,则找到属于victim的位置,并将他插入属于他的位置
  10. 如果为空的话,则让victim的fd->nextsize等于自身

large bin

  1. 如果请求属于large bin,取出第一个large bin,并检查是否为空,victim是否满足要求
  2. 满足,就从最后一个chunk开始取(也就是从最小的开始取)
  3. 如果取出的victim不是最后一个chunk,并且victim的size等于victim的fd所指的chunk的size,就将victim等于后面一个(这样不需要修改fd和bk这两个指针所指的chunk的fd与bk还有nextsize系列的指针)
  4. 将当前的victim进行unlink,然后检查剩下的remainder_size是否满足最小chunk
  5. 满足,将remainder插入unsorted bin的链表中,并更新victim和remainder的头部

map bin

  1. 这个先摸了

由上面的分析,我们很容易就知道small bin和unsorted bin都是从最后一个开始取的,只有fast bin是从最开始取的,而large bin是从最满足chunk开始选的。

后面一篇libc-2.23——free源码分析

参考资料

https://ctf-wiki.org/pwn/linux/glibc-heap/implementation/malloc/#place-chunk-in-small-bin

原文地址:https://www.cnblogs.com/pppyyyzzz/p/14336521.html