
Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/lib/libc/stdlib/jemalloc.c between version 1.16 and 1.19

version 1.16, 2007/12/04 17:43:51 -> version 1.19, 2008/06/23 10:46:25
Line 824 (v.1.16) / Line 824 (v.1.19): static void *pages_map_align(void *addr,

  static void     pages_unmap(void *addr, size_t size);
  static void     *chunk_alloc(size_t size);
  static void     chunk_dealloc(void *chunk, size_t size);
- static arena_t  *choose_arena_hard(void);
  static void     arena_run_split(arena_t *arena, arena_run_t *run, size_t size);
  static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
  static void     arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
Line 1523 (v.1.16) / Line 1522 (v.1.19): chunk_dealloc(void *chunk, size_t size)

   */

  /*
-  * Choose an arena based on a per-thread value (fast-path code, calls slow-path
-  * code if necessary).
+  * Choose an arena based on a per-thread and (optimistically) per-CPU value.
+  *
+  * We maintain at least one block of arenas.  Usually there are more.
+  * The blocks are $ncpu arenas in size.  Whole blocks are 'hashed'
+  * amongst threads.  To accomplish this, next_arena advances only in
+  * ncpu steps.
   */
- static inline arena_t *
- choose_arena(void)
+ static __noinline arena_t *
+ choose_arena_hard(void)
  {
-         arena_t *ret;
-
-         /*
-          * We can only use TLS if this is a PIC library, since for the static
-          * library version, libc's malloc is used by TLS allocation, which
-          * introduces a bootstrapping issue.
-          */
-         if (__isthreaded == false) {
-             /*
-              * Avoid the overhead of TLS for single-threaded operation.  If the
-              * app switches to threaded mode, the initial thread may end up
-              * being assigned to some other arena, but this one-time switch
-              * shouldn't cause significant issues.
-              */
-             return (arenas[0]);
-         }
-
-         ret = get_arenas_map();
-         if (ret == NULL)
-                 ret = choose_arena_hard();
-
-         assert(ret != NULL);
-         return (ret);
+         unsigned i, curcpu;
+         arena_t **map;
+
+         /* Initialize the current block of arenas and advance to next. */
+         malloc_mutex_lock(&arenas_mtx);
+         assert(next_arena % ncpus == 0);
+         assert(narenas % ncpus == 0);
+         map = &arenas[next_arena];
+         set_arenas_map(map);
+         for (i = 0; i < ncpus; i++) {
+                 if (arenas[next_arena] == NULL)
+                         arenas_extend(next_arena);
+                 next_arena = (next_arena + 1) % narenas;
+         }
+         malloc_mutex_unlock(&arenas_mtx);
+
+         /*
+          * If we were unable to allocate an arena above, then default to
+          * the first arena, which is always present.
+          */
+         curcpu = thr_curcpu();
+         if (map[curcpu] != NULL)
+                 return map[curcpu];
+         return arenas[0];
  }

- /*
-  * Choose an arena based on a per-thread value (slow-path code only, called
-  * only by choose_arena()).
-  */
- static arena_t *
- choose_arena_hard(void)
+ static inline arena_t *
+ choose_arena(void)
  {
-         arena_t *ret;
-
-         assert(__isthreaded);
-
-         /* Assign one of the arenas to this thread, in a round-robin fashion. */
-         malloc_mutex_lock(&arenas_mtx);
-         ret = arenas[next_arena];
-         if (ret == NULL)
-                 ret = arenas_extend(next_arena);
-         if (ret == NULL) {
-                 /*
-                  * Make sure that this function never returns NULL, so that
-                  * choose_arena() doesn't have to check for a NULL return
-                  * value.
-                  */
-                 ret = arenas[0];
-         }
-         next_arena = (next_arena + 1) % narenas;
-         malloc_mutex_unlock(&arenas_mtx);
-         set_arenas_map(ret);
-
-         return (ret);
+         unsigned curcpu;
+         arena_t **map;
+
+         map = get_arenas_map();
+         curcpu = thr_curcpu();
+         if (__predict_true(map != NULL && map[curcpu] != NULL))
+                 return map[curcpu];
+
+         return choose_arena_hard();
  }

  #ifndef lint
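The new comment in this hunk describes the arena-selection scheme only in prose, so the following minimal, self-contained sketch illustrates the same idea. It is not part of the diff: the identifiers (fake_arena_t, assign_block, pick_arena, NCPUS, NARENAS, next_block) are illustrative stand-ins for the real jemalloc names (arenas, next_arena, ncpus, narenas, thr_curcpu, set_arenas_map), and the arenas_mtx locking is omitted. Each calling thread is handed a whole block of NCPUS arena slots, the block cursor advances only in whole-block steps, and within its block a thread indexes by the CPU it is currently running on.

/*
 * Hedged, self-contained sketch of the block/per-CPU arena selection
 * described above.  All identifiers are illustrative stand-ins, not
 * the real jemalloc ones, and locking is omitted.
 */
#include <stdio.h>
#include <stdlib.h>

#define NCPUS   4                       /* one arena slot per CPU in a block */
#define NARENAS (NCPUS * 2)             /* a whole number of blocks          */

typedef struct { int id; } fake_arena_t;

static fake_arena_t *arena_slots[NARENAS];
static unsigned      next_block;        /* plays the role of next_arena      */

/* Hand the calling "thread" a whole block of NCPUS slots, round-robin. */
static fake_arena_t **
assign_block(void)
{
        fake_arena_t **block = &arena_slots[next_block];
        unsigned i;

        for (i = 0; i < NCPUS; i++) {
                if (arena_slots[next_block] == NULL) {
                        arena_slots[next_block] = malloc(sizeof(fake_arena_t));
                        arena_slots[next_block]->id = (int)next_block;
                }
                /* The cursor only ever advances in whole-block (NCPUS) steps. */
                next_block = (next_block + 1) % NARENAS;
        }
        return block;
}

/* Within its block, a thread indexes by the CPU it currently runs on. */
static fake_arena_t *
pick_arena(fake_arena_t **block, unsigned curcpu)
{
        return block[curcpu % NCPUS];
}

int
main(void)
{
        fake_arena_t **t1 = assign_block();     /* first thread: slots 0..3  */
        fake_arena_t **t2 = assign_block();     /* second thread: slots 4..7 */

        printf("thread 1 on cpu 2 -> arena %d\n", pick_arena(t1, 2)->id);
        printf("thread 2 on cpu 2 -> arena %d\n", pick_arena(t2, 2)->id);
        return 0;
}

Two threads running on the same CPU thus normally end up in different blocks, hence on different arenas and different locks, which appears to be the point of hashing whole blocks amongst threads while keeping per-CPU locality inside each block.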
Line 3382 (v.1.16) / Line 3369 (v.1.19): malloc_init_hard(void)

                          }
                          break;
                  case 1:
-                         if (issetugid() == 0 && (opts =
-                             getenv("MALLOC_OPTIONS")) != NULL) {
+                         if ((opts = getenv("MALLOC_OPTIONS")) != NULL &&
+                             issetugid() == 0) {
                                  /*
                                   * Do nothing; opts is already initialized to
                                   * the value of the MALLOC_OPTIONS environment
Legend:
  Lines prefixed with '-' were removed from v.1.16.
  Lines prefixed with '+' were added in v.1.19.
  Unprefixed lines are unchanged context.
