
Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/sys/kern/subr_pool.c between version 1.175 and 1.178

--- version 1.175, 2009/10/08 21:54:45
+++ version 1.178, 2009/12/30 18:57:17
Line 104 (v1.175) / Line 104 (v1.178): static struct pool *drainpp;
 static kmutex_t pool_head_lock;
 static kcondvar_t pool_busy;
 
+/* This lock protects initialization of a potentially shared pool allocator */
+static kmutex_t pool_allocator_lock;
+
 typedef uint32_t pool_item_bitmap_t;
 #define BITMAP_SIZE	(CHAR_BIT * sizeof(pool_item_bitmap_t))
 #define BITMAP_MASK	(BITMAP_SIZE - 1)
Line 604 (v1.175) / Line 607 (v1.178): pool_subsystem_init(void)
 
 	pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
 	    0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE);
+
+	mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
 }
 
 /*
Line 650 (v1.175) / Line 655 (v1.178): pool_init(struct pool *pp, size_t size,
 			palloc = &pool_allocator_nointr_fullpage;
 	}
 #endif /* POOL_SUBPAGE */
-	if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
+	mutex_enter(&pool_allocator_lock);
+	if (palloc->pa_refcnt++ == 0) {
 		if (palloc->pa_pagesz == 0)
 			palloc->pa_pagesz = PAGE_SIZE;
 
Line 663 (v1.175) / Line 669 (v1.178): pool_init(struct pool *pp, size_t size,
 		if (palloc->pa_backingmapptr != NULL) {
 			pa_reclaim_register(palloc);
 		}
-		palloc->pa_flags |= PA_INITIALIZED;
 	}
+	mutex_exit(&pool_allocator_lock);
 
 	if (align == 0)
 		align = ALIGN(1);
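
The two pool_init() hunks above replace the one-way PA_INITIALIZED flag with a reference count: the first pool attaching to a (potentially shared) allocator takes pool_allocator_lock, bumps pa_refcnt from zero, and performs the one-time setup. The fragment below is a minimal userland sketch of that pattern using POSIX threads; it is only an analogy, and all of its names (shared_alloc, alloc_attach, alloc_init_lock) are hypothetical rather than part of the pool(9) API.

    /*
     * Userland analogy (hypothetical names, not kernel code): the first
     * caller to attach to a shared allocator initializes it, later callers
     * only bump the reference count.  Serialized by alloc_init_lock, which
     * plays the role of pool_allocator_lock.
     */
    #include <pthread.h>
    #include <stddef.h>
    #include <stdio.h>

    struct shared_alloc {
            pthread_mutex_t sa_lock;        /* stands in for pa_lock */
            unsigned        sa_refcnt;      /* stands in for pa_refcnt */
            size_t          sa_pagesz;      /* lazily defaulted, like pa_pagesz */
    };

    static pthread_mutex_t alloc_init_lock = PTHREAD_MUTEX_INITIALIZER;

    static void
    alloc_attach(struct shared_alloc *sa, size_t default_pagesz)
    {
            pthread_mutex_lock(&alloc_init_lock);
            if (sa->sa_refcnt++ == 0) {
                    /* First user of this allocator: one-time setup. */
                    if (sa->sa_pagesz == 0)
                            sa->sa_pagesz = default_pagesz;
                    pthread_mutex_init(&sa->sa_lock, NULL);
            }
            pthread_mutex_unlock(&alloc_init_lock);
    }

    int
    main(void)
    {
            static struct shared_alloc sa;  /* zeroed, like a static pool_allocator */

            alloc_attach(&sa, 4096);        /* first attach initializes */
            alloc_attach(&sa, 4096);        /* later attaches only count */
            printf("refcnt %u, pagesz %zu\n", sa.sa_refcnt, sa.sa_pagesz);
            return 0;
    }
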
Line 892 (v1.175) / Line 898 (v1.178): pool_destroy(struct pool *pp)
 	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
 	mutex_exit(&pp->pr_alloc->pa_lock);
 
+	mutex_enter(&pool_allocator_lock);
+	if (--pp->pr_alloc->pa_refcnt == 0)
+		mutex_destroy(&pp->pr_alloc->pa_lock);
+	mutex_exit(&pool_allocator_lock);
+
 	mutex_enter(&pp->pr_lock);
 
 	KASSERT(pp->pr_cache == NULL);
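
pool_destroy() above is the mirror image: when the last pool using an allocator goes away, pa_refcnt drops to zero and the allocator's pa_lock is destroyed, which the old one-way flag could not express. Continuing the hypothetical sketch after the pool_init() hunks, the detach side might look like this:

    /*
     * Detach side of the sketch above (hypothetical names): the last
     * reference tears down the shared allocator's lock, mirroring the
     * pa_refcnt handling added to pool_destroy().
     */
    static void
    alloc_detach(struct shared_alloc *sa)
    {
            pthread_mutex_lock(&alloc_init_lock);
            if (--sa->sa_refcnt == 0)
                    pthread_mutex_destroy(&sa->sa_lock);
            pthread_mutex_unlock(&alloc_init_lock);
    }
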
Line 2289 (v1.175) / Line 2300 (v1.178): pool_cache_invalidate_groups(pool_cache_
  *
  *	Invalidate a pool cache (destruct and release all of the
  *	cached objects).  Does not reclaim objects from the pool.
+ *
+ *	Note: For pool caches that provide constructed objects, there
+ *	is an assumption that another level of synchronization is occurring
+ *	between the input to the constructor and the cache invalidation.
  */
 void
 pool_cache_invalidate(pool_cache_t pc)
 {
 	pcg_t *full, *empty, *part;
+	uint64_t where;
+
+	if (ncpu < 2 || !mp_online) {
+		/*
+		 * We might be called early enough in the boot process
+		 * for the CPU data structures to not be fully initialized.
+		 * In this case, simply gather the local CPU's cache now
+		 * since it will be the only one running.
+		 */
+		pool_cache_xcall(pc);
+	} else {
+		/*
+		 * Gather all of the CPU-specific caches into the
+		 * global cache.
+		 */
+		where = xc_broadcast(0, (xcfunc_t)pool_cache_xcall, pc, NULL);
+		xc_wait(where);
+	}
 
 	mutex_enter(&pc->pc_lock);
 	full = pc->pc_fullgroups;
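
The hunk above makes pool_cache_invalidate() gather every CPU's private cache into the global cache before releasing the global groups: with a single CPU, or before the secondary CPUs are running, it calls pool_cache_xcall() directly; otherwise it broadcasts the call with xc_broadcast() and waits for it with xc_wait(). The userland sketch below models the same gather step with one thread per simulated CPU that is then joined by the caller; all names are hypothetical and it is an analogy, not the kernel code path.

    /*
     * Userland sketch (hypothetical names, not kernel code) of the gather
     * step: each "CPU" flushes its private cache into the global cache,
     * and the caller waits for all of them before invalidating.
     */
    #include <pthread.h>
    #include <stdio.h>

    #define NCPUS 4

    static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
    static int global_count;               /* global cache: objects gathered */
    static int percpu_count[NCPUS];        /* per-CPU caches: private objects */

    /* Plays the role of pool_cache_xcall(): move one CPU's cache to the global one. */
    static void *
    cache_xcall(void *arg)
    {
            int cpu = *(int *)arg;

            pthread_mutex_lock(&global_lock);
            global_count += percpu_count[cpu];
            percpu_count[cpu] = 0;
            pthread_mutex_unlock(&global_lock);
            return NULL;
    }

    int
    main(void)
    {
            pthread_t t[NCPUS];
            int ids[NCPUS];
            int i;

            for (i = 0; i < NCPUS; i++)
                    percpu_count[i] = i + 1;        /* pretend each CPU cached objects */

            /* "Broadcast" the gather to every CPU, then "wait" by joining. */
            for (i = 0; i < NCPUS; i++) {
                    ids[i] = i;
                    pthread_create(&t[i], NULL, cache_xcall, &ids[i]);
            }
            for (i = 0; i < NCPUS; i++)
                    pthread_join(t[i], NULL);

            /* The global cache now holds everything and can be invalidated. */
            printf("gathered %d objects into the global cache\n", global_count);
            return 0;
    }
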
Line 2313 (v1.175) / Line 2346 (v1.178): pool_cache_invalidate(pool_cache_t pc)
 }
 
 /*
- * pool_cache_invalidate_local:
- *
- *	Invalidate all local ('current CPU') cached objects in
- *	pool cache.
- *	It is caller's responsibility to ensure that no operation is
- *	taking place on this pool cache while doing the local invalidation.
- */
-void
-pool_cache_invalidate_local(pool_cache_t pc)
-{
-	pool_cache_invalidate_cpu(pc, curcpu()->ci_index);
-}
-
-/*
  * pool_cache_invalidate_cpu:
  *
  *	Invalidate all CPU-bound cached objects in pool cache, the CPU being

Legend:
  Lines prefixed with '-' were removed from v.1.175.
  Lines prefixed with '+' were added in v.1.178.
  Lines prefixed with a space are unchanged context.
