
Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/sys/kern/subr_pool.c between version 1.190.2.2 and 1.190.2.3

version 1.190.2.2, 2012/05/23 10:08:11 version 1.190.2.3, 2012/10/30 17:22:34
Line 191  static bool pool_cache_get_slow(pool_cac
Line 191  static bool pool_cache_get_slow(pool_cac
 static void     pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);  static void     pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
 static void     pool_cache_invalidate_groups(pool_cache_t, pcg_t *);  static void     pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
 static void     pool_cache_invalidate_cpu(pool_cache_t, u_int);  static void     pool_cache_invalidate_cpu(pool_cache_t, u_int);
 static void     pool_cache_xcall(pool_cache_t);  static void     pool_cache_transfer(pool_cache_t);
   
 static int      pool_catchup(struct pool *);  static int      pool_catchup(struct pool *);
 static void     pool_prime_page(struct pool *, void *,  static void     pool_prime_page(struct pool *, void *,
Line 462  pool_init(struct pool *pp, size_t size, 
Line 462  pool_init(struct pool *pp, size_t size, 
         int off, slack;          int off, slack;
   
 #ifdef DEBUG  #ifdef DEBUG
           if (__predict_true(!cold))
                   mutex_enter(&pool_head_lock);
         /*          /*
          * Check that the pool hasn't already been initialised and           * Check that the pool hasn't already been initialised and
          * added to the list of all pools.           * added to the list of all pools.
Line 471  pool_init(struct pool *pp, size_t size, 
Line 473  pool_init(struct pool *pp, size_t size, 
                         panic("pool_init: pool %s already initialised",                          panic("pool_init: pool %s already initialised",
                             wchan);                              wchan);
         }          }
           if (__predict_true(!cold))
                   mutex_exit(&pool_head_lock);
 #endif  #endif
   
         if (palloc == NULL)          if (palloc == NULL)
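The hunk above brackets the DEBUG-only duplicate-registration check with pool_head_lock, skipping the mutex while the kernel is still cold. The list walk itself lies outside the changed lines; the following is a rough sketch of the resulting pattern, assuming the usual TAILQ_FOREACH walk over pool_head, and is not a verbatim copy of the file.

    #ifdef DEBUG
            struct pool *pp1;

            if (__predict_true(!cold))
                    mutex_enter(&pool_head_lock);
            /*
             * Check that the pool hasn't already been initialised and
             * added to the list of all pools.
             */
            TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
                    if (pp == pp1)
                            panic("pool_init: pool %s already initialised",
                                wchan);
            }
            if (__predict_true(!cold))
                    mutex_exit(&pool_head_lock);
    #endif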
Line 1300  pool_sethardlimit(struct pool *pp, int n
Line 1304  pool_sethardlimit(struct pool *pp, int n
 /*  /*
  * Release all complete pages that have not been used recently.   * Release all complete pages that have not been used recently.
  *   *
  * Might be called from interrupt context.   * Must not be called from interrupt context.
  */   */
 int  int
 pool_reclaim(struct pool *pp)  pool_reclaim(struct pool *pp)
Line 1311  pool_reclaim(struct pool *pp)
Line 1315  pool_reclaim(struct pool *pp)
         bool klock;          bool klock;
         int rv;          int rv;
   
         if (cpu_intr_p() || cpu_softintr_p()) {          KASSERT(!cpu_intr_p() && !cpu_softintr_p());
                 KASSERT(pp->pr_ipl != IPL_NONE);  
         }  
   
         if (pp->pr_drain_hook != NULL) {          if (pp->pr_drain_hook != NULL) {
                 /*                  /*
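With the conditional check replaced by a hard KASSERT, pool_reclaim() now requires thread context for every pool, whatever its IPL. A minimal, hypothetical caller is sketched below; the function name and the printf are illustrative only and not part of this change.

    /*
     * Hypothetical low-memory helper running in thread context.
     * Calling pool_reclaim() like this from a hardware or soft
     * interrupt handler would now trip the KASSERT above.
     */
    static void
    example_lowmem_thread(void *arg)
    {
            struct pool *pp = arg;

            KASSERT(!cpu_intr_p() && !cpu_softintr_p());
            if (pool_reclaim(pp))
                    printf("%s: released idle pages\n", pp->pr_wchan);
    }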
Line 1387  pool_reclaim(struct pool *pp)
Line 1389  pool_reclaim(struct pool *pp)
 }  }
   
 /*  /*
  * Drain pools, one at a time.  This is a two stage process;   * Drain pools, one at a time. The drained pool is returned within ppp.
  * drain_start kicks off a cross call to drain CPU-level caches  
  * if the pool has an associated pool_cache.  drain_end waits  
  * for those cross calls to finish, and then drains the cache  
  * (if any) and pool.  
  *   *
  * Note, must never be called from interrupt context.   * Note, must never be called from interrupt context.
  */   */
 void  bool
 pool_drain_start(struct pool **ppp, uint64_t *wp)  pool_drain(struct pool **ppp)
 {  {
           bool reclaimed;
         struct pool *pp;          struct pool *pp;
   
         KASSERT(!TAILQ_EMPTY(&pool_head));          KASSERT(!TAILQ_EMPTY(&pool_head));
Line 1422  pool_drain_start(struct pool **ppp, uint
Line 1421  pool_drain_start(struct pool **ppp, uint
         pp->pr_refcnt++;          pp->pr_refcnt++;
         mutex_exit(&pool_head_lock);          mutex_exit(&pool_head_lock);
   
         /* If there is a pool_cache, drain CPU level caches. */  
         *ppp = pp;  
         if (pp->pr_cache != NULL) {  
                 *wp = xc_broadcast(0, (xcfunc_t)pool_cache_xcall,  
                     pp->pr_cache, NULL);  
         }  
 }  
   
 bool  
 pool_drain_end(struct pool *pp, uint64_t where)  
 {  
         bool reclaimed;  
   
         if (pp == NULL)  
                 return false;  
   
         KASSERT(pp->pr_refcnt > 0);  
   
         /* Wait for remote draining to complete. */  
         if (pp->pr_cache != NULL)  
                 xc_wait(where);  
   
         /* Drain the cache (if any) and pool.. */          /* Drain the cache (if any) and pool.. */
         reclaimed = pool_reclaim(pp);          reclaimed = pool_reclaim(pp);
   
Line 1453  pool_drain_end(struct pool *pp, uint64_t
Line 1430  pool_drain_end(struct pool *pp, uint64_t
         cv_broadcast(&pool_busy);          cv_broadcast(&pool_busy);
         mutex_exit(&pool_head_lock);          mutex_exit(&pool_head_lock);
   
           if (ppp != NULL)
                   *ppp = pp;
   
         return reclaimed;          return reclaimed;
 }  }
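The old two-stage interface (pool_drain_start() broadcasting the cross-call, pool_drain_end() waiting for it and reclaiming) is folded into a single pool_drain() that reclaims one pool per call and optionally hands that pool back through ppp. Below is a sketch of how a caller such as the page daemon might adapt; the wrapper name is illustrative and not the actual uvm code.

    /*
     * Old usage (v.1.190.2.2):
     *
     *      pool_drain_start(&pp, &where);
     *      ...other work while the cross-call runs...
     *      freed = pool_drain_end(pp, where);
     *
     * New usage (v.1.190.2.3), one pool per call, round-robin:
     */
    static bool
    example_drain_one_pool(void)
    {
            struct pool *pp = NULL;
            bool freed;

            freed = pool_drain(&pp);
            if (pp != NULL)
                    printf("drained \"%s\": %s\n", pp->pr_wchan,
                        freed ? "pages released" : "nothing to release");
            return freed;
    }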
   
Line 2007  pool_cache_invalidate_groups(pool_cache_
Line 1987  pool_cache_invalidate_groups(pool_cache_
  *      Note: For pool caches that provide constructed objects, there   *      Note: For pool caches that provide constructed objects, there
  *      is an assumption that another level of synchronization is occurring   *      is an assumption that another level of synchronization is occurring
  *      between the input to the constructor and the cache invalidation.   *      between the input to the constructor and the cache invalidation.
    *
    *      Invalidation is a costly process and should not be called from
    *      interrupt context.
  */   */
 void  void
 pool_cache_invalidate(pool_cache_t pc)  pool_cache_invalidate(pool_cache_t pc)
 {  {
         pcg_t *full, *empty, *part;  
 #if 0  
         uint64_t where;          uint64_t where;
           pcg_t *full, *empty, *part;
   
           KASSERT(!cpu_intr_p() && !cpu_softintr_p());
   
         if (ncpu < 2 || !mp_online) {          if (ncpu < 2 || !mp_online) {
                 /*                  /*
                  * We might be called early enough in the boot process                   * We might be called early enough in the boot process
                  * for the CPU data structures to not be fully initialized.                   * for the CPU data structures to not be fully initialized.
                  * In this case, simply gather the local CPU's cache now                   * In this case, transfer the content of the local CPU's
                  * since it will be the only one running.                   * cache back into global cache as only this CPU is currently
                    * running.
                  */                   */
                 pool_cache_xcall(pc);                  pool_cache_transfer(pc);
         } else {          } else {
                 /*                  /*
                  * Gather all of the CPU-specific caches into the                   * Signal all CPUs that they must transfer their local
                  * global cache.                   * cache back to the global pool then wait for the xcall to
                    * complete.
                  */                   */
                 where = xc_broadcast(0, (xcfunc_t)pool_cache_xcall, pc, NULL);                  where = xc_broadcast(0, (xcfunc_t)pool_cache_transfer,
                       pc, NULL);
                 xc_wait(where);                  xc_wait(where);
         }          }
 #endif  
           /* Empty pool caches, then invalidate objects */
         mutex_enter(&pc->pc_lock);          mutex_enter(&pc->pc_lock);
         full = pc->pc_fullgroups;          full = pc->pc_fullgroups;
         empty = pc->pc_emptygroups;          empty = pc->pc_emptygroups;
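Since the cross-call path is no longer compiled out, pool_cache_invalidate() really does broadcast pool_cache_transfer() to every CPU and wait for it, which is why it now asserts thread context and is documented as costly. A hedged teardown sketch follows; the function name is hypothetical, and pool_cache_destroy() is expected to flush the cache on its own, so the explicit invalidate call is shown purely for illustration.

    /*
     * Hypothetical teardown: stop handing out new objects first (the
     * "other level of synchronization" mentioned above), then flush
     * and destroy the cache from thread context.
     */
    static void
    example_cache_teardown(pool_cache_t pc)
    {
            KASSERT(!cpu_intr_p() && !cpu_softintr_p());

            pool_cache_invalidate(pc);      /* xcall, then free all groups */
            pool_cache_destroy(pc);         /* tears down the cache itself */
    }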
Line 2415  pool_cache_put_paddr(pool_cache_t pc, vo
Line 2403  pool_cache_put_paddr(pool_cache_t pc, vo
 }  }
   
 /*  /*
  * pool_cache_xcall:   * pool_cache_transfer:
  *   *
  *      Transfer objects from the per-CPU cache to the global cache.   *      Transfer objects from the per-CPU cache to the global cache.
  *      Run within a cross-call thread.   *      Run within a cross-call thread.
  */   */
 static void  static void
 pool_cache_xcall(pool_cache_t pc)  pool_cache_transfer(pool_cache_t pc)
 {  {
         pool_cache_cpu_t *cc;          pool_cache_cpu_t *cc;
         pcg_t *prev, *cur, **list;          pcg_t *prev, *cur, **list;
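The rest of pool_cache_transfer() is cut off by the diff viewer. Conceptually it detaches the calling CPU's current and previous groups and pushes them onto the matching global lists under pc_lock; the fragment below is a simplified approximation built from the declarations shown above (the real routine also resets the per-CPU slots to the pcg_dummy placeholder), not the literal body.

    /* Simplified sketch of the per-CPU to global transfer. */
    mutex_enter(&pc->pc_lock);
    cc = pc->pc_cpus[curcpu()->ci_index];
    cur = cc->cc_current;
    if (cur != NULL) {
            if (cur->pcg_avail == cur->pcg_size)
                    list = &pc->pc_fullgroups;
            else if (cur->pcg_avail == 0)
                    list = &pc->pc_emptygroups;
            else
                    list = &pc->pc_partgroups;
            cur->pcg_next = *list;
            *list = cur;
    }
    mutex_exit(&pc->pc_lock);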

Legend: removed from v.1.190.2.2 / changed lines / added in v.1.190.2.3
