
Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/sys/kern/subr_pool.c between version 1.190.2.2 and 1.202

version 1.190.2.2, 2012/05/23 10:08:11  →  version 1.202, 2014/04/26 16:30:05
Line 67  __KERNEL_RCSID(0, "$NetBSD$");
Line 67  __KERNEL_RCSID(0, "$NetBSD$");
  * an internal pool of page headers (`phpool').   * an internal pool of page headers (`phpool').
  */   */
   
 /* List of all pools */  /* List of all pools. Non static as needed by 'vmstat -i' */
 static TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);  TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
   
 /* Private pool for page header structures */  /* Private pool for page header structures */
 #define PHPOOL_MAX      8  #define PHPOOL_MAX      8
Line 191  static bool pool_cache_get_slow(pool_cac
Line 191  static bool pool_cache_get_slow(pool_cac
 static void     pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);  static void     pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
 static void     pool_cache_invalidate_groups(pool_cache_t, pcg_t *);  static void     pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
 static void     pool_cache_invalidate_cpu(pool_cache_t, u_int);  static void     pool_cache_invalidate_cpu(pool_cache_t, u_int);
 static void     pool_cache_xcall(pool_cache_t);  static void     pool_cache_transfer(pool_cache_t);
   
 static int      pool_catchup(struct pool *);  static int      pool_catchup(struct pool *);
 static void     pool_prime_page(struct pool *, void *,  static void     pool_prime_page(struct pool *, void *,
Line 203  static void *pool_allocator_alloc(struct
Line 203  static void *pool_allocator_alloc(struct
 static void     pool_allocator_free(struct pool *, void *);  static void     pool_allocator_free(struct pool *, void *);
   
 static void pool_print_pagelist(struct pool *, struct pool_pagelist *,  static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
         void (*)(const char *, ...));          void (*)(const char *, ...) __printflike(1, 2));
 static void pool_print1(struct pool *, const char *,  static void pool_print1(struct pool *, const char *,
         void (*)(const char *, ...));          void (*)(const char *, ...) __printflike(1, 2));
   
 static int pool_chk_page(struct pool *, const char *,  static int pool_chk_page(struct pool *, const char *,
                          struct pool_item_header *);                           struct pool_item_header *);
Line 462  pool_init(struct pool *pp, size_t size, 
Line 462  pool_init(struct pool *pp, size_t size, 
         int off, slack;          int off, slack;
   
 #ifdef DEBUG  #ifdef DEBUG
           if (__predict_true(!cold))
                   mutex_enter(&pool_head_lock);
         /*          /*
          * Check that the pool hasn't already been initialised and           * Check that the pool hasn't already been initialised and
          * added to the list of all pools.           * added to the list of all pools.
Line 471  pool_init(struct pool *pp, size_t size, 
Line 473  pool_init(struct pool *pp, size_t size, 
                         panic("pool_init: pool %s already initialised",                          panic("pool_init: pool %s already initialised",
                             wchan);                              wchan);
         }          }
           if (__predict_true(!cold))
                   mutex_exit(&pool_head_lock);
 #endif  #endif
   
         if (palloc == NULL)          if (palloc == NULL)
Line 557  pool_init(struct pool *pp, size_t size, 
Line 561  pool_init(struct pool *pp, size_t size, 
         /* See the comment below about reserved bytes. */          /* See the comment below about reserved bytes. */
         trysize = palloc->pa_pagesz - ((align - ioff) % align);          trysize = palloc->pa_pagesz - ((align - ioff) % align);
         phsize = ALIGN(sizeof(struct pool_item_header));          phsize = ALIGN(sizeof(struct pool_item_header));
         if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&          if (pp->pr_roflags & PR_PHINPAGE ||
               ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
             (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||              (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
             trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {              trysize / pp->pr_size == (trysize - phsize) / pp->pr_size))) {
                 /* Use the end of the page for the page header */                  /* Use the end of the page for the page header */
                 pp->pr_roflags |= PR_PHINPAGE;                  pp->pr_roflags |= PR_PHINPAGE;
                 pp->pr_phoffset = off = palloc->pa_pagesz - phsize;                  pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
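As a concrete illustration of the revised placement test (the numbers are assumed, not taken from the source): with pa_pagesz = 4096 and a page header that rounds up to phsize = 64, MIN(pa_pagesz / 16, phsize << 3) is MIN(256, 512) = 256, so any pool whose pr_size is below 256 bytes keeps its header in-page, at pr_phoffset = 4096 - 64 = 4032. The new first clause additionally forces this layout whenever the caller passed PR_PHINPAGE explicitly.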
Line 1300  pool_sethardlimit(struct pool *pp, int n
Line 1305  pool_sethardlimit(struct pool *pp, int n
 /*  /*
  * Release all complete pages that have not been used recently.   * Release all complete pages that have not been used recently.
  *   *
  * Might be called from interrupt context.   * Must not be called from interrupt context.
  */   */
 int  int
 pool_reclaim(struct pool *pp)  pool_reclaim(struct pool *pp)
Line 1311  pool_reclaim(struct pool *pp)
Line 1316  pool_reclaim(struct pool *pp)
         bool klock;          bool klock;
         int rv;          int rv;
   
         if (cpu_intr_p() || cpu_softintr_p()) {          KASSERT(!cpu_intr_p() && !cpu_softintr_p());
                 KASSERT(pp->pr_ipl != IPL_NONE);  
         }  
   
         if (pp->pr_drain_hook != NULL) {          if (pp->pr_drain_hook != NULL) {
                 /*                  /*
Line 1387  pool_reclaim(struct pool *pp)
Line 1390  pool_reclaim(struct pool *pp)
 }  }
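The hunk above also shows pool_reclaim() invoking a registered drain hook before it frees idle pages; together with the new KASSERT this means hooks always run in thread context. A minimal sketch of how a subsystem might register such a hook via pool_set_drain_hook(9) follows; the frag_* names and the softc are hypothetical, not part of this file:

/*
 * Hypothetical drain hook: pool_reclaim() calls it with the registered
 * argument and a PR_WAITOK/PR_NOWAIT flag before returning idle pages
 * to the backing allocator.
 */
static void
frag_drain_hook(void *arg, int flags)
{
        struct frag_softc *sc = arg;

        frag_purge_cache(sc, flags);    /* free cached items (hypothetical) */
}

        ...
        pool_init(&sc->sc_fragpool, sizeof(struct frag), 0, 0, 0,
            "fragpl", NULL, IPL_NONE);
        pool_set_drain_hook(&sc->sc_fragpool, frag_drain_hook, sc);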
   
 /*  /*
  * Drain pools, one at a time.  This is a two stage process;   * Drain pools, one at a time. The drained pool is returned within ppp.
  * drain_start kicks off a cross call to drain CPU-level caches  
  * if the pool has an associated pool_cache.  drain_end waits  
  * for those cross calls to finish, and then drains the cache  
  * (if any) and pool.  
  *   *
  * Note, must never be called from interrupt context.   * Note, must never be called from interrupt context.
  */   */
 void  bool
 pool_drain_start(struct pool **ppp, uint64_t *wp)  pool_drain(struct pool **ppp)
 {  {
           bool reclaimed;
         struct pool *pp;          struct pool *pp;
   
         KASSERT(!TAILQ_EMPTY(&pool_head));          KASSERT(!TAILQ_EMPTY(&pool_head));
Line 1422  pool_drain_start(struct pool **ppp, uint
Line 1422  pool_drain_start(struct pool **ppp, uint
         pp->pr_refcnt++;          pp->pr_refcnt++;
         mutex_exit(&pool_head_lock);          mutex_exit(&pool_head_lock);
   
         /* If there is a pool_cache, drain CPU level caches. */  
         *ppp = pp;  
         if (pp->pr_cache != NULL) {  
                 *wp = xc_broadcast(0, (xcfunc_t)pool_cache_xcall,  
                     pp->pr_cache, NULL);  
         }  
 }  
   
 bool  
 pool_drain_end(struct pool *pp, uint64_t where)  
 {  
         bool reclaimed;  
   
         if (pp == NULL)  
                 return false;  
   
         KASSERT(pp->pr_refcnt > 0);  
   
         /* Wait for remote draining to complete. */  
         if (pp->pr_cache != NULL)  
                 xc_wait(where);  
   
         /* Drain the cache (if any) and pool.. */          /* Drain the cache (if any) and pool.. */
         reclaimed = pool_reclaim(pp);          reclaimed = pool_reclaim(pp);
   
Line 1453  pool_drain_end(struct pool *pp, uint64_t
Line 1431  pool_drain_end(struct pool *pp, uint64_t
         cv_broadcast(&pool_busy);          cv_broadcast(&pool_busy);
         mutex_exit(&pool_head_lock);          mutex_exit(&pool_head_lock);
   
           if (ppp != NULL)
                   *ppp = pp;
   
         return reclaimed;          return reclaimed;
 }  }
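Since this revision folds the old pool_drain_start()/pool_drain_end() pair into the single pool_drain() shown above, a memory-shortage path now needs only one call per pass. An illustrative loop under the new interface (the function name and retry limit are invented for the example):

/*
 * Illustrative reclaim loop: drain one pool per iteration until some
 * pool actually released pages.  pool_drain() returns true when it
 * reclaimed memory and, through ppp, reports which pool it drained.
 */
static void
reclaim_some_memory(void)
{
        struct pool *pp = NULL;
        int tries;

        for (tries = 0; tries < 8; tries++) {
                if (pool_drain(&pp))
                        break;          /* 'pp' names the drained pool */
        }
}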
   
Line 2007  pool_cache_invalidate_groups(pool_cache_
Line 1988  pool_cache_invalidate_groups(pool_cache_
  *      Note: For pool caches that provide constructed objects, there   *      Note: For pool caches that provide constructed objects, there
  *      is an assumption that another level of synchronization is occurring   *      is an assumption that another level of synchronization is occurring
  *      between the input to the constructor and the cache invalidation.   *      between the input to the constructor and the cache invalidation.
    *
    *      Invalidation is a costly process and should not be called from
    *      interrupt context.
  */   */
 void  void
 pool_cache_invalidate(pool_cache_t pc)  pool_cache_invalidate(pool_cache_t pc)
 {  {
         pcg_t *full, *empty, *part;  
 #if 0  
         uint64_t where;          uint64_t where;
           pcg_t *full, *empty, *part;
   
           KASSERT(!cpu_intr_p() && !cpu_softintr_p());
   
         if (ncpu < 2 || !mp_online) {          if (ncpu < 2 || !mp_online) {
                 /*                  /*
                  * We might be called early enough in the boot process                   * We might be called early enough in the boot process
                  * for the CPU data structures to not be fully initialized.                   * for the CPU data structures to not be fully initialized.
                  * In this case, simply gather the local CPU's cache now                   * In this case, transfer the content of the local CPU's
                  * since it will be the only one running.                   * cache back into global cache as only this CPU is currently
                    * running.
                  */                   */
                 pool_cache_xcall(pc);                  pool_cache_transfer(pc);
         } else {          } else {
                 /*                  /*
                  * Gather all of the CPU-specific caches into the                   * Signal all CPUs that they must transfer their local
                  * global cache.                   * cache back to the global pool then wait for the xcall to
                    * complete.
                  */                   */
                 where = xc_broadcast(0, (xcfunc_t)pool_cache_xcall, pc, NULL);                  where = xc_broadcast(0, (xcfunc_t)pool_cache_transfer,
                       pc, NULL);
                 xc_wait(where);                  xc_wait(where);
         }          }
 #endif  
           /* Empty pool caches, then invalidate objects */
         mutex_enter(&pc->pc_lock);          mutex_enter(&pc->pc_lock);
         full = pc->pc_fullgroups;          full = pc->pc_fullgroups;
         empty = pc->pc_emptygroups;          empty = pc->pc_emptygroups;
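Given the new KASSERT, pool_cache_invalidate() is now strictly a thread-context operation; a typical caller is a detach or reconfiguration path that flushes constructed objects before tearing the cache down. A hypothetical sketch, with frag_* names invented for illustration:

/*
 * Hypothetical detach path: flush every constructed object held in the
 * per-CPU and global caches, then destroy the cache.  Runs in thread
 * context, as the new assertion requires.
 */
static int
frag_detach(device_t self, int flags)
{
        struct frag_softc *sc = device_private(self);

        pool_cache_invalidate(sc->sc_cache);    /* drop cached objects */
        pool_cache_destroy(sc->sc_cache);       /* then destroy the cache */
        return 0;
}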
Line 2273  pool_cache_get_paddr(pool_cache_t pc, in
Line 2262  pool_cache_get_paddr(pool_cache_t pc, in
 static bool __noinline  static bool __noinline
 pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)  pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
 {  {
           struct lwp *l = curlwp;
         pcg_t *pcg, *cur;          pcg_t *pcg, *cur;
         uint64_t ncsw;          uint64_t ncsw;
         pool_cache_t pc;          pool_cache_t pc;
Line 2283  pool_cache_put_slow(pool_cache_cpu_t *cc
Line 2273  pool_cache_put_slow(pool_cache_cpu_t *cc
         pc = cc->cc_cache;          pc = cc->cc_cache;
         pcg = NULL;          pcg = NULL;
         cc->cc_misses++;          cc->cc_misses++;
           ncsw = l->l_ncsw;
   
         /*          /*
          * If there are no empty groups in the cache then allocate one           * If there are no empty groups in the cache then allocate one
Line 2292  pool_cache_put_slow(pool_cache_cpu_t *cc
Line 2283  pool_cache_put_slow(pool_cache_cpu_t *cc
                 if (__predict_true(!pool_cache_disable)) {                  if (__predict_true(!pool_cache_disable)) {
                         pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);                          pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
                 }                  }
                   /*
                    * If pool_get() blocked, then our view of
                    * the per-CPU data is invalid: retry.
                    */
                   if (__predict_false(l->l_ncsw != ncsw)) {
                           if (pcg != NULL) {
                                   pool_put(pc->pc_pcgpool, pcg);
                           }
                           return true;
                   }
                 if (__predict_true(pcg != NULL)) {                  if (__predict_true(pcg != NULL)) {
                         pcg->pcg_avail = 0;                          pcg->pcg_avail = 0;
                         pcg->pcg_size = pc->pc_pcgsize;                          pcg->pcg_size = pc->pc_pcgsize;
Line 2300  pool_cache_put_slow(pool_cache_cpu_t *cc
Line 2301  pool_cache_put_slow(pool_cache_cpu_t *cc
   
         /* Lock the cache. */          /* Lock the cache. */
         if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {          if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
                 ncsw = curlwp->l_ncsw;  
                 mutex_enter(&pc->pc_lock);                  mutex_enter(&pc->pc_lock);
                 pc->pc_contended++;                  pc->pc_contended++;
   
Line 2308  pool_cache_put_slow(pool_cache_cpu_t *cc
Line 2308  pool_cache_put_slow(pool_cache_cpu_t *cc
                  * If we context switched while locking, then our view of                   * If we context switched while locking, then our view of
                  * the per-CPU data is invalid: retry.                   * the per-CPU data is invalid: retry.
                  */                   */
                 if (__predict_false(curlwp->l_ncsw != ncsw)) {                  if (__predict_false(l->l_ncsw != ncsw)) {
                         mutex_exit(&pc->pc_lock);                          mutex_exit(&pc->pc_lock);
                         if (pcg != NULL) {                          if (pcg != NULL) {
                                 pool_put(pc->pc_pcgpool, pcg);                                  pool_put(pc->pc_pcgpool, pcg);
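The change above hoists the curlwp->l_ncsw snapshot to the top of pool_cache_put_slow(), so a context switch during the blocking pool_get() is detected as well, not just one taken while acquiring pc_lock. In isolation the idiom looks roughly like this; the helper names are invented for the sketch:

/*
 * Sketch of the detect-and-retry idiom: snapshot the LWP's context
 * switch counter, do something that may block, and bail out so the
 * caller retries if the counter moved (the per-CPU view is stale).
 */
static bool
refill_percpu_state(void)
{
        struct lwp *l = curlwp;
        uint64_t ncsw = l->l_ncsw;              /* snapshot before blocking */
        void *res;

        res = alloc_may_block();                /* hypothetical blocking call */
        if (__predict_false(l->l_ncsw != ncsw)) {
                if (res != NULL)
                        free_it(res);           /* hypothetical */
                return false;                   /* caller must retry */
        }
        /* ... safe to keep using the cached per-CPU pointers ... */
        return true;
}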
Line 2415  pool_cache_put_paddr(pool_cache_t pc, vo
Line 2415  pool_cache_put_paddr(pool_cache_t pc, vo
 }  }
   
 /*  /*
  * pool_cache_xcall:   * pool_cache_transfer:
  *   *
  *      Transfer objects from the per-CPU cache to the global cache.   *      Transfer objects from the per-CPU cache to the global cache.
  *      Run within a cross-call thread.   *      Run within a cross-call thread.
  */   */
 static void  static void
 pool_cache_xcall(pool_cache_t pc)  pool_cache_transfer(pool_cache_t pc)
 {  {
         pool_cache_cpu_t *cc;          pool_cache_cpu_t *cc;
         pcg_t *prev, *cur, **list;          pcg_t *prev, *cur, **list;
