
Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/sys/kern/subr_pool.c between version 1.170 and 1.175

version 1.170: committed 2008/10/15 08:13:17
version 1.175: committed 2009/10/08 21:54:45
Line 69  __KERNEL_RCSID(0, "$NetBSD$");
  */

 /* List of all pools */
-TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
+static TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

 /* Private pool for page header structures */
 #define PHPOOL_MAX      8
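
The hunk above only narrows linkage: pool_head remains a statically
initialized tail queue built from the <sys/queue.h> macros. As a
refresher, the same idiom in a minimal stand-alone sketch (struct and
field names here are illustrative, not taken from subr_pool.c):

#include <sys/queue.h>
#include <stdio.h>

struct item {
        int value;
        TAILQ_ENTRY(item) link;         /* queue linkage inside each item */
};

/* Statically initialized head: usable with no run-time setup. */
static TAILQ_HEAD(, item) item_head = TAILQ_HEAD_INITIALIZER(item_head);

int
main(void)
{
        struct item a = { .value = 42 }, *ip;

        TAILQ_INSERT_TAIL(&item_head, &a, link);
        TAILQ_FOREACH(ip, &item_head, link)
                printf("%d\n", ip->value);
        return 0;
}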
Line 188  static bool pool_cache_get_slow(pool_cac
                                     void **, paddr_t *, int);
 static void     pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
 static void     pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
+static void     pool_cache_invalidate_cpu(pool_cache_t, u_int);
 static void     pool_cache_xcall(pool_cache_t);

 static int      pool_catchup(struct pool *);
Line 587 (v.1.170) / Line 588 (v.1.175)  void
 pool_subsystem_init(void)
 {
         struct pool_allocator *pa;
-        __link_set_decl(pools, struct link_pool_init);
-        struct link_pool_init * const *pi;

         mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
         cv_init(&pool_busy, "poolbusy");

-        __link_set_foreach(pi, pools)
-                pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
-                    (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
-                    (*pi)->palloc, (*pi)->ipl);
-
         while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
                 KASSERT(pa->pa_backingmapptr != NULL);
                 KASSERT(*pa->pa_backingmapptr != NULL);
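
The lines deleted above were the last use of link sets in
pool_subsystem_init(): pools declared at compile time were collected by
the linker into a "pools" set and initialized here in a loop. For
readers unfamiliar with the idiom, a simplified sketch of NetBSD's
<sys/cdefs.h> link-set macros follows ("frobs" and struct frob are
made-up names used only for illustration):

#include <sys/cdefs.h>
#include <stdio.h>

struct frob {
        const char *name;
};

static struct frob frob0 = { "example" };
/* Emit a pointer to frob0 into the linker-gathered set "frobs". */
__link_set_add_data(frobs, frob0);

static void
frob_init_all(void)
{
        __link_set_decl(frobs, struct frob);    /* declare the set */
        struct frob * const *f;

        /* Walk every entry the linker collected from all objects. */
        __link_set_foreach(f, frobs)
                printf("init %s\n", (*f)->name);
}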
Line 2129 (v.1.170) / Line 2123 (v.1.175)  void
 pool_cache_destroy(pool_cache_t pc)
 {
         struct pool *pp = &pc->pc_pool;
-        pool_cache_cpu_t *cc;
-        pcg_t *pcg;
-        int i;
+        u_int i;

         /* Remove it from the global list. */
         mutex_enter(&pool_head_lock);
Line 2149 (v.1.170) / Line 2141 (v.1.175)  pool_cache_destroy(pool_cache_t pc)
         mutex_exit(&pp->pr_lock);

         /* Destroy per-CPU data */
-        for (i = 0; i < MAXCPUS; i++) {
-                if ((cc = pc->pc_cpus[i]) == NULL)
-                        continue;
-                if ((pcg = cc->cc_current) != &pcg_dummy) {
-                        pcg->pcg_next = NULL;
-                        pool_cache_invalidate_groups(pc, pcg);
-                }
-                if ((pcg = cc->cc_previous) != &pcg_dummy) {
-                        pcg->pcg_next = NULL;
-                        pool_cache_invalidate_groups(pc, pcg);
-                }
-                if (cc != &pc->pc_cpu0)
-                        pool_put(&cache_cpu_pool, cc);
-        }
+        for (i = 0; i < MAXCPUS; i++)
+                pool_cache_invalidate_cpu(pc, i);

         /* Finally, destroy it. */
         mutex_destroy(&pc->pc_lock);
Line 2332 (v.1.170) / Line 2312 (v.1.175)  pool_cache_invalidate(pool_cache_t pc)
         pool_cache_invalidate_groups(pc, part);
 }

+/*
+ * pool_cache_invalidate_local:
+ *
+ *      Invalidate all local ('current CPU') cached objects in
+ *      the pool cache.
+ *      It is the caller's responsibility to ensure that no operation
+ *      is taking place on this pool cache while doing the local
+ *      invalidation.
+ */
+void
+pool_cache_invalidate_local(pool_cache_t pc)
+{
+        pool_cache_invalidate_cpu(pc, curcpu()->ci_index);
+}
+
+/*
+ * pool_cache_invalidate_cpu:
+ *
+ *      Invalidate all CPU-bound cached objects in the pool cache, the
+ *      CPU being identified by its associated index.
+ *      It is the caller's responsibility to ensure that no operation
+ *      is taking place on this pool cache while doing this invalidation.
+ *      WARNING: as no inter-CPU locking is enforced, trying to
+ *      invalidate pool cached objects from a CPU different from the one
+ *      currently running may result in undefined behaviour.
+ */
+static void
+pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
+{
+        pool_cache_cpu_t *cc;
+        pcg_t *pcg;
+
+        if ((cc = pc->pc_cpus[index]) == NULL)
+                return;
+
+        if ((pcg = cc->cc_current) != &pcg_dummy) {
+                pcg->pcg_next = NULL;
+                pool_cache_invalidate_groups(pc, pcg);
+        }
+        if ((pcg = cc->cc_previous) != &pcg_dummy) {
+                pcg->pcg_next = NULL;
+                pool_cache_invalidate_groups(pc, pcg);
+        }
+        if (cc != &pc->pc_cpu0)
+                pool_put(&cache_cpu_pool, cc);
+}
+
 void
 pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
 {
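
The WARNING above explains why pool_cache_invalidate_cpu() is static:
only the destroy path, where no other CPU can be using the cache, may
pass an arbitrary index. To flush the per-CPU caches of a live pool,
each CPU would instead run pool_cache_invalidate_local() on itself, for
example via the cross-call facility. A hypothetical sketch
(flush_local() and flush_all_cpus() are illustrative names, not part of
the pool_cache API):

#include <sys/xcall.h>

static void
flush_local(void *arg1, void *arg2)
{
        /* Runs on each CPU in turn; touches only that CPU's groups. */
        pool_cache_invalidate_local((pool_cache_t)arg1);
}

static void
flush_all_cpus(pool_cache_t pc)
{
        uint64_t where;

        where = xc_broadcast(0, flush_local, pc, NULL);
        xc_wait(where);                 /* all CPUs have now flushed */
        pool_cache_invalidate(pc);      /* then drop the global lists */
}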
Line 2528 (v.1.170) / Line 2556 (v.1.175)  pool_cache_put_slow(pool_cache_cpu_t *cc
         KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);

         pc = cc->cc_cache;
+        pcg = NULL;
         cc->cc_misses++;

+        /*
+         * If there are no empty groups in the cache then allocate one
+         * while still unlocked.
+         */
+        if (__predict_false(pc->pc_emptygroups == NULL)) {
+                if (__predict_true(!pool_cache_disable)) {
+                        pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
+                }
+                if (__predict_true(pcg != NULL)) {
+                        pcg->pcg_avail = 0;
+                        pcg->pcg_size = pc->pc_pcgsize;
+                }
+        }
+
         /* Lock the cache. */
         if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
                 ncsw = curlwp->l_ncsw;
Line 2542 (v.1.170) / Line 2585 (v.1.175)  pool_cache_put_slow(pool_cache_cpu_t *cc
                  */
                 if (__predict_false(curlwp->l_ncsw != ncsw)) {
                         mutex_exit(&pc->pc_lock);
+                        if (pcg != NULL) {
+                                pool_put(pc->pc_pcgpool, pcg);
+                        }
                         return true;
                 }
         }

-        /* If there are no empty groups in the cache then allocate one. */
-        if (__predict_false((pcg = pc->pc_emptygroups) == NULL)) {
-                if (__predict_true(!pool_cache_disable)) {
-                        pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
-                }
-                if (__predict_true(pcg != NULL)) {
-                        pcg->pcg_avail = 0;
-                        pcg->pcg_size = pc->pc_pcgsize;
-                }
-        } else {
+        if (pcg == NULL && pc->pc_emptygroups != NULL) {
+                pcg = pc->pc_emptygroups;
                 pc->pc_emptygroups = pcg->pcg_next;
                 pc->pc_nempty--;
         }
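
Taken together, the two hunks above move the empty-group allocation out
from under pc_lock: the group is allocated optimistically before
mutex_tryenter(), and handed back to the pool if a context switch means
the thread lost the race. The general "allocate unlocked, free if
unneeded" pattern, as a stand-alone sketch using pthreads rather than
kernel mutexes (all names illustrative):

#include <pthread.h>
#include <stdlib.h>

struct group { struct group *next; };

static struct group *emptygroups;       /* shared; protected by lock */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Ensure the shared list holds at least one group; returns 0 on success.
 * malloc() is never called with 'lock' held. */
static int
ensure_empty_group(void)
{
        struct group *g = NULL;
        int have;

        if (emptygroups == NULL)        /* unlocked peek, may race */
                g = malloc(sizeof(*g));

        pthread_mutex_lock(&lock);
        if (g != NULL && emptygroups == NULL) {
                g->next = emptygroups;  /* our allocation was needed */
                emptygroups = g;
                g = NULL;               /* consumed: skip the free below */
        }
        have = (emptygroups != NULL);
        pthread_mutex_unlock(&lock);

        free(g);                        /* NULL-safe; undo a lost race */
        return have ? 0 : -1;
}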
Line 2610 (v.1.170) / Line 2649 (v.1.175)  pool_cache_put_paddr(pool_cache_t pc, vo
         pcg_t *pcg;
         int s;

+        KASSERT(object != NULL);
         FREECHECK_IN(&pc->pc_freecheck, object);

         /* Lock out interrupts and disable preemption. */
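
The new KASSERT documents, and in DIAGNOSTIC kernels enforces, the
pre-existing contract that NULL must never be cached; in a release
kernel it compiles away entirely. Roughly (a simplified model, for
illustration only; the real NetBSD macro also reports file and line):

#ifdef DIAGNOSTIC
#define KASSERT(e)      do {                                    \
        if (__predict_false(!(e)))                              \
                panic("assertion \"%s\" failed", #e);           \
} while (/*CONSTCOND*/0)
#else
#define KASSERT(e)      /* expands to nothing */
#endif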

Legend: lines prefixed with '-' were removed from v.1.170; lines
prefixed with '+' were added in v.1.175; unprefixed lines are unchanged
context.
