
Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/sys/kern/subr_pool.c between version 1.153 and 1.160

--- src/sys/kern/subr_pool.c	version 1.153, 2008/03/10 22:20:14
+++ src/sys/kern/subr_pool.c	version 1.160, 2008/04/28 20:24:04
@@ -16 +16 @@
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *      This product includes software developed by the NetBSD
- *      Foundation, Inc. and its contributors.
- * 4. Neither the name of The NetBSD Foundation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED

@@ -612 +605 @@ pool_subsystem_init(void)
                 pa_reclaim_register(pa);
         }

-        pool_init(&cache_pool, sizeof(struct pool_cache), CACHE_LINE_SIZE,
+        pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
             0, 0, "pcache", &pool_allocator_nointr, IPL_NONE);

-        pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), CACHE_LINE_SIZE,
+        pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
             0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE);
 }
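
Both pool_init() calls above change their alignment argument from the
compile-time CACHE_LINE_SIZE constant to coherency_unit, the kernel's
cache-coherency line size. For reference, here is a minimal sketch of how a
caller uses the pool(9) interface these lines exercise; the struct frob type,
the frob_* functions and the "frobpl" wait channel are invented for
illustration and are not part of subr_pool.c:

#include <sys/param.h>
#include <sys/pool.h>

struct frob {
        int     f_state;
};

static struct pool frob_pool;

/* Create a pool whose items start on a coherency_unit boundary. */
void
frob_pool_setup(void)
{
        pool_init(&frob_pool, sizeof(struct frob), coherency_unit,
            0, 0, "frobpl", &pool_allocator_nointr, IPL_NONE);
}

/* PR_WAITOK may sleep until memory is available: thread context only. */
struct frob *
frob_alloc(void)
{
        return pool_get(&frob_pool, PR_WAITOK);
}

void
frob_free(struct frob *f)
{
        pool_put(&frob_pool, f);
}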
   
@@ -814 +807 @@ pool_init(struct pool *pp, size_t size, 
         pp->pr_entered_file = NULL;
         pp->pr_entered_line = 0;

-        /*
-         * XXXAD hack to prevent IP input processing from blocking.
-         */
-        if (ipl == IPL_SOFTNET) {
-                mutex_init(&pp->pr_lock, MUTEX_DEFAULT, IPL_VM);
-        } else {
-                mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
-        }
+        mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
         cv_init(&pp->pr_cv, wchan);
         pp->pr_ipl = ipl;
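
With the IPL_SOFTNET workaround removed, pr_lock is now simply initialized at
the IPL the caller passed, and pr_cv is set up next to it as before. For
context, a sketch of the generic mutex(9)/condvar(9) wait pattern that such a
lock/condvar pair supports; res_lock, res_cv, res_available and the "reswait"
wait message are illustrative names only, not taken from the pool code:

#include <sys/param.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

static kmutex_t res_lock;
static kcondvar_t res_cv;
static int res_available;

void
res_setup(void)
{
        mutex_init(&res_lock, MUTEX_DEFAULT, IPL_NONE);
        cv_init(&res_cv, "reswait");
}

/* Sleep until a unit of the resource is free, then take it. */
void
res_take(void)
{
        mutex_enter(&res_lock);
        while (res_available == 0)
                cv_wait(&res_cv, &res_lock);
        res_available--;
        mutex_exit(&res_lock);
}

/* Return a unit of the resource and wake any waiters. */
void
res_give(void)
{
        mutex_enter(&res_lock);
        res_available++;
        cv_broadcast(&res_cv);
        mutex_exit(&res_lock);
}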
   
@@ -855 +841 @@ pool_init(struct pool *pp, size_t size, 

                 size = sizeof(pcg_t) +
                     (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
-                pool_init(&pcg_normal_pool, size, CACHE_LINE_SIZE, 0, 0,
+                pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
                     "pcgnormal", &pool_allocator_meta, IPL_VM);

                 size = sizeof(pcg_t) +
                     (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
-                pool_init(&pcg_large_pool, size, CACHE_LINE_SIZE, 0, 0,
+                pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
                     "pcglarge", &pool_allocator_meta, IPL_VM);
         }
   
@@ -995 +981 @@ pool_get(struct pool *pp, int flags)

 #endif /* DIAGNOSTIC */
 #ifdef LOCKDEBUG
-        if (flags & PR_WAITOK)
-                ASSERT_SLEEPABLE(NULL, "pool_get(PR_WAITOK)");
+        if (flags & PR_WAITOK) {
+                ASSERT_SLEEPABLE();
+        }
 #endif

         mutex_enter(&pp->pr_lock);
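
Between these revisions ASSERT_SLEEPABLE() lost its two arguments (a lock
pointer and a message string); the check still fires only when the caller
passes PR_WAITOK, which is what allows pool_get() to sleep. A short sketch of
the two calling conventions; pkt_pool and the pkt_alloc_* wrappers are invented
for illustration:

#include <sys/pool.h>

extern struct pool pkt_pool;    /* illustrative pool, initialized elsewhere */

/* Thread context: may sleep until an item can be returned. */
void *
pkt_alloc_waitok(void)
{
        return pool_get(&pkt_pool, PR_WAITOK);
}

/* Interrupt or other non-sleepable context: never sleeps, may fail. */
void *
pkt_alloc_nowait(void)
{
        void *p;

        p = pool_get(&pkt_pool, PR_NOWAIT);
        /* The caller must handle a NULL return here. */
        return p;
}
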
@@ -1628 +1615 @@ pool_reclaim(struct pool *pp)
         }

         /*
-         * XXXSMP Because mutexes at IPL_SOFTXXX are still spinlocks,
-         * and we are called from the pagedaemon without kernel_lock.
-         * Does not apply to IPL_SOFTBIO.
+         * XXXSMP Because we do not want to cause non-MPSAFE code
+         * to block.
          */
         if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
             pp->pr_ipl == IPL_SOFTSERIAL) {

@@ -2073 +2059 @@ pool_cache_bootstrap(pool_cache_t pc, si
         if (palloc == NULL && ipl == IPL_NONE)
                 palloc = &pool_allocator_nointr;
         pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
-
-        /*
-         * XXXAD hack to prevent IP input processing from blocking.
-         */
-        if (ipl == IPL_SOFTNET) {
-                mutex_init(&pc->pc_lock, MUTEX_DEFAULT, IPL_VM);
-        } else {
-                mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
-        }
+        mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);

         if (ctor == NULL) {
                 ctor = (int (*)(void *, void *, int))nullop;
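
pool_cache_bootstrap() now initializes pc_lock at the caller's IPL, mirroring
the pool_init() change above. Callers normally reach this code through the
pool_cache(9) interface; a sketch of typical constructor/destructor use, with
struct widget and the widget_* names invented for illustration:

#include <sys/param.h>
#include <sys/pool.h>

struct widget {
        int     w_id;
};

static pool_cache_t widget_cache;

static int
widget_ctor(void *arg, void *obj, int flags)
{
        struct widget *w = obj;

        w->w_id = 0;            /* bring the object to its constructed state */
        return 0;
}

static void
widget_dtor(void *arg, void *obj)
{
        /* undo whatever the constructor established */
}

void
widget_subsystem_init(void)
{
        widget_cache = pool_cache_init(sizeof(struct widget), coherency_unit,
            0, 0, "widgetpl", NULL, IPL_NONE, widget_ctor, widget_dtor, NULL);
}

struct widget *
widget_get(void)
{
        return pool_cache_get(widget_cache, PR_WAITOK);
}

void
widget_put(struct widget *w)
{
        pool_cache_put(widget_cache, w);
}
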
@@ -2205 +2183 @@ pool_cache_cpu_init1(struct cpu_info *ci
         index = ci->ci_index;

         KASSERT(index < MAXCPUS);
-        KASSERT(((uintptr_t)pc->pc_cpus & (CACHE_LINE_SIZE - 1)) == 0);

         if ((cc = pc->pc_cpus[index]) != NULL) {
                 KASSERT(cc->cc_cpuindex == index);

@@ -2392 +2369 @@ pool_cache_cpu_enter(pool_cache_t pc, in
          * CPU-local data.  To avoid touching shared state, we
          * pull the neccessary information from CPU local data.
          */
-        crit_enter();
+        KPREEMPT_DISABLE(curlwp);
         cc = pc->pc_cpus[curcpu()->ci_index];
         KASSERT(cc->cc_cache == pc);
         if (cc->cc_ipl != IPL_NONE) {
                 *s = splraiseipl(cc->cc_iplcookie);
         }
-        KASSERT(((uintptr_t)cc & (CACHE_LINE_SIZE - 1)) == 0);

         return cc;
 }

@@ -2411 +2387 @@ pool_cache_cpu_exit(pool_cache_cpu_t *cc
         if (cc->cc_ipl != IPL_NONE) {
                 splx(*s);
         }
-        crit_exit();
+        KPREEMPT_ENABLE(curlwp);
 }

 #if __GNUC_PREREQ__(3, 0)
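
pool_cache_cpu_enter() and pool_cache_cpu_exit() switch from
crit_enter()/crit_exit() to KPREEMPT_DISABLE()/KPREEMPT_ENABLE(): the point is
to keep the LWP from migrating to another CPU between reading
curcpu()->ci_index and using the per-CPU structure it selects. A sketch of that
pattern with an invented per-CPU counter array; kpreempt_disable() and
kpreempt_enable() are the function forms of the macros used in the diff, and
pkt_hits/pkt_hit_record are illustrative names:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>

/* Illustrative per-CPU counters, one slot per possible CPU. */
static uint64_t pkt_hits[MAXCPUS];

void
pkt_hit_record(void)
{
        /*
         * Pin this LWP to its current CPU so that the index read from
         * curcpu() stays valid for the duration of the update.
         */
        kpreempt_disable();
        pkt_hits[curcpu()->ci_index]++;
        kpreempt_enable();
}
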
@@ -2521 +2497 @@ pool_cache_get_paddr(pool_cache_t pc, in
         int s;

 #ifdef LOCKDEBUG
-        if (flags & PR_WAITOK)
-                ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)");
+        if (flags & PR_WAITOK) {
+                ASSERT_SLEEPABLE();
+        }
 #endif

         cc = pool_cache_cpu_enter(pc, &s);

Legend:
- removed from v.1.153
+ added in v.1.160
