

Diff for /src/sys/kern/subr_pool.c between version 1.72 and 1.78

version 1.72 (2002/03/09 01:56:27) -> version 1.78 (2002/07/30 01:41:00)
Line 145 (v1.72) / Line 145 (v1.78): struct pool_item

  /* The cache group pool. */
  static struct pool pcgpool;

- /* The pool cache group. */
- #define PCG_NOBJECTS            16
- struct pool_cache_group {
-         TAILQ_ENTRY(pool_cache_group)
-                 pcg_list;       /* link in the pool cache's group list */
-         u_int   pcg_avail;      /* # available objects */
-                                 /* pointers to the objects */
-         void    *pcg_objects[PCG_NOBJECTS];
- };
-
  static void     pool_cache_reclaim(struct pool_cache *);

  static int      pool_catchup(struct pool *);
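The pool_cache_group definition (a 16-object magazine plus a count of how many slots are filled) disappears from subr_pool.c in 1.78. As a reading aid only, here is a minimal sketch of how such a group hands out a cached object, assuming the layout shown above; pcg_get is an illustrative name, not something from this diff:

    static void *
    pcg_get(struct pool_cache_group *pcg)
    {
            /* Pop the most recently stashed object, LIFO order. */
            if (pcg->pcg_avail == 0)
                    return (NULL);          /* group is empty */
            return (pcg->pcg_objects[--pcg->pcg_avail]);
    }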
Line 425 (v1.72) / Line 415 (v1.78): pool_init(struct pool *pp, size_t size, ...)

          if (size < sizeof(struct pool_item))
                  size = sizeof(struct pool_item);

-         size = ALIGN(size);
+         size = roundup(size, align);
  #ifdef DIAGNOSTIC
          if (size > palloc->pa_pagesz)
                  panic("pool_init: pool item size (%lu) too large",
Line 646 (v1.72) / Line 636 (v1.78): pool_get(struct pool *pp, int flags)

  #ifdef DIAGNOSTIC
          if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
                              (flags & PR_WAITOK) != 0))
-                 panic("pool_get: must have NOWAIT");
+                 panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);

  #ifdef LOCKDEBUG
          if (flags & PR_WAITOK)
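With pp->pr_wchan (the name string passed to pool_init()) in the message, the panic now identifies which pool was hit, e.g. something like

    panic: pool_get: mclpl: must have NOWAIT

("mclpl" is just an illustrative pool name) instead of the anonymous 1.72 text.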
Line 1041 (v1.72) / Line 1031 (v1.78): pool_put(struct pool *pp, void *v)

  #endif

  /*
+  * Add N items to the pool.
+  */
+ int
+ pool_prime(struct pool *pp, int n)
+ {
+         struct pool_item_header *ph;
+         caddr_t cp;
+         int newpages;
+
+         simple_lock(&pp->pr_slock);
+
+         newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
+
+         while (newpages-- > 0) {
+                 simple_unlock(&pp->pr_slock);
+                 cp = pool_allocator_alloc(pp, PR_NOWAIT);
+                 if (__predict_true(cp != NULL))
+                         ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
+                 simple_lock(&pp->pr_slock);
+
+                 if (__predict_false(cp == NULL || ph == NULL)) {
+                         if (cp != NULL)
+                                 pool_allocator_free(pp, cp);
+                         break;
+                 }
+
+                 pool_prime_page(pp, cp, ph);
+                 pp->pr_npagealloc++;
+                 pp->pr_minpages++;
+         }
+
+         if (pp->pr_minpages >= pp->pr_maxpages)
+                 pp->pr_maxpages = pp->pr_minpages + 1;  /* XXX */
+
+         simple_unlock(&pp->pr_slock);
+         return (0);
+ }
+
+ /*
   * Add a page worth of items to the pool.
   *
   * Note, we must be called with the pool descriptor LOCKED.
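pool_prime() converts the requested item count into whole pages with a ceiling division, roundup(n, pr_itemsperpage) / pr_itemsperpage: priming 100 items into a pool that holds 32 items per page allocates 4 pages. Each primed page also bumps pr_minpages, which keeps the reclaim path from releasing it later. A hedged usage sketch (ex_pool, struct ex_item, and "expl" are illustrative names, and the pool_init() signature is the one from this era of the file):

    struct ex_item {
            int     ei_val;
    };

    static struct pool ex_pool;

    void
    ex_init(void)
    {
            pool_init(&ex_pool, sizeof(struct ex_item), 0, 0, 0,
                "expl", NULL);
            /* Pre-allocate pages so early PR_NOWAIT pool_get()s succeed. */
            (void) pool_prime(&ex_pool, 100);
    }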
Line 1096 (v1.72) / Line 1125 (v1.78): pool_prime_page(struct pool *pp, caddr_t ...)

          while (n--) {
                  pi = (struct pool_item *)cp;

+                 KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
+
                  /* Insert on page list */
                  TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
  #ifdef DIAGNOSTIC
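The new KASSERT relies on align being a power of two: addr & (align - 1) is then addr mod align, so the test asserts that every carved item (plus the item offset ioff) starts on an align boundary. A standalone illustration:

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
            uintptr_t pi = 0x1040;  /* example item address: 65 * 64 */
            uintptr_t ioff = 0;     /* no item offset requested */
            unsigned align = 64;    /* must be a power of two */

            /* Low 6 bits clear <=> address is a multiple of 64. */
            assert(((pi + ioff) & (align - 1)) == 0);
            return 0;
    }

Together with the roundup() change in pool_init() above, this assertion would fire on any pool whose item stride broke the requested alignment.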
Line 1120 (v1.72) / Line 1151 (v1.78): pool_prime_page(struct pool *pp, caddr_t ...)

   *
   * Note 1, we never wait for memory here, we let the caller decide what to do.
   *
-  * Note 2, this doesn't work with static pools.
-  *
-  * Note 3, we must be called with the pool already locked, and we return
+  * Note 2, we must be called with the pool already locked, and we return
   * with it locked.
   */
  static int
Line 1160 (v1.72) / Line 1189 (v1.78): pool_catchup(struct pool *pp)

  void
  pool_setlowat(struct pool *pp, int n)
  {
-         int error;

          simple_lock(&pp->pr_slock);

Line 1170 (v1.72) / Line 1198 (v1.78): pool_setlowat(struct pool *pp, int n)

                  : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

          /* Make sure we're caught up with the newly-set low water mark. */
-         if (POOL_NEEDS_CATCHUP(pp) && (error = pool_catchup(pp) != 0)) {
+         if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
                  /*
                   * XXX: Should we log a warning?  Should we set up a timeout
                   * to try again in a second or so?  The latter could break
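The 1.72 condition contained a classic precedence bug: != binds tighter than =, so (error = pool_catchup(pp) != 0) stored the boolean result of the comparison in error, never the actual error number. Since nothing read the value anyway, 1.78 drops both the assignment and the variable (see the removed int error; above). A standalone demonstration:

    #include <stdio.h>

    static int
    catchup(void)
    {
            return 35;      /* stand-in for a real errno value */
    }

    int
    main(void)
    {
            int error;

            if ((error = catchup() != 0))   /* error = (35 != 0), i.e. 1 */
                    printf("buggy:   error = %d\n", error);
            if ((error = catchup()) != 0)   /* error = 35, then tested   */
                    printf("correct: error = %d\n", error);
            return 0;
    }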
Line 1916 (v1.72) / Line 1944 (v1.78): pool_allocator_alloc(struct pool *org, int flags)

                          TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
                          if (pp == org)
                                  continue;
-                         simple_unlock(&pa->pa_list);
+                         simple_unlock(&pa->pa_slock);
                          freed = pool_reclaim(pp);
-                         simple_lock(&pa->pa_list);
+                         simple_lock(&pa->pa_slock);
                  } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
                           freed == 0);
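The 1.72 code passed &pa->pa_list, the TAILQ head of pools sharing this allocator, where the lock &pa->pa_slock was meant. Presumably this slipped through because simple_lock()/simple_unlock() compile to no-ops on kernels built without LOCKDEBUG, so the bogus argument was never type-checked in common configurations.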
   

Legend:
 -  removed from v.1.72
 +  added in v.1.78
