
Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/sys/kern/subr_pool.c between version 1.235 and 1.236

version 1.235 (2019/03/11 20:38:27) to version 1.236 (2019/03/13 20:56:33)

Line 139  static int pool_bigidx(size_t);
 int pool_inactive_time = 10;
 
 /* Next candidate for drainage (see pool_drain()) */
-static struct pool	*drainpp;
+static struct pool *drainpp;
 
 /* This lock protects both pool_head and drainpp. */
 static kmutex_t pool_head_lock;

Line 383  phtree_compare(struct pool_item_header *
 {
 
 	/*
-	 * we consider pool_item_header with smaller ph_page bigger.
-	 * (this unnatural ordering is for the benefit of pr_find_pagehead.)
+	 * We consider pool_item_header with smaller ph_page bigger. This
+	 * unnatural ordering is for the benefit of pr_find_pagehead.
 	 */
 
 	if (a->ph_page < b->ph_page)
-		return (1);
+		return 1;
 	else if (a->ph_page > b->ph_page)
-		return (-1);
+		return -1;
 	else
-		return (0);
+		return 0;
 }
 
 SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
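
The comment is terse, so it may help to spell out why the reversed ordering is
convenient: pr_find_pagehead has to map an arbitrary item address back to the
header of the page that contains it, and with this ordering the first header
(walking the sorted sequence) whose ph_page lies at or below the item address
is exactly that page. A minimal user-space sketch of the idea, with
hypothetical names and a sorted array standing in for the splay tree, not the
kernel's actual lookup code:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct hdr { void *ph_page; };		/* stand-in for pool_item_header */

/* Same ordering as phtree_compare: a smaller ph_page sorts as "bigger". */
static int
hdr_compare(const void *a, const void *b)
{
	const struct hdr *ha = a, *hb = b;

	if ((uintptr_t)ha->ph_page < (uintptr_t)hb->ph_page)
		return 1;
	else if ((uintptr_t)ha->ph_page > (uintptr_t)hb->ph_page)
		return -1;
	else
		return 0;
}

int
main(void)
{
	static char pages[3][4096];	/* three fake, contiguous pool pages */
	struct hdr h[3] = { { pages[0] }, { pages[1] }, { pages[2] } };
	uintptr_t item = (uintptr_t)&pages[1][100];	/* an item inside page 1 */

	qsort(h, 3, sizeof(h[0]), hdr_compare);		/* highest address first */
	for (int i = 0; i < 3; i++) {
		if ((uintptr_t)h[i].ph_page <= item) {
			printf("item %p lives in the page at %p\n",
			    (void *)item, h[i].ph_page);
			break;
		}
	}
	return 0;
}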

Line 592 (v.1.235) / Line 591 (v.1.236)  pool_init(struct pool *pp, size_t size, 
 			palloc = &pool_allocator_kmem_fullpage;
 		else if (palloc == &pool_allocator_nointr)
 			palloc = &pool_allocator_nointr_fullpage;
 	}
 #endif /* POOL_SUBPAGE */
 	if (!cold)
 		mutex_enter(&pool_allocator_lock);

Line 653 (v.1.235) / Line 652 (v.1.236)  pool_init(struct pool *pp, size_t size, 
 	pp->pr_freecheck = NULL;
 	pool_redzone_init(pp, size);
 
+	/* Silently enforce '0 <= ioff < align'. */
+	ioff %= align;
+
 	/*
-	 * Decide whether to put the page header off page to avoid
-	 * wasting too large a part of the page or too big item.
-	 * Off-page page headers go on a hash table, so we can match
-	 * a returned item with its header based on the page address.
-	 * We use 1/16 of the page size and about 8 times of the item
-	 * size as the threshold (XXX: tune)
-	 *
-	 * However, we'll put the header into the page if we can put
-	 * it without wasting any items.
-	 *
-	 * Silently enforce `0 <= ioff < align'.
+	 * Decide whether to put the page header off page to avoid wasting too
+	 * large a part of the page or too big item. Off-page page headers go
+	 * on a hash table, so we can match a returned item with its header
+	 * based on the page address. We use 1/16 of the page size and about 8
+	 * times of the item size as the threshold. (XXX: tune)
+	 *
+	 * However, we'll put the header into the page if we can put it without
+	 * wasting any items.
 	 */
-	pp->pr_itemoffset = ioff %= align;
-	/* See the comment below about reserved bytes. */
+	pp->pr_itemoffset = ioff;
 	trysize = palloc->pa_pagesz - ((align - ioff) % align);
 	phsize = ALIGN(sizeof(struct pool_item_header));
 	if (pp->pr_roflags & PR_PHINPAGE ||
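
Two things happen in this hunk. First, the normalization `ioff %= align' now
runs up front, before pr_itemoffset is recorded: a caller passing ioff = 12
with align = 8, for example, ends up with an effective offset of 4, so
0 <= ioff < align holds from here on. Second, the comment describes how
pool_init() chooses between an in-page and an off-page page header. A
simplified user-space sketch of that decision follows; the helper name is
invented and the real test also depends on PR_PHINPAGE, PR_NOTOUCH and
PR_NOALIGN, so treat it as an illustration rather than the exact kernel
condition:

#include <stdbool.h>
#include <stddef.h>

/*
 * Keep the header inside the page when items are small relative to the
 * thresholds mentioned above (1/16 of the page, about 8 header sizes), or
 * when storing it there costs no items; otherwise put it off page.
 */
static bool
header_in_page(size_t pagesz, size_t itemsz, size_t phsize)
{
	size_t threshold = pagesz / 16 < phsize * 8 ? pagesz / 16 : phsize * 8;

	if (itemsz < threshold)
		return true;		/* small items: the waste is bounded */
	if (pagesz / itemsz == (pagesz - phsize) / itemsz)
		return true;		/* the header displaces no item */
	return false;			/* large items: keep the header off page */
}

int
main(void)
{
	/* 4 KB page, 64-byte header: 128-byte items keep their header in page. */
	return header_in_page(4096, 128, 64) ? 0 : 1;
}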

Line 829 (v.1.235) / Line 827 (v.1.236)  pool_alloc_item_header(struct pool *pp, 
 	else
 		ph = pool_get(pp->pr_phpool, flags);
 
-	return (ph);
+	return ph;
 }
 
 /*

Line 900 (v.1.235) / Line 898 (v.1.236)  pool_get(struct pool *pp, int flags)
 
 		mutex_exit(&pp->pr_lock);
 		KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0);
-		return (NULL);
+		return NULL;
 	}
 
 	/*

Line 943 (v.1.235) / Line 941 (v.1.236)  pool_get(struct pool *pp, int flags)
 			pp->pr_nfail++;
 			mutex_exit(&pp->pr_lock);
 			KASSERT((flags & (PR_WAITOK|PR_NOWAIT)) == PR_NOWAIT);
-			return (NULL);
+			return NULL;
 		}
 
 		/* Start the allocation process over. */

Line 1126 (v.1.235) / Line 1124 (v.1.236)  pool_put(struct pool *pp, void *v)
 static int
 pool_grow(struct pool *pp, int flags)
 {
+	struct pool_item_header *ph;
+	char *cp;
+
 	/*
 	 * If there's a pool_grow in progress, wait for it to complete
 	 * and try again from the top.

Line 1157 (v.1.235) / Line 1158 (v.1.236)  pool_grow(struct pool *pp, int flags)
 	else
 		pp->pr_flags |= PR_GROWINGNOWAIT;
 
-	char *cp = pool_allocator_alloc(pp, flags);
+	cp = pool_allocator_alloc(pp, flags);
 	if (__predict_false(cp == NULL))
 		goto out;
 
-	struct pool_item_header *ph = pool_alloc_item_header(pp, cp, flags);
+	ph = pool_alloc_item_header(pp, cp, flags);
 	if (__predict_false(ph == NULL)) {
 		pool_allocator_free(pp, cp);
 		goto out;
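
These two pool_grow() hunks only hoist the declarations of cp and ph to the
top of the function; the interesting part is the serialization described by
the comment in the first pool_grow() hunk above: if another thread is already
growing the pool, either wait for it to finish or back off and let the caller
retry. A generic user-space sketch of that single-grower pattern, using
pthreads and invented names rather than the kernel's PR_GROWING and
PR_GROWINGNOWAIT flag handling:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct xpool {
	pthread_mutex_t	lock;
	pthread_cond_t	cv;
	bool		growing;	/* stand-in for PR_GROWING */
};

static int
xpool_grow(struct xpool *p, bool can_wait)
{
	pthread_mutex_lock(&p->lock);
	while (p->growing) {
		if (!can_wait) {
			/* Someone else is growing; let the caller retry. */
			pthread_mutex_unlock(&p->lock);
			return EWOULDBLOCK;
		}
		pthread_cond_wait(&p->cv, &p->lock);
	}
	p->growing = true;
	pthread_mutex_unlock(&p->lock);

	/* ... allocate a new page and its header without holding the lock ... */

	pthread_mutex_lock(&p->lock);
	p->growing = false;
	pthread_cond_broadcast(&p->cv);
	pthread_mutex_unlock(&p->lock);
	return 0;
}

int
main(void)
{
	struct xpool p = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
	};

	return xpool_grow(&p, true);
}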

Line 1226 (v.1.235) / Line 1227 (v.1.236)  pool_prime(struct pool *pp, int n)
 static void
 pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
 {
-	struct pool_item *pi;
-	void *cp = storage;
 	const unsigned int align = pp->pr_align;
 	const unsigned int ioff = pp->pr_itemoffset;
+	struct pool_item *pi;
+	void *cp = storage;
 	int n;
 
 	KASSERT(mutex_owned(&pp->pr_lock));

Line 1259 (v.1.235) / Line 1260 (v.1.236)  pool_prime_page(struct pool *pp, void *s
 		pp->pr_curcolor = 0;
 
 	/*
-	 * Adjust storage to apply aligment to `pr_itemoffset' in each item.
+	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
 	 */
 	if (ioff != 0)
 		cp = (char *)cp + align - ioff;
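
Besides the spelling fix, this is where the pr_itemoffset set up in pool_init()
takes effect: the cursor into the new page is advanced by (align - ioff) so
that, for every item laid out afterwards, the byte at offset ioff inside the
item lands on an align-byte boundary. A small stand-alone check of that
arithmetic, assuming align = 8 and ioff = 4 purely for the sake of the example:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	const unsigned int align = 8, ioff = 4;
	static _Alignas(8) char page[64];	/* stand-in for a fresh pool page */
	char *cp = page;

	/* Same adjustment as above: shift the cursor by (align - ioff). */
	if (ioff != 0)
		cp = cp + align - ioff;

	/* Each item placed at align-sized steps now has cp + ioff aligned. */
	for (int i = 0; i < 4; i++) {
		assert((uintptr_t)(cp + ioff) % align == 0);
		cp += align;	/* pretend the item size equals align */
	}
	return 0;
}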

Line 1439 (v.1.235) / Line 1440 (v.1.236)  pool_reclaim(struct pool *pp)
 		if (klock) {
 			KERNEL_UNLOCK_ONE(NULL);
 		}
-		return (0);
+		return 0;
 	}
 
 	LIST_INIT(&pq);

Line 1481 (v.1.235) / Line 1482 (v.1.236)  pool_reclaim(struct pool *pp)
 		KERNEL_UNLOCK_ONE(NULL);
 	}
 
-	return (rv);
+	return rv;
 }
 
 /*

Line 1803 (v.1.235) / Line 1804 (v.1.236)  pool_chk(struct pool *pp, const char *la
 
 out:
 	mutex_exit(&pp->pr_lock);
-	return (r);
+	return r;
 }
 
 /*

Line 2501 (v.1.235) / Line 2502 (v.1.236)  pool_cache_put_slow(pool_cache_cpu_t *cc
 	pool_cache_destruct_object(pc, object);
 
 	return false;
 }
 
 /*
  * pool_cache_put{,_paddr}:

Line 2548 (v.1.235) / Line 2549 (v.1.236)  pool_cache_put_paddr(pool_cache_t pc, vo
 		}
 
 		/*
 		 * Can't free to either group: try the slow path.
 		 * If put_slow() releases the object for us, it
 		 * will return false.  Otherwise we need to retry.
 		 */
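
The comment spells out the contract between pool_cache_put_paddr() and
pool_cache_put_slow(): try the current per-CPU group, then the previous one,
and only then take the slow path, which either disposes of the object
(returning false) or asks the caller to loop and try the groups again. A
condensed user-space sketch of that shape, with invented names, a fixed group
size and none of the locking, just to make the retry loop explicit:

#include <stdbool.h>
#include <stddef.h>

#define XGROUP_SIZE 15			/* arbitrary capacity for the sketch */

struct xgroup {
	unsigned int avail;
	void *objs[XGROUP_SIZE];
};

struct xcpu_cache {
	struct xgroup *cur;		/* the per-CPU current and previous groups */
	struct xgroup *prev;
};

/*
 * Stub slow path: the real one hands a full group back to the global cache or
 * frees the object, and returns false once the object has been dealt with;
 * returning true would mean "retry the fast path".
 */
static bool
xcache_put_slow(struct xcpu_cache *cc, void *obj)
{
	(void)cc;
	(void)obj;
	return false;
}

static void
xcache_put(struct xcpu_cache *cc, void *obj)
{
	for (;;) {
		if (cc->cur->avail < XGROUP_SIZE) {
			cc->cur->objs[cc->cur->avail++] = obj;
			return;
		}
		if (cc->prev->avail < XGROUP_SIZE) {
			cc->prev->objs[cc->prev->avail++] = obj;
			return;
		}
		/* Can't free to either group: try the slow path. */
		if (!xcache_put_slow(cc, obj))
			return;
		/* The slow path kept the object pending; try the groups again. */
	}
}

int
main(void)
{
	struct xgroup a = { 0 }, b = { 0 };
	struct xcpu_cache cc = { &a, &b };
	int dummy;

	xcache_put(&cc, &dummy);	/* lands in the current group */
	return 0;
}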

Legend: lines prefixed with "-" were removed from v.1.235, lines prefixed with
"+" were added in v.1.236, and lines prefixed with a space are unchanged
context present in both versions.
