
Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/sys/kern/subr_pool.c between version 1.67 and 1.68

version 1.67, 2002/03/08 20:51:26 -> version 1.68, 2002/03/08 21:41:59
Line 456  pool_init(struct pool *pp, size_t size,
         pp->pr_hardlimit_ratecap.tv_usec = 0;
         pp->pr_hardlimit_warning_last.tv_sec = 0;
         pp->pr_hardlimit_warning_last.tv_usec = 0;
+        pp->pr_drain_hook = NULL;
+        pp->pr_drain_hook_arg = NULL;
+
         /*
          * Decide whether to put the page header off page to avoid
Line 596 (v1.67) / Line 598 (v1.68)  pool_destroy(struct pool *pp)
 #endif
 }

+void
+pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
+{
+
+        /* XXX no locking -- must be used just after pool_init() */
+#ifdef DIAGNOSTIC
+        if (pp->pr_drain_hook != NULL)
+                panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
+#endif
+        pp->pr_drain_hook = fn;
+        pp->pr_drain_hook_arg = arg;
+}
+
 static __inline struct pool_item_header *
 pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
 {
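The XXX comment above spells out the contract: pool_set_drain_hook() takes no locks of its own, so it must be called right after pool_init(), before the pool sees any traffic. The following is a minimal sketch of how a client subsystem might register a hook; the names (struct frob, frob_pool, frob_drain) are hypothetical, and the pool_init() arguments assume the struct pool_allocator interface this file uses at this revision.

#include <sys/param.h>
#include <sys/pool.h>

struct frob {
        int     f_state;                /* hypothetical payload */
};

static struct pool frob_pool;

static void     frob_drain(void *, int);        /* sketched further below */

void
frob_init(void)
{

        /* NULL selects the default back-end pool allocator. */
        pool_init(&frob_pool, sizeof(struct frob), 0, 0, 0,
            "frobpl", NULL);

        /*
         * Register the drain hook immediately after pool_init();
         * pool_set_drain_hook() does no locking of its own.
         */
        pool_set_drain_hook(&frob_pool, frob_drain, NULL);
}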
Line 663 (v1.67) / Line 678 (v1.68)  pool_get(struct pool *pp, int flags)
         }
 #endif
         if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
+                if (pp->pr_drain_hook != NULL) {
+                        /*
+                         * Since the drain hook is going to free things
+                         * back to the pool, unlock, call the hook, re-lock,
+                         * and check the hardlimit condition again.
+                         */
+                        pr_leave(pp);
+                        simple_unlock(&pp->pr_slock);
+                        (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
+                        simple_lock(&pp->pr_slock);
+                        pr_enter(pp, file, line);
+                        if (pp->pr_nout < pp->pr_hardlimit)
+                                goto startover;
+                }
+
                 if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
                         /*
                          * XXX: A warning isn't logged in this case.  Should
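The comment in the added block describes the protocol around the hook: pool_get() drops pr_slock, runs the hook so that it can pool_put() cached objects back (lowering pr_nout), re-locks, and retries the hard-limit check. Below is a sketch of what such a hook might look like on the client side. The names are again hypothetical, and the private free list, spl level, and locking needs of a real client would differ.

#include <sys/param.h>
#include <sys/pool.h>
#include <sys/queue.h>

struct frob {
        SLIST_ENTRY(frob)       f_link;
        int                     f_state;
};

static SLIST_HEAD(, frob) frob_freecache =
    SLIST_HEAD_INITIALIZER(frob_freecache);
static struct pool frob_pool;

/*
 * Drain hook: give back objects the subsystem is hoarding on a private
 * free list.  pool_put() takes pr_slock itself, which is why the pool
 * code calls the hook with the pool unlocked.  The flags argument is
 * the PR_WAITOK/PR_NOWAIT value from the original request; this sketch
 * never sleeps, so it does not need to distinguish the two.
 */
static void
frob_drain(void *arg, int flags)
{
        struct frob *f;
        int s;

        s = splvm();    /* assume the cache is also touched at interrupt time */
        while ((f = SLIST_FIRST(&frob_freecache)) != NULL) {
                SLIST_REMOVE_HEAD(&frob_freecache, f_link);
                pool_put(&frob_pool, f);
        }
        splx(s);
}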
Line 1264 (v1.67) / Line 1294 (v1.68)  pool_reclaim(struct pool *pp)
         if (pp->pr_roflags & PR_STATIC)
                 return (0);

+        if (pp->pr_drain_hook != NULL) {
+                /*
+                 * The drain hook must be called with the pool unlocked.
+                 */
+                (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
+        }
+
         if (simple_lock_try(&pp->pr_slock) == 0)
                 return (0);
         pr_enter(pp, file, line);

         TAILQ_INIT(&pq);

         /*
Line 1899 (v1.67) / Line 1937 (v1.68)  pool_allocator_alloc(struct pool *org, i
         do {
                 if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
                         return (res);
-                if ((flags & PR_WAITOK) == 0)
+                if ((flags & PR_WAITOK) == 0) {
+                        /*
+                         * We only run the drain hook here if PR_NOWAIT.
+                         * In other cases, the hook will be run in
+                         * pool_reclaim().
+                         */
+                        if (org->pr_drain_hook != NULL) {
+                                (*org->pr_drain_hook)(org->pr_drain_hook_arg,
+                                    flags);
+                                if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
+                                        return (res);
+                        }
                         break;
+                }

                 /*
                  * Drain all pools, except "org", that use this
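From a caller's point of view, the PR_NOWAIT case above is where the hook earns its keep: a non-sleeping allocation now gets one retry after the owning pool's drain hook has had a chance to give memory back, instead of failing outright. A hypothetical caller, reusing the invented names from the earlier sketches:

#include <sys/pool.h>

struct frob;                    /* from the earlier sketch */
extern struct pool frob_pool;   /* from the earlier sketch */

/*
 * Hypothetical interrupt-time allocation: PR_NOWAIT must not sleep,
 * but with a drain hook registered the allocation path will run the
 * hook once and retry before giving up, so this can still succeed
 * under memory pressure.  NULL is still possible and must be handled.
 */
struct frob *
frob_alloc_intr(void)
{

        return (pool_get(&frob_pool, PR_NOWAIT));
}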
Line 1913 (v1.67) / Line 1963 (v1.68)  pool_allocator_alloc(struct pool *org, i
                  * other way to have potentially sleeping pool_reclaim,
                  * non-sleeping locks on pool_allocator, and some
                  * stirring of drained pools in the allocator.
+                 *
+                 * XXX Maybe we should use pool_head_slock for locking
+                 * the allocators?
                  */
                 freed = 0;
