
Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/sys/kern/subr_pool.c between version 1.122.4.1 and 1.126

version 1.122.4.1, 2006/10/22 06:07:11 -> version 1.126, 2007/02/21 23:00:05
Line 1
 /*      $NetBSD$        */  /*      $NetBSD$        */
   
 /*-  /*-
  * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.   * Copyright (c) 1997, 1999, 2000, 2002 The NetBSD Foundation, Inc.
  * All rights reserved.   * All rights reserved.
  *   *
  * This code is derived from software contributed to The NetBSD Foundation   * This code is derived from software contributed to The NetBSD Foundation
Line 53  __KERNEL_RCSID(0, "$NetBSD$");
 #include <sys/lock.h>  #include <sys/lock.h>
 #include <sys/pool.h>  #include <sys/pool.h>
 #include <sys/syslog.h>  #include <sys/syslog.h>
   #include <sys/debug.h>
   
 #include <uvm/uvm.h>  #include <uvm/uvm.h>
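The newly added #include <sys/debug.h> is what brings the FREECHECK_IN/FREECHECK_OUT hooks into scope; they show up further down in pool_get(), pool_do_put() and the pool_cache paths. As a hedged sketch of the idiom such debug hooks follow (the real definitions live in sys/debug.h and may differ; the freecheck_out()/freecheck_in() helper prototypes below are assumptions for illustration), the macros expand to tracking calls only in a DEBUG kernel and compile away otherwise:

    /*
     * Illustrative sketch only, not a copy of sys/debug.h: in a DEBUG kernel
     * the macros record which objects are outstanding; otherwise they expand
     * to nothing so the fast path pays no cost.
     */
    #if defined(DEBUG)
    void freecheck_out(void **fc, void *obj);   /* assumed helper: object handed out */
    void freecheck_in(void **fc, void *obj);    /* assumed helper: object handed back */
    #define FREECHECK_OUT(fc, obj)  freecheck_out((fc), (obj))
    #define FREECHECK_IN(fc, obj)   freecheck_in((fc), (obj))
    #else
    #define FREECHECK_OUT(fc, obj)  /* nothing */
    #define FREECHECK_IN(fc, obj)   /* nothing */
    #endif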
   
Line 473 (v1.122.4.1) / Line 474 (v1.126)  pr_rmpage(struct pool *pp, struct pool_i
         pool_update_curpage(pp);          pool_update_curpage(pp);
 }  }
   
 static boolean_t  static bool
 pa_starved_p(struct pool_allocator *pa)  pa_starved_p(struct pool_allocator *pa)
 {  {
   
Line 484 (v1.122.4.1) / Line 485 (v1.126)  pa_starved_p(struct pool_allocator *pa)
 }  }
   
 static int  static int
 pool_reclaim_callback(struct callback_entry *ce __unused, void *obj,  pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
     void *arg __unused)  
 {  {
         struct pool *pp = obj;          struct pool *pp = obj;
         struct pool_allocator *pa = pp->pr_alloc;          struct pool_allocator *pa = pp->pr_alloc;
Line 675  pool_init(struct pool *pp, size_t size,
         pp->pr_hardlimit_warning_last.tv_usec = 0;          pp->pr_hardlimit_warning_last.tv_usec = 0;
         pp->pr_drain_hook = NULL;          pp->pr_drain_hook = NULL;
         pp->pr_drain_hook_arg = NULL;          pp->pr_drain_hook_arg = NULL;
           pp->pr_freecheck = NULL;
   
         /*          /*
          * Decide whether to put the page header off page to avoid           * Decide whether to put the page header off page to avoid
Line 930 (v1.122.4.1) / Line 931 (v1.126)  pool_get(struct pool *pp, int flags)
 #ifdef LOCKDEBUG  #ifdef LOCKDEBUG
         if (flags & PR_WAITOK)          if (flags & PR_WAITOK)
                 ASSERT_SLEEPABLE(NULL, "pool_get(PR_WAITOK)");                  ASSERT_SLEEPABLE(NULL, "pool_get(PR_WAITOK)");
         SCHED_ASSERT_UNLOCKED();  
 #endif  #endif
   
         simple_lock(&pp->pr_slock);          simple_lock(&pp->pr_slock);
Line 1136  pool_get(struct pool *pp, int flags)
         }          }
   
         simple_unlock(&pp->pr_slock);          simple_unlock(&pp->pr_slock);
           KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
           FREECHECK_OUT(&pp->pr_freecheck, v);
         return (v);          return (v);
 }  }
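The KASSERT added just before pool_get() returns checks that the item, once pr_itemoffset is applied, lands on a pr_align boundary; the bitmask test is only valid because the alignment is a power of two. A standalone illustration of that arithmetic (the aligned_p() wrapper and the sample addresses are hypothetical, only the mask expression is taken from the diff):

    /*
     * For a power-of-two align, (addr & (align - 1)) == 0 exactly when addr
     * is a multiple of align; this is the test the new KASSERT applies to
     * (v + pr_itemoffset).
     */
    #include <assert.h>
    #include <stdint.h>

    static int
    aligned_p(uintptr_t addr, uintptr_t itemoff, uintptr_t align)
    {
        assert(align != 0 && (align & (align - 1)) == 0);   /* power of two */
        return ((addr + itemoff) & (align - 1)) == 0;
    }

    int
    main(void)
    {
        assert(aligned_p(0x1040, 0, 0x40));    /* 0x1040 is 64-byte aligned */
        assert(!aligned_p(0x1044, 0, 0x40));   /* 0x1044 would trip the KASSERT */
        return 0;
    }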
   
Line 1149 (v1.122.4.1) / Line 1151 (v1.126)  pool_do_put(struct pool *pp, void *v, st
         struct pool_item_header *ph;          struct pool_item_header *ph;
   
         LOCK_ASSERT(simple_lock_held(&pp->pr_slock));          LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
         SCHED_ASSERT_UNLOCKED();          FREECHECK_IN(&pp->pr_freecheck, v);
   
 #ifdef DIAGNOSTIC  #ifdef DIAGNOSTIC
         if (__predict_false(pp->pr_nout == 0)) {          if (__predict_false(pp->pr_nout == 0)) {
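With pool_get() now calling FREECHECK_OUT on every object it hands out and pool_do_put() calling FREECHECK_IN before accepting one back (replacing the dropped SCHED_ASSERT_UNLOCKED), a DEBUG kernel can catch an object that is returned twice or that never came from the pool. A minimal userland model of that pairing; the in-kernel bookkeeping and failure mode are different, this only demonstrates the idea:

    /*
     * Toy model of FREECHECK_OUT/FREECHECK_IN: remember which objects are
     * currently "out" and reject a put of anything that is not.
     */
    #include <assert.h>
    #include <stddef.h>

    #define MAXOUT 128
    static void *out_tab[MAXOUT];

    static void
    check_out(void *obj)              /* pool_get() side */
    {
        for (size_t i = 0; i < MAXOUT; i++)
            if (out_tab[i] == NULL) {
                out_tab[i] = obj;
                return;
            }
        assert(!"model table full");
    }

    static void
    check_in(void *obj)               /* pool_put() side */
    {
        for (size_t i = 0; i < MAXOUT; i++)
            if (out_tab[i] == obj) {
                out_tab[i] = NULL;
                return;
            }
        assert(!"double free or foreign pointer");
    }

    int
    main(void)
    {
        int x;

        check_out(&x);
        check_in(&x);    /* fine: matches the earlier check_out */
        /* a second check_in(&x) here would trip the assertion */
        return 0;
    }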
Line 1372 (v1.122.4.1) / Line 1374 (v1.126)  pool_prime_page(struct pool *pp, caddr_t
 {  {
         struct pool_item *pi;          struct pool_item *pi;
         caddr_t cp = storage;          caddr_t cp = storage;
         unsigned int align = pp->pr_align;          const unsigned int align = pp->pr_align;
         unsigned int ioff = pp->pr_itemoffset;          const unsigned int ioff = pp->pr_itemoffset;
         int n;          int n;
   
         LOCK_ASSERT(simple_lock_held(&pp->pr_slock));          LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
Line 1410 (v1.122.4.1) / Line 1412 (v1.126)  pool_prime_page(struct pool *pp, caddr_t
         if (ioff != 0)          if (ioff != 0)
                 cp = (caddr_t)(cp + (align - ioff));                  cp = (caddr_t)(cp + (align - ioff));
   
           KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
   
         /*          /*
          * Insert remaining chunks on the bucket list.           * Insert remaining chunks on the bucket list.
          */           */
Line 1437 (v1.122.4.1) / Line 1441 (v1.126)  pool_prime_page(struct pool *pp, caddr_t
                         pi->pi_magic = PI_MAGIC;                          pi->pi_magic = PI_MAGIC;
 #endif  #endif
                         cp = (caddr_t)(cp + pp->pr_size);                          cp = (caddr_t)(cp + pp->pr_size);
   
                           KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
                 }                  }
         }          }
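pool_prime_page() slices a freshly allocated page into pr_size-byte items, and the two KASSERTs added here pin down the invariant that makes the pool_get() check above hold: the first item is aligned once the ioff adjustment is applied, and alignment is preserved each time cp advances by pr_size, because the item size is kept a multiple of the alignment. A simplified sketch of that carving loop, with hypothetical names and none of the page-header or PI_MAGIC handling:

    /*
     * Simplified carve loop: if (cp + ioff) starts out aligned and size is
     * a multiple of align, every subsequent item stays aligned, which is
     * exactly what the two new KASSERTs verify.
     */
    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    static void
    carve_page(char *storage, size_t pagesz, size_t size, size_t align, size_t ioff)
    {
        char *cp = storage;
        size_t n;

        if (ioff != 0)
            cp += align - ioff;                 /* mirrors the ioff fixup above */
        assert((((uintptr_t)cp + ioff) & (align - 1)) == 0);

        for (n = (pagesz - (size_t)(cp - storage)) / size; n != 0; n--) {
            /* the real code links the item onto the page's free list here */
            cp += size;
            assert((((uintptr_t)cp + ioff) & (align - 1)) == 0);
        }
    }

    int
    main(void)
    {
        static _Alignas(64) char page[4096];

        carve_page(page, sizeof(page), 128, 64, 0);   /* 32 aligned 128-byte items */
        return 0;
    }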
   
Line 1620 (v1.122.4.1) / Line 1626 (v1.126)  pool_reclaim(struct pool *pp)
  * Note, we must never be called from an interrupt context.   * Note, we must never be called from an interrupt context.
  */   */
 void  void
 pool_drain(void *arg __unused)  pool_drain(void *arg)
 {  {
         struct pool *pp;          struct pool *pp;
         int s;          int s;
Line 1704 (v1.122.4.1) / Line 1710 (v1.126)  pool_printit(struct pool *pp, const char
 }  }
   
 static void  static void
 pool_print_pagelist(struct pool *pp __unused, struct pool_pagelist *pl,  pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
     void (*pr)(const char *, ...))      void (*pr)(const char *, ...))
 {  {
         struct pool_item_header *ph;          struct pool_item_header *ph;
Line 2069 (v1.122.4.1) / Line 2075 (v1.126)  pool_cache_get_paddr(struct pool_cache *
                                 return (NULL);                                  return (NULL);
                         }                          }
                 }                  }
                   KASSERT((((vaddr_t)object + pc->pc_pool->pr_itemoffset) &
                       (pc->pc_pool->pr_align - 1)) == 0);
                 if (object != NULL && pap != NULL) {                  if (object != NULL && pap != NULL) {
 #ifdef POOL_VTOPHYS  #ifdef POOL_VTOPHYS
                         *pap = POOL_VTOPHYS(object);                          *pap = POOL_VTOPHYS(object);
Line 2076 (v1.122.4.1) / Line 2084 (v1.126)  pool_cache_get_paddr(struct pool_cache *
                         *pap = POOL_PADDR_INVALID;                          *pap = POOL_PADDR_INVALID;
 #endif  #endif
                 }                  }
   
                   FREECHECK_OUT(&pc->pc_freecheck, object);
                 return (object);                  return (object);
         }          }
   
Line 2089 (v1.122.4.1) / Line 2099 (v1.126)  pool_cache_get_paddr(struct pool_cache *
         }          }
         simple_unlock(&pc->pc_slock);          simple_unlock(&pc->pc_slock);
   
           KASSERT((((vaddr_t)object + pc->pc_pool->pr_itemoffset) &
               (pc->pc_pool->pr_align - 1)) == 0);
           FREECHECK_OUT(&pc->pc_freecheck, object);
         return (object);          return (object);
 }  }
   
Line 2104 (v1.122.4.1) / Line 2117 (v1.126)  pool_cache_put_paddr(struct pool_cache *
         struct pool_cache_group *pcg;          struct pool_cache_group *pcg;
         int s;          int s;
   
           FREECHECK_IN(&pc->pc_freecheck, object);
   
         if (__predict_false((pc->pc_pool->pr_flags & PR_WANTED) != 0)) {          if (__predict_false((pc->pc_pool->pr_flags & PR_WANTED) != 0)) {
                 goto destruct;                  goto destruct;
         }          }
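The pool_cache layer gets the same treatment: both return paths of pool_cache_get_paddr(), whether the object comes from a cache group or falls through to the backing pool, now run the alignment KASSERT and FREECHECK_OUT, and pool_cache_put_paddr() runs FREECHECK_IN before the object can land back in a group. A hedged consumer-side sketch, assuming the pool_cache_get_paddr()/pool_cache_put_paddr() signatures of this kernel generation; the foo structure and foo_cache are hypothetical:

    /* Hypothetical pool_cache consumer; not taken from the tree. */
    #include <sys/types.h>
    #include <sys/pool.h>

    struct foo {
        int f_state;
    };

    static struct pool_cache foo_cache;   /* assumed set up with pool_cache_init() elsewhere */

    static struct foo *
    foo_alloc(void)
    {
        paddr_t pa;
        struct foo *f;

        /* the alignment KASSERT and FREECHECK_OUT run inside this call */
        f = pool_cache_get_paddr(&foo_cache, PR_WAITOK, &pa);
        if (f != NULL)
            f->f_state = 0;
        return f;
    }

    static void
    foo_free(struct foo *f)
    {
        /* FREECHECK_IN fires in here; a second foo_free(f) would be caught */
        pool_cache_put_paddr(&foo_cache, f, POOL_PADDR_INVALID);
    }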
Line 2354 (v1.122.4.1) / Line 2369 (v1.126)  pool_allocator_free(struct pool *pp, voi
 }  }
   
 void *  void *
 pool_page_alloc(struct pool *pp __unused, int flags)  pool_page_alloc(struct pool *pp, int flags)
 {  {
         boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;          bool waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
   
         return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));          return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
 }  }
   
 void  void
 pool_page_free(struct pool *pp __unused, void *v)  pool_page_free(struct pool *pp, void *v)
 {  {
   
         uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);          uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
 }  }
   
 static void *  static void *
 pool_page_alloc_meta(struct pool *pp __unused, int flags)  pool_page_alloc_meta(struct pool *pp, int flags)
 {  {
         boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;          bool waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
   
         return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));          return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
 }  }
   
 static void  static void
 pool_page_free_meta(struct pool *pp __unused, void *v)  pool_page_free_meta(struct pool *pp, void *v)
 {  {
   
         uvm_km_free_poolpage(kmem_map, (vaddr_t) v);          uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
Line 2421 (v1.122.4.1) / Line 2436 (v1.126)  pool_subpage_free_nointr(struct pool *pp
 }  }
 #endif /* POOL_SUBPAGE */  #endif /* POOL_SUBPAGE */
 void *  void *
 pool_page_alloc_nointr(struct pool *pp __unused, int flags)  pool_page_alloc_nointr(struct pool *pp, int flags)
 {  {
         boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;          bool waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
   
         return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));          return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
 }  }
   
 void  void
 pool_page_free_nointr(struct pool *pp __unused, void *v)  pool_page_free_nointr(struct pool *pp, void *v)
 {  {
   
         uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);          uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
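The allocator backends (pool_page_alloc(), pool_page_free() and friends) likewise lose their __unused annotations and switch the local waitok flag from boolean_t to bool, still spelling the assignment with the old TRUE/FALSE ternary. Purely as a side note on the idiom, not something this revision does, the same test can be written without the ternary once a real bool type is in use:

    /* PR_WAITOK comes from <sys/pool.h>; "!= 0" already yields a bool. */
    bool waitok = (flags & PR_WAITOK) != 0;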

Legend: removed from v.1.122.4.1 / changed lines / added in v.1.126
