[BACK] Return to subr_pool.c CVS log · [TXT] [DIR] · Up to [cvs.NetBSD.org] / src / sys / kern

Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/sys/kern/subr_pool.c between version 1.138 and 1.138.2.3

version 1.138, 2007/12/05 06:52:01 version 1.138.2.3, 2007/12/13 05:06:01
Line 40 
Line 40 
 #include <sys/cdefs.h>  #include <sys/cdefs.h>
 __KERNEL_RCSID(0, "$NetBSD$");  __KERNEL_RCSID(0, "$NetBSD$");
   
   #include "opt_ddb.h"
 #include "opt_pool.h"  #include "opt_pool.h"
 #include "opt_poollog.h"  #include "opt_poollog.h"
 #include "opt_lockdebug.h"  #include "opt_lockdebug.h"
Line 101  static void pool_page_free_meta(struct p
Line 102  static void pool_page_free_meta(struct p
 /* allocator for pool metadata */  /* allocator for pool metadata */
 struct pool_allocator pool_allocator_meta = {  struct pool_allocator pool_allocator_meta = {
         pool_page_alloc_meta, pool_page_free_meta,          pool_page_alloc_meta, pool_page_free_meta,
         .pa_backingmapptr = &kmem_map,          .pa_backingmapptr = &kernel_map,
 };  };
   
 /* # of seconds to retain page after last use */  /* # of seconds to retain page after last use */
Line 127  struct pool_item_header {
Line 128  struct pool_item_header {
         void *                  ph_page;        /* this page's address */          void *                  ph_page;        /* this page's address */
         struct timeval          ph_time;        /* last referenced */          struct timeval          ph_time;        /* last referenced */
         uint16_t                ph_nmissing;    /* # of chunks in use */          uint16_t                ph_nmissing;    /* # of chunks in use */
           uint16_t                ph_off;         /* start offset in page */
         union {          union {
                 /* !PR_NOTOUCH */                  /* !PR_NOTOUCH */
                 struct {                  struct {
Line 135  struct pool_item_header {
Line 137  struct pool_item_header {
                 } phu_normal;                  } phu_normal;
                 /* PR_NOTOUCH */                  /* PR_NOTOUCH */
                 struct {                  struct {
                         uint16_t phu_off;       /* start offset in page */                          pool_item_bitmap_t phu_bitmap[1];
                         pool_item_bitmap_t phu_bitmap[];  
                 } phu_notouch;                  } phu_notouch;
         } ph_u;          } ph_u;
 };  };
 #define ph_itemlist     ph_u.phu_normal.phu_itemlist  #define ph_itemlist     ph_u.phu_normal.phu_itemlist
 #define ph_off          ph_u.phu_notouch.phu_off  
 #define ph_bitmap       ph_u.phu_notouch.phu_bitmap  #define ph_bitmap       ph_u.phu_notouch.phu_bitmap
   
 struct pool_item {  struct pool_item {
Line 412  phtree_compare(struct pool_item_header *
Line 412  phtree_compare(struct pool_item_header *
 SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);  SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
 SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);  SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
   
   static inline struct pool_item_header *
   pr_find_pagehead_noalign(struct pool *pp, void *v)
   {
           struct pool_item_header *ph, tmp;
   
           tmp.ph_page = (void *)(uintptr_t)v;
           ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
           if (ph == NULL) {
                   ph = SPLAY_ROOT(&pp->pr_phtree);
                   if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
                           ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
                   }
                   KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
           }
   
           return ph;
   }
   
 /*  /*
  * Return the pool page header based on item address.   * Return the pool page header based on item address.
  */   */
Line 421  pr_find_pagehead(struct pool *pp, void *
Line 439  pr_find_pagehead(struct pool *pp, void *
         struct pool_item_header *ph, tmp;          struct pool_item_header *ph, tmp;
   
         if ((pp->pr_roflags & PR_NOALIGN) != 0) {          if ((pp->pr_roflags & PR_NOALIGN) != 0) {
                 tmp.ph_page = (void *)(uintptr_t)v;                  ph = pr_find_pagehead_noalign(pp, v);
                 ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);  
                 if (ph == NULL) {  
                         ph = SPLAY_ROOT(&pp->pr_phtree);  
                         if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {  
                                 ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);  
                         }  
                         KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);  
                 }  
         } else {          } else {
                 void *page =                  void *page =
                     (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);                      (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
Line 790  pool_init(struct pool *pp, size_t size, 
Line 800  pool_init(struct pool *pp, size_t size, 
   
 #ifdef POOL_DIAGNOSTIC  #ifdef POOL_DIAGNOSTIC
         if (flags & PR_LOGGING) {          if (flags & PR_LOGGING) {
                 if (kmem_map == NULL ||                  if (kernel_map == NULL ||
                     (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),                      (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
                      M_TEMP, M_NOWAIT)) == NULL)                       M_TEMP, M_NOWAIT)) == NULL)
                         pp->pr_roflags &= ~PR_LOGGING;                          pp->pr_roflags &= ~PR_LOGGING;
Line 1436  pool_prime_page(struct pool *pp, void *s
Line 1446  pool_prime_page(struct pool *pp, void *s
         /*          /*
          * Color this page.           * Color this page.
          */           */
         cp = (char *)cp + pp->pr_curcolor;          ph->ph_off = pp->pr_curcolor;
           cp = (char *)cp + ph->ph_off;
         if ((pp->pr_curcolor += align) > pp->pr_maxcolor)          if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
                 pp->pr_curcolor = 0;                  pp->pr_curcolor = 0;
   
Line 2081  pool_cache_bootstrap(pool_cache_t pc, si
Line 2092  pool_cache_bootstrap(pool_cache_t pc, si
         /* Allocate per-CPU caches. */          /* Allocate per-CPU caches. */
         memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));          memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
         pc->pc_ncpu = 0;          pc->pc_ncpu = 0;
         if (ncpu == 0) {          if (ncpu < 2) {
                 /* XXX For sparc: boot CPU is not attached yet. */                  /* XXX For sparc: boot CPU is not attached yet. */
                 pool_cache_cpu_init1(curcpu(), pc);                  pool_cache_cpu_init1(curcpu(), pc);
         } else {          } else {
Line 2111  pool_cache_bootstrap(pool_cache_t pc, si
Line 2122  pool_cache_bootstrap(pool_cache_t pc, si
 void  void
 pool_cache_destroy(pool_cache_t pc)  pool_cache_destroy(pool_cache_t pc)
 {  {
   
           pool_cache_bootstrap_destroy(pc);
           pool_put(&cache_pool, pc);
   }
   
   /*
    * pool_cache_bootstrap_destroy:
    *
    *      Kernel-private version of pool_cache_destroy().
    *      Destroy a pool cache initialized by pool_cache_bootstrap.
    */
   void
   pool_cache_bootstrap_destroy(pool_cache_t pc)
   {
         struct pool *pp = &pc->pc_pool;          struct pool *pp = &pc->pc_pool;
         pool_cache_cpu_t *cc;          pool_cache_cpu_t *cc;
         pcg_t *pcg;          pcg_t *pcg;
Line 2150  pool_cache_destroy(pool_cache_t pc)
Line 2175  pool_cache_destroy(pool_cache_t pc)
         /* Finally, destroy it. */          /* Finally, destroy it. */
         mutex_destroy(&pc->pc_lock);          mutex_destroy(&pc->pc_lock);
         pool_destroy(pp);          pool_destroy(pp);
         pool_put(&cache_pool, pc);  
 }  }
   
 /*  /*
Line 2742  void pool_page_free(struct pool *, void 
Line 2766  void pool_page_free(struct pool *, void 
 #ifdef POOL_SUBPAGE  #ifdef POOL_SUBPAGE
 struct pool_allocator pool_allocator_kmem_fullpage = {  struct pool_allocator pool_allocator_kmem_fullpage = {
         pool_page_alloc, pool_page_free, 0,          pool_page_alloc, pool_page_free, 0,
         .pa_backingmapptr = &kmem_map,          .pa_backingmapptr = &kernel_map,
 };  };
 #else  #else
 struct pool_allocator pool_allocator_kmem = {  struct pool_allocator pool_allocator_kmem = {
         pool_page_alloc, pool_page_free, 0,          pool_page_alloc, pool_page_free, 0,
         .pa_backingmapptr = &kmem_map,          .pa_backingmapptr = &kernel_map,
 };  };
 #endif  #endif
   
Line 2772  void pool_subpage_free(struct pool *, vo
Line 2796  void pool_subpage_free(struct pool *, vo
   
 struct pool_allocator pool_allocator_kmem = {  struct pool_allocator pool_allocator_kmem = {
         pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,          pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
         .pa_backingmapptr = &kmem_map,          .pa_backingmapptr = &kernel_map,
 };  };
   
 void    *pool_subpage_alloc_nointr(struct pool *, int);  void    *pool_subpage_alloc_nointr(struct pool *, int);
Line 2780  void pool_subpage_free_nointr(struct poo
Line 2804  void pool_subpage_free_nointr(struct poo
   
 struct pool_allocator pool_allocator_nointr = {  struct pool_allocator pool_allocator_nointr = {
         pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,          pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
         .pa_backingmapptr = &kmem_map,          .pa_backingmapptr = &kernel_map,
 };  };
 #endif /* POOL_SUBPAGE */  #endif /* POOL_SUBPAGE */
   
Line 2818  pool_page_alloc(struct pool *pp, int fla
Line 2842  pool_page_alloc(struct pool *pp, int fla
 {  {
         bool waitok = (flags & PR_WAITOK) ? true : false;          bool waitok = (flags & PR_WAITOK) ? true : false;
   
         return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));          return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
 }  }
   
 void  void
 pool_page_free(struct pool *pp, void *v)  pool_page_free(struct pool *pp, void *v)
 {  {
   
         uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);          uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
 }  }
   
 static void *  static void *
Line 2833  pool_page_alloc_meta(struct pool *pp, in
Line 2857  pool_page_alloc_meta(struct pool *pp, in
 {  {
         bool waitok = (flags & PR_WAITOK) ? true : false;          bool waitok = (flags & PR_WAITOK) ? true : false;
   
         return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));          return ((void *) uvm_km_alloc_poolpage(kernel_map, waitok));
 }  }
   
 static void  static void
 pool_page_free_meta(struct pool *pp, void *v)  pool_page_free_meta(struct pool *pp, void *v)
 {  {
   
         uvm_km_free_poolpage(kmem_map, (vaddr_t) v);          uvm_km_free_poolpage(kernel_map, (vaddr_t) v);
 }  }
   
 #ifdef POOL_SUBPAGE  #ifdef POOL_SUBPAGE
Line 2886  pool_page_free_nointr(struct pool *pp, v
Line 2910  pool_page_free_nointr(struct pool *pp, v
   
         uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);          uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
 }  }
   
   #if defined(DDB)
   static bool
   pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
   {
   
           return (uintptr_t)ph->ph_page <= addr &&
               addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
   }
   
   void
   pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
   {
           struct pool *pp;
   
           LIST_FOREACH(pp, &pool_head, pr_poollist) {
                   struct pool_item_header *ph;
                   uintptr_t item;
   
                   if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
                           LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
                                   if (pool_in_page(pp, ph, addr)) {
                                           goto found;
                                   }
                           }
                           LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
                                   if (pool_in_page(pp, ph, addr)) {
                                           goto found;
                                   }
                           }
                           continue;
                   } else {
                           ph = pr_find_pagehead_noalign(pp, (void *)addr);
                           if (ph == NULL || !pool_in_page(pp, ph, addr)) {
                                   continue;
                           }
                   }
   found:
                   item = (uintptr_t)ph->ph_page + ph->ph_off;
                   item = item + rounddown(addr - item, pp->pr_size);
                   (*pr)("%p is %p+%zu from POOL '%s'\n",
                       (void *)addr, item, (size_t)(addr - item),
                       pp->pr_wchan);
           }
   }
   #endif /* defined(DDB) */

Legend:
  Removed from v.1.138
  Changed lines
  Added in v.1.138.2.3

CVSweb <webmaster@jp.NetBSD.org>