[BACK] Return to the subr_pool.c CVS log — [TXT] [DIR] — Up to [cvs.NetBSD.org] / src / sys / kern

Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/sys/kern/subr_pool.c between version 1.138 and 1.138.2.2

Left column: version 1.138, 2007/12/05 06:52:01 — Right column: version 1.138.2.2, 2007/12/10 12:56:10
Line 101  static void pool_page_free_meta(struct p
Line 101  static void pool_page_free_meta(struct p
 /* allocator for pool metadata */  /* allocator for pool metadata */
 struct pool_allocator pool_allocator_meta = {  struct pool_allocator pool_allocator_meta = {
         pool_page_alloc_meta, pool_page_free_meta,          pool_page_alloc_meta, pool_page_free_meta,
         .pa_backingmapptr = &kmem_map,          .pa_backingmapptr = &kernel_map,
 };  };
   
 /* # of seconds to retain page after last use */  /* # of seconds to retain page after last use */
Line 790  pool_init(struct pool *pp, size_t size, 
Line 790  pool_init(struct pool *pp, size_t size, 
   
 #ifdef POOL_DIAGNOSTIC  #ifdef POOL_DIAGNOSTIC
         if (flags & PR_LOGGING) {          if (flags & PR_LOGGING) {
                 if (kmem_map == NULL ||                  if (kernel_map == NULL ||
                     (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),                      (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
                      M_TEMP, M_NOWAIT)) == NULL)                       M_TEMP, M_NOWAIT)) == NULL)
                         pp->pr_roflags &= ~PR_LOGGING;                          pp->pr_roflags &= ~PR_LOGGING;
Line 2111  pool_cache_bootstrap(pool_cache_t pc, si
Line 2111  pool_cache_bootstrap(pool_cache_t pc, si
 void  void
 pool_cache_destroy(pool_cache_t pc)  pool_cache_destroy(pool_cache_t pc)
 {  {
   
           pool_cache_bootstrap_destroy(pc);
           pool_put(&cache_pool, pc);
   }
   
   /*
    * pool_cache_bootstrap_destroy:
    *
    *      Kernel-private version of pool_cache_destroy().
    *      Destroy a pool cache initialized by pool_cache_bootstrap.
    */
   void
   pool_cache_bootstrap_destroy(pool_cache_t pc)
   {
         struct pool *pp = &pc->pc_pool;          struct pool *pp = &pc->pc_pool;
         pool_cache_cpu_t *cc;          pool_cache_cpu_t *cc;
         pcg_t *pcg;          pcg_t *pcg;
Line 2150  pool_cache_destroy(pool_cache_t pc)
Line 2164  pool_cache_destroy(pool_cache_t pc)
         /* Finally, destroy it. */          /* Finally, destroy it. */
         mutex_destroy(&pc->pc_lock);          mutex_destroy(&pc->pc_lock);
         pool_destroy(pp);          pool_destroy(pp);
         pool_put(&cache_pool, pc);  
 }  }
   
 /*  /*
Line 2742  void pool_page_free(struct pool *, void 
Line 2755  void pool_page_free(struct pool *, void 
 #ifdef POOL_SUBPAGE  #ifdef POOL_SUBPAGE
 struct pool_allocator pool_allocator_kmem_fullpage = {  struct pool_allocator pool_allocator_kmem_fullpage = {
         pool_page_alloc, pool_page_free, 0,          pool_page_alloc, pool_page_free, 0,
         .pa_backingmapptr = &kmem_map,          .pa_backingmapptr = &kernel_map,
 };  };
 #else  #else
 struct pool_allocator pool_allocator_kmem = {  struct pool_allocator pool_allocator_kmem = {
         pool_page_alloc, pool_page_free, 0,          pool_page_alloc, pool_page_free, 0,
         .pa_backingmapptr = &kmem_map,          .pa_backingmapptr = &kernel_map,
 };  };
 #endif  #endif
   
Line 2772  void pool_subpage_free(struct pool *, vo
Line 2785  void pool_subpage_free(struct pool *, vo
   
 struct pool_allocator pool_allocator_kmem = {  struct pool_allocator pool_allocator_kmem = {
         pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,          pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
         .pa_backingmapptr = &kmem_map,          .pa_backingmapptr = &kernel_map,
 };  };
   
 void    *pool_subpage_alloc_nointr(struct pool *, int);  void    *pool_subpage_alloc_nointr(struct pool *, int);
Line 2780  void pool_subpage_free_nointr(struct poo
Line 2793  void pool_subpage_free_nointr(struct poo
   
 struct pool_allocator pool_allocator_nointr = {  struct pool_allocator pool_allocator_nointr = {
         pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,          pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
         .pa_backingmapptr = &kmem_map,          .pa_backingmapptr = &kernel_map,
 };  };
 #endif /* POOL_SUBPAGE */  #endif /* POOL_SUBPAGE */
   
Line 2818  pool_page_alloc(struct pool *pp, int fla
Line 2831  pool_page_alloc(struct pool *pp, int fla
 {  {
         bool waitok = (flags & PR_WAITOK) ? true : false;          bool waitok = (flags & PR_WAITOK) ? true : false;
   
         return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));          return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
 }  }
   
 void  void
 pool_page_free(struct pool *pp, void *v)  pool_page_free(struct pool *pp, void *v)
 {  {
   
         uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);          uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
 }  }
   
 static void *  static void *
Line 2833  pool_page_alloc_meta(struct pool *pp, in
Line 2846  pool_page_alloc_meta(struct pool *pp, in
 {  {
         bool waitok = (flags & PR_WAITOK) ? true : false;          bool waitok = (flags & PR_WAITOK) ? true : false;
   
         return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));          return ((void *) uvm_km_alloc_poolpage(kernel_map, waitok));
 }  }
   
 static void  static void
 pool_page_free_meta(struct pool *pp, void *v)  pool_page_free_meta(struct pool *pp, void *v)
 {  {
   
         uvm_km_free_poolpage(kmem_map, (vaddr_t) v);          uvm_km_free_poolpage(kernel_map, (vaddr_t) v);
 }  }
   
 #ifdef POOL_SUBPAGE  #ifdef POOL_SUBPAGE

Legend:
Lines shown only in the left column were removed from v.1.138.
Lines appearing in both columns with differing text are changed lines.
Lines shown only in the right column were added in v.1.138.2.2.

CVSweb <webmaster@jp.NetBSD.org>