
Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/sys/kern/subr_pool.c between version 1.203 and 1.204

version 1.203, 2014/06/13 19:09:07 (removed lines marked "-")
version 1.204, 2015/07/28 12:32:44 (added lines marked "+")
Line 1 (1.203) / Line 1 (1.204):
 /*      $NetBSD$        */
 
 /*-
- * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014
+ * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015
  *     The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
- * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
+ * Simulation Facility, NASA Ames Research Center; by Andrew Doran, and by
+ * Maxime Villard.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
Line 82 (1.203) / Line 83 (1.204): static struct pool phpool[PHPOOL_MAX];
 static struct pool psppool;
 #endif
 
+#ifdef POOL_REDZONE
+# define POOL_REDZONE_SIZE 2
+static void pool_redzone_init(struct pool *, size_t);
+static void pool_redzone_fill(struct pool *, void *);
+static void pool_redzone_check(struct pool *, void *);
+#else
+# define pool_redzone_init(pp, sz)      /* NOTHING */
+# define pool_redzone_fill(pp, ptr)     /* NOTHING */
+# define pool_redzone_check(pp, ptr)    /* NOTHING */
+#endif
+
 static void *pool_page_alloc_meta(struct pool *, int);
 static void pool_page_free_meta(struct pool *, void *);
 
Line 459 (1.203) / Line 471 (1.204): pool_init(struct pool *pp, size_t size,
     const char *wchan, struct pool_allocator *palloc, int ipl)
 {
         struct pool *pp1;
-        size_t trysize, phsize;
+        size_t trysize, phsize, prsize;
         int off, slack;
 
 #ifdef DEBUG
Line 506 (1.203) / Line 518 (1.204): pool_init(struct pool *pp, size_t size,
         if (align == 0)
                 align = ALIGN(1);
 
-        if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
-                size = sizeof(struct pool_item);
+        prsize = size;
+        if ((flags & PR_NOTOUCH) == 0 && prsize < sizeof(struct pool_item))
+                prsize = sizeof(struct pool_item);
 
-        size = roundup(size, align);
+        prsize = roundup(prsize, align);
 #ifdef DIAGNOSTIC
-        if (size > palloc->pa_pagesz)
-                panic("pool_init: pool item size (%zu) too large", size);
+        if (prsize > palloc->pa_pagesz)
+                panic("pool_init: pool item size (%zu) too large", prsize);
 #endif
 
         /*
Line 529 (1.203) / Line 542 (1.204): pool_init(struct pool *pp, size_t size,
         pp->pr_maxpages = UINT_MAX;
         pp->pr_roflags = flags;
         pp->pr_flags = 0;
-        pp->pr_size = size;
+        pp->pr_size = prsize;
         pp->pr_align = align;
         pp->pr_wchan = wchan;
         pp->pr_alloc = palloc;
Line 544 (1.203) / Line 557 (1.204): pool_init(struct pool *pp, size_t size,
         pp->pr_drain_hook = NULL;
         pp->pr_drain_hook_arg = NULL;
         pp->pr_freecheck = NULL;
+        pool_redzone_init(pp, size);
 
         /*
          * Decide whether to put the page header off page to avoid
Line 935 (1.203) / Line 949 (1.204): pool_get(struct pool *pp, int flags)
         mutex_exit(&pp->pr_lock);
         KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
         FREECHECK_OUT(&pp->pr_freecheck, v);
+        pool_redzone_fill(pp, v);
         return (v);
 }
 
Line 948 (1.203) / Line 963 (1.204): pool_do_put(struct pool *pp, void *v, st
         struct pool_item_header *ph;
 
         KASSERT(mutex_owned(&pp->pr_lock));
+        pool_redzone_check(pp, v);
         FREECHECK_IN(&pp->pr_freecheck, v);
         LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
 
Line 2188 (1.203) / Line 2204 (1.204): pool_cache_get_slow(pool_cache_cpu_t *cc
         }
 
         FREECHECK_OUT(&pc->pc_freecheck, object);
+        pool_redzone_fill(&pc->pc_pool, object);
         return false;
 }
 
Line 2233 (1.203) / Line 2250 (1.204): pool_cache_get_paddr(pool_cache_t pc, in
                         cc->cc_hits++;
                         splx(s);
                         FREECHECK_OUT(&pc->pc_freecheck, object);
+                        pool_redzone_fill(&pc->pc_pool, object);
                         return object;
                 }
 
Line 2376 (1.203) / Line 2394 (1.204): pool_cache_put_paddr(pool_cache_t pc, vo
         int s;
 
         KASSERT(object != NULL);
+        pool_redzone_check(&pc->pc_pool, object);
         FREECHECK_IN(&pc->pc_freecheck, object);
 
         /* Lock out interrupts and disable preemption. */
Line 2597 (1.203) / Line 2616 (1.204): pool_page_free_meta(struct pool *pp, voi
         vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
 }
 
+#ifdef POOL_REDZONE
+#if defined(_LP64)
+# define PRIME 0x9e37fffffffc0000UL
+#else /* defined(_LP64) */
+# define PRIME 0x9e3779b1
+#endif /* defined(_LP64) */
+#define STATIC_BYTE     0xFE
+CTASSERT(POOL_REDZONE_SIZE > 1);
+
+static inline uint8_t
+pool_pattern_generate(const void *p)
+{
+        return (uint8_t)(((uintptr_t)p) * PRIME
+           >> ((sizeof(uintptr_t) - sizeof(uint8_t))) * CHAR_BIT);
+}
+
+static void
+pool_redzone_init(struct pool *pp, size_t requested_size)
+{
+        size_t nsz;
+
+        if (pp->pr_roflags & PR_NOTOUCH) {
+                pp->pr_reqsize = 0;
+                pp->pr_redzone = false;
+                return;
+        }
+
+        /*
+         * We may have extended the requested size earlier; check if
+         * there's naturally space in the padding for a red zone.
+         */
+        if (pp->pr_size - requested_size >= POOL_REDZONE_SIZE) {
+                pp->pr_reqsize = requested_size;
+                pp->pr_redzone = true;
+                return;
+        }
+
+        /*
+         * No space in the natural padding; check if we can extend a
+         * bit the size of the pool.
+         */
+        nsz = roundup(pp->pr_size + POOL_REDZONE_SIZE, pp->pr_align);
+        if (nsz <= pp->pr_alloc->pa_pagesz) {
+                /* Ok, we can */
+                pp->pr_size = nsz;
+                pp->pr_reqsize = requested_size;
+                pp->pr_redzone = true;
+        } else {
+                /* No space for a red zone... snif :'( */
+                pp->pr_reqsize = 0;
+                pp->pr_redzone = false;
+                printf("pool redzone disabled for '%s'\n", pp->pr_wchan);
+        }
+}
+
+static void
+pool_redzone_fill(struct pool *pp, void *p)
+{
+        uint8_t *cp, pat;
+        const uint8_t *ep;
+
+        if (!pp->pr_redzone)
+                return;
+
+        cp = (uint8_t *)p + pp->pr_reqsize;
+        ep = cp + POOL_REDZONE_SIZE;
+
+        /*
+         * We really don't want the first byte of the red zone to be '\0';
+         * an off-by-one in a string may not be properly detected.
+         */
+        pat = pool_pattern_generate(cp);
+        *cp = (pat == '\0') ? STATIC_BYTE: pat;
+        cp++;
+
+        while (cp < ep) {
+                *cp = pool_pattern_generate(cp);
+                cp++;
+        }
+}
+
+static void
+pool_redzone_check(struct pool *pp, void *p)
+{
+        uint8_t *cp, pat, expected;
+        const uint8_t *ep;
+
+        if (!pp->pr_redzone)
+                return;
+
+        cp = (uint8_t *)p + pp->pr_reqsize;
+        ep = cp + POOL_REDZONE_SIZE;
+
+        pat = pool_pattern_generate(cp);
+        expected = (pat == '\0') ? STATIC_BYTE: pat;
+        if (expected != *cp) {
+                panic("%s: %p: 0x%02x != 0x%02x\n",
+                   __func__, cp, *cp, expected);
+        }
+        cp++;
+
+        while (cp < ep) {
+                expected = pool_pattern_generate(cp);
+                if (*cp != expected) {
+                        panic("%s: %p: 0x%02x != 0x%02x\n",
+                           __func__, cp, *cp, expected);
+                }
+                cp++;
+        }
+}
+
+#endif /* POOL_REDZONE */
+
+
 #ifdef POOL_SUBPAGE
 /* Sub-page allocator, for machines with large hardware pages. */
 void *

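To make the sizing logic in pool_redzone_init() concrete, here is an illustrative walk-through (the numbers are made up, not taken from the source, and assume the requested size is at least sizeof(struct pool_item)): a pool created with size 29 and align 8 gets pr_size = roundup(29, 8) = 32, so the 3 bytes of natural alignment padding already hold the 2-byte red zone and pr_size is left unchanged. A pool created with size 32 and align 8 has no padding, so pr_size is grown to roundup(32 + POOL_REDZONE_SIZE, 8) = 40, provided 40 still fits within the allocator's pa_pagesz; otherwise the red zone is simply disabled for that pool.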
Legend:
Removed from v.1.203  
changed lines
  Added in v.1.204

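For readers who want to experiment with the guard-byte scheme outside the kernel, the following is a minimal userland sketch of the same idea. It mirrors pool_pattern_generate(), pool_redzone_fill() and pool_redzone_check() from the diff above, but the names pattern_generate(), redzone_fill(), redzone_check() and REDZONE_SIZE are illustrative only; malloc() and a plain return code stand in for the pool structures and panic().

#include <assert.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Same constants as in the diff: a multiplicative constant and a
 * fallback byte used when the generated pattern would be '\0'. */
#if UINTPTR_MAX > 0xffffffffu
#define PRIME 0x9e37fffffffc0000UL
#else
#define PRIME 0x9e3779b1
#endif
#define STATIC_BYTE 0xFE
#define REDZONE_SIZE 2

/* Derive a per-address guard byte: multiply the address by PRIME and
 * keep the top byte, so neighbouring guard bytes differ. */
static uint8_t
pattern_generate(const void *p)
{
        return (uint8_t)(((uintptr_t)p * PRIME) >>
            ((sizeof(uintptr_t) - sizeof(uint8_t)) * CHAR_BIT));
}

/* Write the guard bytes just past the caller-visible object size. */
static void
redzone_fill(uint8_t *obj, size_t reqsize)
{
        uint8_t *cp = obj + reqsize;
        const uint8_t *ep = cp + REDZONE_SIZE;
        uint8_t pat = pattern_generate(cp);

        /* The first byte must not be '\0', or a string overrunning by
         * one NUL terminator would go unnoticed. */
        *cp++ = (pat == '\0') ? STATIC_BYTE : pat;
        while (cp < ep) {
                *cp = pattern_generate(cp);
                cp++;
        }
}

/* Verify the guard bytes; returns 0 if intact, -1 if overwritten. */
static int
redzone_check(const uint8_t *obj, size_t reqsize)
{
        const uint8_t *cp = obj + reqsize;
        const uint8_t *ep = cp + REDZONE_SIZE;
        uint8_t pat = pattern_generate(cp);
        uint8_t expected = (pat == '\0') ? STATIC_BYTE : pat;

        if (*cp++ != expected)
                return -1;
        while (cp < ep) {
                if (*cp != pattern_generate(cp))
                        return -1;
                cp++;
        }
        return 0;
}

int
main(void)
{
        size_t reqsize = 13;
        uint8_t *obj = malloc(reqsize + REDZONE_SIZE);

        if (obj == NULL)
                return 1;
        redzone_fill(obj, reqsize);
        assert(redzone_check(obj, reqsize) == 0);

        obj[reqsize] ^= 0xff;           /* simulate a one-byte overrun */
        assert(redzone_check(obj, reqsize) != 0);

        printf("overrun detected as expected\n");
        free(obj);
        return 0;
}

Deriving the expected guard byte from its own address, rather than using one fixed canary value, means every object's red zone holds a different pattern, and the STATIC_BYTE fallback keeps the first guard byte non-zero so that an off-by-one NUL terminator written by string code is still caught, as the comment in pool_redzone_fill() points out.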