
Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/sys/kern/subr_pool.c between version 1.42 and 1.43

version 1.42 (2000/12/06 18:20:52) to version 1.43 (2000/12/07 05:45:57)
Line 1
  /*      $NetBSD$        */
 
  /*-
- * Copyright (c) 1997, 1999 The NetBSD Foundation, Inc.
+ * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
Line 100  struct pool_item {
         TAILQ_ENTRY(pool_item)  pi_list;
 };
 
 #define PR_HASH_INDEX(pp,addr) \
         (((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))
 
+/*
+ * Pool cache management.
+ *
+ * Pool caches provide a way for constructed objects to be cached by the
+ * pool subsystem.  This can lead to performance improvements by avoiding
+ * needless object construction/destruction; it is deferred until absolutely
+ * necessary.
+ *
+ * Caches are grouped into cache groups.  Each cache group references
+ * up to 16 constructed objects.  When a cache allocates an object
+ * from the pool, it calls the object's constructor and places it into
+ * a cache group.  When a cache group frees an object back to the pool,
+ * it first calls the object's destructor.  This allows the object to
+ * persist in constructed form while freed to the cache.
+ *
+ * Multiple caches may exist for each pool.  This allows a single
+ * object type to have multiple constructed forms.  The pool references
+ * each cache, so that when a pool is drained by the pagedaemon, it can
+ * drain each individual cache as well.  Each time a cache is drained,
+ * the most idle cache group is freed to the pool in its entirety.
+ *
+ * Pool caches are laid on top of pools.  By layering them, we can avoid
+ * the complexity of cache management for pools which would not benefit
+ * from it.
+ */
+
+/* The cache group pool. */
+static struct pool pcgpool;
+
+/* The pool cache group. */
+#define PCG_NOBJECTS            16
+struct pool_cache_group {
+        TAILQ_ENTRY(pool_cache_group)
+                pcg_list;       /* link in the pool cache's group list */
+        u_int   pcg_avail;      /* # available objects */
+                                /* pointers to the objects */
+        void    *pcg_objects[PCG_NOBJECTS];
+};
+
+static void     pool_cache_reclaim(struct pool_cache *);
+
 static int      pool_catchup(struct pool *);
 static void     pool_prime_page(struct pool *, caddr_t);
Line 384/422  pool_init(struct pool *pp, size_t size, 
                 size = sizeof(struct pool_item);
 
         size = ALIGN(size);
-        if (size >= pagesz)
+        if (size > pagesz)
                 panic("pool_init: pool item size (%lu) too large",
                       (u_long)size);
 
Line 392/430  pool_init(struct pool *pp, size_t size, 
          * Initialize the pool structure.
          */
         TAILQ_INIT(&pp->pr_pagelist);
+        TAILQ_INIT(&pp->pr_cachelist);
         pp->pr_curpage = NULL;
         pp->pr_npages = 0;
         pp->pr_minitems = 0;
Line 447/486  pool_init(struct pool *pp, size_t size, 
          */
         pp->pr_itemoffset = ioff = ioff % align;
         pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
+        KASSERT(pp->pr_itemsperpage != 0);
 
         /*
          * Use the slack between the chunks and the page header
Line 479/519  pool_init(struct pool *pp, size_t size, 
         simple_lock_init(&pp->pr_slock);
 
         /*
-         * Initialize private page header pool if we haven't done so yet.
+         * Initialize private page header pool and cache magazine pool if we
+         * haven't done so yet.
          * XXX LOCKING.
          */
         if (phpool.pr_size == 0) {
                 pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
-                           0, "phpool", 0, 0, 0, 0);
+                     0, "phpool", 0, 0, 0, 0);
+                pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
+                     0, "pcgpool", 0, 0, 0, 0);
         }
 
         /* Insert into the list of all pools. */
Line 500/543  void
 pool_destroy(struct pool *pp)
 {
         struct pool_item_header *ph;
+        struct pool_cache *pc;
+
+        /* Destroy all caches for this pool. */
+        while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
+                pool_cache_destroy(pc);
 
 #ifdef DIAGNOSTIC
         if (pp->pr_nout != 0) {
Line 762/810  _pool_get(struct pool *pp, int flags, co
 }
 
 /*
- * Return resource to the pool; must be called at appropriate spl level
+ * Internal version of pool_put().  Pool is already locked/entered.
  */
-void
-_pool_put(struct pool *pp, void *v, const char *file, long line)
+static void
+pool_do_put(struct pool *pp, void *v, const char *file, long line)
 {
         struct pool_item *pi = v;
         struct pool_item_header *ph;
Line 774/822  _pool_put(struct pool *pp, void *v, cons
 
         page = (caddr_t)((u_long)v & pp->pr_pagemask);
 
-        simple_lock(&pp->pr_slock);
-        pr_enter(pp, file, line);
 
 #ifdef DIAGNOSTIC
         if (__predict_false(pp->pr_nout == 0)) {
                 printf("pool %s: putting with none out\n",
Line 829/874  _pool_put(struct pool *pp, void *v, cons
                 pp->pr_flags &= ~PR_WANTED;
                 if (ph->ph_nmissing == 0)
                         pp->pr_nidle++;
-                pr_leave(pp);
-                simple_unlock(&pp->pr_slock);
                 wakeup((caddr_t)pp);
                 return;
         }
Line 894/937  _pool_put(struct pool *pp, void *v, cons
                 TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
                 pp->pr_curpage = ph;
         }
+}
+
+/*
+ * Return resource to the pool; must be called at appropriate spl level
+ */
+void
+_pool_put(struct pool *pp, void *v, const char *file, long line)
+{
+
+        simple_lock(&pp->pr_slock);
+        pr_enter(pp, file, line);
+
+        pool_do_put(pp, v, file, line);
+
         pr_leave(pp);
         simple_unlock(&pp->pr_slock);
 }
 
 /*
Line 1188/1243  void
 _pool_reclaim(struct pool *pp, const char *file, long line)
 {
         struct pool_item_header *ph, *phnext;
+        struct pool_cache *pc;
         struct timeval curtime;
         int s;
 
Line 1198/1254  _pool_reclaim(struct pool *pp, const cha
                 return;
         pr_enter(pp, file, line);
 
+        /*
+         * Reclaim items from the pool's caches.
+         */
+        for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL;
+             pc = TAILQ_NEXT(pc, pc_poollist))
+                pool_cache_reclaim(pc);
+
         s = splclock();
         curtime = mono_time;
         splx(s);
Line 1446/1509  out:
         simple_unlock(&pp->pr_slock);
         return (r);
 }
+
+/*
+ * pool_cache_init:
+ *
+ *      Initialize a pool cache.
+ *
+ *      NOTE: If the pool must be protected from interrupts, we expect
+ *      to be called at the appropriate interrupt priority level.
+ */
+void
+pool_cache_init(struct pool_cache *pc, struct pool *pp,
+    int (*ctor)(void *, void *, int),
+    void (*dtor)(void *, void *),
+    void *arg)
+{
+
+        TAILQ_INIT(&pc->pc_grouplist);
+        simple_lock_init(&pc->pc_slock);
+
+        pc->pc_allocfrom = NULL;
+        pc->pc_freeto = NULL;
+        pc->pc_pool = pp;
+
+        pc->pc_ctor = ctor;
+        pc->pc_dtor = dtor;
+        pc->pc_arg  = arg;
+
+        simple_lock(&pp->pr_slock);
+        TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
+        simple_unlock(&pp->pr_slock);
+}
+
+/*
+ * pool_cache_destroy:
+ *
+ *      Destroy a pool cache.
+ */
+void
+pool_cache_destroy(struct pool_cache *pc)
+{
+        struct pool *pp = pc->pc_pool;
+
+        /* First, invalidate the entire cache. */
+        pool_cache_invalidate(pc);
+
+        /* ...and remove it from the pool's cache list. */
+        simple_lock(&pp->pr_slock);
+        TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
+        simple_unlock(&pp->pr_slock);
+}
+
+static __inline void *
+pcg_get(struct pool_cache_group *pcg)
+{
+        void *object;
+        u_int idx;
+
+        KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
+        idx = --pcg->pcg_avail;
+
+        KASSERT(pcg->pcg_objects[idx] != NULL);
+        object = pcg->pcg_objects[idx];
+        pcg->pcg_objects[idx] = NULL;
+
+        return (object);
+}
+
+static __inline void
+pcg_put(struct pool_cache_group *pcg, void *object)
+{
+        u_int idx;
+
+        KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
+        idx = pcg->pcg_avail++;
+
+        KASSERT(pcg->pcg_objects[idx] == NULL);
+        pcg->pcg_objects[idx] = object;
+}
+
+/*
+ * pool_cache_get:
+ *
+ *      Get an object from a pool cache.
+ */
+void *
+pool_cache_get(struct pool_cache *pc, int flags)
+{
+        struct pool_cache_group *pcg;
+        void *object;
+
+        simple_lock(&pc->pc_slock);
+
+        if ((pcg = pc->pc_allocfrom) == NULL) {
+                for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
+                     pcg = TAILQ_NEXT(pcg, pcg_list)) {
+                        if (pcg->pcg_avail != 0) {
+                                pc->pc_allocfrom = pcg;
+                                goto have_group;
+                        }
+                }
+
+                /*
+                 * No groups with any available objects.  Allocate
+                 * a new object, construct it, and return it to
+                 * the caller.  We will allocate a group, if necessary,
+                 * when the object is freed back to the cache.
+                 */
+                simple_unlock(&pc->pc_slock);
+                object = pool_get(pc->pc_pool, flags);
+                if (object != NULL && pc->pc_ctor != NULL) {
+                        if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
+                                pool_put(pc->pc_pool, object);
+                                return (NULL);
+                        }
+                }
+                return (object);
+        }
+
+ have_group:
+        object = pcg_get(pcg);
+
+        if (pcg->pcg_avail == 0)
+                pc->pc_allocfrom = NULL;
+
+        simple_unlock(&pc->pc_slock);
+
+        return (object);
+}
+
+/*
+ * pool_cache_put:
+ *
+ *      Put an object back to the pool cache.
+ */
+void
+pool_cache_put(struct pool_cache *pc, void *object)
+{
+        struct pool_cache_group *pcg;
+
+        simple_lock(&pc->pc_slock);
+
+        if ((pcg = pc->pc_freeto) == NULL) {
+                for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
+                     pcg = TAILQ_NEXT(pcg, pcg_list)) {
+                        if (pcg->pcg_avail != PCG_NOBJECTS) {
+                                pc->pc_freeto = pcg;
+                                goto have_group;
+                        }
+                }
+
+                /*
+                 * No empty groups to free the object to.  Attempt to
+                 * allocate one.  We don't unlock the cache here, since
+                 * we never block.
+                 */
+                pcg = pool_get(&pcgpool, PR_NOWAIT);
+                if (pcg != NULL) {
+                        memset(pcg, 0, sizeof(*pcg));
+                        TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
+                        pc->pc_freeto = pcg;
+                        goto have_group;
+                }
+
+                simple_unlock(&pc->pc_slock);
+
+                /*
+                 * Unable to allocate a cache group; destruct the object
+                 * and free it back to the pool.
+                 */
+                if (pc->pc_dtor != NULL)
+                        (*pc->pc_dtor)(pc->pc_arg, object);
+                pool_put(pc->pc_pool, object);
+                return;
+        }
+
+ have_group:
+        pcg_put(pcg, object);
+
+        if (pcg->pcg_avail == PCG_NOBJECTS)
+                pc->pc_freeto = NULL;
+
+        simple_unlock(&pc->pc_slock);
+}
+
+/*
+ * pool_cache_do_invalidate:
+ *
+ *      This internal function implements pool_cache_invalidate() and
+ *      pool_cache_reclaim().
+ */
+static void
+pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
+    void (*putit)(struct pool *, void *, const char *, long))
+{
+        struct pool_cache_group *pcg, *npcg;
+        void *object;
+
+        for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
+             pcg = npcg) {
+                npcg = TAILQ_NEXT(pcg, pcg_list);
+                while (pcg->pcg_avail != 0) {
+                        object = pcg_get(pcg);
+                        if (pc->pc_dtor != NULL)
+                                (*pc->pc_dtor)(pc->pc_arg, object);
+                        (*putit)(pc->pc_pool, object, __FILE__, __LINE__);
+                }
+                if (free_groups) {
+                        TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
+                        pool_put(&pcgpool, pcg);
+                }
+        }
+}
+
+/*
+ * pool_cache_invalidate:
+ *
+ *      Invalidate a pool cache (destruct and release all of the
+ *      cached objects).
+ */
+void
+pool_cache_invalidate(struct pool_cache *pc)
+{
+
+        simple_lock(&pc->pc_slock);
+        pool_cache_do_invalidate(pc, 0, _pool_put);
+        simple_unlock(&pc->pc_slock);
+}
+
+/*
+ * pool_cache_reclaim:
+ *
+ *      Reclaim a pool cache for pool_reclaim().
+ */
+static void
+pool_cache_reclaim(struct pool_cache *pc)
+{
+
+        /*
+         * We're locking in the opposite order (pool already
+         * locked in pool_reclaim()), so use a try-lock instead.
+         */
+        if (simple_lock_try(&pc->pc_slock) == 0)
+                return;
+        pool_cache_do_invalidate(pc, 1, pool_do_put);
+        simple_unlock(&pc->pc_slock);
+}

Legend:
  -  removed from v.1.42
  +  added in v.1.43
