Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v
rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v: warning: Unknown phrases like `commitid ...;' are present.
retrieving revision 1.70
retrieving revision 1.76.6.1
diff -u -p -r1.70 -r1.76.6.1
--- src/sys/kern/subr_pool.c	2002/03/09 01:33:34	1.70
+++ src/sys/kern/subr_pool.c	2002/11/11 23:31:43	1.76.6.1
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_pool.c,v 1.70 2002/03/09 01:33:34 thorpej Exp $	*/
+/*	$NetBSD: subr_pool.c,v 1.76.6.1 2002/11/11 23:31:43 he Exp $	*/
 
 /*-
  * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
@@ -38,7 +38,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.70 2002/03/09 01:33:34 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.76.6.1 2002/11/11 23:31:43 he Exp $");
 
 #include "opt_pool.h"
 #include "opt_poollog.h"
@@ -145,16 +145,6 @@ struct pool_item {
 /* The cache group pool. */
 static struct pool pcgpool;
 
-/* The pool cache group. */
-#define	PCG_NOBJECTS	16
-struct pool_cache_group {
-	TAILQ_ENTRY(pool_cache_group)
-		pcg_list;	/* link in the pool cache's group list */
-	u_int	pcg_avail;	/* # available objects */
-			/* pointers to the objects */
-	void	*pcg_objects[PCG_NOBJECTS];
-};
-
 static void	pool_cache_reclaim(struct pool_cache *);
 
 static int	pool_catchup(struct pool *);
@@ -425,7 +415,7 @@ pool_init(struct pool *pp, size_t size,
 	if (size < sizeof(struct pool_item))
 		size = sizeof(struct pool_item);
 
-	size = ALIGN(size);
+	size = roundup(size, align);
 #ifdef DIAGNOSTIC
 	if (size > palloc->pa_pagesz)
 		panic("pool_init: pool item size (%lu) too large",
@@ -959,7 +949,8 @@ pool_do_put(struct pool *pp, void *v)
 	 */
 	if (ph->ph_nmissing == 0) {
 		pp->pr_nidle++;
-		if (pp->pr_npages > pp->pr_maxpages) {
+		if (pp->pr_npages > pp->pr_maxpages ||
+		    (pp->pr_alloc->pa_flags & PA_WANT) != 0) {
 			pr_rmpage(pp, ph, NULL);
 		} else {
 			TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
@@ -1047,7 +1038,7 @@ pool_prime(struct pool *pp, int n)
 {
 	struct pool_item_header *ph;
 	caddr_t cp;
-	int newpages, error = 0;
+	int newpages;
 
 	simple_lock(&pp->pr_slock);
 
@@ -1061,7 +1052,6 @@ pool_prime(struct pool *pp, int n)
 		simple_lock(&pp->pr_slock);
 
 		if (__predict_false(cp == NULL || ph == NULL)) {
-			error = ENOMEM;
 			if (cp != NULL)
 				pool_allocator_free(pp, cp);
 			break;
@@ -1135,6 +1125,8 @@ pool_prime_page(struct pool *pp, caddr_t
 	while (n--) {
 		pi = (struct pool_item *)cp;
 
+		KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
+
 		/* Insert on page list */
 		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
 #ifdef DIAGNOSTIC
@@ -1159,9 +1151,7 @@ pool_prime_page(struct pool *pp, caddr_t
  *
 * Note 1, we never wait for memory here, we let the caller decide what to do.
 *
- * Note 2, this doesn't work with static pools.
- *
- * Note 3, we must be called with the pool already locked, and we return
+ * Note 2, we must be called with the pool already locked, and we return
 * with it locked.
 */
static int
@@ -1199,7 +1189,6 @@ pool_catchup(struct pool *pp)
void
pool_setlowat(struct pool *pp, int n)
{
-	int error;
 
	simple_lock(&pp->pr_slock);
 
@@ -1209,7 +1198,7 @@ pool_setlowat(struct pool *pp, int n)
	    : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
 
	/* Make sure we're caught up with the newly-set low water mark. */
-	if (POOL_NEEDS_CATCHUP(pp) && (error = pool_catchup(pp) != 0)) {
+	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
@@ -1955,9 +1944,9 @@ pool_allocator_alloc(struct pool *org, i
		TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
		if (pp == org)
			continue;
-		simple_unlock(&pa->pa_list);
+		simple_unlock(&pa->pa_slock);
		freed = pool_reclaim(pp);
-		simple_lock(&pa->pa_slock);
+		simple_lock(&pa->pa_slock);
	} while ((pp = TAILQ_FIRST(&pa->pa_list)) != start && freed == 0);
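For context on the pool_init() hunk above: the old code rounded the item size with ALIGN(), which honors only the machine's default alignment, whereas roundup(size, align) honors the per-pool align argument, so pools created with a stricter alignment keep their items aligned. Below is a minimal user-space sketch of the difference; the macro definitions and the constants are illustrative assumptions, not the kernel's actual headers.

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-ins for the kernel macros; values are assumptions. */
#define ALIGNBYTES	7	/* machine with 8-byte default alignment */
#define ALIGN(x)	(((x) + ALIGNBYTES) & ~(size_t)ALIGNBYTES)
#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

int
main(void)
{
	size_t size = 40;	/* hypothetical pool item size */
	size_t align = 64;	/* hypothetical pool with 64-byte alignment */

	/* Old behavior: rounds only to the machine's default alignment. */
	printf("ALIGN(%zu) = %zu\n", size, ALIGN(size));	/* 40 */

	/* New behavior: honors the pool's requested alignment. */
	printf("roundup(%zu, %zu) = %zu\n", size, align,
	    roundup(size, align));				/* 64 */

	return 0;
}

The new KASSERT in pool_prime_page() enforces the same invariant at item-carving time; its mask test, (addr & (align - 1)) == 0, relies on align being a power of two.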
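The pool_setlowat() hunk also removes a latent precedence bug along with the unused variable: (error = pool_catchup(pp) != 0) parses as error = (pool_catchup(pp) != 0) because != binds tighter than =, so error could only ever hold 0 or 1, never the real error code. A minimal sketch of the pitfall, with a hypothetical f() standing in for pool_catchup():

#include <stdio.h>

/* Hypothetical stand-in for pool_catchup(); returns a nonzero error
 * code (12 happens to be ENOMEM's value on NetBSD). */
static int
f(void)
{
	return 12;
}

int
main(void)
{
	int error;

	/* Buggy form from the old code: parses as error = (f() != 0),
	 * so error ends up as 1 rather than the real code. */
	error = f() != 0;
	printf("buggy:   error = %d\n", error);		/* prints 1 */

	/* Intended form: parenthesize the assignment first. */
	if ((error = f()) != 0)
		printf("correct: error = %d\n", error);	/* prints 12 */

	return 0;
}

Since the value was never used afterwards, the revision simply drops error instead of fixing the parenthesization.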