Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v
retrieving revision 1.89
retrieving revision 1.96
diff -u -p -r1.89 -r1.96
--- src/sys/kern/subr_pool.c	2003/12/29 16:04:58	1.89
+++ src/sys/kern/subr_pool.c	2004/06/20 18:19:27	1.96
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_pool.c,v 1.89 2003/12/29 16:04:58 yamt Exp $	*/
+/*	$NetBSD: subr_pool.c,v 1.96 2004/06/20 18:19:27 thorpej Exp $	*/
 
 /*-
  * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
@@ -38,7 +38,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.89 2003/12/29 16:04:58 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.96 2004/06/20 18:19:27 thorpej Exp $");
 
 #include "opt_pool.h"
 #include "opt_poollog.h"
@@ -317,6 +317,8 @@ pr_rmpage(struct pool *pp, struct pool_i
 {
 	int s;
 
+	LOCK_ASSERT(!simple_lock_held(&pp->pr_slock) || pq != NULL);
+
 	/*
 	 * If the page was idle, decrement the idle page count.
 	 */
@@ -336,12 +338,13 @@ pr_rmpage(struct pool *pp, struct pool_i
 	 * Unlink a page from the pool and release it (or queue it for release).
 	 */
 	LIST_REMOVE(ph, ph_pagelist);
+	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
+		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
 	if (pq) {
 		LIST_INSERT_HEAD(pq, ph, ph_pagelist);
 	} else {
 		pool_allocator_free(pp, ph->ph_page);
 		if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
-			SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
 			s = splvm();
 			pool_put(&phpool, ph);
 			splx(s);
@@ -354,6 +357,21 @@ pr_rmpage(struct pool *pp, struct pool_i
 }
 
 /*
+ * Initialize all the pools listed in the "pools" link set.
+ */
+void
+link_pool_init(void)
+{
+	__link_set_decl(pools, struct link_pool_init);
+	struct link_pool_init * const *pi;
+
+	__link_set_foreach(pi, pools)
+		pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
+		    (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
+		    (*pi)->palloc);
+}
+
+/*
  * Initialize the given pool resource structure.
  *
  * We export this routine to allow other kernel parts to declare
@@ -364,6 +382,8 @@ pool_init(struct pool *pp, size_t size,
     const char *wchan, struct pool_allocator *palloc)
 {
 	int off, slack;
+	size_t trysize, phsize;
+	int s;
 
 #ifdef POOL_DIAGNOSTIC
 	/*
@@ -455,16 +475,26 @@ pool_init(struct pool *pp, size_t size,
 
 	/*
 	 * Decide whether to put the page header off page to avoid
-	 * wasting too large a part of the page. Off-page page headers
-	 * go on a hash table, so we can match a returned item
-	 * with its header based on the page address.
-	 * We use 1/16 of the page size as the threshold (XXX: tune)
+	 * wasting too large a part of the page or too big item.
+	 * Off-page page headers go on a hash table, so we can match
+	 * a returned item with its header based on the page address.
+	 * We use 1/16 of the page size and about 8 times of the item
+	 * size as the threshold (XXX: tune)
+	 *
+	 * However, we'll put the header into the page if we can put
+	 * it without wasting any items.
+	 *
+	 * Silently enforce `0 <= ioff < align'.
 	 */
-	if (pp->pr_size < palloc->pa_pagesz/16) {
+	pp->pr_itemoffset = ioff %= align;
+	/* See the comment below about reserved bytes. */
+	trysize = palloc->pa_pagesz - ((align - ioff) % align);
+	phsize = ALIGN(sizeof(struct pool_item_header));
+	if (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
+	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size) {
 		/* Use the end of the page for the page header */
 		pp->pr_roflags |= PR_PHINPAGE;
-		pp->pr_phoffset = off = palloc->pa_pagesz -
-		    ALIGN(sizeof(struct pool_item_header));
+		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
 	} else {
 		/* The page header will be taken from our page header pool */
 		pp->pr_phoffset = 0;
@@ -476,10 +506,7 @@ pool_init(struct pool *pp, size_t size,
 	 * Alignment is to take place at `ioff' within the item. This means
 	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
-	 *
-	 * Silently enforce `0 <= ioff < align'.
 	 */
-	pp->pr_itemoffset = ioff = ioff % align;
 	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
 	KASSERT(pp->pr_itemsperpage != 0);
 
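The header-placement hunks above are the subtle part of this revision range.
pool_init() now keeps the page header inside the page in two cases: when the
item is small (under both 1/16 of the page size and 8 times the aligned header
size), or when the page's trailing slack can absorb the header without
reducing the item count; the `Silently enforce' comment and the `ioff % align'
computation simply move up so that `ioff' is normalized before `trysize' is
computed, which is why the second hunk deletes them below. A standalone sketch
of the new test follows; it is not kernel code, and the 4096-byte page,
48-byte header, and the two item sizes are assumed values chosen to show both
outcomes (ioff is taken as 0, so no bytes are reserved for alignment):

#include <stdio.h>

#define ALIGN_TO(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	unsigned pagesz = 4096;			/* assumed pa_pagesz */
	unsigned phsize = ALIGN_TO(48u, 8u);	/* assumed ALIGN()ed header size */
	unsigned sizes[2] = { 256, 260 };	/* illustrative item sizes */
	unsigned trysize = pagesz;		/* ioff == 0: nothing reserved */
	int i;

	for (i = 0; i < 2; i++) {
		unsigned size = sizes[i];
		/* First clause: item small relative to page and header. */
		int small = size < MIN(pagesz / 16, phsize << 3);
		/* Second clause: in-page header costs no items. */
		int header_is_free =
		    trysize / size == (trysize - phsize) / size;

		printf("%4u-byte items: %2u per page off-page, %2u in-page -> %s\n",
		    size, trysize / size, (trysize - phsize) / size,
		    (small || header_is_free) ? "PR_PHINPAGE" : "off-page header");
	}
	return 0;
}

With 256-byte items the in-page header would cost one of the 16 items, so the
header stays off page; 260-byte items fill the page 15 times with 196 bytes
left over, so the header rides along for free and PR_PHINPAGE is set.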
@@ -540,9 +567,11 @@ pool_init(struct pool *pp, size_t size,
 	simple_unlock(&pool_head_slock);
 
 	/* Insert this into the list of pools using this allocator. */
+	s = splvm();
 	simple_lock(&palloc->pa_slock);
 	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
 	simple_unlock(&palloc->pa_slock);
+	splx(s);
 }
 
 /*
@@ -553,11 +582,14 @@ pool_destroy(struct pool *pp)
 {
 	struct pool_item_header *ph;
 	struct pool_cache *pc;
+	int s;
 
 	/* Locking order: pool_allocator -> pool */
+	s = splvm();
 	simple_lock(&pp->pr_alloc->pa_slock);
 	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
 	simple_unlock(&pp->pr_alloc->pa_slock);
+	splx(s);
 
 	/* Destroy all caches for this pool. */
 	while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
@@ -638,6 +670,9 @@ pool_get(struct pool *pp, int flags)
 	void *v;
 
 #ifdef DIAGNOSTIC
+	if (__predict_false(pp->pr_itemsperpage == 0))
+		panic("pool_get: pool %p: pr_itemsperpage is zero, "
+		    "pool not initialized?", pp);
 	if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
 			    (flags & PR_WAITOK) != 0))
 		panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
@@ -733,13 +768,14 @@ pool_get(struct pool *pp, int flags)
 			v = pool_allocator_alloc(pp, flags);
 			if (__predict_true(v != NULL))
 				ph = pool_alloc_item_header(pp, v, flags);
-			simple_lock(&pp->pr_slock);
-			pr_enter(pp, file, line);
 
 			if (__predict_false(v == NULL || ph == NULL)) {
 				if (v != NULL)
 					pool_allocator_free(pp, v);
 
+				simple_lock(&pp->pr_slock);
+				pr_enter(pp, file, line);
+
 				/*
 				 * We were unable to allocate a page or item
 				 * header, but we released the lock during
@@ -771,6 +807,8 @@ pool_get(struct pool *pp, int flags)
 		}
 
 		/* We have more memory; add it to the pool */
+		simple_lock(&pp->pr_slock);
+		pr_enter(pp, file, line);
 		pool_prime_page(pp, v, ph);
 		pp->pr_npagealloc++;
 
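The pool_get() hunks above, and the matching pool_prime() and pool_catchup()
hunks further down, all make the same change: pr_slock is no longer taken
immediately after pool_allocator_alloc() returns, but only once there is
shared state to touch. The declared locking order is pool_allocator -> pool
(see the comment kept in pool_destroy()), and the old code called
pool_allocator_free() on the failure path with pr_slock already held, so the
reordering keeps the allocator entered with the pool lock released; the new
LOCK_ASSERTs in pool_allocator_alloc()/pool_allocator_free() then enforce
exactly that, and pool_do_put() below gets the same treatment by dropping the
lock around pr_rmpage(pp, ph, NULL), which is what the new assertion in
pr_rmpage() checks. A minimal user-space sketch of the pattern, using a
pthread mutex in place of the kernel's simple_lock; every name here is
invented for illustration:

#include <pthread.h>
#include <stdlib.h>

/* Invented stand-ins: fakepool plays struct pool, lock plays pr_slock. */
struct fakepool {
	pthread_mutex_t lock;
	unsigned npagealloc;
};

static int
grow_pool(struct fakepool *pp)
{
	void *page, *header;

	/* The allocator may sleep and has locks of its own, so the pool
	 * lock is NOT held here; this is the invariant the new
	 * LOCK_ASSERTs encode. */
	page = malloc(4096);			/* pool_allocator_alloc() */
	header = (page != NULL) ? malloc(64) : NULL; /* item header */

	if (page == NULL || header == NULL) {
		free(page);			/* "free", still unlocked */
		pthread_mutex_lock(&pp->lock);	/* reacquire only for the */
		/* ... shared-state error handling would run here ... */
		pthread_mutex_unlock(&pp->lock);
		return -1;
	}

	/* Resources in hand: take the lock to publish them, matching the
	 * relocated simple_lock()/pr_enter() calls. */
	pthread_mutex_lock(&pp->lock);
	pp->npagealloc++;			/* pool_prime_page() point */
	pthread_mutex_unlock(&pp->lock);

	free(header);				/* a real pool keeps these; */
	free(page);				/* freed to keep the sketch leak-free */
	return 0;
}

int
main(void)
{
	struct fakepool pool = { PTHREAD_MUTEX_INITIALIZER, 0 };

	return (grow_pool(&pool) == 0) ? 0 : 1;
}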
@@ -937,7 +975,9 @@ pool_do_put(struct pool *pp, void *v)
 	 * If this page is now empty, do one of two things:
 	 *
 	 * (1) If we have more pages than the page high water mark,
-	 *	free the page back to the system.
+	 *	free the page back to the system.  ONLY CONSIDER
+	 *	FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
+	 *	CLAIM.
 	 *
 	 * (2) Otherwise, move the page to the empty page list.
 	 *
@@ -946,9 +986,12 @@ pool_do_put(struct pool *pp, void *v)
 	 */
 	if (ph->ph_nmissing == 0) {
 		pp->pr_nidle++;
-		if (pp->pr_npages > pp->pr_maxpages ||
-		    (pp->pr_alloc->pa_flags & PA_WANT) != 0) {
+		if (pp->pr_npages > pp->pr_minpages &&
+		    (pp->pr_npages > pp->pr_maxpages ||
+		     (pp->pr_alloc->pa_flags & PA_WANT) != 0)) {
+			simple_unlock(&pp->pr_slock);
 			pr_rmpage(pp, ph, NULL);
+			simple_lock(&pp->pr_slock);
 		} else {
 			LIST_REMOVE(ph, ph_pagelist);
 			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
@@ -1034,14 +1077,15 @@ pool_prime(struct pool *pp, int n)
 		cp = pool_allocator_alloc(pp, PR_NOWAIT);
 		if (__predict_true(cp != NULL))
 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
-		simple_lock(&pp->pr_slock);
 
 		if (__predict_false(cp == NULL || ph == NULL)) {
 			if (cp != NULL)
 				pool_allocator_free(pp, cp);
+			simple_lock(&pp->pr_slock);
 			break;
 		}
 
+		simple_lock(&pp->pr_slock);
 		pool_prime_page(pp, cp, ph);
 		pp->pr_npagealloc++;
 		pp->pr_minpages++;
@@ -1069,6 +1113,8 @@ pool_prime_page(struct pool *pp, caddr_t
 	int n;
 	int s;
 
+	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
+
 #ifdef DIAGNOSTIC
 	if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
 		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
@@ -1158,13 +1204,14 @@ pool_catchup(struct pool *pp)
 		cp = pool_allocator_alloc(pp, PR_NOWAIT);
 		if (__predict_true(cp != NULL))
 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
-		simple_lock(&pp->pr_slock);
 
 		if (__predict_false(cp == NULL || ph == NULL)) {
 			if (cp != NULL)
 				pool_allocator_free(pp, cp);
 			error = ENOMEM;
+			simple_lock(&pp->pr_slock);
 			break;
 		}
 
+		simple_lock(&pp->pr_slock);
 		pool_prime_page(pp, cp, ph);
 		pp->pr_npagealloc++;
 	}
@@ -1315,7 +1362,6 @@ pool_reclaim(struct pool *pp)
 		if (pp->pr_roflags & PR_PHINPAGE) {
 			continue;
 		}
-		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
 		s = splvm();
 		pool_put(&phpool, ph);
 		splx(s);
@@ -1950,6 +1996,8 @@ pool_allocator_alloc(struct pool *org, i
 	int s, freed;
 	void *res;
 
+	LOCK_ASSERT(!simple_lock_held(&org->pr_slock));
+
 	do {
 		if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
 			return (res);
@@ -2020,6 +2068,8 @@ pool_allocator_free(struct pool *pp, voi
 	struct pool_allocator *pa = pp->pr_alloc;
 	int s;
 
+	LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
+
 	(*pa->pa_free)(pp, v);
 
 	s = splvm();
@@ -2063,15 +2113,21 @@ pool_page_free(struct pool *pp, void *v)
 void *
 pool_subpage_alloc(struct pool *pp, int flags)
 {
-
-	return (pool_get(&psppool, flags));
+	void *v;
+	int s;
+	s = splvm();
+	v = pool_get(&psppool, flags);
+	splx(s);
+	return v;
}
 
 void
 pool_subpage_free(struct pool *pp, void *v)
 {
-
+	int s;
+	s = splvm();
 	pool_put(&psppool, v);
+	splx(s);
 }
 
 /* We don't provide a real nointr allocator.  Maybe later. */
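The last two hunks matter because the subpage allocator nests one pool inside
another: pool_subpage_alloc() and pool_subpage_free() call pool_get() and
pool_put() on psppool, so they now raise the IPL themselves, the same
splvm()/splx() bracket the diff adds around pa_slock in pool_init() and
pool_destroy(), presumably because those paths can also be reached from code
running at raised IPL. As a rough user-space analogue only, with signal
blocking standing in for spl levels and every name invented for illustration:

#include <signal.h>
#include <stdio.h>

/* Invented stand-in: "shared" plays the psppool that both normal code
 * and the interrupt path manipulate. */
static volatile sig_atomic_t shared;

static void
fake_interrupt(int sig)
{
	(void)sig;
	shared++;		/* the interrupt-time user of the pool */
}

static void
protected_update(void)
{
	sigset_t set, oset;

	sigemptyset(&set);
	sigaddset(&set, SIGALRM);
	sigprocmask(SIG_BLOCK, &set, &oset);	/* s = splvm(); */
	shared++;		/* pool_get(&psppool, ...) / pool_put() */
	sigprocmask(SIG_SETMASK, &oset, NULL);	/* splx(s); */
}

int
main(void)
{
	signal(SIGALRM, fake_interrupt);
	protected_update();
	printf("shared = %d\n", (int)shared);
	return 0;
}

Blocking the signal while the shared object is updated mirrors what splvm()
does in the kernel: it keeps an interrupt-time user of the same resource from
running in the middle of the critical section.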