Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v
rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v: warning: Unknown phrases like `commitid ...;' are present.
retrieving revision 1.90
retrieving revision 1.93.2.1
diff -u -p -r1.90 -r1.93.2.1
--- src/sys/kern/subr_pool.c	2004/01/09 19:00:16	1.90
+++ src/sys/kern/subr_pool.c	2004/06/22 08:58:42	1.93.2.1
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_pool.c,v 1.90 2004/01/09 19:00:16 thorpej Exp $	*/
+/*	$NetBSD: subr_pool.c,v 1.93.2.1 2004/06/22 08:58:42 tron Exp $	*/
 
 /*-
  * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
@@ -38,7 +38,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.90 2004/01/09 19:00:16 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.93.2.1 2004/06/22 08:58:42 tron Exp $");
 
 #include "opt_pool.h"
 #include "opt_poollog.h"
@@ -317,6 +317,8 @@ pr_rmpage(struct pool *pp, struct pool_i
 {
 	int s;
 
+	LOCK_ASSERT(!simple_lock_held(&pp->pr_slock) || pq != NULL);
+
 	/*
 	 * If the page was idle, decrement the idle page count.
 	 */
@@ -336,12 +338,13 @@ pr_rmpage(struct pool *pp, struct pool_i
 	 * Unlink a page from the pool and release it (or queue it for release).
 	 */
 	LIST_REMOVE(ph, ph_pagelist);
+	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
+		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
 	if (pq) {
 		LIST_INSERT_HEAD(pq, ph, ph_pagelist);
 	} else {
 		pool_allocator_free(pp, ph->ph_page);
 		if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
-			SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
 			s = splvm();
 			pool_put(&phpool, ph);
 			splx(s);
@@ -364,6 +367,8 @@ pool_init(struct pool *pp, size_t size,
     const char *wchan, struct pool_allocator *palloc)
 {
 	int off, slack;
+	size_t trysize, phsize;
+	int s;
 
 #ifdef POOL_DIAGNOSTIC
 	/*
@@ -455,16 +460,26 @@ pool_init(struct pool *pp, size_t size,
 
 	/*
 	 * Decide whether to put the page header off page to avoid
-	 * wasting too large a part of the page. Off-page page headers
-	 * go on a hash table, so we can match a returned item
-	 * with its header based on the page address.
-	 * We use 1/16 of the page size as the threshold (XXX: tune)
+	 * wasting too large a part of the page or too big item.
+	 * Off-page page headers go on a hash table, so we can match
+	 * a returned item with its header based on the page address.
+	 * We use 1/16 of the page size and about 8 times of the item
+	 * size as the threshold (XXX: tune)
+	 *
+	 * However, we'll put the header into the page if we can put
+	 * it without wasting any items.
+	 *
+	 * Silently enforce `0 <= ioff < align'.
	 */
-	if (pp->pr_size < palloc->pa_pagesz/16) {
+	pp->pr_itemoffset = ioff %= align;
+	/* See the comment below about reserved bytes. */
+	trysize = palloc->pa_pagesz - ((align - ioff) % align);
+	phsize = ALIGN(sizeof(struct pool_item_header));
+	if (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
+	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size) {
 		/* Use the end of the page for the page header */
 		pp->pr_roflags |= PR_PHINPAGE;
-		pp->pr_phoffset = off = palloc->pa_pagesz -
-		    ALIGN(sizeof(struct pool_item_header));
+		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
 	} else {
 		/* The page header will be taken from our page header pool */
 		pp->pr_phoffset = 0;
@@ -476,10 +491,7 @@ pool_init(struct pool *pp, size_t size,
 	 * Alignment is to take place at `ioff' within the item. This means
 	 * we must reserve up to `align - 1' bytes on the page to allow
 	 * appropriate positioning of each item.
-	 *
-	 * Silently enforce `0 <= ioff < align'.
 	 */
-	pp->pr_itemoffset = ioff = ioff % align;
 	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
 	KASSERT(pp->pr_itemsperpage != 0);
 
@@ -540,9 +552,11 @@ pool_init(struct pool *pp, size_t size,
 	simple_unlock(&pool_head_slock);
 
 	/* Insert this into the list of pools using this allocator. */
+	s = splvm();
 	simple_lock(&palloc->pa_slock);
 	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
 	simple_unlock(&palloc->pa_slock);
+	splx(s);
 }
 
 /*
@@ -553,11 +567,14 @@ pool_destroy(struct pool *pp)
 {
 	struct pool_item_header *ph;
 	struct pool_cache *pc;
+	int s;
 
 	/* Locking order: pool_allocator -> pool */
+	s = splvm();
 	simple_lock(&pp->pr_alloc->pa_slock);
 	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
 	simple_unlock(&pp->pr_alloc->pa_slock);
+	splx(s);
 
 	/* Destroy all caches for this pool. */
 	while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
@@ -733,13 +750,14 @@ pool_get(struct pool *pp, int flags)
 		v = pool_allocator_alloc(pp, flags);
 		if (__predict_true(v != NULL))
 			ph = pool_alloc_item_header(pp, v, flags);
-		simple_lock(&pp->pr_slock);
-		pr_enter(pp, file, line);
 
 		if (__predict_false(v == NULL || ph == NULL)) {
 			if (v != NULL)
 				pool_allocator_free(pp, v);
 
+			simple_lock(&pp->pr_slock);
+			pr_enter(pp, file, line);
+
 			/*
 			 * We were unable to allocate a page or item
 			 * header, but we released the lock during
@@ -771,6 +789,8 @@ pool_get(struct pool *pp, int flags)
 		}
 
 		/* We have more memory; add it to the pool */
+		simple_lock(&pp->pr_slock);
+		pr_enter(pp, file, line);
 		pool_prime_page(pp, v, ph);
 		pp->pr_npagealloc++;
 
@@ -937,8 +957,7 @@ pool_do_put(struct pool *pp, void *v)
 	 * If this page is now empty, do one of two things:
 	 *
 	 *	(1) If we have more pages than the page high water mark,
-	 *	    or if we are flagged as immediately freeing back idle
-	 *	    pages, free the page back to the system. ONLY CONSIDER
+	 *	    free the page back to the system. ONLY CONSIDER
 	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
 	 *	    CLAIM.
 	 *
@@ -951,9 +970,10 @@ pool_do_put(struct pool *pp, void *v)
 		pp->pr_nidle++;
 		if (pp->pr_npages > pp->pr_minpages &&
 		    (pp->pr_npages > pp->pr_maxpages ||
-		     (pp->pr_roflags & PR_IMMEDRELEASE) != 0 ||
 		     (pp->pr_alloc->pa_flags & PA_WANT) != 0)) {
+			simple_unlock(&pp->pr_slock);
 			pr_rmpage(pp, ph, NULL);
+			simple_lock(&pp->pr_slock);
 		} else {
 			LIST_REMOVE(ph, ph_pagelist);
 			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
@@ -1039,14 +1059,15 @@ pool_prime(struct pool *pp, int n)
 		cp = pool_allocator_alloc(pp, PR_NOWAIT);
 		if (__predict_true(cp != NULL))
 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
-		simple_lock(&pp->pr_slock);
 
 		if (__predict_false(cp == NULL || ph == NULL)) {
 			if (cp != NULL)
 				pool_allocator_free(pp, cp);
+			simple_lock(&pp->pr_slock);
 			break;
 		}
 
+		simple_lock(&pp->pr_slock);
 		pool_prime_page(pp, cp, ph);
 		pp->pr_npagealloc++;
 		pp->pr_minpages++;
@@ -1074,6 +1095,8 @@ pool_prime_page(struct pool *pp, caddr_t
 	int n;
 	int s;
 
+	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
+
 #ifdef DIAGNOSTIC
 	if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
 		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
@@ -1163,13 +1186,14 @@ pool_catchup(struct pool *pp)
 		cp = pool_allocator_alloc(pp, PR_NOWAIT);
 		if (__predict_true(cp != NULL))
 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
-		simple_lock(&pp->pr_slock);
 		if (__predict_false(cp == NULL || ph == NULL)) {
 			if (cp != NULL)
 				pool_allocator_free(pp, cp);
 			error = ENOMEM;
+			simple_lock(&pp->pr_slock);
 			break;
 		}
+		simple_lock(&pp->pr_slock);
 		pool_prime_page(pp, cp, ph);
 		pp->pr_npagealloc++;
 	}
@@ -1320,7 +1344,6 @@ pool_reclaim(struct pool *pp)
 		if (pp->pr_roflags & PR_PHINPAGE) {
 			continue;
 		}
-		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
 		s = splvm();
 		pool_put(&phpool, ph);
 		splx(s);
@@ -1955,6 +1978,8 @@ pool_allocator_alloc(struct pool *org, i
 	int s, freed;
 	void *res;
 
+	LOCK_ASSERT(!simple_lock_held(&org->pr_slock));
+
 	do {
 		if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
 			return (res);
@@ -2025,6 +2050,8 @@ pool_allocator_free(struct pool *pp, voi
 	struct pool_allocator *pa = pp->pr_alloc;
 	int s;
 
+	LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
+
 	(*pa->pa_free)(pp, v);
 
 	s = splvm();
@@ -2068,15 +2095,21 @@ pool_page_free(struct pool *pp, void *v)
 void *
 pool_subpage_alloc(struct pool *pp, int flags)
 {
-
-	return (pool_get(&psppool, flags));
+	void *v;
+	int s;
+	s = splvm();
+	v = pool_get(&psppool, flags);
+	splx(s);
+	return v;
 }
 
 void
 pool_subpage_free(struct pool *pp, void *v)
 {
-
+	int s;
+	s = splvm();
	pool_put(&psppool, v);
+	splx(s);
 }
 
 /* We don't provide a real nointr allocator.  Maybe later. */
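
The subtlest change above is the new header-placement test in pool_init(). The sketch below mirrors that test in a standalone userland program; it is an illustration, not NetBSD code, and PAGE_SZ, PH_SIZE, and header_fits_in_page() are made-up stand-ins for palloc->pa_pagesz, ALIGN(sizeof(struct pool_item_header)), and the inline test. The header stays in the page when the item is small (below both 1/16 of the page and eight headers' worth), or when reserving space for the header loses no items:

/*
 * Userland sketch of the PR_PHINPAGE decision added to pool_init().
 * PAGE_SZ and PH_SIZE are invented stand-ins; the kernel uses
 * palloc->pa_pagesz and ALIGN(sizeof(struct pool_item_header)).
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SZ	4096u		/* assumed page size */
#define PH_SIZE	64u		/* assumed aligned header size */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static int
header_fits_in_page(size_t itemsize, size_t align, size_t ioff)
{
	size_t trysize, phsize = PH_SIZE;

	ioff %= align;		/* silently enforce 0 <= ioff < align */
	/* usable bytes once the first item is aligned at `ioff' */
	trysize = PAGE_SZ - ((align - ioff) % align);

	/*
	 * In-page header if the item is small (under both 1/16 page
	 * and 8 headers' worth), or if the header costs no items.
	 */
	return itemsize < MIN(PAGE_SZ / 16, phsize << 3) ||
	    trysize / itemsize == (trysize - phsize) / itemsize;
}

int
main(void)
{
	size_t sizes[] = { 32, 1024, 1300, 2048 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("item %4zu: header goes %s\n", sizes[i],
		    header_fits_in_page(sizes[i], 8, 0) ?
		    "in page" : "off page");
	return 0;
}

With these assumed numbers, a 1300-byte item keeps its header in-page (three items fit either way), while a 1024-byte item sends it off-page (an in-page header would cut the count from four items to three).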
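
Most of the remaining hunks enforce a single rule: the back-end allocator must never be entered with pr_slock held. pool_get(), pool_prime(), and pool_catchup() therefore take the lock only after pool_allocator_alloc() returns, pool_do_put() drops it around pr_rmpage(), and pr_rmpage() itself either runs unlocked or defers the free onto the caller's pq list. Below is a minimal sketch of both patterns under stated assumptions: a pthread mutex stands in for simple_lock/splvm, and toy_grow(), toy_reclaim(), and backend_alloc() are invented names.

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

/* A toy "page" that can be chained onto a deferred-free list. */
struct page {
	struct page *next;
};

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static struct page *pages;	/* pages owned by the toy pool */

/* Stand-in for pool_allocator_alloc(); may block, so call it unlocked. */
static struct page *
backend_alloc(void)
{
	return calloc(1, sizeof(struct page));
}

/*
 * Grow the pool: allocate first, lock only to publish the page
 * (cf. pool_get()/pool_prime()/pool_catchup() after this change).
 */
static int
toy_grow(void)
{
	struct page *pg = backend_alloc();	/* lock NOT held here */

	if (pg == NULL)
		return -1;
	pthread_mutex_lock(&pool_lock);
	pg->next = pages;			/* cf. pool_prime_page() */
	pages = pg;
	pthread_mutex_unlock(&pool_lock);
	return 0;
}

/*
 * Shrink the pool: unlink under the lock onto a private queue, then
 * free after unlocking (cf. pr_rmpage()'s pq argument).
 */
static void
toy_reclaim(void)
{
	struct page *pq = NULL, *pg;

	pthread_mutex_lock(&pool_lock);
	while ((pg = pages) != NULL) {		/* unlink; do not free yet */
		pages = pg->next;
		pg->next = pq;
		pq = pg;
	}
	pthread_mutex_unlock(&pool_lock);

	while ((pg = pq) != NULL) {		/* now safe to free */
		pq = pg->next;
		free(pg);
	}
}

int
main(void)
{
	if (toy_grow() == 0 && toy_grow() == 0)
		printf("grew pool twice\n");
	toy_reclaim();
	return 0;
}

The private list in toy_reclaim() plays the role of pr_rmpage()'s pq argument: pages are unlinked while the lock is held but handed back to the allocator only after it is released.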
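
Finally, the pool_subpage_alloc()/pool_subpage_free() hunk wraps the recursive pool_get()/pool_put() on psppool in splvm()/splx(). A toy model of why the previous level must be saved and restored rather than simply toggled; the level numbers are invented, and toy_splvm()/toy_splx() only imitate the kernel primitives:

#include <stdio.h>

static int ipl;			/* toy "interrupt priority level" */

static int
toy_splvm(void)
{
	int s = ipl;		/* remember the caller's level */

	if (ipl < 4)		/* 4: made-up stand-in for IPL_VM */
		ipl = 4;
	return s;
}

static void
toy_splx(int s)
{
	ipl = s;		/* restore exactly what the caller saw */
}

static void
inner(void)
{
	int s = toy_splvm();	/* already at 4: level unchanged */

	printf("inner at ipl %d\n", ipl);
	toy_splx(s);		/* restores 4, not 0 */
}

int
main(void)
{
	int s = toy_splvm();

	inner();
	toy_splx(s);
	printf("back at ipl %d\n", ipl);
	return 0;
}

Because each caller restores the level it saw on entry, the splvm() added inside pool_subpage_alloc() composes safely with callers that have already raised the level.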