Diff of the NetBSD kernel pool allocator (sys/kern/subr_pool.c, judging from the function names) between two revisions. Lines marked `-` below appear only in the first file (1.87.2.3); lines marked `+` only in the second (1.89).

--- version 1.87.2.3, 2004/09/21 13:35:12
+++ version 1.89, 2003/12/29 16:04:58
@@ -317 +317 @@ pr_rmpage(struct pool *pp, struct pool_i
 {
         int s;
 
-        LOCK_ASSERT(!simple_lock_held(&pp->pr_slock) || pq != NULL);
-
         /*
          * If the page was idle, decrement the idle page count.
          */
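
The LOCK_ASSERT added on the 1.87.2.3 side spells out pr_rmpage's locking contract: a caller that holds pr_slock must pass a pagelist (pq) so the page is merely queued, while an immediate free (pq == NULL) is legal only with the lock dropped, because that path calls into the pool allocator. A sketch of the two call patterns this permits, assuming the usual LIST_HEAD-based struct pool_pagelist (the caller code here is illustrative, not part of the diff):

        struct pool_pagelist pq;

        /* Pattern 1: pool lock held, release deferred to a pagelist. */
        LIST_INIT(&pq);
        simple_lock(&pp->pr_slock);
        pr_rmpage(pp, ph, &pq);         /* page queued on pq, not freed */
        simple_unlock(&pp->pr_slock);
        /* ... now release the queued pages with no pool lock held ... */

        /* Pattern 2: pool lock not held, page freed immediately. */
        pr_rmpage(pp, ph, NULL);        /* calls pool_allocator_free() */
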
@@ -338 +336 @@ pr_rmpage(struct pool *pp, struct pool_i
          * Unlink a page from the pool and release it (or queue it for release).
          */
        LIST_REMOVE(ph, ph_pagelist);
-       if ((pp->pr_roflags & PR_PHINPAGE) == 0)
-               SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
        if (pq) {
                LIST_INSERT_HEAD(pq, ph, ph_pagelist);
        } else {
                pool_allocator_free(pp, ph->ph_page);
                if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
+                       SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
                        s = splvm();
                        pool_put(&phpool, ph);
                        splx(s);
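
Hoisting SPLAY_REMOVE keeps the page-header tree consistent at the moment a page is unlinked, even when the actual release is deferred through pq. The matching consumer-side change is in the pool_reclaim hunk below: the SPLAY_REMOVE that 1.89 performs there is gone on the 1.87.2.3 side, since pr_rmpage now handles it.
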
@@ -357 +354 @@ pr_rmpage(struct pool *pp, struct pool_i
 }
 
 /*
- * Initialize all the pools listed in the "pools" link set.
- */
-void
-link_pool_init(void)
-{
-       __link_set_decl(pools, struct link_pool_init);
-       struct link_pool_init * const *pi;
-
-       __link_set_foreach(pi, pools)
-               pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
-                   (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
-                   (*pi)->palloc);
-}
-
-/*
  * Initialize the given pool resource structure.
  *
  * We export this routine to allow other kernel parts to declare
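
link_pool_init (1.87.2.3 only) walks a linker set named "pools" at boot and runs pool_init on every entry, so each entry must package exactly the pool_init arguments the loop dereferences. A minimal sketch of what a registration could look like, assuming NetBSD's __link_set_add_rodata from <sys/cdefs.h> (the struct layout is inferred from the loop above; the real declaration and a convenience macro live in sys/pool.h, and the pool name below is hypothetical):

        struct link_pool_init {         /* mirrors pool_init's arguments */
                struct pool *pp;
                size_t size;
                u_int align;
                u_int align_offset;
                int flags;
                const char *wchan;
                struct pool_allocator *palloc;
        };

        static struct pool example_pool;        /* hypothetical pool */
        static const struct link_pool_init example_pool_init = {
                &example_pool, 64, 0, 0, 0, "examplpl", NULL
        };
        __link_set_add_rodata(pools, example_pool_init);
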
@@ -382 +364 @@ pool_init(struct pool *pp, size_t size,
     const char *wchan, struct pool_allocator *palloc)
 {
        int off, slack;
-       size_t trysize, phsize;
-       int s;
 
 #ifdef POOL_DIAGNOSTIC
        /*
@@ -475 +455 @@ pool_init(struct pool *pp, size_t size,
 
        /*
         * Decide whether to put the page header off page to avoid
-        * wasting too large a part of the page or too big item.
-        * Off-page page headers go on a hash table, so we can match
-        * a returned item with its header based on the page address.
-        * We use 1/16 of the page size and about 8 times of the item
-        * size as the threshold (XXX: tune)
-        *
-        * However, we'll put the header into the page if we can put
-        * it without wasting any items.
-        *
-        * Silently enforce `0 <= ioff < align'.
+        * wasting too large a part of the page. Off-page page headers
+        * go on a hash table, so we can match a returned item
+        * with its header based on the page address.
+        * We use 1/16 of the page size as the threshold (XXX: tune)
         */
-       pp->pr_itemoffset = ioff %= align;
-       /* See the comment below about reserved bytes. */
-       trysize = palloc->pa_pagesz - ((align - ioff) % align);
-       phsize = ALIGN(sizeof(struct pool_item_header));
-       if (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
-           trysize / pp->pr_size == (trysize - phsize) / pp->pr_size) {
+       if (pp->pr_size < palloc->pa_pagesz/16) {
                /* Use the end of the page for the page header */
                pp->pr_roflags |= PR_PHINPAGE;
-               pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
+               pp->pr_phoffset = off = palloc->pa_pagesz -
+                   ALIGN(sizeof(struct pool_item_header));
        } else {
                /* The page header will be taken from our page header pool */
                pp->pr_phoffset = 0;
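
The rewritten test on the 1.87.2.3 side keeps the header in the page in two cases instead of one: for small items (now bounded by both pagesz/16 and eight headers' worth, phsize << 3), and whenever donating phsize bytes to the header costs no items, i.e. trysize / size == (trysize - phsize) / size. A userland sketch of the test with made-up numbers (4 KB page, 48-byte header, 8-byte alignment; the real ALIGN(sizeof(struct pool_item_header)) varies by port):

        #include <stdio.h>

        #define MIN(a, b)       ((a) < (b) ? (a) : (b))

        int
        main(void)
        {
                const size_t pagesz = 4096, phsize = 48, align = 8, ioff = 0;
                const size_t sizes[] = { 64, 160, 256, 512, 1000, 2048 };
                size_t i;

                for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                        size_t size = sizes[i];
                        /* usable bytes once the alignment offset is reserved */
                        size_t trysize = pagesz - ((align - ioff) % align);
                        int inpage =
                            size < MIN(pagesz / 16, phsize << 3) ||      /* small item */
                            trysize / size == (trysize - phsize) / size; /* free fit */
                        printf("item %4zu -> %s header\n", size,
                            inpage ? "in-page" : "off-page");
                }
                return 0;
        }

With these inputs, 64-byte items stay in-page via the size threshold; 160- and 1000-byte items stay in-page because the page's leftover space (96 bytes in both cases) already swallows the header for free; 256-, 512- and 2048-byte items divide the page too evenly and get off-page headers.
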
@@ -506 +476 @@ pool_init(struct pool *pp, size_t size,
         * Alignment is to take place at `ioff' within the item. This means
         * we must reserve up to `align - 1' bytes on the page to allow
         * appropriate positioning of each item.
+        *
+        * Silently enforce `0 <= ioff < align'.
         */
+       pp->pr_itemoffset = ioff = ioff % align;
        pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
        KASSERT(pp->pr_itemsperpage != 0);
 
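Continuing the made-up numbers from the sketch above: an off-page-header pool keeps the whole page, so 256-byte items with align = 8 and ioff = 0 give pr_itemsperpage = (4096 - 0) / 256 = 16, while in the in-page case off has already been reduced by the header, e.g. (4048 - 0) / 1000 = 4 items per page.
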
@@ -567 +540 @@ pool_init(struct pool *pp, size_t size,
        simple_unlock(&pool_head_slock);
 
        /* Insert this into the list of pools using this allocator. */
-       s = splvm();
        simple_lock(&palloc->pa_slock);
        TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
        simple_unlock(&palloc->pa_slock);
-       splx(s);
 }
 
 /*
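
The splvm()/splx() bracket that 1.87.2.3 adds around pa_slock (here and in pool_destroy just below) is the standard spl idiom for a simple lock that interrupt handlers may also take: raise the interrupt priority level before locking, so an interrupt cannot arrive and spin forever on a lock its own CPU already holds. Schematically:

        int s;

        s = splvm();                    /* block VM-level interrupts */
        simple_lock(&lock);             /* only then take the lock */
        /* ... critical section ... */
        simple_unlock(&lock);
        splx(s);                        /* restore the previous level */
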
@@ -582 +553 @@ pool_destroy(struct pool *pp)
 {
        struct pool_item_header *ph;
        struct pool_cache *pc;
-       int s;
 
        /* Locking order: pool_allocator -> pool */
-       s = splvm();
        simple_lock(&pp->pr_alloc->pa_slock);
        TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
        simple_unlock(&pp->pr_alloc->pa_slock);
-       splx(s);
 
        /* Destroy all caches for this pool. */
        while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
@@ -670 +638 @@ pool_get(struct pool *pp, int flags)
        void *v;
 
 #ifdef DIAGNOSTIC
-       if (__predict_false(pp->pr_itemsperpage == 0))
-               panic("pool_get: pool %p: pr_itemsperpage is zero, "
-                   "pool not initialized?", pp);
        if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
                            (flags & PR_WAITOK) != 0))
                panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
@@ -768 +733 @@ pool_get(struct pool *pp, int flags)
                v = pool_allocator_alloc(pp, flags);
                if (__predict_true(v != NULL))
                        ph = pool_alloc_item_header(pp, v, flags);
+               simple_lock(&pp->pr_slock);
+               pr_enter(pp, file, line);
 
                if (__predict_false(v == NULL || ph == NULL)) {
                        if (v != NULL)
                                pool_allocator_free(pp, v);
 
-                       simple_lock(&pp->pr_slock);
-                       pr_enter(pp, file, line);
-
                        /*
                         * We were unable to allocate a page or item
                         * header, but we released the lock during
@@ -807 +771 @@ pool_get(struct pool *pp, int flags)
                }
 
                /* We have more memory; add it to the pool */
-               simple_lock(&pp->pr_slock);
-               pr_enter(pp, file, line);
                pool_prime_page(pp, v, ph);
                pp->pr_npagealloc++;
 
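Both pool_get hunks move the simple_lock/pr_enter pair so that pool_allocator_alloc, pool_alloc_item_header, and the failure-path pool_allocator_free all run with pr_slock dropped. That is what the LOCK_ASSERT(!simple_lock_held(...)) lines added to pool_allocator_alloc and pool_allocator_free further down enforce, and it preserves the order documented in pool_destroy ("pool_allocator -> pool"). Schematically:

        /* 1.89: allocator entered with the pool lock held */
        simple_lock(&pp->pr_slock);
        /* ... */
        pool_allocator_free(pp, v);     /* may need pa_slock: wrong order */

        /* 1.87.2.3: allocator finished first, pool lock taken last */
        pool_allocator_free(pp, v);
        simple_lock(&pp->pr_slock);
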
@@ -975 +937 @@ pool_do_put(struct pool *pp, void *v)
         * If this page is now empty, do one of two things:
         *
         * (1) If we have more pages than the page high water mark,
-        *     free the page back to the system.  ONLY CONSIDER
-        *     FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
-        *     CLAIM.
+        *     free the page back to the system.
         *
         * (2) Otherwise, move the page to the empty page list.
         *
@@ -986 +946 @@ pool_do_put(struct pool *pp, void *v)
         */
        if (ph->ph_nmissing == 0) {
                pp->pr_nidle++;
-               if (pp->pr_npages > pp->pr_minpages &&
-                   (pp->pr_npages > pp->pr_maxpages ||
-                    (pp->pr_alloc->pa_flags & PA_WANT) != 0)) {
-                       simple_unlock(&pp->pr_slock);
+               if (pp->pr_npages > pp->pr_maxpages ||
+                   (pp->pr_alloc->pa_flags & PA_WANT) != 0) {
                        pr_rmpage(pp, ph, NULL);
-                       simple_lock(&pp->pr_slock);
                } else {
                        LIST_REMOVE(ph, ph_pagelist);
                        LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
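
The reworked condition restores the rule shouted in the 1.87.2.3 comment above: an idle page is only a candidate for release once the pool exceeds its minimum page claim. With pr_minpages = 4 and pr_maxpages = 8, for instance: at pr_npages = 3 with PA_WANT set, 1.89 frees the page (dipping below the minimum claim) while 1.87.2.3 keeps it; at pr_npages = 9 both versions free it. The unlock/relock around pr_rmpage(pp, ph, NULL) satisfies the assertion added in the first hunk: an immediate free (pq == NULL) must not run with pr_slock held.
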
@@ -1077 +1034 @@ pool_prime(struct pool *pp, int n)
                cp = pool_allocator_alloc(pp, PR_NOWAIT);
                if (__predict_true(cp != NULL))
                        ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
+               simple_lock(&pp->pr_slock);
 
                if (__predict_false(cp == NULL || ph == NULL)) {
                        if (cp != NULL)
                                pool_allocator_free(pp, cp);
-                       simple_lock(&pp->pr_slock);
                        break;
                }
 
-               simple_lock(&pp->pr_slock);
                pool_prime_page(pp, cp, ph);
                pp->pr_npagealloc++;
                pp->pr_minpages++;
@@ -1113 +1069 @@ pool_prime_page(struct pool *pp, caddr_t
        int n;
        int s;
 
-       LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
-
 #ifdef DIAGNOSTIC
        if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
                panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
@@ -1204 +1158 @@ pool_catchup(struct pool *pp)
                cp = pool_allocator_alloc(pp, PR_NOWAIT);
                if (__predict_true(cp != NULL))
                        ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
+               simple_lock(&pp->pr_slock);
                if (__predict_false(cp == NULL || ph == NULL)) {
                        if (cp != NULL)
                                pool_allocator_free(pp, cp);
                        error = ENOMEM;
-                       simple_lock(&pp->pr_slock);
                        break;
                }
-               simple_lock(&pp->pr_slock);
                pool_prime_page(pp, cp, ph);
                pp->pr_npagealloc++;
        }
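
pool_prime and pool_catchup receive the same reordering as pool_get: the pool lock is taken only after the unlocked allocator calls (including the failure-path pool_allocator_free) are finished, yet it is held by the time pool_prime_page runs, which is exactly the precondition the LOCK_ASSERT(simple_lock_held(&pp->pr_slock)) added to pool_prime_page checks. The 1.87.2.3 success path, schematically:

        cp = pool_allocator_alloc(pp, PR_NOWAIT);       /* pool lock dropped */
        /* ... failure handling ... */
        simple_lock(&pp->pr_slock);                     /* lock taken last */
        pool_prime_page(pp, cp, ph);                    /* assert satisfied */
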
@@ -1362 +1315 @@ pool_reclaim(struct pool *pp)
                if (pp->pr_roflags & PR_PHINPAGE) {
                        continue;
                }
+               SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
                s = splvm();
                pool_put(&phpool, ph);
                splx(s);
@@ -1996 +1950 @@ pool_allocator_alloc(struct pool *org, i
        int s, freed;
        void *res;
 
-       LOCK_ASSERT(!simple_lock_held(&org->pr_slock));
-
        do {
                if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
                        return (res);
@@ -2068 +2020 @@ pool_allocator_free(struct pool *pp, voi
        struct pool_allocator *pa = pp->pr_alloc;
        int s;
 
-       LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
-
        (*pa->pa_free)(pp, v);
 
        s = splvm();
@@ -2113 +2063 @@ pool_page_free(struct pool *pp, void *v)
 void *
 pool_subpage_alloc(struct pool *pp, int flags)
 {
-       void *v;
-       int s;
-       s = splvm();
-       v = pool_get(&psppool, flags);
-       splx(s);
-       return v;
+       return (pool_get(&psppool, flags));
 }
 
 void
 pool_subpage_free(struct pool *pp, void *v)
 {
-       int s;
-       s = splvm();
        pool_put(&psppool, v);
-       splx(s);
 }
 
 /* We don't provide a real nointr allocator. Maybe later. */
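
Finally, the splvm() brackets added to pool_subpage_alloc and pool_subpage_free mirror the phpool accesses elsewhere in this diff (s = splvm(); pool_put(&phpool, ph); splx(s);): psppool, like phpool, is an internal pool that may be touched from interrupt context, so pool_get and pool_put on it have to run at splvm.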