--- version 1.207, 2017/03/14 03:13:50
+++ version 1.207.6.1, 2018/02/27 09:07:32
@@ -483 +483 @@ pool_init(struct pool *pp, size_t size,
	 */
	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
		if (pp == pp1)
-			panic("pool_init: pool %s already initialised",
+			panic("%s: [%s] already initialised", __func__,
			    wchan);
	}
	if (__predict_true(!cold))
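
Most hunks below make the same mechanical change to diagnostics: the hard-coded function name in panic/KASSERTMSG strings is replaced by __func__, and the pool's wait-channel name (pr_wchan) is set off in brackets. A minimal userland sketch of the resulting convention (the helper and pool name here are illustrative, not part of the diff):

	#include <stdio.h>

	/* New convention: "<function>: [<pool name>] <detail>".
	 * __func__ keeps the text correct if a function is renamed. */
	static void
	report(const char *wchan)
	{
		fprintf(stderr, "%s: [%s] already initialised\n",
		    __func__, wchan);
	}

	int
	main(void)
	{
		report("examplepl");	/* hypothetical pool name */
		return 0;
	}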
@@ -524 +524 @@ pool_init(struct pool *pp, size_t size,

	prsize = roundup(prsize, align);
	KASSERTMSG((prsize <= palloc->pa_pagesz),
-	    "pool_init: pool item size (%zu) larger than page size (%u)",
-	    prsize, palloc->pa_pagesz);
+	    "%s: [%s] pool item size (%zu) larger than page size (%u)",
+	    __func__, wchan, prsize, palloc->pa_pagesz);

	/*
	 * Initialize the pool structure.
@@ -608 +608 @@ pool_init(struct pool *pp, size_t size,
			 * if you see this panic, consider to tweak
			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
			 */
-			panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
+			panic("%s: [%s] too large itemsperpage(%d) for "
+			    "PR_NOTOUCH", __func__,
			    pp->pr_wchan, pp->pr_itemsperpage);
		}
		pp->pr_phpool = &phpool[idx];
@@ -696 +697 @@ pool_destroy(struct pool *pp)

	KASSERT(pp->pr_cache == NULL);
	KASSERTMSG((pp->pr_nout == 0),
-	    "pool_destroy: pool busy: still out: %u", pp->pr_nout);
+	    "%s: pool busy: still out: %u", __func__, pp->pr_nout);
	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
	KASSERT(LIST_EMPTY(&pp->pr_partpages));

@@ -718 +719 @@ pool_set_drain_hook(struct pool *pp, voi

	/* XXX no locking -- must be used just after pool_init() */
	KASSERTMSG((pp->pr_drain_hook == NULL),
-	    "pool_set_drain_hook(%s): already set", pp->pr_wchan);
+	    "%s: [%s] already set", __func__, pp->pr_wchan);
	pp->pr_drain_hook = fn;
	pp->pr_drain_hook_arg = arg;
}
@@ -729 +730 @@ pool_alloc_item_header(struct pool *pp,
	struct pool_item_header *ph;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
-		ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
+		ph = (void *)((char *)storage + pp->pr_phoffset);
	else
		ph = pool_get(pp->pr_phpool, flags);

@@ -746 +747 @@ pool_get(struct pool *pp, int flags)
	struct pool_item_header *ph;
	void *v;

+	KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK));
	KASSERTMSG((pp->pr_itemsperpage != 0),
-	    "pool_get: pool '%s': pr_itemsperpage is zero, "
-	    "pool not initialized?", pp->pr_wchan);
+	    "%s: [%s] pr_itemsperpage is zero, "
+	    "pool not initialized?", __func__, pp->pr_wchan);
	KASSERTMSG((!(cpu_intr_p() || cpu_softintr_p())
	    || pp->pr_ipl != IPL_NONE || cold || panicstr != NULL),
-	    "pool '%s' is IPL_NONE, but called from interrupt context",
-	    pp->pr_wchan);
+	    "%s: [%s] is IPL_NONE, but called from interrupt context",
+	    __func__, pp->pr_wchan);
	if (flags & PR_WAITOK) {
		ASSERT_SLEEPABLE();
	}
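
The added KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK)) is a boolean exclusive-or: each `!` collapses a mask test to 0 or 1, and requiring the two booleans to differ means callers must pass exactly one of PR_NOWAIT and PR_WAITOK. A self-contained userland sketch (the flag values are illustrative, not the kernel's):

	#include <assert.h>

	#define PR_WAITOK	0x01	/* illustrative values only */
	#define PR_NOWAIT	0x02

	static void
	check_flags(int flags)
	{
		/* exactly one of the two flags must be set */
		assert(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK));
	}

	int
	main(void)
	{
		check_flags(PR_WAITOK);			/* ok */
		check_flags(PR_NOWAIT);			/* ok */
		/* check_flags(0);			   would fire: neither set */
		/* check_flags(PR_WAITOK | PR_NOWAIT);	   would fire: both set */
		return 0;
	}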
@@ -765 +767 @@ pool_get(struct pool *pp, int flags)
	 * the pool.
	 */
	KASSERTMSG((pp->pr_nout <= pp->pr_hardlimit),
-	    "pool_get: %s: crossed hard limit", pp->pr_wchan);
+	    "%s: %s: crossed hard limit", __func__, pp->pr_wchan);
	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
		if (pp->pr_drain_hook != NULL) {
			/*
@@ -786 +788 @@ pool_get(struct pool *pp, int flags)
			 * it be?
			 */
			pp->pr_flags |= PR_WANTED;
-			cv_wait(&pp->pr_cv, &pp->pr_lock);
+			do {
+				cv_wait(&pp->pr_cv, &pp->pr_lock);
+			} while (pp->pr_flags & PR_WANTED);
			goto startover;
		}

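Wrapping the cv_wait() in a loop is the standard condition-variable discipline: a wakeup is only a hint, so the predicate must be re-checked before proceeding. It matters here because the rewritten pool_grow() later in this diff broadcasts the same pr_cv, so a PR_WANTED sleeper can now be woken for an unrelated reason. The pattern, annotated:

	/* Re-check the predicate after every wakeup: cv_wait() may return
	 * because of a broadcast aimed at other waiters (for example,
	 * pool_grow's cv_broadcast on the same pr_cv), not because an
	 * item was actually freed back to this pool. */
	pp->pr_flags |= PR_WANTED;
	do {
		cv_wait(&pp->pr_cv, &pp->pr_lock);
	} while (pp->pr_flags & PR_WANTED);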
@@ -801 +805 @@ pool_get(struct pool *pp, int flags)
		pp->pr_nfail++;

		mutex_exit(&pp->pr_lock);
+		KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0);
		return (NULL);
	}

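The assertion added before this failure return pins down the contract: pool_get() hands back NULL at the hard limit only when the caller opted in with PR_NOWAIT or PR_LIMITFAIL; a plain PR_WAITOK caller sleeps on PR_WANTED above and never reaches it. (The hunk at line 848 below adds the matching assertion, (flags & (PR_WAITOK|PR_NOWAIT)) == PR_NOWAIT, to the allocation-failure return.) A hypothetical caller-side sketch, not from the diff:

	/* A PR_WAITOK caller without PR_LIMITFAIL never sees NULL: */
	void *v = pool_get(pp, PR_WAITOK);
	KASSERT(v != NULL);

	/* A PR_NOWAIT caller must handle failure itself: */
	void *w = pool_get(pp, PR_NOWAIT);
	if (w == NULL)
		return ENOMEM;		/* hypothetical error path */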
@@ -814 +819 @@ pool_get(struct pool *pp, int flags)
		int error;

		KASSERTMSG((pp->pr_nitems == 0),
-		    "pool_get: nitems inconsistent"
-		    ": %s: curpage NULL, nitems %u",
-		    pp->pr_wchan, pp->pr_nitems);
+		    "%s: [%s] curpage NULL, inconsistent nitems %u",
+		    __func__, pp->pr_wchan, pp->pr_nitems);

		/*
		 * Call the back-end page allocator for more memory.
@@ -826 +830 @@ pool_get(struct pool *pp, int flags)
		error = pool_grow(pp, flags);
		if (error != 0) {
+			/*
+			 * pool_grow aborts when another thread
+			 * is allocating a new page. Retry if it
+			 * waited for it.
+			 */
+			if (error == ERESTART)
+				goto startover;
+
			/*
			 * We were unable to allocate a page or item
			 * header, but we released the lock during
			 * allocation, so perhaps items were freed
@@ -836 +848 @@ pool_get(struct pool *pp, int flags)

			pp->pr_nfail++;
			mutex_exit(&pp->pr_lock);
+			KASSERT((flags & (PR_WAITOK|PR_NOWAIT)) == PR_NOWAIT);
			return (NULL);
		}

@@ -844 +857 @@ pool_get(struct pool *pp, int flags)
	}
	if (pp->pr_roflags & PR_NOTOUCH) {
		KASSERTMSG((ph->ph_nmissing < pp->pr_itemsperpage),
-		    "pool_get: %s: page empty", pp->pr_wchan);
+		    "%s: %s: page empty", __func__, pp->pr_wchan);
		v = pr_item_notouch_get(pp, ph);
	} else {
		v = pi = LIST_FIRST(&ph->ph_itemlist);
		if (__predict_false(v == NULL)) {
			mutex_exit(&pp->pr_lock);
-			panic("pool_get: %s: page empty", pp->pr_wchan);
+			panic("%s: [%s] page empty", __func__, pp->pr_wchan);
		}
		KASSERTMSG((pp->pr_nitems > 0),
-		    "pool_get: nitems inconsistent"
-		    ": %s: items on itemlist, nitems %u",
-		    pp->pr_wchan, pp->pr_nitems);
+		    "%s: [%s] nitems %u inconsistent on itemlist",
+		    __func__, pp->pr_wchan, pp->pr_nitems);
		KASSERTMSG((pi->pi_magic == PI_MAGIC),
-		    "pool_get(%s): free list modified: "
-		    "magic=%x; page %p; item addr %p",
+		    "%s: [%s] free list modified: "
+		    "magic=%x; page %p; item addr %p", __func__,
		    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);

		/*
@@ -883 +895 @@ pool_get(struct pool *pp, int flags)
	if (ph->ph_nmissing == pp->pr_itemsperpage) {
		KASSERTMSG(((pp->pr_roflags & PR_NOTOUCH) ||
		    LIST_EMPTY(&ph->ph_itemlist)),
-		    "pool_get: %s: nmissing inconsistent", pp->pr_wchan);
+		    "%s: [%s] nmissing (%u) inconsistent", __func__,
+		    pp->pr_wchan, ph->ph_nmissing);
		/*
		 * This page is now full. Move it to the full list
		 * and select a new current page.
@@ -929 +942 @@ pool_do_put(struct pool *pp, void *v, st
	LOCKDEBUG_MEM_CHECK(v, pp->pr_size);

	KASSERTMSG((pp->pr_nout > 0),
-	    "pool_put: pool %s: putting with none out", pp->pr_wchan);
+	    "%s: [%s] putting with none out", __func__, pp->pr_wchan);

	if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
-		panic("pool_put: %s: page header missing", pp->pr_wchan);
+		panic("%s: [%s] page header missing", __func__, pp->pr_wchan);
	}

	/*
@@ -1045 +1058 @@ pool_put(struct pool *pp, void *v)
static int
pool_grow(struct pool *pp, int flags)
{
-	struct pool_item_header *ph = NULL;
-	char *cp;
-
-	mutex_exit(&pp->pr_lock);
-	cp = pool_allocator_alloc(pp, flags);
-	if (__predict_true(cp != NULL)) {
-		ph = pool_alloc_item_header(pp, cp, flags);
-	}
-	if (__predict_false(cp == NULL || ph == NULL)) {
-		if (cp != NULL) {
-			pool_allocator_free(pp, cp);
-		}
-		mutex_enter(&pp->pr_lock);
-		return ENOMEM;
-	}
-
-	mutex_enter(&pp->pr_lock);
+	/*
+	 * If there's a pool_grow in progress, wait for it to complete
+	 * and try again from the top.
+	 */
+	if (pp->pr_flags & PR_GROWING) {
+		if (flags & PR_WAITOK) {
+			do {
+				cv_wait(&pp->pr_cv, &pp->pr_lock);
+			} while (pp->pr_flags & PR_GROWING);
+			return ERESTART;
+		} else {
+			if (pp->pr_flags & PR_GROWINGNOWAIT) {
+				/*
+				 * This needs an unlock/relock dance so
+				 * that the other caller has a chance to
+				 * run and actually do the thing.  Note
+				 * that this is effectively a busy-wait.
+				 */
+				mutex_exit(&pp->pr_lock);
+				mutex_enter(&pp->pr_lock);
+				return ERESTART;
+			}
+			return EWOULDBLOCK;
+		}
+	}
+	pp->pr_flags |= PR_GROWING;
+	if (flags & PR_WAITOK)
+		mutex_exit(&pp->pr_lock);
+	else
+		pp->pr_flags |= PR_GROWINGNOWAIT;
+
+	char *cp = pool_allocator_alloc(pp, flags);
+	if (__predict_false(cp == NULL))
+		goto out;
+
+	struct pool_item_header *ph = pool_alloc_item_header(pp, cp, flags);
+	if (__predict_false(ph == NULL)) {
+		pool_allocator_free(pp, cp);
+		goto out;
+	}
+
+	if (flags & PR_WAITOK)
+		mutex_enter(&pp->pr_lock);
	pool_prime_page(pp, cp, ph);
	pp->pr_npagealloc++;
+	KASSERT(pp->pr_flags & PR_GROWING);
+	pp->pr_flags &= ~(PR_GROWING|PR_GROWINGNOWAIT);
+	/*
+	 * If anyone was waiting for pool_grow, notify them that we
+	 * may have just done it.
+	 */
+	cv_broadcast(&pp->pr_cv);
	return 0;
+
+out:
+	if (flags & PR_WAITOK)
+		mutex_enter(&pp->pr_lock);
+	KASSERT(pp->pr_flags & PR_GROWING);
+	pp->pr_flags &= ~(PR_GROWING|PR_GROWINGNOWAIT);
+	return ENOMEM;
}

/*
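
This is the substantive change of the patch. The old pool_grow() simply dropped pr_lock around the allocator call, so concurrent callers could race to grow the same pool, and a caller could fail even though another thread's grow was about to succeed. The rewrite serializes growers through a PR_GROWING flag (plus PR_GROWINGNOWAIT for callers that cannot sleep) and tells the loser of a race to re-evaluate rather than fail. As a summary of the code above, the contract a caller now sees, with pr_lock held on entry and on return:

	/*
	 * pool_grow() return values after this change:
	 *
	 *   0            a new page was allocated and primed
	 *   ERESTART     another grow was (or may have been) completed by a
	 *                different thread while we waited; retry from the top
	 *   EWOULDBLOCK  PR_NOWAIT caller found a PR_WAITOK grow already in
	 *                progress; fail fast rather than sleep behind it
	 *   ENOMEM       the back-end allocator or header allocation failed
	 */

	/* Typical caller shape (compare the pool_get, pool_prime and
	 * pool_catchup hunks in this diff): */
	for (;;) {
		error = pool_grow(pp, PR_NOWAIT);
		if (error == ERESTART)
			continue;	/* lost a race; the pool may have grown */
		break;			/* 0 on success, else a real failure */
	}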
@@ -1080 +1132 @@ pool_prime(struct pool *pp, int n)

	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

-	while (newpages-- > 0) {
+	while (newpages > 0) {
		error = pool_grow(pp, PR_NOWAIT);
		if (error) {
+			if (error == ERESTART)
+				continue;
			break;
		}
		pp->pr_minpages++;
+		newpages--;
	}

	if (pp->pr_minpages >= pp->pr_maxpages)
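
Note the two coordinated changes in this loop: the decrement moves out of the while condition and below the pool_grow() call. Since continue re-evaluates the loop condition, keeping the old while (newpages-- > 0) form would have made every ERESTART retry silently shrink the number of pages actually primed. Annotated:

	/* Count only real progress: with newpages-- in the loop condition,
	 * an ERESTART pass would consume one of the requested pages
	 * without having primed anything. */
	while (newpages > 0) {
		error = pool_grow(pp, PR_NOWAIT);
		if (error) {
			if (error == ERESTART)
				continue;	/* does not touch newpages */
			break;
		}
		pp->pr_minpages++;
		newpages--;			/* only after a successful grow */
	}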
@@ -1112 +1167 @@ pool_prime_page(struct pool *pp, void *s
	KASSERT(mutex_owned(&pp->pr_lock));
	KASSERTMSG(((pp->pr_roflags & PR_NOALIGN) ||
	    (((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) == 0)),
-	    "pool_prime_page: %s: unaligned page: %p", pp->pr_wchan, cp);
+	    "%s: [%s] unaligned page: %p", __func__, pp->pr_wchan, cp);

	/*
	 * Insert page header.
@@ -1195 +1250 @@ pool_catchup(struct pool *pp)
	while (POOL_NEEDS_CATCHUP(pp)) {
		error = pool_grow(pp, PR_NOWAIT);
		if (error) {
+			if (error == ERESTART)
+				continue;
			break;
		}
	}
@@ -2134 +2191 @@ pool_cache_get_slow(pool_cache_cpu_t *cc

	object = pool_get(&pc->pc_pool, flags);
	*objectp = object;
-	if (__predict_false(object == NULL))
+	if (__predict_false(object == NULL)) {
+		KASSERT((flags & (PR_WAITOK|PR_NOWAIT)) == PR_NOWAIT);
		return false;
+	}

	if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
		pool_put(&pc->pc_pool, object);
@@ -2173 +2232 @@ pool_cache_get_paddr(pool_cache_t pc, in
	void *object;
	int s;

+	KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK));
	KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) ||
	    (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL),
-	    "pool '%s' is IPL_NONE, but called from interrupt context\n",
-	    pc->pc_pool.pr_wchan);
+	    "%s: [%s] is IPL_NONE, but called from interrupt context",
+	    __func__, pc->pc_pool.pr_wchan);

	if (flags & PR_WAITOK) {
		ASSERT_SLEEPABLE();
@@ -2226 +2286 @@ pool_cache_get_paddr(pool_cache_t pc, in
		break;
	}

+	/*
+	 * We would like to KASSERT(object || (flags & PR_NOWAIT)), but
+	 * pool_cache_get can fail even in the PR_WAITOK case, if the
+	 * constructor fails.
+	 */
	return object;
}

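The new comment codifies an asymmetry worth remembering: unlike pool_get(), pool_cache_get() can return NULL even to a PR_WAITOK caller, because the object constructor can fail after the raw allocation succeeded. A hypothetical caller-side sketch, not from the diff:

	/* Even with PR_WAITOK, check the result when the cache was
	 * created with a constructor that can fail. */
	obj = pool_cache_get(pc, PR_WAITOK);
	if (obj == NULL)
		return ENOMEM;	/* constructor failure is the only cause here */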