--- version 1.63.2.1, 2001/11/12 21:18:52
+++ version 1.68, 2002/03/08 21:41:59
@@ -110 +110 @@ struct pool_item {
 };

-#define PR_HASH_INDEX(pp,addr) \
-    (((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))
+#define PR_HASH_INDEX(pp,addr) \
+    (((u_long)(addr) >> (pp)->pr_alloc->pa_pageshift) & \
+     (PR_HASHTABSIZE - 1))

 #define POOL_NEEDS_CATCHUP(pp) \
     ((pp)->pr_nitems < (pp)->pr_minitems)
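
The bucket arithmetic itself is unchanged: shift the page offset away, then mask into the table. Only the source of the shift moves, from the per-pool pr_pageshift to the shared allocator's pa_pageshift. A stand-alone sketch of the computation, using assumed illustrative values (PR_HASHTABSIZE = 32 and 4 KiB pages are not taken from this diff):

    #include <stdio.h>

    #define PR_HASHTABSIZE  32  /* assumed: a power of two */
    #define PA_PAGESHIFT    12  /* assumed: ffs(4096) - 1 for 4 KiB pages */

    /* Same arithmetic as PR_HASH_INDEX, with the allocator's page
     * shift plugged in directly. */
    #define HASH_INDEX(addr) \
        (((unsigned long)(addr) >> PA_PAGESHIFT) & (PR_HASHTABSIZE - 1))

    int
    main(void)
    {
        unsigned long addr = 0xc12f3abcUL;

        /* Items on the same page land in the same bucket (19 here). */
        printf("%lu\n", HASH_INDEX(addr));
        printf("%lu\n", HASH_INDEX(addr + 0x100));
        return (0);
    }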
@@ -159 +160 @@ static void pool_cache_reclaim(struct po
 static int pool_catchup(struct pool *);
 static void pool_prime_page(struct pool *, caddr_t,
     struct pool_item_header *);
-static void *pool_page_alloc(unsigned long, int, int);
-static void pool_page_free(void *, unsigned long, int);
-#ifdef POOL_SUBPAGE
-static void *pool_subpage_alloc(unsigned long, int, int);
-static void pool_subpage_free(void *, unsigned long, int);
-#endif
+void *pool_allocator_alloc(struct pool *, int);
+void pool_allocator_free(struct pool *, void *);

 static void pool_print1(struct pool *, const char *,
     void (*)(const char *, ...));
@@ -338 +336 @@ pr_rmpage(struct pool *pp, struct pool_i
     if (pq) {
         TAILQ_INSERT_HEAD(pq, ph, ph_pagelist);
     } else {
-        (*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
+        pool_allocator_free(pp, ph->ph_page);
         if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
             LIST_REMOVE(ph, ph_hashlist);
             s = splhigh();
@@ -371 +369 @@ pr_rmpage(struct pool *pp, struct pool_i
  */
 void
 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
-    const char *wchan, size_t pagesz,
-    void *(*alloc)(unsigned long, int, int),
-    void (*release)(void *, unsigned long, int),
-    int mtype)
+    const char *wchan, struct pool_allocator *palloc)
 {
     int off, slack, i;

@@ -386 +381 @@ pool_init(struct pool *pp, size_t size,
         flags |= PR_LOGGING;
 #endif

-    /*
-     * Check arguments and construct default values.
-     */
-    if (!powerof2(pagesz))
-        panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);
-
-    if (alloc == NULL && release == NULL) {
-#ifdef POOL_SUBPAGE
-        alloc = pool_subpage_alloc;
-        release = pool_subpage_free;
-        pagesz = POOL_SUBPAGE;
-#else
-        alloc = pool_page_alloc;
-        release = pool_page_free;
-        pagesz = PAGE_SIZE;    /* Rounds to PAGE_SIZE anyhow. */
-#endif
-    } else if ((alloc != NULL && release != NULL) == 0) {
-        /* If you specify one, must specify both. */
-        panic("pool_init: must specify alloc and release together");
-    }
-#ifdef POOL_SUBPAGE
-    else if (alloc == pool_page_alloc_nointr &&
-        release == pool_page_free_nointr)
-        pagesz = POOL_SUBPAGE;
-#endif
-
-    if (pagesz == 0)
-        pagesz = PAGE_SIZE;
+#ifdef POOL_SUBPAGE
+    /*
+     * XXX We don't provide a real `nointr' back-end
+     * yet; all sub-pages come from a kmem back-end.
+     * maybe some day...
+     */
+    if (palloc == NULL) {
+        extern struct pool_allocator pool_allocator_kmem_subpage;
+        palloc = &pool_allocator_kmem_subpage;
+    }
+    /*
+     * We'll assume any user-specified back-end allocator
+     * will deal with sub-pages, or simply don't care.
+     */
+#else
+    if (palloc == NULL)
+        palloc = &pool_allocator_kmem;
+#endif /* POOL_SUBPAGE */
+    if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
+        if (palloc->pa_pagesz == 0) {
+#ifdef POOL_SUBPAGE
+            if (palloc == &pool_allocator_kmem)
+                palloc->pa_pagesz = PAGE_SIZE;
+            else
+                palloc->pa_pagesz = POOL_SUBPAGE;
+#else
+            palloc->pa_pagesz = PAGE_SIZE;
+#endif /* POOL_SUBPAGE */
+        }
+
+        TAILQ_INIT(&palloc->pa_list);
+
+        simple_lock_init(&palloc->pa_slock);
+        palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
+        palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
+        palloc->pa_flags |= PA_INITIALIZED;
+    }

     if (align == 0)
         align = ALIGN(1);
@@ -422 +426 @@ pool_init(struct pool *pp, size_t size,
         size = sizeof(struct pool_item);

     size = ALIGN(size);
-    if (size > pagesz)
+#ifdef DIAGNOSTIC
+    if (size > palloc->pa_pagesz)
         panic("pool_init: pool item size (%lu) too large",
             (u_long)size);
+#endif

     /*
      * Initialize the pool structure.
@@ -441 +447 @@ pool_init(struct pool *pp, size_t size,
     pp->pr_size = size;
     pp->pr_align = align;
     pp->pr_wchan = wchan;
-    pp->pr_mtype = mtype;
-    pp->pr_alloc = alloc;
-    pp->pr_free = release;
-    pp->pr_pagesz = pagesz;
-    pp->pr_pagemask = ~(pagesz - 1);
-    pp->pr_pageshift = ffs(pagesz) - 1;
+    pp->pr_alloc = palloc;
     pp->pr_nitems = 0;
     pp->pr_nout = 0;
     pp->pr_hardlimit = UINT_MAX;
@@ -455 +456 @@ pool_init(struct pool *pp, size_t size,
     pp->pr_hardlimit_ratecap.tv_usec = 0;
     pp->pr_hardlimit_warning_last.tv_sec = 0;
     pp->pr_hardlimit_warning_last.tv_usec = 0;
+    pp->pr_drain_hook = NULL;
+    pp->pr_drain_hook_arg = NULL;

     /*
      * Decide whether to put the page header off page to avoid
@@ -463 +466 @@ pool_init(struct pool *pp, size_t size,
      * with its header based on the page address.
      * We use 1/16 of the page size as the threshold (XXX: tune)
      */
-    if (pp->pr_size < pagesz/16) {
+    if (pp->pr_size < palloc->pa_pagesz/16) {
         /* Use the end of the page for the page header */
         pp->pr_roflags |= PR_PHINPAGE;
-        pp->pr_phoffset = off =
-            pagesz - ALIGN(sizeof(struct pool_item_header));
+        pp->pr_phoffset = off = palloc->pa_pagesz -
+            ALIGN(sizeof(struct pool_item_header));
     } else {
         /* The page header will be taken from our page header pool */
         pp->pr_phoffset = 0;
-        off = pagesz;
+        off = palloc->pa_pagesz;
         for (i = 0; i < PR_HASHTABSIZE; i++) {
             LIST_INIT(&pp->pr_hashtab[i]);
         }
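
The 1/16 threshold is easiest to see with numbers. A worked sketch, assuming a 4 KiB allocator page and a 40-byte, already-aligned struct pool_item_header (both values illustrative, not taken from this diff):

    pa_pagesz = 4096, so the threshold is 4096 / 16 = 256.

    pr_size = 128:  PR_PHINPAGE is set and pr_phoffset = off =
                    4096 - ALIGN(40) = 4056; items pack below the
                    in-page header and no phpool allocation is needed.

    pr_size = 512:  the header comes from phpool and off = 4096,
                    leaving the whole page for items.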
@@ -528 +531 @@ pool_init(struct pool *pp, size_t size,
     if (phpool.pr_size == 0) {
 #ifdef POOL_SUBPAGE
         pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
-            "phpool", PAGE_SIZE, pool_page_alloc, pool_page_free, 0);
+            "phpool", &pool_allocator_kmem);
         pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
-            PR_RECURSIVE, "psppool", PAGE_SIZE,
-            pool_page_alloc, pool_page_free, 0);
+            PR_RECURSIVE, "psppool", &pool_allocator_kmem);
 #else
         pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
-            0, "phpool", 0, 0, 0, 0);
+            0, "phpool", NULL);
 #endif
         pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
-            0, "pcgpool", 0, 0, 0, 0);
+            0, "pcgpool", NULL);
     }

     /* Insert into the list of all pools. */
     simple_lock(&pool_head_slock);
     TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
     simple_unlock(&pool_head_slock);

+    /* Insert this into the list of pools using this allocator. */
+    simple_lock(&palloc->pa_slock);
+    TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
+    simple_unlock(&palloc->pa_slock);
 }

 /*
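
Taken together, the pool_init() hunks replace four back-end parameters (pagesz, alloc, release, mtype) with a single allocator handle, as the phpool and pcgpool calls above already show. A before-and-after sketch for a hypothetical client pool (foo_pool and struct foo_item are illustrative names, not from this diff):

    /* 1.63.2.1: back-end passed piecewise; zeros select the defaults. */
    pool_init(&foo_pool, sizeof(struct foo_item), 0, 0, 0,
        "foopl", 0, 0, 0, 0);

    /* 1.68: one handle; NULL selects pool_allocator_kmem, and a pool
     * never touched from interrupt context may pass
     * &pool_allocator_nointr instead. */
    pool_init(&foo_pool, sizeof(struct foo_item), 0, 0, 0,
        "foopl", NULL);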
@@ -555 +562 @@ pool_destroy(struct pool *pp)
     struct pool_item_header *ph;
     struct pool_cache *pc;

+    /* Locking order: pool_allocator -> pool */
+    simple_lock(&pp->pr_alloc->pa_slock);
+    TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
+    simple_unlock(&pp->pr_alloc->pa_slock);
+
     /* Destroy all caches for this pool. */
     while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
         pool_cache_destroy(pc);
@@ -584 +596 @@ pool_destroy(struct pool *pp)
     if ((pp->pr_roflags & PR_LOGGING) != 0)
         free(pp->pr_log, M_TEMP);
 #endif
-
-    if (pp->pr_roflags & PR_FREEHEADER)
-        free(pp, M_POOL);
 }

+void
+pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
+{
+
+    /* XXX no locking -- must be used just after pool_init() */
+#ifdef DIAGNOSTIC
+    if (pp->pr_drain_hook != NULL)
+        panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
+#endif
+    pp->pr_drain_hook = fn;
+    pp->pr_drain_hook_arg = arg;
+}
+
 static __inline struct pool_item_header *
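
The new pool_set_drain_hook() registers a callback that the pool code invokes, with the pool unlocked, when memory runs short; see the pool_get() and pool_reclaim() hunks below for the call sites. A usage sketch; every foo_* name is hypothetical:

    /* The hook receives the registered argument plus the caller's
     * PR_WAITOK/PR_NOWAIT flags. */
    static void
    foo_drain(void *arg, int flags)
    {
        struct foo_softc *sc = arg;

        /* Return cached items via pool_put() so pool_get() can retry. */
        foo_trim_cache(sc);
    }

    pool_init(&foo_pool, sizeof(struct foo_item), 0, 0, 0,
        "foopl", NULL);
    pool_set_drain_hook(&foo_pool, foo_drain, sc);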
@@ -656 +678 @@ pool_get(struct pool *pp, int flags)
     }
 #endif
     if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
+        if (pp->pr_drain_hook != NULL) {
+            /*
+             * Since the drain hook is going to free things
+             * back to the pool, unlock, call the hook, re-lock,
+             * and check the hardlimit condition again.
+             */
+            pr_leave(pp);
+            simple_unlock(&pp->pr_slock);
+            (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
+            simple_lock(&pp->pr_slock);
+            pr_enter(pp, file, line);
+            if (pp->pr_nout < pp->pr_hardlimit)
+                goto startover;
+        }
+
         if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
             /*
              * XXX: A warning isn't logged in this case.  Should
@@ -676 +713 @@ pool_get(struct pool *pp, int flags)
             &pp->pr_hardlimit_ratecap))
             log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);

-        if (flags & PR_URGENT)
-            panic("pool_get: urgent");
-
         pp->pr_nfail++;

         pr_leave(pp);
@@ -709 +743 @@ pool_get(struct pool *pp, int flags)
          */
         pr_leave(pp);
         simple_unlock(&pp->pr_slock);
-        v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);
+        v = pool_allocator_alloc(pp, flags);
         if (__predict_true(v != NULL))
             ph = pool_alloc_item_header(pp, v, flags);
         simple_lock(&pp->pr_slock);
@@ -717 +751 @@ pool_get(struct pool *pp, int flags)

         if (__predict_false(v == NULL || ph == NULL)) {
             if (v != NULL)
-                (*pp->pr_free)(v, pp->pr_pagesz, pp->pr_mtype);
+                pool_allocator_free(pp, v);

             /*
              * We were unable to allocate a page or item
@@ -728 +762 @@ pool_get(struct pool *pp, int flags)
             if (pp->pr_curpage != NULL)
                 goto startover;

-            if (flags & PR_URGENT)
-                panic("pool_get: urgent");
-
             if ((flags & PR_WAITOK) == 0) {
                 pp->pr_nfail++;
                 pr_leave(pp);
@@ -741 +772 @@ pool_get(struct pool *pp, int flags)
             /*
              * Wait for items to be returned to this pool.
              *
-             * XXX: we actually want to wait just until
-             * the page allocator has memory again.  Depending
-             * on this pool's usage, we might get stuck here
-             * for a long time.
-             *
              * XXX: maybe we should wake up once a second and
              * try again?
              */
             pp->pr_flags |= PR_WANTED;
+            /* PA_WANT is already set on the allocator. */
             pr_leave(pp);
             ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
             pr_enter(pp, file, line);
@@ -777 +804 @@ pool_get(struct pool *pp, int flags)
             pp->pr_wchan, pp->pr_nitems);
         panic("pool_get: nitems inconsistent\n");
     }
+#endif

+#ifdef POOL_DIAGNOSTIC
     pr_log(pp, v, PRLOG_GET, file, line);
+#endif

+#ifdef DIAGNOSTIC
     if (__predict_false(pi->pi_magic != PI_MAGIC)) {
         pr_printlog(pp, pi, printf);
         panic("pool_get(%s): free list modified: magic=%x; page %p;"
@@ -863 +894 @@ pool_do_put(struct pool *pp, void *v)

     LOCK_ASSERT(simple_lock_held(&pp->pr_slock));

-    page = (caddr_t)((u_long)v & pp->pr_pagemask);
+    page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);

 #ifdef DIAGNOSTIC
     if (__predict_false(pp->pr_nout == 0)) {
@@ -1031 +1062 @@ pool_prime(struct pool *pp, int n)

     while (newpages-- > 0) {
         simple_unlock(&pp->pr_slock);
-        cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype);
+        cp = pool_allocator_alloc(pp, PR_NOWAIT);
         if (__predict_true(cp != NULL))
             ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
         simple_lock(&pp->pr_slock);
@@ -1039 +1070 @@ pool_prime(struct pool *pp, int n)
         if (__predict_false(cp == NULL || ph == NULL)) {
             error = ENOMEM;
             if (cp != NULL)
-                (*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
+                pool_allocator_free(pp, cp);
             break;
         }

@@ -1069 +1100 @@ pool_prime_page(struct pool *pp, caddr_t
     unsigned int ioff = pp->pr_itemoffset;
     int n;

-    if (((u_long)cp & (pp->pr_pagesz - 1)) != 0)
+#ifdef DIAGNOSTIC
+    if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
         panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
+#endif

     if ((pp->pr_roflags & PR_PHINPAGE) == 0)
         LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
@@ -1165 +1198 @@ pool_catchup(struct pool *pp)
      * the pool descriptor?
      */
     simple_unlock(&pp->pr_slock);
-    cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype);
+    cp = pool_allocator_alloc(pp, PR_NOWAIT);
     if (__predict_true(cp != NULL))
         ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
     simple_lock(&pp->pr_slock);
     if (__predict_false(cp == NULL || ph == NULL)) {
         if (cp != NULL)
-            (*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
+            pool_allocator_free(pp, cp);
         error = ENOMEM;
         break;
     }
@@ -1243 +1276 @@ pool_sethardlimit(struct pool *pp, int n
 }

 /*
- * Default page allocator.
- */
-static void *
-pool_page_alloc(unsigned long sz, int flags, int mtype)
-{
-    boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
-
-    return ((void *)uvm_km_alloc_poolpage(waitok));
-}
-
-static void
-pool_page_free(void *v, unsigned long sz, int mtype)
-{
-
-    uvm_km_free_poolpage((vaddr_t)v);
-}
-
-#ifdef POOL_SUBPAGE
-/*
- * Sub-page allocator, for machines with large hardware pages.
- */
-static void *
-pool_subpage_alloc(unsigned long sz, int flags, int mtype)
-{
-
-    return pool_get(&psppool, flags);
-}
-
-static void
-pool_subpage_free(void *v, unsigned long sz, int mtype)
-{
-
-    pool_put(&psppool, v);
-}
-#endif
-
-#ifdef POOL_SUBPAGE
-/* We don't provide a real nointr allocator.  Maybe later. */
-void *
-pool_page_alloc_nointr(unsigned long sz, int flags, int mtype)
-{
-
-    return pool_subpage_alloc(sz, flags, mtype);
-}
-
-void
-pool_page_free_nointr(void *v, unsigned long sz, int mtype)
-{
-
-    pool_subpage_free(v, sz, mtype);
-}
-#else
-/*
- * Alternate pool page allocator for pools that know they will
- * never be accessed in interrupt context.
- */
-void *
-pool_page_alloc_nointr(unsigned long sz, int flags, int mtype)
-{
-    boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
-
-    return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
-        waitok));
-}
-
-void
-pool_page_free_nointr(void *v, unsigned long sz, int mtype)
-{
-
-    uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
-}
-#endif
-
-/*
  * Release all complete pages that have not been used recently.
  */
-void
+int
 #ifdef POOL_DIAGNOSTIC
 _pool_reclaim(struct pool *pp, const char *file, long line)
 #else
@@ -1334 +1292 @@ pool_reclaim(struct pool *pp)
     int s;

     if (pp->pr_roflags & PR_STATIC)
-        return;
+        return (0);
+
+    if (pp->pr_drain_hook != NULL) {
+        /*
+         * The drain hook must be called with the pool unlocked.
+         */
+        (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
+    }

     if (simple_lock_try(&pp->pr_slock) == 0)
-        return;
+        return (0);
     pr_enter(pp, file, line);

     TAILQ_INIT(&pq);

     /*
Line 1344 pool_reclaim(struct pool *pp) |
|
|
|
pr_leave(pp); |
pr_leave(pp); |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
if (TAILQ_EMPTY(&pq)) { |
if (TAILQ_EMPTY(&pq)) |
return; |
return (0); |
} |
|
while ((ph = TAILQ_FIRST(&pq)) != NULL) { |
while ((ph = TAILQ_FIRST(&pq)) != NULL) { |
TAILQ_REMOVE(&pq, ph, ph_pagelist); |
TAILQ_REMOVE(&pq, ph, ph_pagelist); |
(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype); |
pool_allocator_free(pp, ph->ph_page); |
if (pp->pr_roflags & PR_PHINPAGE) { |
if (pp->pr_roflags & PR_PHINPAGE) { |
continue; |
continue; |
} |
} |
@@ -1392 +1358 @@ pool_reclaim(struct pool *pp)
         pool_put(&phpool, ph);
         splx(s);
     }
-}
+
+    return (1);
+}

 /*
  * Drain pools, one at a time.
@@ -1421 +1388 @@ pool_drain(void *arg)
     splx(s);
 }

 /*
  * Diagnostic helpers.
  */
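
Because pool_reclaim() now returns 1 when it actually released a page and 0 otherwise (see the hunks above), a caller can stop scanning as soon as one pool gives something back; pool_allocator_alloc() below does exactly this over an allocator's pa_list. A minimal, hypothetical caller sketch:

    struct pool *pp;
    int freed = 0;

    TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
        if ((freed = pool_reclaim(pp)) != 0)
            break;  /* a complete page went back to the back-end */
    }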
@@ -1497 +1463 @@ pool_print1(struct pool *pp, const char
     (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
         pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
         pp->pr_roflags);
-    (*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype);
-    (*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free);
+    (*pr)("\talloc %p\n", pp->pr_alloc);
     (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
         pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
     (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
@@ -1579 +1544 @@ pool_chk(struct pool *pp, const char *la
     int n;
     caddr_t page;

-    page = (caddr_t)((u_long)ph & pp->pr_pagemask);
+    page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
     if (page != ph->ph_page &&
         (pp->pr_roflags & PR_PHINPAGE) != 0) {
         if (label != NULL)
@@ -1608 +1573 @@ pool_chk(struct pool *pp, const char *la
             panic("pool");
         }
 #endif
-        page = (caddr_t)((u_long)pi & pp->pr_pagemask);
+        page =
+            (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
         if (page == ph->ph_page)
             continue;

@@ -1908 +1874 @@ pool_cache_reclaim(struct pool_cache *pc
     pool_cache_do_invalidate(pc, 1, pool_do_put);
     simple_unlock(&pc->pc_slock);
 }
+
+/*
+ * Pool backend allocators.
+ *
+ * Each pool has a backend allocator that handles allocation, deallocation,
+ * and any additional draining that might be needed.
+ *
+ * We provide two standard allocators:
+ *
+ *    pool_allocator_kmem - the default when no allocator is specified
+ *
+ *    pool_allocator_nointr - used for pools that will not be accessed
+ *    in interrupt context.
+ */
+void *pool_page_alloc(struct pool *, int);
+void pool_page_free(struct pool *, void *);
+
+struct pool_allocator pool_allocator_kmem = {
+    pool_page_alloc, pool_page_free, 0,
+};
+
+void *pool_page_alloc_nointr(struct pool *, int);
+void pool_page_free_nointr(struct pool *, void *);
+
+struct pool_allocator pool_allocator_nointr = {
+    pool_page_alloc_nointr, pool_page_free_nointr, 0,
+};
+
+#ifdef POOL_SUBPAGE
+void *pool_subpage_alloc(struct pool *, int);
+void pool_subpage_free(struct pool *, void *);
+
+struct pool_allocator pool_allocator_kmem_subpage = {
+    pool_subpage_alloc, pool_subpage_free, 0,
+};
+#endif /* POOL_SUBPAGE */
+
+/*
+ * We have at least three different resources for the same allocation and
+ * each resource can be depleted.  First, we have the ready elements in the
+ * pool.  Then we have the resource (typically a vm_map) for this allocator.
+ * Finally, we have physical memory.  Waiting for any of these can be
+ * unnecessary when any other is freed, but the kernel doesn't support
+ * sleeping on multiple wait channels, so we have to employ another strategy.
+ *
+ * The caller sleeps on the pool (so that it can be awakened when an item
+ * is returned to the pool), but we set PA_WANT on the allocator.  When a
+ * page is returned to the allocator and PA_WANT is set, pool_allocator_free
+ * will wake up all sleeping pools belonging to this allocator.
+ *
+ * XXX Thundering herd.
+ */
+void *
+pool_allocator_alloc(struct pool *org, int flags)
+{
+    struct pool_allocator *pa = org->pr_alloc;
+    struct pool *pp, *start;
+    int s, freed;
+    void *res;
+
+    do {
+        if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
+            return (res);
+        if ((flags & PR_WAITOK) == 0) {
+            /*
+             * We only run the drain hook here if PR_NOWAIT.
+             * In other cases, the hook will be run in
+             * pool_reclaim().
+             */
+            if (org->pr_drain_hook != NULL) {
+                (*org->pr_drain_hook)(org->pr_drain_hook_arg,
+                    flags);
+                if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
+                    return (res);
+            }
+            break;
+        }
+
+        /*
+         * Drain all pools, except "org", that use this
+         * allocator.  We do this to reclaim VA space.
+         * pa_alloc is responsible for waiting for
+         * physical memory.
+         *
+         * XXX We risk looping forever if someone calls
+         * pool_destroy on "start".  But there is no other
+         * way to have potentially sleeping pool_reclaim,
+         * non-sleeping locks on pool_allocator, and some
+         * stirring of drained pools in the allocator.
+         *
+         * XXX Maybe we should use pool_head_slock for locking
+         * the allocators?
+         */
+        freed = 0;
+
+        s = splvm();
+        simple_lock(&pa->pa_slock);
+        pp = start = TAILQ_FIRST(&pa->pa_list);
+        do {
+            TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
+            TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
+            if (pp == org)
+                continue;
+            simple_unlock(&pa->pa_slock);
+            freed = pool_reclaim(pp);
+            simple_lock(&pa->pa_slock);
+        } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
+            freed == 0);
+
+        if (freed == 0) {
+            /*
+             * We set PA_WANT here, the caller will most likely
+             * sleep waiting for pages (if not, this won't hurt
+             * that much), and there is no way to set this in
+             * the caller without violating locking order.
+             */
+            pa->pa_flags |= PA_WANT;
+        }
+        simple_unlock(&pa->pa_slock);
+        splx(s);
+    } while (freed);
+    return (NULL);
+}
+
+void
+pool_allocator_free(struct pool *pp, void *v)
+{
+    struct pool_allocator *pa = pp->pr_alloc;
+    int s;
+
+    (*pa->pa_free)(pp, v);
+
+    s = splvm();
+    simple_lock(&pa->pa_slock);
+    if ((pa->pa_flags & PA_WANT) == 0) {
+        simple_unlock(&pa->pa_slock);
+        splx(s);
+        return;
+    }
+
+    TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
+        simple_lock(&pp->pr_slock);
+        if ((pp->pr_flags & PR_WANTED) != 0) {
+            pp->pr_flags &= ~PR_WANTED;
+            wakeup(pp);
+        }
+        simple_unlock(&pp->pr_slock);
+    }
+    pa->pa_flags &= ~PA_WANT;
+    simple_unlock(&pa->pa_slock);
+    splx(s);
+}
+
+void *
+pool_page_alloc(struct pool *pp, int flags)
+{
+    boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
+
+    return ((void *) uvm_km_alloc_poolpage(waitok));
+}
+
+void
+pool_page_free(struct pool *pp, void *v)
+{
+
+    uvm_km_free_poolpage((vaddr_t) v);
+}
+
+#ifdef POOL_SUBPAGE
+/* Sub-page allocator, for machines with large hardware pages. */
+void *
+pool_subpage_alloc(struct pool *pp, int flags)
+{
+
+    return (pool_get(&psppool, flags));
+}
+
+void
+pool_subpage_free(struct pool *pp, void *v)
+{
+
+    pool_put(&psppool, v);
+}
+
+/* We don't provide a real nointr allocator.  Maybe later. */
+void *
+pool_page_alloc_nointr(struct pool *pp, int flags)
+{
+
+    return (pool_subpage_alloc(pp, flags));
+}
+
+void
+pool_page_free_nointr(struct pool *pp, void *v)
+{
+
+    pool_subpage_free(pp, v);
+}
+#else
+void *
+pool_page_alloc_nointr(struct pool *pp, int flags)
+{
+    boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
+
+    return ((void *) uvm_km_alloc_poolpage1(kernel_map,
+        uvm.kernel_object, waitok));
+}
+
+void
+pool_page_free_nointr(struct pool *pp, void *v)
+{
+
+    uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
+}
+#endif /* POOL_SUBPAGE */
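
A client can also supply its own back-end by filling in the first three fields of a struct pool_allocator (pa_alloc, pa_free, pa_pagesz); pool_init() initializes the remaining fields lazily, as the PA_INITIALIZED block above shows. A sketch that wraps the standard page functions with a page counter; every foo_* name is hypothetical:

    static unsigned int foo_npages;     /* pages currently held */

    void *
    foo_alloc(struct pool *pp, int flags)
    {
        boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
        void *v;

        if ((v = (void *) uvm_km_alloc_poolpage(waitok)) != NULL)
            foo_npages++;
        return (v);
    }

    void
    foo_free(struct pool *pp, void *v)
    {

        foo_npages--;
        uvm_km_free_poolpage((vaddr_t) v);
    }

    /* pa_pagesz = 0 defaults to PAGE_SIZE inside pool_init(). */
    struct pool_allocator foo_allocator = {
        foo_alloc, foo_free, 0,
    };

    pool_init(&foo_pool, sizeof(struct foo_item), 0, 0, 0,
        "foopl", &foo_allocator);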