--- version 1.106, 2005/10/16 02:55:18
+++ version 1.117, 2006/05/25 14:27:28
@@ -82 +82 @@ static struct pool phpool[PHPOOL_MAX];
 static struct pool psppool;
 #endif

+static SLIST_HEAD(, pool_allocator) pa_deferinitq =
+    SLIST_HEAD_INITIALIZER(pa_deferinitq);
+
 static void *pool_page_alloc_meta(struct pool *, int);
 static void pool_page_free_meta(struct pool *, void *);

 /* allocator for pool metadata */
 static struct pool_allocator pool_allocator_meta = {
-    pool_page_alloc_meta, pool_page_free_meta
+    pool_page_alloc_meta, pool_page_free_meta,
+    .pa_backingmapptr = &kmem_map,
 };

 /* # of seconds to retain page after last use */
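
The rewritten initializer above mixes positional members with a C99 designated initializer, so pa_backingmapptr can be set without naming every field in between. A minimal user-space sketch of the same syntax; the struct and all names below are invented for illustration, not the kernel's:

    #include <stdio.h>

    struct allocator {
        void *(*pa_alloc)(unsigned);
        void  (*pa_free)(void *);
        unsigned pa_pagesz;         /* skipped below, stays 0 */
        int     *pa_backingmapptr;
    };

    static int kmem_map_stub;

    static struct allocator meta = {
        NULL, NULL,                         /* positional members */
        .pa_backingmapptr = &kmem_map_stub, /* designated member */
    };

    int main(void)
    {
        printf("pagesz=%u ptr=%p\n", meta.pa_pagesz,
            (void *)meta.pa_backingmapptr);
        return 0;
    }
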
@@ -183 +187 @@ pool_prime_page(...)
         struct pool_item_header *);
 static void pool_update_curpage(struct pool *);

-void *pool_allocator_alloc(struct pool *, int);
-void pool_allocator_free(struct pool *, void *);
+static int pool_grow(struct pool *, int);
+static void *pool_allocator_alloc(struct pool *, int);
+static void pool_allocator_free(struct pool *, void *);

 static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
     void (*)(const char *, ...));
@@ -214 +219 @@ struct pool_log {
 int pool_logsize = POOL_LOGSIZE;

-static __inline void
+static inline void
 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
 {
     int n = pp->pr_curlogentry;
@@ -267 +272 @@ pr_printlog(...)
     }
 }

-static __inline void
+static inline void
 pr_enter(struct pool *pp, const char *file, long line)
 {
@@ -283 +288 @@ pr_enter(...)
     pp->pr_entered_line = line;
 }

-static __inline void
+static inline void
 pr_leave(struct pool *pp)
 {
@@ -296 +301 @@ pr_leave(...)
     pp->pr_entered_line = 0;
 }

-static __inline void
+static inline void
 pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
 {
@@ -312 +317 @@ pr_enter_check(...)
 #define pr_enter_check(pp, pr)
 #endif /* POOL_DIAGNOSTIC */

-static __inline int
+static inline int
 pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
     const void *v)
 {
@@ -331 +336 @@ pr_item_notouch_index(...)
 #define PR_INDEX_USED ((pool_item_freelist_t)-1)
 #define PR_INDEX_EOL  ((pool_item_freelist_t)-2)

-static __inline void
+static inline void
 pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
     void *obj)
 {
@@ -343 +348 @@ pr_item_notouch_put(...)
     ph->ph_firstfree = idx;
 }

-static __inline void *
+static inline void *
 pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
 {
     int idx = ph->ph_firstfree;
@@ -356 +361 @@ pr_item_notouch_get(...)
     return ph->ph_page + ph->ph_off + idx * pp->pr_size;
 }

-static __inline int
+static inline int
 phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
 {
     if (a->ph_page < b->ph_page)
@@ -373 +378 @@ SPLAY_GENERATE(phtree, pool_item_header,
 /*
  * Return the pool page header based on page address.
  */
-static __inline struct pool_item_header *
+static inline struct pool_item_header *
 pr_find_pagehead(struct pool *pp, caddr_t page)
 {
     struct pool_item_header *ph, tmp;
@@ -406 +411 @@ pr_pagelist_free(...)
 /*
  * Remove a page from the pool.
  */
-static __inline void
+static inline void
 pr_rmpage(struct pool *pp, struct pool_item_header *ph,
     struct pool_pagelist *pq)
 {
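
The run of hunks above is a mechanical sweep: the compiler-specific __inline spelling becomes the standard C99 inline keyword; nothing else in these functions changes. For reference, a trivial standalone example of the form the file now uses:

    #include <stdio.h>

    /* C99 `inline` replaces the old `__inline` extension. */
    static inline int
    square(int x)
    {
        return x * x;
    }

    int main(void)
    {
        printf("%d\n", square(7));  /* prints 49 */
        return 0;
    }
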
@@ -442 +447 @@ pr_rmpage(...)
     pool_update_curpage(pp);
 }

+static boolean_t
+pa_starved_p(struct pool_allocator *pa)
+{
+
+    if (pa->pa_backingmap != NULL) {
+        return vm_map_starved_p(pa->pa_backingmap);
+    }
+    return FALSE;
+}
+
+static int
+pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
+{
+    struct pool *pp = obj;
+    struct pool_allocator *pa = pp->pr_alloc;
+
+    KASSERT(&pp->pr_reclaimerentry == ce);
+    pool_reclaim(pp);
+    if (!pa_starved_p(pa)) {
+        return CALLBACK_CHAIN_ABORT;
+    }
+    return CALLBACK_CHAIN_CONTINUE;
+}
+
+static void
+pool_reclaim_register(struct pool *pp)
+{
+    struct vm_map *map = pp->pr_alloc->pa_backingmap;
+    int s;
+
+    if (map == NULL) {
+        return;
+    }
+
+    s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
+    callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback,
+        &pp->pr_reclaimerentry, pp, pool_reclaim_callback);
+    splx(s);
+}
+
+static void
+pool_reclaim_unregister(struct pool *pp)
+{
+    struct vm_map *map = pp->pr_alloc->pa_backingmap;
+    int s;
+
+    if (map == NULL) {
+        return;
+    }
+
+    s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
+    callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback,
+        &pp->pr_reclaimerentry);
+    splx(s);
+}
+
+static void
+pa_reclaim_register(struct pool_allocator *pa)
+{
+    struct vm_map *map = *pa->pa_backingmapptr;
+    struct pool *pp;
+
+    KASSERT(pa->pa_backingmap == NULL);
+    if (map == NULL) {
+        SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q);
+        return;
+    }
+    pa->pa_backingmap = map;
+    TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
+        pool_reclaim_register(pp);
+    }
+}
+
 /*
  * Initialize all the pools listed in the "pools" link set.
  */
 void
-link_pool_init(void)
+pool_subsystem_init(void)
 {
+    struct pool_allocator *pa;
     __link_set_decl(pools, struct link_pool_init);
     struct link_pool_init * const *pi;

@@ -455 +534 @@ link_pool_init(...)
         pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
             (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
             (*pi)->palloc);
+
+    while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
+        KASSERT(pa->pa_backingmapptr != NULL);
+        KASSERT(*pa->pa_backingmapptr != NULL);
+        SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
+        pa_reclaim_register(pa);
+    }
 }
|
|
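pa_reclaim_register() above can run before the backing vm_map exists; in that case the allocator is parked on pa_deferinitq, and pool_subsystem_init() flushes the queue once the maps are up. A user-space sketch of this defer-then-flush pattern using the same <sys/queue.h> macros; all names in the sketch are illustrative:

    #include <stdio.h>
    #include <sys/queue.h>

    struct client {
        const char *name;
        SLIST_ENTRY(client) q;
    };

    static SLIST_HEAD(, client) deferq = SLIST_HEAD_INITIALIZER(deferq);
    static int resource_ready;

    static void
    client_register(struct client *c)
    {
        if (!resource_ready) {
            /* Too early: park it, like pa_deferinitq above. */
            SLIST_INSERT_HEAD(&deferq, c, q);
            return;
        }
        printf("registered %s\n", c->name);
    }

    int main(void)
    {
        struct client a = { "a" }, b = { "b" };
        struct client *c;

        client_register(&a);
        client_register(&b);

        /* Later, once the backing resource exists (the
         * pool_subsystem_init() moment). */
        resource_ready = 1;
        while ((c = SLIST_FIRST(&deferq)) != NULL) {
            SLIST_REMOVE_HEAD(&deferq, q);
            client_register(c);
        }
        return 0;
    }
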
@@ pool_init(...) @@
 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
     const char *wchan, struct pool_allocator *palloc)
 {
-    int off, slack;
+#ifdef DEBUG
+    struct pool *pp1;
+#endif
     size_t trysize, phsize;
-    int s;
+    int off, slack, s;

     KASSERT((1UL << (CHAR_BIT * sizeof(pool_item_freelist_t))) - 2 >=
         PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1));

+#ifdef DEBUG
+    /*
+     * Check that the pool hasn't already been initialised and
+     * added to the list of all pools.
+     */
+    LIST_FOREACH(pp1, &pool_head, pr_poollist) {
+        if (pp == pp1)
+            panic("pool_init: pool %s already initialised",
+                wchan);
+    }
+#endif
+
 #ifdef POOL_DIAGNOSTIC
     /*
      * Always log if POOL_DIAGNOSTIC is defined.
@@ -482 +582 @@ pool_init(...)
         flags |= PR_LOGGING;
 #endif

-#ifdef POOL_SUBPAGE
-    /*
-     * XXX We don't provide a real `nointr' back-end
-     * yet; all sub-pages come from a kmem back-end.
-     * maybe some day...
-     */
-    if (palloc == NULL) {
-        extern struct pool_allocator pool_allocator_kmem_subpage;
-        palloc = &pool_allocator_kmem_subpage;
-    }
-    /*
-     * We'll assume any user-specified back-end allocator
-     * will deal with sub-pages, or simply don't care.
-     */
-#else
     if (palloc == NULL)
         palloc = &pool_allocator_kmem;
+#ifdef POOL_SUBPAGE
+    if (size > palloc->pa_pagesz) {
+        if (palloc == &pool_allocator_kmem)
+            palloc = &pool_allocator_kmem_fullpage;
+        else if (palloc == &pool_allocator_nointr)
+            palloc = &pool_allocator_nointr_fullpage;
+    }
 #endif /* POOL_SUBPAGE */
     if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
-        if (palloc->pa_pagesz == 0) {
-#ifdef POOL_SUBPAGE
-            if (palloc == &pool_allocator_kmem)
-                palloc->pa_pagesz = PAGE_SIZE;
-            else
-                palloc->pa_pagesz = POOL_SUBPAGE;
-#else
+        if (palloc->pa_pagesz == 0)
             palloc->pa_pagesz = PAGE_SIZE;
-#endif /* POOL_SUBPAGE */
-        }

         TAILQ_INIT(&palloc->pa_list);

         simple_lock_init(&palloc->pa_slock);
         palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
         palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
+
+        if (palloc->pa_backingmapptr != NULL) {
+            pa_reclaim_register(palloc);
+        }
         palloc->pa_flags |= PA_INITIALIZED;
     }
@@ -698 +786 @@ pool_init(...)
     TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
     simple_unlock(&palloc->pa_slock);
     splx(s);
+
+    pool_reclaim_register(pp);
 }
@@ -718 +807 @@ pool_destroy(...)
     simple_unlock(&pool_head_slock);

     /* Remove this pool from its allocator's list of pools. */
+    pool_reclaim_unregister(pp);
     s = splvm();
     simple_lock(&pp->pr_alloc->pa_slock);
     TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
@@ -883 +973 @@ pool_get(...)
      * has no items in its bucket.
      */
     if ((ph = pp->pr_curpage) == NULL) {
+        int error;
+
 #ifdef DIAGNOSTIC
         if (pp->pr_nitems != 0) {
             simple_unlock(&pp->pr_slock);
@@ -898 +990 @@ pool_get(...)
          * may block.
          */
         pr_leave(pp);
-        simple_unlock(&pp->pr_slock);
-        v = pool_allocator_alloc(pp, flags);
-        if (__predict_true(v != NULL))
-            ph = pool_alloc_item_header(pp, v, flags);
-
-        if (__predict_false(v == NULL || ph == NULL)) {
-            if (v != NULL)
-                pool_allocator_free(pp, v);
-
-            simple_lock(&pp->pr_slock);
-            pr_enter(pp, file, line);
-
+        error = pool_grow(pp, flags);
+        pr_enter(pp, file, line);
+        if (error != 0) {
             /*
              * We were unable to allocate a page or item
              * header, but we released the lock during
@@ -919 +1002 @@ pool_get(...)
             if (pp->pr_curpage != NULL)
                 goto startover;

-            if ((flags & PR_WAITOK) == 0) {
             pp->pr_nfail++;
-                pr_leave(pp);
-                simple_unlock(&pp->pr_slock);
-                return (NULL);
-            }
-
-            /*
-             * Wait for items to be returned to this pool.
-             *
-             * XXX: maybe we should wake up once a second and
-             * try again?
-             */
-            pp->pr_flags |= PR_WANTED;
-            /* PA_WANTED is already set on the allocator. */
             pr_leave(pp);
-            ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
-            pr_enter(pp, file, line);
-            goto startover;
+            simple_unlock(&pp->pr_slock);
+            return (NULL);
         }

-        /* We have more memory; add it to the pool */
-        simple_lock(&pp->pr_slock);
-        pr_enter(pp, file, line);
-        pool_prime_page(pp, v, ph);
-        pp->pr_npagealloc++;
-
         /* Start the allocation process over. */
         goto startover;
     }
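
After this rework, pool_get() no longer open-codes page allocation and no longer sleeps on PR_WANTED here: it calls pool_grow() and, on success, branches back to startover to re-examine the pool. Stripped of locking and item-header bookkeeping, the control flow looks roughly like this sketch (not the kernel code):

    #include <stdio.h>

    static int nitems;              /* items currently available */

    static int
    grow(void)                      /* stands in for pool_grow() */
    {
        static int pages_left = 2;

        if (pages_left-- <= 0)
            return -1;              /* ENOMEM in the real code */
        nitems += 4;                /* one page's worth of items */
        return 0;
    }

    static int
    get(void)                       /* stands in for pool_get() */
    {
    startover:
        if (nitems == 0) {
            if (grow() != 0)
                return -1;          /* the pr_nfail++ path */
            goto startover;         /* re-examine pool state */
        }
        nitems--;
        return 0;
    }

    int main(void)
    {
        int served = 0;

        while (get() == 0)
            served++;
        printf("served %d items\n", served);    /* prints 8 */
        return 0;
    }
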
@@ -1033 +1095 @@ pool_get(...)
     }

     pp->pr_nget++;
+    pr_leave(pp);

     /*
      * If we have a low water mark and we are now below that low
@@ -1046 +1109 @@ pool_get(...)
      */
     }

-    pr_leave(pp);
     simple_unlock(&pp->pr_slock);
     return (v);
 }
@@ -1143 +1205 @@ pool_do_put(...)
         pp->pr_nidle++;
         if (pp->pr_npages > pp->pr_minpages &&
             (pp->pr_npages > pp->pr_maxpages ||
-            (pp->pr_alloc->pa_flags & PA_WANT) != 0)) {
+            pa_starved_p(pp->pr_alloc))) {
             pr_rmpage(pp, ph, pq);
         } else {
             LIST_REMOVE(ph, ph_pagelist);
@@ -1220 +1282 @@ pool_put(...)
 #endif

 /*
+ * pool_grow: grow a pool by a page.
+ *
+ * => called with pool locked.
+ * => unlock and relock the pool.
+ * => return with pool locked.
+ */
+
+static int
+pool_grow(struct pool *pp, int flags)
+{
+    struct pool_item_header *ph = NULL;
+    char *cp;
+
+    simple_unlock(&pp->pr_slock);
+    cp = pool_allocator_alloc(pp, flags);
+    if (__predict_true(cp != NULL)) {
+        ph = pool_alloc_item_header(pp, cp, flags);
+    }
+    if (__predict_false(cp == NULL || ph == NULL)) {
+        if (cp != NULL) {
+            pool_allocator_free(pp, cp);
+        }
+        simple_lock(&pp->pr_slock);
+        return ENOMEM;
+    }
+
+    simple_lock(&pp->pr_slock);
+    pool_prime_page(pp, cp, ph);
+    pp->pr_npagealloc++;
+    return 0;
+}
+
+/*
  * Add N items to the pool.
  */
 int
 pool_prime(struct pool *pp, int n)
 {
-    struct pool_item_header *ph = NULL;
-    caddr_t cp;
     int newpages;
+    int error = 0;

     simple_lock(&pp->pr_slock);

     newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

     while (newpages-- > 0) {
-        simple_unlock(&pp->pr_slock);
-        cp = pool_allocator_alloc(pp, PR_NOWAIT);
-        if (__predict_true(cp != NULL))
-            ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
-
-        if (__predict_false(cp == NULL || ph == NULL)) {
-            if (cp != NULL)
-                pool_allocator_free(pp, cp);
-            simple_lock(&pp->pr_slock);
+        error = pool_grow(pp, PR_NOWAIT);
+        if (error) {
             break;
         }
-
-        simple_lock(&pp->pr_slock);
-        pool_prime_page(pp, cp, ph);
-        pp->pr_npagealloc++;
         pp->pr_minpages++;
     }
@@ -1256 +1339 @@ pool_prime(...)
         pp->pr_maxpages = pp->pr_minpages + 1;  /* XXX */

     simple_unlock(&pp->pr_slock);
-    return (0);
+    return error;
 }
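
pool_prime() converts an item count into whole pages with roundup(n, itemsperpage) / itemsperpage, which is just ceiling division. A quick standalone check, with roundup() written out as NetBSD's sys/param.h defines it:

    #include <stdio.h>

    #define roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))

    int main(void)
    {
        int itemsperpage = 42;

        /* Priming 100 items at 42 items/page needs 3 pages. */
        printf("%d\n", roundup(100, itemsperpage) / itemsperpage);
        return 0;
    }
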
@@ -1361 +1444 @@ pool_prime_page(...)
 static int
 pool_catchup(struct pool *pp)
 {
-    struct pool_item_header *ph = NULL;
-    caddr_t cp;
     int error = 0;

     while (POOL_NEEDS_CATCHUP(pp)) {
-        /*
-         * Call the page back-end allocator for more memory.
-         *
-         * XXX: We never wait, so should we bother unlocking
-         * the pool descriptor?
-         */
-        simple_unlock(&pp->pr_slock);
-        cp = pool_allocator_alloc(pp, PR_NOWAIT);
-        if (__predict_true(cp != NULL))
-            ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
-        if (__predict_false(cp == NULL || ph == NULL)) {
-            if (cp != NULL)
-                pool_allocator_free(pp, cp);
-            error = ENOMEM;
-            simple_lock(&pp->pr_slock);
+        error = pool_grow(pp, PR_NOWAIT);
+        if (error) {
             break;
         }
-        simple_lock(&pp->pr_slock);
-        pool_prime_page(pp, cp, ph);
-        pp->pr_npagealloc++;
     }

-    return (error);
+    return error;
 }

 static void
@@ -1510 +1574 @@ pool_reclaim(...)

         KASSERT(ph->ph_nmissing == 0);
         timersub(&curtime, &ph->ph_time, &diff);
-        if (diff.tv_sec < pool_inactive_time)
+        if (diff.tv_sec < pool_inactive_time
+            && !pa_starved_p(pp->pr_alloc))
             continue;
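
The rewritten test keeps an idle page only while it is both young and the backing map is not starved; starvation now forces reclaim regardless of the page's age. A standalone truth-table check of that condition (values illustrative):

    #include <stdio.h>

    static int
    keep_page(long idle_sec, long inactive_time, int starved)
    {
        /* mirrors: diff.tv_sec < pool_inactive_time && !pa_starved_p() */
        return idle_sec < inactive_time && !starved;
    }

    int main(void)
    {
        printf("young, not starved -> keep=%d\n", keep_page(2, 10, 0));  /* 1 */
        printf("young, starved     -> keep=%d\n", keep_page(2, 10, 1));  /* 0 */
        printf("old,   not starved -> keep=%d\n", keep_page(20, 10, 0)); /* 0 */
        return 0;
    }
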
@@ -1556 +1621 @@ pool_drain(...)
         drainpp = LIST_NEXT(pp, pr_poollist);
     }
     simple_unlock(&pool_head_slock);
-    pool_reclaim(pp);
+    if (pp)
+        pool_reclaim(pp);
     splx(s);
 }
@@ -1581 +1647 @@ pool_print(...)
 }

 void
+pool_printall(const char *modif, void (*pr)(const char *, ...))
+{
+    struct pool *pp;
+
+    if (simple_lock_try(&pool_head_slock) == 0) {
+        (*pr)("WARNING: pool_head_slock is locked\n");
+    } else {
+        simple_unlock(&pool_head_slock);
+    }
+
+    LIST_FOREACH(pp, &pool_head, pr_poollist) {
+        pool_printit(pp, modif, pr);
+    }
+}
+
+void
 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
 {
+    int didlock = 0;
+
     if (pp == NULL) {
         (*pr)("Must specify a pool to print.\n");
@@ -1602 +1683 @@ pool_printit(...)
     if (simple_lock_try(&pp->pr_slock) == 0)
         (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
     else
-        simple_unlock(&pp->pr_slock);
+        didlock = 1;

     pool_print1(pp, modif, pr);
+
+    if (didlock)
+        simple_unlock(&pp->pr_slock);
 }

 static void
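
pool_printit() previously dropped the lock immediately after a successful trylock and printed unprotected; it now remembers whether it took the lock and holds it across the dump. A sketch of that try-lock bookkeeping with plain pthreads standing in for simple_lock (compile with -pthread; illustrative only):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void
    print_state(void)
    {
        int didlock = 0;

        if (pthread_mutex_trylock(&lock) != 0)
            printf("WARNING: object is locked\n");
        else
            didlock = 1;

        printf("...state dump...\n");   /* pool_print1() equivalent */

        if (didlock)
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        print_state();
        return 0;
    }
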
@@ -1882 +1960 @@ pool_cache_destroy(...)
     simple_unlock(&pp->pr_slock);
 }

-static __inline void *
+static inline void *
 pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
 {
     void *object;
@@ -1901 +1979 @@ pcg_get(...)
     return (object);
 }

-static __inline void
+static inline void
 pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
 {
     u_int idx;
@@ -2007 +2085 @@ pool_cache_put_paddr(...)
     struct pool_cache_group *pcg;
     int s;

+    if (__predict_false((pc->pc_pool->pr_flags & PR_WANTED) != 0)) {
+        goto destruct;
+    }
+
     simple_lock(&pc->pc_slock);

     pcg = LIST_FIRST(&pc->pc_partgroups);
@@ -2028 +2110 @@ pool_cache_put_paddr(...)
         pcg = pool_get(&pcgpool, PR_NOWAIT);
         splx(s);
         if (pcg == NULL) {
+destruct:
+
             /*
              * Unable to allocate a cache group; destruct the object
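
The gist of the PR_WANTED short-cut added above: when consumers are blocked waiting on the underlying pool, stashing the object in the cache group would strand them, so it is released straight back to the pool instead. An illustrative user-space stand-in (flag value and names are invented):

    #include <stdio.h>

    #define PR_WANTED 0x04          /* value illustrative */

    static int pool_flags;
    static int cached_depth;

    static void
    cache_put(const char *what)
    {
        if (pool_flags & PR_WANTED) {
            /* Bypass the cache; this path wakes pool waiters
             * in the real code. */
            printf("released %s to pool\n", what);
            return;
        }
        cached_depth++;
        printf("cached %s (depth %d)\n", what, cached_depth);
    }

    int main(void)
    {
        cache_put("obj1");          /* cached */
        pool_flags |= PR_WANTED;
        cache_put("obj2");          /* bypasses the cache */
        return 0;
    }
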
@@ -2173 +2256 @@ pool_cache_reclaim(...)
 void *pool_page_alloc(struct pool *, int);
 void pool_page_free(struct pool *, void *);

+#ifdef POOL_SUBPAGE
+struct pool_allocator pool_allocator_kmem_fullpage = {
+    pool_page_alloc, pool_page_free, 0,
+    .pa_backingmapptr = &kmem_map,
+};
+#else
 struct pool_allocator pool_allocator_kmem = {
     pool_page_alloc, pool_page_free, 0,
+    .pa_backingmapptr = &kmem_map,
 };
+#endif

 void *pool_page_alloc_nointr(struct pool *, int);
 void pool_page_free_nointr(struct pool *, void *);

+#ifdef POOL_SUBPAGE
+struct pool_allocator pool_allocator_nointr_fullpage = {
+    pool_page_alloc_nointr, pool_page_free_nointr, 0,
+    .pa_backingmapptr = &kernel_map,
+};
+#else
 struct pool_allocator pool_allocator_nointr = {
     pool_page_alloc_nointr, pool_page_free_nointr, 0,
+    .pa_backingmapptr = &kernel_map,
 };
+#endif

 #ifdef POOL_SUBPAGE
 void *pool_subpage_alloc(struct pool *, int);
 void pool_subpage_free(struct pool *, void *);

-struct pool_allocator pool_allocator_kmem_subpage = {
-    pool_subpage_alloc, pool_subpage_free, 0,
+struct pool_allocator pool_allocator_kmem = {
+    pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
+    .pa_backingmapptr = &kmem_map,
+};
+
+void *pool_subpage_alloc_nointr(struct pool *, int);
+void pool_subpage_free_nointr(struct pool *, void *);
+
+struct pool_allocator pool_allocator_nointr = {
+    pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
+    .pa_backingmapptr = &kmem_map,
 };
 #endif /* POOL_SUBPAGE */

-/*
- * We have at least three different resources for the same allocation and
- * each resource can be depleted. First, we have the ready elements in the
- * pool. Then we have the resource (typically a vm_map) for this allocator.
- * Finally, we have physical memory. Waiting for any of these can be
- * unnecessary when any other is freed, but the kernel doesn't support
- * sleeping on multiple wait channels, so we have to employ another strategy.
- *
- * The caller sleeps on the pool (so that it can be awakened when an item
- * is returned to the pool), but we set PA_WANT on the allocator. When a
- * page is returned to the allocator and PA_WANT is set, pool_allocator_free
- * will wake up all sleeping pools belonging to this allocator.
- *
- * XXX Thundering herd.
- */
-void *
-pool_allocator_alloc(struct pool *org, int flags)
+static void *
+pool_allocator_alloc(struct pool *pp, int flags)
 {
-    struct pool_allocator *pa = org->pr_alloc;
-    struct pool *pp, *start;
-    int s, freed;
+    struct pool_allocator *pa = pp->pr_alloc;
     void *res;

-    LOCK_ASSERT(!simple_lock_held(&org->pr_slock));
+    LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));

-    do {
-        if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
-            return (res);
-        if ((flags & PR_WAITOK) == 0) {
-            /*
-             * We only run the drain hook here if PR_NOWAIT.
-             * In other cases, the hook will be run in
-             * pool_reclaim().
-             */
-            if (org->pr_drain_hook != NULL) {
-                (*org->pr_drain_hook)(org->pr_drain_hook_arg,
-                    flags);
-                if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
-                    return (res);
-            }
-            break;
-        }
-
+    res = (*pa->pa_alloc)(pp, flags);
+    if (res == NULL && (flags & PR_WAITOK) == 0) {
         /*
-         * Drain all pools, except "org", that use this
-         * allocator. We do this to reclaim VA space.
-         * pa_alloc is responsible for waiting for
-         * physical memory.
-         *
-         * XXX We risk looping forever if start if someone
-         * calls pool_destroy on "start". But there is no
-         * other way to have potentially sleeping pool_reclaim,
-         * non-sleeping locks on pool_allocator, and some
-         * stirring of drained pools in the allocator.
-         *
-         * XXX Maybe we should use pool_head_slock for locking
-         * the allocators?
+         * We only run the drain hook here if PR_NOWAIT.
+         * In other cases, the hook will be run in
+         * pool_reclaim().
          */
-        freed = 0;
-
-        s = splvm();
-        simple_lock(&pa->pa_slock);
-        pp = start = TAILQ_FIRST(&pa->pa_list);
-        do {
-            TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
-            TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
-            if (pp == org)
-                continue;
-            simple_unlock(&pa->pa_slock);
-            freed = pool_reclaim(pp);
-            simple_lock(&pa->pa_slock);
-        } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
-            freed == 0);
-
-        if (freed == 0) {
-            /*
-             * We set PA_WANT here, the caller will most likely
-             * sleep waiting for pages (if not, this won't hurt
-             * that much), and there is no way to set this in
-             * the caller without violating locking order.
-             */
-            pa->pa_flags |= PA_WANT;
-        }
-        simple_unlock(&pa->pa_slock);
-        splx(s);
-    } while (freed);
-
-    return (NULL);
+        if (pp->pr_drain_hook != NULL) {
+            (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
+            res = (*pa->pa_alloc)(pp, flags);
+        }
+    }
+    return res;
 }

-void
+static void
 pool_allocator_free(struct pool *pp, void *v)
 {
     struct pool_allocator *pa = pp->pr_alloc;
-    int s;

     LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));

     (*pa->pa_free)(pp, v);
-
-    s = splvm();
-    simple_lock(&pa->pa_slock);
-    if ((pa->pa_flags & PA_WANT) == 0) {
-        simple_unlock(&pa->pa_slock);
-        splx(s);
-        return;
-    }
-
-    TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
-        simple_lock(&pp->pr_slock);
-        if ((pp->pr_flags & PR_WANTED) != 0) {
-            pp->pr_flags &= ~PR_WANTED;
-            wakeup(pp);
-        }
-        simple_unlock(&pp->pr_slock);
-    }
-    pa->pa_flags &= ~PA_WANT;
-    simple_unlock(&pa->pa_slock);
-    splx(s);
 }
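
This hunk drops the cross-pool drain loop and the PA_WANT wakeup protocol, with its explicitly noted thundering-herd problem, in favor of the vm_map reclaim callbacks registered earlier in the diff; pool_allocator_alloc() is reduced to one attempt plus, for non-waiting callers only, the drain hook and one retry. The new shape, as a sketch with malloc() standing in for pa->pa_alloc and an invented flag value:

    #include <stdio.h>
    #include <stdlib.h>

    #define PR_WAITOK 0x01          /* value illustrative */

    static void (*drain_hook)(void);

    static void *
    allocator_alloc(size_t sz, int flags)
    {
        void *res;

        res = malloc(sz);
        if (res == NULL && (flags & PR_WAITOK) == 0 && drain_hook != NULL) {
            /* Only the nowait path runs the hook here; waiting
             * callers reach it through pool_reclaim() instead. */
            drain_hook();
            res = malloc(sz);
        }
        return res;
    }

    int main(void)
    {
        void *p = allocator_alloc(64, 0);

        printf("%s\n", p ? "ok" : "ENOMEM");
        free(p);
        return 0;
    }
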
|
|
void * |
void * |
Line 2367 pool_subpage_free(struct pool *pp, void |
|
Line 2388 pool_subpage_free(struct pool *pp, void |
|
|
|
/* We don't provide a real nointr allocator. Maybe later. */ |
/* We don't provide a real nointr allocator. Maybe later. */ |
void * |
void * |
pool_page_alloc_nointr(struct pool *pp, int flags) |
pool_subpage_alloc_nointr(struct pool *pp, int flags) |
{ |
{ |
|
|
return (pool_subpage_alloc(pp, flags)); |
return (pool_subpage_alloc(pp, flags)); |
} |
} |
|
|
void |
void |
pool_page_free_nointr(struct pool *pp, void *v) |
pool_subpage_free_nointr(struct pool *pp, void *v) |
{ |
{ |
|
|
pool_subpage_free(pp, v); |
pool_subpage_free(pp, v); |
} |
} |
#else |
#endif /* POOL_SUBPAGE */ |
void * |
void * |
pool_page_alloc_nointr(struct pool *pp, int flags) |
pool_page_alloc_nointr(struct pool *pp, int flags) |
{ |
{ |
@@ -2394 +2415 @@ pool_page_free_nointr(...)

     uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
 }
-#endif /* POOL_SUBPAGE */