version 1.111.4.1, 2006/02/04 14:30:17 (updated from version 1.69, 2002/03/08 21:43:54)

Line 59 __KERNEL_RCSID(0, "$NetBSD$");
/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according to
 * the pool item size. Each page is kept on one of three lists in the
 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
 * for empty, full and partially-full pages respectively. The individual
 * pool items are on a linked list headed by `ph_itemlist' in each page
 * header. The memory for building the page list is either taken from
 * the allocated pages themselves (for small pool items) or taken from
 * an internal pool of page headers (`phpool').
 */

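/*
 * Editor's illustration (not part of the original file): a minimal
 * sketch of how a subsystem typically consumes this API.  `struct foo',
 * the pool name "foopl" and the functions below are hypothetical.
 */
#if 0	/* example only, not compiled */
static struct pool foo_pool;

void
foo_init(void)
{

	/* One pool per object type; NULL selects the default allocator. */
	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
}

struct foo *
foo_alloc(int canwait)
{

	/* PR_WAITOK may sleep for memory; PR_NOWAIT can return NULL. */
	return pool_get(&foo_pool, canwait ? PR_WAITOK : PR_NOWAIT);
}

void
foo_free(struct foo *f)
{

	pool_put(&foo_pool, f);
}
#endif	/* example */
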
/* List of all pools */
LIST_HEAD(,pool) pool_head = LIST_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
#define	PHPOOL_MAX	8
static struct pool phpool[PHPOOL_MAX];
#define	PHPOOL_FREELIST_NELEM(idx)	(((idx) == 0) ? 0 : (1 << (idx)))

#ifdef POOL_SUBPAGE
/* Pool of subpages for use by normal pools. */
static struct pool psppool;
#endif

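/*
 * Editor's illustration (not in the original): phpool[idx] serves page
 * headers whose embedded PR_NOTOUCH freelist holds
 * PHPOOL_FREELIST_NELEM(idx) entries, i.e.
 *
 *	idx:    0  1  2  3   4   5   6    7
 *	nelem:  0  2  4  8  16  32  64  128
 *
 * phpool[0] (no freelist) also serves ordinary off-page headers.
 */
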
static void *pool_page_alloc_meta(struct pool *, int);
static void pool_page_free_meta(struct pool *, void *);

/* allocator for pool metadata */
static struct pool_allocator pool_allocator_meta = {
	pool_page_alloc_meta, pool_page_free_meta
};

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

Line 99 static struct pool *drainpp;

/* This spin lock protects both pool_head and drainpp. */
struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;

typedef uint8_t pool_item_freelist_t;

struct pool_item_header {
	/* Page headers */
	LIST_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	SPLAY_ENTRY(pool_item_header)
				ph_node;	/* Off-page page headers */
	caddr_t			ph_page;	/* this page's address */
	struct timeval		ph_time;	/* last referenced */
	union {
		/* !PR_NOTOUCH */
		struct {
			LIST_HEAD(, pool_item)
				phu_itemlist;	/* chunk list for this page */
		} phu_normal;
		/* PR_NOTOUCH */
		struct {
			uint16_t
				phu_off;	/* start offset in page */
			pool_item_freelist_t
				phu_firstfree;	/* first free item */
			/*
			 * XXX it might be better to use
			 * a simple bitmap and ffs(3)
			 */
		} phu_notouch;
	} ph_u;
	uint16_t		ph_nmissing;	/* # of chunks in use */
};
#define	ph_itemlist	ph_u.phu_normal.phu_itemlist
#define	ph_off		ph_u.phu_notouch.phu_off
#define	ph_firstfree	ph_u.phu_notouch.phu_firstfree

struct pool_item {
#ifdef DIAGNOSTIC
	u_int pi_magic;
#endif
#define	PI_MAGIC 0xdeadbeefU
	/* Other entries use only this list entry */
	LIST_ENTRY(pool_item)	pi_list;
};

#define	POOL_NEEDS_CATCHUP(pp)						\
	((pp)->pr_nitems < (pp)->pr_minitems)

Line 174 struct pool_item {

/* The cache group pool. */
static struct pool pcgpool;

static void	pool_cache_reclaim(struct pool_cache *, struct pool_pagelist *,
		    struct pool_cache_grouplist *);
static void	pcg_grouplist_free(struct pool_cache_grouplist *);

static int	pool_catchup(struct pool *);
static void	pool_prime_page(struct pool *, caddr_t,
		    struct pool_item_header *);
static void	pool_update_curpage(struct pool *);

void		*pool_allocator_alloc(struct pool *, int);
void		pool_allocator_free(struct pool *, void *);

static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
	void (*)(const char *, ...));
static void pool_print1(struct pool *, const char *,
	void (*)(const char *, ...));

static int pool_chk_page(struct pool *, const char *,
			 struct pool_item_header *);

/*
 * Pool log entry. An array of these is allocated in pool_init().
 */
Line 206 struct pool_log {
	void		*pl_addr;
};

#ifdef POOL_DIAGNOSTIC
/* Number of entries in pool log buffers */
#ifndef POOL_LOGSIZE
#define	POOL_LOGSIZE	10
Line 214 struct pool_log {

int pool_logsize = POOL_LOGSIZE;

static inline void
pr_log(struct pool *pp, void *v, int action, const char *file, long line)
{
	int n = pp->pr_curlogentry;
Line 267 pr_printlog(struct pool *pp, struct pool
	}
}

static inline void
pr_enter(struct pool *pp, const char *file, long line)
{

Line 283 pr_enter(struct pool *pp, const char *fi
	pp->pr_entered_line = line;
}

static inline void
pr_leave(struct pool *pp)
{

Line 296 pr_leave(struct pool *pp)
	pp->pr_entered_line = 0;
}

static inline void
pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
{

Line 312 pr_enter_check(struct pool *pp, void (*p
#define	pr_enter_check(pp, pr)
#endif /* POOL_DIAGNOSTIC */

static inline int
pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
    const void *v)
{
	const char *cp = v;
	int idx;

	KASSERT(pp->pr_roflags & PR_NOTOUCH);
	idx = (cp - ph->ph_page - ph->ph_off) / pp->pr_size;
	KASSERT(idx < pp->pr_itemsperpage);
	return idx;
}

#define	PR_FREELIST_ALIGN(p) \
	roundup((uintptr_t)(p), sizeof(pool_item_freelist_t))
#define	PR_FREELIST(ph)	((pool_item_freelist_t *)PR_FREELIST_ALIGN((ph) + 1))
#define	PR_INDEX_USED	((pool_item_freelist_t)-1)
#define	PR_INDEX_EOL	((pool_item_freelist_t)-2)

static inline void
pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
    void *obj)
{
	int idx = pr_item_notouch_index(pp, ph, obj);
	pool_item_freelist_t *freelist = PR_FREELIST(ph);

	KASSERT(freelist[idx] == PR_INDEX_USED);
	freelist[idx] = ph->ph_firstfree;
	ph->ph_firstfree = idx;
}

static inline void *
pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
{
	int idx = ph->ph_firstfree;
	pool_item_freelist_t *freelist = PR_FREELIST(ph);

	KASSERT(freelist[idx] != PR_INDEX_USED);
	ph->ph_firstfree = freelist[idx];
	freelist[idx] = PR_INDEX_USED;

	return ph->ph_page + ph->ph_off + idx * pp->pr_size;
}

static inline int
phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
{
	if (a->ph_page < b->ph_page)
		return (-1);
	else if (a->ph_page > b->ph_page)
		return (1);
	else
		return (0);
}

SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);

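/*
 * Editor's illustration (not in the original): the PR_NOTOUCH freelist
 * is an index-linked stack.  With 4 items per page the header starts as
 *
 *	ph_firstfree = 0, freelist[] = { 1, 2, 3, PR_INDEX_EOL }
 *
 * pr_item_notouch_get() hands out item 0, advances ph_firstfree to 1 and
 * marks freelist[0] = PR_INDEX_USED; pr_item_notouch_put() of item 0
 * pushes it back, restoring ph_firstfree = 0.  No list pointers are ever
 * written into the items themselves, which is the point of PR_NOTOUCH.
 */
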
/*
 * Return the pool page header based on page address.
 */
static inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, caddr_t page)
{
	struct pool_item_header *ph, tmp;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		return ((struct pool_item_header *)(page + pp->pr_phoffset));

	tmp.ph_page = page;
	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
	return ph;
}

static void
pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
{
	struct pool_item_header *ph;
	int s;

	while ((ph = LIST_FIRST(pq)) != NULL) {
		LIST_REMOVE(ph, ph_pagelist);
		pool_allocator_free(pp, ph->ph_page);
		if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
			s = splvm();
			pool_put(pp->pr_phpool, ph);
			splx(s);
		}
	}
}

/*
 * Remove a page from the pool.
 */
static inline void
pr_rmpage(struct pool *pp, struct pool_item_header *ph,
     struct pool_pagelist *pq)
{

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));

	/*
	 * If the page was idle, decrement the idle page count.
Line 429 pr_rmpage(struct pool *pp, struct pool_i
	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink the page from the pool and queue it for release.
	 */
	LIST_REMOVE(ph, ph_pagelist);
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
	LIST_INSERT_HEAD(pq, ph, ph_pagelist);

	pp->pr_npages--;
	pp->pr_npagefree++;

	pool_update_curpage(pp);
}

/*
 * Initialize all the pools listed in the "pools" link set.
 */
void
link_pool_init(void)
{
	__link_set_decl(pools, struct link_pool_init);
	struct link_pool_init * const *pi;

	__link_set_foreach(pi, pools)
		pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
		    (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
		    (*pi)->palloc);
}

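/*
 * Editor's illustration (an assumption, not in the original): entries
 * reach the "pools" link set via the POOL_INIT() macro from
 * <sys/pool.h>, e.g.
 *
 *	POOL_INIT(foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *
 * link_pool_init() above then calls pool_init() for each such entry at
 * boot.  The name foo_pool/"foopl" is hypothetical.
 */
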
/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * pools statically.
 */
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, struct pool_allocator *palloc)
{
	int off, slack;
	size_t trysize, phsize;
	int s;

	KASSERT((1UL << (CHAR_BIT * sizeof(pool_item_freelist_t))) - 2 >=
	    PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1));

#ifdef POOL_DIAGNOSTIC
	/*
Line 526 pool_init(struct pool *pp, size_t size,
	if (size < sizeof(struct pool_item))
		size = sizeof(struct pool_item);

	size = roundup(size, align);
#ifdef DIAGNOSTIC
	if (size > palloc->pa_pagesz)
		panic("pool_init: pool item size (%lu) too large",
Line 536 pool_init(struct pool *pp, size_t size,
	/*
	 * Initialize the pool structure.
	 */
	LIST_INIT(&pp->pr_emptypages);
	LIST_INIT(&pp->pr_fullpages);
	LIST_INIT(&pp->pr_partpages);
	LIST_INIT(&pp->pr_cachelist);
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
Line 564 pool_init(struct pool *pp, size_t size,

	/*
	 * Decide whether to put the page header off page to avoid
	 * wasting too large a part of the page or too big item.
	 * Off-page page headers go on a hash table, so we can match
	 * a returned item with its header based on the page address.
	 * We use 1/16 of the page size and about 8 times of the item
	 * size as the threshold (XXX: tune)
	 *
	 * However, we'll put the header into the page if we can put
	 * it without wasting any items.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
	pp->pr_itemoffset = ioff %= align;
	/* See the comment below about reserved bytes. */
	trysize = palloc->pa_pagesz - ((align - ioff) % align);
	phsize = ALIGN(sizeof(struct pool_item_header));
	if ((pp->pr_roflags & PR_NOTOUCH) == 0 &&
	    (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
		/* Use the end of the page for the page header */
		pp->pr_roflags |= PR_PHINPAGE;
		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = palloc->pa_pagesz;
		SPLAY_INIT(&pp->pr_phtree);
	}

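	/*
	 * Editor's illustration (not in the original): assume a 4096-byte
	 * page and phsize = 48 (a plausible LP64 figure), with align =
	 * ioff = 0.  A 64-byte item pool keeps its header in the page,
	 * since 64 < MIN(4096/16, 48 << 3) = MIN(256, 384).  A 2048-byte
	 * item pool goes off page: 2048 fails that test, and stealing the
	 * header space would cost an item (4096/2048 = 2 but
	 * (4096-48)/2048 = 1).
	 */
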
	/*
	 * Alignment is to take place at `ioff' within the item. This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 */
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
	KASSERT(pp->pr_itemsperpage != 0);
	if ((pp->pr_roflags & PR_NOTOUCH)) {
		int idx;

		for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
		    idx++) {
			/* nothing */
		}
		if (idx >= PHPOOL_MAX) {
			/*
			 * if you see this panic, consider to tweak
			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
			 */
			panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
			    pp->pr_wchan, pp->pr_itemsperpage);
		}
		pp->pr_phpool = &phpool[idx];
	} else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
		pp->pr_phpool = &phpool[0];
	}
#if defined(DIAGNOSTIC)
	else {
		pp->pr_phpool = NULL;
	}
#endif

	/*
	 * Use the slack between the chunks and the page header
Line 661 pool_init(struct pool *pp, size_t size,
	 * haven't done so yet.
	 * XXX LOCKING.
	 */
	if (phpool[0].pr_size == 0) {
		int idx;

		for (idx = 0; idx < PHPOOL_MAX; idx++) {
			static char phpool_names[PHPOOL_MAX][6+1+6+1];
			int nelem;
			size_t sz;

			nelem = PHPOOL_FREELIST_NELEM(idx);
			snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
			    "phpool-%d", nelem);
			sz = sizeof(struct pool_item_header);
			if (nelem) {
				sz = PR_FREELIST_ALIGN(sz)
				    + nelem * sizeof(pool_item_freelist_t);
			}
			pool_init(&phpool[idx], sz, 0, 0, 0,
			    phpool_names[idx], &pool_allocator_meta);
		}
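		/*
		 * Editor's note (not in the original): the loop above
		 * creates "phpool-0" (plain headers, also used by
		 * !PR_NOTOUCH off-page pools) and "phpool-2" through
		 * "phpool-128", whose items append room for 2..128
		 * freelist entries after the header.
		 */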
#ifdef POOL_SUBPAGE
		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
		    PR_RECURSIVE, "psppool", &pool_allocator_meta);
#endif
		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
		    0, "pcgpool", &pool_allocator_meta);
	}

	/* Insert into the list of all pools. */
	simple_lock(&pool_head_slock);
	LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
	simple_unlock(&pool_head_slock);

	/* Insert this into the list of pools using this allocator. */
	s = splvm();
	simple_lock(&palloc->pa_slock);
	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
	simple_unlock(&palloc->pa_slock);
	splx(s);
}

/*
Line 706 pool_init(struct pool *pp, size_t size,
void
pool_destroy(struct pool *pp)
{
	struct pool_pagelist pq;
	struct pool_item_header *ph;
	int s;

	/* Remove from global pool list */
	simple_lock(&pool_head_slock);
	LIST_REMOVE(pp, pr_poollist);
	if (drainpp == pp)
		drainpp = NULL;
	simple_unlock(&pool_head_slock);

	/* Remove this pool from its allocator's list of pools. */
	s = splvm();
	simple_lock(&pp->pr_alloc->pa_slock);
	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
	simple_unlock(&pp->pr_alloc->pa_slock);
	splx(s);

	s = splvm();
	simple_lock(&pp->pr_slock);

	KASSERT(LIST_EMPTY(&pp->pr_cachelist));

#ifdef DIAGNOSTIC
	if (pp->pr_nout != 0) {
		pr_printlog(pp, NULL, printf);
		panic("pool_destroy: pool busy: still out: %u",
		    pp->pr_nout);
	}
#endif

	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
	KASSERT(LIST_EMPTY(&pp->pr_partpages));

	/* Remove all pages */
	LIST_INIT(&pq);
	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		pr_rmpage(pp, ph, &pq);

	simple_unlock(&pp->pr_slock);
	splx(s);

	pr_pagelist_free(pp, &pq);

#ifdef POOL_DIAGNOSTIC
	if ((pp->pr_roflags & PR_LOGGING) != 0)
Line 769 pool_set_drain_hook(struct pool *pp, voi
	pp->pr_drain_hook_arg = arg;
}

static struct pool_item_header *
pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
{
	struct pool_item_header *ph;
Line 780 pool_alloc_item_header(struct pool *pp,
	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
	else {
		s = splvm();
		ph = pool_get(pp->pr_phpool, flags);
		splx(s);
	}

Line 803 pool_get(struct pool *pp, int flags)
	void *v;

#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_itemsperpage == 0))
		panic("pool_get: pool %p: pr_itemsperpage is zero, "
		    "pool not initialized?", pp);
	if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
			    (flags & PR_WAITOK) != 0))
		panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);

#endif /* DIAGNOSTIC */
#ifdef LOCKDEBUG
	if (flags & PR_WAITOK)
		simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
	SCHED_ASSERT_UNLOCKED();
#endif

	simple_lock(&pp->pr_slock);
	pr_enter(pp, file, line);
Line 888 pool_get(struct pool *pp, int flags)
		simple_unlock(&pp->pr_slock);
		printf("pool_get: %s: curpage NULL, nitems %u\n",
		    pp->pr_wchan, pp->pr_nitems);
		panic("pool_get: nitems inconsistent");
	}
#endif

Line 902 pool_get(struct pool *pp, int flags)
		v = pool_allocator_alloc(pp, flags);
		if (__predict_true(v != NULL))
			ph = pool_alloc_item_header(pp, v, flags);

		if (__predict_false(v == NULL || ph == NULL)) {
			if (v != NULL)
				pool_allocator_free(pp, v);

			simple_lock(&pp->pr_slock);
			pr_enter(pp, file, line);

			/*
			 * We were unable to allocate a page or item
			 * header, but we released the lock during
Line 929 pool_get(struct pool *pp, int flags)
			/*
			 * Wait for items to be returned to this pool.
			 *
			 * wake up once a second and try again,
			 * as the check in pool_cache_put_paddr() is racy.
			 */
			pp->pr_flags |= PR_WANTED;
			/* PA_WANTED is already set on the allocator. */
			pr_leave(pp);
			ltsleep(pp, PSWP, pp->pr_wchan, hz, &pp->pr_slock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/* We have more memory; add it to the pool */
		simple_lock(&pp->pr_slock);
		pr_enter(pp, file, line);
		pool_prime_page(pp, v, ph);
		pp->pr_npagealloc++;

		/* Start the allocation process over. */
		goto startover;
	}

	if (pp->pr_roflags & PR_NOTOUCH) {
#ifdef DIAGNOSTIC
		if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			panic("pool_get: %s: page empty", pp->pr_wchan);
		}
#endif
		v = pr_item_notouch_get(pp, ph);
#ifdef POOL_DIAGNOSTIC
		pr_log(pp, v, PRLOG_GET, file, line);
#endif
	} else {
		v = pi = LIST_FIRST(&ph->ph_itemlist);
		if (__predict_false(v == NULL)) {
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			panic("pool_get: %s: page empty", pp->pr_wchan);
		}
#ifdef DIAGNOSTIC
		if (__predict_false(pp->pr_nitems == 0)) {
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			printf("pool_get: %s: items on itemlist, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_get: nitems inconsistent");
		}
#endif

#ifdef POOL_DIAGNOSTIC
		pr_log(pp, v, PRLOG_GET, file, line);
#endif

#ifdef DIAGNOSTIC
		if (__predict_false(pi->pi_magic != PI_MAGIC)) {
			pr_printlog(pp, pi, printf);
			panic("pool_get(%s): free list modified: "
			    "magic=%x; page %p; item addr %p\n",
			    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
		}
#endif

		/*
		 * Remove from item list.
		 */
		LIST_REMOVE(pi, pi_list);
	}
	pp->pr_nitems--;
	pp->pr_nout++;
	if (ph->ph_nmissing == 0) {
Line 1004 pool_get(struct pool *pp, int flags)
panic("pool_get: nidle inconsistent"); |
panic("pool_get: nidle inconsistent"); |
#endif |
#endif |
pp->pr_nidle--; |
pp->pr_nidle--; |
|
|
|
/* |
|
* This page was previously empty. Move it to the list of |
|
* partially-full pages. This page is already curpage. |
|
*/ |
|
LIST_REMOVE(ph, ph_pagelist); |
|
LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist); |
} |
} |
ph->ph_nmissing++; |
ph->ph_nmissing++; |
if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) { |
if (ph->ph_nmissing == pp->pr_itemsperpage) { |
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) { |
if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 && |
|
!LIST_EMPTY(&ph->ph_itemlist))) { |
pr_leave(pp); |
pr_leave(pp); |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
panic("pool_get: %s: nmissing inconsistent", |
panic("pool_get: %s: nmissing inconsistent", |
Line 843 pool_get(struct pool *pp, int flags) |
|
Line 1024 pool_get(struct pool *pp, int flags) |
|
} |
} |
#endif |
#endif |
/* |
/* |
* Find a new non-empty page header, if any. |
* This page is now full. Move it to the full list |
* Start search from the page head, to increase |
* and select a new current page. |
* the chance for "high water" pages to be freed. |
|
* |
|
* Migrate empty pages to the end of the list. This |
|
* will speed the update of curpage as pages become |
|
* idle. Empty pages intermingled with idle pages |
|
* is no big deal. As soon as a page becomes un-empty, |
|
* it will move back to the head of the list. |
|
*/ |
*/ |
TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist); |
LIST_REMOVE(ph, ph_pagelist); |
TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist); |
LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist); |
TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) |
pool_update_curpage(pp); |
if (TAILQ_FIRST(&ph->ph_itemlist) != NULL) |
|
break; |
|
|
|
pp->pr_curpage = ph; |
|
} |
} |
|
|
pp->pr_nget++; |
pp->pr_nget++; |
|
pr_leave(pp); |
|
|
/* |
/* |
* If we have a low water mark and we are now below that low |
* If we have a low water mark and we are now below that low |
Line 876 pool_get(struct pool *pp, int flags) |
|
Line 1047 pool_get(struct pool *pp, int flags) |
|
*/ |
*/ |
} |
} |
|
|
pr_leave(pp); |
|
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
return (v); |
return (v); |
} |
} |
Line 1055 pool_get(struct pool *pp, int flags)
 * Internal version of pool_put().  Pool is already locked/entered.
 */
static void
pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
{
	struct pool_item *pi = v;
	struct pool_item_header *ph;
	caddr_t page;

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
	SCHED_ASSERT_UNLOCKED();

	page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);

Line 1089 pool_do_put(struct pool *pp, void *v)
	/*
	 * Return to item list.
	 */
	if (pp->pr_roflags & PR_NOTOUCH) {
		pr_item_notouch_put(pp, ph, v);
	} else {
#ifdef DIAGNOSTIC
		pi->pi_magic = PI_MAGIC;
#endif
#ifdef DEBUG
		{
			int i, *ip = v;

			for (i = 0; i < pp->pr_size / sizeof(int); i++) {
				*ip++ = PI_MAGIC;
			}
		}
#endif

		LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
	}
	KDASSERT(ph->ph_nmissing != 0);
	ph->ph_nmissing--;
	pp->pr_nput++;
	pp->pr_nitems++;
Line 1126 pool_do_put(struct pool *pp, void *v)
	}

	/*
	 * If this page is now empty, do one of two things:
	 *
	 *	(1) If we have more pages than the page high water mark,
	 *	    free the page back to the system.  ONLY CONSIDER
	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
	 *	    CLAIM.
	 *
	 *	(2) Otherwise, move the page to the empty page list.
	 *
	 * Either way, select a new current page (so we use a partially-full
	 * page if one is available).
	 */
	if (ph->ph_nmissing == 0) {
		pp->pr_nidle++;
		if (pp->pr_npages > pp->pr_minpages &&
		    (pp->pr_npages > pp->pr_maxpages ||
		    (pp->pr_alloc->pa_flags & PA_WANT) != 0)) {
			pr_rmpage(pp, ph, pq);
		} else {
			LIST_REMOVE(ph, ph_pagelist);
			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);

			/*
			 * Update the timestamp on the page.  A page must
Line 1154 pool_do_put(struct pool *pp, void *v)
			 * be reclaimed by the pagedaemon.  This minimizes
			 * ping-pong'ing for memory.
			 */
			getmicrotime(&ph->ph_time);
		}
		pool_update_curpage(pp);
	}

	/*
	 * If the page was previously completely full, move it to the
	 * partially-full list and make it the current page.  The next
	 * allocation will get the item from this page, instead of
	 * further fragmenting the pool.
	 */
	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
		pp->pr_curpage = ph;
	}
}
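
/*
 * Editor's illustration (not in the original): taken together,
 * pool_get()/pool_do_put() move a page between the three lists as its
 * ph_nmissing count changes:
 *
 *	empty --get--> partial --get...--> full
 *	full --put--> partial --put...--> empty --(high water)--> freed
 *
 * pr_curpage always prefers a partially-full page, which keeps
 * allocations clustered and lets idle pages age out.
 */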
Line 1179 pool_do_put(struct pool *pp, void *v)
void
_pool_put(struct pool *pp, void *v, const char *file, long line)
{
	struct pool_pagelist pq;

	LIST_INIT(&pq);

	simple_lock(&pp->pr_slock);
	pr_enter(pp, file, line);

	pr_log(pp, v, PRLOG_PUT, file, line);

	pool_do_put(pp, v, &pq);

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);

	pr_pagelist_free(pp, &pq);
}
#undef pool_put
#endif /* POOL_DIAGNOSTIC */
Line 1201 _pool_put(struct pool *pp, void *v, cons
void
pool_put(struct pool *pp, void *v)
{
	struct pool_pagelist pq;

	LIST_INIT(&pq);

	simple_lock(&pp->pr_slock);
	pool_do_put(pp, v, &pq);
	simple_unlock(&pp->pr_slock);

	pr_pagelist_free(pp, &pq);
}

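/*
 * Editor's note (not in the original): pool_do_put() only queues empty
 * pages on `pq'; the caller frees them via pr_pagelist_free() after
 * dropping pr_slock.  This keeps the call into the backing allocator
 * (and the pool_put() on phpool for off-page headers) outside the
 * pool's own lock, avoiding lock-order problems between the pool and
 * its allocator.
 */
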
#ifdef POOL_DIAGNOSTIC
Line 1222 pool_put(struct pool *pp, void *v)
int
pool_prime(struct pool *pp, int n)
{
	struct pool_item_header *ph = NULL;
	caddr_t cp;
	int newpages;

	simple_lock(&pp->pr_slock);

Line 1235 pool_prime(struct pool *pp, int n)
		cp = pool_allocator_alloc(pp, PR_NOWAIT);
		if (__predict_true(cp != NULL))
			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);

		if (__predict_false(cp == NULL || ph == NULL)) {
			if (cp != NULL)
				pool_allocator_free(pp, cp);
			simple_lock(&pp->pr_slock);
			break;
		}

		simple_lock(&pp->pr_slock);
		pool_prime_page(pp, cp, ph);
		pp->pr_npagealloc++;
		pp->pr_minpages++;
Line 1270 pool_prime_page(struct pool *pp, caddr_t
	unsigned int ioff = pp->pr_itemoffset;
	int n;

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));

#ifdef DIAGNOSTIC
	if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
#endif

	/*
	 * Insert page header.
	 */
	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
	LIST_INIT(&ph->ph_itemlist);
	ph->ph_page = storage;
	ph->ph_nmissing = 0;
	getmicrotime(&ph->ph_time);
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);

	pp->pr_nidle++;

Line 1309 pool_prime_page(struct pool *pp, caddr_t
	n = pp->pr_itemsperpage;
	pp->pr_nitems += n;

	if (pp->pr_roflags & PR_NOTOUCH) {
		pool_item_freelist_t *freelist = PR_FREELIST(ph);
		int i;

		ph->ph_off = cp - storage;
		ph->ph_firstfree = 0;
		for (i = 0; i < n - 1; i++)
			freelist[i] = i + 1;
		freelist[n - 1] = PR_INDEX_EOL;
	} else {
		while (n--) {
			pi = (struct pool_item *)cp;

			KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);

			/* Insert on page list */
			LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
#ifdef DIAGNOSTIC
			pi->pi_magic = PI_MAGIC;
#endif
			cp = (caddr_t)(cp + pp->pr_size);
		}
	}

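	/*
	 * Editor's note (not in the original): ph_off records where item 0
	 * starts within the page (cp may have been advanced past bytes
	 * reserved for alignment); pr_item_notouch_index() subtracts it to
	 * turn an item address back into a freelist index with no per-item
	 * bookkeeping.
	 */
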
	/*
Line 1345 pool_prime_page(struct pool *pp, caddr_t

/*
 * Used by pool_get() when nitems drops below the low water mark.  This
 * is used to catch up pr_nitems with the low water mark.
 *
 * Note 1, we never wait for memory here, we let the caller decide what to do.
 *
 * Note 2, we must be called with the pool already locked, and we return
 * with it locked.
 */
static int
pool_catchup(struct pool *pp)
{
	struct pool_item_header *ph = NULL;
	caddr_t cp;
	int error = 0;

	while (POOL_NEEDS_CATCHUP(pp)) {
		/*
		 * Call the page back-end allocator for more memory.
Line 1370 pool_catchup(struct pool *pp)
		cp = pool_allocator_alloc(pp, PR_NOWAIT);
		if (__predict_true(cp != NULL))
			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
		if (__predict_false(cp == NULL || ph == NULL)) {
			if (cp != NULL)
				pool_allocator_free(pp, cp);
			error = ENOMEM;
			simple_lock(&pp->pr_slock);
			break;
		}
		simple_lock(&pp->pr_slock);
		pool_prime_page(pp, cp, ph);
		pp->pr_npagealloc++;
	}
Line 1385 pool_catchup(struct pool *pp)
	return (error);
}

static void
pool_update_curpage(struct pool *pp)
{

	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
	if (pp->pr_curpage == NULL) {
		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
	}
}

void
pool_setlowat(struct pool *pp, int n)
{

	simple_lock(&pp->pr_slock);

Line 1407 pool_setlowat(struct pool *pp, int n)
	    : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	/* Make sure we're caught up with the newly-set low water mark. */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
Line 1466 pool_reclaim(struct pool *pp)
{
	struct pool_item_header *ph, *phnext;
	struct pool_cache *pc;
	struct pool_pagelist pq;
	struct pool_cache_grouplist pcgl;
	struct timeval curtime, diff;

	if (pp->pr_drain_hook != NULL) {
		/*
Line 1481 pool_reclaim(struct pool *pp)
		return (0);
	pr_enter(pp, file, line);

	LIST_INIT(&pq);
	LIST_INIT(&pcgl);

	/*
	 * Reclaim items from the pool's caches.
	 */
	LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
		pool_cache_reclaim(pc, &pq, &pcgl);

	getmicrotime(&curtime);

	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
		phnext = LIST_NEXT(ph, ph_pagelist);

		/* Check our minimum page claim */
		if (pp->pr_npages <= pp->pr_minpages)
			break;

		KASSERT(ph->ph_nmissing == 0);
		timersub(&curtime, &ph->ph_time, &diff);
		if (diff.tv_sec < pool_inactive_time)
			continue;

		/*
		 * If freeing this page would put us below
		 * the low water mark, stop now.
		 */
		if ((pp->pr_nitems - pp->pr_itemsperpage) <
		    pp->pr_minitems)
			break;

		pr_rmpage(pp, ph, &pq);
	}

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
	if (LIST_EMPTY(&pq) && LIST_EMPTY(&pcgl))
		return 0;

	pr_pagelist_free(pp, &pq);
	pcg_grouplist_free(&pcgl);
	return (1);
}

Line 1540 pool_drain(void *arg)
	s = splvm();
	simple_lock(&pool_head_slock);
	if (drainpp == NULL) {
		drainpp = LIST_FIRST(&pool_head);
	}
	if (drainpp) {
		pp = drainpp;
		drainpp = LIST_NEXT(pp, pr_poollist);
	}
	simple_unlock(&pool_head_slock);
	pool_reclaim(pp);
Line 1572 pool_print(struct pool *pp, const char *
}

void
pool_printall(const char *modif, void (*pr)(const char *, ...))
{
	struct pool *pp;

	if (simple_lock_try(&pool_head_slock) == 0) {
		(*pr)("WARNING: pool_head_slock is locked\n");
	} else {
		simple_unlock(&pool_head_slock);
	}

	LIST_FOREACH(pp, &pool_head, pr_poollist) {
		pool_printit(pp, modif, pr);
	}
}

void
pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
{

	if (pp == NULL) {
		(*pr)("Must specify a pool to print.\n");
Line 1608 pool_printit(struct pool *pp, const char
	if (simple_lock_try(&pp->pr_slock) == 0)
		(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
	else
		simple_unlock(&pp->pr_slock);

	pool_print1(pp, modif, pr);
}

static void
pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
    void (*pr)(const char *, ...))
{
	struct pool_item_header *ph;
#ifdef DIAGNOSTIC
	struct pool_item *pi;
#endif

	LIST_FOREACH(ph, pl, ph_pagelist) {
		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
		    ph->ph_page, ph->ph_nmissing,
		    (u_long)ph->ph_time.tv_sec,
		    (u_long)ph->ph_time.tv_usec);
#ifdef DIAGNOSTIC
		if (!(pp->pr_roflags & PR_NOTOUCH)) {
			LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
				if (pi->pi_magic != PI_MAGIC) {
					(*pr)("\t\t\titem %p, magic 0x%x\n",
					    pi, pi->pi_magic);
				}
			}
		}
#endif
	}
}

static void
Line 1646 pool_print1(struct pool *pp, const char
	struct pool_item_header *ph;
	struct pool_cache *pc;
	struct pool_cache_group *pcg;
	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
	char c;

Line 1656 pool_print1(struct pool *pp, const char
			print_pagelist = 1;
		if (c == 'c')
			print_cache = 1;
	}

	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
Line 1675 pool_print1(struct pool *pp, const char
	if (print_pagelist == 0)
		goto skip_pagelist;

	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		(*pr)("\n\tempty page list:\n");
	pool_print_pagelist(pp, &pp->pr_emptypages, pr);
	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
		(*pr)("\n\tfull page list:\n");
	pool_print_pagelist(pp, &pp->pr_fullpages, pr);
	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
		(*pr)("\n\tpartial-page list:\n");
	pool_print_pagelist(pp, &pp->pr_partpages, pr);

	if (pp->pr_curpage == NULL)
		(*pr)("\tno current page\n");
	else
		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);

 skip_pagelist:

	if (print_log == 0)
		goto skip_log;

Line 1701 pool_print1(struct pool *pp, const char
	pr_printlog(pp, NULL, pr);

 skip_log:

	if (print_cache == 0)
		goto skip_cache;

#define PR_GROUPLIST(pcg)						\
	(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);		\
	for (i = 0; i < PCG_NOBJECTS; i++) {				\
		if (pcg->pcg_objects[i].pcgo_pa !=			\
		    POOL_PADDR_INVALID) {				\
			(*pr)("\t\t\t%p, 0x%llx\n",			\
			    pcg->pcg_objects[i].pcgo_va,		\
			    (unsigned long long)			\
			    pcg->pcg_objects[i].pcgo_pa);		\
		} else {						\
			(*pr)("\t\t\t%p\n",				\
			    pcg->pcg_objects[i].pcgo_va);		\
		}							\
	}

	LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
		(*pr)("\tcache %p\n", pc);
		(*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
		(*pr)("\t    full groups:\n");
		LIST_FOREACH(pcg, &pc->pc_fullgroups, pcg_list) {
			PR_GROUPLIST(pcg);
		}
		(*pr)("\t    partial groups:\n");
		LIST_FOREACH(pcg, &pc->pc_partgroups, pcg_list) {
			PR_GROUPLIST(pcg);
		}
		(*pr)("\t    empty groups:\n");
		LIST_FOREACH(pcg, &pc->pc_emptygroups, pcg_list) {
			PR_GROUPLIST(pcg);
		}
	}
#undef PR_GROUPLIST

 skip_cache:

	pr_enter_check(pp, pr);
}

-int
-pool_chk(struct pool *pp, const char *label)
-{
-   struct pool_item_header *ph;
-   int r = 0;
-
-   simple_lock(&pp->pr_slock);
-   TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) {
-       struct pool_item *pi;
-       int n;
-       caddr_t page;
-
-       page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
-       if (page != ph->ph_page &&
-           (pp->pr_roflags & PR_PHINPAGE) != 0) {
-           if (label != NULL)
-               printf("%s: ", label);
-           printf("pool(%p:%s): page inconsistency: page %p;"
-               " at page head addr %p (p %p)\n", pp,
-               pp->pr_wchan, ph->ph_page, ph, page);
-           r++;
-           goto out;
-       }
-
-       for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
-            pi != NULL;
-            pi = TAILQ_NEXT(pi,pi_list), n++) {
-#ifdef DIAGNOSTIC
-           if (pi->pi_magic != PI_MAGIC) {
-               if (label != NULL)
-                   printf("%s: ", label);
-               printf("pool(%s): free list modified: magic=%x;"
-                   " page %p; item ordinal %d;"
-                   " addr %p (p %p)\n",
-                   pp->pr_wchan, pi->pi_magic, ph->ph_page,
-                   n, pi, page);
-               panic("pool");
-           }
-#endif
-           page =
-               (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
-           if (page == ph->ph_page)
-               continue;
-
-           if (label != NULL)
-               printf("%s: ", label);
-           printf("pool(%p:%s): page inconsistency: page %p;"
-               " item ordinal %d; addr %p (p %p)\n", pp,
-               pp->pr_wchan, ph->ph_page, n, pi, page);
-           r++;
-           goto out;
-       }
-   }
+static int
+pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
+{
+   struct pool_item *pi;
+   caddr_t page;
+   int n;
+
+   page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
+   if (page != ph->ph_page &&
+       (pp->pr_roflags & PR_PHINPAGE) != 0) {
+       if (label != NULL)
+           printf("%s: ", label);
+       printf("pool(%p:%s): page inconsistency: page %p;"
+           " at page head addr %p (p %p)\n", pp,
+           pp->pr_wchan, ph->ph_page, ph, page);
+       return 1;
+   }
+
+   if ((pp->pr_roflags & PR_NOTOUCH) != 0)
+       return 0;
+
+   for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
+        pi != NULL;
+        pi = LIST_NEXT(pi,pi_list), n++) {
+#ifdef DIAGNOSTIC
+       if (pi->pi_magic != PI_MAGIC) {
+           if (label != NULL)
+               printf("%s: ", label);
+           printf("pool(%s): free list modified: magic=%x;"
+               " page %p; item ordinal %d;"
+               " addr %p (p %p)\n",
+               pp->pr_wchan, pi->pi_magic, ph->ph_page,
+               n, pi, page);
+           panic("pool");
+       }
+#endif
+       page = (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
+       if (page == ph->ph_page)
+           continue;
+
+       if (label != NULL)
+           printf("%s: ", label);
+       printf("pool(%p:%s): page inconsistency: page %p;"
+           " item ordinal %d; addr %p (p %p)\n", pp,
+           pp->pr_wchan, ph->ph_page, n, pi, page);
+       return 1;
+   }
+   return 0;
+}
+
+int
+pool_chk(struct pool *pp, const char *label)
+{
+   struct pool_item_header *ph;
+   int r = 0;
+
+   simple_lock(&pp->pr_slock);
+   LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
+       r = pool_chk_page(pp, label, ph);
+       if (r) {
+           goto out;
+       }
+   }
+   LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
+       r = pool_chk_page(pp, label, ph);
+       if (r) {
+           goto out;
+       }
+   }
+   LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
+       r = pool_chk_page(pp, label, ph);
+       if (r) {
+           goto out;
+       }
+   }

out:
    simple_unlock(&pp->pr_slock);
    return (r);
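
/*
 * A minimal sketch (not part of the diff): driving the consistency
 * check above from debug code.  "frob_pool_verify" is a hypothetical
 * helper; pool_chk() prints the details itself and returns non-zero
 * on the first inconsistency found on any of the three page lists.
 */
#ifdef DIAGNOSTIC
static void
frob_pool_verify(struct pool *pp)
{

    if (pool_chk(pp, "frob_pool_verify") != 0)
        panic("frob_pool_verify: pool corrupt");
}
#endif /* DIAGNOSTIC */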
Line 1608 (1.69) / Line 1843 (1.111.4.1): pool_cache_init(struct pool_cache *pc, s...

    void *arg)
{

-   TAILQ_INIT(&pc->pc_grouplist);
+   LIST_INIT(&pc->pc_emptygroups);
+   LIST_INIT(&pc->pc_fullgroups);
+   LIST_INIT(&pc->pc_partgroups);
    simple_lock_init(&pc->pc_slock);

-   pc->pc_allocfrom = NULL;
-   pc->pc_freeto = NULL;
    pc->pc_pool = pp;

    pc->pc_ctor = ctor;

Line 1627 (1.69) / Line 1862 (1.111.4.1): pool_cache_init(struct pool_cache *pc, s...

    pc->pc_nitems = 0;

    simple_lock(&pp->pr_slock);
-   TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
+   LIST_INSERT_HEAD(&pp->pr_cachelist, pc, pc_poollist);
    simple_unlock(&pp->pr_slock);
}
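
/*
 * A minimal sketch (not part of the diff): a pool with an attached
 * cache and a constructor/destructor pair.  "struct frob", frob_ctor(),
 * frob_dtor(), frob_pool and frob_cache are hypothetical names; the
 * pool_init()/pool_cache_init() signatures are the ones this file is
 * written against.
 */
struct frob {
    int f_state;
};

static struct pool frob_pool;
static struct pool_cache frob_cache;

static int
frob_ctor(void *arg, void *object, int flags)
{
    struct frob *f = object;

    /* Expensive, reusable initialization happens once per cached
     * object, not on every pool_cache_get(). */
    f->f_state = 0;
    return 0;
}

static void
frob_dtor(void *arg, void *object)
{

    /* Undo frob_ctor(); nothing to release in this sketch. */
}

void
frob_init(void)
{

    pool_init(&frob_pool, sizeof(struct frob), 0, 0, 0,
        "frobpl", &pool_allocator_nointr);
    pool_cache_init(&frob_cache, &frob_pool, frob_ctor, frob_dtor, NULL);
}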
|
|
Line 1646 (1.69) / Line 1881 (1.111.4.1): pool_cache_destroy(struct pool_cache *pc...

    /* ...and remove it from the pool's cache list. */
    simple_lock(&pp->pr_slock);
-   TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
+   LIST_REMOVE(pc, pc_poollist);
    simple_unlock(&pp->pr_slock);
}
|
|
-static __inline void *
-pcg_get(struct pool_cache_group *pcg)
+static inline void *
+pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
{
    void *object;
    u_int idx;

Line 1660 (1.69) / Line 1895 (1.111.4.1): pcg_get(struct pool_cache_group *pcg)

    KASSERT(pcg->pcg_avail != 0);
    idx = --pcg->pcg_avail;

-   KASSERT(pcg->pcg_objects[idx] != NULL);
-   object = pcg->pcg_objects[idx];
-   pcg->pcg_objects[idx] = NULL;
+   KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
+   object = pcg->pcg_objects[idx].pcgo_va;
+   if (pap != NULL)
+       *pap = pcg->pcg_objects[idx].pcgo_pa;
+   pcg->pcg_objects[idx].pcgo_va = NULL;

    return (object);
}
|
|
-static __inline void
-pcg_put(struct pool_cache_group *pcg, void *object)
+static inline void
+pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
{
    u_int idx;

    KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
    idx = pcg->pcg_avail++;

-   KASSERT(pcg->pcg_objects[idx] == NULL);
-   pcg->pcg_objects[idx] = object;
+   KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
+   pcg->pcg_objects[idx].pcgo_va = object;
+   pcg->pcg_objects[idx].pcgo_pa = pa;
+}
+
+static void
+pcg_grouplist_free(struct pool_cache_grouplist *pcgl)
+{
+   struct pool_cache_group *pcg;
+   int s;
+
+   s = splvm();
+   while ((pcg = LIST_FIRST(pcgl)) != NULL) {
+       LIST_REMOVE(pcg, pcg_list);
+       pool_put(&pcgpool, pcg);
+   }
+   splx(s);
}
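
/*
 * Illustration (not part of the diff) of the invariant that pcg_get()
 * and pcg_put() maintain: each group is a small LIFO stack.  Slots
 * [0, pcg_avail) hold objects; the remaining slots have a NULL
 * pcgo_va.  With pcg_avail == 3:
 *
 *      idx:          0       1       2       3    ...  PCG_NOBJECTS-1
 *      pcg_objects: [va,pa] [va,pa] [va,pa] [NULL] ... [NULL]
 *
 * pcg_get() pops slot 2 (and decrements pcg_avail); pcg_put() pushes
 * into slot 3 (and increments pcg_avail).
 */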
|
|
/*
- * pool_cache_get:
+ * pool_cache_get{,_paddr}:
 *
- *  Get an object from a pool cache.
+ *  Get an object from a pool cache (optionally returning
+ *  the physical address of the object).
 */
void *
-pool_cache_get(struct pool_cache *pc, int flags)
+pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
{
    struct pool_cache_group *pcg;
    void *object;

Line 1697 (1.69) / Line 1950 (1.111.4.1): pool_cache_get(struct pool_cache *pc, in...

    simple_lock(&pc->pc_slock);

-   if ((pcg = pc->pc_allocfrom) == NULL) {
-       TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
-           if (pcg->pcg_avail != 0) {
-               pc->pc_allocfrom = pcg;
-               goto have_group;
-           }
+   pcg = LIST_FIRST(&pc->pc_partgroups);
+   if (pcg == NULL) {
+       pcg = LIST_FIRST(&pc->pc_fullgroups);
+       if (pcg != NULL) {
+           LIST_REMOVE(pcg, pcg_list);
+           LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
        }
+   }
+   if (pcg == NULL) {

        /*
         * No groups with any available objects.  Allocate

Line 1720 (1.69) / Line 1975 (1.111.4.1): pool_cache_get(struct pool_cache *pc, in...

            return (NULL);
        }
    }
+   if (object != NULL && pap != NULL) {
+#ifdef POOL_VTOPHYS
+       *pap = POOL_VTOPHYS(object);
+#else
+       *pap = POOL_PADDR_INVALID;
+#endif
+   }
        return (object);
    }

-have_group:
    pc->pc_hits++;
    pc->pc_nitems--;
-   object = pcg_get(pcg);
+   object = pcg_get(pcg, pap);

-   if (pcg->pcg_avail == 0)
-       pc->pc_allocfrom = NULL;
+   if (pcg->pcg_avail == 0) {
+       LIST_REMOVE(pcg, pcg_list);
+       LIST_INSERT_HEAD(&pc->pc_emptygroups, pcg, pcg_list);
+   }
    simple_unlock(&pc->pc_slock);

    return (object);
}
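
/*
 * A minimal usage sketch (not part of the diff), reusing the
 * hypothetical frob_cache from the earlier sketch: fetch a constructed
 * object together with its physical address, e.g. for handing to a
 * device.
 */
struct frob *
frob_alloc(paddr_t *pap)
{
    struct frob *f;

    f = pool_cache_get_paddr(&frob_cache, PR_NOWAIT, pap);
    if (f == NULL)
        return NULL;
    /* When the object does not come from a cache group and the pool
     * has no POOL_VTOPHYS translation, *pap is POOL_PADDR_INVALID
     * rather than a usable physical address. */
    return f;
}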
|
|
/*
- * pool_cache_put:
+ * pool_cache_put{,_paddr}:
 *
- *  Put an object back to the pool cache.
+ *  Put an object back to the pool cache (optionally caching the
+ *  physical address of the object).
 */
void
-pool_cache_put(struct pool_cache *pc, void *object)
+pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
{
    struct pool_cache_group *pcg;
    int s;

+   if (__predict_false((pc->pc_pool->pr_flags & PR_WANTED) != 0)) {
+       goto destruct;
+   }
+
    simple_lock(&pc->pc_slock);

-   if ((pcg = pc->pc_freeto) == NULL) {
-       TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
-           if (pcg->pcg_avail != PCG_NOBJECTS) {
-               pc->pc_freeto = pcg;
-               goto have_group;
-           }
+   pcg = LIST_FIRST(&pc->pc_partgroups);
+   if (pcg == NULL) {
+       pcg = LIST_FIRST(&pc->pc_emptygroups);
+       if (pcg != NULL) {
+           LIST_REMOVE(pcg, pcg_list);
+           LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
        }
+   }
+   if (pcg == NULL) {

        /*
         * No empty groups to free the object to.  Attempt to

Line 1765 (1.69) / Line 2034 (1.111.4.1): pool_cache_put(struct pool_cache *pc, vo...

        s = splvm();
        pcg = pool_get(&pcgpool, PR_NOWAIT);
        splx(s);
-       if (pcg != NULL) {
-           memset(pcg, 0, sizeof(*pcg));
-           simple_lock(&pc->pc_slock);
-           pc->pc_ngroups++;
-           TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
-           if (pc->pc_freeto == NULL)
-               pc->pc_freeto = pcg;
-           goto have_group;
-       }
+       if (pcg == NULL) {
+destruct:

            /*
             * Unable to allocate a cache group; destruct the object
             * and free it back to the pool.
             */
            pool_cache_destruct_object(pc, object);
            return;
+       }
+       memset(pcg, 0, sizeof(*pcg));
+       simple_lock(&pc->pc_slock);
+       pc->pc_ngroups++;
+       LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
    }

-have_group:
    pc->pc_nitems++;
-   pcg_put(pcg, object);
+   pcg_put(pcg, object, pa);

-   if (pcg->pcg_avail == PCG_NOBJECTS)
-       pc->pc_freeto = NULL;
+   if (pcg->pcg_avail == PCG_NOBJECTS) {
+       LIST_REMOVE(pcg, pcg_list);
+       LIST_INSERT_HEAD(&pc->pc_fullgroups, pcg, pcg_list);
+   }
    simple_unlock(&pc->pc_slock);
}
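
/*
 * The unadorned pool_cache_get()/pool_cache_put() names survive as
 * wrappers around the *_paddr versions.  A sketch of the matching
 * sys/sys/pool.h definitions (reconstructed from the calling
 * conventions above; treat the exact form as an assumption):
 */
#define pool_cache_get(pc, f)  pool_cache_get_paddr((pc), (f), NULL)
#define pool_cache_put(pc, o)  pool_cache_put_paddr((pc), (o), \
                                   POOL_PADDR_INVALID)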
|
|
Line 1808 (1.69) / Line 2075 (1.111.4.1): pool_cache_destruct_object(struct pool_c...

    pool_put(pc->pc_pool, object);
}

-/*
- * pool_cache_do_invalidate:
- *
- *  This internal function implements pool_cache_invalidate() and
- *  pool_cache_reclaim().
- */
static void
-pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
-    void (*putit)(struct pool *, void *))
+pool_do_cache_invalidate_grouplist(struct pool_cache_grouplist *pcgsl,
+    struct pool_cache *pc, struct pool_pagelist *pq,
+    struct pool_cache_grouplist *pcgdl)
{
    struct pool_cache_group *pcg, *npcg;
    void *object;
-   int s;

-   for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
-       pcg = npcg) {
-       npcg = TAILQ_NEXT(pcg, pcg_list);
+   for (pcg = LIST_FIRST(pcgsl); pcg != NULL; pcg = npcg) {
+       npcg = LIST_NEXT(pcg, pcg_list);
        while (pcg->pcg_avail != 0) {
            pc->pc_nitems--;
-           object = pcg_get(pcg);
-           if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
-               pc->pc_allocfrom = NULL;
+           object = pcg_get(pcg, NULL);
            if (pc->pc_dtor != NULL)
                (*pc->pc_dtor)(pc->pc_arg, object);
-           (*putit)(pc->pc_pool, object);
+           pool_do_put(pc->pc_pool, object, pq);
        }
-       if (free_groups) {
-           pc->pc_ngroups--;
-           TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
-           if (pc->pc_freeto == pcg)
-               pc->pc_freeto = NULL;
-           s = splvm();
-           pool_put(&pcgpool, pcg);
-           splx(s);
-       }
+       pc->pc_ngroups--;
+       LIST_REMOVE(pcg, pcg_list);
+       LIST_INSERT_HEAD(pcgdl, pcg, pcg_list);
    }
}

+static void
+pool_do_cache_invalidate(struct pool_cache *pc, struct pool_pagelist *pq,
+    struct pool_cache_grouplist *pcgl)
+{
+
+   LOCK_ASSERT(simple_lock_held(&pc->pc_slock));
+   LOCK_ASSERT(simple_lock_held(&pc->pc_pool->pr_slock));
+
+   pool_do_cache_invalidate_grouplist(&pc->pc_fullgroups, pc, pq, pcgl);
+   pool_do_cache_invalidate_grouplist(&pc->pc_partgroups, pc, pq, pcgl);
+
+   KASSERT(LIST_EMPTY(&pc->pc_partgroups));
+   KASSERT(LIST_EMPTY(&pc->pc_fullgroups));
+   KASSERT(pc->pc_nitems == 0);
+}
/*
 * pool_cache_invalidate:
 *

Line 1855 (1.69) / Line 2123 (1.111.4.1): pool_cache_do_invalidate(struct pool_cac...

void
pool_cache_invalidate(struct pool_cache *pc)
{
+   struct pool_pagelist pq;
+   struct pool_cache_grouplist pcgl;
+
+   LIST_INIT(&pq);
+   LIST_INIT(&pcgl);
+
    simple_lock(&pc->pc_slock);
-   pool_cache_do_invalidate(pc, 0, pool_put);
+   simple_lock(&pc->pc_pool->pr_slock);
+
+   pool_do_cache_invalidate(pc, &pq, &pcgl);
+
+   simple_unlock(&pc->pc_pool->pr_slock);
    simple_unlock(&pc->pc_slock);
+
+   pr_pagelist_free(pc->pc_pool, &pq);
+   pcg_grouplist_free(&pcgl);
}
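
/*
 * Design note and usage sketch (not part of the diff): the rewritten
 * pool_cache_invalidate() collects pages and groups on local lists
 * (pq, pcgl) while both locks are held, then releases them through
 * pr_pagelist_free() and pcg_grouplist_free() after unlocking, keeping
 * the expensive frees outside the critical section.  A caller just
 * invalidates before tearing the cache down; frob_cache and frob_pool
 * are the hypothetical objects from the earlier sketch.
 */
void
frob_fini(void)
{

    pool_cache_invalidate(&frob_cache);
    pool_cache_destroy(&frob_cache);
    pool_destroy(&frob_pool);
}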
|
|
/*

Line 1867 (1.69) / Line 2147 (1.111.4.1): pool_cache_invalidate(struct pool_cache ...

 *  Reclaim a pool cache for pool_reclaim().
 */
static void
-pool_cache_reclaim(struct pool_cache *pc)
+pool_cache_reclaim(struct pool_cache *pc, struct pool_pagelist *pq,
+    struct pool_cache_grouplist *pcgl)
{

-   simple_lock(&pc->pc_slock);
-   pool_cache_do_invalidate(pc, 1, pool_do_put);
+   /*
+    * We're locking in the wrong order (normally pool_cache -> pool,
+    * but the pool is already locked when we get here), so we have
+    * to use trylock.  If we can't lock the pool_cache, it's not really
+    * a big deal here.
+    */
+   if (simple_lock_try(&pc->pc_slock) == 0)
+       return;
+
+   pool_do_cache_invalidate(pc, pq, pcgl);
+
    simple_unlock(&pc->pc_slock);
}
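
/*
 * Illustration (not part of the diff) of the lock-order inversion the
 * comment above works around, with hypothetical locks A (cache) and
 * B (pool):
 *
 *      normal path                 reclaim path
 *      -----------                 ------------
 *      simple_lock(&A);            simple_lock(&B);   (already held)
 *      simple_lock(&B);            if (simple_lock_try(&A) == 0)
 *      ...                             return;        (back off)
 *
 * Blocking on A while holding B could deadlock against a thread that
 * holds A and wants B; trying once and giving up cannot.
 */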
|
|
Line 1934 (1.69) / Line 2224 (1.111.4.1): pool_allocator_alloc(struct pool *org, i...

    int s, freed;
    void *res;

+   LOCK_ASSERT(!simple_lock_held(&org->pr_slock));
+
    do {
        if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
            return (res);

Line 1953 (1.69) / Line 2245 (1.111.4.1): pool_allocator_alloc(struct pool *org, i...

    }

    /*
-    * Drain all pools, except "org", that use this
-    * allocator.  We do this to reclaim VA space.
+    * Drain all pools that use this allocator.
+    * We do this to reclaim VA space.
     * pa_alloc is responsible for waiting for
     * physical memory.
     *

Line 1975 (1.69) / Line 2267 (1.111.4.1): pool_allocator_alloc(struct pool *org, i...

    do {
        TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
        TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
-       if (pp == org)
-           continue;
-       simple_unlock(&pa->pa_list);
+       simple_unlock(&pa->pa_slock);
        freed = pool_reclaim(pp);
-       simple_lock(&pa->pa_list);
+       simple_lock(&pa->pa_slock);
    } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
        freed == 0);
Line 2004 (1.69) / Line 2294 (1.111.4.1): pool_allocator_free(struct pool *pp, voi...

    struct pool_allocator *pa = pp->pr_alloc;
    int s;

+   LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
+
    (*pa->pa_free)(pp, v);

    s = splvm();
Line 2032 (1.69) / Line 2324 (1.111.4.1): pool_page_alloc(struct pool *pp, int fla...

{
    boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

-   return ((void *) uvm_km_alloc_poolpage(waitok));
+   return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
}

void
pool_page_free(struct pool *pp, void *v)
{

-   uvm_km_free_poolpage((vaddr_t) v);
+   uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
+}
+
+static void *
+pool_page_alloc_meta(struct pool *pp, int flags)
+{
+   boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
+
+   return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
+}
+
+static void
+pool_page_free_meta(struct pool *pp, void *v)
+{
+
+   uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
}
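
/*
 * A minimal sketch (not part of the diff): packaging page hooks like
 * the ones above into a pool_allocator.  "frob_allocator" is
 * hypothetical; the initializer order follows struct pool_allocator's
 * leading fields (pa_alloc, pa_free, pa_pagesz), and a pa_pagesz of 0
 * means "use the default page size".
 */
struct pool_allocator frob_allocator = {
    pool_page_alloc, pool_page_free, 0,
};

/* Passed to pool_init() as the palloc argument, e.g.:
 *
 *  pool_init(&frob_pool, sizeof(struct frob), 0, 0, 0, "frobpl",
 *      &frob_allocator);
 */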
|
|
#ifdef POOL_SUBPAGE

Line 2047 (1.69) / Line 2354 (1.111.4.1): pool_page_free(struct pool *pp, void *v)

void *
pool_subpage_alloc(struct pool *pp, int flags)
{
-   return (pool_get(&psppool, flags));
+   void *v;
+   int s;
+
+   s = splvm();
+   v = pool_get(&psppool, flags);
+   splx(s);
+   return v;
}

void
pool_subpage_free(struct pool *pp, void *v)
{
+   int s;
+
+   s = splvm();
    pool_put(&psppool, v);
+   splx(s);
}

/* We don't provide a real nointr allocator.  Maybe later. */
Line 2078 (1.69) / Line 2391 (1.111.4.1): pool_page_alloc_nointr(struct pool *pp, ...

{
    boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

-   return ((void *) uvm_km_alloc_poolpage1(kernel_map,
-       uvm.kernel_object, waitok));
+   return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
}

void
pool_page_free_nointr(struct pool *pp, void *v)
{

-   uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
+   uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
}
#endif /* POOL_SUBPAGE */