version 1.28, 1999/07/27 21:31:17 |
version 1.50.2.10, 2002/08/27 23:47:30 |
|
|
/* $NetBSD$ */ |
/* $NetBSD$ */ |
|
|
/*- |
/*- |
* Copyright (c) 1997, 1999 The NetBSD Foundation, Inc. |
* Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc. |
* All rights reserved. |
* All rights reserved. |
* |
* |
* This code is derived from software contributed to The NetBSD Foundation |
* This code is derived from software contributed to The NetBSD Foundation |
|
|
* POSSIBILITY OF SUCH DAMAGE. |
* POSSIBILITY OF SUCH DAMAGE. |
*/ |
*/ |
|
|
|
#include <sys/cdefs.h> |
|
__KERNEL_RCSID(0, "$NetBSD$"); |
|
|
#include "opt_pool.h" |
#include "opt_pool.h" |
#include "opt_poollog.h" |
#include "opt_poollog.h" |
#include "opt_lockdebug.h" |
#include "opt_lockdebug.h" |
|
|
#include <sys/pool.h> |
#include <sys/pool.h> |
#include <sys/syslog.h> |
#include <sys/syslog.h> |
|
|
#include <vm/vm.h> |
|
#include <vm/vm_kern.h> |
|
|
|
#include <uvm/uvm.h> |
#include <uvm/uvm.h> |
|
|
/* |
/* |
Line 73 TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD |
|
Line 73 TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD |
|
/* Private pool for page header structures */ |
/* Private pool for page header structures */ |
static struct pool phpool; |
static struct pool phpool; |
|
|
|
#ifdef POOL_SUBPAGE |
|
/* Pool of subpages for use by normal pools. */ |
|
static struct pool psppool; |
|
#endif |
|
|
/* # of seconds to retain page after last use */ |
/* # of seconds to retain page after last use */ |
int pool_inactive_time = 10; |
int pool_inactive_time = 10; |
|
|
Line 89 struct pool_item_header { |
|
Line 94 struct pool_item_header { |
|
TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */ |
TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */ |
LIST_ENTRY(pool_item_header) |
LIST_ENTRY(pool_item_header) |
ph_hashlist; /* Off-page page headers */ |
ph_hashlist; /* Off-page page headers */ |
int ph_nmissing; /* # of chunks in use */ |
unsigned int ph_nmissing; /* # of chunks in use */ |
caddr_t ph_page; /* this page's address */ |
caddr_t ph_page; /* this page's address */ |
struct timeval ph_time; /* last referenced */ |
struct timeval ph_time; /* last referenced */ |
}; |
}; |
|
TAILQ_HEAD(pool_pagelist,pool_item_header); |
|
|
struct pool_item { |
struct pool_item { |
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
int pi_magic; |
int pi_magic; |
#define PI_MAGIC 0xdeadbeef |
|
#endif |
#endif |
|
#define PI_MAGIC 0xdeadbeef |
/* Other entries use only this list entry */ |
/* Other entries use only this list entry */ |
TAILQ_ENTRY(pool_item) pi_list; |
TAILQ_ENTRY(pool_item) pi_list; |
}; |
}; |
|
|
|
|
#define PR_HASH_INDEX(pp,addr) \ |
#define PR_HASH_INDEX(pp,addr) \ |
(((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1)) |
(((u_long)(addr) >> (pp)->pr_alloc->pa_pageshift) & \ |
|
(PR_HASHTABSIZE - 1)) |
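A worked instance of the hash computation, with assumed numbers (a 4 kB back-end page, so pa_pageshift == 12, and PR_HASHTABSIZE assumed here to be 8, its conventional value in pool.h):

	/*
	 * An item at 0xc12345a0 sits on the page at 0xc1234000; its
	 * off-page header is looked up in bucket
	 *
	 *	(0xc12345a0 >> 12) & (8 - 1)  ==  0xc1234 & 7  ==  4
	 */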
|
|
|
#define POOL_NEEDS_CATCHUP(pp) \ |
|
((pp)->pr_nitems < (pp)->pr_minitems) |
|
|
|
/* |
|
* Pool cache management. |
|
* |
|
* Pool caches provide a way for constructed objects to be cached by the |
|
* pool subsystem. This can lead to performance improvements by avoiding |
|
* needless object construction/destruction; it is deferred until absolutely |
|
* necessary. |
|
* |
|
* Caches are grouped into cache groups. Each cache group references |
|
* up to 16 constructed objects. When a cache allocates an object |
|
* from the pool, it calls the object's constructor and places it into |
|
* a cache group. When a cache group frees an object back to the pool, |
|
* it first calls the object's destructor. This allows the object to |
|
* persist in constructed form while freed to the cache. |
|
* |
|
* Multiple caches may exist for each pool. This allows a single |
|
* object type to have multiple constructed forms. The pool references |
|
* each cache, so that when a pool is drained by the pagedaemon, it can |
|
* drain each individual cache as well. Each time a cache is drained, |
|
* the most idle cache group is freed to the pool in its entirety. |
|
* |
|
* Pool caches are laid on top of pools. By layering them, we can avoid |
|
* the complexity of cache management for pools which would not benefit |
|
* from it. |
|
*/ |
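A minimal sketch of the cache API this comment describes, using pool_cache_init(), pool_cache_get() and pool_cache_put() as defined later in this file; all foo_* names are hypothetical, and foo_pool is assumed to have been set up with pool_init() elsewhere:

static struct pool_cache foo_cache;	/* hypothetical cache over foo_pool */

void
foo_cache_attach(void)
{
	/* foo_ctor()/foo_dtor() run at most once per object lifetime. */
	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
}

struct foo *
foo_alloc(void)
{
	/* Returns a constructed object, reusing a cached one if any. */
	return (pool_cache_get(&foo_cache, PR_WAITOK));
}

void
foo_free(struct foo *f)
{
	/* The object stays constructed while it idles in the cache. */
	pool_cache_put(&foo_cache, f);
}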
|
|
|
/* The cache group pool. */ |
|
static struct pool pcgpool; |
|
|
|
static void pool_cache_reclaim(struct pool_cache *); |
|
|
|
static int pool_catchup(struct pool *); |
|
static void pool_prime_page(struct pool *, caddr_t, |
|
struct pool_item_header *); |
|
|
static struct pool_item_header |
void *pool_allocator_alloc(struct pool *, int); |
*pr_find_pagehead __P((struct pool *, caddr_t)); |
void pool_allocator_free(struct pool *, void *); |
static void pr_rmpage __P((struct pool *, struct pool_item_header *)); |
|
static int pool_catchup __P((struct pool *)); |
|
static void pool_prime_page __P((struct pool *, caddr_t)); |
|
static void *pool_page_alloc __P((unsigned long, int, int)); |
|
static void pool_page_free __P((void *, unsigned long, int)); |
|
|
|
static void pool_print1 __P((struct pool *, const char *, |
static void pool_print1(struct pool *, const char *, |
void (*)(const char *, ...))); |
void (*)(const char *, ...)); |
|
|
/* |
/* |
* Pool log entry. An array of these is allocated in pool_create(). |
* Pool log entry. An array of these is allocated in pool_init(). |
*/ |
*/ |
struct pool_log { |
struct pool_log { |
const char *pl_file; |
const char *pl_file; |
Line 139 struct pool_log { |
|
Line 176 struct pool_log { |
|
|
|
int pool_logsize = POOL_LOGSIZE; |
int pool_logsize = POOL_LOGSIZE; |
|
|
#ifdef DIAGNOSTIC |
#ifdef POOL_DIAGNOSTIC |
static void pr_log __P((struct pool *, void *, int, const char *, long)); |
static __inline void |
static void pr_printlog __P((struct pool *, struct pool_item *, |
pr_log(struct pool *pp, void *v, int action, const char *file, long line) |
void (*)(const char *, ...))); |
|
static void pr_enter __P((struct pool *, const char *, long)); |
|
static void pr_leave __P((struct pool *)); |
|
static void pr_enter_check __P((struct pool *, |
|
void (*)(const char *, ...))); |
|
|
|
static __inline__ void |
|
pr_log(pp, v, action, file, line) |
|
struct pool *pp; |
|
void *v; |
|
int action; |
|
const char *file; |
|
long line; |
|
{ |
{ |
int n = pp->pr_curlogentry; |
int n = pp->pr_curlogentry; |
struct pool_log *pl; |
struct pool_log *pl; |
Line 177 pr_log(pp, v, action, file, line) |
|
Line 201 pr_log(pp, v, action, file, line) |
|
} |
} |
|
|
static void |
static void |
pr_printlog(pp, pi, pr) |
pr_printlog(struct pool *pp, struct pool_item *pi, |
struct pool *pp; |
void (*pr)(const char *, ...)) |
struct pool_item *pi; |
|
void (*pr) __P((const char *, ...)); |
|
{ |
{ |
int i = pp->pr_logsize; |
int i = pp->pr_logsize; |
int n = pp->pr_curlogentry; |
int n = pp->pr_curlogentry; |
Line 208 pr_printlog(pp, pi, pr) |
|
Line 230 pr_printlog(pp, pi, pr) |
|
} |
} |
} |
} |
|
|
static __inline__ void |
static __inline void |
pr_enter(pp, file, line) |
pr_enter(struct pool *pp, const char *file, long line) |
struct pool *pp; |
|
const char *file; |
|
long line; |
|
{ |
{ |
|
|
if (pp->pr_entered_file != NULL) { |
if (__predict_false(pp->pr_entered_file != NULL)) { |
printf("pool %s: reentrancy at file %s line %ld\n", |
printf("pool %s: reentrancy at file %s line %ld\n", |
pp->pr_wchan, file, line); |
pp->pr_wchan, file, line); |
printf(" previous entry at file %s line %ld\n", |
printf(" previous entry at file %s line %ld\n", |
Line 227 pr_enter(pp, file, line) |
|
Line 246 pr_enter(pp, file, line) |
|
pp->pr_entered_line = line; |
pp->pr_entered_line = line; |
} |
} |
|
|
static __inline__ void |
static __inline void |
pr_leave(pp) |
pr_leave(struct pool *pp) |
struct pool *pp; |
|
{ |
{ |
|
|
if (pp->pr_entered_file == NULL) { |
if (__predict_false(pp->pr_entered_file == NULL)) { |
printf("pool %s not entered?\n", pp->pr_wchan); |
printf("pool %s not entered?\n", pp->pr_wchan); |
panic("pr_leave"); |
panic("pr_leave"); |
} |
} |
|
|
pp->pr_entered_line = 0; |
pp->pr_entered_line = 0; |
} |
} |
|
|
static __inline__ void |
static __inline void |
pr_enter_check(pp, pr) |
pr_enter_check(struct pool *pp, void (*pr)(const char *, ...)) |
struct pool *pp; |
|
void (*pr) __P((const char *, ...)); |
|
{ |
{ |
|
|
if (pp->pr_entered_file != NULL) |
if (pp->pr_entered_file != NULL) |
Line 257 pr_enter_check(pp, pr) |
|
Line 273 pr_enter_check(pp, pr) |
|
#define pr_enter(pp, file, line) |
#define pr_enter(pp, file, line) |
#define pr_leave(pp) |
#define pr_leave(pp) |
#define pr_enter_check(pp, pr) |
#define pr_enter_check(pp, pr) |
#endif /* DIAGNOSTIC */ |
#endif /* POOL_DIAGNOSTIC */ |
|
|
/* |
/* |
* Return the pool page header based on page address. |
* Return the pool page header based on page address. |
*/ |
*/ |
static __inline__ struct pool_item_header * |
static __inline struct pool_item_header * |
pr_find_pagehead(pp, page) |
pr_find_pagehead(struct pool *pp, caddr_t page) |
struct pool *pp; |
|
caddr_t page; |
|
{ |
{ |
struct pool_item_header *ph; |
struct pool_item_header *ph; |
|
|
Line 284 pr_find_pagehead(pp, page) |
|
Line 298 pr_find_pagehead(pp, page) |
|
/* |
/* |
* Remove a page from the pool. |
* Remove a page from the pool. |
*/ |
*/ |
static __inline__ void |
static __inline void |
pr_rmpage(pp, ph) |
pr_rmpage(struct pool *pp, struct pool_item_header *ph, |
struct pool *pp; |
struct pool_pagelist *pq) |
struct pool_item_header *ph; |
|
{ |
{ |
|
int s; |
|
|
/* |
/* |
* If the page was idle, decrement the idle page count. |
* If the page was idle, decrement the idle page count. |
Line 306 pr_rmpage(pp, ph) |
|
Line 320 pr_rmpage(pp, ph) |
|
pp->pr_nitems -= pp->pr_itemsperpage; |
pp->pr_nitems -= pp->pr_itemsperpage; |
|
|
/* |
/* |
* Unlink a page from the pool and release it. |
* Unlink a page from the pool and release it (or queue it for release). |
*/ |
*/ |
TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist); |
TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist); |
(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype); |
if (pq) { |
|
TAILQ_INSERT_HEAD(pq, ph, ph_pagelist); |
|
} else { |
|
pool_allocator_free(pp, ph->ph_page); |
|
if ((pp->pr_roflags & PR_PHINPAGE) == 0) { |
|
LIST_REMOVE(ph, ph_hashlist); |
|
s = splhigh(); |
|
pool_put(&phpool, ph); |
|
splx(s); |
|
} |
|
} |
pp->pr_npages--; |
pp->pr_npages--; |
pp->pr_npagefree++; |
pp->pr_npagefree++; |
|
|
if ((pp->pr_roflags & PR_PHINPAGE) == 0) { |
|
int s; |
|
LIST_REMOVE(ph, ph_hashlist); |
|
s = splhigh(); |
|
pool_put(&phpool, ph); |
|
splx(s); |
|
} |
|
|
|
if (pp->pr_curpage == ph) { |
if (pp->pr_curpage == ph) { |
/* |
/* |
* Find a new non-empty page header, if any. |
* Find a new non-empty page header, if any. |
* Start search from the page head, to increase the |
* Start search from the page head, to increase the |
* chance for "high water" pages to be freed. |
* chance for "high water" pages to be freed. |
*/ |
*/ |
for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; |
TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) |
ph = TAILQ_NEXT(ph, ph_pagelist)) |
|
if (TAILQ_FIRST(&ph->ph_itemlist) != NULL) |
if (TAILQ_FIRST(&ph->ph_itemlist) != NULL) |
break; |
break; |
|
|
Line 337 pr_rmpage(pp, ph) |
|
Line 352 pr_rmpage(pp, ph) |
|
} |
} |
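The new pq argument lets a caller collect removed pages on a local list and do the actual frees after dropping the pool lock; this is how pool_reclaim() uses it later in this diff, condensed here:

	struct pool_pagelist pq;

	TAILQ_INIT(&pq);
	/* ... with pp->pr_slock held: pr_rmpage(pp, ph, &pq) ... */

	/* After releasing pp->pr_slock, free the pages with no lock held. */
	while ((ph = TAILQ_FIRST(&pq)) != NULL) {
		TAILQ_REMOVE(&pq, ph, ph_pagelist);
		pool_allocator_free(pp, ph->ph_page);
	}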
|
|
/* |
/* |
* Allocate and initialize a pool. |
|
*/ |
|
struct pool * |
|
pool_create(size, align, ioff, nitems, wchan, pagesz, alloc, release, mtype) |
|
size_t size; |
|
u_int align; |
|
u_int ioff; |
|
int nitems; |
|
const char *wchan; |
|
size_t pagesz; |
|
void *(*alloc) __P((unsigned long, int, int)); |
|
void (*release) __P((void *, unsigned long, int)); |
|
int mtype; |
|
{ |
|
struct pool *pp; |
|
int flags; |
|
|
|
pp = (struct pool *)malloc(sizeof(*pp), M_POOL, M_NOWAIT); |
|
if (pp == NULL) |
|
return (NULL); |
|
|
|
flags = PR_FREEHEADER; |
|
pool_init(pp, size, align, ioff, flags, wchan, pagesz, |
|
alloc, release, mtype); |
|
|
|
if (nitems != 0) { |
|
if (pool_prime(pp, nitems, NULL) != 0) { |
|
pool_destroy(pp); |
|
return (NULL); |
|
} |
|
} |
|
|
|
return (pp); |
|
} |
|
|
|
/* |
|
* Initialize the given pool resource structure. |
* Initialize the given pool resource structure. |
* |
* |
* We export this routine to allow other kernel parts to declare |
* We export this routine to allow other kernel parts to declare |
* static pools that must be initialized before malloc() is available. |
* static pools that must be initialized before malloc() is available. |
*/ |
*/ |
void |
void |
pool_init(pp, size, align, ioff, flags, wchan, pagesz, alloc, release, mtype) |
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags, |
struct pool *pp; |
const char *wchan, struct pool_allocator *palloc) |
size_t size; |
|
u_int align; |
|
u_int ioff; |
|
int flags; |
|
const char *wchan; |
|
size_t pagesz; |
|
void *(*alloc) __P((unsigned long, int, int)); |
|
void (*release) __P((void *, unsigned long, int)); |
|
int mtype; |
|
{ |
{ |
int off, slack, i; |
int off, slack, i; |
|
|
Line 401 pool_init(pp, size, align, ioff, flags, |
|
Line 371 pool_init(pp, size, align, ioff, flags, |
|
flags |= PR_LOGGING; |
flags |= PR_LOGGING; |
#endif |
#endif |
|
|
|
#ifdef POOL_SUBPAGE |
/* |
/* |
* Check arguments and construct default values. |
* XXX We don't provide a real `nointr' back-end |
|
* yet; all sub-pages come from a kmem back-end. |
|
* maybe some day... |
*/ |
*/ |
if (!powerof2(pagesz) || pagesz > PAGE_SIZE) |
if (palloc == NULL) { |
panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz); |
extern struct pool_allocator pool_allocator_kmem_subpage; |
|
palloc = &pool_allocator_kmem_subpage; |
|
} |
|
/* |
|
* We'll assume any user-specified back-end allocator |
|
* will deal with sub-pages, or simply don't care. |
|
*/ |
|
#else |
|
if (palloc == NULL) |
|
palloc = &pool_allocator_kmem; |
|
#endif /* POOL_SUBPAGE */ |
|
if ((palloc->pa_flags & PA_INITIALIZED) == 0) { |
|
if (palloc->pa_pagesz == 0) { |
|
#ifdef POOL_SUBPAGE |
|
if (palloc == &pool_allocator_kmem) |
|
palloc->pa_pagesz = PAGE_SIZE; |
|
else |
|
palloc->pa_pagesz = POOL_SUBPAGE; |
|
#else |
|
palloc->pa_pagesz = PAGE_SIZE; |
|
#endif /* POOL_SUBPAGE */ |
|
} |
|
|
if (alloc == NULL && release == NULL) { |
TAILQ_INIT(&palloc->pa_list); |
alloc = pool_page_alloc; |
|
release = pool_page_free; |
simple_lock_init(&palloc->pa_slock); |
pagesz = PAGE_SIZE; /* Rounds to PAGE_SIZE anyhow. */ |
palloc->pa_pagemask = ~(palloc->pa_pagesz - 1); |
} else if ((alloc != NULL && release != NULL) == 0) { |
palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1; |
/* If you specifiy one, must specify both. */ |
palloc->pa_flags |= PA_INITIALIZED; |
panic("pool_init: must specify alloc and release together"); |
} |
} |
|
|
|
if (pagesz == 0) |
|
pagesz = PAGE_SIZE; |
|
|
|
if (align == 0) |
if (align == 0) |
align = ALIGN(1); |
align = ALIGN(1); |
Line 425 pool_init(pp, size, align, ioff, flags, |
|
Line 415 pool_init(pp, size, align, ioff, flags, |
|
if (size < sizeof(struct pool_item)) |
if (size < sizeof(struct pool_item)) |
size = sizeof(struct pool_item); |
size = sizeof(struct pool_item); |
|
|
|
size = roundup(size, align); |
|
#ifdef DIAGNOSTIC |
|
if (size > palloc->pa_pagesz) |
|
panic("pool_init: pool item size (%lu) too large", |
|
(u_long)size); |
|
#endif |
|
|
/* |
/* |
* Initialize the pool structure. |
* Initialize the pool structure. |
*/ |
*/ |
TAILQ_INIT(&pp->pr_pagelist); |
TAILQ_INIT(&pp->pr_pagelist); |
|
TAILQ_INIT(&pp->pr_cachelist); |
pp->pr_curpage = NULL; |
pp->pr_curpage = NULL; |
pp->pr_npages = 0; |
pp->pr_npages = 0; |
pp->pr_minitems = 0; |
pp->pr_minitems = 0; |
Line 436 pool_init(pp, size, align, ioff, flags, |
|
Line 434 pool_init(pp, size, align, ioff, flags, |
|
pp->pr_maxpages = UINT_MAX; |
pp->pr_maxpages = UINT_MAX; |
pp->pr_roflags = flags; |
pp->pr_roflags = flags; |
pp->pr_flags = 0; |
pp->pr_flags = 0; |
pp->pr_size = ALIGN(size); |
pp->pr_size = size; |
pp->pr_align = align; |
pp->pr_align = align; |
pp->pr_wchan = wchan; |
pp->pr_wchan = wchan; |
pp->pr_mtype = mtype; |
pp->pr_alloc = palloc; |
pp->pr_alloc = alloc; |
|
pp->pr_free = release; |
|
pp->pr_pagesz = pagesz; |
|
pp->pr_pagemask = ~(pagesz - 1); |
|
pp->pr_pageshift = ffs(pagesz) - 1; |
|
pp->pr_nitems = 0; |
pp->pr_nitems = 0; |
pp->pr_nout = 0; |
pp->pr_nout = 0; |
pp->pr_hardlimit = UINT_MAX; |
pp->pr_hardlimit = UINT_MAX; |
pp->pr_hardlimit_warning = NULL; |
pp->pr_hardlimit_warning = NULL; |
pp->pr_hardlimit_ratecap = 0; |
pp->pr_hardlimit_ratecap.tv_sec = 0; |
memset(&pp->pr_hardlimit_warning_last, 0, |
pp->pr_hardlimit_ratecap.tv_usec = 0; |
sizeof(pp->pr_hardlimit_warning_last)); |
pp->pr_hardlimit_warning_last.tv_sec = 0; |
|
pp->pr_hardlimit_warning_last.tv_usec = 0; |
|
pp->pr_drain_hook = NULL; |
|
pp->pr_drain_hook_arg = NULL; |
|
|
/* |
/* |
* Decide whether to put the page header off page to avoid |
* Decide whether to put the page header off page to avoid |
Line 460 pool_init(pp, size, align, ioff, flags, |
|
Line 456 pool_init(pp, size, align, ioff, flags, |
|
* with its header based on the page address. |
* with its header based on the page address. |
* We use 1/16 of the page size as the threshold (XXX: tune) |
* We use 1/16 of the page size as the threshold (XXX: tune) |
*/ |
*/ |
if (pp->pr_size < pagesz/16) { |
if (pp->pr_size < palloc->pa_pagesz/16) { |
/* Use the end of the page for the page header */ |
/* Use the end of the page for the page header */ |
pp->pr_roflags |= PR_PHINPAGE; |
pp->pr_roflags |= PR_PHINPAGE; |
pp->pr_phoffset = off = |
pp->pr_phoffset = off = palloc->pa_pagesz - |
pagesz - ALIGN(sizeof(struct pool_item_header)); |
ALIGN(sizeof(struct pool_item_header)); |
} else { |
} else { |
/* The page header will be taken from our page header pool */ |
/* The page header will be taken from our page header pool */ |
pp->pr_phoffset = 0; |
pp->pr_phoffset = 0; |
off = pagesz; |
off = palloc->pa_pagesz; |
for (i = 0; i < PR_HASHTABSIZE; i++) { |
for (i = 0; i < PR_HASHTABSIZE; i++) { |
LIST_INIT(&pp->pr_hashtab[i]); |
LIST_INIT(&pp->pr_hashtab[i]); |
} |
} |
Line 483 pool_init(pp, size, align, ioff, flags, |
|
Line 479 pool_init(pp, size, align, ioff, flags, |
|
*/ |
*/ |
pp->pr_itemoffset = ioff = ioff % align; |
pp->pr_itemoffset = ioff = ioff % align; |
pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size; |
pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size; |
|
KASSERT(pp->pr_itemsperpage != 0); |
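	/*
	 * Worked example with assumed numbers: a 4 kB page keeping its
	 * header in-page leaves off = 4096 - 32 = 4064 bytes, taking
	 * ALIGN(sizeof(struct pool_item_header)) as 32.  With
	 * pr_size = 256, align = 32 and ioff = 0 this gives
	 *
	 *	itemsperpage = (4064 - ((32 - 0) % 32)) / 256 = 15
	 */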
|
|
/* |
/* |
* Use the slack between the chunks and the page header |
* Use the slack between the chunks and the page header |
Line 500 pool_init(pp, size, align, ioff, flags, |
|
Line 497 pool_init(pp, size, align, ioff, flags, |
|
pp->pr_hiwat = 0; |
pp->pr_hiwat = 0; |
pp->pr_nidle = 0; |
pp->pr_nidle = 0; |
|
|
|
#ifdef POOL_DIAGNOSTIC |
if (flags & PR_LOGGING) { |
if (flags & PR_LOGGING) { |
if (kmem_map == NULL || |
if (kmem_map == NULL || |
(pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log), |
(pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log), |
Line 508 pool_init(pp, size, align, ioff, flags, |
|
Line 506 pool_init(pp, size, align, ioff, flags, |
|
pp->pr_curlogentry = 0; |
pp->pr_curlogentry = 0; |
pp->pr_logsize = pool_logsize; |
pp->pr_logsize = pool_logsize; |
} |
} |
|
#endif |
|
|
pp->pr_entered_file = NULL; |
pp->pr_entered_file = NULL; |
pp->pr_entered_line = 0; |
pp->pr_entered_line = 0; |
Line 515 pool_init(pp, size, align, ioff, flags, |
|
Line 514 pool_init(pp, size, align, ioff, flags, |
|
simple_lock_init(&pp->pr_slock); |
simple_lock_init(&pp->pr_slock); |
|
|
/* |
/* |
* Initialize private page header pool if we haven't done so yet. |
* Initialize private page header pool and cache magazine pool if we |
|
* haven't done so yet. |
* XXX LOCKING. |
* XXX LOCKING. |
*/ |
*/ |
if (phpool.pr_size == 0) { |
if (phpool.pr_size == 0) { |
|
#ifdef POOL_SUBPAGE |
|
pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0, |
|
"phpool", &pool_allocator_kmem); |
|
pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0, |
|
PR_RECURSIVE, "psppool", &pool_allocator_kmem); |
|
#else |
pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, |
pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, |
0, "phpool", 0, 0, 0, 0); |
0, "phpool", NULL); |
|
#endif |
|
pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0, |
|
0, "pcgpool", NULL); |
} |
} |
|
|
/* Insert into the list of all pools. */ |
/* Insert into the list of all pools. */ |
simple_lock(&pool_head_slock); |
simple_lock(&pool_head_slock); |
TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist); |
TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist); |
simple_unlock(&pool_head_slock); |
simple_unlock(&pool_head_slock); |
|
|
|
/* Insert this into the list of pools using this allocator. */ |
|
simple_lock(&palloc->pa_slock); |
|
TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list); |
|
simple_unlock(&palloc->pa_slock); |
} |
} |
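As the comment above pool_init() notes, the pool structure may live in static storage so that initialization can happen before malloc() works; a minimal sketch against the new seven-argument signature, with all foo_* names hypothetical:

struct foo {
	int	f_val;
};

static struct pool foo_pool;		/* static: usable at early boot */

void
foo_bootstrap(void)
{
	/* A NULL allocator selects the default kmem back-end. */
	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
}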
|
|
/* |
/* |
* De-commission a pool resource. |
* De-commission a pool resource. |
*/ |
*/ |
void |
void |
pool_destroy(pp) |
pool_destroy(struct pool *pp) |
struct pool *pp; |
|
{ |
{ |
struct pool_item_header *ph; |
struct pool_item_header *ph; |
|
struct pool_cache *pc; |
|
|
|
/* Locking order: pool_allocator -> pool */ |
|
simple_lock(&pp->pr_alloc->pa_slock); |
|
TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list); |
|
simple_unlock(&pp->pr_alloc->pa_slock); |
|
|
|
/* Destroy all caches for this pool. */ |
|
while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL) |
|
pool_cache_destroy(pc); |
|
|
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
if (pp->pr_nout != 0) { |
if (pp->pr_nout != 0) { |
Line 547 pool_destroy(pp) |
|
Line 570 pool_destroy(pp) |
|
#endif |
#endif |
|
|
/* Remove all pages */ |
/* Remove all pages */ |
if ((pp->pr_roflags & PR_STATIC) == 0) |
while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL) |
while ((ph = pp->pr_pagelist.tqh_first) != NULL) |
pr_rmpage(pp, ph, NULL); |
pr_rmpage(pp, ph); |
|
|
|
/* Remove from global pool list */ |
/* Remove from global pool list */ |
simple_lock(&pool_head_slock); |
simple_lock(&pool_head_slock); |
TAILQ_REMOVE(&pool_head, pp, pr_poollist); |
TAILQ_REMOVE(&pool_head, pp, pr_poollist); |
/* XXX Only clear this if we were drainpp? */ |
if (drainpp == pp) { |
drainpp = NULL; |
drainpp = NULL; |
|
} |
simple_unlock(&pool_head_slock); |
simple_unlock(&pool_head_slock); |
|
|
|
#ifdef POOL_DIAGNOSTIC |
if ((pp->pr_roflags & PR_LOGGING) != 0) |
if ((pp->pr_roflags & PR_LOGGING) != 0) |
free(pp->pr_log, M_TEMP); |
free(pp->pr_log, M_TEMP); |
|
#endif |
|
} |
|
|
|
void |
|
pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg) |
|
{ |
|
|
if (pp->pr_roflags & PR_FREEHEADER) |
/* XXX no locking -- must be used just after pool_init() */ |
free(pp, M_POOL); |
#ifdef DIAGNOSTIC |
|
if (pp->pr_drain_hook != NULL) |
|
panic("pool_set_drain_hook(%s): already set", pp->pr_wchan); |
|
#endif |
|
pp->pr_drain_hook = fn; |
|
pp->pr_drain_hook_arg = arg; |
} |
} |
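A sketch of registering a drain hook; foo_drain() and foo_pool are hypothetical. The flags argument is the caller's pool_get() flags when the hook fires at the hard limit, or PR_NOWAIT when it fires from pool_reclaim():

/* Called with the pool unlocked; release whatever can be spared. */
static void
foo_drain(void *arg, int flags)
{
	/* e.g. flush a private free list back into foo_pool */
}

/* Immediately after pool_init(), before the pool sees any traffic: */
pool_set_drain_hook(&foo_pool, foo_drain, NULL);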
|
|
|
static __inline struct pool_item_header * |
|
pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags) |
|
{ |
|
struct pool_item_header *ph; |
|
int s; |
|
|
|
LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0); |
|
|
|
if ((pp->pr_roflags & PR_PHINPAGE) != 0) |
|
ph = (struct pool_item_header *) (storage + pp->pr_phoffset); |
|
else { |
|
s = splhigh(); |
|
ph = pool_get(&phpool, flags); |
|
splx(s); |
|
} |
|
|
|
return (ph); |
|
} |
|
|
/* |
/* |
* Grab an item from the pool; must be called at appropriate spl level |
* Grab an item from the pool; must be called at appropriate spl level |
*/ |
*/ |
void * |
void * |
_pool_get(pp, flags, file, line) |
#ifdef POOL_DIAGNOSTIC |
struct pool *pp; |
_pool_get(struct pool *pp, int flags, const char *file, long line) |
int flags; |
#else |
const char *file; |
pool_get(struct pool *pp, int flags) |
long line; |
#endif |
{ |
{ |
void *v; |
|
struct pool_item *pi; |
struct pool_item *pi; |
struct pool_item_header *ph; |
struct pool_item_header *ph; |
|
void *v; |
|
|
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
if ((pp->pr_roflags & PR_STATIC) && (flags & PR_MALLOCOK)) { |
if (__predict_false(curlwp == NULL && doing_shutdown == 0 && |
pr_printlog(pp, NULL, printf); |
(flags & PR_WAITOK) != 0)) |
panic("pool_get: static"); |
panic("pool_get: %s: must have NOWAIT", pp->pr_wchan); |
} |
|
#endif |
|
|
|
if (curproc == NULL && (flags & PR_WAITOK) != 0) |
#ifdef LOCKDEBUG |
panic("pool_get: must have NOWAIT"); |
if (flags & PR_WAITOK) |
|
simple_lock_only_held(NULL, "pool_get(PR_WAITOK)"); |
|
#endif |
|
#endif /* DIAGNOSTIC */ |
|
|
simple_lock(&pp->pr_slock); |
simple_lock(&pp->pr_slock); |
pr_enter(pp, file, line); |
pr_enter(pp, file, line); |
Line 600 _pool_get(pp, flags, file, line) |
|
Line 654 _pool_get(pp, flags, file, line) |
|
* the pool. |
* the pool. |
*/ |
*/ |
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
if (pp->pr_nout > pp->pr_hardlimit) { |
if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) { |
pr_leave(pp); |
pr_leave(pp); |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
panic("pool_get: %s: crossed hard limit", pp->pr_wchan); |
panic("pool_get: %s: crossed hard limit", pp->pr_wchan); |
} |
} |
#endif |
#endif |
if (pp->pr_nout == pp->pr_hardlimit) { |
if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) { |
if (flags & PR_WAITOK) { |
if (pp->pr_drain_hook != NULL) { |
/* |
/* |
* XXX: A warning isn't logged in this case. Should |
* Since the drain hook is going to free things |
* it be? |
* back to the pool, unlock, call the hook, re-lock, |
|
* and check the hardlimit condition again. |
*/ |
*/ |
pp->pr_flags |= PR_WANTED; |
|
pr_leave(pp); |
pr_leave(pp); |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
tsleep((caddr_t)pp, PSWP, pp->pr_wchan, 0); |
(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags); |
simple_lock(&pp->pr_slock); |
simple_lock(&pp->pr_slock); |
pr_enter(pp, file, line); |
pr_enter(pp, file, line); |
goto startover; |
if (pp->pr_nout < pp->pr_hardlimit) |
|
goto startover; |
} |
} |
if (pp->pr_hardlimit_warning != NULL) { |
|
|
if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) { |
/* |
/* |
* Log a message that the hard limit has been hit. |
* XXX: A warning isn't logged in this case. Should |
|
* it be? |
*/ |
*/ |
struct timeval curtime, logdiff; |
pp->pr_flags |= PR_WANTED; |
int s = splclock(); |
pr_leave(pp); |
curtime = mono_time; |
ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock); |
splx(s); |
pr_enter(pp, file, line); |
timersub(&curtime, &pp->pr_hardlimit_warning_last, |
goto startover; |
&logdiff); |
|
if (logdiff.tv_sec >= pp->pr_hardlimit_ratecap) { |
|
pp->pr_hardlimit_warning_last = curtime; |
|
log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning); |
|
} |
|
} |
} |
|
|
if (flags & PR_URGENT) |
/* |
panic("pool_get: urgent"); |
* Log a message that the hard limit has been hit. |
|
*/ |
|
if (pp->pr_hardlimit_warning != NULL && |
|
ratecheck(&pp->pr_hardlimit_warning_last, |
|
&pp->pr_hardlimit_ratecap)) |
|
log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning); |
|
|
pp->pr_nfail++; |
pp->pr_nfail++; |
|
|
Line 653 _pool_get(pp, flags, file, line) |
|
Line 710 _pool_get(pp, flags, file, line) |
|
* has no items in its bucket. |
* has no items in its bucket. |
*/ |
*/ |
if ((ph = pp->pr_curpage) == NULL) { |
if ((ph = pp->pr_curpage) == NULL) { |
void *v; |
|
|
|
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
if (pp->pr_nitems != 0) { |
if (pp->pr_nitems != 0) { |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
Line 671 _pool_get(pp, flags, file, line) |
|
Line 726 _pool_get(pp, flags, file, line) |
|
*/ |
*/ |
pr_leave(pp); |
pr_leave(pp); |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype); |
v = pool_allocator_alloc(pp, flags); |
|
if (__predict_true(v != NULL)) |
|
ph = pool_alloc_item_header(pp, v, flags); |
simple_lock(&pp->pr_slock); |
simple_lock(&pp->pr_slock); |
pr_enter(pp, file, line); |
pr_enter(pp, file, line); |
|
|
if (v == NULL) { |
if (__predict_false(v == NULL || ph == NULL)) { |
|
if (v != NULL) |
|
pool_allocator_free(pp, v); |
|
|
/* |
/* |
* We were unable to allocate a page, but |
* We were unable to allocate a page or item |
* we released the lock during allocation, |
* header, but we released the lock during |
* so perhaps items were freed back to the |
* allocation, so perhaps items were freed |
* pool. Check for this case. |
* back to the pool. Check for this case. |
*/ |
*/ |
if (pp->pr_curpage != NULL) |
if (pp->pr_curpage != NULL) |
goto startover; |
goto startover; |
|
|
if (flags & PR_URGENT) |
|
panic("pool_get: urgent"); |
|
|
|
if ((flags & PR_WAITOK) == 0) { |
if ((flags & PR_WAITOK) == 0) { |
pp->pr_nfail++; |
pp->pr_nfail++; |
pr_leave(pp); |
pr_leave(pp); |
Line 698 _pool_get(pp, flags, file, line) |
|
Line 755 _pool_get(pp, flags, file, line) |
|
/* |
/* |
* Wait for items to be returned to this pool. |
* Wait for items to be returned to this pool. |
* |
* |
* XXX: we actually want to wait just until |
|
* the page allocator has memory again. Depending |
|
* on this pool's usage, we might get stuck here |
|
* for a long time. |
|
* |
|
* XXX: maybe we should wake up once a second and |
* XXX: maybe we should wake up once a second and |
* try again? |
* try again? |
*/ |
*/ |
pp->pr_flags |= PR_WANTED; |
pp->pr_flags |= PR_WANTED; |
|
/* PA_WANTED is already set on the allocator. */ |
pr_leave(pp); |
pr_leave(pp); |
simple_unlock(&pp->pr_slock); |
ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock); |
tsleep((caddr_t)pp, PSWP, pp->pr_wchan, 0); |
|
simple_lock(&pp->pr_slock); |
|
pr_enter(pp, file, line); |
pr_enter(pp, file, line); |
goto startover; |
goto startover; |
} |
} |
|
|
/* We have more memory; add it to the pool */ |
/* We have more memory; add it to the pool */ |
|
pool_prime_page(pp, v, ph); |
pp->pr_npagealloc++; |
pp->pr_npagealloc++; |
pool_prime_page(pp, v); |
|
|
|
/* Start the allocation process over. */ |
/* Start the allocation process over. */ |
goto startover; |
goto startover; |
} |
} |
|
|
if ((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL) { |
if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) { |
pr_leave(pp); |
pr_leave(pp); |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
panic("pool_get: %s: page empty", pp->pr_wchan); |
panic("pool_get: %s: page empty", pp->pr_wchan); |
} |
} |
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
if (pp->pr_nitems == 0) { |
if (__predict_false(pp->pr_nitems == 0)) { |
pr_leave(pp); |
pr_leave(pp); |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
printf("pool_get: %s: items on itemlist, nitems %u\n", |
printf("pool_get: %s: items on itemlist, nitems %u\n", |
Line 737 _pool_get(pp, flags, file, line) |
|
Line 788 _pool_get(pp, flags, file, line) |
|
panic("pool_get: nitems inconsistent\n"); |
panic("pool_get: nitems inconsistent\n"); |
} |
} |
#endif |
#endif |
|
|
|
#ifdef POOL_DIAGNOSTIC |
pr_log(pp, v, PRLOG_GET, file, line); |
pr_log(pp, v, PRLOG_GET, file, line); |
|
#endif |
|
|
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
if (pi->pi_magic != PI_MAGIC) { |
if (__predict_false(pi->pi_magic != PI_MAGIC)) { |
pr_printlog(pp, pi, printf); |
pr_printlog(pp, pi, printf); |
panic("pool_get(%s): free list modified: magic=%x; page %p;" |
panic("pool_get(%s): free list modified: magic=%x; page %p;" |
" item addr %p\n", |
" item addr %p\n", |
Line 756 _pool_get(pp, flags, file, line) |
|
Line 810 _pool_get(pp, flags, file, line) |
|
pp->pr_nout++; |
pp->pr_nout++; |
if (ph->ph_nmissing == 0) { |
if (ph->ph_nmissing == 0) { |
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
if (pp->pr_nidle == 0) |
if (__predict_false(pp->pr_nidle == 0)) |
panic("pool_get: nidle inconsistent"); |
panic("pool_get: nidle inconsistent"); |
#endif |
#endif |
pp->pr_nidle--; |
pp->pr_nidle--; |
Line 764 _pool_get(pp, flags, file, line) |
|
Line 818 _pool_get(pp, flags, file, line) |
|
ph->ph_nmissing++; |
ph->ph_nmissing++; |
if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) { |
if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) { |
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
if (ph->ph_nmissing != pp->pr_itemsperpage) { |
if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) { |
pr_leave(pp); |
pr_leave(pp); |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
panic("pool_get: %s: nmissing inconsistent", |
panic("pool_get: %s: nmissing inconsistent", |
Line 784 _pool_get(pp, flags, file, line) |
|
Line 838 _pool_get(pp, flags, file, line) |
|
*/ |
*/ |
TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist); |
TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist); |
TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist); |
TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist); |
for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; |
TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) |
ph = TAILQ_NEXT(ph, ph_pagelist)) |
|
if (TAILQ_FIRST(&ph->ph_itemlist) != NULL) |
if (TAILQ_FIRST(&ph->ph_itemlist) != NULL) |
break; |
break; |
|
|
Line 798 _pool_get(pp, flags, file, line) |
|
Line 851 _pool_get(pp, flags, file, line) |
|
* If we have a low water mark and we are now below that low |
* If we have a low water mark and we are now below that low |
* water mark, add more items to the pool. |
* water mark, add more items to the pool. |
*/ |
*/ |
if (pp->pr_nitems < pp->pr_minitems && pool_catchup(pp) != 0) { |
if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) { |
/* |
/* |
* XXX: Should we log a warning? Should we set up a timeout |
* XXX: Should we log a warning? Should we set up a timeout |
* to try again in a second or so? The latter could break |
* to try again in a second or so? The latter could break |
Line 812 _pool_get(pp, flags, file, line) |
|
Line 865 _pool_get(pp, flags, file, line) |
|
} |
} |
|
|
/* |
/* |
* Return resource to the pool; must be called at appropriate spl level |
* Internal version of pool_put(). Pool is already locked/entered. |
*/ |
*/ |
void |
static void |
_pool_put(pp, v, file, line) |
pool_do_put(struct pool *pp, void *v) |
struct pool *pp; |
|
void *v; |
|
const char *file; |
|
long line; |
|
{ |
{ |
struct pool_item *pi = v; |
struct pool_item *pi = v; |
struct pool_item_header *ph; |
struct pool_item_header *ph; |
caddr_t page; |
caddr_t page; |
int s; |
int s; |
|
|
page = (caddr_t)((u_long)v & pp->pr_pagemask); |
LOCK_ASSERT(simple_lock_held(&pp->pr_slock)); |
|
|
simple_lock(&pp->pr_slock); |
page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask); |
pr_enter(pp, file, line); |
|
|
|
pr_log(pp, v, PRLOG_PUT, file, line); |
#ifdef DIAGNOSTIC |
|
if (__predict_false(pp->pr_nout == 0)) { |
|
printf("pool %s: putting with none out\n", |
|
pp->pr_wchan); |
|
panic("pool_put"); |
|
} |
|
#endif |
|
|
if ((ph = pr_find_pagehead(pp, page)) == NULL) { |
if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) { |
pr_printlog(pp, NULL, printf); |
pr_printlog(pp, NULL, printf); |
panic("pool_put: %s: page header missing", pp->pr_wchan); |
panic("pool_put: %s: page header missing", pp->pr_wchan); |
} |
} |
Line 849 _pool_put(pp, v, file, line) |
|
Line 903 _pool_put(pp, v, file, line) |
|
* Return to item list. |
* Return to item list. |
*/ |
*/ |
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
/* XXX Should fill the item. */ |
|
pi->pi_magic = PI_MAGIC; |
pi->pi_magic = PI_MAGIC; |
#endif |
#endif |
|
#ifdef DEBUG |
|
{ |
|
int i, *ip = v; |
|
|
|
for (i = 0; i < pp->pr_size / sizeof(int); i++) { |
|
*ip++ = PI_MAGIC; |
|
} |
|
} |
|
#endif |
|
|
TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list); |
TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list); |
|
KDASSERT(ph->ph_nmissing != 0); |
ph->ph_nmissing--; |
ph->ph_nmissing--; |
pp->pr_nput++; |
pp->pr_nput++; |
pp->pr_nitems++; |
pp->pr_nitems++; |
Line 866 _pool_put(pp, v, file, line) |
|
Line 930 _pool_put(pp, v, file, line) |
|
pp->pr_flags &= ~PR_WANTED; |
pp->pr_flags &= ~PR_WANTED; |
if (ph->ph_nmissing == 0) |
if (ph->ph_nmissing == 0) |
pp->pr_nidle++; |
pp->pr_nidle++; |
pr_leave(pp); |
|
simple_unlock(&pp->pr_slock); |
|
wakeup((caddr_t)pp); |
wakeup((caddr_t)pp); |
return; |
return; |
} |
} |
Line 888 _pool_put(pp, v, file, line) |
|
Line 950 _pool_put(pp, v, file, line) |
|
*/ |
*/ |
if (ph->ph_nmissing == 0) { |
if (ph->ph_nmissing == 0) { |
pp->pr_nidle++; |
pp->pr_nidle++; |
if (pp->pr_npages > pp->pr_maxpages) { |
if (pp->pr_npages > pp->pr_maxpages || |
pr_rmpage(pp, ph); |
(pp->pr_alloc->pa_flags & PA_WANT) != 0) { |
|
pr_rmpage(pp, ph, NULL); |
} else { |
} else { |
TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist); |
TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist); |
TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist); |
TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist); |
Line 912 _pool_put(pp, v, file, line) |
|
Line 975 _pool_put(pp, v, file, line) |
|
* page with the fewest available items, to minimize |
* page with the fewest available items, to minimize |
* fragmentation? |
* fragmentation? |
*/ |
*/ |
for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; |
TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) |
ph = TAILQ_NEXT(ph, ph_pagelist)) |
|
if (TAILQ_FIRST(&ph->ph_itemlist) != NULL) |
if (TAILQ_FIRST(&ph->ph_itemlist) != NULL) |
break; |
break; |
|
|
Line 931 _pool_put(pp, v, file, line) |
|
Line 993 _pool_put(pp, v, file, line) |
|
TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist); |
TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist); |
pp->pr_curpage = ph; |
pp->pr_curpage = ph; |
} |
} |
|
} |
|
|
|
/* |
|
* Return resource to the pool; must be called at appropriate spl level |
|
*/ |
|
#ifdef POOL_DIAGNOSTIC |
|
void |
|
_pool_put(struct pool *pp, void *v, const char *file, long line) |
|
{ |
|
|
|
simple_lock(&pp->pr_slock); |
|
pr_enter(pp, file, line); |
|
|
|
pr_log(pp, v, PRLOG_PUT, file, line); |
|
|
|
pool_do_put(pp, v); |
|
|
pr_leave(pp); |
pr_leave(pp); |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
|
} |
|
#undef pool_put |
|
#endif /* POOL_DIAGNOSTIC */ |
|
|
|
void |
|
pool_put(struct pool *pp, void *v) |
|
{ |
|
|
|
simple_lock(&pp->pr_slock); |
|
|
|
pool_do_put(pp, v); |
|
|
|
simple_unlock(&pp->pr_slock); |
} |
} |
|
|
|
#ifdef POOL_DIAGNOSTIC |
|
#define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__) |
|
#endif |
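The "must be called at appropriate spl level" comments on pool_get() and pool_put() mean the caller provides its own interrupt protection; a typical pattern for a pool shared with interrupt context, with hypothetical foo names:

	struct foo *f;
	int s;

	s = splvm();
	f = pool_get(&foo_pool, PR_NOWAIT);
	splx(s);
	if (f == NULL)
		return (ENOMEM);

	/* ... use f ... */

	s = splvm();
	pool_put(&foo_pool, f);
	splx(s);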
|
|
/* |
/* |
* Add N items to the pool. |
* Add N items to the pool. |
*/ |
*/ |
int |
int |
pool_prime(pp, n, storage) |
pool_prime(struct pool *pp, int n) |
struct pool *pp; |
|
int n; |
|
caddr_t storage; |
|
{ |
{ |
|
struct pool_item_header *ph; |
caddr_t cp; |
caddr_t cp; |
int newnitems, newpages; |
int newpages; |
|
|
#ifdef DIAGNOSTIC |
|
if (storage && !(pp->pr_roflags & PR_STATIC)) |
|
panic("pool_prime: static"); |
|
/* !storage && static caught below */ |
|
#endif |
|
|
|
simple_lock(&pp->pr_slock); |
simple_lock(&pp->pr_slock); |
|
|
newnitems = pp->pr_minitems + n; |
newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; |
newpages = |
|
roundup(newnitems, pp->pr_itemsperpage) / pp->pr_itemsperpage |
|
- pp->pr_minpages; |
|
|
|
while (newpages-- > 0) { |
while (newpages-- > 0) { |
if (pp->pr_roflags & PR_STATIC) { |
simple_unlock(&pp->pr_slock); |
cp = storage; |
cp = pool_allocator_alloc(pp, PR_NOWAIT); |
storage += pp->pr_pagesz; |
if (__predict_true(cp != NULL)) |
} else { |
ph = pool_alloc_item_header(pp, cp, PR_NOWAIT); |
simple_unlock(&pp->pr_slock); |
simple_lock(&pp->pr_slock); |
cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype); |
|
simple_lock(&pp->pr_slock); |
|
} |
|
|
|
if (cp == NULL) { |
if (__predict_false(cp == NULL || ph == NULL)) { |
simple_unlock(&pp->pr_slock); |
if (cp != NULL) |
return (ENOMEM); |
pool_allocator_free(pp, cp); |
|
break; |
} |
} |
|
|
|
pool_prime_page(pp, cp, ph); |
pp->pr_npagealloc++; |
pp->pr_npagealloc++; |
pool_prime_page(pp, cp); |
|
pp->pr_minpages++; |
pp->pr_minpages++; |
} |
} |
|
|
pp->pr_minitems = newnitems; |
|
|
|
if (pp->pr_minpages >= pp->pr_maxpages) |
if (pp->pr_minpages >= pp->pr_maxpages) |
pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */ |
pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */ |
|
|
Line 997 pool_prime(pp, n, storage) |
|
Line 1076 pool_prime(pp, n, storage) |
|
* Note, we must be called with the pool descriptor LOCKED. |
* Note, we must be called with the pool descriptor LOCKED. |
*/ |
*/ |
static void |
static void |
pool_prime_page(pp, storage) |
pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph) |
struct pool *pp; |
|
caddr_t storage; |
|
{ |
{ |
struct pool_item *pi; |
struct pool_item *pi; |
struct pool_item_header *ph; |
|
caddr_t cp = storage; |
caddr_t cp = storage; |
unsigned int align = pp->pr_align; |
unsigned int align = pp->pr_align; |
unsigned int ioff = pp->pr_itemoffset; |
unsigned int ioff = pp->pr_itemoffset; |
int s, n; |
int n; |
|
|
if ((pp->pr_roflags & PR_PHINPAGE) != 0) { |
#ifdef DIAGNOSTIC |
ph = (struct pool_item_header *)(cp + pp->pr_phoffset); |
if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0) |
} else { |
panic("pool_prime_page: %s: unaligned page", pp->pr_wchan); |
s = splhigh(); |
#endif |
ph = pool_get(&phpool, PR_URGENT); |
|
splx(s); |
if ((pp->pr_roflags & PR_PHINPAGE) == 0) |
LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)], |
LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)], |
ph, ph_hashlist); |
ph, ph_hashlist); |
} |
|
|
|
/* |
/* |
* Insert page header. |
* Insert page header. |
Line 1051 pool_prime_page(pp, storage) |
|
Line 1126 pool_prime_page(pp, storage) |
|
while (n--) { |
while (n--) { |
pi = (struct pool_item *)cp; |
pi = (struct pool_item *)cp; |
|
|
|
KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0); |
|
|
/* Insert on page list */ |
/* Insert on page list */ |
TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list); |
TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list); |
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
Line 1070 pool_prime_page(pp, storage) |
|
Line 1147 pool_prime_page(pp, storage) |
|
} |
} |
|
|
/* |
/* |
* Like pool_prime(), except this is used by pool_get() when nitems |
* Used by pool_get() when nitems drops below the low water mark. This |
* drops below the low water mark. This is used to catch up nitems |
* is used to catch up nitems with the low water mark. |
* with the low water mark. |
|
* |
* |
* Note 1, we never wait for memory here, we let the caller decide what to do. |
* Note 1, we never wait for memory here, we let the caller decide what to do. |
* |
* |
* Note 2, this doesn't work with static pools. |
* Note 2, we must be called with the pool already locked, and we return |
* |
|
* Note 3, we must be called with the pool already locked, and we return |
|
* with it locked. |
* with it locked. |
*/ |
*/ |
static int |
static int |
pool_catchup(pp) |
pool_catchup(struct pool *pp) |
struct pool *pp; |
|
{ |
{ |
|
struct pool_item_header *ph; |
caddr_t cp; |
caddr_t cp; |
int error = 0; |
int error = 0; |
|
|
if (pp->pr_roflags & PR_STATIC) { |
while (POOL_NEEDS_CATCHUP(pp)) { |
/* |
|
* We dropped below the low water mark, and this is not a |
|
* good thing. Log a warning. |
|
* |
|
* XXX: rate-limit this? |
|
*/ |
|
printf("WARNING: static pool `%s' dropped below low water " |
|
"mark\n", pp->pr_wchan); |
|
return (0); |
|
} |
|
|
|
while (pp->pr_nitems < pp->pr_minitems) { |
|
/* |
/* |
* Call the page back-end allocator for more memory. |
* Call the page back-end allocator for more memory. |
* |
* |
Line 1108 pool_catchup(pp) |
|
Line 1170 pool_catchup(pp) |
|
* the pool descriptor? |
* the pool descriptor? |
*/ |
*/ |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype); |
cp = pool_allocator_alloc(pp, PR_NOWAIT); |
|
if (__predict_true(cp != NULL)) |
|
ph = pool_alloc_item_header(pp, cp, PR_NOWAIT); |
simple_lock(&pp->pr_slock); |
simple_lock(&pp->pr_slock); |
if (cp == NULL) { |
if (__predict_false(cp == NULL || ph == NULL)) { |
|
if (cp != NULL) |
|
pool_allocator_free(pp, cp); |
error = ENOMEM; |
error = ENOMEM; |
break; |
break; |
} |
} |
|
pool_prime_page(pp, cp, ph); |
pp->pr_npagealloc++; |
pp->pr_npagealloc++; |
pool_prime_page(pp, cp); |
|
} |
} |
|
|
return (error); |
return (error); |
} |
} |
|
|
void |
void |
pool_setlowat(pp, n) |
pool_setlowat(struct pool *pp, int n) |
pool_handle_t pp; |
|
int n; |
|
{ |
{ |
int error; |
|
|
|
simple_lock(&pp->pr_slock); |
simple_lock(&pp->pr_slock); |
|
|
Line 1136 pool_setlowat(pp, n) |
|
Line 1199 pool_setlowat(pp, n) |
|
: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; |
: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; |
|
|
/* Make sure we're caught up with the newly-set low water mark. */ |
/* Make sure we're caught up with the newly-set low water mark. */ |
if ((error = pool_catchup(pp)) != 0) { |
if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) { |
/* |
/* |
* XXX: Should we log a warning? Should we set up a timeout |
* XXX: Should we log a warning? Should we set up a timeout |
* to try again in a second or so? The latter could break |
* to try again in a second or so? The latter could break |
Line 1148 pool_setlowat(pp, n) |
|
Line 1211 pool_setlowat(pp, n) |
|
} |
} |
|
|
void |
void |
pool_sethiwat(pp, n) |
pool_sethiwat(struct pool *pp, int n) |
pool_handle_t pp; |
|
int n; |
|
{ |
{ |
|
|
simple_lock(&pp->pr_slock); |
simple_lock(&pp->pr_slock); |
Line 1163 pool_sethiwat(pp, n) |
|
Line 1224 pool_sethiwat(pp, n) |
|
} |
} |
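Typical watermark tuning, with assumed numbers and a hypothetical foo_pool:

	/* Keep at least 16 items ready; pool_get() calls pool_catchup()
	 * whenever nitems drops below this. */
	pool_setlowat(&foo_pool, 16);

	/* Let idle pages beyond roughly 64 items' worth be reclaimed. */
	pool_sethiwat(&foo_pool, 64);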
|
|
void |
void |
pool_sethardlimit(pp, n, warnmess, ratecap) |
pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap) |
pool_handle_t pp; |
|
int n; |
|
const char *warnmess; |
|
int ratecap; |
|
{ |
{ |
|
|
simple_lock(&pp->pr_slock); |
simple_lock(&pp->pr_slock); |
|
|
pp->pr_hardlimit = n; |
pp->pr_hardlimit = n; |
pp->pr_hardlimit_warning = warnmess; |
pp->pr_hardlimit_warning = warnmess; |
pp->pr_hardlimit_ratecap = ratecap; |
pp->pr_hardlimit_ratecap.tv_sec = ratecap; |
memset(&pp->pr_hardlimit_warning_last, 0, |
pp->pr_hardlimit_warning_last.tv_sec = 0; |
sizeof(pp->pr_hardlimit_warning_last)); |
pp->pr_hardlimit_warning_last.tv_usec = 0; |
|
|
/* |
/* |
* In-line version of pool_sethiwat(), because we don't want to |
* In-line version of pool_sethiwat(), because we don't want to |
Line 1190 pool_sethardlimit(pp, n, warnmess, ratec |
|
Line 1247 pool_sethardlimit(pp, n, warnmess, ratec |
|
} |
} |
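A sketch with assumed numbers; the last argument is the minimum interval, in seconds, between repetitions of the warning:

	pool_sethardlimit(&foo_pool, 1024,
	    "WARNING: foo_pool hard limit (1024) reached", 60);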
|
|
/* |
/* |
* Default page allocator. |
|
*/ |
|
static void * |
|
pool_page_alloc(sz, flags, mtype) |
|
unsigned long sz; |
|
int flags; |
|
int mtype; |
|
{ |
|
boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; |
|
|
|
return ((void *)uvm_km_alloc_poolpage(waitok)); |
|
} |
|
|
|
static void |
|
pool_page_free(v, sz, mtype) |
|
void *v; |
|
unsigned long sz; |
|
int mtype; |
|
{ |
|
|
|
uvm_km_free_poolpage((vaddr_t)v); |
|
} |
|
|
|
/* |
|
* Alternate pool page allocator for pools that know they will |
|
* never be accessed in interrupt context. |
|
*/ |
|
void * |
|
pool_page_alloc_nointr(sz, flags, mtype) |
|
unsigned long sz; |
|
int flags; |
|
int mtype; |
|
{ |
|
boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; |
|
|
|
return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object, |
|
waitok)); |
|
} |
|
|
|
void |
|
pool_page_free_nointr(v, sz, mtype) |
|
void *v; |
|
unsigned long sz; |
|
int mtype; |
|
{ |
|
|
|
uvm_km_free_poolpage1(kernel_map, (vaddr_t)v); |
|
} |
|
|
|
|
|
/* |
|
* Release all complete pages that have not been used recently. |
* Release all complete pages that have not been used recently. |
*/ |
*/ |
void |
int |
_pool_reclaim(pp, file, line) |
#ifdef POOL_DIAGNOSTIC |
pool_handle_t pp; |
_pool_reclaim(struct pool *pp, const char *file, long line) |
const char *file; |
#else |
long line; |
pool_reclaim(struct pool *pp) |
|
#endif |
{ |
{ |
struct pool_item_header *ph, *phnext; |
struct pool_item_header *ph, *phnext; |
|
struct pool_cache *pc; |
struct timeval curtime; |
struct timeval curtime; |
|
struct pool_pagelist pq; |
int s; |
int s; |
|
|
if (pp->pr_roflags & PR_STATIC) |
if (pp->pr_drain_hook != NULL) { |
return; |
/* |
|
* The drain hook must be called with the pool unlocked. |
|
*/ |
|
(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT); |
|
} |
|
|
if (simple_lock_try(&pp->pr_slock) == 0) |
if (simple_lock_try(&pp->pr_slock) == 0) |
return; |
return (0); |
pr_enter(pp, file, line); |
pr_enter(pp, file, line); |
|
|
|
TAILQ_INIT(&pq); |
|
|
|
/* |
|
* Reclaim items from the pool's caches. |
|
*/ |
|
TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) |
|
pool_cache_reclaim(pc); |
|
|
s = splclock(); |
s = splclock(); |
curtime = mono_time; |
curtime = mono_time; |
splx(s); |
splx(s); |
Line 1285 _pool_reclaim(pp, file, line) |
|
Line 1306 _pool_reclaim(pp, file, line) |
|
pp->pr_minitems) |
pp->pr_minitems) |
break; |
break; |
|
|
pr_rmpage(pp, ph); |
pr_rmpage(pp, ph, &pq); |
} |
} |
} |
} |
|
|
pr_leave(pp); |
pr_leave(pp); |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
} |
if (TAILQ_EMPTY(&pq)) |
|
return (0); |
|
|
|
while ((ph = TAILQ_FIRST(&pq)) != NULL) { |
|
TAILQ_REMOVE(&pq, ph, ph_pagelist); |
|
pool_allocator_free(pp, ph->ph_page); |
|
if (pp->pr_roflags & PR_PHINPAGE) { |
|
continue; |
|
} |
|
LIST_REMOVE(ph, ph_hashlist); |
|
s = splhigh(); |
|
pool_put(&phpool, ph); |
|
splx(s); |
|
} |
|
|
|
return (1); |
|
} |
|
|
/* |
/* |
* Drain pools, one at a time. |
* Drain pools, one at a time. |
Line 1300 _pool_reclaim(pp, file, line) |
|
Line 1336 _pool_reclaim(pp, file, line) |
|
* Note, we must never be called from an interrupt context. |
* Note, we must never be called from an interrupt context. |
*/ |
*/ |
void |
void |
pool_drain(arg) |
pool_drain(void *arg) |
void *arg; |
|
{ |
{ |
struct pool *pp; |
struct pool *pp; |
int s; |
int s; |
|
|
s = splimp(); |
pp = NULL; |
|
s = splvm(); |
simple_lock(&pool_head_slock); |
simple_lock(&pool_head_slock); |
|
if (drainpp == NULL) { |
if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL) |
drainpp = TAILQ_FIRST(&pool_head); |
goto out; |
} |
|
if (drainpp) { |
pp = drainpp; |
pp = drainpp; |
drainpp = TAILQ_NEXT(pp, pr_poollist); |
drainpp = TAILQ_NEXT(pp, pr_poollist); |
|
} |
pool_reclaim(pp); |
|
|
|
out: |
|
simple_unlock(&pool_head_slock); |
simple_unlock(&pool_head_slock); |
|
pool_reclaim(pp); |
splx(s); |
splx(s); |
} |
} |
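pool_drain() advances drainpp by one pool per call, so a periodic caller spreads reclamation across all pools over time; a sketch of such a caller, where the uvmexp field names are assumptions about the pagedaemon's bookkeeping:

	/* In the pagedaemon, once per wakeup while memory stays short: */
	if (uvmexp.free < uvmexp.freetarg)
		pool_drain(NULL);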
|
|
|
|
/* |
/* |
* Diagnostic helpers. |
* Diagnostic helpers. |
*/ |
*/ |
void |
void |
pool_print(pp, modif) |
pool_print(struct pool *pp, const char *modif) |
struct pool *pp; |
|
const char *modif; |
|
{ |
{ |
int s; |
int s; |
|
|
s = splimp(); |
s = splvm(); |
if (simple_lock_try(&pp->pr_slock) == 0) { |
if (simple_lock_try(&pp->pr_slock) == 0) { |
printf("pool %s is locked; try again later\n", |
printf("pool %s is locked; try again later\n", |
pp->pr_wchan); |
pp->pr_wchan); |
Line 1346 pool_print(pp, modif) |
|
Line 1377 pool_print(pp, modif) |
|
} |
} |
|
|
void |
void |
pool_printit(pp, modif, pr) |
pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...)) |
struct pool *pp; |
|
const char *modif; |
|
void (*pr) __P((const char *, ...)); |
|
{ |
{ |
int didlock = 0; |
int didlock = 0; |
|
|
Line 1379 pool_printit(pp, modif, pr) |
|
Line 1407 pool_printit(pp, modif, pr) |
|
} |
} |
|
|
static void |
static void |
pool_print1(pp, modif, pr) |
pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...)) |
struct pool *pp; |
|
const char *modif; |
|
void (*pr) __P((const char *, ...)); |
|
{ |
{ |
struct pool_item_header *ph; |
struct pool_item_header *ph; |
|
struct pool_cache *pc; |
|
struct pool_cache_group *pcg; |
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
struct pool_item *pi; |
struct pool_item *pi; |
#endif |
#endif |
int print_log = 0, print_pagelist = 0; |
int i, print_log = 0, print_pagelist = 0, print_cache = 0; |
char c; |
char c; |
|
|
while ((c = *modif++) != '\0') { |
while ((c = *modif++) != '\0') { |
Line 1396 pool_print1(pp, modif, pr) |
|
Line 1423 pool_print1(pp, modif, pr) |
|
print_log = 1; |
print_log = 1; |
if (c == 'p') |
if (c == 'p') |
print_pagelist = 1; |
print_pagelist = 1; |
|
if (c == 'c') |
|
print_cache = 1; |
modif++; |
modif++; |
} |
} |
|
|
(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n", |
(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n", |
pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset, |
pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset, |
pp->pr_roflags); |
pp->pr_roflags); |
(*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype); |
(*pr)("\talloc %p\n", pp->pr_alloc); |
(*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free); |
|
(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n", |
(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n", |
pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages); |
pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages); |
(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n", |
(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n", |
Line 1425 pool_print1(pp, modif, pr) |
|
Line 1453 pool_print1(pp, modif, pr) |
|
(u_long)ph->ph_time.tv_sec, |
(u_long)ph->ph_time.tv_sec, |
(u_long)ph->ph_time.tv_usec); |
(u_long)ph->ph_time.tv_usec); |
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
for (pi = TAILQ_FIRST(&ph->ph_itemlist); pi != NULL; |
TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) { |
pi = TAILQ_NEXT(pi, pi_list)) { |
|
if (pi->pi_magic != PI_MAGIC) { |
if (pi->pi_magic != PI_MAGIC) { |
(*pr)("\t\t\titem %p, magic 0x%x\n", |
(*pr)("\t\t\titem %p, magic 0x%x\n", |
pi, pi->pi_magic); |
pi, pi->pi_magic); |
Line 1452 pool_print1(pp, modif, pr) |
|
Line 1479 pool_print1(pp, modif, pr) |
|
|
|
skip_log: |
skip_log: |
|
|
|
if (print_cache == 0) |
|
goto skip_cache; |
|
|
|
TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) { |
|
(*pr)("\tcache %p: allocfrom %p freeto %p\n", pc, |
|
pc->pc_allocfrom, pc->pc_freeto); |
|
(*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n", |
|
pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems); |
|
TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) { |
|
(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); |
|
for (i = 0; i < PCG_NOBJECTS; i++) |
|
(*pr)("\t\t\t%p\n", pcg->pcg_objects[i]); |
|
} |
|
} |
|
|
|
skip_cache: |
|
|
pr_enter_check(pp, pr); |
pr_enter_check(pp, pr); |
} |
} |
|
|
int |
int |
pool_chk(pp, label) |
pool_chk(struct pool *pp, const char *label) |
struct pool *pp; |
|
char *label; |
|
{ |
{ |
struct pool_item_header *ph; |
struct pool_item_header *ph; |
int r = 0; |
int r = 0; |
|
|
simple_lock(&pp->pr_slock); |
simple_lock(&pp->pr_slock); |
|
|
for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; |
TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) { |
ph = TAILQ_NEXT(ph, ph_pagelist)) { |
|
|
|
struct pool_item *pi; |
struct pool_item *pi; |
int n; |
int n; |
caddr_t page; |
caddr_t page; |
|
|
page = (caddr_t)((u_long)ph & pp->pr_pagemask); |
page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask); |
if (page != ph->ph_page && |
if (page != ph->ph_page && |
(pp->pr_roflags & PR_PHINPAGE) != 0) { |
(pp->pr_roflags & PR_PHINPAGE) != 0) { |
if (label != NULL) |
if (label != NULL) |
Line 1501 pool_chk(pp, label) |
|
Line 1541 pool_chk(pp, label) |
|
panic("pool"); |
panic("pool"); |
} |
} |
#endif |
#endif |
page = (caddr_t)((u_long)pi & pp->pr_pagemask); |
page = |
|
(caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask); |
if (page == ph->ph_page) |
if (page == ph->ph_page) |
continue; |
continue; |
|
|
|
|
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
return (r); |
return (r); |
} |
} |
|
|
|
/* |
|
* pool_cache_init: |
|
* |
|
* Initialize a pool cache. |
|
* |
|
* NOTE: If the pool must be protected from interrupts, we expect |
|
* to be called at the appropriate interrupt priority level. |
|
*/ |
|
void |
|
pool_cache_init(struct pool_cache *pc, struct pool *pp, |
|
int (*ctor)(void *, void *, int), |
|
void (*dtor)(void *, void *), |
|
void *arg) |
|
{ |
|
|
|
TAILQ_INIT(&pc->pc_grouplist); |
|
simple_lock_init(&pc->pc_slock); |
|
|
|
pc->pc_allocfrom = NULL; |
|
pc->pc_freeto = NULL; |
|
pc->pc_pool = pp; |
|
|
|
pc->pc_ctor = ctor; |
|
pc->pc_dtor = dtor; |
|
pc->pc_arg = arg; |
|
|
|
pc->pc_hits = 0; |
|
pc->pc_misses = 0; |
|
|
|
pc->pc_ngroups = 0; |
|
|
|
pc->pc_nitems = 0; |
|
|
|
simple_lock(&pp->pr_slock); |
|
TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist); |
|
simple_unlock(&pp->pr_slock); |
|
} |
|
|
|
/* |
|
* pool_cache_destroy: |
|
* |
|
* Destroy a pool cache. |
|
*/ |
|
void |
|
pool_cache_destroy(struct pool_cache *pc) |
|
{ |
|
struct pool *pp = pc->pc_pool; |
|
|
|
/* First, invalidate the entire cache. */ |
|
pool_cache_invalidate(pc); |
|
|
|
/* ...and remove it from the pool's cache list. */ |
|
simple_lock(&pp->pr_slock); |
|
TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist); |
|
simple_unlock(&pp->pr_slock); |
|
} |
|
|
|
static __inline void *
pcg_get(struct pool_cache_group *pcg)
{
	void *object;
	u_int idx;

	KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
	KASSERT(pcg->pcg_avail != 0);
	idx = --pcg->pcg_avail;

	KASSERT(pcg->pcg_objects[idx] != NULL);
	object = pcg->pcg_objects[idx];
	pcg->pcg_objects[idx] = NULL;

	return (object);
}

static __inline void
pcg_put(struct pool_cache_group *pcg, void *object)
{
	u_int idx;

	KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
	idx = pcg->pcg_avail++;

	KASSERT(pcg->pcg_objects[idx] == NULL);
	pcg->pcg_objects[idx] = object;
}

/*
 * pool_cache_get:
 *
 *	Get an object from a pool cache.
 */
void *
pool_cache_get(struct pool_cache *pc, int flags)
{
	struct pool_cache_group *pcg;
	void *object;

#ifdef LOCKDEBUG
	if (flags & PR_WAITOK)
		simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
#endif

	simple_lock(&pc->pc_slock);

	if ((pcg = pc->pc_allocfrom) == NULL) {
		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
			if (pcg->pcg_avail != 0) {
				pc->pc_allocfrom = pcg;
				goto have_group;
			}
		}

		/*
		 * No groups with any available objects.  Allocate
		 * a new object, construct it, and return it to
		 * the caller.  We will allocate a group, if necessary,
		 * when the object is freed back to the cache.
		 */
		pc->pc_misses++;
		simple_unlock(&pc->pc_slock);
		object = pool_get(pc->pc_pool, flags);
		if (object != NULL && pc->pc_ctor != NULL) {
			if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
				pool_put(pc->pc_pool, object);
				return (NULL);
			}
		}
		return (object);
	}

 have_group:
	pc->pc_hits++;
	pc->pc_nitems--;
	object = pcg_get(pcg);

	if (pcg->pcg_avail == 0)
		pc->pc_allocfrom = NULL;

	simple_unlock(&pc->pc_slock);

	return (object);
}
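
/*
 * Example caller pattern (editor's sketch; the "foo" names are
 * hypothetical).  Note that pool_cache_get() can return NULL even
 * with PR_WAITOK, since the constructor may fail, so callers must
 * always check the return value:
 *
 *	f = pool_cache_get(&foo_cache, PR_NOWAIT);
 *	if (f == NULL)
 *		return (ENOMEM);
 */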
|
|
|
/*
 * pool_cache_put:
 *
 *	Put an object back to the pool cache.
 */
void
pool_cache_put(struct pool_cache *pc, void *object)
{
	struct pool_cache_group *pcg;
	int s;

	simple_lock(&pc->pc_slock);

	if ((pcg = pc->pc_freeto) == NULL) {
		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
			if (pcg->pcg_avail != PCG_NOBJECTS) {
				pc->pc_freeto = pcg;
				goto have_group;
			}
		}

		/*
		 * No group has room for the object.  Attempt to
		 * allocate a new group.
		 */
		simple_unlock(&pc->pc_slock);
		s = splvm();
		pcg = pool_get(&pcgpool, PR_NOWAIT);
		splx(s);
		if (pcg != NULL) {
			memset(pcg, 0, sizeof(*pcg));
			simple_lock(&pc->pc_slock);
			pc->pc_ngroups++;
			TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
			if (pc->pc_freeto == NULL)
				pc->pc_freeto = pcg;
			goto have_group;
		}

		/*
		 * Unable to allocate a cache group; destruct the object
		 * and free it back to the pool.
		 */
		pool_cache_destruct_object(pc, object);
		return;
	}

 have_group:
	pc->pc_nitems++;
	pcg_put(pcg, object);

	if (pcg->pcg_avail == PCG_NOBJECTS)
		pc->pc_freeto = NULL;

	simple_unlock(&pc->pc_slock);
}

/*
 * pool_cache_destruct_object:
 *
 *	Force destruction of an object and its release back into
 *	the pool.
 */
void
pool_cache_destruct_object(struct pool_cache *pc, void *object)
{

	if (pc->pc_dtor != NULL)
		(*pc->pc_dtor)(pc->pc_arg, object);
	pool_put(pc->pc_pool, object);
}

/*
 * pool_cache_do_invalidate:
 *
 *	This internal function implements pool_cache_invalidate() and
 *	pool_cache_reclaim().
 */
static void
pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
    void (*putit)(struct pool *, void *))
{
	struct pool_cache_group *pcg, *npcg;
	void *object;
	int s;

	for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
	     pcg = npcg) {
		npcg = TAILQ_NEXT(pcg, pcg_list);
		while (pcg->pcg_avail != 0) {
			pc->pc_nitems--;
			object = pcg_get(pcg);
			if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
				pc->pc_allocfrom = NULL;
			if (pc->pc_dtor != NULL)
				(*pc->pc_dtor)(pc->pc_arg, object);
			(*putit)(pc->pc_pool, object);
		}
		if (free_groups) {
			pc->pc_ngroups--;
			TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
			if (pc->pc_freeto == pcg)
				pc->pc_freeto = NULL;
			s = splvm();
			pool_put(&pcgpool, pcg);
			splx(s);
		}
	}
}

/*
 * pool_cache_invalidate:
 *
 *	Invalidate a pool cache (destruct and release all of the
 *	cached objects).
 */
void
pool_cache_invalidate(struct pool_cache *pc)
{

	simple_lock(&pc->pc_slock);
	pool_cache_do_invalidate(pc, 0, pool_put);
	simple_unlock(&pc->pc_slock);
}

/*
 * pool_cache_reclaim:
 *
 *	Reclaim a pool cache for pool_reclaim().
 */
static void
pool_cache_reclaim(struct pool_cache *pc)
{

	simple_lock(&pc->pc_slock);
	pool_cache_do_invalidate(pc, 1, pool_do_put);
	simple_unlock(&pc->pc_slock);
}

/*
 * Pool backend allocators.
 *
 * Each pool has a backend allocator that handles allocation, deallocation,
 * and any additional draining that might be needed.
 *
 * We provide two standard allocators:
 *
 *	pool_allocator_kmem - the default when no allocator is specified
 *
 *	pool_allocator_nointr - used for pools that will not be accessed
 *	in interrupt context.
 */
void *pool_page_alloc(struct pool *, int);
void pool_page_free(struct pool *, void *);

struct pool_allocator pool_allocator_kmem = {
	pool_page_alloc, pool_page_free, 0,
};

void *pool_page_alloc_nointr(struct pool *, int);
void pool_page_free_nointr(struct pool *, void *);

struct pool_allocator pool_allocator_nointr = {
	pool_page_alloc_nointr, pool_page_free_nointr, 0,
};

#ifdef POOL_SUBPAGE
void *pool_subpage_alloc(struct pool *, int);
void pool_subpage_free(struct pool *, void *);

struct pool_allocator pool_allocator_kmem_subpage = {
	pool_subpage_alloc, pool_subpage_free, 0,
};
#endif /* POOL_SUBPAGE */
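
/*
 * Example (editor's sketch; the "bar" names are hypothetical): a pool
 * that is never used from interrupt context can name an allocator
 * explicitly at pool_init() time, while passing NULL selects the
 * default, pool_allocator_kmem:
 *
 *	pool_init(&bar_pool, sizeof(struct bar), 0, 0, 0, "barpl",
 *	    &pool_allocator_nointr);
 */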
|
|
|
/*
 * We have at least three different resources for the same allocation and
 * each resource can be depleted.  First, we have the ready elements in the
 * pool.  Then we have the resource (typically a vm_map) for this allocator.
 * Finally, we have physical memory.  Waiting for any of these can be
 * unnecessary when any other is freed, but the kernel doesn't support
 * sleeping on multiple wait channels, so we have to employ another strategy.
 *
 * The caller sleeps on the pool (so that it can be awakened when an item
 * is returned to the pool), but we set PA_WANT on the allocator.  When a
 * page is returned to the allocator and PA_WANT is set, pool_allocator_free
 * will wake up all sleeping pools belonging to this allocator.
 *
 * XXX Thundering herd.
 */
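
/*
 * Editor's illustration of the handshake described above (not part of
 * the original file):
 *
 *	1. pool_allocator_alloc() fails and drains the allocator's
 *	   other pools; still no page, so it sets PA_WANT on the
 *	   allocator and the caller sleeps on its pool.
 *	2. Later, pool_allocator_free() returns a page, sees PA_WANT,
 *	   and wakes up every PR_WANTED pool on the allocator's list.
 *	3. The awakened callers retry their allocations.
 */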
|
void *
pool_allocator_alloc(struct pool *org, int flags)
{
	struct pool_allocator *pa = org->pr_alloc;
	struct pool *pp, *start;
	int s, freed;
	void *res;

	do {
		if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
			return (res);
		if ((flags & PR_WAITOK) == 0) {
			/*
			 * We only run the drain hook here if PR_NOWAIT.
			 * In other cases, the hook will be run in
			 * pool_reclaim().
			 */
			if (org->pr_drain_hook != NULL) {
				(*org->pr_drain_hook)(org->pr_drain_hook_arg,
				    flags);
				if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
					return (res);
			}
			break;
		}

		/*
		 * Drain all pools, except "org", that use this
		 * allocator.  We do this to reclaim VA space.
		 * pa_alloc is responsible for waiting for
		 * physical memory.
		 *
		 * XXX We risk looping forever if someone calls
		 * pool_destroy() on "start".  But there is no
		 * other way to have potentially sleeping pool_reclaim,
		 * non-sleeping locks on pool_allocator, and some
		 * stirring of drained pools in the allocator.
		 *
		 * XXX Maybe we should use pool_head_slock for locking
		 * the allocators?
		 */
		freed = 0;

		s = splvm();
		simple_lock(&pa->pa_slock);
		pp = start = TAILQ_FIRST(&pa->pa_list);
		do {
			TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
			TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
			if (pp == org)
				continue;
			simple_unlock(&pa->pa_slock);
			freed = pool_reclaim(pp);
			simple_lock(&pa->pa_slock);
		} while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
			 freed == 0);

		if (freed == 0) {
			/*
			 * We set PA_WANT here, the caller will most likely
			 * sleep waiting for pages (if not, this won't hurt
			 * that much), and there is no way to set this in
			 * the caller without violating locking order.
			 */
			pa->pa_flags |= PA_WANT;
		}
		simple_unlock(&pa->pa_slock);
		splx(s);
	} while (freed);
	return (NULL);
}

void
pool_allocator_free(struct pool *pp, void *v)
{
	struct pool_allocator *pa = pp->pr_alloc;
	int s;

	(*pa->pa_free)(pp, v);

	s = splvm();
	simple_lock(&pa->pa_slock);
	if ((pa->pa_flags & PA_WANT) == 0) {
		simple_unlock(&pa->pa_slock);
		splx(s);
		return;
	}

	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
		simple_lock(&pp->pr_slock);
		if ((pp->pr_flags & PR_WANTED) != 0) {
			pp->pr_flags &= ~PR_WANTED;
			wakeup(pp);
		}
		simple_unlock(&pp->pr_slock);
	}
	pa->pa_flags &= ~PA_WANT;
	simple_unlock(&pa->pa_slock);
	splx(s);
}

void *
pool_page_alloc(struct pool *pp, int flags)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *) uvm_km_alloc_poolpage(waitok));
}

void
pool_page_free(struct pool *pp, void *v)
{

	uvm_km_free_poolpage((vaddr_t) v);
}

#ifdef POOL_SUBPAGE
/* Sub-page allocator, for machines with large hardware pages. */
void *
pool_subpage_alloc(struct pool *pp, int flags)
{

	return (pool_get(&psppool, flags));
}

void
pool_subpage_free(struct pool *pp, void *v)
{

	pool_put(&psppool, v);
}

/* We don't provide a real nointr allocator.  Maybe later. */
void *
pool_page_alloc_nointr(struct pool *pp, int flags)
{

	return (pool_subpage_alloc(pp, flags));
}

void
pool_page_free_nointr(struct pool *pp, void *v)
{

	pool_subpage_free(pp, v);
}
#else
void *
pool_page_alloc_nointr(struct pool *pp, int flags)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *) uvm_km_alloc_poolpage1(kernel_map,
	    uvm.kernel_object, waitok));
}

void
pool_page_free_nointr(struct pool *pp, void *v)
{

	uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
}
#endif /* POOL_SUBPAGE */