NetBSD kernel pool allocator (sys/kern/subr_pool.c): side-by-side diff of
revision 1.23 (1999/04/06 23:32:44) against revision 1.60 (2001/07/01 06:12:20).
The flattened listing below alternates old-revision and new-revision lines.
|
|
/* $NetBSD$ */ |
/* $NetBSD$ */ |
|
|
/*- |
/*- |
* Copyright (c) 1997, 1999 The NetBSD Foundation, Inc. |
* Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc. |
* All rights reserved. |
* All rights reserved. |
* |
* |
* This code is derived from software contributed to The NetBSD Foundation |
* This code is derived from software contributed to The NetBSD Foundation |
|
|
* POSSIBILITY OF SUCH DAMAGE. |
* POSSIBILITY OF SUCH DAMAGE. |
*/ |
*/ |
|
|
|
#include "opt_pool.h" |
|
#include "opt_poollog.h" |
|
#include "opt_lockdebug.h" |
|
|
#include <sys/param.h> |
#include <sys/param.h> |
#include <sys/systm.h> |
#include <sys/systm.h> |
#include <sys/proc.h> |
#include <sys/proc.h> |
|
|
#include <sys/pool.h> |
#include <sys/pool.h> |
#include <sys/syslog.h> |
#include <sys/syslog.h> |
|
|
#include <vm/vm.h> |
|
#include <vm/vm_kern.h> |
|
|
|
#include <uvm/uvm.h> |
#include <uvm/uvm.h> |
|
|
/* |
/* |
[unchanged lines omitted (old line 93 / new line 94: struct pool_item_header)]
|
struct pool_item { |
struct pool_item { |
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
int pi_magic; |
int pi_magic; |
#define PI_MAGIC 0xdeadbeef |
|
#endif |
#endif |
|
#define PI_MAGIC 0xdeadbeef |
/* Other entries use only this list entry */ |
/* Other entries use only this list entry */ |
TAILQ_ENTRY(pool_item) pi_list; |
TAILQ_ENTRY(pool_item) pi_list; |
}; |
}; |
|
|
|
#define PR_HASH_INDEX(pp,addr) \ |
#define PR_HASH_INDEX(pp,addr) \ |
|
(((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1)) |
(((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1)) |
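/*
 * Worked example (editorial sketch, not in the original source): with
 * 4 KB pages (pr_pageshift == 12) and assuming PR_HASHTABSIZE == 8,
 * page address 0x12345000 maps to bucket
 * ((0x12345000 >> 12) & 7) == (0x12345 & 7) == 5,
 * so that page's header is kept on pr_hashtab[5].
 */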
|
|
|
#define POOL_NEEDS_CATCHUP(pp) \ |
|
((pp)->pr_nitems < (pp)->pr_minitems) |
|
|
|
/* |
|
* Pool cache management. |
|
* |
|
* Pool caches provide a way for constructed objects to be cached by the |
|
* pool subsystem. This can lead to performance improvements by avoiding |
|
* needless object construction/destruction; it is deferred until absolutely |
|
* necessary. |
|
* |
|
* Caches are grouped into cache groups. Each cache group references |
|
* up to 16 constructed objects. When a cache allocates an object |
|
* from the pool, it calls the object's constructor and places it into |
|
* a cache group. When a cache group frees an object back to the pool, |
|
* it first calls the object's destructor. This allows the object to |
|
* persist in constructed form while freed to the cache. |
|
* |
|
* Multiple caches may exist for each pool. This allows a single |
|
* object type to have multiple constructed forms. The pool references |
|
* each cache, so that when a pool is drained by the pagedaemon, it can |
|
* drain each individual cache as well. Each time a cache is drained, |
|
* the most idle cache group is freed to the pool in its entirety. |
|
* |
|
* Pool caches are laid on top of pools. By layering them, we can avoid
|
* the complexity of cache management for pools which would not benefit |
|
* from it. |
|
*/ |
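/*
 * Usage sketch (editorial addition, not part of this file; the "frob"
 * names are hypothetical).  Given a pool already set up with
 * pool_init(), a client with an expensive constructor layers a cache
 * on top of it:
 *
 *	static struct pool frob_pool;
 *	static struct pool_cache frob_cache;
 *	int  frob_ctor(void *arg, void *obj, int flags);
 *	void frob_dtor(void *arg, void *obj);
 *	void *obj;
 *
 *	pool_cache_init(&frob_cache, &frob_pool, frob_ctor, frob_dtor, NULL);
 *
 *	obj = pool_cache_get(&frob_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&frob_cache, obj);
 *
 * A get that finds an object in a cache group returns it already
 * constructed; only on a miss does the cache call pool_get() and run
 * frob_ctor().  A put parks the still-constructed object in a group
 * rather than destroying it.
 */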
|
|
static struct pool_item_header |
/* The cache group pool. */ |
*pr_find_pagehead __P((struct pool *, caddr_t)); |
static struct pool pcgpool; |
static void pr_rmpage __P((struct pool *, struct pool_item_header *)); |
|
static int pool_catchup __P((struct pool *)); |
|
static void pool_prime_page __P((struct pool *, caddr_t)); |
|
static void *pool_page_alloc __P((unsigned long, int, int)); |
|
static void pool_page_free __P((void *, unsigned long, int)); |
|
|
|
#if defined(POOL_DIAGNOSTIC) || defined(DEBUG) |
/* The pool cache group. */ |
static void pool_print1 __P((struct pool *, const char *)); |
#define PCG_NOBJECTS 16 |
#endif |
struct pool_cache_group { |
|
TAILQ_ENTRY(pool_cache_group) |
|
pcg_list; /* link in the pool cache's group list */ |
|
u_int pcg_avail; /* # available objects */ |
|
/* pointers to the objects */ |
|
void *pcg_objects[PCG_NOBJECTS]; |
|
}; |
|
|
|
static void pool_cache_reclaim(struct pool_cache *); |
|
|
|
static int pool_catchup(struct pool *); |
|
static void pool_prime_page(struct pool *, caddr_t, |
|
struct pool_item_header *); |
|
static void *pool_page_alloc(unsigned long, int, int); |
|
static void pool_page_free(void *, unsigned long, int); |
|
|
|
static void pool_print1(struct pool *, const char *, |
|
void (*)(const char *, ...)); |
|
|
#ifdef POOL_DIAGNOSTIC |
|
/* |
/* |
* Pool log entry. An array of these is allocated in pool_create(). |
* Pool log entry. An array of these is allocated in pool_init(). |
*/ |
*/ |
struct pool_log { |
struct pool_log { |
const char *pl_file; |
const char *pl_file; |
long pl_line; |
long pl_line; |
int pl_action; |
int pl_action; |
#define PRLOG_GET 1 |
#define PRLOG_GET 1 |
#define PRLOG_PUT 2 |
#define PRLOG_PUT 2 |
void *pl_addr; |
void *pl_addr; |
}; |
}; |
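/*
 * Editorial sketch: the log is a fixed-size ring indexed by
 * pr_curlogentry and wrapping at pr_logsize.  Recording one event
 * (action is PRLOG_GET or PRLOG_PUT) amounts to roughly the
 * following; the real pr_log() body is elided from this diff:
 *
 *	struct pool_log *pl = &pp->pr_log[pp->pr_curlogentry];
 *
 *	pl->pl_file = file;
 *	pl->pl_line = line;
 *	pl->pl_action = action;
 *	pl->pl_addr = v;
 *	if (++pp->pr_curlogentry >= pp->pr_logsize)
 *		pp->pr_curlogentry = 0;
 */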
|
|
[unchanged lines omitted (old line 137 / new line 175: struct pool_log)]
|
|
|
int pool_logsize = POOL_LOGSIZE; |
int pool_logsize = POOL_LOGSIZE; |
|
|
static void pr_log __P((struct pool *, void *, int, const char *, long)); |
#ifdef POOL_DIAGNOSTIC |
static void pr_printlog __P((struct pool *)); |
static __inline void |
|
pr_log(struct pool *pp, void *v, int action, const char *file, long line) |
static __inline__ void |
|
pr_log(pp, v, action, file, line) |
|
struct pool *pp; |
|
void *v; |
|
int action; |
|
const char *file; |
|
long line; |
|
{ |
{ |
int n = pp->pr_curlogentry; |
int n = pp->pr_curlogentry; |
struct pool_log *pl; |
struct pool_log *pl; |
[unchanged lines omitted (old line 169 / new line 200: pr_log())]
|
} |
} |
|
|
static void |
static void |
pr_printlog(pp) |
pr_printlog(struct pool *pp, struct pool_item *pi, |
struct pool *pp; |
void (*pr)(const char *, ...)) |
{ |
{ |
int i = pp->pr_logsize; |
int i = pp->pr_logsize; |
int n = pp->pr_curlogentry; |
int n = pp->pr_curlogentry; |
|
|
if ((pp->pr_roflags & PR_LOGGING) == 0) |
if ((pp->pr_roflags & PR_LOGGING) == 0) |
return; |
return; |
|
|
pool_print1(pp, "printlog"); |
|
|
|
/* |
/* |
* Print all entries in this pool's log. |
* Print all entries in this pool's log. |
*/ |
*/ |
while (i-- > 0) { |
while (i-- > 0) { |
struct pool_log *pl = &pp->pr_log[n]; |
struct pool_log *pl = &pp->pr_log[n]; |
if (pl->pl_action != 0) { |
if (pl->pl_action != 0) { |
printf("log entry %d:\n", i); |
if (pi == NULL || pi == pl->pl_addr) { |
printf("\taction = %s, addr = %p\n", |
(*pr)("\tlog entry %d:\n", i); |
pl->pl_action == PRLOG_GET ? "get" : "put", |
(*pr)("\t\taction = %s, addr = %p\n", |
pl->pl_addr); |
pl->pl_action == PRLOG_GET ? "get" : "put", |
printf("\tfile: %s at line %lu\n", |
pl->pl_addr); |
pl->pl_file, pl->pl_line); |
(*pr)("\t\tfile: %s at line %lu\n", |
|
pl->pl_file, pl->pl_line); |
|
} |
} |
} |
if (++n >= pp->pr_logsize) |
if (++n >= pp->pr_logsize) |
n = 0; |
n = 0; |
} |
} |
} |
} |
#else |
|
#define pr_log(pp, v, action, file, line) |
|
#define pr_printlog(pp) |
|
#endif |
|
|
|
|
static __inline void |
|
pr_enter(struct pool *pp, const char *file, long line) |
|
{ |
|
|
|
if (__predict_false(pp->pr_entered_file != NULL)) { |
|
printf("pool %s: reentrancy at file %s line %ld\n", |
|
pp->pr_wchan, file, line); |
|
printf(" previous entry at file %s line %ld\n", |
|
pp->pr_entered_file, pp->pr_entered_line); |
|
panic("pr_enter"); |
|
} |
|
|
|
pp->pr_entered_file = file; |
|
pp->pr_entered_line = line; |
|
} |
|
|
|
static __inline void |
|
pr_leave(struct pool *pp) |
|
{ |
|
|
|
if (__predict_false(pp->pr_entered_file == NULL)) { |
|
printf("pool %s not entered?\n", pp->pr_wchan); |
|
panic("pr_leave"); |
|
} |
|
|
|
pp->pr_entered_file = NULL; |
|
pp->pr_entered_line = 0; |
|
} |
|
|
|
static __inline void |
|
pr_enter_check(struct pool *pp, void (*pr)(const char *, ...)) |
|
{ |
|
|
|
if (pp->pr_entered_file != NULL) |
|
(*pr)("\n\tcurrently entered from file %s line %ld\n", |
|
pp->pr_entered_file, pp->pr_entered_line); |
|
} |
|
#else |
|
#define pr_log(pp, v, action, file, line) |
|
#define pr_printlog(pp, pi, pr) |
|
#define pr_enter(pp, file, line) |
|
#define pr_leave(pp) |
|
#define pr_enter_check(pp, pr) |
|
#endif /* POOL_DIAGNOSTIC */ |
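/*
 * Editorial note: under POOL_DIAGNOSTIC every locked entry point
 * brackets its critical section with pr_enter()/pr_leave(), so a
 * reentrancy bug panics and reports both call sites.  The pattern, as
 * used by _pool_get() below:
 *
 *	simple_lock(&pp->pr_slock);
 *	pr_enter(pp, file, line);
 *	... operate on the pool ...
 *	pr_leave(pp);
 *	simple_unlock(&pp->pr_slock);
 */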
|
|
/* |
/* |
* Return the pool page header based on page address. |
* Return the pool page header based on page address. |
*/ |
*/ |
static __inline__ struct pool_item_header * |
static __inline struct pool_item_header * |
pr_find_pagehead(pp, page) |
pr_find_pagehead(struct pool *pp, caddr_t page) |
struct pool *pp; |
|
caddr_t page; |
|
{ |
{ |
struct pool_item_header *ph; |
struct pool_item_header *ph; |
|
|
[unchanged lines omitted (old line 228 / new line 297: pr_find_pagehead())]
|
/* |
/* |
* Remove a page from the pool. |
* Remove a page from the pool. |
*/ |
*/ |
static __inline__ void |
static __inline void |
pr_rmpage(pp, ph) |
pr_rmpage(struct pool *pp, struct pool_item_header *ph) |
struct pool *pp; |
|
struct pool_item_header *ph; |
|
{ |
{ |
|
|
/* |
/* |
[unchanged lines omitted (old line 258 / new line 325: pr_rmpage())]
|
pp->pr_npagefree++; |
pp->pr_npagefree++; |
|
|
if ((pp->pr_roflags & PR_PHINPAGE) == 0) { |
if ((pp->pr_roflags & PR_PHINPAGE) == 0) { |
|
int s; |
LIST_REMOVE(ph, ph_hashlist); |
LIST_REMOVE(ph, ph_hashlist); |
|
s = splhigh(); |
pool_put(&phpool, ph); |
pool_put(&phpool, ph); |
|
splx(s); |
} |
} |
|
|
if (pp->pr_curpage == ph) { |
if (pp->pr_curpage == ph) { |
[unchanged lines omitted (old line 278 / new line 348: pr_rmpage())]
|
} |
} |
|
|
/* |
/* |
* Allocate and initialize a pool. |
|
*/ |
|
struct pool * |
|
pool_create(size, align, ioff, nitems, wchan, pagesz, alloc, release, mtype) |
|
size_t size; |
|
u_int align; |
|
u_int ioff; |
|
int nitems; |
|
const char *wchan; |
|
size_t pagesz; |
|
void *(*alloc) __P((unsigned long, int, int)); |
|
void (*release) __P((void *, unsigned long, int)); |
|
int mtype; |
|
{ |
|
struct pool *pp; |
|
int flags; |
|
|
|
pp = (struct pool *)malloc(sizeof(*pp), M_POOL, M_NOWAIT); |
|
if (pp == NULL) |
|
return (NULL); |
|
|
|
flags = PR_FREEHEADER; |
|
#ifdef POOL_DIAGNOSTIC |
|
if (pool_logsize != 0) |
|
flags |= PR_LOGGING; |
|
#endif |
|
|
|
pool_init(pp, size, align, ioff, flags, wchan, pagesz, |
|
alloc, release, mtype); |
|
|
|
if (nitems != 0) { |
|
if (pool_prime(pp, nitems, NULL) != 0) { |
|
pool_destroy(pp); |
|
return (NULL); |
|
} |
|
} |
|
|
|
return (pp); |
|
} |
|
|
|
/* |
|
* Initialize the given pool resource structure. |
* Initialize the given pool resource structure. |
* |
* |
* We export this routine to allow other kernel parts to declare |
* We export this routine to allow other kernel parts to declare |
* static pools that must be initialized before malloc() is available. |
* static pools that must be initialized before malloc() is available. |
*/ |
*/ |
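/*
 * Example (editorial sketch; the "frob" names are hypothetical): a
 * subsystem that needs allocations before malloc() is available
 * declares its pool statically and initializes it with defaults:
 *
 *	static struct pool frob_pool;
 *
 *	pool_init(&frob_pool, sizeof(struct frob), 0, 0, 0,
 *	    "frobpl", 0, NULL, NULL, M_DEVBUF);
 *
 * Passing 0 for pagesz and NULL for alloc/release falls back to the
 * default page size and default page allocator; M_DEVBUF is only a
 * sample malloc type tag.
 */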
void |
void |
pool_init(pp, size, align, ioff, flags, wchan, pagesz, alloc, release, mtype) |
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags, |
struct pool *pp; |
const char *wchan, size_t pagesz, |
size_t size; |
void *(*alloc)(unsigned long, int, int), |
u_int align; |
void (*release)(void *, unsigned long, int), |
u_int ioff; |
int mtype) |
int flags; |
|
const char *wchan; |
|
size_t pagesz; |
|
void *(*alloc) __P((unsigned long, int, int)); |
|
void (*release) __P((void *, unsigned long, int)); |
|
int mtype; |
|
{ |
{ |
int off, slack, i; |
int off, slack, i; |
|
|
|
#ifdef POOL_DIAGNOSTIC |
|
/* |
|
* Always log if POOL_DIAGNOSTIC is defined. |
|
*/ |
|
if (pool_logsize != 0) |
|
flags |= PR_LOGGING; |
|
#endif |
|
|
/* |
/* |
* Check arguments and construct default values. |
* Check arguments and construct default values. |
*/ |
*/ |
if (!powerof2(pagesz) || pagesz > PAGE_SIZE) |
if (!powerof2(pagesz)) |
panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz); |
panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz); |
|
|
if (alloc == NULL && release == NULL) { |
if (alloc == NULL && release == NULL) { |
[unchanged lines omitted (old line 363 / new line 394: pool_init())]
|
if (size < sizeof(struct pool_item)) |
if (size < sizeof(struct pool_item)) |
size = sizeof(struct pool_item); |
size = sizeof(struct pool_item); |
|
|
|
size = ALIGN(size); |
|
if (size > pagesz) |
|
panic("pool_init: pool item size (%lu) too large", |
|
(u_long)size); |
|
|
/* |
/* |
* Initialize the pool structure. |
* Initialize the pool structure. |
*/ |
*/ |
TAILQ_INIT(&pp->pr_pagelist); |
TAILQ_INIT(&pp->pr_pagelist); |
|
TAILQ_INIT(&pp->pr_cachelist); |
pp->pr_curpage = NULL; |
pp->pr_curpage = NULL; |
pp->pr_npages = 0; |
pp->pr_npages = 0; |
pp->pr_minitems = 0; |
pp->pr_minitems = 0; |
[unchanged lines omitted (old line 374 / new line 411: pool_init())]
|
pp->pr_maxpages = UINT_MAX; |
pp->pr_maxpages = UINT_MAX; |
pp->pr_roflags = flags; |
pp->pr_roflags = flags; |
pp->pr_flags = 0; |
pp->pr_flags = 0; |
pp->pr_size = ALIGN(size); |
pp->pr_size = size; |
pp->pr_align = align; |
pp->pr_align = align; |
pp->pr_wchan = wchan; |
pp->pr_wchan = wchan; |
pp->pr_mtype = mtype; |
pp->pr_mtype = mtype; |
[unchanged lines omitted (old line 387 / new line 424: pool_init())]
|
pp->pr_nout = 0; |
pp->pr_nout = 0; |
pp->pr_hardlimit = UINT_MAX; |
pp->pr_hardlimit = UINT_MAX; |
pp->pr_hardlimit_warning = NULL; |
pp->pr_hardlimit_warning = NULL; |
pp->pr_hardlimit_ratecap = 0; |
pp->pr_hardlimit_ratecap.tv_sec = 0; |
memset(&pp->pr_hardlimit_warning_last, 0, |
pp->pr_hardlimit_ratecap.tv_usec = 0; |
sizeof(pp->pr_hardlimit_warning_last)); |
pp->pr_hardlimit_warning_last.tv_sec = 0; |
|
pp->pr_hardlimit_warning_last.tv_usec = 0; |
|
|
/* |
/* |
* Decide whether to put the page header off page to avoid |
* Decide whether to put the page header off page to avoid |
[unchanged lines omitted (old line 421 / new line 459: pool_init())]
|
*/ |
*/ |
pp->pr_itemoffset = ioff = ioff % align; |
pp->pr_itemoffset = ioff = ioff % align; |
pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size; |
pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size; |
|
KASSERT(pp->pr_itemsperpage != 0); |
|
|
/* |
/* |
* Use the slack between the chunks and the page header |
* Use the slack between the chunks and the page header |
[unchanged lines omitted (old line 439 / new line 478: pool_init())]
|
pp->pr_nidle = 0; |
pp->pr_nidle = 0; |
|
|
#ifdef POOL_DIAGNOSTIC |
#ifdef POOL_DIAGNOSTIC |
if ((flags & PR_LOGGING) != 0) { |
if (flags & PR_LOGGING) { |
pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log), |
if (kmem_map == NULL || |
M_TEMP, M_NOWAIT); |
(pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log), |
if (pp->pr_log == NULL) |
M_TEMP, M_NOWAIT)) == NULL) |
pp->pr_roflags &= ~PR_LOGGING; |
pp->pr_roflags &= ~PR_LOGGING; |
pp->pr_curlogentry = 0; |
pp->pr_curlogentry = 0; |
pp->pr_logsize = pool_logsize; |
pp->pr_logsize = pool_logsize; |
} |
} |
#endif |
#endif |
|
|
|
pp->pr_entered_file = NULL; |
|
pp->pr_entered_line = 0; |
|
|
simple_lock_init(&pp->pr_slock); |
simple_lock_init(&pp->pr_slock); |
|
|
/* |
/* |
* Initialize private page header pool if we haven't done so yet. |
* Initialize private page header pool and cache magazine pool if we |
|
* haven't done so yet. |
* XXX LOCKING. |
* XXX LOCKING. |
*/ |
*/ |
if (phpool.pr_size == 0) { |
if (phpool.pr_size == 0) { |
pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, |
pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, |
0, "phpool", 0, 0, 0, 0); |
0, "phpool", 0, 0, 0, 0); |
|
pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0, |
|
0, "pcgpool", 0, 0, 0, 0); |
} |
} |
|
|
/* Insert into the list of all pools. */ |
/* Insert into the list of all pools. */ |
[unchanged lines omitted (old line 470 / new line 515: pool_init())]
|
* De-commission a pool resource.
* De-commission a pool resource.
*/ |
*/ |
void |
void |
pool_destroy(pp) |
pool_destroy(struct pool *pp) |
struct pool *pp; |
|
{ |
{ |
struct pool_item_header *ph; |
struct pool_item_header *ph; |
|
struct pool_cache *pc; |
|
|
|
/* Destroy all caches for this pool. */ |
|
while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL) |
|
pool_cache_destroy(pc); |
|
|
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
if (pp->pr_nout != 0) { |
if (pp->pr_nout != 0) { |
pr_printlog(pp); |
pr_printlog(pp, NULL, printf); |
panic("pool_destroy: pool busy: still out: %u\n", |
panic("pool_destroy: pool busy: still out: %u\n", |
pp->pr_nout); |
pp->pr_nout); |
} |
} |
[unchanged lines omitted (old line 504 / new line 553: pool_destroy())]
|
free(pp, M_POOL); |
free(pp, M_POOL); |
} |
} |
|
|
|
static __inline struct pool_item_header * |
|
pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags) |
|
{ |
|
struct pool_item_header *ph; |
|
int s; |
|
|
|
LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0); |
|
|
|
if ((pp->pr_roflags & PR_PHINPAGE) != 0) |
|
ph = (struct pool_item_header *) (storage + pp->pr_phoffset); |
|
else { |
|
s = splhigh(); |
|
ph = pool_get(&phpool, flags); |
|
splx(s); |
|
} |
|
|
|
return (ph); |
|
} |
|
|
/* |
/* |
* Grab an item from the pool; must be called at appropriate spl level |
* Grab an item from the pool; must be called at appropriate spl level |
*/ |
*/ |
#ifdef POOL_DIAGNOSTIC |
|
void * |
void * |
_pool_get(pp, flags, file, line) |
#ifdef POOL_DIAGNOSTIC |
struct pool *pp; |
_pool_get(struct pool *pp, int flags, const char *file, long line) |
int flags; |
|
const char *file; |
|
long line; |
|
#else |
#else |
void * |
pool_get(struct pool *pp, int flags) |
pool_get(pp, flags) |
|
struct pool *pp; |
|
int flags; |
|
#endif |
#endif |
{ |
{ |
void *v; |
|
struct pool_item *pi; |
struct pool_item *pi; |
struct pool_item_header *ph; |
struct pool_item_header *ph; |
|
void *v; |
|
|
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
if ((pp->pr_roflags & PR_STATIC) && (flags & PR_MALLOCOK)) { |
if (__predict_false((pp->pr_roflags & PR_STATIC) && |
pr_printlog(pp); |
(flags & PR_MALLOCOK))) { |
|
pr_printlog(pp, NULL, printf); |
panic("pool_get: static"); |
panic("pool_get: static"); |
} |
} |
#endif |
|
|
|
if (curproc == NULL && (flags & PR_WAITOK) != 0) |
if (__predict_false(curproc == NULL && doing_shutdown == 0 && |
|
(flags & PR_WAITOK) != 0)) |
panic("pool_get: must have NOWAIT"); |
panic("pool_get: must have NOWAIT"); |
|
|
|
#ifdef LOCKDEBUG |
|
if (flags & PR_WAITOK) |
|
simple_lock_only_held(NULL, "pool_get(PR_WAITOK)"); |
|
#endif |
|
#endif /* DIAGNOSTIC */ |
|
|
simple_lock(&pp->pr_slock); |
simple_lock(&pp->pr_slock); |
|
pr_enter(pp, file, line); |
|
|
startover: |
startover: |
/* |
/* |
[unchanged lines omitted (old line 545 / new line 613: pool_get())]
|
* the pool. |
* the pool. |
*/ |
*/ |
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
if (pp->pr_nout > pp->pr_hardlimit) { |
if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) { |
|
pr_leave(pp); |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
panic("pool_get: %s: crossed hard limit", pp->pr_wchan); |
panic("pool_get: %s: crossed hard limit", pp->pr_wchan); |
} |
} |
#endif |
#endif |
if (pp->pr_nout == pp->pr_hardlimit) { |
if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) { |
if (flags & PR_WAITOK) { |
if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) { |
/* |
/* |
* XXX: A warning isn't logged in this case. Should |
* XXX: A warning isn't logged in this case. Should |
* it be? |
* it be? |
*/ |
*/ |
pp->pr_flags |= PR_WANTED; |
pp->pr_flags |= PR_WANTED; |
simple_unlock(&pp->pr_slock); |
pr_leave(pp); |
tsleep((caddr_t)pp, PSWP, pp->pr_wchan, 0); |
ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock); |
simple_lock(&pp->pr_slock); |
pr_enter(pp, file, line); |
goto startover; |
goto startover; |
} |
} |
if (pp->pr_hardlimit_warning != NULL) { |
|
/* |
/* |
* Log a message that the hard limit has been hit. |
* Log a message that the hard limit has been hit. |
*/ |
*/ |
struct timeval curtime, logdiff; |
if (pp->pr_hardlimit_warning != NULL && |
int s = splclock(); |
ratecheck(&pp->pr_hardlimit_warning_last, |
curtime = mono_time; |
&pp->pr_hardlimit_ratecap)) |
splx(s); |
log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning); |
timersub(&curtime, &pp->pr_hardlimit_warning_last, |
|
&logdiff); |
|
if (logdiff.tv_sec >= pp->pr_hardlimit_ratecap) { |
|
pp->pr_hardlimit_warning_last = curtime; |
|
log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning); |
|
} |
|
} |
|
|
|
if (flags & PR_URGENT) |
if (flags & PR_URGENT) |
panic("pool_get: urgent"); |
panic("pool_get: urgent"); |
|
|
pp->pr_nfail++; |
pp->pr_nfail++; |
|
|
|
pr_leave(pp); |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
return (NULL); |
return (NULL); |
} |
} |
[unchanged lines omitted (old line 594 / new line 657: pool_get())]
|
* has no items in its bucket. |
* has no items in its bucket. |
*/ |
*/ |
if ((ph = pp->pr_curpage) == NULL) { |
if ((ph = pp->pr_curpage) == NULL) { |
void *v; |
|
|
|
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
if (pp->pr_nitems != 0) { |
if (pp->pr_nitems != 0) { |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
[unchanged lines omitted (old line 610 / new line 671: pool_get())]
|
* Release the pool lock, as the back-end page allocator |
* Release the pool lock, as the back-end page allocator |
* may block. |
* may block. |
*/ |
*/ |
|
pr_leave(pp); |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype); |
v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype); |
|
if (__predict_true(v != NULL)) |
|
ph = pool_alloc_item_header(pp, v, flags); |
simple_lock(&pp->pr_slock); |
simple_lock(&pp->pr_slock); |
|
pr_enter(pp, file, line); |
|
|
|
if (__predict_false(v == NULL || ph == NULL)) { |
|
if (v != NULL) |
|
(*pp->pr_free)(v, pp->pr_pagesz, pp->pr_mtype); |
|
|
if (v == NULL) { |
|
/* |
/* |
* We were unable to allocate a page, but |
* We were unable to allocate a page or item |
* we released the lock during allocation, |
* header, but we released the lock during |
* so perhaps items were freed back to the |
* allocation, so perhaps items were freed |
* pool. Check for this case. |
* back to the pool. Check for this case. |
*/ |
*/ |
if (pp->pr_curpage != NULL) |
if (pp->pr_curpage != NULL) |
goto startover; |
goto startover; |
[unchanged lines omitted (old line 629 / new line 697: pool_get())]
|
|
|
if ((flags & PR_WAITOK) == 0) { |
if ((flags & PR_WAITOK) == 0) { |
pp->pr_nfail++; |
pp->pr_nfail++; |
|
pr_leave(pp); |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
return (NULL); |
return (NULL); |
} |
} |
[unchanged lines omitted (old line 645 / new line 714: pool_get())]
|
* try again? |
* try again? |
*/ |
*/ |
pp->pr_flags |= PR_WANTED; |
pp->pr_flags |= PR_WANTED; |
simple_unlock(&pp->pr_slock); |
pr_leave(pp); |
tsleep((caddr_t)pp, PSWP, pp->pr_wchan, 0); |
ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock); |
simple_lock(&pp->pr_slock); |
pr_enter(pp, file, line); |
goto startover; |
goto startover; |
} |
} |
|
|
/* We have more memory; add it to the pool */ |
/* We have more memory; add it to the pool */ |
|
pool_prime_page(pp, v, ph); |
pp->pr_npagealloc++; |
pp->pr_npagealloc++; |
pool_prime_page(pp, v); |
|
|
|
/* Start the allocation process over. */ |
/* Start the allocation process over. */ |
goto startover; |
goto startover; |
} |
} |
|
|
if ((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL) { |
if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) { |
|
pr_leave(pp); |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
panic("pool_get: %s: page empty", pp->pr_wchan); |
panic("pool_get: %s: page empty", pp->pr_wchan); |
} |
} |
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
if (pp->pr_nitems == 0) { |
if (__predict_false(pp->pr_nitems == 0)) { |
|
pr_leave(pp); |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
printf("pool_get: %s: items on itemlist, nitems %u\n", |
printf("pool_get: %s: items on itemlist, nitems %u\n", |
pp->pr_wchan, pp->pr_nitems); |
pp->pr_wchan, pp->pr_nitems); |
panic("pool_get: nitems inconsistent\n"); |
panic("pool_get: nitems inconsistent\n"); |
} |
} |
#endif |
|
pr_log(pp, v, PRLOG_GET, file, line); |
pr_log(pp, v, PRLOG_GET, file, line); |
|
|
#ifdef DIAGNOSTIC |
if (__predict_false(pi->pi_magic != PI_MAGIC)) { |
if (pi->pi_magic != PI_MAGIC) { |
pr_printlog(pp, pi, printf); |
pr_printlog(pp); |
|
panic("pool_get(%s): free list modified: magic=%x; page %p;" |
panic("pool_get(%s): free list modified: magic=%x; page %p;" |
" item addr %p\n", |
" item addr %p\n", |
pp->pr_wchan, pi->pi_magic, ph->ph_page, pi); |
pp->pr_wchan, pi->pi_magic, ph->ph_page, pi); |
[unchanged lines omitted (old line 690 / new line 760: pool_get())]
|
pp->pr_nout++; |
pp->pr_nout++; |
if (ph->ph_nmissing == 0) { |
if (ph->ph_nmissing == 0) { |
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
if (pp->pr_nidle == 0) |
if (__predict_false(pp->pr_nidle == 0)) |
panic("pool_get: nidle inconsistent"); |
panic("pool_get: nidle inconsistent"); |
#endif |
#endif |
pp->pr_nidle--; |
pp->pr_nidle--; |
[unchanged lines omitted (old line 698 / new line 768: pool_get())]
|
ph->ph_nmissing++; |
ph->ph_nmissing++; |
if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) { |
if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) { |
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
if (ph->ph_nmissing != pp->pr_itemsperpage) { |
if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) { |
|
pr_leave(pp); |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
panic("pool_get: %s: nmissing inconsistent", |
panic("pool_get: %s: nmissing inconsistent", |
pp->pr_wchan); |
pp->pr_wchan); |
[unchanged lines omitted (old line 731 / new line 802: pool_get())]
|
* If we have a low water mark and we are now below that low |
* If we have a low water mark and we are now below that low |
* water mark, add more items to the pool. |
* water mark, add more items to the pool. |
*/ |
*/ |
if (pp->pr_nitems < pp->pr_minitems && pool_catchup(pp) != 0) { |
if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) { |
/* |
/* |
* XXX: Should we log a warning? Should we set up a timeout |
* XXX: Should we log a warning? Should we set up a timeout |
* to try again in a second or so? The latter could break |
* to try again in a second or so? The latter could break |
[unchanged lines omitted (old line 739 / new line 810: pool_get())]
|
*/ |
*/ |
} |
} |
|
|
|
pr_leave(pp); |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
return (v); |
return (v); |
} |
} |
|
|
/* |
/* |
* Return resource to the pool; must be called at appropriate spl level |
* Internal version of pool_put(). Pool is already locked/entered. |
*/ |
*/ |
#ifdef POOL_DIAGNOSTIC |
static void |
void |
pool_do_put(struct pool *pp, void *v) |
_pool_put(pp, v, file, line) |
|
struct pool *pp; |
|
void *v; |
|
const char *file; |
|
long line; |
|
#else |
|
void |
|
pool_put(pp, v) |
|
struct pool *pp; |
|
void *v; |
|
#endif |
|
{ |
{ |
struct pool_item *pi = v; |
struct pool_item *pi = v; |
struct pool_item_header *ph; |
struct pool_item_header *ph; |
|
|
|
|
page = (caddr_t)((u_long)v & pp->pr_pagemask); |
page = (caddr_t)((u_long)v & pp->pr_pagemask); |
|
|
simple_lock(&pp->pr_slock); |
#ifdef DIAGNOSTIC |
|
if (__predict_false(pp->pr_nout == 0)) { |
pr_log(pp, v, PRLOG_PUT, file, line); |
printf("pool %s: putting with none out\n", |
|
pp->pr_wchan); |
|
panic("pool_put"); |
|
} |
|
#endif |
|
|
if ((ph = pr_find_pagehead(pp, page)) == NULL) { |
if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) { |
pr_printlog(pp); |
pr_printlog(pp, NULL, printf); |
panic("pool_put: %s: page header missing", pp->pr_wchan); |
panic("pool_put: %s: page header missing", pp->pr_wchan); |
} |
} |
|
|
|
#ifdef LOCKDEBUG |
|
/* |
|
* Check if we're freeing a locked simple lock. |
|
*/ |
|
simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size); |
|
#endif |
|
|
/* |
/* |
* Return to item list. |
* Return to item list. |
*/ |
*/ |
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
pi->pi_magic = PI_MAGIC; |
pi->pi_magic = PI_MAGIC; |
#endif |
#endif |
|
#ifdef DEBUG |
|
{ |
|
int i, *ip = v; |
|
|
|
for (i = 0; i < pp->pr_size / sizeof(int); i++) { |
|
*ip++ = PI_MAGIC; |
|
} |
|
} |
|
#endif |
|
|
TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list); |
TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list); |
ph->ph_nmissing--; |
ph->ph_nmissing--; |
pp->pr_nput++; |
pp->pr_nput++; |
|
|
pp->pr_flags &= ~PR_WANTED; |
pp->pr_flags &= ~PR_WANTED; |
if (ph->ph_nmissing == 0) |
if (ph->ph_nmissing == 0) |
pp->pr_nidle++; |
pp->pr_nidle++; |
simple_unlock(&pp->pr_slock); |
|
wakeup((caddr_t)pp); |
wakeup((caddr_t)pp); |
return; |
return; |
} |
} |
|
|
TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist); |
TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist); |
pp->pr_curpage = ph; |
pp->pr_curpage = ph; |
} |
} |
|
} |
|
|
|
/* |
|
* Return resource to the pool; must be called at appropriate spl level |
|
*/ |
|
#ifdef POOL_DIAGNOSTIC |
|
void |
|
_pool_put(struct pool *pp, void *v, const char *file, long line) |
|
{ |
|
|
|
simple_lock(&pp->pr_slock); |
|
pr_enter(pp, file, line); |
|
|
|
pr_log(pp, v, PRLOG_PUT, file, line); |
|
|
|
pool_do_put(pp, v); |
|
|
|
pr_leave(pp); |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
|
} |
|
#undef pool_put |
|
#endif /* POOL_DIAGNOSTIC */ |
|
|
|
void |
|
pool_put(struct pool *pp, void *v) |
|
{ |
|
|
|
simple_lock(&pp->pr_slock); |
|
|
|
pool_do_put(pp, v); |
|
|
|
simple_unlock(&pp->pr_slock); |
} |
} |
|
|
|
#ifdef POOL_DIAGNOSTIC |
|
#define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__) |
|
#endif |
|
|
/* |
/* |
* Add N items to the pool. |
* Add N items to the pool. |
*/ |
*/ |
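/*
 * E.g. (editorial sketch): pool_prime(&frob_pool, 32) allocates enough
 * pages up front to hold at least 32 items, making early
 * pool_get(..., PR_NOWAIT) callers less likely to see NULL.
 */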
int |
int |
pool_prime(pp, n, storage) |
pool_prime(struct pool *pp, int n) |
struct pool *pp; |
|
int n; |
|
caddr_t storage; |
|
{ |
{ |
|
struct pool_item_header *ph; |
caddr_t cp; |
caddr_t cp; |
int newnitems, newpages; |
int newpages, error = 0; |
|
|
#ifdef DIAGNOSTIC |
|
if (storage && !(pp->pr_roflags & PR_STATIC)) |
|
panic("pool_prime: static"); |
|
/* !storage && static caught below */ |
|
#endif |
|
|
|
simple_lock(&pp->pr_slock); |
simple_lock(&pp->pr_slock); |
|
|
newnitems = pp->pr_minitems + n; |
newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; |
newpages = |
|
roundup(newnitems, pp->pr_itemsperpage) / pp->pr_itemsperpage |
|
- pp->pr_minpages; |
|
|
|
while (newpages-- > 0) { |
while (newpages-- > 0) { |
if (pp->pr_roflags & PR_STATIC) { |
simple_unlock(&pp->pr_slock); |
cp = storage; |
cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype); |
storage += pp->pr_pagesz; |
if (__predict_true(cp != NULL)) |
} else { |
ph = pool_alloc_item_header(pp, cp, PR_NOWAIT); |
simple_unlock(&pp->pr_slock); |
simple_lock(&pp->pr_slock); |
cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype); |
|
simple_lock(&pp->pr_slock); |
|
} |
|
|
|
if (cp == NULL) { |
if (__predict_false(cp == NULL || ph == NULL)) { |
simple_unlock(&pp->pr_slock); |
error = ENOMEM; |
return (ENOMEM); |
if (cp != NULL) |
|
(*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype); |
|
break; |
} |
} |
|
|
pool_prime_page(pp, cp); |
pool_prime_page(pp, cp, ph); |
|
pp->pr_npagealloc++; |
pp->pr_minpages++; |
pp->pr_minpages++; |
} |
} |
|
|
pp->pr_minitems = newnitems; |
|
|
|
if (pp->pr_minpages >= pp->pr_maxpages) |
if (pp->pr_minpages >= pp->pr_maxpages) |
pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */ |
pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */ |
|
|
[unchanged lines omitted (old line 924 / new line 1025: pool_prime())]
|
* Note, we must be called with the pool descriptor LOCKED. |
* Note, we must be called with the pool descriptor LOCKED. |
*/ |
*/ |
static void |
static void |
pool_prime_page(pp, storage) |
pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph) |
struct pool *pp; |
|
caddr_t storage; |
|
{ |
{ |
struct pool_item *pi; |
struct pool_item *pi; |
struct pool_item_header *ph; |
|
caddr_t cp = storage; |
caddr_t cp = storage; |
unsigned int align = pp->pr_align; |
unsigned int align = pp->pr_align; |
unsigned int ioff = pp->pr_itemoffset; |
unsigned int ioff = pp->pr_itemoffset; |
int n; |
int n; |
|
|
if ((pp->pr_roflags & PR_PHINPAGE) != 0) { |
if (((u_long)cp & (pp->pr_pagesz - 1)) != 0) |
ph = (struct pool_item_header *)(cp + pp->pr_phoffset); |
panic("pool_prime_page: %s: unaligned page", pp->pr_wchan); |
} else { |
|
ph = pool_get(&phpool, PR_URGENT); |
if ((pp->pr_roflags & PR_PHINPAGE) == 0) |
LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)], |
LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)], |
ph, ph_hashlist); |
ph, ph_hashlist); |
} |
|
|
|
/* |
/* |
* Insert page header. |
* Insert page header. |
[unchanged lines omitted (old line 995 / new line 1092: pool_prime_page())]
|
} |
} |
|
|
/* |
/* |
* Like pool_prime(), except this is used by pool_get() when nitems |
* Used by pool_get() when nitems drops below the low water mark. This |
* drops below the low water mark. This is used to catch up nitems
* is used to catch up nitems with the low water mark.
* with the low water mark. |
|
* |
* |
* Note 1, we never wait for memory here, we let the caller decide what to do. |
* Note 1, we never wait for memory here, we let the caller decide what to do. |
* |
* |
[unchanged lines omitted (old line 1007 / new line 1103: pool_prime_page())]
|
* with it locked. |
* with it locked. |
*/ |
*/ |
static int |
static int |
pool_catchup(pp) |
pool_catchup(struct pool *pp) |
struct pool *pp; |
|
{ |
{ |
|
struct pool_item_header *ph; |
caddr_t cp; |
caddr_t cp; |
int error = 0; |
int error = 0; |
|
|
[unchanged lines omitted (old line 1025 / new line 1121: pool_catchup())]
|
return (0); |
return (0); |
} |
} |
|
|
while (pp->pr_nitems < pp->pr_minitems) { |
while (POOL_NEEDS_CATCHUP(pp)) { |
/* |
/* |
* Call the page back-end allocator for more memory. |
* Call the page back-end allocator for more memory. |
* |
* |
[unchanged lines omitted (old line 1033 / new line 1129: pool_catchup())]
|
* the pool descriptor? |
* the pool descriptor? |
*/ |
*/ |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype); |
cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype); |
|
if (__predict_true(cp != NULL)) |
|
ph = pool_alloc_item_header(pp, cp, PR_NOWAIT); |
simple_lock(&pp->pr_slock); |
simple_lock(&pp->pr_slock); |
if (cp == NULL) { |
if (__predict_false(cp == NULL || ph == NULL)) { |
|
if (cp != NULL) |
|
(*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype); |
error = ENOMEM; |
error = ENOMEM; |
break; |
break; |
} |
} |
pool_prime_page(pp, cp); |
pool_prime_page(pp, cp, ph); |
|
pp->pr_npagealloc++; |
} |
} |
|
|
return (error); |
return (error); |
} |
} |
|
|
void |
void |
pool_setlowat(pp, n) |
pool_setlowat(struct pool *pp, int n) |
pool_handle_t pp; |
|
int n; |
|
{ |
{ |
int error; |
int error; |
|
|
[unchanged lines omitted (old line 1060 / new line 1159: pool_setlowat())]
|
: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; |
: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; |
|
|
/* Make sure we're caught up with the newly-set low water mark. */ |
/* Make sure we're caught up with the newly-set low water mark. */ |
if ((error = pool_catchup(pp)) != 0) { |
if (POOL_NEEDS_CATCHUP(pp) && (error = pool_catchup(pp) != 0)) { |
/* |
/* |
* XXX: Should we log a warning? Should we set up a timeout |
* XXX: Should we log a warning? Should we set up a timeout |
* to try again in a second or so? The latter could break |
* to try again in a second or so? The latter could break |
[unchanged lines omitted (old line 1072 / new line 1171: pool_setlowat())]
|
} |
} |
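/*
 * Editorial sketch of the tuning knobs (hypothetical values):
 *
 *	pool_setlowat(&frob_pool, 16);
 *		try to keep at least 16 items allocated to the pool
 *	pool_sethiwat(&frob_pool, 256);
 *		allow idle pages beyond roughly 256 items' worth to be freed
 *	pool_sethardlimit(&frob_pool, 1024, "frob_pool limit reached", 60);
 *		refuse to let more than 1024 items be outstanding
 *		(PR_WAITOK callers sleep), logging the warning at most
 *		once every 60 seconds
 */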
|
|
void |
void |
pool_sethiwat(pp, n) |
pool_sethiwat(struct pool *pp, int n) |
pool_handle_t pp; |
|
int n; |
|
{ |
{ |
|
|
simple_lock(&pp->pr_slock); |
simple_lock(&pp->pr_slock); |
[unchanged lines omitted (old line 1087 / new line 1184: pool_sethiwat())]
|
} |
} |
|
|
void |
void |
pool_sethardlimit(pp, n, warnmess, ratecap) |
pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap) |
pool_handle_t pp; |
|
int n; |
|
const char *warnmess; |
|
int ratecap; |
|
{ |
{ |
|
|
simple_lock(&pp->pr_slock); |
simple_lock(&pp->pr_slock); |
|
|
pp->pr_hardlimit = n; |
pp->pr_hardlimit = n; |
pp->pr_hardlimit_warning = warnmess; |
pp->pr_hardlimit_warning = warnmess; |
pp->pr_hardlimit_ratecap = ratecap; |
pp->pr_hardlimit_ratecap.tv_sec = ratecap; |
memset(&pp->pr_hardlimit_warning_last, 0, |
pp->pr_hardlimit_warning_last.tv_sec = 0; |
sizeof(pp->pr_hardlimit_warning_last)); |
pp->pr_hardlimit_warning_last.tv_usec = 0; |
|
|
/* |
/* |
* In-line version of pool_sethiwat(), because we don't want to |
* In-line version of pool_sethiwat(), because we don't want to |
[unchanged lines omitted (old line 1117 / new line 1210: pool_sethardlimit())]
|
* Default page allocator. |
* Default page allocator. |
*/ |
*/ |
static void * |
static void * |
pool_page_alloc(sz, flags, mtype) |
pool_page_alloc(unsigned long sz, int flags, int mtype) |
unsigned long sz; |
|
int flags; |
|
int mtype; |
|
{ |
{ |
boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; |
boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; |
|
|
[unchanged lines omitted (old line 1128 / new line 1218: pool_page_alloc())]
|
} |
} |
|
|
static void |
static void |
pool_page_free(v, sz, mtype) |
pool_page_free(void *v, unsigned long sz, int mtype) |
void *v; |
|
unsigned long sz; |
|
int mtype; |
|
{ |
{ |
|
|
uvm_km_free_poolpage((vaddr_t)v); |
uvm_km_free_poolpage((vaddr_t)v); |
[unchanged lines omitted (old line 1142 / new line 1229: pool_page_free())]
|
* never be accessed in interrupt context. |
* never be accessed in interrupt context. |
*/ |
*/ |
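/*
 * Editorial sketch: a pool whose items are only ever touched from
 * process context can pass this pair to pool_init() instead of the
 * interrupt-safe defaults:
 *
 *	pool_init(&frob_pool, sizeof(struct frob), 0, 0, 0, "frobpl",
 *	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_TEMP);
 */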
void * |
void * |
pool_page_alloc_nointr(sz, flags, mtype) |
pool_page_alloc_nointr(unsigned long sz, int flags, int mtype) |
unsigned long sz; |
|
int flags; |
|
int mtype; |
|
{ |
{ |
boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; |
boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; |
|
|
[unchanged lines omitted (old line 1154 / new line 1238: pool_page_alloc_nointr())]
|
} |
} |
|
|
void |
void |
pool_page_free_nointr(v, sz, mtype) |
pool_page_free_nointr(void *v, unsigned long sz, int mtype) |
void *v; |
|
unsigned long sz; |
|
int mtype; |
|
{ |
{ |
|
|
uvm_km_free_poolpage1(kernel_map, (vaddr_t)v); |
uvm_km_free_poolpage1(kernel_map, (vaddr_t)v); |
[unchanged lines omitted (old line 1168 / new line 1249: pool_page_free_nointr())]
|
* Release all complete pages that have not been used recently. |
* Release all complete pages that have not been used recently. |
*/ |
*/ |
void |
void |
pool_reclaim(pp) |
#ifdef POOL_DIAGNOSTIC |
pool_handle_t pp; |
_pool_reclaim(struct pool *pp, const char *file, long line) |
|
#else |
|
pool_reclaim(struct pool *pp) |
|
#endif |
{ |
{ |
struct pool_item_header *ph, *phnext; |
struct pool_item_header *ph, *phnext; |
|
struct pool_cache *pc; |
struct timeval curtime; |
struct timeval curtime; |
int s; |
int s; |
|
|
[unchanged lines omitted (old line 1180 / new line 1265: pool_reclaim())]
|
|
|
if (simple_lock_try(&pp->pr_slock) == 0) |
if (simple_lock_try(&pp->pr_slock) == 0) |
return; |
return; |
|
pr_enter(pp, file, line); |
|
|
|
/* |
|
* Reclaim items from the pool's caches. |
|
*/ |
|
for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL; |
|
pc = TAILQ_NEXT(pc, pc_poollist)) |
|
pool_cache_reclaim(pc); |
|
|
s = splclock(); |
s = splclock(); |
curtime = mono_time; |
curtime = mono_time; |
[unchanged lines omitted (old line 1210 / new line 1303: pool_reclaim())]
|
} |
} |
} |
} |
|
|
|
pr_leave(pp); |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
} |
} |
|
|
[unchanged lines omitted (old line 1220 / new line 1314: pool_reclaim())]
|
* Note, we must never be called from an interrupt context. |
* Note, we must never be called from an interrupt context. |
*/ |
*/ |
void |
void |
pool_drain(arg) |
pool_drain(void *arg) |
void *arg; |
|
{ |
{ |
struct pool *pp; |
struct pool *pp; |
int s; |
int s; |
|
|
s = splimp(); |
s = splvm(); |
simple_lock(&pool_head_slock); |
simple_lock(&pool_head_slock); |
|
|
if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL) |
if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL) |
[unchanged lines omitted (old line 1243 / new line 1336: pool_drain())]
|
} |
} |
|
|
|
|
#if defined(POOL_DIAGNOSTIC) || defined(DEBUG) |
|
/* |
/* |
* Diagnostic helpers. |
* Diagnostic helpers. |
*/ |
*/ |
void |
void |
pool_print(pp, label) |
pool_print(struct pool *pp, const char *modif) |
struct pool *pp; |
|
const char *label; |
|
{ |
{ |
int s; |
int s; |
|
|
s = splimp(); |
s = splvm(); |
simple_lock(&pp->pr_slock); |
if (simple_lock_try(&pp->pr_slock) == 0) { |
pool_print1(pp, label); |
printf("pool %s is locked; try again later\n", |
|
pp->pr_wchan); |
|
splx(s); |
|
return; |
|
} |
|
pool_print1(pp, modif, printf); |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
splx(s); |
splx(s); |
} |
} |
|
|
|
void |
|
pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...)) |
|
{ |
|
int didlock = 0; |
|
|
|
if (pp == NULL) { |
|
(*pr)("Must specify a pool to print.\n"); |
|
return; |
|
} |
|
|
|
/* |
|
* Called from DDB; interrupts should be blocked, and all |
|
* other processors should be paused. We can skip locking |
|
* the pool in this case. |
|
* |
|
* We do a simple_lock_try() just to print the lock |
|
* status, however. |
|
*/ |
|
|
|
if (simple_lock_try(&pp->pr_slock) == 0) |
|
(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan); |
|
else |
|
didlock = 1; |
|
|
|
pool_print1(pp, modif, pr); |
|
|
|
if (didlock) |
|
simple_unlock(&pp->pr_slock); |
|
} |
|
|
static void |
static void |
pool_print1(pp, label) |
pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...)) |
struct pool *pp; |
|
const char *label; |
|
{ |
{ |
|
struct pool_item_header *ph; |
|
struct pool_cache *pc; |
|
struct pool_cache_group *pcg; |
|
#ifdef DIAGNOSTIC |
|
struct pool_item *pi; |
|
#endif |
|
int i, print_log = 0, print_pagelist = 0, print_cache = 0; |
|
char c; |
|
|
if (label != NULL) |
while ((c = *modif++) != '\0') { |
printf("%s: ", label); |
if (c == 'l') |
|
print_log = 1; |
|
if (c == 'p') |
|
print_pagelist = 1; |
|
if (c == 'c') |
|
print_cache = 1; |
|
modif++; |
|
} |
|
|
|
(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n", |
|
pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset, |
|
pp->pr_roflags); |
|
(*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype); |
|
(*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free); |
|
(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n", |
|
pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages); |
|
(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n", |
|
pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit); |
|
|
|
(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n", |
|
pp->pr_nget, pp->pr_nfail, pp->pr_nput); |
|
(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n", |
|
pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle); |
|
|
|
if (print_pagelist == 0) |
|
goto skip_pagelist; |
|
|
|
if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL) |
|
(*pr)("\n\tpage list:\n"); |
|
for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) { |
|
(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n", |
|
ph->ph_page, ph->ph_nmissing, |
|
(u_long)ph->ph_time.tv_sec, |
|
(u_long)ph->ph_time.tv_usec); |
|
#ifdef DIAGNOSTIC |
|
for (pi = TAILQ_FIRST(&ph->ph_itemlist); pi != NULL; |
|
pi = TAILQ_NEXT(pi, pi_list)) { |
|
if (pi->pi_magic != PI_MAGIC) { |
|
(*pr)("\t\t\titem %p, magic 0x%x\n", |
|
pi, pi->pi_magic); |
|
} |
|
} |
|
#endif |
|
} |
|
if (pp->pr_curpage == NULL) |
|
(*pr)("\tno current page\n"); |
|
else |
|
(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page); |
|
|
|
skip_pagelist: |
|
|
|
if (print_log == 0) |
|
goto skip_log; |
|
|
printf("pool %s: nalloc %lu nfree %lu npagealloc %lu npagefree %lu\n" |
(*pr)("\n"); |
" npages %u minitems %u itemsperpage %u itemoffset %u\n" |
if ((pp->pr_roflags & PR_LOGGING) == 0) |
" nidle %lu\n", |
(*pr)("\tno log\n"); |
pp->pr_wchan, |
else |
pp->pr_nget, |
pr_printlog(pp, NULL, pr); |
pp->pr_nput, |
|
pp->pr_npagealloc, |
skip_log: |
pp->pr_npagefree, |
|
pp->pr_npages, |
if (print_cache == 0) |
pp->pr_minitems, |
goto skip_cache; |
pp->pr_itemsperpage, |
|
pp->pr_itemoffset, |
for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL; |
pp->pr_nidle); |
pc = TAILQ_NEXT(pc, pc_poollist)) { |
|
(*pr)("\tcache %p: allocfrom %p freeto %p\n", pc, |
|
pc->pc_allocfrom, pc->pc_freeto); |
|
(*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n", |
|
pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems); |
|
for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL; |
|
pcg = TAILQ_NEXT(pcg, pcg_list)) { |
|
(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); |
|
for (i = 0; i < PCG_NOBJECTS; i++) |
|
(*pr)("\t\t\t%p\n", pcg->pcg_objects[i]); |
|
} |
|
} |
|
|
|
skip_cache: |
|
|
|
pr_enter_check(pp, pr); |
} |
} |
|
|
int |
int |
pool_chk(pp, label) |
pool_chk(struct pool *pp, const char *label) |
struct pool *pp; |
|
char *label; |
|
{ |
{ |
struct pool_item_header *ph; |
struct pool_item_header *ph; |
int r = 0; |
int r = 0; |
|
|
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
return (r); |
return (r); |
} |
} |
#endif /* POOL_DIAGNOSTIC || DEBUG */ |
|
|
/* |
|
* pool_cache_init: |
|
* |
|
* Initialize a pool cache. |
|
* |
|
* NOTE: If the pool must be protected from interrupts, we expect |
|
* to be called at the appropriate interrupt priority level. |
|
*/ |
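/*
 * Editorial sketch: for a pool that is also used from interrupt
 * context, the caller raises to the pool's protection level first,
 * e.g. (assuming splvm() is the right level for this pool):
 *
 *	s = splvm();
 *	pool_cache_init(&frob_cache, &frob_pool, frob_ctor, frob_dtor, NULL);
 *	splx(s);
 */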
|
void |
|
pool_cache_init(struct pool_cache *pc, struct pool *pp, |
|
int (*ctor)(void *, void *, int), |
|
void (*dtor)(void *, void *), |
|
void *arg) |
|
{ |
|
|
|
TAILQ_INIT(&pc->pc_grouplist); |
|
simple_lock_init(&pc->pc_slock); |
|
|
|
pc->pc_allocfrom = NULL; |
|
pc->pc_freeto = NULL; |
|
pc->pc_pool = pp; |
|
|
|
pc->pc_ctor = ctor; |
|
pc->pc_dtor = dtor; |
|
pc->pc_arg = arg; |
|
|
|
pc->pc_hits = 0; |
|
pc->pc_misses = 0; |
|
|
|
pc->pc_ngroups = 0; |
|
|
|
pc->pc_nitems = 0; |
|
|
|
simple_lock(&pp->pr_slock); |
|
TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist); |
|
simple_unlock(&pp->pr_slock); |
|
} |
|
|
|
/* |
|
* pool_cache_destroy: |
|
* |
|
* Destroy a pool cache. |
|
*/ |
|
void |
|
pool_cache_destroy(struct pool_cache *pc) |
|
{ |
|
struct pool *pp = pc->pc_pool; |
|
|
|
/* First, invalidate the entire cache. */ |
|
pool_cache_invalidate(pc); |
|
|
|
/* ...and remove it from the pool's cache list. */ |
|
simple_lock(&pp->pr_slock); |
|
TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist); |
|
simple_unlock(&pp->pr_slock); |
|
} |
|
|
|
static __inline void * |
|
pcg_get(struct pool_cache_group *pcg) |
|
{ |
|
void *object; |
|
u_int idx; |
|
|
|
KASSERT(pcg->pcg_avail <= PCG_NOBJECTS); |
|
KASSERT(pcg->pcg_avail != 0); |
|
idx = --pcg->pcg_avail; |
|
|
|
KASSERT(pcg->pcg_objects[idx] != NULL); |
|
object = pcg->pcg_objects[idx]; |
|
pcg->pcg_objects[idx] = NULL; |
|
|
|
return (object); |
|
} |
|
|
|
static __inline void |
|
pcg_put(struct pool_cache_group *pcg, void *object) |
|
{ |
|
u_int idx; |
|
|
|
KASSERT(pcg->pcg_avail < PCG_NOBJECTS); |
|
idx = pcg->pcg_avail++; |
|
|
|
KASSERT(pcg->pcg_objects[idx] == NULL); |
|
pcg->pcg_objects[idx] = object; |
|
} |
|
|
|
/* |
|
* pool_cache_get: |
|
* |
|
* Get an object from a pool cache. |
|
*/ |
|
void * |
|
pool_cache_get(struct pool_cache *pc, int flags) |
|
{ |
|
struct pool_cache_group *pcg; |
|
void *object; |
|
|
|
#ifdef LOCKDEBUG |
|
if (flags & PR_WAITOK) |
|
simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)"); |
|
#endif |
|
|
|
simple_lock(&pc->pc_slock); |
|
|
|
if ((pcg = pc->pc_allocfrom) == NULL) { |
|
for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL; |
|
pcg = TAILQ_NEXT(pcg, pcg_list)) { |
|
if (pcg->pcg_avail != 0) { |
|
pc->pc_allocfrom = pcg; |
|
goto have_group; |
|
} |
|
} |
|
|
|
/* |
|
* No groups with any available objects. Allocate |
|
* a new object, construct it, and return it to |
|
* the caller. We will allocate a group, if necessary, |
|
* when the object is freed back to the cache. |
|
*/ |
|
pc->pc_misses++; |
|
simple_unlock(&pc->pc_slock); |
|
object = pool_get(pc->pc_pool, flags); |
|
if (object != NULL && pc->pc_ctor != NULL) { |
|
if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) { |
|
pool_put(pc->pc_pool, object); |
|
return (NULL); |
|
} |
|
} |
|
return (object); |
|
} |
|
|
|
have_group: |
|
pc->pc_hits++; |
|
pc->pc_nitems--; |
|
object = pcg_get(pcg); |
|
|
|
if (pcg->pcg_avail == 0) |
|
pc->pc_allocfrom = NULL; |
|
|
|
simple_unlock(&pc->pc_slock); |
|
|
|
return (object); |
|
} |
|
|
|
/* |
|
* pool_cache_put: |
|
* |
|
* Put an object back to the pool cache. |
|
*/ |
|
void |
|
pool_cache_put(struct pool_cache *pc, void *object) |
|
{ |
|
struct pool_cache_group *pcg; |
|
int s; |
|
|
|
simple_lock(&pc->pc_slock); |
|
|
|
if ((pcg = pc->pc_freeto) == NULL) { |
|
for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL; |
|
pcg = TAILQ_NEXT(pcg, pcg_list)) { |
|
if (pcg->pcg_avail != PCG_NOBJECTS) { |
|
pc->pc_freeto = pcg; |
|
goto have_group; |
|
} |
|
} |
|
|
|
/* |
|
* No empty groups to free the object to. Attempt to |
|
* allocate one. |
|
*/ |
|
simple_unlock(&pc->pc_slock); |
|
s = splvm(); |
|
pcg = pool_get(&pcgpool, PR_NOWAIT); |
|
splx(s); |
|
if (pcg != NULL) { |
|
memset(pcg, 0, sizeof(*pcg)); |
|
simple_lock(&pc->pc_slock); |
|
pc->pc_ngroups++; |
|
TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list); |
|
if (pc->pc_freeto == NULL) |
|
pc->pc_freeto = pcg; |
|
goto have_group; |
|
} |
|
|
|
/* |
|
* Unable to allocate a cache group; destruct the object |
|
* and free it back to the pool. |
|
*/ |
|
pool_cache_destruct_object(pc, object); |
|
return; |
|
} |
|
|
|
have_group: |
|
pc->pc_nitems++; |
|
pcg_put(pcg, object); |
|
|
|
if (pcg->pcg_avail == PCG_NOBJECTS) |
|
pc->pc_freeto = NULL; |
|
|
|
simple_unlock(&pc->pc_slock); |
|
} |
|
|
|
/* |
|
* pool_cache_destruct_object: |
|
* |
|
* Force destruction of an object and its release back into |
|
* the pool. |
|
*/ |
|
void |
|
pool_cache_destruct_object(struct pool_cache *pc, void *object) |
|
{ |
|
|
|
if (pc->pc_dtor != NULL) |
|
(*pc->pc_dtor)(pc->pc_arg, object); |
|
pool_put(pc->pc_pool, object); |
|
} |
|
|
|
/* |
|
* pool_cache_do_invalidate: |
|
* |
|
* This internal function implements pool_cache_invalidate() and |
|
* pool_cache_reclaim(). |
|
*/ |
|
static void |
|
pool_cache_do_invalidate(struct pool_cache *pc, int free_groups, |
|
void (*putit)(struct pool *, void *)) |
|
{ |
|
struct pool_cache_group *pcg, *npcg; |
|
void *object; |
|
int s; |
|
|
|
for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL; |
|
pcg = npcg) { |
|
npcg = TAILQ_NEXT(pcg, pcg_list); |
|
while (pcg->pcg_avail != 0) { |
|
pc->pc_nitems--; |
|
object = pcg_get(pcg); |
|
if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg) |
|
pc->pc_allocfrom = NULL; |
|
if (pc->pc_dtor != NULL) |
|
(*pc->pc_dtor)(pc->pc_arg, object); |
|
(*putit)(pc->pc_pool, object); |
|
} |
|
if (free_groups) { |
|
pc->pc_ngroups--; |
|
TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list); |
|
if (pc->pc_freeto == pcg) |
|
pc->pc_freeto = NULL; |
|
s = splvm(); |
|
pool_put(&pcgpool, pcg); |
|
splx(s); |
|
} |
|
} |
|
} |
|
|
|
/* |
|
* pool_cache_invalidate: |
|
* |
|
* Invalidate a pool cache (destruct and release all of the |
|
* cached objects). |
|
*/ |
|
void |
|
pool_cache_invalidate(struct pool_cache *pc) |
|
{ |
|
|
|
simple_lock(&pc->pc_slock); |
|
pool_cache_do_invalidate(pc, 0, pool_put); |
|
simple_unlock(&pc->pc_slock); |
|
} |
|
|
|
/* |
|
* pool_cache_reclaim: |
|
* |
|
* Reclaim a pool cache for pool_reclaim(). |
|
*/ |
|
static void |
|
pool_cache_reclaim(struct pool_cache *pc) |
|
{ |
|
|
|
simple_lock(&pc->pc_slock); |
|
pool_cache_do_invalidate(pc, 1, pool_do_put); |
|
simple_unlock(&pc->pc_slock); |
|
} |