--- version 1.191, 2012/01/27 19:48:40
+++ version 1.198.2.2, 2013/06/23 06:18:58
|
|
 __KERNEL_RCSID(0, "$NetBSD$");

 #include "opt_ddb.h"
-#include "opt_pool.h"
-#include "opt_poollog.h"
 #include "opt_lockdebug.h"

 #include <sys/param.h>
@@ -45 +43 @@ __KERNEL_RCSID(0, "$NetBSD$");
 #include <sys/proc.h>
 #include <sys/errno.h>
 #include <sys/kernel.h>
-#include <sys/malloc.h>
 #include <sys/vmem.h>
 #include <sys/pool.h>
 #include <sys/syslog.h>
@@ -194 +191 @@ static bool pool_cache_get_slow(pool_cac
 static void	pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
 static void	pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
 static void	pool_cache_invalidate_cpu(pool_cache_t, u_int);
-static void	pool_cache_xcall(pool_cache_t);
+static void	pool_cache_transfer(pool_cache_t);

 static int	pool_catchup(struct pool *);
 static void	pool_prime_page(struct pool *, void *,
@@ -206 +203 @@ static void *pool_allocator_alloc(struct
 static void	pool_allocator_free(struct pool *, void *);

 static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
-	void (*)(const char *, ...));
+	void (*)(const char *, ...) __printflike(1, 2));
 static void pool_print1(struct pool *, const char *,
-	void (*)(const char *, ...));
+	void (*)(const char *, ...) __printflike(1, 2));

 static int pool_chk_page(struct pool *, const char *,
	struct pool_item_header *);

-/*
- * Pool log entry. An array of these is allocated in pool_init().
- */
-struct pool_log {
-	const char	*pl_file;
-	long		pl_line;
-	int		pl_action;
-#define	PRLOG_GET	1
-#define	PRLOG_PUT	2
-	void		*pl_addr;
-};
-
-#ifdef POOL_DIAGNOSTIC
-/* Number of entries in pool log buffers */
-#ifndef POOL_LOGSIZE
-#define	POOL_LOGSIZE	10
-#endif
-
-int pool_logsize = POOL_LOGSIZE;
-
-static inline void
-pr_log(struct pool *pp, void *v, int action, const char *file, long line)
-{
-	int n;
-	struct pool_log *pl;
-
-	if ((pp->pr_roflags & PR_LOGGING) == 0)
-		return;
-
-	if (pp->pr_log == NULL) {
-		if (kmem_map != NULL)
-			pp->pr_log = malloc(
-			    pool_logsize * sizeof(struct pool_log),
-			    M_TEMP, M_NOWAIT | M_ZERO);
-		if (pp->pr_log == NULL)
-			return;
-		pp->pr_curlogentry = 0;
-		pp->pr_logsize = pool_logsize;
-	}
-
-	/*
-	 * Fill in the current entry. Wrap around and overwrite
-	 * the oldest entry if necessary.
-	 */
-	n = pp->pr_curlogentry;
-	pl = &pp->pr_log[n];
-	pl->pl_file = file;
-	pl->pl_line = line;
-	pl->pl_action = action;
-	pl->pl_addr = v;
-	if (++n >= pp->pr_logsize)
-		n = 0;
-	pp->pr_curlogentry = n;
-}
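The removed pr_log() above is a fixed-size ring log: the write index wraps and the newest entry overwrites the oldest. A minimal standalone sketch of the same technique follows; the names (ev_entry, ev_record) are hypothetical, not kernel API.

	#include <stddef.h>

	#define EV_LOGSIZE 10

	struct ev_entry {
		const char	*file;
		long		line;
		int		action;
		void		*addr;
	};

	static struct ev_entry ev_log[EV_LOGSIZE];
	static size_t ev_cur;		/* next slot to overwrite */

	static void
	ev_record(const char *file, long line, int action, void *addr)
	{
		struct ev_entry *e = &ev_log[ev_cur];

		e->file = file;
		e->line = line;
		e->action = action;
		e->addr = addr;
		if (++ev_cur >= EV_LOGSIZE)	/* wrap, overwrite oldest */
			ev_cur = 0;
	}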
|
|
|
-static void
-pr_printlog(struct pool *pp, struct pool_item *pi,
-    void (*pr)(const char *, ...))
-{
-	int i = pp->pr_logsize;
-	int n = pp->pr_curlogentry;
-
-	if (pp->pr_log == NULL)
-		return;
-
-	/*
-	 * Print all entries in this pool's log.
-	 */
-	while (i-- > 0) {
-		struct pool_log *pl = &pp->pr_log[n];
-		if (pl->pl_action != 0) {
-			if (pi == NULL || pi == pl->pl_addr) {
-				(*pr)("\tlog entry %d:\n", i);
-				(*pr)("\t\taction = %s, addr = %p\n",
-				    pl->pl_action == PRLOG_GET ? "get" : "put",
-				    pl->pl_addr);
-				(*pr)("\t\tfile: %s at line %lu\n",
-				    pl->pl_file, pl->pl_line);
-			}
-		}
-		if (++n >= pp->pr_logsize)
-			n = 0;
-	}
-}
-
-static inline void
-pr_enter(struct pool *pp, const char *file, long line)
-{
-
-	if (__predict_false(pp->pr_entered_file != NULL)) {
-		printf("pool %s: reentrancy at file %s line %ld\n",
-		    pp->pr_wchan, file, line);
-		printf("         previous entry at file %s line %ld\n",
-		    pp->pr_entered_file, pp->pr_entered_line);
-		panic("pr_enter");
-	}
-
-	pp->pr_entered_file = file;
-	pp->pr_entered_line = line;
-}
-
-static inline void
-pr_leave(struct pool *pp)
-{
-
-	if (__predict_false(pp->pr_entered_file == NULL)) {
-		printf("pool %s not entered?\n", pp->pr_wchan);
-		panic("pr_leave");
-	}
-
-	pp->pr_entered_file = NULL;
-	pp->pr_entered_line = 0;
-}
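pr_enter()/pr_leave() bracket a critical region and panic on reentry, recording the file and line of the current holder. A minimal userland sketch of that guard, with hypothetical names (struct guarded, guard_enter):

	#include <assert.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct guarded {
		const char	*entered_file;	/* NULL when not entered */
		long		entered_line;
	};

	/* Capture the call site, as the removed diagnostics did. */
	#define GUARD_ENTER(g)	guard_enter((g), __FILE__, __LINE__)

	static void
	guard_enter(struct guarded *g, const char *file, long line)
	{
		if (g->entered_file != NULL) {
			fprintf(stderr, "reentered at %s:%ld (previous %s:%ld)\n",
			    file, line, g->entered_file, g->entered_line);
			abort();
		}
		g->entered_file = file;
		g->entered_line = line;
	}

	static void
	guard_leave(struct guarded *g)
	{
		assert(g->entered_file != NULL);
		g->entered_file = NULL;
		g->entered_line = 0;
	}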
|
|
|
-static inline void
-pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
-{
-
-	if (pp->pr_entered_file != NULL)
-		(*pr)("\n\tcurrently entered from file %s line %ld\n",
-		    pp->pr_entered_file, pp->pr_entered_line);
-}
-#else
-#define	pr_log(pp, v, action, file, line)
-#define	pr_printlog(pp, pi, pr)
-#define	pr_enter(pp, file, line)
-#define	pr_leave(pp)
-#define	pr_enter_check(pp, pr)
-#endif /* POOL_DIAGNOSTIC */
|
|
|
 static inline unsigned int
 pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
	const void *v)
@@ -531 +398 @@ pr_rmpage(struct pool *pp, struct pool_i
 void
 pool_subsystem_init(void)
 {
-	int idx;
 	size_t size;
+	int idx;

 	mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
 	mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
@@ -584 +451 @@ pool_subsystem_init(void)
  * Initialize the given pool resource structure.
  *
  * We export this routine to allow other kernel parts to declare
- * static pools that must be initialized before malloc() is available.
+ * static pools that must be initialized before kmem(9) is available.
  */
 void
 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
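As the comment above notes, pool_init() exists so other kernel code can set up static pools before the general-purpose allocator is up. A sketch of that usage, assuming the full pool_init(9) prototype (wait channel, allocator, and IPL as the trailing arguments, which the hunk truncates) and a hypothetical struct foo:

	struct foo {
		int	f_dummy;	/* hypothetical pooled object */
	};

	static struct pool foo_pool;	/* static: no allocation needed */

	void
	foo_bootstrap(void)
	{

		pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
		    "foopl", &pool_allocator_nointr, IPL_NONE);
	}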
@@ -595 +462 @@ pool_init(struct pool *pp, size_t size, 
 	int off, slack;

 #ifdef DEBUG
+	if (__predict_true(!cold))
+		mutex_enter(&pool_head_lock);
 	/*
	 * Check that the pool hasn't already been initialised and
	 * added to the list of all pools.
@@ -604 +473 @@ pool_init(struct pool *pp, size_t size, 
 			panic("pool_init: pool %s already initialised",
			    wchan);
 	}
-#endif
-#ifdef POOL_DIAGNOSTIC
-	/*
-	 * Always log if POOL_DIAGNOSTIC is defined.
-	 */
-	if (pool_logsize != 0)
-		flags |= PR_LOGGING;
+	if (__predict_true(!cold))
+		mutex_exit(&pool_head_lock);
 #endif

 	if (palloc == NULL)
@@ -760 +623 @@ pool_init(struct pool *pp, size_t size, 
 	pp->pr_nidle = 0;
 	pp->pr_refcnt = 0;

-	pp->pr_log = NULL;
-
-	pp->pr_entered_file = NULL;
-	pp->pr_entered_line = 0;

 	mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
 	cv_init(&pp->pr_cv, wchan);
 	pp->pr_ipl = ipl;
@@ -825 +683 @@ pool_destroy(struct pool *pp)

 #ifdef DIAGNOSTIC
 	if (pp->pr_nout != 0) {
-		pr_printlog(pp, NULL, printf);
 		panic("pool_destroy: pool busy: still out: %u",
		    pp->pr_nout);
 	}
@@ -842 +699 @@ pool_destroy(struct pool *pp)
 	mutex_exit(&pp->pr_lock);

 	pr_pagelist_free(pp, &pq);

-#ifdef POOL_DIAGNOSTIC
-	if (pp->pr_log != NULL) {
-		free(pp->pr_log, M_TEMP);
-		pp->pr_log = NULL;
-	}
-#endif

 	cv_destroy(&pp->pr_cv);
 	mutex_destroy(&pp->pr_lock);
 }
@@ -884 +733 @@ pool_alloc_item_header(struct pool *pp, 
  * Grab an item from the pool.
  */
 void *
-#ifdef POOL_DIAGNOSTIC
-_pool_get(struct pool *pp, int flags, const char *file, long line)
-#else
 pool_get(struct pool *pp, int flags)
-#endif
 {
 	struct pool_item *pi;
 	struct pool_item_header *ph;
@@ -908 +753 @@ pool_get(struct pool *pp, int flags)
 	}

 	mutex_enter(&pp->pr_lock);
-	pr_enter(pp, file, line);

 startover:
 	/*
	 * Check to see if we've reached the hard limit.  If we have,
@@ -918 +761 @@ pool_get(struct pool *pp, int flags)
 	 */
 #ifdef DIAGNOSTIC
 	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
-		pr_leave(pp);
 		mutex_exit(&pp->pr_lock);
 		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
 	}
@@ -930 +772 @@ pool_get(struct pool *pp, int flags)
 		 * back to the pool, unlock, call the hook, re-lock,
		 * and check the hardlimit condition again.
		 */
-		pr_leave(pp);
 		mutex_exit(&pp->pr_lock);
 		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
 		mutex_enter(&pp->pr_lock);
-		pr_enter(pp, file, line);
 		if (pp->pr_nout < pp->pr_hardlimit)
			goto startover;
 	}
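The hunk above keeps the drain-hook dance: unlock, run the hook (which may sleep or free memory back to the pool), relock, and re-test the hard limit, since the state may have changed while the lock was dropped. The same shape in isolation; resource_available() and reclaim_cb() are hypothetical stand-ins for the pool's state and hook:

	bool	resource_available(void);	/* hypothetical predicate */
	void	reclaim_cb(void);		/* hypothetical drain hook */

	static void
	acquire_with_reclaim(kmutex_t *lock)
	{

		mutex_enter(lock);
		while (!resource_available()) {
			/* Drop the lock: the hook may sleep or re-enter. */
			mutex_exit(lock);
			reclaim_cb();
			mutex_enter(lock);
			/* Re-check: others may have consumed the slack. */
		}
		/* ... use the resource, then mutex_exit(lock) ... */
	}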
@@ -945 +785 @@ pool_get(struct pool *pp, int flags)
 		 * it be?
		 */
		pp->pr_flags |= PR_WANTED;
-		pr_leave(pp);
		cv_wait(&pp->pr_cv, &pp->pr_lock);
-		pr_enter(pp, file, line);
		goto startover;
 	}

@@ -961 +799 @@ pool_get(struct pool *pp, int flags)

 		pp->pr_nfail++;

-		pr_leave(pp);
 		mutex_exit(&pp->pr_lock);
 		return (NULL);
 	}
@@ -989 +826 @@ pool_get(struct pool *pp, int flags)
 	 * Release the pool lock, as the back-end page allocator
	 * may block.
	 */
-	pr_leave(pp);
 	error = pool_grow(pp, flags);
-	pr_enter(pp, file, line);
 	if (error != 0) {
 		/*
		 * We were unable to allocate a page or item
@@ -1003 +838 @@ pool_get(struct pool *pp, int flags)
 			goto startover;

 		pp->pr_nfail++;
-		pr_leave(pp);
 		mutex_exit(&pp->pr_lock);
 		return (NULL);
 	}
@@ -1014 +848 @@ pool_get(struct pool *pp, int flags)
 	if (pp->pr_roflags & PR_NOTOUCH) {
 #ifdef DIAGNOSTIC
 		if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
-			pr_leave(pp);
 			mutex_exit(&pp->pr_lock);
 			panic("pool_get: %s: page empty", pp->pr_wchan);
 		}
 #endif
 		v = pr_item_notouch_get(pp, ph);
-#ifdef POOL_DIAGNOSTIC
-		pr_log(pp, v, PRLOG_GET, file, line);
-#endif
 	} else {
 		v = pi = LIST_FIRST(&ph->ph_itemlist);
 		if (__predict_false(v == NULL)) {
-			pr_leave(pp);
 			mutex_exit(&pp->pr_lock);
 			panic("pool_get: %s: page empty", pp->pr_wchan);
 		}
 #ifdef DIAGNOSTIC
 		if (__predict_false(pp->pr_nitems == 0)) {
-			pr_leave(pp);
 			mutex_exit(&pp->pr_lock);
 			printf("pool_get: %s: items on itemlist, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
@@ -1040 +868 @@ pool_get(struct pool *pp, int flags)
 		}
 #endif

-#ifdef POOL_DIAGNOSTIC
-		pr_log(pp, v, PRLOG_GET, file, line);
-#endif

 #ifdef DIAGNOSTIC
 		if (__predict_false(pi->pi_magic != PI_MAGIC)) {
-			pr_printlog(pp, pi, printf);
 			panic("pool_get(%s): free list modified: "
			    "magic=%x; page %p; item addr %p\n",
			    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
@@ -1079 +902 @@ pool_get(struct pool *pp, int flags)
 #ifdef DIAGNOSTIC
 	if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
	    !LIST_EMPTY(&ph->ph_itemlist))) {
-		pr_leave(pp);
 		mutex_exit(&pp->pr_lock);
 		panic("pool_get: %s: nmissing inconsistent",
		    pp->pr_wchan);
@@ -1095 +917 @@ pool_get(struct pool *pp, int flags)
 	}

 	pp->pr_nget++;
-	pr_leave(pp);

 	/*
	 * If we have a low water mark and we are now below that low
@@ -1137 +958 @@ pool_do_put(struct pool *pp, void *v, st
 #endif

 	if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
-		pr_printlog(pp, NULL, printf);
 		panic("pool_put: %s: page header missing", pp->pr_wchan);
 	}

@@ -1226 +1046 @@ pool_do_put(struct pool *pp, void *v, st
 		}
 	}
|
|
-/*
- * Return resource to the pool.
- */
-#ifdef POOL_DIAGNOSTIC
-void
-_pool_put(struct pool *pp, void *v, const char *file, long line)
-{
-	struct pool_pagelist pq;
-
-	LIST_INIT(&pq);
-
-	mutex_enter(&pp->pr_lock);
-	pr_enter(pp, file, line);
-
-	pr_log(pp, v, PRLOG_PUT, file, line);
-
-	pool_do_put(pp, v, &pq);
-
-	pr_leave(pp);
-	mutex_exit(&pp->pr_lock);
-
-	pr_pagelist_free(pp, &pq);
-}
-#undef pool_put
-#endif /* POOL_DIAGNOSTIC */

 void
 pool_put(struct pool *pp, void *v)
 {
@@ -1266 +1060 @@ pool_put(struct pool *pp, void *v)
 	pr_pagelist_free(pp, &pq);
 }
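pool_put() defers the actual page frees: pool_do_put() only collects idle pages onto a local pq list while pr_lock is held, and pr_pagelist_free() hands them to the back-end allocator after the lock is dropped, since the back end may sleep or take other locks. Reconstructed from the removed _pool_put() above minus its diagnostics, the body reduces to:

	void
	pool_put(struct pool *pp, void *v)
	{
		struct pool_pagelist pq;

		LIST_INIT(&pq);
		mutex_enter(&pp->pr_lock);
		pool_do_put(pp, v, &pq);	/* may queue empty pages on pq */
		mutex_exit(&pp->pr_lock);
		pr_pagelist_free(pp, &pq);	/* back-end free, lock dropped */
	}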
|
|
-#ifdef POOL_DIAGNOSTIC
-#define pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
-#endif

 /*
  * pool_grow: grow a pool by a page.
  *
@@ -1514 +1304 @@ pool_sethardlimit(struct pool *pp, int n
 /*
  * Release all complete pages that have not been used recently.
  *
- * Might be called from interrupt context.
+ * Must not be called from interrupt context.
  */
 int
-#ifdef POOL_DIAGNOSTIC
-_pool_reclaim(struct pool *pp, const char *file, long line)
-#else
 pool_reclaim(struct pool *pp)
-#endif
 {
 	struct pool_item_header *ph, *phnext;
 	struct pool_pagelist pq;
@@ -1529 +1315 @@ pool_reclaim(struct pool *pp)
 	bool klock;
 	int rv;

-	if (cpu_intr_p() || cpu_softintr_p()) {
-		KASSERT(pp->pr_ipl != IPL_NONE);
-	}
+	KASSERT(!cpu_intr_p() && !cpu_softintr_p());

 	if (pp->pr_drain_hook != NULL) {
 		/*
@@ -1561 +1345 @@ pool_reclaim(struct pool *pp)
 		}
 		return (0);
 	}
-	pr_enter(pp, file, line);

 	LIST_INIT(&pq);
@@ -1589 +1372 @@ pool_reclaim(struct pool *pp)
 		pr_rmpage(pp, ph, &pq);
 	}

-	pr_leave(pp);
 	mutex_exit(&pp->pr_lock);

 	if (LIST_EMPTY(&pq))
@@ -1607 +1389 @@ pool_reclaim(struct pool *pp)
 }

 /*
- * Drain pools, one at a time.  This is a two stage process;
- * drain_start kicks off a cross call to drain CPU-level caches
- * if the pool has an associated pool_cache.  drain_end waits
- * for those cross calls to finish, and then drains the cache
- * (if any) and pool.
+ * Drain pools, one at a time.  The drained pool is returned within ppp.
  *
  * Note, must never be called from interrupt context.
  */
-void
-pool_drain_start(struct pool **ppp, uint64_t *wp)
+bool
+pool_drain(struct pool **ppp)
 {
+	bool reclaimed;
 	struct pool *pp;

 	KASSERT(!TAILQ_EMPTY(&pool_head));
@@ -1642 +1421 @@ pool_drain_start(struct pool **ppp, uint
 	pp->pr_refcnt++;
 	mutex_exit(&pool_head_lock);

-	/* If there is a pool_cache, drain CPU level caches. */
-	*ppp = pp;
-	if (pp->pr_cache != NULL) {
-		*wp = xc_broadcast(0, (xcfunc_t)pool_cache_xcall,
-		    pp->pr_cache, NULL);
-	}
-}
-
-bool
-pool_drain_end(struct pool *pp, uint64_t where)
-{
-	bool reclaimed;
-
-	if (pp == NULL)
-		return false;
-
-	KASSERT(pp->pr_refcnt > 0);
-
-	/* Wait for remote draining to complete. */
-	if (pp->pr_cache != NULL)
-		xc_wait(where);
-
 	/* Drain the cache (if any) and pool.. */
 	reclaimed = pool_reclaim(pp);

@@ -1673 +1430 @@ pool_drain_end(struct pool *pp, uint64_t
 	cv_broadcast(&pool_busy);
 	mutex_exit(&pool_head_lock);

+	if (ppp != NULL)
+		*ppp = pp;

 	return reclaimed;
 }
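For callers, this change collapses the two-stage drain into one call. A sketch of both calling conventions, based on the signatures in this diff:

	/* Before (1.191): the caller drives both stages itself. */
	static void
	drain_old_style(void)
	{
		struct pool *pp;
		uint64_t where;

		pool_drain_start(&pp, &where);	/* kicks off the cross-calls */
		(void)pool_drain_end(pp, where);/* waits, then reclaims */
	}

	/* After (1.198.2.2): one call; the drained pool comes back via ppp. */
	static void
	drain_new_style(void)
	{
		struct pool *pp;
		bool reclaimed;

		reclaimed = pool_drain(&pp);
		(void)reclaimed;	/* true if pages were reclaimed from pp */
	}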
|
|
 /*
  * Diagnostic helpers.
  */
-void
-pool_print(struct pool *pp, const char *modif)
-{
-
-	pool_print1(pp, modif, printf);
-}

 void
 pool_printall(const char *modif, void (*pr)(const char *, ...))
@@ -1796 +1550 @@ pool_print1(struct pool *pp, const char 
 		goto skip_log;

 	(*pr)("\n");
-	if ((pp->pr_roflags & PR_LOGGING) == 0)
-		(*pr)("\tno log\n");
-	else {
-		pr_printlog(pp, NULL, pr);
-	}

 skip_log:
@@ -1850 +1599 @@ pool_print1(struct pool *pp, const char 
 		}
 	}
 #undef PR_GROUPLIST

-	pr_enter_check(pp, pr);
 }

 static int
@@ -2240 +1987 @@ pool_cache_invalidate_groups(pool_cache_
  * Note: For pool caches that provide constructed objects, there
  * is an assumption that another level of synchronization is occurring
  * between the input to the constructor and the cache invalidation.
+ *
+ * Invalidation is a costly process and should not be called from
+ * interrupt context.
  */
 void
 pool_cache_invalidate(pool_cache_t pc)
 {
-	pcg_t *full, *empty, *part;
-#if 0
 	uint64_t where;
+	pcg_t *full, *empty, *part;

+	KASSERT(!cpu_intr_p() && !cpu_softintr_p());

 	if (ncpu < 2 || !mp_online) {
 		/*
		 * We might be called early enough in the boot process
		 * for the CPU data structures to not be fully initialized.
-		 * In this case, simply gather the local CPU's cache now
-		 * since it will be the only one running.
+		 * In this case, transfer the content of the local CPU's
+		 * cache back into global cache as only this CPU is currently
+		 * running.
		 */
-		pool_cache_xcall(pc);
+		pool_cache_transfer(pc);
 	} else {
 		/*
-		 * Gather all of the CPU-specific caches into the
-		 * global cache.
+		 * Signal all CPUs that they must transfer their local
+		 * cache back to the global pool then wait for the xcall to
+		 * complete.
		 */
-		where = xc_broadcast(0, (xcfunc_t)pool_cache_xcall, pc, NULL);
+		where = xc_broadcast(0, (xcfunc_t)pool_cache_transfer,
+		    pc, NULL);
 		xc_wait(where);
 	}
-#endif

+	/* Empty pool caches, then invalidate objects */
 	mutex_enter(&pc->pc_lock);
 	full = pc->pc_fullgroups;
 	empty = pc->pc_emptygroups;
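The cross-call idiom used here is broadcast-then-wait: xc_broadcast() queues the function on every CPU and returns a ticket, and xc_wait() blocks until all CPUs have run it. In isolation, using the call from this diff:

	static void
	transfer_all_cpus(pool_cache_t pc)
	{
		uint64_t where;

		where = xc_broadcast(0, (xcfunc_t)pool_cache_transfer, pc, NULL);
		xc_wait(where);	/* every CPU has now run the transfer */
	}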
@@ -2296 +2051 @@ pool_cache_invalidate(pool_cache_t pc)
 static void
 pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
 {

 	pool_cache_cpu_t *cc;
 	pcg_t *pcg;
@@ -2507 +2261 @@ pool_cache_get_paddr(pool_cache_t pc, in
 static bool __noinline
 pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
 {
+	struct lwp *l = curlwp;
 	pcg_t *pcg, *cur;
 	uint64_t ncsw;
 	pool_cache_t pc;
@@ -2517 +2272 @@ pool_cache_put_slow(pool_cache_cpu_t *cc
 	pc = cc->cc_cache;
 	pcg = NULL;
 	cc->cc_misses++;
+	ncsw = l->l_ncsw;

 	/*
	 * If there are no empty groups in the cache then allocate one
@@ -2526 +2282 @@ pool_cache_put_slow(pool_cache_cpu_t *cc
 	if (__predict_true(!pool_cache_disable)) {
 		pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
 	}
+	/*
+	 * If pool_get() blocked, then our view of
+	 * the per-CPU data is invalid: retry.
+	 */
+	if (__predict_false(l->l_ncsw != ncsw)) {
+		if (pcg != NULL) {
+			pool_put(pc->pc_pcgpool, pcg);
+		}
+		return true;
+	}
 	if (__predict_true(pcg != NULL)) {
 		pcg->pcg_avail = 0;
 		pcg->pcg_size = pc->pc_pcgsize;
@@ -2534 +2300 @@ pool_cache_put_slow(pool_cache_cpu_t *cc

 	/* Lock the cache. */
 	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
-		ncsw = curlwp->l_ncsw;
 		mutex_enter(&pc->pc_lock);
 		pc->pc_contended++;
@@ -2542 +2307 @@ pool_cache_put_slow(pool_cache_cpu_t *cc
 		 * If we context switched while locking, then our view of
		 * the per-CPU data is invalid: retry.
		 */
-		if (__predict_false(curlwp->l_ncsw != ncsw)) {
+		if (__predict_false(l->l_ncsw != ncsw)) {
 			mutex_exit(&pc->pc_lock);
 			if (pcg != NULL) {
 				pool_put(pc->pc_pcgpool, pcg);
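These hunks rely on a lockless staleness check: snapshot curlwp's context-switch counter before a call that may block, and if the counter has moved afterwards, assume the cached per-CPU pointers are stale and retry. A sketch of that pattern, mirroring the code added above; the wrapper name alloc_group_or_retry() is hypothetical:

	static bool
	alloc_group_or_retry(pool_cache_t pc, pcg_t **pcgp)
	{
		struct lwp *l = curlwp;
		uint64_t ncsw = l->l_ncsw;	/* snapshot before blocking */
		pcg_t *pcg;

		pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
		if (__predict_false(l->l_ncsw != ncsw)) {
			/* We slept or migrated: per-CPU view is stale. */
			if (pcg != NULL)
				pool_put(pc->pc_pcgpool, pcg);
			return true;		/* caller restarts from the top */
		}
		*pcgp = pcg;
		return false;
	}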
@@ -2649 +2414 @@ pool_cache_put_paddr(pool_cache_t pc, vo
 }

 /*
- * pool_cache_xcall:
+ * pool_cache_transfer:
  *
  *	Transfer objects from the per-CPU cache to the global cache.
  *	Run within a cross-call thread.
  */
 static void
-pool_cache_xcall(pool_cache_t pc)
+pool_cache_transfer(pool_cache_t pc)
 {
 	pool_cache_cpu_t *cc;
 	pcg_t *prev, *cur, **list;
@@ -2718 +2483 @@ void	pool_page_free(struct pool *, void 

 #ifdef POOL_SUBPAGE
 struct pool_allocator pool_allocator_kmem_fullpage = {
-	pool_page_alloc, pool_page_free, 0
+	.pa_alloc = pool_page_alloc,
+	.pa_free = pool_page_free,
+	.pa_pagesz = 0
 };
 #else
 struct pool_allocator pool_allocator_kmem = {
@@ -2728 +2495 @@ struct pool_allocator pool_allocator_kme
 };
 #endif
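The initializer change in these hunks is purely syntactic: C99 designated initializers name the fields instead of relying on declaration order, which survives field reordering and documents intent. Both spellings side by side, using the fields from this diff:

	/* Positional (old): meaning depends on member order. */
	struct pool_allocator pa_positional = {
		pool_page_alloc, pool_page_free, 0
	};

	/* Designated (new): each field named; the rest default to zero. */
	struct pool_allocator pa_designated = {
		.pa_alloc = pool_page_alloc,
		.pa_free = pool_page_free,
		.pa_pagesz = 0
	};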
|
|
-void	*pool_page_alloc_nointr(struct pool *, int);
-void	pool_page_free_nointr(struct pool *, void *);

 #ifdef POOL_SUBPAGE
 struct pool_allocator pool_allocator_nointr_fullpage = {
-	pool_page_alloc_nointr, pool_page_free_nointr, 0,
+	.pa_alloc = pool_page_alloc,
+	.pa_free = pool_page_free,
+	.pa_pagesz = 0
 };
 #else
 struct pool_allocator pool_allocator_nointr = {
@@ -2748 +2514 @@ void	*pool_subpage_alloc(struct pool *, 
 void	pool_subpage_free(struct pool *, void *);

 struct pool_allocator pool_allocator_kmem = {
-	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
+	.pa_alloc = pool_subpage_alloc,
+	.pa_free = pool_subpage_free,
+	.pa_pagesz = POOL_SUBPAGE
 };

-void	*pool_subpage_alloc_nointr(struct pool *, int);
-void	pool_subpage_free_nointr(struct pool *, void *);

 struct pool_allocator pool_allocator_nointr = {
-	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
+	.pa_alloc = pool_subpage_alloc,
+	.pa_free = pool_subpage_free,
+	.pa_pagesz = POOL_SUBPAGE
 };
 #endif /* POOL_SUBPAGE */

@@ -2791 +2558 @@ pool_allocator_free(struct pool *pp, voi
 void *
 pool_page_alloc(struct pool *pp, int flags)
 {
-	bool waitok = (flags & PR_WAITOK) ? true : false;
-	int rc;
+	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
 	vmem_addr_t va;
+	int ret;

-	rc = uvm_km_kmem_alloc(kmem_va_arena,
-	    pp->pr_alloc->pa_pagesz,
-	    ((waitok ? VM_SLEEP : VM_NOSLEEP) | VM_INSTANTFIT), &va);
+	ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz,
+	    vflags | VM_INSTANTFIT, &va);

-	if (rc != 0)
-		return NULL;
-	else
-		return (void *)va;
+	return ret ? NULL : (void *)va;
 }
|
void |
 void
@@ -2815 +2578 @@ pool_page_free(struct pool *pp, void *v)
 static void *
 pool_page_alloc_meta(struct pool *pp, int flags)
 {
-	bool waitok = (flags & PR_WAITOK) ? true : false;
-	int rc;
-	vmem_addr_t addr;
+	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
+	vmem_addr_t va;
+	int ret;

-	rc = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
-	    (waitok ? VM_SLEEP : VM_NOSLEEP) | VM_INSTANTFIT, &addr);
+	ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
+	    vflags | VM_INSTANTFIT, &va);

-	if (rc != 0)
-		return 0;
-	else
-		return (void *)addr;
+	return ret ? NULL : (void *)va;
 }
static void |
static void |
pool_page_free_meta(struct pool *pp, void *v) |
pool_page_free_meta(struct pool *pp, void *v) |
{ |
{ |
|
|
vmem_free(kmem_meta_arena, (vmem_addr_t)v, |
vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz); |
pp->pr_alloc->pa_pagesz); |
|
} |
} |
|
|
#ifdef POOL_SUBPAGE |
#ifdef POOL_SUBPAGE |
Line 2850 pool_subpage_free(struct pool *pp, void |
|
Line 2609 pool_subpage_free(struct pool *pp, void |
|
pool_put(&psppool, v); |
pool_put(&psppool, v); |
} |
} |
|
|
/* We don't provide a real nointr allocator. Maybe later. */ |
|
void * |
|
pool_subpage_alloc_nointr(struct pool *pp, int flags) |
|
{ |
|
|
|
return (pool_subpage_alloc(pp, flags)); |
|
} |
|
|
|
void |
|
pool_subpage_free_nointr(struct pool *pp, void *v) |
|
{ |
|
|
|
pool_subpage_free(pp, v); |
|
} |
|
#endif /* POOL_SUBPAGE */ |
#endif /* POOL_SUBPAGE */ |
|
|
 #if defined(DDB)