version 1.165.2.2, 2008/10/19 22:17:28 (old revision; shown first in each pair below)
version 1.195, 2012/05/05 19:15:10 (new revision; shown second)
|
|
/* $NetBSD$ */ |
/* $NetBSD$ */ |
|
|
/*- |
/*- |
* Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008 The NetBSD Foundation, Inc. |
* Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010 |
|
* The NetBSD Foundation, Inc. |
* All rights reserved. |
* All rights reserved. |
* |
* |
* This code is derived from software contributed to The NetBSD Foundation |
* This code is derived from software contributed to The NetBSD Foundation |
|
|
__KERNEL_RCSID(0, "$NetBSD$"); |
__KERNEL_RCSID(0, "$NetBSD$"); |
|
|
#include "opt_ddb.h" |
#include "opt_ddb.h" |
#include "opt_pool.h" |
|
#include "opt_poollog.h" |
|
#include "opt_lockdebug.h" |
#include "opt_lockdebug.h" |
|
|
#include <sys/param.h> |
#include <sys/param.h> |
[unchanged lines omitted: old line 44 / new line 43]
|
#include <sys/proc.h> |
#include <sys/proc.h> |
#include <sys/errno.h> |
#include <sys/errno.h> |
#include <sys/kernel.h> |
#include <sys/kernel.h> |
#include <sys/malloc.h> |
#include <sys/vmem.h> |
#include <sys/pool.h> |
#include <sys/pool.h> |
#include <sys/syslog.h> |
#include <sys/syslog.h> |
#include <sys/debug.h> |
#include <sys/debug.h> |
[unchanged lines omitted: old line 53 / new line 52]
|
#include <sys/cpu.h> |
#include <sys/cpu.h> |
#include <sys/atomic.h> |
#include <sys/atomic.h> |
|
|
#include <uvm/uvm.h> |
#include <uvm/uvm_extern.h> |
|
|
/* |
/* |
* Pool resource management utility. |
* Pool resource management utility. |
[unchanged lines omitted: old line 69 / new line 68]
|
*/ |
*/ |
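
For orientation, here is a minimal sketch of the public pool(9) interface that both revisions implement. The struct, pool variable, name string, and IPL are hypothetical placeholders, not code from either revision; the pool_init() argument order matches the pool_init() calls visible later in this diff, and PR_WAITOK/PR_NOWAIT are the flags used throughout it.

    struct foo {
        int f_val;
    };

    static struct pool foo_pool;

    static void
    foo_pool_setup(void)
    {
        /* args: pool, size, align, align_offset, flags, wchan, palloc, ipl */
        pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
            "foopl", NULL, IPL_NONE);
    }

    static struct foo *
    foo_alloc(void)
    {
        /* PR_WAITOK may sleep until an item is available;
         * PR_NOWAIT returns NULL instead of sleeping. */
        return pool_get(&foo_pool, PR_WAITOK);
    }

    static void
    foo_free(struct foo *fp)
    {
        pool_put(&foo_pool, fp);
    }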
|
|
/* List of all pools */ |
/* List of all pools */ |
TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head); |
static TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head); |
|
|
/* Private pool for page header structures */ |
/* Private pool for page header structures */ |
#define PHPOOL_MAX 8 |
#define PHPOOL_MAX 8 |
[unchanged lines omitted: old line 82 / new line 81]
|
static struct pool psppool; |
static struct pool psppool; |
#endif |
#endif |
|
|
static SLIST_HEAD(, pool_allocator) pa_deferinitq = |
|
SLIST_HEAD_INITIALIZER(pa_deferinitq); |
|
|
|
static void *pool_page_alloc_meta(struct pool *, int); |
static void *pool_page_alloc_meta(struct pool *, int); |
static void pool_page_free_meta(struct pool *, void *); |
static void pool_page_free_meta(struct pool *, void *); |
|
|
/* allocator for pool metadata */ |
/* allocator for pool metadata */ |
struct pool_allocator pool_allocator_meta = { |
struct pool_allocator pool_allocator_meta = { |
pool_page_alloc_meta, pool_page_free_meta, |
.pa_alloc = pool_page_alloc_meta, |
.pa_backingmapptr = &kmem_map, |
.pa_free = pool_page_free_meta, |
|
.pa_pagesz = 0 |
}; |
}; |
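
Both columns above initialize the same structure; the new revision switches from positional to C99 designated initializers. A neutral example of the two equivalent forms follows, with my_alloc/my_free as hypothetical stand-ins; the field order of the positional form is taken from the old column's initializers.

    static void *my_alloc(struct pool *, int);
    static void  my_free(struct pool *, void *);

    /* Positional form (old style): values bind in declaration order. */
    static struct pool_allocator pa_positional = {
        my_alloc, my_free, 0
    };

    /* Designated form (new style): order-independent and self-documenting.
     * Unnamed members are zero-initialized in both forms. */
    static struct pool_allocator pa_designated = {
        .pa_alloc  = my_alloc,
        .pa_free   = my_free,
        .pa_pagesz = 0,    /* 0: pool_init() substitutes PAGE_SIZE */
    };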
|
|
/* # of seconds to retain page after last use */ |
/* # of seconds to retain page after last use */ |
[unchanged lines omitted: old line 104 / new line 101]
|
static kmutex_t pool_head_lock; |
static kmutex_t pool_head_lock; |
static kcondvar_t pool_busy; |
static kcondvar_t pool_busy; |
|
|
|
/* This lock protects initialization of a potentially shared pool allocator */ |
|
static kmutex_t pool_allocator_lock; |
|
|
typedef uint32_t pool_item_bitmap_t; |
typedef uint32_t pool_item_bitmap_t; |
#define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t)) |
#define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t)) |
#define BITMAP_MASK (BITMAP_SIZE - 1) |
#define BITMAP_MASK (BITMAP_SIZE - 1) |
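
These macros split a flat item index into a word index and a bit offset within a PR_NOTOUCH page header's ph_bitmap (sized with howmany(nelem, BITMAP_SIZE) later in this diff). An illustration of the arithmetic, assuming the usual word/bit split; the helper names are hypothetical:

    /*
     * With pool_item_bitmap_t == uint32_t, BITMAP_SIZE is 32 and
     * BITMAP_MASK is 31, so item index idx lands in word idx / 32,
     * bit idx % 32.
     */
    static inline void
    bitmap_mark_free(pool_item_bitmap_t *bitmap, unsigned int idx)
    {
        bitmap[idx / BITMAP_SIZE] |= 1U << (idx & BITMAP_MASK);
    }

    static inline bool
    bitmap_is_free(const pool_item_bitmap_t *bitmap, unsigned int idx)
    {
        return (bitmap[idx / BITMAP_SIZE] & (1U << (idx & BITMAP_MASK))) != 0;
    }
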
[unchanged lines omitted: old line 175 / new line 175]
|
static struct pool cache_pool; |
static struct pool cache_pool; |
static struct pool cache_cpu_pool; |
static struct pool cache_cpu_pool; |
|
|
|
pool_cache_t pnbuf_cache; /* pathname buffer cache */ |
|
|
/* List of all caches. */ |
/* List of all caches. */ |
TAILQ_HEAD(,pool_cache) pool_cache_head = |
TAILQ_HEAD(,pool_cache) pool_cache_head = |
TAILQ_HEAD_INITIALIZER(pool_cache_head); |
TAILQ_HEAD_INITIALIZER(pool_cache_head); |
[unchanged lines omitted: old line 188 / new line 190]
|
void **, paddr_t *, int); |
void **, paddr_t *, int); |
static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t); |
static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t); |
static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *); |
static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *); |
|
static void pool_cache_invalidate_cpu(pool_cache_t, u_int); |
static void pool_cache_xcall(pool_cache_t); |
static void pool_cache_xcall(pool_cache_t); |
|
|
static int pool_catchup(struct pool *); |
static int pool_catchup(struct pool *); |
[unchanged lines omitted: old line 207 / new line 210]
|
static int pool_chk_page(struct pool *, const char *, |
static int pool_chk_page(struct pool *, const char *, |
struct pool_item_header *); |
struct pool_item_header *); |
|
|
/* |
|
* Pool log entry. An array of these is allocated in pool_init(). |
|
*/ |
|
struct pool_log { |
|
const char *pl_file; |
|
long pl_line; |
|
int pl_action; |
|
#define PRLOG_GET 1 |
|
#define PRLOG_PUT 2 |
|
void *pl_addr; |
|
}; |
|
|
|
#ifdef POOL_DIAGNOSTIC |
|
/* Number of entries in pool log buffers */ |
|
#ifndef POOL_LOGSIZE |
|
#define POOL_LOGSIZE 10 |
|
#endif |
|
|
|
int pool_logsize = POOL_LOGSIZE; |
|
|
|
static inline void |
|
pr_log(struct pool *pp, void *v, int action, const char *file, long line) |
|
{ |
|
int n = pp->pr_curlogentry; |
|
struct pool_log *pl; |
|
|
|
if ((pp->pr_roflags & PR_LOGGING) == 0) |
|
return; |
|
|
|
/* |
|
* Fill in the current entry. Wrap around and overwrite |
|
* the oldest entry if necessary. |
|
*/ |
|
pl = &pp->pr_log[n]; |
|
pl->pl_file = file; |
|
pl->pl_line = line; |
|
pl->pl_action = action; |
|
pl->pl_addr = v; |
|
if (++n >= pp->pr_logsize) |
|
n = 0; |
|
pp->pr_curlogentry = n; |
|
} |
|
|
|
static void |
|
pr_printlog(struct pool *pp, struct pool_item *pi, |
|
void (*pr)(const char *, ...)) |
|
{ |
|
int i = pp->pr_logsize; |
|
int n = pp->pr_curlogentry; |
|
|
|
if ((pp->pr_roflags & PR_LOGGING) == 0) |
|
return; |
|
|
|
/* |
|
* Print all entries in this pool's log. |
|
*/ |
|
while (i-- > 0) { |
|
struct pool_log *pl = &pp->pr_log[n]; |
|
if (pl->pl_action != 0) { |
|
if (pi == NULL || pi == pl->pl_addr) { |
|
(*pr)("\tlog entry %d:\n", i); |
|
(*pr)("\t\taction = %s, addr = %p\n", |
|
pl->pl_action == PRLOG_GET ? "get" : "put", |
|
pl->pl_addr); |
|
(*pr)("\t\tfile: %s at line %lu\n", |
|
pl->pl_file, pl->pl_line); |
|
} |
|
} |
|
if (++n >= pp->pr_logsize) |
|
n = 0; |
|
} |
|
} |
|
|
|
static inline void |
|
pr_enter(struct pool *pp, const char *file, long line) |
|
{ |
|
|
|
if (__predict_false(pp->pr_entered_file != NULL)) { |
|
printf("pool %s: reentrancy at file %s line %ld\n", |
|
pp->pr_wchan, file, line); |
|
printf(" previous entry at file %s line %ld\n", |
|
pp->pr_entered_file, pp->pr_entered_line); |
|
panic("pr_enter"); |
|
} |
|
|
|
pp->pr_entered_file = file; |
|
pp->pr_entered_line = line; |
|
} |
|
|
|
static inline void |
|
pr_leave(struct pool *pp) |
|
{ |
|
|
|
if (__predict_false(pp->pr_entered_file == NULL)) { |
|
printf("pool %s not entered?\n", pp->pr_wchan); |
|
panic("pr_leave"); |
|
} |
|
|
|
pp->pr_entered_file = NULL; |
|
pp->pr_entered_line = 0; |
|
} |
|
|
|
static inline void |
|
pr_enter_check(struct pool *pp, void (*pr)(const char *, ...)) |
|
{ |
|
|
|
if (pp->pr_entered_file != NULL) |
|
(*pr)("\n\tcurrently entered from file %s line %ld\n", |
|
pp->pr_entered_file, pp->pr_entered_line); |
|
} |
|
#else |
|
#define pr_log(pp, v, action, file, line) |
|
#define pr_printlog(pp, pi, pr) |
|
#define pr_enter(pp, file, line) |
|
#define pr_leave(pp) |
|
#define pr_enter_check(pp, pr) |
|
#endif /* POOL_DIAGNOSTIC */ |
|
|
|
static inline unsigned int |
static inline unsigned int |
pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph, |
pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph, |
const void *v) |
const void *v) |
[unchanged lines omitted: old line 507 / new line 392]
|
pool_update_curpage(pp); |
pool_update_curpage(pp); |
} |
} |
|
|
static bool |
|
pa_starved_p(struct pool_allocator *pa) |
|
{ |
|
|
|
if (pa->pa_backingmap != NULL) { |
|
return vm_map_starved_p(pa->pa_backingmap); |
|
} |
|
return false; |
|
} |
|
|
|
static int |
|
pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg) |
|
{ |
|
struct pool *pp = obj; |
|
struct pool_allocator *pa = pp->pr_alloc; |
|
|
|
KASSERT(&pp->pr_reclaimerentry == ce); |
|
pool_reclaim(pp); |
|
if (!pa_starved_p(pa)) { |
|
return CALLBACK_CHAIN_ABORT; |
|
} |
|
return CALLBACK_CHAIN_CONTINUE; |
|
} |
|
|
|
static void |
|
pool_reclaim_register(struct pool *pp) |
|
{ |
|
struct vm_map *map = pp->pr_alloc->pa_backingmap; |
|
int s; |
|
|
|
if (map == NULL) { |
|
return; |
|
} |
|
|
|
s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */ |
|
callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback, |
|
&pp->pr_reclaimerentry, pp, pool_reclaim_callback); |
|
splx(s); |
|
} |
|
|
|
static void |
|
pool_reclaim_unregister(struct pool *pp) |
|
{ |
|
struct vm_map *map = pp->pr_alloc->pa_backingmap; |
|
int s; |
|
|
|
if (map == NULL) { |
|
return; |
|
} |
|
|
|
s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */ |
|
callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback, |
|
&pp->pr_reclaimerentry); |
|
splx(s); |
|
} |
|
|
|
static void |
|
pa_reclaim_register(struct pool_allocator *pa) |
|
{ |
|
struct vm_map *map = *pa->pa_backingmapptr; |
|
struct pool *pp; |
|
|
|
KASSERT(pa->pa_backingmap == NULL); |
|
if (map == NULL) { |
|
SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q); |
|
return; |
|
} |
|
pa->pa_backingmap = map; |
|
TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) { |
|
pool_reclaim_register(pp); |
|
} |
|
} |
|
|
|
/* |
/* |
* Initialize all the pools listed in the "pools" link set. |
* Initialize all the pools listed in the "pools" link set. |
*/ |
*/ |
void |
void |
pool_subsystem_init(void) |
pool_subsystem_init(void) |
{ |
{ |
struct pool_allocator *pa; |
size_t size; |
__link_set_decl(pools, struct link_pool_init); |
int idx; |
struct link_pool_init * const *pi; |
|
|
|
mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE); |
mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE); |
|
mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE); |
cv_init(&pool_busy, "poolbusy"); |
cv_init(&pool_busy, "poolbusy"); |
|
|
__link_set_foreach(pi, pools) |
/* |
pool_init((*pi)->pp, (*pi)->size, (*pi)->align, |
* Initialize private page header pool and cache magazine pool if we |
(*pi)->align_offset, (*pi)->flags, (*pi)->wchan, |
* haven't done so yet. |
(*pi)->palloc, (*pi)->ipl); |
*/ |
|
for (idx = 0; idx < PHPOOL_MAX; idx++) { |
while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) { |
static char phpool_names[PHPOOL_MAX][6+1+6+1]; |
KASSERT(pa->pa_backingmapptr != NULL); |
int nelem; |
KASSERT(*pa->pa_backingmapptr != NULL); |
size_t sz; |
SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q); |
|
pa_reclaim_register(pa); |
nelem = PHPOOL_FREELIST_NELEM(idx); |
|
snprintf(phpool_names[idx], sizeof(phpool_names[idx]), |
|
"phpool-%d", nelem); |
|
sz = sizeof(struct pool_item_header); |
|
if (nelem) { |
|
sz = offsetof(struct pool_item_header, |
|
ph_bitmap[howmany(nelem, BITMAP_SIZE)]); |
|
} |
|
pool_init(&phpool[idx], sz, 0, 0, 0, |
|
phpool_names[idx], &pool_allocator_meta, IPL_VM); |
} |
} |
|
#ifdef POOL_SUBPAGE |
|
pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0, |
|
PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM); |
|
#endif |
|
|
|
size = sizeof(pcg_t) + |
|
(PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t); |
|
pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0, |
|
"pcgnormal", &pool_allocator_meta, IPL_VM); |
|
|
|
size = sizeof(pcg_t) + |
|
(PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t); |
|
pool_init(&pcg_large_pool, size, coherency_unit, 0, 0, |
|
"pcglarge", &pool_allocator_meta, IPL_VM); |
|
|
pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit, |
pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit, |
0, 0, "pcache", &pool_allocator_nointr, IPL_NONE); |
0, 0, "pcache", &pool_allocator_meta, IPL_NONE); |
|
|
pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit, |
pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit, |
0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE); |
0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE); |
} |
} |
|
|
/* |
/* |
* Initialize the given pool resource structure. |
* Initialize the given pool resource structure. |
* |
* |
* We export this routine to allow other kernel parts to declare |
* We export this routine to allow other kernel parts to declare |
* static pools that must be initialized before malloc() is available. |
* static pools that must be initialized before kmem(9) is available. |
*/ |
*/ |
void |
void |
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags, |
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags, |
[unchanged lines omitted: old line 638 / new line 473]
|
} |
} |
#endif |
#endif |
|
|
#ifdef POOL_DIAGNOSTIC |
|
/* |
|
* Always log if POOL_DIAGNOSTIC is defined. |
|
*/ |
|
if (pool_logsize != 0) |
|
flags |= PR_LOGGING; |
|
#endif |
|
|
|
if (palloc == NULL) |
if (palloc == NULL) |
palloc = &pool_allocator_kmem; |
palloc = &pool_allocator_kmem; |
#ifdef POOL_SUBPAGE |
#ifdef POOL_SUBPAGE |
[unchanged lines omitted: old line 656 / new line 483]
|
palloc = &pool_allocator_nointr_fullpage; |
palloc = &pool_allocator_nointr_fullpage; |
} |
} |
#endif /* POOL_SUBPAGE */ |
#endif /* POOL_SUBPAGE */ |
if ((palloc->pa_flags & PA_INITIALIZED) == 0) { |
if (!cold) |
|
mutex_enter(&pool_allocator_lock); |
|
if (palloc->pa_refcnt++ == 0) { |
if (palloc->pa_pagesz == 0) |
if (palloc->pa_pagesz == 0) |
palloc->pa_pagesz = PAGE_SIZE; |
palloc->pa_pagesz = PAGE_SIZE; |
|
|
[unchanged lines omitted: old line 665 / new line 494]
|
mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM); |
mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM); |
palloc->pa_pagemask = ~(palloc->pa_pagesz - 1); |
palloc->pa_pagemask = ~(palloc->pa_pagesz - 1); |
palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1; |
palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1; |
|
|
if (palloc->pa_backingmapptr != NULL) { |
|
pa_reclaim_register(palloc); |
|
} |
|
palloc->pa_flags |= PA_INITIALIZED; |
|
} |
} |
|
if (!cold) |
|
mutex_exit(&pool_allocator_lock); |
|
|
if (align == 0) |
if (align == 0) |
align = ALIGN(1); |
align = ALIGN(1); |
[unchanged lines omitted: old line 793 / new line 619]
|
pp->pr_nidle = 0; |
pp->pr_nidle = 0; |
pp->pr_refcnt = 0; |
pp->pr_refcnt = 0; |
|
|
#ifdef POOL_DIAGNOSTIC |
|
if (flags & PR_LOGGING) { |
|
if (kmem_map == NULL || |
|
(pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log), |
|
M_TEMP, M_NOWAIT)) == NULL) |
|
pp->pr_roflags &= ~PR_LOGGING; |
|
pp->pr_curlogentry = 0; |
|
pp->pr_logsize = pool_logsize; |
|
} |
|
#endif |
|
|
|
pp->pr_entered_file = NULL; |
|
pp->pr_entered_line = 0; |
|
|
|
mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl); |
mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl); |
cv_init(&pp->pr_cv, wchan); |
cv_init(&pp->pr_cv, wchan); |
pp->pr_ipl = ipl; |
pp->pr_ipl = ipl; |
|
|
/* |
|
* Initialize private page header pool and cache magazine pool if we |
|
* haven't done so yet. |
|
* XXX LOCKING. |
|
*/ |
|
if (phpool[0].pr_size == 0) { |
|
int idx; |
|
for (idx = 0; idx < PHPOOL_MAX; idx++) { |
|
static char phpool_names[PHPOOL_MAX][6+1+6+1]; |
|
int nelem; |
|
size_t sz; |
|
|
|
nelem = PHPOOL_FREELIST_NELEM(idx); |
|
snprintf(phpool_names[idx], sizeof(phpool_names[idx]), |
|
"phpool-%d", nelem); |
|
sz = sizeof(struct pool_item_header); |
|
if (nelem) { |
|
sz = offsetof(struct pool_item_header, |
|
ph_bitmap[howmany(nelem, BITMAP_SIZE)]); |
|
} |
|
pool_init(&phpool[idx], sz, 0, 0, 0, |
|
phpool_names[idx], &pool_allocator_meta, IPL_VM); |
|
} |
|
#ifdef POOL_SUBPAGE |
|
pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0, |
|
PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM); |
|
#endif |
|
|
|
size = sizeof(pcg_t) + |
|
(PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t); |
|
pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0, |
|
"pcgnormal", &pool_allocator_meta, IPL_VM); |
|
|
|
size = sizeof(pcg_t) + |
|
(PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t); |
|
pool_init(&pcg_large_pool, size, coherency_unit, 0, 0, |
|
"pcglarge", &pool_allocator_meta, IPL_VM); |
|
} |
|
|
|
/* Insert into the list of all pools. */ |
/* Insert into the list of all pools. */ |
if (__predict_true(!cold)) |
if (!cold) |
mutex_enter(&pool_head_lock); |
mutex_enter(&pool_head_lock); |
TAILQ_FOREACH(pp1, &pool_head, pr_poollist) { |
TAILQ_FOREACH(pp1, &pool_head, pr_poollist) { |
if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0) |
if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0) |
[unchanged lines omitted: old line 861 / new line 634]
|
TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist); |
TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist); |
else |
else |
TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist); |
TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist); |
if (__predict_true(!cold)) |
if (!cold) |
mutex_exit(&pool_head_lock); |
mutex_exit(&pool_head_lock); |
|
|
/* Insert this into the list of pools using this allocator. */ |
/* Insert this into the list of pools using this allocator. */ |
if (__predict_true(!cold)) |
if (!cold) |
mutex_enter(&palloc->pa_lock); |
mutex_enter(&palloc->pa_lock); |
TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list); |
TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list); |
if (__predict_true(!cold)) |
if (!cold) |
mutex_exit(&palloc->pa_lock); |
mutex_exit(&palloc->pa_lock); |
|
|
pool_reclaim_register(pp); |
|
} |
} |
|
|
/* |
/* |
[unchanged lines omitted: old line 893 / new line 664]
|
mutex_exit(&pool_head_lock); |
mutex_exit(&pool_head_lock); |
|
|
/* Remove this pool from its allocator's list of pools. */ |
/* Remove this pool from its allocator's list of pools. */ |
pool_reclaim_unregister(pp); |
|
mutex_enter(&pp->pr_alloc->pa_lock); |
mutex_enter(&pp->pr_alloc->pa_lock); |
TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list); |
TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list); |
mutex_exit(&pp->pr_alloc->pa_lock); |
mutex_exit(&pp->pr_alloc->pa_lock); |
|
|
|
mutex_enter(&pool_allocator_lock); |
|
if (--pp->pr_alloc->pa_refcnt == 0) |
|
mutex_destroy(&pp->pr_alloc->pa_lock); |
|
mutex_exit(&pool_allocator_lock); |
|
|
mutex_enter(&pp->pr_lock); |
mutex_enter(&pp->pr_lock); |
|
|
KASSERT(pp->pr_cache == NULL); |
KASSERT(pp->pr_cache == NULL); |
|
|
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
if (pp->pr_nout != 0) { |
if (pp->pr_nout != 0) { |
pr_printlog(pp, NULL, printf); |
|
panic("pool_destroy: pool busy: still out: %u", |
panic("pool_destroy: pool busy: still out: %u", |
pp->pr_nout); |
pp->pr_nout); |
} |
} |
[unchanged lines omitted: old line 921 / new line 695]
|
mutex_exit(&pp->pr_lock); |
mutex_exit(&pp->pr_lock); |
|
|
pr_pagelist_free(pp, &pq); |
pr_pagelist_free(pp, &pq); |
|
|
#ifdef POOL_DIAGNOSTIC |
|
if ((pp->pr_roflags & PR_LOGGING) != 0) |
|
free(pp->pr_log, M_TEMP); |
|
#endif |
|
|
|
cv_destroy(&pp->pr_cv); |
cv_destroy(&pp->pr_cv); |
mutex_destroy(&pp->pr_lock); |
mutex_destroy(&pp->pr_lock); |
} |
} |
[unchanged lines omitted: old line 961 / new line 729]
|
* Grab an item from the pool. |
* Grab an item from the pool. |
*/ |
*/ |
void * |
void * |
#ifdef POOL_DIAGNOSTIC |
|
_pool_get(struct pool *pp, int flags, const char *file, long line) |
|
#else |
|
pool_get(struct pool *pp, int flags) |
pool_get(struct pool *pp, int flags) |
#endif |
|
{ |
{ |
struct pool_item *pi; |
struct pool_item *pi; |
struct pool_item_header *ph; |
struct pool_item_header *ph; |
void *v; |
void *v; |
|
|
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
if (__predict_false(pp->pr_itemsperpage == 0)) |
if (pp->pr_itemsperpage == 0) |
panic("pool_get: pool %p: pr_itemsperpage is zero, " |
panic("pool_get: pool '%s': pr_itemsperpage is zero, " |
"pool not initialized?", pp); |
"pool not initialized?", pp->pr_wchan); |
if (__predict_false(curlwp == NULL && doing_shutdown == 0 && |
if ((cpu_intr_p() || cpu_softintr_p()) && pp->pr_ipl == IPL_NONE && |
(flags & PR_WAITOK) != 0)) |
!cold && panicstr == NULL) |
panic("pool_get: %s: must have NOWAIT", pp->pr_wchan); |
panic("pool '%s' is IPL_NONE, but called from " |
|
"interrupt context\n", pp->pr_wchan); |
#endif /* DIAGNOSTIC */ |
#endif |
#ifdef LOCKDEBUG |
|
if (flags & PR_WAITOK) { |
if (flags & PR_WAITOK) { |
ASSERT_SLEEPABLE(); |
ASSERT_SLEEPABLE(); |
} |
} |
#endif |
|
|
|
mutex_enter(&pp->pr_lock); |
mutex_enter(&pp->pr_lock); |
pr_enter(pp, file, line); |
|
|
|
startover: |
startover: |
/* |
/* |
* Check to see if we've reached the hard limit. If we have, |
* Check to see if we've reached the hard limit. If we have, |
[unchanged lines omitted: old line 997 / new line 757]
|
*/ |
*/ |
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) { |
if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) { |
pr_leave(pp); |
|
mutex_exit(&pp->pr_lock); |
mutex_exit(&pp->pr_lock); |
panic("pool_get: %s: crossed hard limit", pp->pr_wchan); |
panic("pool_get: %s: crossed hard limit", pp->pr_wchan); |
} |
} |
[unchanged lines omitted: old line 1009 / new line 768]
|
* back to the pool, unlock, call the hook, re-lock, |
* back to the pool, unlock, call the hook, re-lock, |
* and check the hardlimit condition again. |
* and check the hardlimit condition again. |
*/ |
*/ |
pr_leave(pp); |
|
mutex_exit(&pp->pr_lock); |
mutex_exit(&pp->pr_lock); |
(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags); |
(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags); |
mutex_enter(&pp->pr_lock); |
mutex_enter(&pp->pr_lock); |
pr_enter(pp, file, line); |
|
if (pp->pr_nout < pp->pr_hardlimit) |
if (pp->pr_nout < pp->pr_hardlimit) |
goto startover; |
goto startover; |
} |
} |
[unchanged lines omitted: old line 1024 / new line 781]
|
* it be? |
* it be? |
*/ |
*/ |
pp->pr_flags |= PR_WANTED; |
pp->pr_flags |= PR_WANTED; |
pr_leave(pp); |
|
cv_wait(&pp->pr_cv, &pp->pr_lock); |
cv_wait(&pp->pr_cv, &pp->pr_lock); |
pr_enter(pp, file, line); |
|
goto startover; |
goto startover; |
} |
} |
|
|
[unchanged lines omitted: old line 1040 / new line 795]
|
|
|
pp->pr_nfail++; |
pp->pr_nfail++; |
|
|
pr_leave(pp); |
|
mutex_exit(&pp->pr_lock); |
mutex_exit(&pp->pr_lock); |
return (NULL); |
return (NULL); |
} |
} |
[unchanged lines omitted: old line 1068 / new line 822]
|
* Release the pool lock, as the back-end page allocator |
* Release the pool lock, as the back-end page allocator |
* may block. |
* may block. |
*/ |
*/ |
pr_leave(pp); |
|
error = pool_grow(pp, flags); |
error = pool_grow(pp, flags); |
pr_enter(pp, file, line); |
|
if (error != 0) { |
if (error != 0) { |
/* |
/* |
* We were unable to allocate a page or item |
* We were unable to allocate a page or item |
[unchanged lines omitted: old line 1082 / new line 834]
|
goto startover; |
goto startover; |
|
|
pp->pr_nfail++; |
pp->pr_nfail++; |
pr_leave(pp); |
|
mutex_exit(&pp->pr_lock); |
mutex_exit(&pp->pr_lock); |
return (NULL); |
return (NULL); |
} |
} |
[unchanged lines omitted: old line 1093 / new line 844]
|
if (pp->pr_roflags & PR_NOTOUCH) { |
if (pp->pr_roflags & PR_NOTOUCH) { |
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) { |
if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) { |
pr_leave(pp); |
|
mutex_exit(&pp->pr_lock); |
mutex_exit(&pp->pr_lock); |
panic("pool_get: %s: page empty", pp->pr_wchan); |
panic("pool_get: %s: page empty", pp->pr_wchan); |
} |
} |
#endif |
#endif |
v = pr_item_notouch_get(pp, ph); |
v = pr_item_notouch_get(pp, ph); |
#ifdef POOL_DIAGNOSTIC |
|
pr_log(pp, v, PRLOG_GET, file, line); |
|
#endif |
|
} else { |
} else { |
v = pi = LIST_FIRST(&ph->ph_itemlist); |
v = pi = LIST_FIRST(&ph->ph_itemlist); |
if (__predict_false(v == NULL)) { |
if (__predict_false(v == NULL)) { |
pr_leave(pp); |
|
mutex_exit(&pp->pr_lock); |
mutex_exit(&pp->pr_lock); |
panic("pool_get: %s: page empty", pp->pr_wchan); |
panic("pool_get: %s: page empty", pp->pr_wchan); |
} |
} |
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
if (__predict_false(pp->pr_nitems == 0)) { |
if (__predict_false(pp->pr_nitems == 0)) { |
pr_leave(pp); |
|
mutex_exit(&pp->pr_lock); |
mutex_exit(&pp->pr_lock); |
printf("pool_get: %s: items on itemlist, nitems %u\n", |
printf("pool_get: %s: items on itemlist, nitems %u\n", |
pp->pr_wchan, pp->pr_nitems); |
pp->pr_wchan, pp->pr_nitems); |
[unchanged lines omitted: old line 1119 / new line 864]
|
} |
} |
#endif |
#endif |
|
|
#ifdef POOL_DIAGNOSTIC |
|
pr_log(pp, v, PRLOG_GET, file, line); |
|
#endif |
|
|
|
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
if (__predict_false(pi->pi_magic != PI_MAGIC)) { |
if (__predict_false(pi->pi_magic != PI_MAGIC)) { |
pr_printlog(pp, pi, printf); |
|
panic("pool_get(%s): free list modified: " |
panic("pool_get(%s): free list modified: " |
"magic=%x; page %p; item addr %p\n", |
"magic=%x; page %p; item addr %p\n", |
pp->pr_wchan, pi->pi_magic, ph->ph_page, pi); |
pp->pr_wchan, pi->pi_magic, ph->ph_page, pi); |
[unchanged lines omitted: old line 1158 / new line 898]
|
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 && |
if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 && |
!LIST_EMPTY(&ph->ph_itemlist))) { |
!LIST_EMPTY(&ph->ph_itemlist))) { |
pr_leave(pp); |
|
mutex_exit(&pp->pr_lock); |
mutex_exit(&pp->pr_lock); |
panic("pool_get: %s: nmissing inconsistent", |
panic("pool_get: %s: nmissing inconsistent", |
pp->pr_wchan); |
pp->pr_wchan); |
[unchanged lines omitted: old line 1174 / new line 913]
|
} |
} |
|
|
pp->pr_nget++; |
pp->pr_nget++; |
pr_leave(pp); |
|
|
|
/* |
/* |
* If we have a low water mark and we are now below that low |
* If we have a low water mark and we are now below that low |
[unchanged lines omitted: old line 1216 / new line 954]
|
#endif |
#endif |
|
|
if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) { |
if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) { |
pr_printlog(pp, NULL, printf); |
|
panic("pool_put: %s: page header missing", pp->pr_wchan); |
panic("pool_put: %s: page header missing", pp->pr_wchan); |
} |
} |
|
|
[unchanged lines omitted: old line 1305 / new line 1042]
|
} |
} |
} |
} |
|
|
/* |
|
* Return resource to the pool. |
|
*/ |
|
#ifdef POOL_DIAGNOSTIC |
|
void |
|
_pool_put(struct pool *pp, void *v, const char *file, long line) |
|
{ |
|
struct pool_pagelist pq; |
|
|
|
LIST_INIT(&pq); |
|
|
|
mutex_enter(&pp->pr_lock); |
|
pr_enter(pp, file, line); |
|
|
|
pr_log(pp, v, PRLOG_PUT, file, line); |
|
|
|
pool_do_put(pp, v, &pq); |
|
|
|
pr_leave(pp); |
|
mutex_exit(&pp->pr_lock); |
|
|
|
pr_pagelist_free(pp, &pq); |
|
} |
|
#undef pool_put |
|
#endif /* POOL_DIAGNOSTIC */ |
|
|
|
void |
void |
pool_put(struct pool *pp, void *v) |
pool_put(struct pool *pp, void *v) |
{ |
{ |
[unchanged lines omitted: old line 1345 / new line 1056]
|
pr_pagelist_free(pp, &pq); |
pr_pagelist_free(pp, &pq); |
} |
} |
|
|
#ifdef POOL_DIAGNOSTIC |
|
#define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__) |
|
#endif |
|
|
|
/* |
/* |
* pool_grow: grow a pool by a page. |
* pool_grow: grow a pool by a page. |
* |
* |
[unchanged lines omitted: old line 1592 / new line 1299]
|
|
|
/* |
/* |
* Release all complete pages that have not been used recently. |
* Release all complete pages that have not been used recently. |
|
* |
|
* Might be called from interrupt context. |
*/ |
*/ |
int |
int |
#ifdef POOL_DIAGNOSTIC |
|
_pool_reclaim(struct pool *pp, const char *file, long line) |
|
#else |
|
pool_reclaim(struct pool *pp) |
pool_reclaim(struct pool *pp) |
#endif |
|
{ |
{ |
struct pool_item_header *ph, *phnext; |
struct pool_item_header *ph, *phnext; |
struct pool_pagelist pq; |
struct pool_pagelist pq; |
[unchanged lines omitted: old line 1606 / new line 1311]
|
bool klock; |
bool klock; |
int rv; |
int rv; |
|
|
|
if (cpu_intr_p() || cpu_softintr_p()) { |
|
KASSERT(pp->pr_ipl != IPL_NONE); |
|
} |
|
|
if (pp->pr_drain_hook != NULL) { |
if (pp->pr_drain_hook != NULL) { |
/* |
/* |
* The drain hook must be called with the pool unlocked. |
* The drain hook must be called with the pool unlocked. |
[unchanged lines omitted: old line 1634 / new line 1343]
|
} |
} |
return (0); |
return (0); |
} |
} |
pr_enter(pp, file, line); |
|
|
|
LIST_INIT(&pq); |
LIST_INIT(&pq); |
|
|
[unchanged lines omitted: old line 1648 / new line 1356]
|
break; |
break; |
|
|
KASSERT(ph->ph_nmissing == 0); |
KASSERT(ph->ph_nmissing == 0); |
if (curtime - ph->ph_time < pool_inactive_time |
if (curtime - ph->ph_time < pool_inactive_time) |
&& !pa_starved_p(pp->pr_alloc)) |
|
continue; |
continue; |
|
|
/* |
/* |
[unchanged lines omitted: old line 1663 / new line 1370]
|
pr_rmpage(pp, ph, &pq); |
pr_rmpage(pp, ph, &pq); |
} |
} |
|
|
pr_leave(pp); |
|
mutex_exit(&pp->pr_lock); |
mutex_exit(&pp->pr_lock); |
|
|
if (LIST_EMPTY(&pq)) |
if (LIST_EMPTY(&pq)) |
[unchanged lines omitted: old line 1724 / new line 1430]
|
} |
} |
} |
} |
|
|
void |
bool |
pool_drain_end(struct pool *pp, uint64_t where) |
pool_drain_end(struct pool *pp, uint64_t where) |
{ |
{ |
|
bool reclaimed; |
|
|
if (pp == NULL) |
if (pp == NULL) |
return; |
return false; |
|
|
KASSERT(pp->pr_refcnt > 0); |
KASSERT(pp->pr_refcnt > 0); |
|
|
[unchanged lines omitted: old line 1738 / new line 1445]
|
xc_wait(where); |
xc_wait(where); |
|
|
/* Drain the cache (if any) and pool.. */ |
/* Drain the cache (if any) and pool.. */ |
pool_reclaim(pp); |
reclaimed = pool_reclaim(pp); |
|
|
/* Finally, unlock the pool. */ |
/* Finally, unlock the pool. */ |
mutex_enter(&pool_head_lock); |
mutex_enter(&pool_head_lock); |
pp->pr_refcnt--; |
pp->pr_refcnt--; |
cv_broadcast(&pool_busy); |
cv_broadcast(&pool_busy); |
mutex_exit(&pool_head_lock); |
mutex_exit(&pool_head_lock); |
|
|
|
return reclaimed; |
} |
} |
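
The pool_drain_start()/pool_drain_end() pair shown above is a two-phase protocol: start picks a pool, takes a reference, and broadcasts a cross-call to gather per-CPU caches; end waits on the cross-call ticket, reclaims, and drops the reference. A caller-side sketch against the new (v1.195) signatures; the exact pool_drain_start() prototype is inferred from its truncated declaration earlier in this diff, and the function name here is hypothetical:

    static bool
    drain_one_pool(void)
    {
        struct pool *pp = NULL;
        uint64_t where = 0;

        pool_drain_start(&pp, &where);    /* phase 1: choose pool, xcall */
        return pool_drain_end(pp, where); /* phase 2: wait, then reclaim */
    }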
|
|
/* |
/* |
* Diagnostic helpers. |
* Diagnostic helpers. |
*/ |
*/ |
void |
|
pool_print(struct pool *pp, const char *modif) |
|
{ |
|
|
|
pool_print1(pp, modif, printf); |
|
} |
|
|
|
void |
void |
pool_printall(const char *modif, void (*pr)(const char *, ...)) |
pool_printall(const char *modif, void (*pr)(const char *, ...)) |
[unchanged lines omitted: old line 1867 / new line 1570]
|
goto skip_log; |
goto skip_log; |
|
|
(*pr)("\n"); |
(*pr)("\n"); |
if ((pp->pr_roflags & PR_LOGGING) == 0) |
|
(*pr)("\tno log\n"); |
|
else { |
|
pr_printlog(pp, NULL, pr); |
|
} |
|
|
|
skip_log: |
skip_log: |
|
|
[unchanged lines omitted: old line 1893 / new line 1591]
|
if (pc != NULL) { |
if (pc != NULL) { |
cpuhit = 0; |
cpuhit = 0; |
cpumiss = 0; |
cpumiss = 0; |
for (i = 0; i < MAXCPUS; i++) { |
for (i = 0; i < __arraycount(pc->pc_cpus); i++) { |
if ((cc = pc->pc_cpus[i]) == NULL) |
if ((cc = pc->pc_cpus[i]) == NULL) |
continue; |
continue; |
cpuhit += cc->cc_hits; |
cpuhit += cc->cc_hits; |
[unchanged lines omitted: old line 1921 / new line 1619]
|
} |
} |
} |
} |
#undef PR_GROUPLIST |
#undef PR_GROUPLIST |
|
|
pr_enter_check(pp, pr); |
|
} |
} |
|
|
static int |
static int |
[unchanged lines omitted: old line 2128 / new line 1824]
|
void |
void |
pool_cache_destroy(pool_cache_t pc) |
pool_cache_destroy(pool_cache_t pc) |
{ |
{ |
|
|
|
pool_cache_bootstrap_destroy(pc); |
|
pool_put(&cache_pool, pc); |
|
} |
|
|
|
/* |
|
* pool_cache_bootstrap_destroy: |
|
* |
|
* Destroy a pool cache. |
|
*/ |
|
void |
|
pool_cache_bootstrap_destroy(pool_cache_t pc) |
|
{ |
struct pool *pp = &pc->pc_pool; |
struct pool *pp = &pc->pc_pool; |
pool_cache_cpu_t *cc; |
u_int i; |
pcg_t *pcg; |
|
int i; |
|
|
|
/* Remove it from the global list. */ |
/* Remove it from the global list. */ |
mutex_enter(&pool_head_lock); |
mutex_enter(&pool_head_lock); |
[unchanged lines omitted: old line 2149 / new line 1856]
|
mutex_exit(&pp->pr_lock); |
mutex_exit(&pp->pr_lock); |
|
|
/* Destroy per-CPU data */ |
/* Destroy per-CPU data */ |
for (i = 0; i < MAXCPUS; i++) { |
for (i = 0; i < __arraycount(pc->pc_cpus); i++) |
if ((cc = pc->pc_cpus[i]) == NULL) |
pool_cache_invalidate_cpu(pc, i); |
continue; |
|
if ((pcg = cc->cc_current) != &pcg_dummy) { |
|
pcg->pcg_next = NULL; |
|
pool_cache_invalidate_groups(pc, pcg); |
|
} |
|
if ((pcg = cc->cc_previous) != &pcg_dummy) { |
|
pcg->pcg_next = NULL; |
|
pool_cache_invalidate_groups(pc, pcg); |
|
} |
|
if (cc != &pc->pc_cpu0) |
|
pool_put(&cache_cpu_pool, cc); |
|
} |
|
|
|
/* Finally, destroy it. */ |
/* Finally, destroy it. */ |
mutex_destroy(&pc->pc_lock); |
mutex_destroy(&pc->pc_lock); |
pool_destroy(pp); |
pool_destroy(pp); |
pool_put(&cache_pool, pc); |
|
} |
} |
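
For reference, the constructed-object counterpart of the plain pool API is the pool_cache(9) lifecycle. A minimal sketch follows; the struct, cache name, and NULL ctor/dtor/arg are placeholders, and the ten-argument pool_cache_init() prototype is assumed from the pool_cache(9) interface rather than shown in this diff (only coherency_unit and the get/put/destroy entry points appear here):

    struct bar {
        int b_val;
    };

    static pool_cache_t bar_cache;

    static void
    bar_cache_example(void)
    {
        struct bar *b;

        /* args: size, align, align_offset, flags, wchan, palloc, ipl,
         * ctor, dtor, arg */
        bar_cache = pool_cache_init(sizeof(struct bar), coherency_unit,
            0, 0, "barcache", NULL, IPL_NONE, NULL, NULL, NULL);

        b = pool_cache_get(bar_cache, PR_WAITOK);
        /* ... use b ... */
        pool_cache_put(bar_cache, b);

        pool_cache_destroy(bar_cache);
    }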
|
|
/* |
/* |
[unchanged lines omitted: old line 2183 / new line 1877]
|
|
|
index = ci->ci_index; |
index = ci->ci_index; |
|
|
KASSERT(index < MAXCPUS); |
KASSERT(index < __arraycount(pc->pc_cpus)); |
|
|
if ((cc = pc->pc_cpus[index]) != NULL) { |
if ((cc = pc->pc_cpus[index]) != NULL) { |
KASSERT(cc->cc_cpuindex == index); |
KASSERT(cc->cc_cpuindex == index); |
[unchanged lines omitted: old line 2309 / new line 2003]
|
* |
* |
* Invalidate a pool cache (destruct and release all of the |
* Invalidate a pool cache (destruct and release all of the |
* cached objects). Does not reclaim objects from the pool. |
* cached objects). Does not reclaim objects from the pool. |
|
* |
|
* Note: For pool caches that provide constructed objects, there |
|
* is an assumption that another level of synchronization is occurring |
|
* between the input to the constructor and the cache invalidation. |
*/ |
*/ |
void |
void |
pool_cache_invalidate(pool_cache_t pc) |
pool_cache_invalidate(pool_cache_t pc) |
{ |
{ |
pcg_t *full, *empty, *part; |
pcg_t *full, *empty, *part; |
|
#if 0 |
|
uint64_t where; |
|
|
|
if (ncpu < 2 || !mp_online) { |
|
/* |
|
* We might be called early enough in the boot process |
|
* for the CPU data structures to not be fully initialized. |
|
* In this case, simply gather the local CPU's cache now |
|
* since it will be the only one running. |
|
*/ |
|
pool_cache_xcall(pc); |
|
} else { |
|
/* |
|
* Gather all of the CPU-specific caches into the |
|
* global cache. |
|
*/ |
|
where = xc_broadcast(0, (xcfunc_t)pool_cache_xcall, pc, NULL); |
|
xc_wait(where); |
|
} |
|
#endif |
mutex_enter(&pc->pc_lock); |
mutex_enter(&pc->pc_lock); |
full = pc->pc_fullgroups; |
full = pc->pc_fullgroups; |
empty = pc->pc_emptygroups; |
empty = pc->pc_emptygroups; |
[unchanged lines omitted: old line 2332 / new line 2049]
|
pool_cache_invalidate_groups(pc, part); |
pool_cache_invalidate_groups(pc, part); |
} |
} |
|
|
|
/* |
|
* pool_cache_invalidate_cpu: |
|
* |
|
* Invalidate all CPU-bound cached objects in pool cache, the CPU being |
|
* identified by its associated index. |
|
* It is caller's responsibility to ensure that no operation is |
|
* taking place on this pool cache while doing this invalidation. |
|
* WARNING: as no inter-CPU locking is enforced, trying to invalidate |
|
* pool cached objects from a CPU different from the one currently running |
|
* may result in an undefined behaviour. |
|
*/ |
|
static void |
|
pool_cache_invalidate_cpu(pool_cache_t pc, u_int index) |
|
{ |
|
pool_cache_cpu_t *cc; |
|
pcg_t *pcg; |
|
|
|
if ((cc = pc->pc_cpus[index]) == NULL) |
|
return; |
|
|
|
if ((pcg = cc->cc_current) != &pcg_dummy) { |
|
pcg->pcg_next = NULL; |
|
pool_cache_invalidate_groups(pc, pcg); |
|
} |
|
if ((pcg = cc->cc_previous) != &pcg_dummy) { |
|
pcg->pcg_next = NULL; |
|
pool_cache_invalidate_groups(pc, pcg); |
|
} |
|
if (cc != &pc->pc_cpu0) |
|
pool_put(&cache_cpu_pool, cc); |
|
|
|
} |
|
|
void |
void |
pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg) |
pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg) |
{ |
{ |
[unchanged lines omitted: old line 2465 / new line 2215]
|
void *object; |
void *object; |
int s; |
int s; |
|
|
#ifdef LOCKDEBUG |
KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) || |
|
(pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL), |
|
"pool '%s' is IPL_NONE, but called from interrupt context\n", |
|
pc->pc_pool.pr_wchan); |
|
|
if (flags & PR_WAITOK) { |
if (flags & PR_WAITOK) { |
ASSERT_SLEEPABLE(); |
ASSERT_SLEEPABLE(); |
} |
} |
#endif |
|
|
|
/* Lock out interrupts and disable preemption. */ |
/* Lock out interrupts and disable preemption. */ |
s = splvm(); |
s = splvm(); |
[unchanged lines omitted: old line 2528 / new line 2281]
|
KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size); |
KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size); |
|
|
pc = cc->cc_cache; |
pc = cc->cc_cache; |
|
pcg = NULL; |
cc->cc_misses++; |
cc->cc_misses++; |
|
|
|
/* |
|
* If there are no empty groups in the cache then allocate one |
|
* while still unlocked. |
|
*/ |
|
if (__predict_false(pc->pc_emptygroups == NULL)) { |
|
if (__predict_true(!pool_cache_disable)) { |
|
pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT); |
|
} |
|
if (__predict_true(pcg != NULL)) { |
|
pcg->pcg_avail = 0; |
|
pcg->pcg_size = pc->pc_pcgsize; |
|
} |
|
} |
|
|
/* Lock the cache. */ |
/* Lock the cache. */ |
if (__predict_false(!mutex_tryenter(&pc->pc_lock))) { |
if (__predict_false(!mutex_tryenter(&pc->pc_lock))) { |
ncsw = curlwp->l_ncsw; |
ncsw = curlwp->l_ncsw; |
[unchanged lines omitted: old line 2542 / new line 2310]
|
*/ |
*/ |
if (__predict_false(curlwp->l_ncsw != ncsw)) { |
if (__predict_false(curlwp->l_ncsw != ncsw)) { |
mutex_exit(&pc->pc_lock); |
mutex_exit(&pc->pc_lock); |
|
if (pcg != NULL) { |
|
pool_put(pc->pc_pcgpool, pcg); |
|
} |
return true; |
return true; |
} |
} |
} |
} |
|
|
/* If there are no empty groups in the cache then allocate one. */ |
/* If there are no empty groups in the cache then allocate one. */ |
if (__predict_false((pcg = pc->pc_emptygroups) == NULL)) { |
if (pcg == NULL && pc->pc_emptygroups != NULL) { |
if (__predict_true(!pool_cache_disable)) { |
pcg = pc->pc_emptygroups; |
pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT); |
|
} |
|
if (__predict_true(pcg != NULL)) { |
|
pcg->pcg_avail = 0; |
|
pcg->pcg_size = pc->pc_pcgsize; |
|
} |
|
} else { |
|
pc->pc_emptygroups = pcg->pcg_next; |
pc->pc_emptygroups = pcg->pcg_next; |
pc->pc_nempty--; |
pc->pc_nempty--; |
} |
} |
[unchanged lines omitted: old line 2610 / new line 2374]
|
pcg_t *pcg; |
pcg_t *pcg; |
int s; |
int s; |
|
|
|
KASSERT(object != NULL); |
FREECHECK_IN(&pc->pc_freecheck, object); |
FREECHECK_IN(&pc->pc_freecheck, object); |
|
|
/* Lock out interrupts and disable preemption. */ |
/* Lock out interrupts and disable preemption. */ |
[unchanged lines omitted: old line 2719 / new line 2484]
|
|
|
#ifdef POOL_SUBPAGE |
#ifdef POOL_SUBPAGE |
struct pool_allocator pool_allocator_kmem_fullpage = { |
struct pool_allocator pool_allocator_kmem_fullpage = { |
pool_page_alloc, pool_page_free, 0, |
.pa_alloc = pool_page_alloc, |
.pa_backingmapptr = &kmem_map, |
.pa_free = pool_page_free, |
|
.pa_pagesz = 0 |
}; |
}; |
#else |
#else |
struct pool_allocator pool_allocator_kmem = { |
struct pool_allocator pool_allocator_kmem = { |
pool_page_alloc, pool_page_free, 0, |
.pa_alloc = pool_page_alloc, |
.pa_backingmapptr = &kmem_map, |
.pa_free = pool_page_free, |
|
.pa_pagesz = 0 |
}; |
}; |
#endif |
#endif |
|
|
void *pool_page_alloc_nointr(struct pool *, int); |
|
void pool_page_free_nointr(struct pool *, void *); |
|
|
|
#ifdef POOL_SUBPAGE |
#ifdef POOL_SUBPAGE |
struct pool_allocator pool_allocator_nointr_fullpage = { |
struct pool_allocator pool_allocator_nointr_fullpage = { |
pool_page_alloc_nointr, pool_page_free_nointr, 0, |
.pa_alloc = pool_page_alloc, |
.pa_backingmapptr = &kernel_map, |
.pa_free = pool_page_free, |
|
.pa_pagesz = 0 |
}; |
}; |
#else |
#else |
struct pool_allocator pool_allocator_nointr = { |
struct pool_allocator pool_allocator_nointr = { |
pool_page_alloc_nointr, pool_page_free_nointr, 0, |
.pa_alloc = pool_page_alloc, |
.pa_backingmapptr = &kernel_map, |
.pa_free = pool_page_free, |
|
.pa_pagesz = 0 |
}; |
}; |
#endif |
#endif |
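
The palloc argument of pool_init() selects among the allocators defined above; passing NULL falls back to pool_allocator_kmem, as the pool_init() path earlier in this diff shows ("if (palloc == NULL) palloc = &pool_allocator_kmem"). A hypothetical caller, with the pool variable and name invented for illustration:

    static struct pool example_pool;

    static void
    example_pool_setup(void)
    {
        /* NULL palloc: pool_init() substitutes &pool_allocator_kmem. */
        pool_init(&example_pool, 128, 0, 0, 0,
            "examplepl", NULL, IPL_VM);
    }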
|
|
[unchanged lines omitted: old line 2749 / new line 2515]
|
void pool_subpage_free(struct pool *, void *); |
void pool_subpage_free(struct pool *, void *); |
|
|
struct pool_allocator pool_allocator_kmem = { |
struct pool_allocator pool_allocator_kmem = { |
pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE, |
.pa_alloc = pool_subpage_alloc, |
.pa_backingmapptr = &kmem_map, |
.pa_free = pool_subpage_free, |
|
.pa_pagesz = POOL_SUBPAGE |
}; |
}; |
|
|
void *pool_subpage_alloc_nointr(struct pool *, int); |
|
void pool_subpage_free_nointr(struct pool *, void *); |
|
|
|
struct pool_allocator pool_allocator_nointr = { |
struct pool_allocator pool_allocator_nointr = { |
pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE, |
.pa_alloc = pool_subpage_alloc, |
.pa_backingmapptr = &kmem_map, |
.pa_free = pool_subpage_free, |
|
.pa_pagesz = POOL_SUBPAGE |
}; |
}; |
#endif /* POOL_SUBPAGE */ |
#endif /* POOL_SUBPAGE */ |
|
|
[unchanged lines omitted: old line 2794 / new line 2559]
|
void * |
void * |
pool_page_alloc(struct pool *pp, int flags) |
pool_page_alloc(struct pool *pp, int flags) |
{ |
{ |
bool waitok = (flags & PR_WAITOK) ? true : false; |
const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP; |
|
vmem_addr_t va; |
|
int ret; |
|
|
return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok)); |
ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz, |
|
vflags | VM_INSTANTFIT, &va); |
|
|
|
return ret ? NULL : (void *)va; |
} |
} |
|
|
void |
void |
pool_page_free(struct pool *pp, void *v) |
pool_page_free(struct pool *pp, void *v) |
{ |
{ |
|
|
uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v); |
uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz); |
} |
} |
|
|
static void * |
static void * |
pool_page_alloc_meta(struct pool *pp, int flags) |
pool_page_alloc_meta(struct pool *pp, int flags) |
{ |
{ |
bool waitok = (flags & PR_WAITOK) ? true : false; |
const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP; |
|
vmem_addr_t va; |
|
int ret; |
|
|
|
ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz, |
|
vflags | VM_INSTANTFIT, &va); |
|
|
return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok)); |
return ret ? NULL : (void *)va; |
} |
} |
|
|
static void |
static void |
pool_page_free_meta(struct pool *pp, void *v) |
pool_page_free_meta(struct pool *pp, void *v) |
{ |
{ |
|
|
uvm_km_free_poolpage(kmem_map, (vaddr_t) v); |
vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz); |
} |
} |
|
|
#ifdef POOL_SUBPAGE |
#ifdef POOL_SUBPAGE |
[unchanged lines omitted: old line 2835 / new line 2610]
|
pool_put(&psppool, v); |
pool_put(&psppool, v); |
} |
} |
|
|
/* We don't provide a real nointr allocator. Maybe later. */ |
|
void * |
|
pool_subpage_alloc_nointr(struct pool *pp, int flags) |
|
{ |
|
|
|
return (pool_subpage_alloc(pp, flags)); |
|
} |
|
|
|
void |
|
pool_subpage_free_nointr(struct pool *pp, void *v) |
|
{ |
|
|
|
pool_subpage_free(pp, v); |
|
} |
|
#endif /* POOL_SUBPAGE */ |
#endif /* POOL_SUBPAGE */ |
void * |
|
pool_page_alloc_nointr(struct pool *pp, int flags) |
|
{ |
|
bool waitok = (flags & PR_WAITOK) ? true : false; |
|
|
|
return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok)); |
|
} |
|
|
|
void |
|
pool_page_free_nointr(struct pool *pp, void *v) |
|
{ |
|
|
|
uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v); |
|
} |
|
|
|
#if defined(DDB) |
#if defined(DDB) |
static bool |
static bool |
|
|
goto print; |
goto print; |
} |
} |
} |
} |
for (i = 0; i < MAXCPUS; i++) { |
for (i = 0; i < __arraycount(pc->pc_cpus); i++) { |
pool_cache_cpu_t *cc; |
pool_cache_cpu_t *cc; |
|
|
if ((cc = pc->pc_cpus[i]) == NULL) { |
if ((cc = pc->pc_cpus[i]) == NULL) { |