--- version 1.171, 2008/11/11 16:13:03
+++ version 1.222, 2018/07/04 01:42:37

 /* $NetBSD$ */

 /*-
- * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008 The NetBSD Foundation, Inc.
+ * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015
+ *     The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
- * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
+ * Simulation Facility, NASA Ames Research Center; by Andrew Doran, and by
+ * Maxime Villard.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions

 #include <sys/cdefs.h>
 __KERNEL_RCSID(0, "$NetBSD$");

+#ifdef _KERNEL_OPT
 #include "opt_ddb.h"
-#include "opt_pool.h"
-#include "opt_poollog.h"
 #include "opt_lockdebug.h"
+#endif

 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/sysctl.h>
 #include <sys/bitops.h>
 #include <sys/proc.h>
 #include <sys/errno.h>
 #include <sys/kernel.h>
-#include <sys/malloc.h>
+#include <sys/vmem.h>
 #include <sys/pool.h>
 #include <sys/syslog.h>
 #include <sys/debug.h>
@@ -53 +56 @@ __KERNEL_RCSID(0, "$NetBSD$")
 #include <sys/cpu.h>
 #include <sys/atomic.h>

-#include <uvm/uvm.h>
+#include <uvm/uvm_extern.h>

 /*
  * Pool resource management utility.
@@ -68 +71 @@ __KERNEL_RCSID(0, "$NetBSD$")
  * an internal pool of page headers (`phpool').
  */
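
[Note] The comment above introduces the pool(9) resource allocator that this diff modifies throughout. For orientation, a minimal sketch of how a client subsystem typically drives the API; `struct foo` and `foopool` are hypothetical names, and the IPL/flag choices are just one common configuration, not something this change prescribes.

    /* Hypothetical pool(9) client, for orientation only. */
    struct foo {
            int f_refs;
    };

    static struct pool foopool;

    void
    foo_init(void)
    {
            /* NULL palloc selects the default backing allocator. */
            pool_init(&foopool, sizeof(struct foo), 0, 0, 0,
                "foopool", NULL, IPL_NONE);
    }

    struct foo *
    foo_alloc(void)
    {
            return pool_get(&foopool, PR_WAITOK);   /* may sleep */
    }

    void
    foo_free(struct foo *f)
    {
            pool_put(&foopool, f);
    }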

-/* List of all pools */
-TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
+/* List of all pools. Non static as needed by 'vmstat -m' */
+TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

 /* Private pool for page header structures */
 #define PHPOOL_MAX      8
@@ -82 +85 @@ static struct pool phpool[PHPOOL_MAX]
 static struct pool psppool;
 #endif

-static SLIST_HEAD(, pool_allocator) pa_deferinitq =
-    SLIST_HEAD_INITIALIZER(pa_deferinitq);
+#ifdef POOL_REDZONE
+# define POOL_REDZONE_SIZE 2
+static void pool_redzone_init(struct pool *, size_t);
+static void pool_redzone_fill(struct pool *, void *);
+static void pool_redzone_check(struct pool *, void *);
+#else
+# define pool_redzone_init(pp, sz)      /* NOTHING */
+# define pool_redzone_fill(pp, ptr)     /* NOTHING */
+# define pool_redzone_check(pp, ptr)    /* NOTHING */
+#endif

 static void *pool_page_alloc_meta(struct pool *, int);
 static void pool_page_free_meta(struct pool *, void *);

 /* allocator for pool metadata */
 struct pool_allocator pool_allocator_meta = {
-        pool_page_alloc_meta, pool_page_free_meta,
-        .pa_backingmapptr = &kmem_map,
+        .pa_alloc = pool_page_alloc_meta,
+        .pa_free = pool_page_free_meta,
+        .pa_pagesz = 0
 };
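
[Note] The POOL_REDZONE hooks added above guard each item with a few trailing bytes that are written on allocation and verified on free. A stand-alone sketch of the idea in plain C, deliberately not NetBSD's actual implementation — the pattern bytes and size here are invented:

    #include <assert.h>
    #include <string.h>

    #define REDZONE_SIZE 2
    static const unsigned char redzone_pattern[REDZONE_SIZE] = { 0xde, 0xad };

    /* On allocation: write the guard just past the caller-visible size. */
    static void
    redzone_fill(void *item, size_t usersize)
    {
            memcpy((unsigned char *)item + usersize, redzone_pattern,
                REDZONE_SIZE);
    }

    /* On free: a damaged guard means the caller overran its buffer. */
    static void
    redzone_check(const void *item, size_t usersize)
    {
            assert(memcmp((const unsigned char *)item + usersize,
                redzone_pattern, REDZONE_SIZE) == 0);
    }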

+#define POOL_ALLOCATOR_BIG_BASE 13
+extern struct pool_allocator pool_allocator_big[];
+static int pool_bigidx(size_t);

 /* # of seconds to retain page after last use */
 int pool_inactive_time = 10;
@@ -104 +120 @@ static struct pool *drainpp
 static kmutex_t pool_head_lock;
 static kcondvar_t pool_busy;

+/* This lock protects initialization of a potentially shared pool allocator */
+static kmutex_t pool_allocator_lock;

 typedef uint32_t pool_item_bitmap_t;
 #define BITMAP_SIZE     (CHAR_BIT * sizeof(pool_item_bitmap_t))
 #define BITMAP_MASK     (BITMAP_SIZE - 1)
@@ -175 +194 @@ static struct pool pcg_large_pool
 static struct pool cache_pool;
 static struct pool cache_cpu_pool;

+pool_cache_t pnbuf_cache;       /* pathname buffer cache */

 /* List of all caches. */
 TAILQ_HEAD(,pool_cache) pool_cache_head =
     TAILQ_HEAD_INITIALIZER(pool_cache_head);
@@ -188 +209 @@ static bool pool_cache_get_slow(pool_cac
     void **, paddr_t *, int);
 static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
 static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
-static void pool_cache_xcall(pool_cache_t);
+static void pool_cache_invalidate_cpu(pool_cache_t, u_int);
+static void pool_cache_transfer(pool_cache_t);

 static int pool_catchup(struct pool *);
 static void pool_prime_page(struct pool *, void *,
@@ -200 +222 @@ static void *pool_allocator_alloc(struct
 static void     pool_allocator_free(struct pool *, void *);

 static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
-        void (*)(const char *, ...));
+        void (*)(const char *, ...) __printflike(1, 2));
 static void pool_print1(struct pool *, const char *,
-        void (*)(const char *, ...));
+        void (*)(const char *, ...) __printflike(1, 2));

 static int pool_chk_page(struct pool *, const char *,
     struct pool_item_header *);

-/*
- * Pool log entry. An array of these is allocated in pool_init().
- */
-struct pool_log {
-        const char      *pl_file;
-        long            pl_line;
-        int             pl_action;
-#define PRLOG_GET       1
-#define PRLOG_PUT       2
-        void            *pl_addr;
-};
-
-#ifdef POOL_DIAGNOSTIC
-/* Number of entries in pool log buffers */
-#ifndef POOL_LOGSIZE
-#define POOL_LOGSIZE    10
-#endif
-
-int pool_logsize = POOL_LOGSIZE;
-
-static inline void
-pr_log(struct pool *pp, void *v, int action, const char *file, long line)
-{
-        int n = pp->pr_curlogentry;
-        struct pool_log *pl;
-
-        if ((pp->pr_roflags & PR_LOGGING) == 0)
-                return;
-
-        /*
-         * Fill in the current entry. Wrap around and overwrite
-         * the oldest entry if necessary.
-         */
-        pl = &pp->pr_log[n];
-        pl->pl_file = file;
-        pl->pl_line = line;
-        pl->pl_action = action;
-        pl->pl_addr = v;
-        if (++n >= pp->pr_logsize)
-                n = 0;
-        pp->pr_curlogentry = n;
-}
-
-static void
-pr_printlog(struct pool *pp, struct pool_item *pi,
-    void (*pr)(const char *, ...))
-{
-        int i = pp->pr_logsize;
-        int n = pp->pr_curlogentry;
-
-        if ((pp->pr_roflags & PR_LOGGING) == 0)
-                return;
-
-        /*
-         * Print all entries in this pool's log.
-         */
-        while (i-- > 0) {
-                struct pool_log *pl = &pp->pr_log[n];
-                if (pl->pl_action != 0) {
-                        if (pi == NULL || pi == pl->pl_addr) {
-                                (*pr)("\tlog entry %d:\n", i);
-                                (*pr)("\t\taction = %s, addr = %p\n",
-                                    pl->pl_action == PRLOG_GET ? "get" : "put",
-                                    pl->pl_addr);
-                                (*pr)("\t\tfile: %s at line %lu\n",
-                                    pl->pl_file, pl->pl_line);
-                        }
-                }
-                if (++n >= pp->pr_logsize)
-                        n = 0;
-        }
-}
-
-static inline void
-pr_enter(struct pool *pp, const char *file, long line)
-{
-
-        if (__predict_false(pp->pr_entered_file != NULL)) {
-                printf("pool %s: reentrancy at file %s line %ld\n",
-                    pp->pr_wchan, file, line);
-                printf("        previous entry at file %s line %ld\n",
-                    pp->pr_entered_file, pp->pr_entered_line);
-                panic("pr_enter");
-        }
-
-        pp->pr_entered_file = file;
-        pp->pr_entered_line = line;
-}
-
-static inline void
-pr_leave(struct pool *pp)
-{
-
-        if (__predict_false(pp->pr_entered_file == NULL)) {
-                printf("pool %s not entered?\n", pp->pr_wchan);
-                panic("pr_leave");
-        }
-
-        pp->pr_entered_file = NULL;
-        pp->pr_entered_line = 0;
-}
-
-static inline void
-pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
-{
-
-        if (pp->pr_entered_file != NULL)
-                (*pr)("\n\tcurrently entered from file %s line %ld\n",
-                    pp->pr_entered_file, pp->pr_entered_line);
-}
-#else
-#define pr_log(pp, v, action, file, line)
-#define pr_printlog(pp, pi, pr)
-#define pr_enter(pp, file, line)
-#define pr_leave(pp)
-#define pr_enter_check(pp, pr)
-#endif /* POOL_DIAGNOSTIC */

 static inline unsigned int
 pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
     const void *v)
@@ -367 +271 @@ pr_item_notouch_get(const struct pool *p

         bit--;
         idx = (i * BITMAP_SIZE) + bit;
-        mask = 1 << bit;
+        mask = 1U << bit;
         KASSERT((bitmap[i] & mask) != 0);
         bitmap[i] &= ~mask;
         break;
@@ -482 +386 @@ pr_rmpage(struct pool *pp, struct pool_i
          * If the page was idle, decrement the idle page count.
          */
         if (ph->ph_nmissing == 0) {
-#ifdef DIAGNOSTIC
-                if (pp->pr_nidle == 0)
-                        panic("pr_rmpage: nidle inconsistent");
-                if (pp->pr_nitems < pp->pr_itemsperpage)
-                        panic("pr_rmpage: nitems inconsistent");
-#endif
+                KASSERT(pp->pr_nidle != 0);
+                KASSERTMSG((pp->pr_nitems >= pp->pr_itemsperpage),
+                    "nitems=%u < itemsperpage=%u",
+                    pp->pr_nitems, pp->pr_itemsperpage);
                 pp->pr_nidle--;
         }
@@ -507 +409 @@ pr_rmpage(struct pool *pp, struct pool_i
         pool_update_curpage(pp);
 }
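
[Note] The `1 << bit` → `1U << bit` change above matters: with 32-bit int, shifting a signed 1 into bit 31 is undefined behaviour, while the unsigned form is well defined. A small, runnable demonstration of the bitmap indexing scheme PR_NOTOUCH pools use (names local to this example):

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t pool_item_bitmap_t;
    #define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t))

    /* Mark item 'idx' free by setting its bit; 1U avoids signed-shift UB. */
    static void
    bitmap_mark_free(pool_item_bitmap_t *bitmap, unsigned idx)
    {
            bitmap[idx / BITMAP_SIZE] |= 1U << (idx % BITMAP_SIZE);
    }

    int
    main(void)
    {
            pool_item_bitmap_t map[2] = { 0, 0 };

            bitmap_mark_free(map, 31);      /* highest bit of word 0 */
            bitmap_mark_free(map, 32);      /* lowest bit of word 1 */
            /* prints: 80000000 00000001 */
            printf("%08x %08x\n", (unsigned)map[0], (unsigned)map[1]);
            return 0;
    }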

-static bool
-pa_starved_p(struct pool_allocator *pa)
-{
-
-        if (pa->pa_backingmap != NULL) {
-                return vm_map_starved_p(pa->pa_backingmap);
-        }
-        return false;
-}
-
-static int
-pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
-{
-        struct pool *pp = obj;
-        struct pool_allocator *pa = pp->pr_alloc;
-
-        KASSERT(&pp->pr_reclaimerentry == ce);
-        pool_reclaim(pp);
-        if (!pa_starved_p(pa)) {
-                return CALLBACK_CHAIN_ABORT;
-        }
-        return CALLBACK_CHAIN_CONTINUE;
-}
-
-static void
-pool_reclaim_register(struct pool *pp)
-{
-        struct vm_map *map = pp->pr_alloc->pa_backingmap;
-        int s;
-
-        if (map == NULL) {
-                return;
-        }
-
-        s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
-        callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback,
-            &pp->pr_reclaimerentry, pp, pool_reclaim_callback);
-        splx(s);
-}
-
-static void
-pool_reclaim_unregister(struct pool *pp)
-{
-        struct vm_map *map = pp->pr_alloc->pa_backingmap;
-        int s;
-
-        if (map == NULL) {
-                return;
-        }
-
-        s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
-        callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback,
-            &pp->pr_reclaimerentry);
-        splx(s);
-}
-
-static void
-pa_reclaim_register(struct pool_allocator *pa)
-{
-        struct vm_map *map = *pa->pa_backingmapptr;
-        struct pool *pp;
-
-        KASSERT(pa->pa_backingmap == NULL);
-        if (map == NULL) {
-                SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q);
-                return;
-        }
-        pa->pa_backingmap = map;
-        TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
-                pool_reclaim_register(pp);
-        }
-}

 /*
  * Initialize all the pools listed in the "pools" link set.
  */
 void
 pool_subsystem_init(void)
 {
-        struct pool_allocator *pa;
-        __link_set_decl(pools, struct link_pool_init);
-        struct link_pool_init * const *pi;
+        size_t size;
+        int idx;

         mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
+        mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
         cv_init(&pool_busy, "poolbusy");

-        __link_set_foreach(pi, pools)
-                pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
-                    (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
-                    (*pi)->palloc, (*pi)->ipl);
-
-        while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
-                KASSERT(pa->pa_backingmapptr != NULL);
-                KASSERT(*pa->pa_backingmapptr != NULL);
-                SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
-                pa_reclaim_register(pa);
+        /*
+         * Initialize private page header pool and cache magazine pool if we
+         * haven't done so yet.
+         */
+        for (idx = 0; idx < PHPOOL_MAX; idx++) {
+                static char phpool_names[PHPOOL_MAX][6+1+6+1];
+                int nelem;
+                size_t sz;
+
+                nelem = PHPOOL_FREELIST_NELEM(idx);
+                snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
+                    "phpool-%d", nelem);
+                sz = sizeof(struct pool_item_header);
+                if (nelem) {
+                        sz = offsetof(struct pool_item_header,
+                            ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
+                }
+                pool_init(&phpool[idx], sz, 0, 0, 0,
+                    phpool_names[idx], &pool_allocator_meta, IPL_VM);
         }
+#ifdef POOL_SUBPAGE
+        pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
+            PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
+#endif
+
+        size = sizeof(pcg_t) +
+            (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
+        pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
+            "pcgnormal", &pool_allocator_meta, IPL_VM);
+
+        size = sizeof(pcg_t) +
+            (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
+        pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
+            "pcglarge", &pool_allocator_meta, IPL_VM);

         pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
-            0, 0, "pcache", &pool_allocator_nointr, IPL_NONE);
+            0, 0, "pcache", &pool_allocator_meta, IPL_NONE);

         pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
-            0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE);
+            0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE);
 }
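
[Note] pool_subsystem_init() (this logic moved here out of pool_init() in the new revision) sizes each page-header pool with offsetof() over the trailing ph_bitmap[] member, so a header only pays for the bitmap words it needs. The trick in isolation, runnable in userland; the struct name is a stand-in:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define howmany(x, y)   (((x) + ((y) - 1)) / (y))
    #define BITMAP_SIZE     32

    struct item_header {            /* stand-in for pool_item_header */
            unsigned        nmissing;
            uint32_t        bitmap[];       /* one bit per item */
    };

    int
    main(void)
    {
            /* Header sized to track 100 items: 4 bitmap words. */
            size_t sz = offsetof(struct item_header,
                bitmap[howmany(100, BITMAP_SIZE)]);
            printf("header size: %zu bytes\n", sz);
            return 0;
    }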

 /*
  * Initialize the given pool resource structure.
  *
  * We export this routine to allow other kernel parts to declare
- * static pools that must be initialized before malloc() is available.
+ * static pools that must be initialized before kmem(9) is available.
  */
 void
 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
     const char *wchan, struct pool_allocator *palloc, int ipl)
 {
         struct pool *pp1;
-        size_t trysize, phsize;
+        size_t trysize, phsize, prsize;
         int off, slack;

 #ifdef DEBUG
+        if (__predict_true(!cold))
+                mutex_enter(&pool_head_lock);
         /*
          * Check that the pool hasn't already been initialised and
          * added to the list of all pools.
          */
         TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
                 if (pp == pp1)
-                        panic("pool_init: pool %s already initialised",
+                        panic("%s: [%s] already initialised", __func__,
                             wchan);
         }
-#endif
-#ifdef POOL_DIAGNOSTIC
-        /*
-         * Always log if POOL_DIAGNOSTIC is defined.
-         */
-        if (pool_logsize != 0)
-                flags |= PR_LOGGING;
+        if (__predict_true(!cold))
+                mutex_exit(&pool_head_lock);
 #endif

         if (palloc == NULL)
@@ -656 +504 @@ pool_init(struct pool *pp, size_t size,
                 palloc = &pool_allocator_nointr_fullpage;
         }
 #endif /* POOL_SUBPAGE */
-        if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
+        if (!cold)
+                mutex_enter(&pool_allocator_lock);
+        if (palloc->pa_refcnt++ == 0) {
                 if (palloc->pa_pagesz == 0)
                         palloc->pa_pagesz = PAGE_SIZE;

@@ -665 +515 @@ pool_init(struct pool *pp, size_t size,
                 mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
                 palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
                 palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
-
-                if (palloc->pa_backingmapptr != NULL) {
-                        pa_reclaim_register(palloc);
-                }
-                palloc->pa_flags |= PA_INITIALIZED;
         }
+        if (!cold)
+                mutex_exit(&pool_allocator_lock);

         if (align == 0)
                 align = ALIGN(1);

-        if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
-                size = sizeof(struct pool_item);
-        size = roundup(size, align);
-#ifdef DIAGNOSTIC
-        if (size > palloc->pa_pagesz)
-                panic("pool_init: pool item size (%zu) too large", size);
-#endif
+        prsize = size;
+        if ((flags & PR_NOTOUCH) == 0 && prsize < sizeof(struct pool_item))
+                prsize = sizeof(struct pool_item);
+
+        prsize = roundup(prsize, align);
+        KASSERTMSG((prsize <= palloc->pa_pagesz),
+            "%s: [%s] pool item size (%zu) larger than page size (%u)",
+            __func__, wchan, prsize, palloc->pa_pagesz);

         /*
          * Initialize the pool structure.
@@ -698 +545 @@ pool_init(struct pool *pp, size_t size,
         pp->pr_maxpages = UINT_MAX;
         pp->pr_roflags = flags;
         pp->pr_flags = 0;
-        pp->pr_size = size;
+        pp->pr_size = prsize;
         pp->pr_align = align;
         pp->pr_wchan = wchan;
         pp->pr_alloc = palloc;
@@ -713 +560 @@ pool_init(struct pool *pp, size_t size,
         pp->pr_drain_hook = NULL;
         pp->pr_drain_hook_arg = NULL;
         pp->pr_freecheck = NULL;
+        pool_redzone_init(pp, size);

         /*
          * Decide whether to put the page header off page to avoid
@@ -731 +579 @@ pool_init(struct pool *pp, size_t size,
         /* See the comment below about reserved bytes. */
         trysize = palloc->pa_pagesz - ((align - ioff) % align);
         phsize = ALIGN(sizeof(struct pool_item_header));
-        if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
+        if (pp->pr_roflags & PR_PHINPAGE ||
+            ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
             (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
-            trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
+            trysize / pp->pr_size == (trysize - phsize) / pp->pr_size))) {
                 /* Use the end of the page for the page header */
                 pp->pr_roflags |= PR_PHINPAGE;
                 pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
@@ -763 +612 @@ pool_init(struct pool *pp, size_t size,
                          * if you see this panic, consider to tweak
                          * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
                          */
-                        panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
+                        panic("%s: [%s] too large itemsperpage(%d) for "
+                            "PR_NOTOUCH", __func__,
                             pp->pr_wchan, pp->pr_itemsperpage);
                 }
                 pp->pr_phpool = &phpool[idx];
@@ -793 +643 @@ pool_init(struct pool *pp, size_t size,
         pp->pr_nidle = 0;
         pp->pr_refcnt = 0;

-#ifdef POOL_DIAGNOSTIC
-        if (flags & PR_LOGGING) {
-                if (kmem_map == NULL ||
-                    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
-                     M_TEMP, M_NOWAIT)) == NULL)
-                        pp->pr_roflags &= ~PR_LOGGING;
-                pp->pr_curlogentry = 0;
-                pp->pr_logsize = pool_logsize;
-        }
-#endif
-
-        pp->pr_entered_file = NULL;
-        pp->pr_entered_line = 0;
-
         mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
         cv_init(&pp->pr_cv, wchan);
         pp->pr_ipl = ipl;
-
-        /*
-         * Initialize private page header pool and cache magazine pool if we
-         * haven't done so yet.
-         * XXX LOCKING.
-         */
-        if (phpool[0].pr_size == 0) {
-                int idx;
-                for (idx = 0; idx < PHPOOL_MAX; idx++) {
-                        static char phpool_names[PHPOOL_MAX][6+1+6+1];
-                        int nelem;
-                        size_t sz;
-
-                        nelem = PHPOOL_FREELIST_NELEM(idx);
-                        snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
-                            "phpool-%d", nelem);
-                        sz = sizeof(struct pool_item_header);
-                        if (nelem) {
-                                sz = offsetof(struct pool_item_header,
-                                    ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
-                        }
-                        pool_init(&phpool[idx], sz, 0, 0, 0,
-                            phpool_names[idx], &pool_allocator_meta, IPL_VM);
-                }
-#ifdef POOL_SUBPAGE
-                pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
-                    PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
-#endif
-
-                size = sizeof(pcg_t) +
-                    (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
-                pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
-                    "pcgnormal", &pool_allocator_meta, IPL_VM);
-
-                size = sizeof(pcg_t) +
-                    (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
-                pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
-                    "pcglarge", &pool_allocator_meta, IPL_VM);
-        }

         /* Insert into the list of all pools. */
-        if (__predict_true(!cold))
+        if (!cold)
                 mutex_enter(&pool_head_lock);
         TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
                 if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
@@ -861 +658 @@ pool_init(struct pool *pp, size_t size,
                 TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
         else
                 TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
-        if (__predict_true(!cold))
+        if (!cold)
                 mutex_exit(&pool_head_lock);

         /* Insert this into the list of pools using this allocator. */
-        if (__predict_true(!cold))
+        if (!cold)
                 mutex_enter(&palloc->pa_lock);
         TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
-        if (__predict_true(!cold))
+        if (!cold)
                 mutex_exit(&palloc->pa_lock);
-
-        pool_reclaim_register(pp);
 }
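
[Note] A pattern that recurs throughout this diff: hand-rolled `#ifdef DIAGNOSTIC ... panic()` blocks become single KASSERT()/KASSERTMSG() calls. A sketch of what such a macro boils down to — the name MY_KASSERTMSG is hypothetical; the real macro lives in the kernel headers and differs in detail:

    #ifdef DIAGNOSTIC
    #define MY_KASSERTMSG(e, msg, ...)                              \
            do {                                                    \
                    if (__predict_false(!(e)))                      \
                            panic(msg, ##__VA_ARGS__);              \
            } while (0)
    #else
    #define MY_KASSERTMSG(e, msg, ...)      /* compiled away */
    #endif

This keeps non-DIAGNOSTIC builds free of the check while giving diagnostic kernels a formatted panic message in one line per call site.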

 /*
@@ -893 +688 @@ pool_destroy(struct pool *pp)
         mutex_exit(&pool_head_lock);

         /* Remove this pool from its allocator's list of pools. */
-        pool_reclaim_unregister(pp);
         mutex_enter(&pp->pr_alloc->pa_lock);
         TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
         mutex_exit(&pp->pr_alloc->pa_lock);

+        mutex_enter(&pool_allocator_lock);
+        if (--pp->pr_alloc->pa_refcnt == 0)
+                mutex_destroy(&pp->pr_alloc->pa_lock);
+        mutex_exit(&pool_allocator_lock);

         mutex_enter(&pp->pr_lock);

         KASSERT(pp->pr_cache == NULL);
-#ifdef DIAGNOSTIC
-        if (pp->pr_nout != 0) {
-                pr_printlog(pp, NULL, printf);
-                panic("pool_destroy: pool busy: still out: %u",
-                    pp->pr_nout);
-        }
-#endif
+        KASSERTMSG((pp->pr_nout == 0),
+            "%s: pool busy: still out: %u", __func__, pp->pr_nout);

         KASSERT(LIST_EMPTY(&pp->pr_fullpages));
         KASSERT(LIST_EMPTY(&pp->pr_partpages));
@@ -921 +713 @@ pool_destroy(struct pool *pp)
         mutex_exit(&pp->pr_lock);

         pr_pagelist_free(pp, &pq);

-#ifdef POOL_DIAGNOSTIC
-        if ((pp->pr_roflags & PR_LOGGING) != 0)
-                free(pp->pr_log, M_TEMP);
-#endif

         cv_destroy(&pp->pr_cv);
         mutex_destroy(&pp->pr_lock);
 }
@@ -936 +722 @@ pool_set_drain_hook(struct pool *pp, voi
 {

         /* XXX no locking -- must be used just after pool_init() */
-#ifdef DIAGNOSTIC
-        if (pp->pr_drain_hook != NULL)
-                panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
-#endif
+        KASSERTMSG((pp->pr_drain_hook == NULL),
+            "%s: [%s] already set", __func__, pp->pr_wchan);
         pp->pr_drain_hook = fn;
         pp->pr_drain_hook_arg = arg;
 }
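
[Note] As the "no locking" comment above demands, a drain hook must be registered immediately after pool_init(), before the pool is visible to other threads. A hedged sketch — foo_drain and foopool are hypothetical:

    static struct pool foopool;

    static void
    foo_drain(void *arg, int flags)
    {
            /*
             * Invoked under memory pressure once the pool hits its
             * hard limit; release whatever this subsystem can spare.
             */
    }

    void
    foo_init(void)
    {
            pool_init(&foopool, 128, 0, 0, 0, "foopool", NULL, IPL_NONE);
            pool_set_drain_hook(&foopool, foo_drain, NULL);
    }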
@@ -950 +734 @@ pool_alloc_item_header(struct pool *pp,
         struct pool_item_header *ph;

         if ((pp->pr_roflags & PR_PHINPAGE) != 0)
-                ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
+                ph = (void *)((char *)storage + pp->pr_phoffset);
         else
                 ph = pool_get(pp->pr_phpool, flags);

@@ -961 +745 @@ pool_alloc_item_header(struct pool *pp,
  * Grab an item from the pool.
  */
 void *
-#ifdef POOL_DIAGNOSTIC
-_pool_get(struct pool *pp, int flags, const char *file, long line)
-#else
 pool_get(struct pool *pp, int flags)
-#endif
 {
         struct pool_item *pi;
         struct pool_item_header *ph;
         void *v;

-#ifdef DIAGNOSTIC
-        if (__predict_false(pp->pr_itemsperpage == 0))
-                panic("pool_get: pool %p: pr_itemsperpage is zero, "
-                    "pool not initialized?", pp);
-        if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
-                            (flags & PR_WAITOK) != 0))
-                panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
-#endif /* DIAGNOSTIC */
-#ifdef LOCKDEBUG
+        KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK));
+        KASSERTMSG((pp->pr_itemsperpage != 0),
+            "%s: [%s] pr_itemsperpage is zero, "
+            "pool not initialized?", __func__, pp->pr_wchan);
+        KASSERTMSG((!(cpu_intr_p() || cpu_softintr_p())
+                || pp->pr_ipl != IPL_NONE || cold || panicstr != NULL),
+            "%s: [%s] is IPL_NONE, but called from interrupt context",
+            __func__, pp->pr_wchan);
         if (flags & PR_WAITOK) {
                 ASSERT_SLEEPABLE();
         }
-#endif

         mutex_enter(&pp->pr_lock);
-        pr_enter(pp, file, line);

 startover:
         /*
          * Check to see if we've reached the hard limit.  If we have,
          * and we can wait, then wait until an item has been returned to
          * the pool.
          */
-#ifdef DIAGNOSTIC
-        if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
-                pr_leave(pp);
-                mutex_exit(&pp->pr_lock);
-                panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
-        }
-#endif
+        KASSERTMSG((pp->pr_nout <= pp->pr_hardlimit),
+            "%s: %s: crossed hard limit", __func__, pp->pr_wchan);
         if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
                 if (pp->pr_drain_hook != NULL) {
                         /*
@@ -1009 +779 @@ pool_get(struct pool *pp, int flags)
                          * back to the pool, unlock, call the hook, re-lock,
                          * and check the hardlimit condition again.
                          */
-                        pr_leave(pp);
                         mutex_exit(&pp->pr_lock);
                         (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
                         mutex_enter(&pp->pr_lock);
-                        pr_enter(pp, file, line);
                         if (pp->pr_nout < pp->pr_hardlimit)
                                 goto startover;
                 }
@@ -1024 +792 @@ pool_get(struct pool *pp, int flags)
                  * it be?
                  */
                 pp->pr_flags |= PR_WANTED;
-                pr_leave(pp);
-                cv_wait(&pp->pr_cv, &pp->pr_lock);
-                pr_enter(pp, file, line);
+                do {
+                        cv_wait(&pp->pr_cv, &pp->pr_lock);
+                } while (pp->pr_flags & PR_WANTED);
                 goto startover;
         }

@@ -1040 +808 @@ pool_get(struct pool *pp, int flags)

                 pp->pr_nfail++;

-                pr_leave(pp);
                 mutex_exit(&pp->pr_lock);
+                KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0);
                 return (NULL);
         }

@@ -1054 +822 @@ pool_get(struct pool *pp, int flags)
         if ((ph = pp->pr_curpage) == NULL) {
                 int error;

-#ifdef DIAGNOSTIC
-                if (pp->pr_nitems != 0) {
-                        mutex_exit(&pp->pr_lock);
-                        printf("pool_get: %s: curpage NULL, nitems %u\n",
-                            pp->pr_wchan, pp->pr_nitems);
-                        panic("pool_get: nitems inconsistent");
-                }
-#endif
+                KASSERTMSG((pp->pr_nitems == 0),
+                    "%s: [%s] curpage NULL, inconsistent nitems %u",
+                    __func__, pp->pr_wchan, pp->pr_nitems);

                 /*
                  * Call the back-end page allocator for more memory.
                  * Release the pool lock, as the back-end page allocator
                  * may block.
                  */
-                pr_leave(pp);
                 error = pool_grow(pp, flags);
-                pr_enter(pp, file, line);
                 if (error != 0) {
                         /*
+                         * pool_grow aborts when another thread
+                         * is allocating a new page. Retry if it
+                         * waited for it.
+                         */
+                        if (error == ERESTART)
+                                goto startover;
+
+                        /*
                          * We were unable to allocate a page or item
                          * header, but we released the lock during
                          * allocation, so perhaps items were freed
@@ -1082 +851 @@ pool_get(struct pool *pp, int flags)
                                 goto startover;

                         pp->pr_nfail++;
-                        pr_leave(pp);
                         mutex_exit(&pp->pr_lock);
+                        KASSERT((flags & (PR_WAITOK|PR_NOWAIT)) == PR_NOWAIT);
                         return (NULL);
                 }

@@ -1091 +860 @@ pool_get(struct pool *pp, int flags)
                 goto startover;
         }
         if (pp->pr_roflags & PR_NOTOUCH) {
-#ifdef DIAGNOSTIC
-                if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
-                        pr_leave(pp);
-                        mutex_exit(&pp->pr_lock);
-                        panic("pool_get: %s: page empty", pp->pr_wchan);
-                }
-#endif
+                KASSERTMSG((ph->ph_nmissing < pp->pr_itemsperpage),
+                    "%s: %s: page empty", __func__, pp->pr_wchan);
                 v = pr_item_notouch_get(pp, ph);
-#ifdef POOL_DIAGNOSTIC
-                pr_log(pp, v, PRLOG_GET, file, line);
-#endif
         } else {
                 v = pi = LIST_FIRST(&ph->ph_itemlist);
                 if (__predict_false(v == NULL)) {
-                        pr_leave(pp);
                         mutex_exit(&pp->pr_lock);
-                        panic("pool_get: %s: page empty", pp->pr_wchan);
+                        panic("%s: [%s] page empty", __func__, pp->pr_wchan);
                 }
-#ifdef DIAGNOSTIC
-                if (__predict_false(pp->pr_nitems == 0)) {
-                        pr_leave(pp);
-                        mutex_exit(&pp->pr_lock);
-                        printf("pool_get: %s: items on itemlist, nitems %u\n",
-                            pp->pr_wchan, pp->pr_nitems);
-                        panic("pool_get: nitems inconsistent");
-                }
-#endif
-
-#ifdef POOL_DIAGNOSTIC
-                pr_log(pp, v, PRLOG_GET, file, line);
-#endif
-
-#ifdef DIAGNOSTIC
-                if (__predict_false(pi->pi_magic != PI_MAGIC)) {
-                        pr_printlog(pp, pi, printf);
-                        panic("pool_get(%s): free list modified: "
-                            "magic=%x; page %p; item addr %p\n",
-                            pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
-                }
-#endif
+                KASSERTMSG((pp->pr_nitems > 0),
+                    "%s: [%s] nitems %u inconsistent on itemlist",
+                    __func__, pp->pr_wchan, pp->pr_nitems);
+                KASSERTMSG((pi->pi_magic == PI_MAGIC),
+                    "%s: [%s] free list modified: "
+                    "magic=%x; page %p; item addr %p", __func__,
+                    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);

                 /*
                  * Remove from item list.
@@ -1140 +885 @@ pool_get(struct pool *pp, int flags)
         pp->pr_nitems--;
         pp->pr_nout++;
         if (ph->ph_nmissing == 0) {
-#ifdef DIAGNOSTIC
-                if (__predict_false(pp->pr_nidle == 0))
-                        panic("pool_get: nidle inconsistent");
-#endif
+                KASSERT(pp->pr_nidle > 0);
                 pp->pr_nidle--;

                 /*
@@ -1155 +897 @@ pool_get(struct pool *pp, int flags)
         }
         ph->ph_nmissing++;
         if (ph->ph_nmissing == pp->pr_itemsperpage) {
-#ifdef DIAGNOSTIC
-                if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
-                    !LIST_EMPTY(&ph->ph_itemlist))) {
-                        pr_leave(pp);
-                        mutex_exit(&pp->pr_lock);
-                        panic("pool_get: %s: nmissing inconsistent",
-                            pp->pr_wchan);
-                }
-#endif
+                KASSERTMSG(((pp->pr_roflags & PR_NOTOUCH) ||
+                        LIST_EMPTY(&ph->ph_itemlist)),
+                    "%s: [%s] nmissing (%u) inconsistent", __func__,
+                    pp->pr_wchan, ph->ph_nmissing);
                 /*
                  * This page is now full.  Move it to the full list
                  * and select a new current page.
@@ -1174 +911 @@ pool_get(struct pool *pp, int flags)
         }

         pp->pr_nget++;
-        pr_leave(pp);

         /*
          * If we have a low water mark and we are now below that low
@@ -1191 +927 @@ pool_get(struct pool *pp, int flags)
         mutex_exit(&pp->pr_lock);
         KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
         FREECHECK_OUT(&pp->pr_freecheck, v);
+        pool_redzone_fill(pp, v);
         return (v);
 }
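
[Note] The new `KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK))` at the top of pool_get() makes exactly one of the two flags mandatory. A hedged caller-side sketch (foopool and foo_prepare are hypothetical):

    static int
    foo_prepare(struct foo **fp, bool can_sleep)
    {
            /* Thread context may pass PR_WAITOK and sleep; interrupt
             * context must pass PR_NOWAIT and handle failure. */
            *fp = pool_get(&foopool, can_sleep ? PR_WAITOK : PR_NOWAIT);
            if (*fp == NULL)        /* only possible in the NOWAIT case */
                    return ENOMEM;
            return 0;
    }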

@@ -1204 +941 @@ pool_do_put(struct pool *pp, void *v, st
         struct pool_item_header *ph;

         KASSERT(mutex_owned(&pp->pr_lock));
+        pool_redzone_check(pp, v);
         FREECHECK_IN(&pp->pr_freecheck, v);
         LOCKDEBUG_MEM_CHECK(v, pp->pr_size);

-#ifdef DIAGNOSTIC
-        if (__predict_false(pp->pr_nout == 0)) {
-                printf("pool %s: putting with none out\n",
-                    pp->pr_wchan);
-                panic("pool_put");
-        }
-#endif
+        KASSERTMSG((pp->pr_nout > 0),
+            "%s: [%s] putting with none out", __func__, pp->pr_wchan);

         if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
-                pr_printlog(pp, NULL, printf);
-                panic("pool_put: %s: page header missing", pp->pr_wchan);
+                panic("%s: [%s] page header missing", __func__, pp->pr_wchan);
         }

         /*
@@ -1305 +1037 @@ pool_do_put(struct pool *pp, void *v, st
                 }
         }
 }

-/*
- * Return resource to the pool.
- */
-#ifdef POOL_DIAGNOSTIC
-void
-_pool_put(struct pool *pp, void *v, const char *file, long line)
-{
-        struct pool_pagelist pq;
-
-        LIST_INIT(&pq);
-
-        mutex_enter(&pp->pr_lock);
-        pr_enter(pp, file, line);
-
-        pr_log(pp, v, PRLOG_PUT, file, line);
-
-        pool_do_put(pp, v, &pq);
-
-        pr_leave(pp);
-        mutex_exit(&pp->pr_lock);
-
-        pr_pagelist_free(pp, &pq);
-}
-#undef pool_put
-#endif /* POOL_DIAGNOSTIC */

 void
 pool_put(struct pool *pp, void *v)
 {
@@ -1345 +1051 @@ pool_put(struct pool *pp, void *v)
         pr_pagelist_free(pp, &pq);
 }

-#ifdef POOL_DIAGNOSTIC
-#define pool_put(h, v)  _pool_put((h), (v), __FILE__, __LINE__)
-#endif

 /*
  * pool_grow: grow a pool by a page.
  *
@@ -1360 +1062 @@ pool_put(struct pool *pp, void *v)
 static int
 pool_grow(struct pool *pp, int flags)
 {
-        struct pool_item_header *ph = NULL;
-        char *cp;
-
-        mutex_exit(&pp->pr_lock);
-        cp = pool_allocator_alloc(pp, flags);
-        if (__predict_true(cp != NULL)) {
-                ph = pool_alloc_item_header(pp, cp, flags);
-        }
-        if (__predict_false(cp == NULL || ph == NULL)) {
-                if (cp != NULL) {
-                        pool_allocator_free(pp, cp);
-                }
-                mutex_enter(&pp->pr_lock);
-                return ENOMEM;
-        }
-
-        mutex_enter(&pp->pr_lock);
+        /*
+         * If there's a pool_grow in progress, wait for it to complete
+         * and try again from the top.
+         */
+        if (pp->pr_flags & PR_GROWING) {
+                if (flags & PR_WAITOK) {
+                        do {
+                                cv_wait(&pp->pr_cv, &pp->pr_lock);
+                        } while (pp->pr_flags & PR_GROWING);
+                        return ERESTART;
+                } else {
+                        if (pp->pr_flags & PR_GROWINGNOWAIT) {
+                                /*
+                                 * This needs an unlock/relock dance so
+                                 * that the other caller has a chance to
+                                 * run and actually do the thing.  Note
+                                 * that this is effectively a busy-wait.
+                                 */
+                                mutex_exit(&pp->pr_lock);
+                                mutex_enter(&pp->pr_lock);
+                                return ERESTART;
+                        }
+                        return EWOULDBLOCK;
+                }
+        }
+        pp->pr_flags |= PR_GROWING;
+        if (flags & PR_WAITOK)
+                mutex_exit(&pp->pr_lock);
+        else
+                pp->pr_flags |= PR_GROWINGNOWAIT;
+
+        char *cp = pool_allocator_alloc(pp, flags);
+        if (__predict_false(cp == NULL))
+                goto out;
+
+        struct pool_item_header *ph = pool_alloc_item_header(pp, cp, flags);
+        if (__predict_false(ph == NULL)) {
+                pool_allocator_free(pp, cp);
+                goto out;
+        }
+
+        if (flags & PR_WAITOK)
+                mutex_enter(&pp->pr_lock);
         pool_prime_page(pp, cp, ph);
         pp->pr_npagealloc++;
+        KASSERT(pp->pr_flags & PR_GROWING);
+        pp->pr_flags &= ~(PR_GROWING|PR_GROWINGNOWAIT);
+        /*
+         * If anyone was waiting for pool_grow, notify them that we
+         * may have just done it.
+         */
+        cv_broadcast(&pp->pr_cv);
         return 0;
+
+out:
+        if (flags & PR_WAITOK)
+                mutex_enter(&pp->pr_lock);
+        KASSERT(pp->pr_flags & PR_GROWING);
+        pp->pr_flags &= ~(PR_GROWING|PR_GROWINGNOWAIT);
+        return ENOMEM;
 }
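
[Note] pool_grow() now returns ERESTART when it had to wait (or busy-wait) behind a competing grower; the page may have been added by the other thread, so callers are expected to retry, exactly as pool_prime() and pool_catchup() do below. A caller-side sketch of that contract:

    static int
    grow_until_done(struct pool *pp)
    {
            int error;

            do {
                    error = pool_grow(pp, PR_NOWAIT);
            } while (error == ERESTART);    /* lost a race: re-check */
            return error;   /* 0, ENOMEM or EWOULDBLOCK */
    }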

 /*
@@ -1395 +1136 @@ pool_prime(struct pool *pp, int n)

         newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

-        while (newpages-- > 0) {
+        while (newpages > 0) {
                 error = pool_grow(pp, PR_NOWAIT);
                 if (error) {
+                        if (error == ERESTART)
+                                continue;
                         break;
                 }
                 pp->pr_minpages++;
+                newpages--;
         }

         if (pp->pr_minpages >= pp->pr_maxpages)
@@ -1425 +1169 @@ pool_prime_page(struct pool *pp, void *s
         int n;

         KASSERT(mutex_owned(&pp->pr_lock));
-#ifdef DIAGNOSTIC
-        if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
-            ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
-                panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
-#endif
+        KASSERTMSG(((pp->pr_roflags & PR_NOALIGN) ||
+                (((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) == 0)),
+            "%s: [%s] unaligned page: %p", __func__, pp->pr_wchan, cp);

         /*
          * Insert page header.
@@ -1513 +1254 @@ pool_catchup(struct pool *pp)
         while (POOL_NEEDS_CATCHUP(pp)) {
                 error = pool_grow(pp, PR_NOWAIT);
                 if (error) {
+                        if (error == ERESTART)
+                                continue;
                         break;
                 }
         }
@@ -1592 +1335 @@ pool_sethardlimit(struct pool *pp, int n

 /*
  * Release all complete pages that have not been used recently.
+ *
+ * Must not be called from interrupt context.
  */
 int
-#ifdef POOL_DIAGNOSTIC
-_pool_reclaim(struct pool *pp, const char *file, long line)
-#else
 pool_reclaim(struct pool *pp)
-#endif
 {
         struct pool_item_header *ph, *phnext;
         struct pool_pagelist pq;
@@ -1606 +1347 @@ pool_reclaim(struct pool *pp)
         bool klock;
         int rv;

+        KASSERT(!cpu_intr_p() && !cpu_softintr_p());
+
         if (pp->pr_drain_hook != NULL) {
                 /*
                  * The drain hook must be called with the pool unlocked.
@@ -1634 +1377 @@ pool_reclaim(struct pool *pp)
                 }
                 return (0);
         }
-        pr_enter(pp, file, line);

         LIST_INIT(&pq);

@@ -1648 +1390 @@ pool_reclaim(struct pool *pp)
                         break;

                 KASSERT(ph->ph_nmissing == 0);
-                if (curtime - ph->ph_time < pool_inactive_time
-                    && !pa_starved_p(pp->pr_alloc))
+                if (curtime - ph->ph_time < pool_inactive_time)
                         continue;

                 /*
@@ -1663 +1404 @@ pool_reclaim(struct pool *pp)
                 pr_rmpage(pp, ph, &pq);
         }

-        pr_leave(pp);
         mutex_exit(&pp->pr_lock);

         if (LIST_EMPTY(&pq))
@@ -1681 +1421 @@ pool_reclaim(struct pool *pp)
 }

 /*
- * Drain pools, one at a time. This is a two stage process;
- * drain_start kicks off a cross call to drain CPU-level caches
- * if the pool has an associated pool_cache. drain_end waits
- * for those cross calls to finish, and then drains the cache
- * (if any) and pool.
+ * Drain pools, one at a time. The drained pool is returned within ppp.
  *
  * Note, must never be called from interrupt context.
  */
-void
-pool_drain_start(struct pool **ppp, uint64_t *wp)
+bool
+pool_drain(struct pool **ppp)
 {
+        bool reclaimed;
         struct pool *pp;

         KASSERT(!TAILQ_EMPTY(&pool_head));
@@ -1716 +1453 @@ pool_drain_start(struct pool **ppp, uint
         pp->pr_refcnt++;
         mutex_exit(&pool_head_lock);

-        /* If there is a pool_cache, drain CPU level caches. */
-        *ppp = pp;
-        if (pp->pr_cache != NULL) {
-                *wp = xc_broadcast(0, (xcfunc_t)pool_cache_xcall,
-                    pp->pr_cache, NULL);
-        }
-}
-
-void
-pool_drain_end(struct pool *pp, uint64_t where)
-{
-
-        if (pp == NULL)
-                return;
-
-        KASSERT(pp->pr_refcnt > 0);
-
-        /* Wait for remote draining to complete. */
-        if (pp->pr_cache != NULL)
-                xc_wait(where);
-
         /* Drain the cache (if any) and pool.. */
-        pool_reclaim(pp);
+        reclaimed = pool_reclaim(pp);

         /* Finally, unlock the pool. */
         mutex_enter(&pool_head_lock);
         pp->pr_refcnt--;
         cv_broadcast(&pool_busy);
         mutex_exit(&pool_head_lock);

+        if (ppp != NULL)
+                *ppp = pp;
+
+        return reclaimed;
 }
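
[Note] The two-stage pool_drain_start()/pool_drain_end() pair collapses into a single pool_drain() that drains one pool per call, rotates through the global list, and reports via *ppp which pool it touched. A hedged sketch of how a pageout-style caller might use it — the loop bound is purely illustrative:

    static bool
    drain_some_pools(int rounds)
    {
            struct pool *pp = NULL;
            bool reclaimed = false;

            while (rounds-- > 0)
                    reclaimed |= pool_drain(&pp);   /* one pool per call */
            return reclaimed;       /* true if any pages were released */
    }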

 /*
- * Diagnostic helpers.
+ * Calculate the total number of pages consumed by pools.
  */
-void
-pool_print(struct pool *pp, const char *modif)
+int
+pool_totalpages(void)
 {
+        struct pool *pp;
+        uint64_t total = 0;

-        pool_print1(pp, modif, printf);
+        mutex_enter(&pool_head_lock);
+        TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
+                uint64_t bytes = pp->pr_npages * pp->pr_alloc->pa_pagesz;
+
+                if ((pp->pr_roflags & PR_RECURSIVE) != 0)
+                        bytes -= (pp->pr_nout * pp->pr_size);
+                total += bytes;
+        }
+        mutex_exit(&pool_head_lock);
+
+        return atop(total);
 }

+/*
+ * Diagnostic helpers.
+ */
 void
 pool_printall(const char *modif, void (*pr)(const char *, ...))
 {
@@ -1784 +1521 @@ pool_print_pagelist(struct pool *pp, str
     void (*pr)(const char *, ...))
 {
         struct pool_item_header *ph;
-#ifdef DIAGNOSTIC
-        struct pool_item *pi;
-#endif
+        struct pool_item *pi __diagused;

         LIST_FOREACH(ph, pl, ph_pagelist) {
                 (*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
@@ -1867 +1602 @@ pool_print1(struct pool *pp, const char
                 goto skip_log;

         (*pr)("\n");
-        if ((pp->pr_roflags & PR_LOGGING) == 0)
-                (*pr)("\tno log\n");
-        else {
-                pr_printlog(pp, NULL, pr);
-        }

  skip_log:

@@ -1893 +1623 @@ pool_print1(struct pool *pp, const char
         if (pc != NULL) {
                 cpuhit = 0;
                 cpumiss = 0;
-                for (i = 0; i < MAXCPUS; i++) {
+                for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
                         if ((cc = pc->pc_cpus[i]) == NULL)
                                 continue;
                         cpuhit += cc->cc_hits;
@@ -1921 +1651 @@ pool_print1(struct pool *pp, const char
                 }
         }
 #undef PR_GROUPLIST
-
-        pr_enter_check(pp, pr);
 }

 static int
@@ -2055 +1783 @@ pool_cache_bootstrap(pool_cache_t pc, si
         struct pool *pp;

         pp = &pc->pc_pool;
-        if (palloc == NULL && ipl == IPL_NONE)
-                palloc = &pool_allocator_nointr;
+        if (palloc == NULL && ipl == IPL_NONE) {
+                if (size > PAGE_SIZE) {
+                        int bigidx = pool_bigidx(size);
+
+                        palloc = &pool_allocator_big[bigidx];
+                } else
+                        palloc = &pool_allocator_nointr;
+        }
         pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
         mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);

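[Note] With the change above, pool_cache_bootstrap() transparently picks a pool_allocator_big[] slot for objects larger than PAGE_SIZE. For context, a hedged sketch of a typical pool_cache(9) client with a constructor/destructor pair — foo_cache and friends are hypothetical:

    static pool_cache_t foo_cache;

    static int
    foo_ctor(void *arg, void *obj, int flags)
    {
            memset(obj, 0, sizeof(struct foo));
            return 0;       /* nonzero would make pool_cache_get() fail */
    }

    static void
    foo_dtor(void *arg, void *obj)
    {
            /* undo whatever foo_ctor set up */
    }

    void
    foo_cache_init(void)
    {
            foo_cache = pool_cache_init(sizeof(struct foo), coherency_unit,
                0, 0, "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
    }
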
@@ -2128 +1862 @@ pool_cache_bootstrap(pool_cache_t pc, si
 void
 pool_cache_destroy(pool_cache_t pc)
 {
+
+        pool_cache_bootstrap_destroy(pc);
+        pool_put(&cache_pool, pc);
+}
+
+/*
+ * pool_cache_bootstrap_destroy:
+ *
+ *      Destroy a pool cache.
+ */
+void
+pool_cache_bootstrap_destroy(pool_cache_t pc)
+{
         struct pool *pp = &pc->pc_pool;
-        pool_cache_cpu_t *cc;
-        pcg_t *pcg;
-        int i;
+        u_int i;

         /* Remove it from the global list. */
         mutex_enter(&pool_head_lock);
@@ -2149 +1894 @@ pool_cache_destroy(pool_cache_t pc)
         mutex_exit(&pp->pr_lock);

         /* Destroy per-CPU data */
-        for (i = 0; i < MAXCPUS; i++) {
-                if ((cc = pc->pc_cpus[i]) == NULL)
-                        continue;
-                if ((pcg = cc->cc_current) != &pcg_dummy) {
-                        pcg->pcg_next = NULL;
-                        pool_cache_invalidate_groups(pc, pcg);
-                }
-                if ((pcg = cc->cc_previous) != &pcg_dummy) {
-                        pcg->pcg_next = NULL;
-                        pool_cache_invalidate_groups(pc, pcg);
-                }
-                if (cc != &pc->pc_cpu0)
-                        pool_put(&cache_cpu_pool, cc);
-        }
+        for (i = 0; i < __arraycount(pc->pc_cpus); i++)
+                pool_cache_invalidate_cpu(pc, i);

         /* Finally, destroy it. */
         mutex_destroy(&pc->pc_lock);
         pool_destroy(pp);
-        pool_put(&cache_pool, pc);
 }

 /*
@@ -2183 +1915 @@ pool_cache_cpu_init1(struct cpu_info *ci

         index = ci->ci_index;

-        KASSERT(index < MAXCPUS);
+        KASSERT(index < __arraycount(pc->pc_cpus));

         if ((cc = pc->pc_cpus[index]) != NULL) {
                 KASSERT(cc->cc_cpuindex == index);
@@ -2309 +2041 @@ pool_cache_invalidate_groups(pool_cache_
  *
  *      Invalidate a pool cache (destruct and release all of the
  *      cached objects).  Does not reclaim objects from the pool.
+ *
+ *      Note: For pool caches that provide constructed objects, there
+ *      is an assumption that another level of synchronization is occurring
+ *      between the input to the constructor and the cache invalidation.
+ *
+ *      Invalidation is a costly process and should not be called from
+ *      interrupt context.
  */
 void
 pool_cache_invalidate(pool_cache_t pc)
 {
+        uint64_t where;
         pcg_t *full, *empty, *part;

+        KASSERT(!cpu_intr_p() && !cpu_softintr_p());
+
+        if (ncpu < 2 || !mp_online) {
+                /*
+                 * We might be called early enough in the boot process
+                 * for the CPU data structures to not be fully initialized.
+                 * In this case, transfer the content of the local CPU's
+                 * cache back into global cache as only this CPU is currently
+                 * running.
+                 */
+                pool_cache_transfer(pc);
+        } else {
+                /*
+                 * Signal all CPUs that they must transfer their local
+                 * cache back to the global pool then wait for the xcall to
+                 * complete.
+                 */
+                where = xc_broadcast(0, (xcfunc_t)pool_cache_transfer,
+                    pc, NULL);
+                xc_wait(where);
+        }
+
+        /* Empty pool caches, then invalidate objects */
         mutex_enter(&pc->pc_lock);
         full = pc->pc_fullgroups;
         empty = pc->pc_emptygroups;
@@ -2332 +2095 @@ pool_cache_invalidate(pool_cache_t pc)
         pool_cache_invalidate_groups(pc, part);
 }
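
[Note] pool_cache_invalidate() now pushes every CPU's local magazines back to the global lists before invalidating, using the xcall(9) broadcast-then-wait idiom. That idiom in isolation (my_func and my_arg are hypothetical):

    static void
    my_func(void *arg1, void *arg2)
    {
            /* runs once on every CPU, in thread context */
    }

    static void
    run_on_all_cpus(void *my_arg)
    {
            uint64_t where;

            where = xc_broadcast(0, my_func, my_arg, NULL);
            xc_wait(where); /* returns after every CPU ran my_func */
    }

The `ncpu < 2 || !mp_online` branch exists because early in boot the cross-call machinery is not yet available, and a single running CPU can simply transfer its own cache directly.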

+/*
+ * pool_cache_invalidate_cpu:
+ *
+ *      Invalidate all CPU-bound cached objects in pool cache, the CPU being
+ *      identified by its associated index.
+ *      It is caller's responsibility to ensure that no operation is
+ *      taking place on this pool cache while doing this invalidation.
+ *      WARNING: as no inter-CPU locking is enforced, trying to invalidate
+ *      pool cached objects from a CPU different from the one currently running
+ *      may result in an undefined behaviour.
+ */
+static void
+pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
+{
+        pool_cache_cpu_t *cc;
+        pcg_t *pcg;
+
+        if ((cc = pc->pc_cpus[index]) == NULL)
+                return;
+
+        if ((pcg = cc->cc_current) != &pcg_dummy) {
+                pcg->pcg_next = NULL;
+                pool_cache_invalidate_groups(pc, pcg);
+        }
+        if ((pcg = cc->cc_previous) != &pcg_dummy) {
+                pcg->pcg_next = NULL;
+                pool_cache_invalidate_groups(pc, pcg);
+        }
+        if (cc != &pc->pc_cpu0)
+                pool_put(&cache_cpu_pool, cc);
+
+}

 void
 pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
 {
@@ -2427 +2223 @@ pool_cache_get_slow(pool_cache_cpu_t *cc

         object = pool_get(&pc->pc_pool, flags);
         *objectp = object;
-        if (__predict_false(object == NULL))
+        if (__predict_false(object == NULL)) {
+                KASSERT((flags & (PR_WAITOK|PR_NOWAIT)) == PR_NOWAIT);
                 return false;
+        }

         if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
                 pool_put(&pc->pc_pool, object);
@@ -2448 +2246 @@ pool_cache_get_slow(pool_cache_cpu_t *cc
         }

         FREECHECK_OUT(&pc->pc_freecheck, object);
+        pool_redzone_fill(&pc->pc_pool, object);
         return false;
 }
|
|
Line 2465 pool_cache_get_paddr(pool_cache_t pc, in |
|
Line 2264 pool_cache_get_paddr(pool_cache_t pc, in |
|
void *object; |
void *object; |
int s; |
int s; |
|
|
#ifdef LOCKDEBUG |
KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK)); |
|
KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) || |
|
(pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL), |
|
"%s: [%s] is IPL_NONE, but called from interrupt context", |
|
__func__, pc->pc_pool.pr_wchan); |
|
|
if (flags & PR_WAITOK) { |
if (flags & PR_WAITOK) { |
ASSERT_SLEEPABLE(); |
ASSERT_SLEEPABLE(); |
} |
} |
#endif |
|
|
|
/* Lock out interrupts and disable preemption. */ |
/* Lock out interrupts and disable preemption. */ |
s = splvm(); |
s = splvm(); |
Line 2490 pool_cache_get_paddr(pool_cache_t pc, in |
|
Line 2293 pool_cache_get_paddr(pool_cache_t pc, in |
|
cc->cc_hits++; |
cc->cc_hits++; |
splx(s); |
splx(s); |
FREECHECK_OUT(&pc->pc_freecheck, object); |
FREECHECK_OUT(&pc->pc_freecheck, object); |
|
pool_redzone_fill(&pc->pc_pool, object); |
return object; |
return object; |
} |
} |
|
|
Line 2514 pool_cache_get_paddr(pool_cache_t pc, in |
|
Line 2318 pool_cache_get_paddr(pool_cache_t pc, in |
|
break; |
break; |
} |
} |
|
|
|
/* |
|
* We would like to KASSERT(object || (flags & PR_NOWAIT)), but |
|
* pool_cache_get can fail even in the PR_WAITOK case, if the |
|
* constructor fails. |
|
*/ |
return object; |
return object; |
} |
} |
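
As the comment above says, even PR_WAITOK callers must handle NULL, because
a failing constructor is reported as an allocation failure. A hedged usage
sketch (the "frobcache" cache and frob type are illustrative):

	struct frob *f = pool_cache_get(frobcache, PR_WAITOK);
	if (f == NULL)
		return ENOMEM;	/* ctor failed; the backing pool_get() slept */
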
|
|
static bool __noinline |
static bool __noinline |
pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object) |
pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object) |
{ |
{ |
|
struct lwp *l = curlwp; |
pcg_t *pcg, *cur; |
pcg_t *pcg, *cur; |
uint64_t ncsw; |
uint64_t ncsw; |
pool_cache_t pc; |
pool_cache_t pc; |
Line 2530 pool_cache_put_slow(pool_cache_cpu_t *cc |
|
Line 2340 pool_cache_put_slow(pool_cache_cpu_t *cc |
|
pc = cc->cc_cache; |
pc = cc->cc_cache; |
pcg = NULL; |
pcg = NULL; |
cc->cc_misses++; |
cc->cc_misses++; |
|
ncsw = l->l_ncsw; |
|
|
/* |
/* |
* If there are no empty groups in the cache then allocate one |
* If there are no empty groups in the cache then allocate one |
Line 2539 pool_cache_put_slow(pool_cache_cpu_t *cc |
|
Line 2350 pool_cache_put_slow(pool_cache_cpu_t *cc |
|
if (__predict_true(!pool_cache_disable)) { |
if (__predict_true(!pool_cache_disable)) { |
pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT); |
pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT); |
} |
} |
|
/* |
|
* If pool_get() blocked, then our view of |
|
* the per-CPU data is invalid: retry. |
|
*/ |
|
if (__predict_false(l->l_ncsw != ncsw)) { |
|
if (pcg != NULL) { |
|
pool_put(pc->pc_pcgpool, pcg); |
|
} |
|
return true; |
|
} |
if (__predict_true(pcg != NULL)) { |
if (__predict_true(pcg != NULL)) { |
pcg->pcg_avail = 0; |
pcg->pcg_avail = 0; |
pcg->pcg_size = pc->pc_pcgsize; |
pcg->pcg_size = pc->pc_pcgsize; |
Line 2547 pool_cache_put_slow(pool_cache_cpu_t *cc |
|
Line 2368 pool_cache_put_slow(pool_cache_cpu_t *cc |
|
|
|
/* Lock the cache. */ |
/* Lock the cache. */ |
if (__predict_false(!mutex_tryenter(&pc->pc_lock))) { |
if (__predict_false(!mutex_tryenter(&pc->pc_lock))) { |
ncsw = curlwp->l_ncsw; |
|
mutex_enter(&pc->pc_lock); |
mutex_enter(&pc->pc_lock); |
pc->pc_contended++; |
pc->pc_contended++; |
|
|
Line 2555 pool_cache_put_slow(pool_cache_cpu_t *cc |
|
Line 2375 pool_cache_put_slow(pool_cache_cpu_t *cc |
|
* If we context switched while locking, then our view of |
* If we context switched while locking, then our view of |
* the per-CPU data is invalid: retry. |
* the per-CPU data is invalid: retry. |
*/ |
*/ |
if (__predict_false(curlwp->l_ncsw != ncsw)) { |
if (__predict_false(l->l_ncsw != ncsw)) { |
mutex_exit(&pc->pc_lock); |
mutex_exit(&pc->pc_lock); |
if (pcg != NULL) { |
if (pcg != NULL) { |
pool_put(pc->pc_pcgpool, pcg); |
pool_put(pc->pc_pcgpool, pcg); |
Line 2621 pool_cache_put_paddr(pool_cache_t pc, vo |
|
Line 2441 pool_cache_put_paddr(pool_cache_t pc, vo |
|
pcg_t *pcg; |
pcg_t *pcg; |
int s; |
int s; |
|
|
|
KASSERT(object != NULL); |
|
pool_redzone_check(&pc->pc_pool, object); |
FREECHECK_IN(&pc->pc_freecheck, object); |
FREECHECK_IN(&pc->pc_freecheck, object); |
|
|
/* Lock out interrupts and disable preemption. */ |
/* Lock out interrupts and disable preemption. */ |
Line 2661 pool_cache_put_paddr(pool_cache_t pc, vo |
|
Line 2483 pool_cache_put_paddr(pool_cache_t pc, vo |
|
} |
} |
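
Both slow paths above rely on the same context-switch detector: snapshot
curlwp->l_ncsw before any call that may block, and if the counter moved,
treat every cached per-CPU pointer as stale and retry. A generic sketch of
the idiom (alloc_may_block() and undo() are illustrative placeholders):

	void *res;
	uint64_t ncsw;

 retry:
	ncsw = curlwp->l_ncsw;
	res = alloc_may_block();		/* anything that can sleep */
	if (__predict_false(curlwp->l_ncsw != ncsw)) {
		if (res != NULL)
			undo(res);		/* give the result back... */
		goto retry;			/* ...and re-read per-CPU state */
	}
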
|
|
/* |
/* |
* pool_cache_xcall: |
* pool_cache_transfer: |
* |
* |
* Transfer objects from the per-CPU cache to the global cache. |
* Transfer objects from the per-CPU cache to the global cache. |
* Run within a cross-call thread. |
* Run within a cross-call thread. |
*/ |
*/ |
static void |
static void |
pool_cache_xcall(pool_cache_t pc) |
pool_cache_transfer(pool_cache_t pc) |
{ |
{ |
pool_cache_cpu_t *cc; |
pool_cache_cpu_t *cc; |
pcg_t *prev, *cur, **list; |
pcg_t *prev, *cur, **list; |
Line 2730 void pool_page_free(struct pool *, void |
|
Line 2552 void pool_page_free(struct pool *, void |
|
|
|
#ifdef POOL_SUBPAGE |
#ifdef POOL_SUBPAGE |
struct pool_allocator pool_allocator_kmem_fullpage = { |
struct pool_allocator pool_allocator_kmem_fullpage = { |
pool_page_alloc, pool_page_free, 0, |
.pa_alloc = pool_page_alloc, |
.pa_backingmapptr = &kmem_map, |
.pa_free = pool_page_free, |
|
.pa_pagesz = 0 |
}; |
}; |
#else |
#else |
struct pool_allocator pool_allocator_kmem = { |
struct pool_allocator pool_allocator_kmem = { |
pool_page_alloc, pool_page_free, 0, |
.pa_alloc = pool_page_alloc, |
.pa_backingmapptr = &kmem_map, |
.pa_free = pool_page_free, |
|
.pa_pagesz = 0 |
}; |
}; |
#endif |
#endif |
|
|
void *pool_page_alloc_nointr(struct pool *, int); |
|
void pool_page_free_nointr(struct pool *, void *); |
|
|
|
#ifdef POOL_SUBPAGE |
#ifdef POOL_SUBPAGE |
struct pool_allocator pool_allocator_nointr_fullpage = { |
struct pool_allocator pool_allocator_nointr_fullpage = { |
pool_page_alloc_nointr, pool_page_free_nointr, 0, |
.pa_alloc = pool_page_alloc, |
.pa_backingmapptr = &kernel_map, |
.pa_free = pool_page_free, |
|
.pa_pagesz = 0 |
}; |
}; |
#else |
#else |
struct pool_allocator pool_allocator_nointr = { |
struct pool_allocator pool_allocator_nointr = { |
pool_page_alloc_nointr, pool_page_free_nointr, 0, |
.pa_alloc = pool_page_alloc, |
.pa_backingmapptr = &kernel_map, |
.pa_free = pool_page_free, |
|
.pa_pagesz = 0 |
}; |
}; |
#endif |
#endif |
|
|
Line 2760 void *pool_subpage_alloc(struct pool *, |
|
Line 2583 void *pool_subpage_alloc(struct pool *, |
|
void pool_subpage_free(struct pool *, void *); |
void pool_subpage_free(struct pool *, void *); |
|
|
struct pool_allocator pool_allocator_kmem = { |
struct pool_allocator pool_allocator_kmem = { |
pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE, |
.pa_alloc = pool_subpage_alloc, |
.pa_backingmapptr = &kmem_map, |
.pa_free = pool_subpage_free, |
|
.pa_pagesz = POOL_SUBPAGE |
}; |
}; |
|
|
void *pool_subpage_alloc_nointr(struct pool *, int); |
|
void pool_subpage_free_nointr(struct pool *, void *); |
|
|
|
struct pool_allocator pool_allocator_nointr = { |
struct pool_allocator pool_allocator_nointr = { |
pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE, |
.pa_alloc = pool_subpage_alloc, |
.pa_backingmapptr = &kmem_map, |
.pa_free = pool_subpage_free, |
|
.pa_pagesz = POOL_SUBPAGE |
}; |
}; |
#endif /* POOL_SUBPAGE */ |
#endif /* POOL_SUBPAGE */ |
|
|
|
struct pool_allocator pool_allocator_big[] = { |
|
{ |
|
.pa_alloc = pool_page_alloc, |
|
.pa_free = pool_page_free, |
|
.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 0), |
|
}, |
|
{ |
|
.pa_alloc = pool_page_alloc, |
|
.pa_free = pool_page_free, |
|
.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 1), |
|
}, |
|
{ |
|
.pa_alloc = pool_page_alloc, |
|
.pa_free = pool_page_free, |
|
.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 2), |
|
}, |
|
{ |
|
.pa_alloc = pool_page_alloc, |
|
.pa_free = pool_page_free, |
|
.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 3), |
|
}, |
|
{ |
|
.pa_alloc = pool_page_alloc, |
|
.pa_free = pool_page_free, |
|
.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 4), |
|
}, |
|
{ |
|
.pa_alloc = pool_page_alloc, |
|
.pa_free = pool_page_free, |
|
.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 5), |
|
}, |
|
{ |
|
.pa_alloc = pool_page_alloc, |
|
.pa_free = pool_page_free, |
|
.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 6), |
|
}, |
|
{ |
|
.pa_alloc = pool_page_alloc, |
|
.pa_free = pool_page_free, |
|
.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 7), |
|
} |
|
}; |
|
|
|
static int |
|
pool_bigidx(size_t size) |
|
{ |
|
int i; |
|
|
|
for (i = 0; i < __arraycount(pool_allocator_big); i++) { |
|
if (1 << (i + POOL_ALLOCATOR_BIG_BASE) >= size) |
|
return i; |
|
} |
|
panic("pool item size %zu too large, use a custom allocator", size); |
|
} |
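
A worked example of the index search, assuming POOL_ALLOCATOR_BIG_BASE is 13
(the constant is defined earlier in this file; 13 would make the smallest
"big" page 8 KiB):

	size = 40000
	i = 0: 1 << 13 =  8192  < 40000
	i = 1: 1 << 14 = 16384  < 40000
	i = 2: 1 << 15 = 32768  < 40000
	i = 3: 1 << 16 = 65536 >= 40000  -> use pool_allocator_big[3]

Anything above 1 << (POOL_ALLOCATOR_BIG_BASE + 7) falls through to the
panic, matching the eight entries in pool_allocator_big[].
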
|
|
static void * |
static void * |
pool_allocator_alloc(struct pool *pp, int flags) |
pool_allocator_alloc(struct pool *pp, int flags) |
{ |
{ |
Line 2805 pool_allocator_free(struct pool *pp, voi |
|
Line 2682 pool_allocator_free(struct pool *pp, voi |
|
void * |
void * |
pool_page_alloc(struct pool *pp, int flags) |
pool_page_alloc(struct pool *pp, int flags) |
{ |
{ |
bool waitok = (flags & PR_WAITOK) ? true : false; |
const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP; |
|
vmem_addr_t va; |
|
int ret; |
|
|
return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok)); |
ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz, |
|
vflags | VM_INSTANTFIT, &va); |
|
|
|
return ret ? NULL : (void *)va; |
} |
} |
|
|
void |
void |
pool_page_free(struct pool *pp, void *v) |
pool_page_free(struct pool *pp, void *v) |
{ |
{ |
|
|
uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v); |
uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz); |
} |
} |
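
The new allocators follow the vmem calling convention: an errno-style return
value plus an out-parameter for the address, so no address bits have to be
sacrificed for an error cookie. A sketch of the pattern against a
hypothetical arena ("arena" and "size" are assumed locals):

	vmem_addr_t va;
	int error;

	error = vmem_alloc(arena, size, VM_NOSLEEP | VM_INSTANTFIT, &va);
	if (error != 0)
		return NULL;		/* e.g. ENOMEM under VM_NOSLEEP */
	return (void *)va;
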
|
|
static void * |
static void * |
pool_page_alloc_meta(struct pool *pp, int flags) |
pool_page_alloc_meta(struct pool *pp, int flags) |
{ |
{ |
bool waitok = (flags & PR_WAITOK) ? true : false; |
const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP; |
|
vmem_addr_t va; |
|
int ret; |
|
|
|
ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz, |
|
vflags | VM_INSTANTFIT, &va); |
|
|
return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok)); |
return ret ? NULL : (void *)va; |
} |
} |
|
|
static void |
static void |
pool_page_free_meta(struct pool *pp, void *v) |
pool_page_free_meta(struct pool *pp, void *v) |
{ |
{ |
|
|
uvm_km_free_poolpage(kmem_map, (vaddr_t) v); |
vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz); |
} |
} |
|
|
#ifdef POOL_SUBPAGE |
#ifdef POOL_REDZONE |
/* Sub-page allocator, for machines with large hardware pages. */ |
#if defined(_LP64) |
void * |
# define PRIME 0x9e37fffffffc0000UL |
pool_subpage_alloc(struct pool *pp, int flags) |
#else /* defined(_LP64) */ |
|
# define PRIME 0x9e3779b1 |
|
#endif /* defined(_LP64) */ |
|
#define STATIC_BYTE 0xFE |
|
CTASSERT(POOL_REDZONE_SIZE > 1); |
|
|
|
static inline uint8_t |
|
pool_pattern_generate(const void *p) |
{ |
{ |
return pool_get(&psppool, flags); |
return (uint8_t)(((uintptr_t)p) * PRIME |
|
>> ((sizeof(uintptr_t) - sizeof(uint8_t))) * CHAR_BIT); |
} |
} |
|
|
void |
static void |
pool_subpage_free(struct pool *pp, void *v) |
pool_redzone_init(struct pool *pp, size_t requested_size) |
{ |
{ |
pool_put(&psppool, v); |
size_t nsz; |
|
|
|
if (pp->pr_roflags & PR_NOTOUCH) { |
|
pp->pr_reqsize = 0; |
|
pp->pr_redzone = false; |
|
return; |
|
} |
|
|
|
/* |
|
* We may have extended the requested size earlier; check if |
|
* there's naturally space in the padding for a red zone. |
|
*/ |
|
if (pp->pr_size - requested_size >= POOL_REDZONE_SIZE) { |
|
pp->pr_reqsize = requested_size; |
|
pp->pr_redzone = true; |
|
return; |
|
} |
|
|
|
/* |
|
* No space in the natural padding; check whether we can extend |
 |
* the pool size slightly. |
|
*/ |
|
nsz = roundup(pp->pr_size + POOL_REDZONE_SIZE, pp->pr_align); |
|
if (nsz <= pp->pr_alloc->pa_pagesz) { |
|
/* Ok, we can */ |
|
pp->pr_size = nsz; |
|
pp->pr_reqsize = requested_size; |
|
pp->pr_redzone = true; |
|
} else { |
|
/* No space for a red zone... snif :'( */ |
|
pp->pr_reqsize = 0; |
|
pp->pr_redzone = false; |
|
printf("pool redzone disabled for '%s'\n", pp->pr_wchan); |
|
} |
} |
} |
|
|
/* We don't provide a real nointr allocator. Maybe later. */ |
static void |
void * |
pool_redzone_fill(struct pool *pp, void *p) |
pool_subpage_alloc_nointr(struct pool *pp, int flags) |
|
{ |
{ |
|
uint8_t *cp, pat; |
|
const uint8_t *ep; |
|
|
|
if (!pp->pr_redzone) |
|
return; |
|
|
return (pool_subpage_alloc(pp, flags)); |
cp = (uint8_t *)p + pp->pr_reqsize; |
|
ep = cp + POOL_REDZONE_SIZE; |
|
|
|
/* |
|
* We really don't want the first byte of the red zone to be '\0'; |
|
* an off-by-one in a string may not be properly detected. |
|
*/ |
|
pat = pool_pattern_generate(cp); |
|
*cp = (pat == '\0') ? STATIC_BYTE: pat; |
|
cp++; |
|
|
|
while (cp < ep) { |
|
*cp = pool_pattern_generate(cp); |
|
cp++; |
|
} |
} |
} |
|
|
void |
static void |
pool_subpage_free_nointr(struct pool *pp, void *v) |
pool_redzone_check(struct pool *pp, void *p) |
{ |
{ |
|
uint8_t *cp, pat, expected; |
|
const uint8_t *ep; |
|
|
pool_subpage_free(pp, v); |
if (!pp->pr_redzone) |
|
return; |
|
|
|
cp = (uint8_t *)p + pp->pr_reqsize; |
|
ep = cp + POOL_REDZONE_SIZE; |
|
|
|
pat = pool_pattern_generate(cp); |
|
expected = (pat == '\0') ? STATIC_BYTE: pat; |
|
if (expected != *cp) { |
|
panic("%s: %p: 0x%02x != 0x%02x\n", |
|
__func__, cp, *cp, expected); |
|
} |
|
cp++; |
|
|
|
while (cp < ep) { |
|
expected = pool_pattern_generate(cp); |
|
if (*cp != expected) { |
|
panic("%s: %p: 0x%02x != 0x%02x\n", |
|
__func__, cp, *cp, expected); |
|
} |
|
cp++; |
|
} |
} |
} |
#endif /* POOL_SUBPAGE */ |
|
|
#endif /* POOL_REDZONE */ |
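
The fill/check pair can be exercised outside the kernel; a minimal userland
sketch of the same scheme (constants copied from above, all demo_* names
invented for the demo, LP64 assumed):

	#include <assert.h>
	#include <limits.h>
	#include <stdint.h>
	#include <stdlib.h>

	#define DEMO_PRIME   0x9e37fffffffc0000UL	/* _LP64 value */
	#define DEMO_REDZONE 2

	static uint8_t
	demo_pat(const void *p)
	{
		return (uint8_t)((uintptr_t)p * DEMO_PRIME >>
		    (sizeof(uintptr_t) - 1) * CHAR_BIT);
	}

	int
	main(void)
	{
		uint8_t *obj = malloc(32 + DEMO_REDZONE);
		uint8_t *rz;
		uint8_t pat;

		if (obj == NULL)
			return 1;
		rz = obj + 32;			/* red zone follows the object */
		pat = demo_pat(&rz[0]);

		rz[0] = (pat == '\0') ? 0xFE : pat;	/* never start with 0 */
		for (int i = 1; i < DEMO_REDZONE; i++)
			rz[i] = demo_pat(&rz[i]);

		rz[0] ^= 0x01;			/* simulate a one-byte overrun */
		assert(rz[0] == ((pat == '\0') ? 0xFE : pat));	/* aborts here */
		return 0;
	}
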
|
|
|
|
|
#ifdef POOL_SUBPAGE |
|
/* Sub-page allocator, for machines with large hardware pages. */ |
void * |
void * |
pool_page_alloc_nointr(struct pool *pp, int flags) |
pool_subpage_alloc(struct pool *pp, int flags) |
{ |
{ |
bool waitok = (flags & PR_WAITOK) ? true : false; |
return pool_get(&psppool, flags); |
|
|
return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok)); |
|
} |
} |
|
|
void |
void |
pool_page_free_nointr(struct pool *pp, void *v) |
pool_subpage_free(struct pool *pp, void *v) |
{ |
{ |
|
pool_put(&psppool, v); |
uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v); |
|
} |
} |
|
|
|
#endif /* POOL_SUBPAGE */ |
|
|
#if defined(DDB) |
#if defined(DDB) |
static bool |
static bool |
pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr) |
pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr) |
|
|
goto print; |
goto print; |
} |
} |
} |
} |
for (i = 0; i < MAXCPUS; i++) { |
for (i = 0; i < __arraycount(pc->pc_cpus); i++) { |
pool_cache_cpu_t *cc; |
pool_cache_cpu_t *cc; |
|
|
if ((cc = pc->pc_cpus[i]) == NULL) { |
if ((cc = pc->pc_cpus[i]) == NULL) { |
|
|
} |
} |
} |
} |
#endif /* defined(DDB) */ |
#endif /* defined(DDB) */ |
|
|
|
static int |
|
pool_sysctl(SYSCTLFN_ARGS) |
|
{ |
|
struct pool_sysctl data; |
|
struct pool *pp; |
|
struct pool_cache *pc; |
|
pool_cache_cpu_t *cc; |
|
int error; |
|
size_t i, written; |
|
|
|
if (oldp == NULL) { |
|
*oldlenp = 0; |
|
TAILQ_FOREACH(pp, &pool_head, pr_poollist) |
|
*oldlenp += sizeof(data); |
|
return 0; |
|
} |
|
|
|
memset(&data, 0, sizeof(data)); |
|
error = 0; |
|
written = 0; |
|
TAILQ_FOREACH(pp, &pool_head, pr_poollist) { |
|
if (written + sizeof(data) > *oldlenp) |
|
break; |
|
strlcpy(data.pr_wchan, pp->pr_wchan, sizeof(data.pr_wchan)); |
|
data.pr_pagesize = pp->pr_alloc->pa_pagesz; |
|
data.pr_flags = pp->pr_roflags | pp->pr_flags; |
|
#define COPY(field) data.field = pp->field |
|
COPY(pr_size); |
|
|
|
COPY(pr_itemsperpage); |
|
COPY(pr_nitems); |
|
COPY(pr_nout); |
|
COPY(pr_hardlimit); |
|
COPY(pr_npages); |
|
COPY(pr_minpages); |
|
COPY(pr_maxpages); |
|
|
|
COPY(pr_nget); |
|
COPY(pr_nfail); |
|
COPY(pr_nput); |
|
COPY(pr_npagealloc); |
|
COPY(pr_npagefree); |
|
COPY(pr_hiwat); |
|
COPY(pr_nidle); |
|
#undef COPY |
|
|
|
data.pr_cache_nmiss_pcpu = 0; |
|
data.pr_cache_nhit_pcpu = 0; |
|
if (pp->pr_cache) { |
|
pc = pp->pr_cache; |
|
data.pr_cache_meta_size = pc->pc_pcgsize; |
|
data.pr_cache_nfull = pc->pc_nfull; |
|
data.pr_cache_npartial = pc->pc_npart; |
|
data.pr_cache_nempty = pc->pc_nempty; |
|
data.pr_cache_ncontended = pc->pc_contended; |
|
data.pr_cache_nmiss_global = pc->pc_misses; |
|
data.pr_cache_nhit_global = pc->pc_hits; |
|
for (i = 0; i < pc->pc_ncpu; ++i) { |
|
cc = pc->pc_cpus[i]; |
|
if (cc == NULL) |
|
continue; |
|
data.pr_cache_nmiss_pcpu += cc->cc_misses; |
|
data.pr_cache_nhit_pcpu += cc->cc_hits; |
|
} |
|
} else { |
|
data.pr_cache_meta_size = 0; |
|
data.pr_cache_nfull = 0; |
|
data.pr_cache_npartial = 0; |
|
data.pr_cache_nempty = 0; |
|
data.pr_cache_ncontended = 0; |
|
data.pr_cache_nmiss_global = 0; |
|
data.pr_cache_nhit_global = 0; |
|
} |
|
|
|
error = sysctl_copyout(l, &data, oldp, sizeof(data)); |
|
if (error) |
|
break; |
|
written += sizeof(data); |
|
oldp = (char *)oldp + sizeof(data); |
|
} |
|
|
|
*oldlenp = written; |
|
return error; |
|
} |
|
|
|
SYSCTL_SETUP(sysctl_pool_setup, "sysctl kern.pool setup") |
|
{ |
|
const struct sysctlnode *rnode = NULL; |
|
|
|
sysctl_createv(clog, 0, NULL, &rnode, |
|
CTLFLAG_PERMANENT, |
|
CTLTYPE_STRUCT, "pool", |
|
SYSCTL_DESCR("Get pool statistics"), |
|
pool_sysctl, 0, NULL, 0, |
|
CTL_KERN, CTL_CREATE, CTL_EOL); |
|
} |
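
A userland consumer reads the node created above by name; this is
essentially what 'vmstat -m' does. A sketch (struct pool_sysctl and its
pr_* fields come from <sys/pool.h>, matching the kernel copy-out above):

	#include <sys/sysctl.h>
	#include <sys/pool.h>
	#include <inttypes.h>
	#include <stdio.h>
	#include <stdlib.h>

	int
	main(void)
	{
		size_t len;
		struct pool_sysctl *ps;

		/* First call sizes the buffer, second fills it. */
		if (sysctlbyname("kern.pool", NULL, &len, NULL, 0) == -1)
			return 1;
		if ((ps = malloc(len)) == NULL ||
		    sysctlbyname("kern.pool", ps, &len, NULL, 0) == -1)
			return 1;
		for (size_t i = 0; i < len / sizeof(*ps); i++)
			printf("%-15s size %" PRIu64 " nget %" PRIu64 "\n",
			    ps[i].pr_wchan, ps[i].pr_size, ps[i].pr_nget);
		free(ps);
		return 0;
	}
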