--- version 1.138.2.3, 2007/12/13 05:06:01
+++ version 1.151.6.4, 2009/01/17 13:29:19

 /*	$NetBSD$	*/

 /*-
- * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc.
+ * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation

  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *        This product includes software developed by the NetBSD
- *        Foundation, Inc. and its contributors.
- * 4. Neither the name of The NetBSD Foundation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
@@ -52 +45 @@ __KERNEL_RCSID(0, "$NetBSD$");
 #include <sys/errno.h>
 #include <sys/kernel.h>
 #include <sys/malloc.h>
-#include <sys/lock.h>
 #include <sys/pool.h>
 #include <sys/syslog.h>
 #include <sys/debug.h>
 #include <sys/lockdebug.h>
 #include <sys/xcall.h>
 #include <sys/cpu.h>
+#include <sys/atomic.h>

 #include <uvm/uvm.h>

@@ -76 +69 @@ __KERNEL_RCSID(0, "$NetBSD$");
  */

 /* List of all pools */
-LIST_HEAD(,pool) pool_head = LIST_HEAD_INITIALIZER(pool_head);
+TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

-/* List of all caches. */
-LIST_HEAD(,pool_cache) pool_cache_head =
-    LIST_HEAD_INITIALIZER(pool_cache_head);

 /* Private pool for page header structures */
 #define PHPOOL_MAX 8
@@ -102 +91 @@ static void pool_page_free_meta(struct p

 /* allocator for pool metadata */
 struct pool_allocator pool_allocator_meta = {
        pool_page_alloc_meta, pool_page_free_meta,
-       .pa_backingmapptr = &kernel_map,
+       .pa_backingmapptr = &kmem_map,
 };

 /* # of seconds to retain page after last use */
@@ -126 +115 @@ struct pool_item_header {
        SPLAY_ENTRY(pool_item_header)
                ph_node;        /* Off-page page headers */
        void *          ph_page;        /* this page's address */
-       struct timeval  ph_time;        /* last referenced */
+       uint32_t        ph_time;        /* last referenced */
        uint16_t        ph_nmissing;    /* # of chunks in use */
        uint16_t        ph_off;         /* start offset in page */
        union {
@@ -181 +170 @@ struct pool_item {
  * from it.
  */

-static struct pool pcgpool;
+static struct pool pcg_normal_pool;
+static struct pool pcg_large_pool;
 static struct pool cache_pool;
 static struct pool cache_cpu_pool;

-static pool_cache_cpu_t *pool_cache_put_slow(pool_cache_cpu_t *, int *,
-                           void *, paddr_t);
-static pool_cache_cpu_t *pool_cache_get_slow(pool_cache_cpu_t *, int *,
-                           void **, paddr_t *, int);
+/* List of all caches. */
+TAILQ_HEAD(,pool_cache) pool_cache_head =
+    TAILQ_HEAD_INITIALIZER(pool_cache_head);
+
+int pool_cache_disable;        /* global disable for caching */
+
+static const pcg_t pcg_dummy;  /* zero sized: always empty, yet always full */
+
+static bool pool_cache_put_slow(pool_cache_cpu_t *, int,
+                           void *);
+static bool pool_cache_get_slow(pool_cache_cpu_t *, int,
+                           void **, paddr_t *, int);
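
The pcg_dummy sentinel is the key trick in this revision: a zero-sized group has pcg_avail == 0 and pcg_size == 0, so the very same object looks empty to the allocation fast path and full to the free fast path, and neither path needs a NULL check before dereferencing cc_current or cc_previous. A standalone sketch of the idea (simplified types, not the file's real declarations):

    struct pcg_sketch {
            unsigned pcg_avail;     /* objects available to get */
            unsigned pcg_size;      /* capacity of this group */
    };

    static const struct pcg_sketch pcg_dummy_sketch;    /* all zeroes */

    /* get fast path: 0 > 0 is never true, so it falls to the slow path */
    static int
    can_get(const struct pcg_sketch *pcg)
    {
            return pcg->pcg_avail > 0;
    }

    /* put fast path: 0 < 0 is never true either */
    static int
    can_put(const struct pcg_sketch *pcg)
    {
            return pcg->pcg_avail < pcg->pcg_size;
    }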
 static void    pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
 static void    pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
 static void    pool_cache_xcall(pool_cache_t);
@@ -608 +605 @@ pool_subsystem_init(void)
                pa_reclaim_register(pa);
        }

-       pool_init(&cache_pool, sizeof(struct pool_cache), CACHE_LINE_SIZE,
+       pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
            0, 0, "pcache", &pool_allocator_nointr, IPL_NONE);

-       pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), CACHE_LINE_SIZE,
+       pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
            0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE);
 }
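
Replacing the compile-time CACHE_LINE_SIZE with the runtime coherency_unit lets the kernel align these per-CPU structures to the cache-line size of the machine it actually boots on, avoiding false sharing between CPUs. A hedged sketch of the same idea using the compile-time fallback (COHERENCY_UNIT and the __aligned() attribute macro are assumed from <sys/param.h> and <sys/cdefs.h>; the struct is illustrative):

    #include <sys/param.h>          /* COHERENCY_UNIT, pulls in <sys/cdefs.h> */

    /*
     * Illustrative only: pad per-CPU counters out to a full cache line
     * so two CPUs never write to the same line (false sharing).
     */
    struct percpu_counters {
            uint64_t hits;
            uint64_t misses;
    } __aligned(COHERENCY_UNIT);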

 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
     const char *wchan, struct pool_allocator *palloc, int ipl)
 {
-#ifdef DEBUG
        struct pool *pp1;
-#endif
        size_t trysize, phsize;
        int off, slack;

@@ -636 +631 @@ pool_init(struct pool *pp, size_t
         * Check that the pool hasn't already been initialised and
         * added to the list of all pools.
         */
-       LIST_FOREACH(pp1, &pool_head, pr_poollist) {
+       TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
                if (pp == pp1)
                        panic("pool_init: pool %s already initialised",
                            wchan);
@@ -800 +795 @@ pool_init(struct pool *pp, size_t

 #ifdef POOL_DIAGNOSTIC
        if (flags & PR_LOGGING) {
-               if (kernel_map == NULL ||
+               if (kmem_map == NULL ||
                    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
                    M_TEMP, M_NOWAIT)) == NULL)
                        pp->pr_roflags &= ~PR_LOGGING;
@@ -812 +807 @@ pool_init(struct pool *pp, size_t
        pp->pr_entered_file = NULL;
        pp->pr_entered_line = 0;

-       /*
-        * XXXAD hack to prevent IP input processing from blocking.
-        */
-       if (ipl == IPL_SOFTNET) {
-               mutex_init(&pp->pr_lock, MUTEX_DEFAULT, IPL_VM);
-       } else {
-               mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
-       }
+       mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
        cv_init(&pp->pr_cv, wchan);
        pp->pr_ipl = ipl;

@@ -850 +838 @@ pool_init(struct pool *pp, size_t
        pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
            PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
 #endif
-       pool_init(&pcgpool, sizeof(pcg_t), CACHE_LINE_SIZE, 0, 0,
-           "cachegrp", &pool_allocator_meta, IPL_VM);
+       size = sizeof(pcg_t) +
+           (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
+       pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
+           "pcgnormal", &pool_allocator_meta, IPL_VM);
+
+       size = sizeof(pcg_t) +
+           (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
+       pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
+           "pcglarge", &pool_allocator_meta, IPL_VM);
 }
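
The two group pools are sized with the sizeof(pcg_t) + (N - 1) * sizeof(pcgpair_t) idiom: pcg_t declares a one-element object array and the remaining N - 1 slots ride along in the same allocation. A small user-space sketch of the sizing trick (names are illustrative):

    #include <stdlib.h>

    struct pair {
            void *va;
            unsigned long pa;
    };

    struct group {
            unsigned avail;
            unsigned size;
            struct pair objects[1];         /* really 'size' entries */
    };

    static struct group *
    group_alloc(unsigned nobj)
    {
            /* one entry is in the struct already, so nobj - 1 more */
            struct group *g = malloc(sizeof(*g) +
                (nobj - 1) * sizeof(struct pair));

            if (g != NULL) {
                    g->avail = 0;
                    g->size = nobj;
            }
            return g;
    }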

-       if (__predict_true(!cold)) {
-               /* Insert into the list of all pools. */
-               mutex_enter(&pool_head_lock);
-               LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
-               mutex_exit(&pool_head_lock);
-
-               /* Insert this into the list of pools using this allocator. */
-               mutex_enter(&palloc->pa_lock);
-               TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
-               mutex_exit(&palloc->pa_lock);
-       } else {
-               LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
-               TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
-       }
+       /* Insert into the list of all pools. */
+       if (__predict_true(!cold))
+               mutex_enter(&pool_head_lock);
+       TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
+               if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
+                       break;
+       }
+       if (pp1 == NULL)
+               TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
+       else
+               TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
+       if (__predict_true(!cold))
+               mutex_exit(&pool_head_lock);
+
+       /* Insert this into the list of pools using this allocator. */
+       if (__predict_true(!cold))
+               mutex_enter(&palloc->pa_lock);
+       TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
+       if (__predict_true(!cold))
+               mutex_exit(&palloc->pa_lock);

        pool_reclaim_register(pp);
 }
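
pool_head becomes a TAILQ kept sorted by pr_wchan, so tools that walk the pool list see it in name order. The insertion idiom in isolation (user-space sketch with a simplified element type):

    #include <sys/queue.h>
    #include <string.h>

    struct entry {
            const char *name;
            TAILQ_ENTRY(entry) link;
    };
    TAILQ_HEAD(elist, entry);

    /* Insert e keeping the queue sorted by name, as pool_init now does. */
    static void
    sorted_insert(struct elist *head, struct entry *e)
    {
            struct entry *it;

            TAILQ_FOREACH(it, head, link) {
                    if (strcmp(it->name, e->name) > 0)
                            break;
            }
            if (it == NULL)
                    TAILQ_INSERT_TAIL(head, e, link);
            else
                    TAILQ_INSERT_BEFORE(it, e, link);
    }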
@@ -885 +887 @@ pool_destroy(struct pool *pp)
        mutex_enter(&pool_head_lock);
        while (pp->pr_refcnt != 0)
                cv_wait(&pool_busy, &pool_head_lock);
-       LIST_REMOVE(pp, pr_poollist);
+       TAILQ_REMOVE(&pool_head, pp, pr_poollist);
        if (drainpp == pp)
                drainpp = NULL;
        mutex_exit(&pool_head_lock);
@@ -979 +981 @@ pool_get(struct pool *pp, int flags)

 #endif /* DIAGNOSTIC */
 #ifdef LOCKDEBUG
-       if (flags & PR_WAITOK)
-               ASSERT_SLEEPABLE(NULL, "pool_get(PR_WAITOK)");
+       if (flags & PR_WAITOK) {
+               ASSERT_SLEEPABLE();
+       }
 #endif

        mutex_enter(&pp->pr_lock);
@@ -1250 +1253 @@ pool_do_put(struct pool *pp, void *v, st

        if (pp->pr_flags & PR_WANTED) {
                pp->pr_flags &= ~PR_WANTED;
-               if (ph->ph_nmissing == 0)
-                       pp->pr_nidle++;
                cv_broadcast(&pp->pr_cv);
-               return;
        }

        /*
@@ -1272 +1272 @@ pool_do_put(struct pool *pp, void *v, st
        if (ph->ph_nmissing == 0) {
                pp->pr_nidle++;
                if (pp->pr_npages > pp->pr_minpages &&
-                   (pp->pr_npages > pp->pr_maxpages ||
-                    pa_starved_p(pp->pr_alloc))) {
+                   pp->pr_npages > pp->pr_maxpages) {
                        pr_rmpage(pp, ph, pq);
                } else {
                        LIST_REMOVE(ph, ph_pagelist);
@@ -1284 +1283 @@ pool_do_put(struct pool *pp, void *v, st
                         * be idle for some period of time before it can
                         * be reclaimed by the pagedaemon.  This minimizes
                         * ping-pong'ing for memory.
+                        *
+                        * note for 64-bit time_t: truncating to 32-bit is not
+                        * a problem for our usage.
                         */
-                       getmicrotime(&ph->ph_time);
+                       ph->ph_time = time_uptime;
                }
                pool_update_curpage(pp);
        }
@@ -1437 +1439 @@ pool_prime_page(struct pool *pp, void *s
        LIST_INIT(&ph->ph_itemlist);
        ph->ph_page = storage;
        ph->ph_nmissing = 0;
-       getmicrotime(&ph->ph_time);
+       ph->ph_time = time_uptime;
        if ((pp->pr_roflags & PR_PHINPAGE) == 0)
                SPLAY_INSERT(phtree, &pp->pr_phtree, ph);

@@ -1525 +1527 @@ pool_update_curpage(struct pool *pp)
        if (pp->pr_curpage == NULL) {
                pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
        }
+       KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
+           (pp->pr_curpage != NULL && pp->pr_nitems > 0));
 }

 void
@@ -1598 +1602 @@ pool_reclaim(struct pool *pp)
 {
        struct pool_item_header *ph, *phnext;
        struct pool_pagelist pq;
-       struct timeval curtime, diff;
+       uint32_t curtime;
        bool klock;
        int rv;

@@ -1610 +1614 @@ pool_reclaim(struct pool *pp)
        }

        /*
-        * XXXSMP Because mutexes at IPL_SOFTXXX are still spinlocks,
-        * and we are called from the pagedaemon without kernel_lock.
-        * Does not apply to IPL_SOFTBIO.
+        * XXXSMP Because we do not want to cause non-MPSAFE code
+        * to block.
         */
        if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
            pp->pr_ipl == IPL_SOFTSERIAL) {
@@ -1635 +1638 @@ pool_reclaim(struct pool *pp)

        LIST_INIT(&pq);

-       getmicrotime(&curtime);
+       curtime = time_uptime;

        for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
                phnext = LIST_NEXT(ph, ph_pagelist);
@@ -1645 +1648 @@ pool_reclaim(struct pool *pp)
                        break;

                KASSERT(ph->ph_nmissing == 0);
-               timersub(&curtime, &ph->ph_time, &diff);
-               if (diff.tv_sec < pool_inactive_time
+               if (curtime - ph->ph_time < pool_inactive_time
                    && !pa_starved_p(pp->pr_alloc))
                        continue;
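
Storing a uint32_t snapshot of time_uptime reduces the idle test to one unsigned subtraction, and, as the new comment in pool_do_put() notes, truncating a 64-bit time_t to 32 bits is harmless here because only small differences are ever compared. The comparison in isolation:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Sketch: is the page still too young to reclaim?  Unsigned
     * subtraction of uptime snapshots stays correct across 32-bit
     * wraparound as long as the real age fits in 32 bits.
     */
    static bool
    page_is_fresh(uint32_t now, uint32_t last_used, uint32_t min_idle)
    {
            return now - last_used < min_idle;
    }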

@@ -1692 +1694 @@ pool_drain_start(struct pool **ppp, uint
 {
        struct pool *pp;

-       KASSERT(!LIST_EMPTY(&pool_head));
+       KASSERT(!TAILQ_EMPTY(&pool_head));

        pp = NULL;

@@ -1700 +1702 @@ pool_drain_start(struct pool **ppp, uint
        mutex_enter(&pool_head_lock);
        do {
                if (drainpp == NULL) {
-                       drainpp = LIST_FIRST(&pool_head);
+                       drainpp = TAILQ_FIRST(&pool_head);
                }
                if (drainpp != NULL) {
                        pp = drainpp;
-                       drainpp = LIST_NEXT(pp, pr_poollist);
+                       drainpp = TAILQ_NEXT(pp, pr_poollist);
                }
                /*
                 * Skip completely idle pools.  We depend on at least
@@ -1760 +1762 @@ pool_printall(const char *modif, void (*
 {
        struct pool *pp;

-       LIST_FOREACH(pp, &pool_head, pr_poollist) {
+       TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
                pool_printit(pp, modif, pr);
        }
 }
@@ -1787 +1789 @@ pool_print_pagelist(struct pool *pp, str
 #endif

        LIST_FOREACH(ph, pl, ph_pagelist) {
-               (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
-                   ph->ph_page, ph->ph_nmissing,
-                   (u_long)ph->ph_time.tv_sec,
-                   (u_long)ph->ph_time.tv_usec);
+               (*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
+                   ph->ph_page, ph->ph_nmissing, ph->ph_time);
 #ifdef DIAGNOSTIC
                if (!(pp->pr_roflags & PR_NOTOUCH)) {
                        LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
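
With ph_time now a uint32_t, the format string uses the PRIu32 macro so it stays correct however uint32_t is typedef'd on a given port. A minimal standalone illustration:

    #include <inttypes.h>
    #include <stdio.h>

    int
    main(void)
    {
            uint32_t t = 12345;

            /* PRIu32 expands to the right conversion for uint32_t */
            printf("time %" PRIu32 "\n", t);
            return 0;
    }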
@@ -1877 +1877 @@ pool_print1(struct pool *pp, const char

 #define PR_GROUPLIST(pcg) \
        (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
-       for (i = 0; i < PCG_NOBJECTS; i++) { \
+       for (i = 0; i < pcg->pcg_size; i++) { \
                if (pcg->pcg_objects[i].pcgo_pa != \
                    POOL_PADDR_INVALID) { \
                        (*pr)("\t\t\t%p, 0x%llx\n", \
@@ -2050 +2050 @@ pool_cache_bootstrap(pool_cache_t pc, si
     void *arg)
 {
        CPU_INFO_ITERATOR cii;
+       pool_cache_t pc1;
        struct cpu_info *ci;
        struct pool *pp;

@@ -2057 +2058 @@ pool_cache_bootstrap(pool_cache_t pc, si
        if (palloc == NULL && ipl == IPL_NONE)
                palloc = &pool_allocator_nointr;
        pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
-
-       /*
-        * XXXAD hack to prevent IP input processing from blocking.
-        */
-       if (ipl == IPL_SOFTNET) {
-               mutex_init(&pc->pc_lock, MUTEX_DEFAULT, IPL_VM);
-       } else {
-               mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
-       }
+       mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);

        if (ctor == NULL) {
                ctor = (int (*)(void *, void *, int))nullop;
@@ -2089 +2082 @@ pool_cache_bootstrap(pool_cache_t pc, si
        pc->pc_refcnt = 0;
        pc->pc_freecheck = NULL;

+       if ((flags & PR_LARGECACHE) != 0) {
+               pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
+               pc->pc_pcgpool = &pcg_large_pool;
+       } else {
+               pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
+               pc->pc_pcgpool = &pcg_normal_pool;
+       }
+
        /* Allocate per-CPU caches. */
        memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
        pc->pc_ncpu = 0;
@@ -2100 +2101 @@ pool_cache_bootstrap(pool_cache_t pc, si
                        pool_cache_cpu_init1(ci, pc);
                }
        }

-       if (__predict_true(!cold)) {
-               mutex_enter(&pp->pr_lock);
-               pp->pr_cache = pc;
-               mutex_exit(&pp->pr_lock);
-               mutex_enter(&pool_head_lock);
-               LIST_INSERT_HEAD(&pool_cache_head, pc, pc_cachelist);
-               mutex_exit(&pool_head_lock);
-       } else {
-               pp->pr_cache = pc;
-               LIST_INSERT_HEAD(&pool_cache_head, pc, pc_cachelist);
-       }
+       /* Add to list of all pools. */
+       if (__predict_true(!cold))
+               mutex_enter(&pool_head_lock);
+       TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
+               if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
+                       break;
+       }
+       if (pc1 == NULL)
+               TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
+       else
+               TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
+       if (__predict_true(!cold))
+               mutex_exit(&pool_head_lock);
+
+       membar_sync();
+       pp->pr_cache = pc;
 }
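
The tail of pool_cache_bootstrap() now follows an init-then-publish pattern: every field of the cache is initialized first, membar_sync() makes those stores globally visible, and only then is pp->pr_cache set, the pointer through which other CPUs discover the cache. The pattern distilled (a sketch, not the function itself):

    #include <sys/atomic.h>         /* membar_sync() */

    static void
    cache_publish(struct pool *pp, pool_cache_t pc)
    {
            /* ... every field of *pc initialized above this point ... */
            membar_sync();          /* init stores before the pointer store */
            pp->pr_cache = pc;      /* other CPUs may now find the cache */
    }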

 /*
@@ -2122 +2128 @@ pool_cache_bootstrap(pool_cache_t pc, si
 void
 pool_cache_destroy(pool_cache_t pc)
 {
-
-       pool_cache_bootstrap_destroy(pc);
-       pool_put(&cache_pool, pc);
-}
-
-/*
- * pool_cache_bootstrap_destroy:
- *
- *     Kernel-private version of pool_cache_destroy().
- *     Destroy a pool cache initialized by pool_cache_bootstrap.
- */
-void
-pool_cache_bootstrap_destroy(pool_cache_t pc)
-{
        struct pool *pp = &pc->pc_pool;
        pool_cache_cpu_t *cc;
        pcg_t *pcg;
@@ -2145 +2137 @@ pool_cache_bootstrap_destroy(pool_cache_
        mutex_enter(&pool_head_lock);
        while (pc->pc_refcnt != 0)
                cv_wait(&pool_busy, &pool_head_lock);
-       LIST_REMOVE(pc, pc_cachelist);
+       TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
        mutex_exit(&pool_head_lock);

        /* First, invalidate the entire cache. */
@@ -2160 +2152 @@ pool_cache_bootstrap_destroy(pool_cache_
        for (i = 0; i < MAXCPUS; i++) {
                if ((cc = pc->pc_cpus[i]) == NULL)
                        continue;
-               if ((pcg = cc->cc_current) != NULL) {
+               if ((pcg = cc->cc_current) != &pcg_dummy) {
                        pcg->pcg_next = NULL;
                        pool_cache_invalidate_groups(pc, pcg);
                }
-               if ((pcg = cc->cc_previous) != NULL) {
+               if ((pcg = cc->cc_previous) != &pcg_dummy) {
                        pcg->pcg_next = NULL;
                        pool_cache_invalidate_groups(pc, pcg);
                }
@@ -2175 +2167 @@ pool_cache_bootstrap_destroy(pool_cache_
        /* Finally, destroy it. */
        mutex_destroy(&pc->pc_lock);
        pool_destroy(pp);
+       pool_put(&cache_pool, pc);
 }

 /*
@@ -2191 +2184 @@ pool_cache_cpu_init1(struct cpu_info *ci
        index = ci->ci_index;

        KASSERT(index < MAXCPUS);
-       KASSERT(((uintptr_t)pc->pc_cpus & (CACHE_LINE_SIZE - 1)) == 0);

        if ((cc = pc->pc_cpus[index]) != NULL) {
                KASSERT(cc->cc_cpuindex == index);
@@ -2218 +2210 @@ pool_cache_cpu_init1(struct cpu_info *ci
        cc->cc_cpuindex = index;
        cc->cc_hits = 0;
        cc->cc_misses = 0;
-       cc->cc_current = NULL;
-       cc->cc_previous = NULL;
+       cc->cc_current = __UNCONST(&pcg_dummy);
+       cc->cc_previous = __UNCONST(&pcg_dummy);

        pc->pc_cpus[index] = cc;
 }
@@ -2235 +2227 @@ pool_cache_cpu_init(struct cpu_info *ci)
        pool_cache_t pc;

        mutex_enter(&pool_head_lock);
-       LIST_FOREACH(pc, &pool_cache_head, pc_cachelist) {
+       TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
                pc->pc_refcnt++;
                mutex_exit(&pool_head_lock);

@@ -2303 +2295 @@ pool_cache_invalidate_groups(pool_cache_
                        pool_cache_destruct_object1(pc, object);
                }

-               pool_put(&pcgpool, pcg);
+               if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
+                       pool_put(&pcg_large_pool, pcg);
+               } else {
+                       KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
+                       pool_put(&pcg_normal_pool, pcg);
+               }
        }
 }

@@ -2363 +2360 @@ pool_cache_sethardlimit(pool_cache_t pc,
        pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
 }

-static inline pool_cache_cpu_t *
-pool_cache_cpu_enter(pool_cache_t pc, int *s)
-{
-       pool_cache_cpu_t *cc;
-
-       /*
-        * Prevent other users of the cache from accessing our
-        * CPU-local data.  To avoid touching shared state, we
-        * pull the neccessary information from CPU local data.
-        */
-       crit_enter();
-       cc = pc->pc_cpus[curcpu()->ci_index];
-       KASSERT(cc->cc_cache == pc);
-       if (cc->cc_ipl != IPL_NONE) {
-               *s = splraiseipl(cc->cc_iplcookie);
-       }
-       KASSERT(((uintptr_t)cc & (CACHE_LINE_SIZE - 1)) == 0);
-
-       return cc;
-}
-
-static inline void
-pool_cache_cpu_exit(pool_cache_cpu_t *cc, int *s)
-{
-
-       /* No longer need exclusive access to the per-CPU data. */
-       if (cc->cc_ipl != IPL_NONE) {
-               splx(*s);
-       }
-       crit_exit();
-}
-
-#if __GNUC_PREREQ__(3, 0)
-__attribute ((noinline))
-#endif
-pool_cache_cpu_t *
-pool_cache_get_slow(pool_cache_cpu_t *cc, int *s, void **objectp,
+static bool __noinline
+pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
     paddr_t *pap, int flags)
 {
        pcg_t *pcg, *cur;
@@ -2407 +2369 @@ pool_cache_get_slow(pool_cache_cpu_t *cc
        pool_cache_t pc;
        void *object;

+       KASSERT(cc->cc_current->pcg_avail == 0);
+       KASSERT(cc->cc_previous->pcg_avail == 0);
+
        pc = cc->cc_cache;
        cc->cc_misses++;

@@ -2414 +2379 @@ pool_cache_get_slow(pool_cache_cpu_t *cc
         * Nothing was available locally.  Try and grab a group
         * from the cache.
         */
-       if (!mutex_tryenter(&pc->pc_lock)) {
+       if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
                ncsw = curlwp->l_ncsw;
                mutex_enter(&pc->pc_lock);
                pc->pc_contended++;
@@ -2426 +2391 @@ pool_cache_get_slow(pool_cache_cpu_t *cc
                 */
                if (curlwp->l_ncsw != ncsw) {
                        mutex_exit(&pc->pc_lock);
-                       pool_cache_cpu_exit(cc, s);
-                       return pool_cache_cpu_enter(pc, s);
+                       return true;
                }
        }

-       if ((pcg = pc->pc_fullgroups) != NULL) {
+       if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) {
                /*
                 * If there's a full group, release our empty
                 * group back to the cache.  Install the full
                 * group as cc_current and return.
                 */
-               if ((cur = cc->cc_current) != NULL) {
+               if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) {
                        KASSERT(cur->pcg_avail == 0);
                        cur->pcg_next = pc->pc_emptygroups;
                        pc->pc_emptygroups = cur;
                        pc->pc_nempty++;
                }
-               KASSERT(pcg->pcg_avail == PCG_NOBJECTS);
+               KASSERT(pcg->pcg_avail == pcg->pcg_size);
                cc->cc_current = pcg;
                pc->pc_fullgroups = pcg->pcg_next;
                pc->pc_hits++;
                pc->pc_nfull--;
                mutex_exit(&pc->pc_lock);
-               return cc;
+               return true;
        }

        /*

 /*
@@ -2459 +2423 @@ pool_cache_get_slow(pool_cache_cpu_t *cc
         */
        pc->pc_misses++;
        mutex_exit(&pc->pc_lock);
-       pool_cache_cpu_exit(cc, s);
+       splx(s);

        object = pool_get(&pc->pc_pool, flags);
        *objectp = object;
-       if (object == NULL)
-               return NULL;
+       if (__predict_false(object == NULL))
+               return false;

-       if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
+       if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
                pool_put(&pc->pc_pool, object);
                *objectp = NULL;
-               return NULL;
+               return false;
        }

        KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
@@ -2484 +2448 @@ pool_cache_get_slow(pool_cache_cpu_t *cc
        }

        FREECHECK_OUT(&pc->pc_freecheck, object);
-       return NULL;
+       return false;
 }

 /*
@@ -2502 +2466 @@ pool_cache_get_paddr(pool_cache_t pc, in
        int s;

 #ifdef LOCKDEBUG
-       if (flags & PR_WAITOK)
-               ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)");
+       if (flags & PR_WAITOK) {
+               ASSERT_SLEEPABLE();
+       }
 #endif

-       cc = pool_cache_cpu_enter(pc, &s);
-       do {
+       /* Lock out interrupts and disable preemption. */
+       s = splvm();
+       while (/* CONSTCOND */ true) {
                /* Try and allocate an object from the current group. */
+               cc = pc->pc_cpus[curcpu()->ci_index];
+               KASSERT(cc->cc_cache == pc);
                pcg = cc->cc_current;
-               if (pcg != NULL && pcg->pcg_avail > 0) {
+               if (__predict_true(pcg->pcg_avail > 0)) {
                        object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
-                       if (pap != NULL)
+                       if (__predict_false(pap != NULL))
                                *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
+#if defined(DIAGNOSTIC)
                        pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
-                       KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
+                       KASSERT(pcg->pcg_avail < pcg->pcg_size);
                        KASSERT(object != NULL);
+#endif
                        cc->cc_hits++;
-                       pool_cache_cpu_exit(cc, &s);
+                       splx(s);
                        FREECHECK_OUT(&pc->pc_freecheck, object);
                        return object;
                }
@@ -2528 +2498 @@ pool_cache_get_paddr(pool_cache_t pc, in
                 * it with the current group and allocate from there.
                 */
                pcg = cc->cc_previous;
-               if (pcg != NULL && pcg->pcg_avail > 0) {
+               if (__predict_true(pcg->pcg_avail > 0)) {
                        cc->cc_previous = cc->cc_current;
                        cc->cc_current = pcg;
                        continue;
@@ -2537 +2507 @@ pool_cache_get_paddr(pool_cache_t pc, in
                /*
                 * Can't allocate from either group: try the slow path.
                 * If get_slow() allocated an object for us, or if
-                * no more objects are available, it will return NULL.
+                * no more objects are available, it will return false.
                 * Otherwise, we need to retry.
                 */
-               cc = pool_cache_get_slow(cc, &s, &object, pap, flags);
-       } while (cc != NULL);
+               if (!pool_cache_get_slow(cc, s, &object, pap, flags))
+                       break;
+       }

        return object;
 }
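
For context, this is roughly how a subsystem consumes the API that these fast paths implement: build a cache once, then get and put objects on the hot path. The pool_cache_init() signature shown is my recollection of this era's interface and should be treated as an assumption; struct foo is hypothetical.

    /* Hypothetical consumer of the pool_cache API (sketch). */
    struct foo {
            int f_state;
    };

    static pool_cache_t foo_cache;

    static void
    foo_subsystem_init(void)
    {
            foo_cache = pool_cache_init(sizeof(struct foo), coherency_unit,
                0, 0, "foopl", NULL, IPL_NONE, NULL, NULL, NULL);
    }

    static struct foo *
    foo_alloc(void)
    {
            return pool_cache_get(foo_cache, PR_WAITOK);
    }

    static void
    foo_free(struct foo *f)
    {
            pool_cache_put(foo_cache, f);
    }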

-#if __GNUC_PREREQ__(3, 0)
-__attribute ((noinline))
-#endif
-pool_cache_cpu_t *
-pool_cache_put_slow(pool_cache_cpu_t *cc, int *s, void *object, paddr_t pa)
+static bool __noinline
+pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
 {
        pcg_t *pcg, *cur;
        uint64_t ncsw;
        pool_cache_t pc;

+       KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
+       KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
+
        pc = cc->cc_cache;
+       pcg = NULL;
        cc->cc_misses++;

        /*
-        * No free slots locally.  Try to grab an empty, unused
-        * group from the cache.
+        * If there are no empty groups in the cache then allocate one
+        * while still unlocked.
         */
-       if (!mutex_tryenter(&pc->pc_lock)) {
+       if (__predict_false(pc->pc_emptygroups == NULL)) {
+               if (__predict_true(!pool_cache_disable)) {
+                       pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
+               }
+               if (__predict_true(pcg != NULL)) {
+                       pcg->pcg_avail = 0;
+                       pcg->pcg_size = pc->pc_pcgsize;
+               }
+       }
+
+       /* Lock the cache. */
+       if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
                ncsw = curlwp->l_ncsw;
                mutex_enter(&pc->pc_lock);
                pc->pc_contended++;

                /*
-                * If we context switched while locking, then
-                * our view of the per-CPU data is invalid:
-                * retry.
+                * If we context switched while locking, then our view of
+                * the per-CPU data is invalid: retry.
                 */
-               if (curlwp->l_ncsw != ncsw) {
+               if (__predict_false(curlwp->l_ncsw != ncsw)) {
                        mutex_exit(&pc->pc_lock);
-                       pool_cache_cpu_exit(cc, s);
-                       return pool_cache_cpu_enter(pc, s);
+                       if (pcg != NULL) {
+                               pool_put(pc->pc_pcgpool, pcg);
+                       }
+                       return true;
                }
        }

-       if ((pcg = pc->pc_emptygroups) != NULL) {
-               /*
-                * If there's a empty group, release our full
-                * group back to the cache.  Install the empty
-                * group as cc_current and return.
-                */
-               if ((cur = cc->cc_current) != NULL) {
-                       KASSERT(cur->pcg_avail == PCG_NOBJECTS);
-                       cur->pcg_next = pc->pc_fullgroups;
-                       pc->pc_fullgroups = cur;
-                       pc->pc_nfull++;
-               }
-               KASSERT(pcg->pcg_avail == 0);
-               cc->cc_current = pcg;
-               pc->pc_emptygroups = pcg->pcg_next;
-               pc->pc_hits++;
-               pc->pc_nempty--;
-               mutex_exit(&pc->pc_lock);
-               return cc;
-       }
-
-       /*
-        * Nothing available locally or in cache.  Take the
-        * slow path and try to allocate a new group that we
-        * can release to.
-        */
-       pc->pc_misses++;
-       mutex_exit(&pc->pc_lock);
-       pool_cache_cpu_exit(cc, s);
-
-       /*
-        * If we can't allocate a new group, just throw the
-        * object away.
-        */
-       pcg = pool_get(&pcgpool, PR_NOWAIT);
-       if (pcg == NULL) {
-               pool_cache_destruct_object(pc, object);
-               return NULL;
-       }
-#ifdef DIAGNOSTIC
-       memset(pcg, 0, sizeof(*pcg));
-#else
-       pcg->pcg_avail = 0;
-#endif
-
-       /*
-        * Add the empty group to the cache and try again.
-        */
-       mutex_enter(&pc->pc_lock);
-       pcg->pcg_next = pc->pc_emptygroups;
-       pc->pc_emptygroups = pcg;
-       pc->pc_nempty++;
-       mutex_exit(&pc->pc_lock);
-
-       return pool_cache_cpu_enter(pc, s);
+       /* If there are no empty groups in the cache then allocate one. */
+       if (pcg == NULL && pc->pc_emptygroups != NULL) {
+               pcg = pc->pc_emptygroups;
+               pc->pc_emptygroups = pcg->pcg_next;
+               pc->pc_nempty--;
+       }
+
+       /*
+        * If there's a empty group, release our full group back
+        * to the cache.  Install the empty group to the local CPU
+        * and return.
+        */
+       if (pcg != NULL) {
+               KASSERT(pcg->pcg_avail == 0);
+               if (__predict_false(cc->cc_previous == &pcg_dummy)) {
+                       cc->cc_previous = pcg;
+               } else {
+                       cur = cc->cc_current;
+                       if (__predict_true(cur != &pcg_dummy)) {
+                               KASSERT(cur->pcg_avail == cur->pcg_size);
+                               cur->pcg_next = pc->pc_fullgroups;
+                               pc->pc_fullgroups = cur;
+                               pc->pc_nfull++;
+                       }
+                       cc->cc_current = pcg;
+               }
+               pc->pc_hits++;
+               mutex_exit(&pc->pc_lock);
+               return true;
+       }
+
+       /*
+        * Nothing available locally or in cache, and we didn't
+        * allocate an empty group.  Take the slow path and destroy
+        * the object here and now.
+        */
+       pc->pc_misses++;
+       mutex_exit(&pc->pc_lock);
+       splx(s);
+       pool_cache_destruct_object(pc, object);
+
+       return false;
 }
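
Both slow paths share one optimistic-locking idiom: try the cache mutex without blocking; if that fails, record the LWP's context-switch count, take the mutex the slow way, and treat a changed count as proof that the thread may have migrated, which makes the per-CPU pointers captured earlier stale. Distilled into a standalone helper (a sketch; the file inlines this logic rather than using such a function):

    /*
     * Sketch: lock pc->pc_lock, detecting migration.  Returns false
     * if the caller's per-CPU state must be re-fetched and retried.
     */
    static bool
    lock_or_detect_migration(pool_cache_t pc)
    {
            uint64_t ncsw;

            if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
                    ncsw = curlwp->l_ncsw;          /* context-switch count */
                    mutex_enter(&pc->pc_lock);      /* may sleep and migrate */
                    pc->pc_contended++;
                    if (__predict_false(curlwp->l_ncsw != ncsw)) {
                            mutex_exit(&pc->pc_lock);
                            return false;           /* stale per-CPU view */
                    }
            }
            return true;                            /* locked, view valid */
    }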

 /*
@@ -2652 +2623 @@ pool_cache_put_paddr(pool_cache_t pc, vo

        FREECHECK_IN(&pc->pc_freecheck, object);

-       cc = pool_cache_cpu_enter(pc, &s);
-       do {
+       /* Lock out interrupts and disable preemption. */
+       s = splvm();
+       while (/* CONSTCOND */ true) {
                /* If the current group isn't full, release it there. */
+               cc = pc->pc_cpus[curcpu()->ci_index];
+               KASSERT(cc->cc_cache == pc);
                pcg = cc->cc_current;
-               if (pcg != NULL && pcg->pcg_avail < PCG_NOBJECTS) {
+               if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
+                       KASSERT(pcg->pcg_objects[pcg->pcg_avail].pcgo_va
+                           == NULL);
                        pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
                        pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
                        pcg->pcg_avail++;
                        cc->cc_hits++;
-                       pool_cache_cpu_exit(cc, &s);
+                       splx(s);
                        return;
                }

                /*
-                * That failed.  If the previous group is empty, swap
+                * That failed.  If the previous group isn't full, swap
                 * it with the current group and try again.
                 */
                pcg = cc->cc_previous;
-               if (pcg != NULL && pcg->pcg_avail == 0) {
+               if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
                        cc->cc_previous = cc->cc_current;
                        cc->cc_current = pcg;
                        continue;
@@ -2681 +2653 @@ pool_cache_put_paddr(pool_cache_t pc, vo
                /*
                 * Can't free to either group: try the slow path.
                 * If put_slow() releases the object for us, it
-                * will return NULL.  Otherwise we need to retry.
+                * will return false.  Otherwise we need to retry.
                 */
-               cc = pool_cache_put_slow(cc, &s, object, pa);
-       } while (cc != NULL);
+               if (!pool_cache_put_slow(cc, s, object))
+                       break;
+       }
 }

 /*
@@ -2698 +2671 @@ pool_cache_xcall(pool_cache_t pc)
 {
        pool_cache_cpu_t *cc;
        pcg_t *prev, *cur, **list;
-       int s = 0; /* XXXgcc */
+       int s;

-       cc = pool_cache_cpu_enter(pc, &s);
-       cur = cc->cc_current;
-       cc->cc_current = NULL;
-       prev = cc->cc_previous;
-       cc->cc_previous = NULL;
-       pool_cache_cpu_exit(cc, &s);
-
-       /*
-        * XXXSMP Go to splvm to prevent kernel_lock from being taken,
-        * because locks at IPL_SOFTXXX are still spinlocks.  Does not
-        * apply to IPL_SOFTBIO.  Cross-call threads do not take the
-        * kernel_lock.
-        */
        s = splvm();
        mutex_enter(&pc->pc_lock);
-       if (cur != NULL) {
-               if (cur->pcg_avail == PCG_NOBJECTS) {
+       cc = pc->pc_cpus[curcpu()->ci_index];
+       cur = cc->cc_current;
+       cc->cc_current = __UNCONST(&pcg_dummy);
+       prev = cc->cc_previous;
+       cc->cc_previous = __UNCONST(&pcg_dummy);
+       if (cur != &pcg_dummy) {
+               if (cur->pcg_avail == cur->pcg_size) {
                        list = &pc->pc_fullgroups;
                        pc->pc_nfull++;
                } else if (cur->pcg_avail == 0) {
@@ -2729 +2694 @@ pool_cache_xcall(pool_cache_t pc)
                        cur->pcg_next = *list;
                        *list = cur;
                }
        }
-       if (prev != NULL) {
-               if (prev->pcg_avail == PCG_NOBJECTS) {
+       if (prev != &pcg_dummy) {
+               if (prev->pcg_avail == prev->pcg_size) {
                        list = &pc->pc_fullgroups;
                        pc->pc_nfull++;
                } else if (prev->pcg_avail == 0) {
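
pool_cache_xcall() is written to run on the CPU that owns the groups being drained; the call site is outside this diff, but with the xcall(9) interface added in this same development cycle the dispatch typically looks like the sketch below (the flag value and the cast are assumptions):

    #include <sys/xcall.h>

    /* Sketch: run pool_cache_xcall() on one CPU and wait for it. */
    static void
    drain_cpu_cache(pool_cache_t pc, struct cpu_info *ci)
    {
            uint64_t where;

            where = xc_unicast(0, (xcfunc_t)pool_cache_xcall, pc, NULL, ci);
            xc_wait(where);
    }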
@@ -2766 +2731 @@ void pool_page_free(struct pool *, void
 #ifdef POOL_SUBPAGE
 struct pool_allocator pool_allocator_kmem_fullpage = {
        pool_page_alloc, pool_page_free, 0,
-       .pa_backingmapptr = &kernel_map,
+       .pa_backingmapptr = &kmem_map,
 };
 #else
 struct pool_allocator pool_allocator_kmem = {
        pool_page_alloc, pool_page_free, 0,
-       .pa_backingmapptr = &kernel_map,
+       .pa_backingmapptr = &kmem_map,
 };
 #endif

@@ -2796 +2761 @@ void pool_subpage_free(struct pool *, vo

 struct pool_allocator pool_allocator_kmem = {
        pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
-       .pa_backingmapptr = &kernel_map,
+       .pa_backingmapptr = &kmem_map,
 };

 void   *pool_subpage_alloc_nointr(struct pool *, int);
@@ -2804 +2769 @@ void pool_subpage_free_nointr(struct poo

 struct pool_allocator pool_allocator_nointr = {
        pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
-       .pa_backingmapptr = &kernel_map,
+       .pa_backingmapptr = &kmem_map,
 };
 #endif /* POOL_SUBPAGE */

@@ -2842 +2807 @@ pool_page_alloc(struct pool *pp, int fla
 {
        bool waitok = (flags & PR_WAITOK) ? true : false;

-       return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
+       return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
 }

 void
 pool_page_free(struct pool *pp, void *v)
 {

-       uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
+       uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
 }

 static void *
@@ -2857 +2822 @@ pool_page_alloc_meta(struct pool *pp, in
 {
        bool waitok = (flags & PR_WAITOK) ? true : false;

-       return ((void *) uvm_km_alloc_poolpage(kernel_map, waitok));
+       return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
 }

 static void
 pool_page_free_meta(struct pool *pp, void *v)
 {

-       uvm_km_free_poolpage(kernel_map, (vaddr_t) v);
+       uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
 }

 #ifdef POOL_SUBPAGE
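
All of these allocators changed only their backing map, from kernel_map to the kmem_map submap. The same three slots are what a client fills in to give a pool its own page-level backend; a sketch modeled directly on the functions above (my_alloc and my_free are placeholder names):

    /* Sketch: a client-supplied page-level backend for a pool. */
    static void *
    my_alloc(struct pool *pp, int flags)
    {
            bool waitok = (flags & PR_WAITOK) ? true : false;

            return (void *)uvm_km_alloc_poolpage(kmem_map, waitok);
    }

    static void
    my_free(struct pool *pp, void *v)
    {
            uvm_km_free_poolpage(kmem_map, (vaddr_t)v);
    }

    struct pool_allocator my_allocator = {
            my_alloc, my_free, 0,   /* pa_pagesz 0: use the default */
            .pa_backingmapptr = &kmem_map,
    };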
@@ -2920 +2885 @@ pool_in_page(struct pool *pp, struct poo
            addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
 }

+static bool
+pool_in_item(struct pool *pp, void *item, uintptr_t addr)
+{
+
+       return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
+}
+
+static bool
+pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
+{
+       int i;
+
+       if (pcg == NULL) {
+               return false;
+       }
+       for (i = 0; i < pcg->pcg_avail; i++) {
+               if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
+                       return true;
+               }
+       }
+       return false;
+}
+
+static bool
+pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
+{
+
+       if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
+               unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
+               pool_item_bitmap_t *bitmap =
+                   ph->ph_bitmap + (idx / BITMAP_SIZE);
+               pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
+
+               return (*bitmap & mask) == 0;
+       } else {
+               struct pool_item *pi;
+
+               LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
+                       if (pool_in_item(pp, pi, addr)) {
+                               return false;
+                       }
+               }
+               return true;
+       }
+}
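
pool_allocated() answers whether an address is live: for PR_NOTOUCH pools it consults the per-page free bitmap, where a set bit marks a free item. The index arithmetic in isolation (sketch; here BITMAP_SIZE is assumed to be the bit width of one bitmap word and BITMAP_MASK its index mask, mirroring the file's conventions):

    #include <stdbool.h>
    #include <stdint.h>

    #define BITMAP_SIZE     (8 * sizeof(uint32_t))  /* bits per word */
    #define BITMAP_MASK     (BITMAP_SIZE - 1)

    /* Sketch: a set bit means "free", so allocated is the inverse. */
    static bool
    item_allocated(const uint32_t *bitmap, unsigned int idx)
    {
            uint32_t mask = 1U << (idx & BITMAP_MASK);

            return (bitmap[idx / BITMAP_SIZE] & mask) == 0;
    }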

 void
 pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
 {
        struct pool *pp;

-       LIST_FOREACH(pp, &pool_head, pr_poollist) {
+       TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
                struct pool_item_header *ph;
                uintptr_t item;
+               bool allocated = true;
+               bool incache = false;
+               bool incpucache = false;
+               char cpucachestr[32];

                if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
                        LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
@@ -2937 +2952 @@ pool_whatis(uintptr_t addr, void (*pr)(c
                        }
                        LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
                                if (pool_in_page(pp, ph, addr)) {
+                                       allocated =
+                                           pool_allocated(pp, ph, addr);
+                                       goto found;
+                               }
+                       }
+                       LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
+                               if (pool_in_page(pp, ph, addr)) {
+                                       allocated = false;
                                        goto found;
                                }
                        }
@@ -2946 +2969 @@ pool_whatis(uintptr_t addr, void (*pr)(c
                        if (ph == NULL || !pool_in_page(pp, ph, addr)) {
                                continue;
                        }
+                       allocated = pool_allocated(pp, ph, addr);
                }
 found:
+               if (allocated && pp->pr_cache) {
+                       pool_cache_t pc = pp->pr_cache;
+                       struct pool_cache_group *pcg;
+                       int i;
+
+                       for (pcg = pc->pc_fullgroups; pcg != NULL;
+                           pcg = pcg->pcg_next) {
+                               if (pool_in_cg(pp, pcg, addr)) {
+                                       incache = true;
+                                       goto print;
+                               }
+                       }
+                       for (i = 0; i < MAXCPUS; i++) {
+                               pool_cache_cpu_t *cc;
+
+                               if ((cc = pc->pc_cpus[i]) == NULL) {
+                                       continue;
+                               }
+                               if (pool_in_cg(pp, cc->cc_current, addr) ||
+                                   pool_in_cg(pp, cc->cc_previous, addr)) {
+                                       struct cpu_info *ci =
+                                           cpu_lookup(i);
+
+                                       incpucache = true;
+                                       snprintf(cpucachestr,
+                                           sizeof(cpucachestr),
+                                           "cached by CPU %u",
+                                           ci->ci_index);
+                                       goto print;
+                               }
+                       }
+               }
+print:
                item = (uintptr_t)ph->ph_page + ph->ph_off;
                item = item + rounddown(addr - item, pp->pr_size);
-               (*pr)("%p is %p+%zu from POOL '%s'\n",
+               (*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
                    (void *)addr, item, (size_t)(addr - item),
-                   pp->pr_wchan);
+                   pp->pr_wchan,
+                   incpucache ? cpucachestr :
+                   incache ? "cached" : allocated ? "allocated" : "free");
        }
 }
 #endif /* defined(DDB) */
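
With the extra tracking, ddb's whatis can now report not only which pool an address falls in but also whether the item is allocated, free, sitting in the global cache, or cached by a particular CPU. Illustrative output of the new format string (addresses invented):

    0xc1a2b3c8 is 0xc1a2b3c0+8 in POOL 'mbpl' (cached by CPU 1)
    0xc1a2b4e0 is 0xc1a2b4e0+0 in POOL 'buf4k' (free)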