--- version 1.187, 2011/01/17 07:13:32
+++ version 1.191, 2012/01/27 19:48:40

Line 46 (both versions): __KERNEL_RCSID(0, "$NetBSD$");

 #include <sys/errno.h>
 #include <sys/kernel.h>
 #include <sys/malloc.h>
+#include <sys/vmem.h>
 #include <sys/pool.h>
 #include <sys/syslog.h>
 #include <sys/debug.h>
Line 83 (1.187) / Line 84 (1.191): static struct pool phpool[PHPOOL_MAX];

 static struct pool psppool;
 #endif

-static SLIST_HEAD(, pool_allocator) pa_deferinitq =
-    SLIST_HEAD_INITIALIZER(pa_deferinitq);
|
 static void *pool_page_alloc_meta(struct pool *, int);
 static void pool_page_free_meta(struct pool *, void *);

 /* allocator for pool metadata */
 struct pool_allocator pool_allocator_meta = {
-    pool_page_alloc_meta, pool_page_free_meta,
-    .pa_backingmapptr = &kmem_map,
+    .pa_alloc = pool_page_alloc_meta,
+    .pa_free = pool_page_free_meta,
+    .pa_pagesz = 0
 };
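The allocator initializers in this revision move from positional to C99 designated form, and the pa_backingmapptr member disappears together with the backing-map reclaim machinery removed further down. A pa_pagesz of 0 requests the default page size; pool_init() later derives the page mask and shift from it (see the hunk at Line 683/635 below). A minimal userland sketch of that derivation, assuming a 4096-byte page in place of the kernel's PAGE_SIZE (struct pa and its field names here just mirror the fields visible in this diff):

    #include <stdio.h>
    #include <strings.h>                    /* ffs() */

    struct pa {
        unsigned int pa_pagesz;
        unsigned int pa_pagemask;
        unsigned int pa_pageshift;
    };

    int
    main(void)
    {
        struct pa pa = { .pa_pagesz = 0 };  /* 0 = "use the default" */

        if (pa.pa_pagesz == 0)
            pa.pa_pagesz = 4096;            /* assumed PAGE_SIZE */
        pa.pa_pagemask = ~(pa.pa_pagesz - 1);       /* 0xfffff000 */
        pa.pa_pageshift = ffs(pa.pa_pagesz) - 1;    /* 12 */
        printf("mask=%#x shift=%u\n", pa.pa_pagemask, pa.pa_pageshift);
        return 0;
    }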
|
|
 /* # of seconds to retain page after last use */

Line 179 (1.187) / Line 178 (1.191): static struct pool pcg_large_pool;

 static struct pool cache_pool;
 static struct pool cache_cpu_pool;

+pool_cache_t pnbuf_cache;	/* pathname buffer cache */

 /* List of all caches. */
 TAILQ_HEAD(,pool_cache) pool_cache_head =
     TAILQ_HEAD_INITIALIZER(pool_cache_head);
Line 524 (1.187) / Line 525 (1.191): pr_rmpage(struct pool *pp, struct pool_i

     pool_update_curpage(pp);
 }

-static bool
-pa_starved_p(struct pool_allocator *pa)
-{
-
-    if (pa->pa_backingmap != NULL) {
-        return vm_map_starved_p(pa->pa_backingmap);
-    }
-    return false;
-}
-
-static int
-pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
-{
-    struct pool *pp = obj;
-    struct pool_allocator *pa = pp->pr_alloc;
-
-    KASSERT(&pp->pr_reclaimerentry == ce);
-    pool_reclaim(pp);
-    if (!pa_starved_p(pa)) {
-        return CALLBACK_CHAIN_ABORT;
-    }
-    return CALLBACK_CHAIN_CONTINUE;
-}
-
-static void
-pool_reclaim_register(struct pool *pp)
-{
-    struct vm_map *map = pp->pr_alloc->pa_backingmap;
-    int s;
-
-    if (map == NULL) {
-        return;
-    }
-
-    s = splvm();	/* not necessary for INTRSAFE maps, but don't care. */
-    callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback,
-        &pp->pr_reclaimerentry, pp, pool_reclaim_callback);
-    splx(s);
-
-#ifdef DIAGNOSTIC
-    /* Diagnostic drain attempt. */
-    uvm_km_va_drain(map, 0);
-#endif
-}
-
-static void
-pool_reclaim_unregister(struct pool *pp)
-{
-    struct vm_map *map = pp->pr_alloc->pa_backingmap;
-    int s;
-
-    if (map == NULL) {
-        return;
-    }
-
-    s = splvm();	/* not necessary for INTRSAFE maps, but don't care. */
-    callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback,
-        &pp->pr_reclaimerentry);
-    splx(s);
-}
-
-static void
-pa_reclaim_register(struct pool_allocator *pa)
-{
-    struct vm_map *map = *pa->pa_backingmapptr;
-    struct pool *pp;
-
-    KASSERT(pa->pa_backingmap == NULL);
-    if (map == NULL) {
-        SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q);
-        return;
-    }
-    pa->pa_backingmap = map;
-    TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
-        pool_reclaim_register(pp);
-    }
-}
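Everything removed above implemented reclaim-under-KVA-pressure for the old kmem_map world: each pool registered pool_reclaim_callback() on its allocator's backing vm_map, and when vm_map_starved_p() reported the map short of virtual addresses, the callback chain drained idle pool pages, stopping at the first pool that freed enough (CALLBACK_CHAIN_ABORT). With vmem-backed arenas there is no backing map to register against, so the plumbing goes with it. A generic sketch of the callback-chain idiom, a reimplementation for illustration rather than NetBSD's <sys/callback.h> (cb_entry, chain_run, and drain_one are made-up names):

    #include <stddef.h>
    #include <stdio.h>

    #define CHAIN_CONTINUE  0
    #define CHAIN_ABORT     1

    struct cb_entry {
        struct cb_entry *next;
        int (*fn)(void *obj, void *arg);
        void *obj;
    };

    /* Run entries in order until one reports enough progress. */
    static int
    chain_run(struct cb_entry *head, void *arg)
    {
        struct cb_entry *c;

        for (c = head; c != NULL; c = c->next) {
            if (c->fn(c->obj, arg) == CHAIN_ABORT)
                return CHAIN_ABORT;
        }
        return CHAIN_CONTINUE;
    }

    static int
    drain_one(void *obj, void *arg)
    {
        (void)arg;
        printf("draining pool %s\n", (char *)obj);
        return CHAIN_ABORT;     /* pretend we freed enough */
    }

    int
    main(void)
    {
        struct cb_entry e = { NULL, drain_one, "phpool-32" };

        return chain_run(&e, NULL) == CHAIN_ABORT ? 0 : 1;
    }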
|
|
|
 /*
  * Initialize all the pools listed in the "pools" link set.
  */
 void
 pool_subsystem_init(void)
 {
-    struct pool_allocator *pa;
+    int idx;
+    size_t size;

     mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
     mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
     cv_init(&pool_busy, "poolbusy");

-    while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
-        KASSERT(pa->pa_backingmapptr != NULL);
-        KASSERT(*pa->pa_backingmapptr != NULL);
-        SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
-        pa_reclaim_register(pa);
-    }
+    /*
+     * Initialize private page header pool and cache magazine pool if we
+     * haven't done so yet.
+     */
+    for (idx = 0; idx < PHPOOL_MAX; idx++) {
+        static char phpool_names[PHPOOL_MAX][6+1+6+1];
+        int nelem;
+        size_t sz;
+
+        nelem = PHPOOL_FREELIST_NELEM(idx);
+        snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
+            "phpool-%d", nelem);
+        sz = sizeof(struct pool_item_header);
+        if (nelem) {
+            sz = offsetof(struct pool_item_header,
+                ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
+        }
+        pool_init(&phpool[idx], sz, 0, 0, 0,
+            phpool_names[idx], &pool_allocator_meta, IPL_VM);
+    }
+#ifdef POOL_SUBPAGE
+    pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
+        PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
+#endif
+
+    size = sizeof(pcg_t) +
+        (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
+    pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
+        "pcgnormal", &pool_allocator_meta, IPL_VM);
+
+    size = sizeof(pcg_t) +
+        (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
+    pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
+        "pcglarge", &pool_allocator_meta, IPL_VM);

     pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
-        0, 0, "pcache", &pool_allocator_nointr, IPL_NONE);
+        0, 0, "pcache", &pool_allocator_meta, IPL_NONE);

     pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
-        0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE);
+        0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE);
 }
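The one-time setup that used to run lazily inside pool_init() (its removal is shown below, at Line 821/769) now runs here. The header-sizing line is worth unpacking: a page header ends in a per-item bitmap, and offsetof(..., ph_bitmap[howmany(nelem, BITMAP_SIZE)]) trims the header to exactly as many bitmap words as a freelist of nelem items needs. A userland sketch of the same trick; item_header, BITMAP_SIZE (bits per word, matching howmany()'s use above), and the sizes printed are illustrative stand-ins:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BITMAP_SIZE     32                  /* bits per bitmap word */
    #define howmany(x, y)   (((x) + ((y) - 1)) / (y))

    struct item_header {
        uint32_t nmissing;
        uint32_t bitmap[];                      /* one bit per item */
    };

    int
    main(void)
    {
        for (int nelem = 32; nelem <= 256; nelem *= 2) {
            /* Header size = offset just past the last bitmap word used. */
            size_t sz = offsetof(struct item_header,
                bitmap[howmany(nelem, BITMAP_SIZE)]);
            printf("nelem=%3d header=%zu bytes\n", nelem, sz);
        }
        return 0;
    }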
|
|
 /*

Line 683 (1.187) / Line 635 (1.191): pool_init(struct pool *pp, size_t size,

         mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
         palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
         palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
-
-        if (palloc->pa_backingmapptr != NULL) {
-            pa_reclaim_register(palloc);
-        }
     }
     if (!cold)
         mutex_exit(&pool_allocator_lock);
Line 821 (1.187) / Line 769 (1.191): pool_init(struct pool *pp, size_t size,

     cv_init(&pp->pr_cv, wchan);
     pp->pr_ipl = ipl;

-    /*
-     * Initialize private page header pool and cache magazine pool if we
-     * haven't done so yet.
-     * XXX LOCKING.
-     */
-    if (phpool[0].pr_size == 0) {
-        int idx;
-        for (idx = 0; idx < PHPOOL_MAX; idx++) {
-            static char phpool_names[PHPOOL_MAX][6+1+6+1];
-            int nelem;
-            size_t sz;
-
-            nelem = PHPOOL_FREELIST_NELEM(idx);
-            snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
-                "phpool-%d", nelem);
-            sz = sizeof(struct pool_item_header);
-            if (nelem) {
-                sz = offsetof(struct pool_item_header,
-                    ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
-            }
-            pool_init(&phpool[idx], sz, 0, 0, 0,
-                phpool_names[idx], &pool_allocator_meta, IPL_VM);
-        }
-#ifdef POOL_SUBPAGE
-        pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
-            PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
-#endif
-
-        size = sizeof(pcg_t) +
-            (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
-        pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
-            "pcgnormal", &pool_allocator_meta, IPL_VM);
-
-        size = sizeof(pcg_t) +
-            (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
-        pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
-            "pcglarge", &pool_allocator_meta, IPL_VM);
-    }
-
     /* Insert into the list of all pools. */
     if (!cold)
         mutex_enter(&pool_head_lock);

Line 880 (1.187) / Line 789 (1.191): pool_init(struct pool *pp, size_t size,

     TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
     if (!cold)
         mutex_exit(&palloc->pa_lock);
-
-    pool_reclaim_register(pp);
 }
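The block removed above bootstrapped the page-header and magazine pools on the first pool_init() call, keyed on phpool[0].pr_size == 0 and flagged "XXX LOCKING" because nothing serialized that check. Hoisting the work into pool_subsystem_init(), as the earlier hunk shows, replaces a check-then-act race with a single early, effectively single-threaded initialization. An illustrative userland reduction of the hazard (bootstrapped and pool_create are made-up names):

    #include <stdio.h>

    static int bootstrapped;    /* plain flag, no lock: the old hazard */

    static void
    pool_create(void)
    {
        /*
         * Check-then-act: two threads can both observe 0 here and both
         * run the one-time setup. Doing the setup once in an early init
         * function, before other threads exist, removes the race.
         */
        if (!bootstrapped) {
            bootstrapped = 1;
            printf("bootstrap ran\n");
        }
    }

    int
    main(void)
    {
        pool_create();
        pool_create();          /* second call: already bootstrapped */
        return 0;
    }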
|
|
 /*

Line 903 (1.187) / Line 810 (1.191): pool_destroy(struct pool *pp)

     mutex_exit(&pool_head_lock);

     /* Remove this pool from its allocator's list of pools. */
-    pool_reclaim_unregister(pp);
     mutex_enter(&pp->pr_alloc->pa_lock);
     TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
     mutex_exit(&pp->pr_alloc->pa_lock);
Line 1669 (1.187) / Line 1575 (1.191): pool_reclaim(struct pool *pp)

             break;

         KASSERT(ph->ph_nmissing == 0);
-        if (curtime - ph->ph_time < pool_inactive_time
-            && !pa_starved_p(pp->pr_alloc))
+        if (curtime - ph->ph_time < pool_inactive_time)
             continue;

 /*
Line 2152 (1.187) / Line 2057 (1.191): pool_cache_bootstrap(pool_cache_t pc, si

 void
 pool_cache_destroy(pool_cache_t pc)
 {
+
+    pool_cache_bootstrap_destroy(pc);
+    pool_put(&cache_pool, pc);
+}
+
+/*
+ * pool_cache_bootstrap_destroy:
+ *
+ *	Destroy a pool cache.
+ */
+void
+pool_cache_bootstrap_destroy(pool_cache_t pc)
+{
     struct pool *pp = &pc->pc_pool;
     u_int i;

Line 2177 (1.187) / Line 2095 (1.191): pool_cache_destroy(pool_cache_t pc)

     /* Finally, destroy it. */
     mutex_destroy(&pc->pc_lock);
     pool_destroy(pp);
-    pool_put(&cache_pool, pc);
 }
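pool_cache_destroy() is split here: the new pool_cache_bootstrap_destroy() tears the cache down in place, and pool_cache_destroy() wraps it and then returns the pc to cache_pool. Presumably this lets caches set up by pool_cache_bootstrap() in static storage be torn down without a pool_put() of memory that never came from cache_pool; the diff itself does not state the rationale. A generic sketch of the pattern, with illustrative names (struct cache, cache_bootstrap_destroy, cache_destroy, early_cache):

    #include <stdlib.h>

    struct cache {
        int initialized;
    };

    /* Tear down in place: the pool_cache_bootstrap_destroy() analogue. */
    static void
    cache_bootstrap_destroy(struct cache *c)
    {
        c->initialized = 0;     /* locks, per-CPU state, backing pool */
    }

    /* Full destroy for heap-backed instances: tear down, then free. */
    static void
    cache_destroy(struct cache *c)
    {
        cache_bootstrap_destroy(c);
        free(c);
    }

    static struct cache early_cache;    /* static "bootstrap" storage */

    int
    main(void)
    {
        struct cache *c = calloc(1, sizeof(*c));

        if (c != NULL)
            cache_destroy(c);                   /* dynamic instance */
        cache_bootstrap_destroy(&early_cache);  /* static: never freed */
        return 0;
    }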
|
|
 /*

Line 2534 (1.187) / Line 2451 (1.191): pool_cache_get_paddr(pool_cache_t pc, in

     KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) ||
         (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL),
-        ("pool '%s' is IPL_NONE, but called from interrupt context\n",
-        pc->pc_pool.pr_wchan));
+        "pool '%s' is IPL_NONE, but called from interrupt context\n",
+        pc->pc_pool.pr_wchan);

     if (flags & PR_WAITOK) {
         ASSERT_SLEEPABLE();
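The KASSERTMSG() hunk reflects the macro becoming variadic: the format string and its arguments are now passed directly instead of as one parenthesized tuple. An illustrative userland macro with the new shape (ASSERTMSG here is a stand-in, not NetBSD's definition):

    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Old shape: ASSERTMSG(cond, ("fmt", args...)); the extra parens made
     * the message a single macro argument. New shape passes them through
     * as __VA_ARGS__.
     */
    #define ASSERTMSG(cond, ...)                        \
        do {                                            \
            if (!(cond)) {                              \
                fprintf(stderr, __VA_ARGS__);           \
                abort();                                \
            }                                           \
        } while (0)

    int
    main(void)
    {
        int ipl = 0;

        ASSERTMSG(ipl == 0, "ipl is %d, expected 0\n", ipl);
        printf("assertion passed\n");
        return 0;
    }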
Line 2801 (1.187) / Line 2718 (1.191): void pool_page_free(struct pool *, void

 #ifdef POOL_SUBPAGE
 struct pool_allocator pool_allocator_kmem_fullpage = {
-    pool_page_alloc, pool_page_free, 0,
-    .pa_backingmapptr = &kmem_map,
+    pool_page_alloc, pool_page_free, 0
 };
 #else
 struct pool_allocator pool_allocator_kmem = {
-    pool_page_alloc, pool_page_free, 0,
-    .pa_backingmapptr = &kmem_map,
+    .pa_alloc = pool_page_alloc,
+    .pa_free = pool_page_free,
+    .pa_pagesz = 0
 };
 #endif
|
|
Line 2817 (1.187) / Line 2734 (1.191): void pool_page_free_nointr(struct pool *

 #ifdef POOL_SUBPAGE
 struct pool_allocator pool_allocator_nointr_fullpage = {
     pool_page_alloc_nointr, pool_page_free_nointr, 0,
-    .pa_backingmapptr = &kernel_map,
 };
 #else
 struct pool_allocator pool_allocator_nointr = {
-    pool_page_alloc_nointr, pool_page_free_nointr, 0,
-    .pa_backingmapptr = &kernel_map,
+    .pa_alloc = pool_page_alloc,
+    .pa_free = pool_page_free,
+    .pa_pagesz = 0
 };
 #endif
|
|
Line 2832 (1.187) / Line 2749 (1.191): void pool_subpage_free(struct pool *, vo

 struct pool_allocator pool_allocator_kmem = {
     pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
-    .pa_backingmapptr = &kmem_map,
 };

 void *pool_subpage_alloc_nointr(struct pool *, int);

Line 2840 (1.187) / Line 2756 (1.191): void pool_subpage_free_nointr(struct poo

 struct pool_allocator pool_allocator_nointr = {
     pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
-    .pa_backingmapptr = &kmem_map,
 };
 #endif /* POOL_SUBPAGE */
|
|
|
|
 void *
 pool_page_alloc(struct pool *pp, int flags)
 {
     bool waitok = (flags & PR_WAITOK) ? true : false;
+    int rc;
+    vmem_addr_t va;

-    return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
+    rc = uvm_km_kmem_alloc(kmem_va_arena,
+        pp->pr_alloc->pa_pagesz,
+        ((waitok ? VM_SLEEP : VM_NOSLEEP) | VM_INSTANTFIT), &va);
+
+    if (rc != 0)
+        return NULL;
+    else
+        return (void *)va;
 }

 void
 pool_page_free(struct pool *pp, void *v)
 {

-    uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
+    uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
 }
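The page allocators stop calling the kmem_map-based uvm_km_alloc_poolpage*() helpers and go through vmem-backed arenas instead: ordinary pool pages from kmem_va_arena here, and pool metadata from kmem_meta_arena in the next hunk, which plausibly keeps metadata allocation from recursing through the general-purpose pools (an inference; the diff does not say why). The new calls return an errno-style code and hand the address back through an out-parameter, so failure is distinct from any particular address value, and the glue above translates rc != 0 into the NULL the pool layer expects. A userland sketch of that calling convention (arena_alloc and vmem_addr_like_t are stand-ins, with malloc playing the arena):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef uintptr_t vmem_addr_like_t;

    /* Error code out the front, address out the back. */
    static int
    arena_alloc(size_t size, vmem_addr_like_t *addrp)
    {
        void *p = malloc(size);         /* stand-in for a vmem arena */

        if (p == NULL)
            return ENOMEM;
        *addrp = (vmem_addr_like_t)p;
        return 0;
    }

    int
    main(void)
    {
        vmem_addr_like_t va;
        int rc = arena_alloc(4096, &va);

        /* The pool glue performs exactly this translation. */
        void *page = (rc != 0) ? NULL : (void *)va;
        printf("rc=%d page=%p\n", rc, page);
        free(page);
        return 0;
    }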
|
|
 static void *
 pool_page_alloc_meta(struct pool *pp, int flags)
 {
     bool waitok = (flags & PR_WAITOK) ? true : false;
+    int rc;
+    vmem_addr_t addr;

-    return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
+    rc = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
+        (waitok ? VM_SLEEP : VM_NOSLEEP) | VM_INSTANTFIT, &addr);
+
+    if (rc != 0)
+        return 0;
+    else
+        return (void *)addr;
 }

 static void
 pool_page_free_meta(struct pool *pp, void *v)
 {

-    uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
+    vmem_free(kmem_meta_arena, (vmem_addr_t)v,
+        pp->pr_alloc->pa_pagesz);
 }
|
|
 #ifdef POOL_SUBPAGE

Line 2932 (1.187) / Line 2865 (1.191): pool_subpage_free_nointr(struct pool *pp

     pool_subpage_free(pp, v);
 }
 #endif /* POOL_SUBPAGE */
-
-void *
-pool_page_alloc_nointr(struct pool *pp, int flags)
-{
-    bool waitok = (flags & PR_WAITOK) ? true : false;
-
-    return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
-}
-
-void
-pool_page_free_nointr(struct pool *pp, void *v)
-{
-
-    uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
-}
|
|
|
 #if defined(DDB)
 static bool