Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. =================================================================== RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v: warning: Unknown phrases like `commitid ...;' are present. retrieving revision 1.182.2.1 retrieving revision 1.194 diff -u -p -r1.182.2.1 -r1.194 --- src/sys/kern/subr_pool.c 2010/04/30 14:44:12 1.182.2.1 +++ src/sys/kern/subr_pool.c 2012/02/04 22:11:42 1.194 @@ -1,4 +1,4 @@ -/* $NetBSD: subr_pool.c,v 1.182.2.1 2010/04/30 14:44:12 uebayasi Exp $ */ +/* $NetBSD: subr_pool.c,v 1.194 2012/02/04 22:11:42 para Exp $ */ /*- * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010 @@ -32,7 +32,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.182.2.1 2010/04/30 14:44:12 uebayasi Exp $"); +__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.194 2012/02/04 22:11:42 para Exp $"); #include "opt_ddb.h" #include "opt_pool.h" @@ -46,6 +46,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c, #include #include #include +#include #include #include #include @@ -54,7 +55,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c, #include #include -#include +#include /* * Pool resource management utility. @@ -83,16 +84,14 @@ static struct pool phpool[PHPOOL_MAX]; static struct pool psppool; #endif -static SLIST_HEAD(, pool_allocator) pa_deferinitq = - SLIST_HEAD_INITIALIZER(pa_deferinitq); - static void *pool_page_alloc_meta(struct pool *, int); static void pool_page_free_meta(struct pool *, void *); /* allocator for pool metadata */ struct pool_allocator pool_allocator_meta = { - pool_page_alloc_meta, pool_page_free_meta, - .pa_backingmapptr = &kmem_map, + .pa_alloc = pool_page_alloc_meta, + .pa_free = pool_page_free_meta, + .pa_pagesz = 0 }; /* # of seconds to retain page after last use */ @@ -179,6 +178,8 @@ static struct pool pcg_large_pool; static struct pool cache_pool; static struct pool cache_cpu_pool; +pool_cache_t pnbuf_cache; /* pathname buffer cache */ + /* List of all caches. */ TAILQ_HEAD(,pool_cache) pool_cache_head = TAILQ_HEAD_INITIALIZER(pool_cache_head); @@ -524,103 +525,59 @@ pr_rmpage(struct pool *pp, struct pool_i pool_update_curpage(pp); } -static bool -pa_starved_p(struct pool_allocator *pa) -{ - - if (pa->pa_backingmap != NULL) { - return vm_map_starved_p(pa->pa_backingmap); - } - return false; -} - -static int -pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg) -{ - struct pool *pp = obj; - struct pool_allocator *pa = pp->pr_alloc; - - KASSERT(&pp->pr_reclaimerentry == ce); - pool_reclaim(pp); - if (!pa_starved_p(pa)) { - return CALLBACK_CHAIN_ABORT; - } - return CALLBACK_CHAIN_CONTINUE; -} - -static void -pool_reclaim_register(struct pool *pp) -{ - struct vm_map *map = pp->pr_alloc->pa_backingmap; - int s; - - if (map == NULL) { - return; - } - - s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */ - callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback, - &pp->pr_reclaimerentry, pp, pool_reclaim_callback); - splx(s); -} - -static void -pool_reclaim_unregister(struct pool *pp) -{ - struct vm_map *map = pp->pr_alloc->pa_backingmap; - int s; - - if (map == NULL) { - return; - } - - s = splvm(); /* not necessary for INTRSAFE maps, but don't care. 
*/ - callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback, - &pp->pr_reclaimerentry); - splx(s); -} - -static void -pa_reclaim_register(struct pool_allocator *pa) -{ - struct vm_map *map = *pa->pa_backingmapptr; - struct pool *pp; - - KASSERT(pa->pa_backingmap == NULL); - if (map == NULL) { - SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q); - return; - } - pa->pa_backingmap = map; - TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) { - pool_reclaim_register(pp); - } -} - /* * Initialize all the pools listed in the "pools" link set. */ void pool_subsystem_init(void) { - struct pool_allocator *pa; + size_t size; + int idx; mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE); mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE); cv_init(&pool_busy, "poolbusy"); - while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) { - KASSERT(pa->pa_backingmapptr != NULL); - KASSERT(*pa->pa_backingmapptr != NULL); - SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q); - pa_reclaim_register(pa); + /* + * Initialize private page header pool and cache magazine pool if we + * haven't done so yet. + */ + for (idx = 0; idx < PHPOOL_MAX; idx++) { + static char phpool_names[PHPOOL_MAX][6+1+6+1]; + int nelem; + size_t sz; + + nelem = PHPOOL_FREELIST_NELEM(idx); + snprintf(phpool_names[idx], sizeof(phpool_names[idx]), + "phpool-%d", nelem); + sz = sizeof(struct pool_item_header); + if (nelem) { + sz = offsetof(struct pool_item_header, + ph_bitmap[howmany(nelem, BITMAP_SIZE)]); + } + pool_init(&phpool[idx], sz, 0, 0, 0, + phpool_names[idx], &pool_allocator_meta, IPL_VM); } +#ifdef POOL_SUBPAGE + pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0, + PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM); +#endif + + size = sizeof(pcg_t) + + (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t); + pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0, + "pcgnormal", &pool_allocator_meta, IPL_VM); + + size = sizeof(pcg_t) + + (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t); + pool_init(&pcg_large_pool, size, coherency_unit, 0, 0, + "pcglarge", &pool_allocator_meta, IPL_VM); pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit, - 0, 0, "pcache", &pool_allocator_nointr, IPL_NONE); + 0, 0, "pcache", &pool_allocator_meta, IPL_NONE); pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit, - 0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE); + 0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE); } /* @@ -678,10 +635,6 @@ pool_init(struct pool *pp, size_t size, mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM); palloc->pa_pagemask = ~(palloc->pa_pagesz - 1); palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1; - - if (palloc->pa_backingmapptr != NULL) { - pa_reclaim_register(palloc); - } } if (!cold) mutex_exit(&pool_allocator_lock); @@ -816,45 +769,6 @@ pool_init(struct pool *pp, size_t size, cv_init(&pp->pr_cv, wchan); pp->pr_ipl = ipl; - /* - * Initialize private page header pool and cache magazine pool if we - * haven't done so yet. - * XXX LOCKING. 
- */ - if (phpool[0].pr_size == 0) { - int idx; - for (idx = 0; idx < PHPOOL_MAX; idx++) { - static char phpool_names[PHPOOL_MAX][6+1+6+1]; - int nelem; - size_t sz; - - nelem = PHPOOL_FREELIST_NELEM(idx); - snprintf(phpool_names[idx], sizeof(phpool_names[idx]), - "phpool-%d", nelem); - sz = sizeof(struct pool_item_header); - if (nelem) { - sz = offsetof(struct pool_item_header, - ph_bitmap[howmany(nelem, BITMAP_SIZE)]); - } - pool_init(&phpool[idx], sz, 0, 0, 0, - phpool_names[idx], &pool_allocator_meta, IPL_VM); - } -#ifdef POOL_SUBPAGE - pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0, - PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM); -#endif - - size = sizeof(pcg_t) + - (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t); - pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0, - "pcgnormal", &pool_allocator_meta, IPL_VM); - - size = sizeof(pcg_t) + - (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t); - pool_init(&pcg_large_pool, size, coherency_unit, 0, 0, - "pcglarge", &pool_allocator_meta, IPL_VM); - } - /* Insert into the list of all pools. */ if (!cold) mutex_enter(&pool_head_lock); @@ -875,8 +789,6 @@ pool_init(struct pool *pp, size_t size, TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list); if (!cold) mutex_exit(&palloc->pa_lock); - - pool_reclaim_register(pp); } /* @@ -898,7 +810,6 @@ pool_destroy(struct pool *pp) mutex_exit(&pool_head_lock); /* Remove this pool from its allocator's list of pools. */ - pool_reclaim_unregister(pp); mutex_enter(&pp->pr_alloc->pa_lock); TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list); mutex_exit(&pp->pr_alloc->pa_lock); @@ -984,19 +895,17 @@ pool_get(struct pool *pp, int flags) void *v; #ifdef DIAGNOSTIC - if (__predict_false(pp->pr_itemsperpage == 0)) - panic("pool_get: pool %p: pr_itemsperpage is zero, " - "pool not initialized?", pp); - if (__predict_false(curlwp == NULL && doing_shutdown == 0 && - (flags & PR_WAITOK) != 0)) - panic("pool_get: %s: must have NOWAIT", pp->pr_wchan); - -#endif /* DIAGNOSTIC */ -#ifdef LOCKDEBUG + if (pp->pr_itemsperpage == 0) + panic("pool_get: pool '%s': pr_itemsperpage is zero, " + "pool not initialized?", pp->pr_wchan); + if ((cpu_intr_p() || cpu_softintr_p()) && pp->pr_ipl == IPL_NONE && + !cold && panicstr == NULL) + panic("pool '%s' is IPL_NONE, but called from " + "interrupt context\n", pp->pr_wchan); +#endif if (flags & PR_WAITOK) { ASSERT_SLEEPABLE(); } -#endif mutex_enter(&pp->pr_lock); pr_enter(pp, file, line); @@ -1604,6 +1513,8 @@ pool_sethardlimit(struct pool *pp, int n /* * Release all complete pages that have not been used recently. + * + * Might be called from interrupt context. */ int #ifdef POOL_DIAGNOSTIC @@ -1618,6 +1529,10 @@ pool_reclaim(struct pool *pp) bool klock; int rv; + if (cpu_intr_p() || cpu_softintr_p()) { + KASSERT(pp->pr_ipl != IPL_NONE); + } + if (pp->pr_drain_hook != NULL) { /* * The drain hook must be called with the pool unlocked. @@ -1660,8 +1575,7 @@ pool_reclaim(struct pool *pp) break; KASSERT(ph->ph_nmissing == 0); - if (curtime - ph->ph_time < pool_inactive_time - && !pa_starved_p(pp->pr_alloc)) + if (curtime - ph->ph_time < pool_inactive_time) continue; /* @@ -1736,12 +1650,13 @@ pool_drain_start(struct pool **ppp, uint } } -void +bool pool_drain_end(struct pool *pp, uint64_t where) { + bool reclaimed; if (pp == NULL) - return; + return false; KASSERT(pp->pr_refcnt > 0); @@ -1750,13 +1665,15 @@ pool_drain_end(struct pool *pp, uint64_t xc_wait(where); /* Drain the cache (if any) and pool.. 
*/ - pool_reclaim(pp); + reclaimed = pool_reclaim(pp); /* Finally, unlock the pool. */ mutex_enter(&pool_head_lock); pp->pr_refcnt--; cv_broadcast(&pool_busy); mutex_exit(&pool_head_lock); + + return reclaimed; } /* @@ -2140,6 +2057,19 @@ pool_cache_bootstrap(pool_cache_t pc, si void pool_cache_destroy(pool_cache_t pc) { + + pool_cache_bootstrap_destroy(pc); + pool_put(&cache_pool, pc); +} + +/* + * pool_cache_bootstrap_destroy: + * + * Destroy a pool cache. + */ +void +pool_cache_bootstrap_destroy(pool_cache_t pc) +{ struct pool *pp = &pc->pc_pool; u_int i; @@ -2165,7 +2095,6 @@ pool_cache_destroy(pool_cache_t pc) /* Finally, destroy it. */ mutex_destroy(&pc->pc_lock); pool_destroy(pp); - pool_put(&cache_pool, pc); } /* @@ -2520,11 +2449,14 @@ pool_cache_get_paddr(pool_cache_t pc, in void *object; int s; -#ifdef LOCKDEBUG + KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) || + (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL), + "pool '%s' is IPL_NONE, but called from interrupt context\n", + pc->pc_pool.pr_wchan); + if (flags & PR_WAITOK) { ASSERT_SLEEPABLE(); } -#endif /* Lock out interrupts and disable preemption. */ s = splvm(); @@ -2786,28 +2718,29 @@ void pool_page_free(struct pool *, void #ifdef POOL_SUBPAGE struct pool_allocator pool_allocator_kmem_fullpage = { - pool_page_alloc, pool_page_free, 0, - .pa_backingmapptr = &kmem_map, + .pa_alloc = pool_page_alloc, + .pa_free = pool_page_free, + .pa_pagesz = 0 }; #else struct pool_allocator pool_allocator_kmem = { - pool_page_alloc, pool_page_free, 0, - .pa_backingmapptr = &kmem_map, + .pa_alloc = pool_page_alloc, + .pa_free = pool_page_free, + .pa_pagesz = 0 }; #endif -void *pool_page_alloc_nointr(struct pool *, int); -void pool_page_free_nointr(struct pool *, void *); - #ifdef POOL_SUBPAGE struct pool_allocator pool_allocator_nointr_fullpage = { - pool_page_alloc_nointr, pool_page_free_nointr, 0, - .pa_backingmapptr = &kernel_map, + .pa_alloc = pool_page_alloc, + .pa_free = pool_page_free, + .pa_pagesz = 0 }; #else struct pool_allocator pool_allocator_nointr = { - pool_page_alloc_nointr, pool_page_free_nointr, 0, - .pa_backingmapptr = &kernel_map, + .pa_alloc = pool_page_alloc, + .pa_free = pool_page_free, + .pa_pagesz = 0 }; #endif @@ -2816,16 +2749,15 @@ void *pool_subpage_alloc(struct pool *, void pool_subpage_free(struct pool *, void *); struct pool_allocator pool_allocator_kmem = { - pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE, - .pa_backingmapptr = &kmem_map, + .pa_alloc = pool_subpage_alloc, + .pa_free = pool_subpage_free, + .pa_pagesz = POOL_SUBPAGE }; -void *pool_subpage_alloc_nointr(struct pool *, int); -void pool_subpage_free_nointr(struct pool *, void *); - struct pool_allocator pool_allocator_nointr = { - pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE, - .pa_backingmapptr = &kmem_map, + .pa_alloc = pool_subpage_alloc, + .pa_free = pool_subpage_free, + .pa_pagesz = POOL_SUBPAGE }; #endif /* POOL_SUBPAGE */ @@ -2861,31 +2793,41 @@ pool_allocator_free(struct pool *pp, voi void * pool_page_alloc(struct pool *pp, int flags) { - bool waitok = (flags & PR_WAITOK) ? true : false; + const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP; + vmem_addr_t va; + int ret; + + ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz, + vflags | VM_INSTANTFIT, &va); - return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok)); + return ret ? 
NULL : (void *)va; } void pool_page_free(struct pool *pp, void *v) { - uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v); + uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz); } static void * pool_page_alloc_meta(struct pool *pp, int flags) { - bool waitok = (flags & PR_WAITOK) ? true : false; + const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP; + vmem_addr_t va; + int ret; - return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok)); + ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz, + vflags | VM_INSTANTFIT, &va); + + return ret ? NULL : (void *)va; } static void pool_page_free_meta(struct pool *pp, void *v) { - uvm_km_free_poolpage(kmem_map, (vaddr_t) v); + vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz); } #ifdef POOL_SUBPAGE @@ -2902,35 +2844,7 @@ pool_subpage_free(struct pool *pp, void pool_put(&psppool, v); } -/* We don't provide a real nointr allocator. Maybe later. */ -void * -pool_subpage_alloc_nointr(struct pool *pp, int flags) -{ - - return (pool_subpage_alloc(pp, flags)); -} - -void -pool_subpage_free_nointr(struct pool *pp, void *v) -{ - - pool_subpage_free(pp, v); -} #endif /* POOL_SUBPAGE */ -void * -pool_page_alloc_nointr(struct pool *pp, int flags) -{ - bool waitok = (flags & PR_WAITOK) ? true : false; - - return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok)); -} - -void -pool_page_free_nointr(struct pool *pp, void *v) -{ - - uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v); -} #if defined(DDB) static bool
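
/*
 * Illustrative sketch only -- not part of the patch above.  One of the
 * interface changes in this diff is that pool_drain_end() now returns a
 * bool (the result of pool_reclaim()) instead of void, so a caller such
 * as the page daemon can tell whether draining the chosen pool actually
 * released any pages.  The function and variable names below are
 * hypothetical; only pool_drain_start()/pool_drain_end() come from the
 * diff itself.
 */
#include <sys/param.h>
#include <sys/pool.h>

static bool
example_drain_one_pool(void)
{
	struct pool *pp;
	uint64_t where;

	pool_drain_start(&pp, &where);		/* pick a pool, start the cross-call */
	return pool_drain_end(pp, where);	/* wait, reclaim; true if pages were freed */
}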