Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v
retrieving revision 1.135
retrieving revision 1.138.2.3
diff -u -p -r1.135 -r1.138.2.3
--- src/sys/kern/subr_pool.c	2007/11/10 07:29:28	1.135
+++ src/sys/kern/subr_pool.c	2007/12/13 05:06:01	1.138.2.3
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_pool.c,v 1.135 2007/11/10 07:29:28 yamt Exp $	*/
+/*	$NetBSD: subr_pool.c,v 1.138.2.3 2007/12/13 05:06:01 yamt Exp $	*/
 
 /*-
  * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc.
@@ -38,8 +38,9 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.135 2007/11/10 07:29:28 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.138.2.3 2007/12/13 05:06:01 yamt Exp $");
 
+#include "opt_ddb.h"
 #include "opt_pool.h"
 #include "opt_poollog.h"
 #include "opt_lockdebug.h"
@@ -101,7 +102,7 @@ static void pool_page_free_meta(struct p
 /* allocator for pool metadata */
 struct pool_allocator pool_allocator_meta = {
 	pool_page_alloc_meta, pool_page_free_meta,
-	.pa_backingmapptr = &kmem_map,
+	.pa_backingmapptr = &kernel_map,
 };
 
 /* # of seconds to retain page after last use */
@@ -127,6 +128,7 @@ struct pool_item_header {
 	void *			ph_page;	/* this page's address */
 	struct timeval		ph_time;	/* last referenced */
 	uint16_t		ph_nmissing;	/* # of chunks in use */
+	uint16_t		ph_off;		/* start offset in page */
 	union {
 		/* !PR_NOTOUCH */
 		struct {
@@ -135,13 +137,11 @@ struct pool_item_header {
 		} phu_normal;
 		/* PR_NOTOUCH */
 		struct {
-			uint16_t phu_off;	/* start offset in page */
-			pool_item_bitmap_t phu_bitmap[];
+			pool_item_bitmap_t phu_bitmap[1];
 		} phu_notouch;
 	} ph_u;
 };
 #define	ph_itemlist	ph_u.phu_normal.phu_itemlist
-#define	ph_off		ph_u.phu_notouch.phu_off
 #define	ph_bitmap	ph_u.phu_notouch.phu_bitmap
 
 struct pool_item {
@@ -412,6 +412,24 @@ phtree_compare(struct pool_item_header *
 SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
 SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
 
+static inline struct pool_item_header *
+pr_find_pagehead_noalign(struct pool *pp, void *v)
+{
+	struct pool_item_header *ph, tmp;
+
+	tmp.ph_page = (void *)(uintptr_t)v;
+	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
+	if (ph == NULL) {
+		ph = SPLAY_ROOT(&pp->pr_phtree);
+		if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
+			ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
+		}
+		KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
+	}
+
+	return ph;
+}
+
 /*
  * Return the pool page header based on item address.
 */
@@ -421,15 +439,7 @@ pr_find_pagehead(struct pool *pp, void *
 	struct pool_item_header *ph, tmp;
 
 	if ((pp->pr_roflags & PR_NOALIGN) != 0) {
-		tmp.ph_page = (void *)(uintptr_t)v;
-		ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
-		if (ph == NULL) {
-			ph = SPLAY_ROOT(&pp->pr_phtree);
-			if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
-				ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
-			}
-			KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
-		}
+		ph = pr_find_pagehead_noalign(pp, v);
 	} else {
 		void *page = (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
 
@@ -790,7 +800,7 @@ pool_init(struct pool *pp, size_t size,
 
 #ifdef POOL_DIAGNOSTIC
 	if (flags & PR_LOGGING) {
-		if (kmem_map == NULL ||
+		if (kernel_map == NULL ||
 		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
 		    M_TEMP, M_NOWAIT)) == NULL)
 			pp->pr_roflags &= ~PR_LOGGING;
@@ -802,7 +812,14 @@ pool_init(struct pool *pp, size_t size,
 	pp->pr_entered_file = NULL;
 	pp->pr_entered_line = 0;
 
-	mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
+	/*
+	 * XXXAD hack to prevent IP input processing from blocking.
+	 */
+	if (ipl == IPL_SOFTNET) {
+		mutex_init(&pp->pr_lock, MUTEX_DEFAULT, IPL_VM);
+	} else {
+		mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
+	}
 	cv_init(&pp->pr_cv, wchan);
 	pp->pr_ipl = ipl;
@@ -1429,7 +1446,8 @@ pool_prime_page(struct pool *pp, void *s
 
 	/*
 	 * Color this page.
 	 */
-	cp = (char *)cp + pp->pr_curcolor;
+	ph->ph_off = pp->pr_curcolor;
+	cp = (char *)cp + ph->ph_off;
 	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
 		pp->pr_curcolor = 0;
@@ -2040,7 +2058,14 @@ pool_cache_bootstrap(pool_cache_t pc, si
 		palloc = &pool_allocator_nointr;
 
 	pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
-	mutex_init(&pc->pc_lock, MUTEX_DEFAULT, pp->pr_ipl);
+	/*
+	 * XXXAD hack to prevent IP input processing from blocking.
+	 */
+	if (ipl == IPL_SOFTNET) {
+		mutex_init(&pc->pc_lock, MUTEX_DEFAULT, IPL_VM);
+	} else {
+		mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
+	}
 
 	if (ctor == NULL) {
 		ctor = (int (*)(void *, void *, int))nullop;
@@ -2062,12 +2087,18 @@ pool_cache_bootstrap(pool_cache_t pc, si
 	pc->pc_nfull = 0;
 	pc->pc_contended = 0;
 	pc->pc_refcnt = 0;
+	pc->pc_freecheck = NULL;
 
 	/* Allocate per-CPU caches. */
 	memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
 	pc->pc_ncpu = 0;
-	for (CPU_INFO_FOREACH(cii, ci)) {
-		pool_cache_cpu_init1(ci, pc);
+	if (ncpu < 2) {
+		/* XXX For sparc: boot CPU is not attached yet. */
+		pool_cache_cpu_init1(curcpu(), pc);
+	} else {
+		for (CPU_INFO_FOREACH(cii, ci)) {
+			pool_cache_cpu_init1(ci, pc);
+		}
 	}
 
 	if (__predict_true(!cold)) {
@@ -2091,6 +2122,20 @@ pool_cache_bootstrap(pool_cache_t pc, si
 void
 pool_cache_destroy(pool_cache_t pc)
 {
+
+	pool_cache_bootstrap_destroy(pc);
+	pool_put(&cache_pool, pc);
+}
+
+/*
+ * pool_cache_bootstrap_destroy:
+ *
+ *	Kernel-private version of pool_cache_destroy().
+ *	Destroy a pool cache initialized by pool_cache_bootstrap.
+ */
+void
+pool_cache_bootstrap_destroy(pool_cache_t pc)
+{
 	struct pool *pp = &pc->pc_pool;
 	pool_cache_cpu_t *cc;
 	pcg_t *pcg;
@@ -2130,7 +2175,6 @@ pool_cache_destroy(pool_cache_t pc)
 
 	/* Finally, destroy it. */
 	mutex_destroy(&pc->pc_lock);
 	pool_destroy(pp);
-	pool_put(&cache_pool, pc);
 }
 
 /*
@@ -2142,11 +2186,15 @@ static void
 pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
 {
 	pool_cache_cpu_t *cc;
+	int index;
+
+	index = ci->ci_index;
+	KASSERT(index < MAXCPUS);
 
 	KASSERT(((uintptr_t)pc->pc_cpus & (CACHE_LINE_SIZE - 1)) == 0);
 
-	if ((cc = pc->pc_cpus[ci->ci_index]) != NULL) {
-		KASSERT(cc->cc_cpu = ci);
+	if ((cc = pc->pc_cpus[index]) != NULL) {
+		KASSERT(cc->cc_cpuindex == index);
 		return;
 	}
@@ -2167,13 +2215,13 @@ pool_cache_cpu_init1(struct cpu_info *ci
 	cc->cc_ipl = pc->pc_pool.pr_ipl;
 	cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
 	cc->cc_cache = pc;
-	cc->cc_cpu = ci;
+	cc->cc_cpuindex = index;
 	cc->cc_hits = 0;
 	cc->cc_misses = 0;
 	cc->cc_current = NULL;
 	cc->cc_previous = NULL;
 
-	pc->pc_cpus[ci->ci_index] = cc;
+	pc->pc_cpus[index] = cc;
 }
@@ -2212,6 +2260,14 @@ pool_cache_reclaim(pool_cache_t pc)
 	return pool_reclaim(&pc->pc_pool);
 }
 
+static void
+pool_cache_destruct_object1(pool_cache_t pc, void *object)
+{
+
+	(*pc->pc_dtor)(pc->pc_arg, object);
+	pool_put(&pc->pc_pool, object);
+}
+
 /*
  * pool_cache_destruct_object:
  *
@@ -2222,8 +2278,9 @@ void
 pool_cache_destruct_object(pool_cache_t pc, void *object)
 {
 
-	(*pc->pc_dtor)(pc->pc_arg, object);
-	pool_put(&pc->pc_pool, object);
+	FREECHECK_IN(&pc->pc_freecheck, object);
+
+	pool_cache_destruct_object1(pc, object);
 }
 
 /*
@@ -2243,7 +2300,7 @@ pool_cache_invalidate_groups(pool_cache_
 
 		for (i = 0; i < pcg->pcg_avail; i++) {
 			object = pcg->pcg_objects[i].pcgo_va;
-			pool_cache_destruct_object(pc, object);
+			pool_cache_destruct_object1(pc, object);
 		}
 
 		pool_put(&pcgpool, pcg);
@@ -2310,33 +2367,19 @@ static inline pool_cache_cpu_t *
 pool_cache_cpu_enter(pool_cache_t pc, int *s)
 {
 	pool_cache_cpu_t *cc;
-	struct cpu_info *ci;
 
 	/*
 	 * Prevent other users of the cache from accessing our
 	 * CPU-local data.  To avoid touching shared state, we
 	 * pull the neccessary information from CPU local data.
 	 */
-	ci = curcpu();
-	KASSERT(ci->ci_data.cpu_index < MAXCPUS);
-	cc = pc->pc_cpus[ci->ci_data.cpu_index];
+	crit_enter();
+	cc = pc->pc_cpus[curcpu()->ci_index];
 	KASSERT(cc->cc_cache == pc);
-	if (cc->cc_ipl == IPL_NONE) {
-		crit_enter();
-	} else {
+	if (cc->cc_ipl != IPL_NONE) {
 		*s = splraiseipl(cc->cc_iplcookie);
 	}
-
-	/* Moved to another CPU before disabling preemption? */
-	if (__predict_false(ci != curcpu())) {
-		ci = curcpu();
-		cc = pc->pc_cpus[ci->ci_data.cpu_index];
-	}
-
-#ifdef DIAGNOSTIC
-	KASSERT(cc->cc_cpu == ci);
 	KASSERT(((uintptr_t)cc & (CACHE_LINE_SIZE - 1)) == 0);
-#endif
 
 	return cc;
 }
@@ -2346,11 +2389,10 @@ pool_cache_cpu_exit(pool_cache_cpu_t *cc
 {
 
 	/* No longer need exclusive access to the per-CPU data. */
-	if (cc->cc_ipl == IPL_NONE) {
-		crit_exit();
-	} else {
+	if (cc->cc_ipl != IPL_NONE) {
 		splx(*s);
 	}
+	crit_exit();
 }
 
 #if __GNUC_PREREQ__(3, 0)
@@ -2724,12 +2766,12 @@ void pool_page_free(struct pool *, void
 #ifdef POOL_SUBPAGE
 struct pool_allocator pool_allocator_kmem_fullpage = {
 	pool_page_alloc, pool_page_free, 0,
-	.pa_backingmapptr = &kmem_map,
+	.pa_backingmapptr = &kernel_map,
 };
 #else
 struct pool_allocator pool_allocator_kmem = {
 	pool_page_alloc, pool_page_free, 0,
-	.pa_backingmapptr = &kmem_map,
+	.pa_backingmapptr = &kernel_map,
 };
 #endif
@@ -2754,7 +2796,7 @@ void pool_subpage_free(struct pool *, vo
 
 struct pool_allocator pool_allocator_kmem = {
 	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
-	.pa_backingmapptr = &kmem_map,
+	.pa_backingmapptr = &kernel_map,
 };
 
 void *pool_subpage_alloc_nointr(struct pool *, int);
@@ -2762,7 +2804,7 @@ void pool_subpage_free_nointr(struct poo
 
 struct pool_allocator pool_allocator_nointr = {
 	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
-	.pa_backingmapptr = &kmem_map,
+	.pa_backingmapptr = &kernel_map,
 };
 #endif /* POOL_SUBPAGE */
@@ -2800,14 +2842,14 @@ pool_page_alloc(struct pool *pp, int fla
 {
 	bool waitok = (flags & PR_WAITOK) ? true : false;
 
-	return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
+	return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
 }
 
 void
 pool_page_free(struct pool *pp, void *v)
 {
 
-	uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
+	uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
 }
 
 static void *
@@ -2815,14 +2857,14 @@ pool_page_alloc_meta(struct pool *pp, in
 {
 	bool waitok = (flags & PR_WAITOK) ? true : false;
 
-	return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
+	return ((void *) uvm_km_alloc_poolpage(kernel_map, waitok));
 }
 
 static void
 pool_page_free_meta(struct pool *pp, void *v)
 {
 
-	uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
+	uvm_km_free_poolpage(kernel_map, (vaddr_t) v);
 }
 
 #ifdef POOL_SUBPAGE
@@ -2868,3 +2910,49 @@ pool_page_free_nointr(struct pool *pp, v
 
 	uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
 }
+
+#if defined(DDB)
+static bool
+pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
+{
+
+	return (uintptr_t)ph->ph_page <= addr &&
+	    addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
+}
+
+void
+pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
+{
+	struct pool *pp;
+
+	LIST_FOREACH(pp, &pool_head, pr_poollist) {
+		struct pool_item_header *ph;
+		uintptr_t item;
+
+		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
+			LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
+				if (pool_in_page(pp, ph, addr)) {
+					goto found;
+				}
+			}
+			LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
+				if (pool_in_page(pp, ph, addr)) {
+					goto found;
+				}
+			}
+			continue;
+		} else {
+			ph = pr_find_pagehead_noalign(pp, (void *)addr);
+			if (ph == NULL || !pool_in_page(pp, ph, addr)) {
+				continue;
+			}
+		}
+found:
+		item = (uintptr_t)ph->ph_page + ph->ph_off;
+		item = item + rounddown(addr - item, pp->pr_size);
+		(*pr)("%p is %p+%zu from POOL '%s'\n",
+		    (void *)addr, item, (size_t)(addr - item),
+		    pp->pr_wchan);
+	}
+}
+#endif /* defined(DDB) */
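
A note on the DDB addition above: pool_whatis() recovers the start of the
item containing an arbitrary address by adding the page's colour offset
(ph_off, which this change records in every page header rather than only
in PR_NOTOUCH ones) to ph_page and then rounding down in pr_size steps.
A minimal userland sketch of that arithmetic, with hypothetical
page/off/size/addr values standing in for ph_page, ph_off, pr_size and
the address being looked up:

	#include <stdio.h>
	#include <stdint.h>

	/* same definition as NetBSD's <sys/param.h> */
	#define rounddown(x, y)	(((x) / (y)) * (y))

	int
	main(void)
	{
		uintptr_t page = 0x1000;	/* hypothetical ph_page */
		uintptr_t off  = 0x40;		/* hypothetical ph_off (colour) */
		uintptr_t size = 0x60;		/* hypothetical pr_size */
		uintptr_t addr = 0x1234;	/* address being looked up */
		uintptr_t item;

		/* coloured start of the page, then start of containing item */
		item = page + off;
		item = item + rounddown(addr - item, size);

		printf("%p is %p+%zu\n", (void *)addr, (void *)item,
		    (size_t)(addr - item));
		return 0;
	}

With these values the sketch prints "0x1234 is 0x1220+20", the same shape
as the "%p is %p+%zu from POOL '%s'" line pool_whatis() emits from ddb.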
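The pool_cache_cpu_enter() hunk is also worth a note: the old code read
curcpu() before disabling preemption, so it had to re-check afterwards
whether the thread had migrated; the new code calls crit_enter() first,
which makes the per-CPU slot lookup immune to migration.  A hedged sketch
of the resulting shape (cache_cpu_enter_sketch is an illustrative name,
and the IPL-raising path of the real function is omitted):

	static inline pool_cache_cpu_t *
	cache_cpu_enter_sketch(pool_cache_t pc)
	{
		pool_cache_cpu_t *cc;

		crit_enter();	/* pin to the current CPU first... */
		/* ...so ci_index is stable and no migration re-check is needed */
		cc = pc->pc_cpus[curcpu()->ci_index];
		KASSERT(cc->cc_cache == pc);
		return cc;
	}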