Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v
rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v: warning: Unknown phrases like `commitid ...;' are present.
retrieving revision 1.133.4.3
retrieving revision 1.134
diff -u -p -r1.133.4.3 -r1.134
--- src/sys/kern/subr_pool.c	2007/12/27 00:46:08	1.133.4.3
+++ src/sys/kern/subr_pool.c	2007/11/07 00:23:23	1.134
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_pool.c,v 1.133.4.3 2007/12/27 00:46:08 mjf Exp $	*/
+/*	$NetBSD: subr_pool.c,v 1.134 2007/11/07 00:23:23 ad Exp $	*/
 
 /*-
  * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc.
@@ -38,16 +38,14 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.133.4.3 2007/12/27 00:46:08 mjf Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.134 2007/11/07 00:23:23 ad Exp $");
 
-#include "opt_ddb.h"
 #include "opt_pool.h"
 #include "opt_poollog.h"
 #include "opt_lockdebug.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
-#include <sys/bitops.h>
 #include <sys/proc.h>
 #include <sys/errno.h>
 #include <sys/kernel.h>
@@ -85,8 +83,7 @@ LIST_HEAD(,pool_cache) pool_cache_head =
 
 /* Private pool for page header structures */
 #define	PHPOOL_MAX	8
 static struct pool phpool[PHPOOL_MAX];
-#define	PHPOOL_FREELIST_NELEM(idx) \
-	(((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
+#define	PHPOOL_FREELIST_NELEM(idx)	(((idx) == 0) ? 0 : (1 << (idx)))
 
 #ifdef POOL_SUBPAGE
 /* Pool of subpages for use by normal pools. */
@@ -115,9 +112,7 @@ static struct pool *drainpp;
 static kmutex_t pool_head_lock;
 static kcondvar_t pool_busy;
 
-typedef uint32_t pool_item_bitmap_t;
-#define	BITMAP_SIZE	(CHAR_BIT * sizeof(pool_item_bitmap_t))
-#define	BITMAP_MASK	(BITMAP_SIZE - 1)
+typedef uint8_t pool_item_freelist_t;
 
 struct pool_item_header {
 	/* Page headers */
@@ -127,8 +122,6 @@ struct pool_item_header {
 				ph_node;	/* Off-page page headers */
 	void *			ph_page;	/* this page's address */
 	struct timeval		ph_time;	/* last referenced */
-	uint16_t		ph_nmissing;	/* # of chunks in use */
-	uint16_t		ph_off;		/* start offset in page */
 	union {
 		/* !PR_NOTOUCH */
 		struct {
@@ -137,12 +130,21 @@ struct pool_item_header {
 		} phu_normal;
 		/* PR_NOTOUCH */
 		struct {
-			pool_item_bitmap_t phu_bitmap[1];
+			uint16_t
+				phu_off;	/* start offset in page */
+			pool_item_freelist_t
+				phu_firstfree;	/* first free item */
+			/*
+			 * XXX it might be better to use
+			 * a simple bitmap and ffs(3)
+			 */
 		} phu_notouch;
 	} ph_u;
+	uint16_t		ph_nmissing;	/* # of chunks in use */
 };
 #define	ph_itemlist	ph_u.phu_normal.phu_itemlist
-#define	ph_bitmap	ph_u.phu_notouch.phu_bitmap
+#define	ph_off		ph_u.phu_notouch.phu_off
+#define	ph_firstfree	ph_u.phu_notouch.phu_firstfree
 
 struct pool_item {
 #ifdef DIAGNOSTIC
@@ -181,8 +183,7 @@ struct pool_item {
  * from it.
 */
 
-static struct pool pcg_normal_pool;
-static struct pool pcg_large_pool;
+static struct pool pcgpool;
 static struct pool cache_pool;
 static struct pool cache_cpu_pool;
 
@@ -329,12 +330,12 @@ pr_enter_check(struct pool *pp, void (*p
 #define	pr_enter_check(pp, pr)
 #endif /* POOL_DIAGNOSTIC */
 
-static inline unsigned int
+static inline int
 pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
     const void *v)
 {
	const char *cp = v;
-	unsigned int idx;
+	int idx;
 
	KASSERT(pp->pr_roflags & PR_NOTOUCH);
	idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
@@ -342,57 +343,37 @@ pr_item_notouch_index(const struct pool
	return idx;
 }
 
+#define	PR_FREELIST_ALIGN(p) \
+	roundup((uintptr_t)(p), sizeof(pool_item_freelist_t))
+#define	PR_FREELIST(ph)	((pool_item_freelist_t *)PR_FREELIST_ALIGN((ph) + 1))
+#define	PR_INDEX_USED	((pool_item_freelist_t)-1)
+#define	PR_INDEX_EOL	((pool_item_freelist_t)-2)
+
 static inline void
 pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
     void *obj)
 {
-	unsigned int idx = pr_item_notouch_index(pp, ph, obj);
-	pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
-	pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
+	int idx = pr_item_notouch_index(pp, ph, obj);
+	pool_item_freelist_t *freelist = PR_FREELIST(ph);
 
-	KASSERT((*bitmap & mask) == 0);
-	*bitmap |= mask;
+	KASSERT(freelist[idx] == PR_INDEX_USED);
+	freelist[idx] = ph->ph_firstfree;
+	ph->ph_firstfree = idx;
 }
 
 static inline void *
 pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
 {
-	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
-	unsigned int idx;
-	int i;
+	int idx = ph->ph_firstfree;
+	pool_item_freelist_t *freelist = PR_FREELIST(ph);
 
-	for (i = 0; ; i++) {
-		int bit;
+	KASSERT(freelist[idx] != PR_INDEX_USED);
+	ph->ph_firstfree = freelist[idx];
+	freelist[idx] = PR_INDEX_USED;
 
-		KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
-		bit = ffs32(bitmap[i]);
-		if (bit) {
-			pool_item_bitmap_t mask;
-
-			bit--;
-			idx = (i * BITMAP_SIZE) + bit;
-			mask = 1 << bit;
-			KASSERT((bitmap[i] & mask) != 0);
-			bitmap[i] &= ~mask;
-			break;
-		}
-	}
-	KASSERT(idx < pp->pr_itemsperpage);
	return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
 }
 
-static inline void
-pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
-{
-	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
-	const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
-	int i;
-
-	for (i = 0; i < n; i++) {
-		bitmap[i] = (pool_item_bitmap_t)-1;
-	}
-}
-
 static inline int
 phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
 {
@@ -413,24 +394,6 @@ phtree_compare(struct pool_item_header *
 SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
 SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
 
-static inline struct pool_item_header *
-pr_find_pagehead_noalign(struct pool *pp, void *v)
-{
-	struct pool_item_header *ph, tmp;
-
-	tmp.ph_page = (void *)(uintptr_t)v;
-	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
-	if (ph == NULL) {
-		ph = SPLAY_ROOT(&pp->pr_phtree);
-		if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
-			ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
-		}
-		KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
-	}
-
-	return ph;
-}
-
 /*
  * Return the pool page header based on item address.
 */
@@ -440,7 +403,15 @@ pr_find_pagehead(struct pool *pp, void *
	struct pool_item_header *ph, tmp;
 
	if ((pp->pr_roflags & PR_NOALIGN) != 0) {
-		ph = pr_find_pagehead_noalign(pp, v);
+		tmp.ph_page = (void *)(uintptr_t)v;
+		ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
+		if (ph == NULL) {
+			ph = SPLAY_ROOT(&pp->pr_phtree);
+			if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
+				ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
+			}
+			KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
+		}
	} else {
		void *page =
		    (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
@@ -632,6 +603,9 @@ pool_init(struct pool *pp, size_t size,
	size_t trysize, phsize;
	int off, slack;
 
+	KASSERT((1UL << (CHAR_BIT * sizeof(pool_item_freelist_t))) - 2 >=
+	    PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1));
+
 #ifdef DEBUG
	/*
	 * Check that the pool hasn't already been initialised and
@@ -813,14 +787,7 @@ pool_init(struct pool *pp, size_t size,
	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;
 
-	/*
-	 * XXXAD hack to prevent IP input processing from blocking.
-	 */
-	if (ipl == IPL_SOFTNET) {
-		mutex_init(&pp->pr_lock, MUTEX_DEFAULT, IPL_VM);
-	} else {
-		mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
-	}
+	mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
	cv_init(&pp->pr_cv, wchan);
	pp->pr_ipl = ipl;
@@ -841,8 +808,8 @@ pool_init(struct pool *pp, size_t size,
			    "phpool-%d", nelem);
			sz = sizeof(struct pool_item_header);
			if (nelem) {
-				sz = offsetof(struct pool_item_header,
-				    ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
+				sz = PR_FREELIST_ALIGN(sz) +
+				    nelem * sizeof(pool_item_freelist_t);
			}
			pool_init(&phpool[idx], sz, 0, 0, 0,
			    phpool_names[idx], &pool_allocator_meta, IPL_VM);
@@ -851,16 +818,8 @@ pool_init(struct pool *pp, size_t size,
		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
		    PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
 #endif
-
-		size = sizeof(pcg_t) +
-		    (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
-		pool_init(&pcg_normal_pool, size, CACHE_LINE_SIZE, 0, 0,
-		    "pcgnormal", &pool_allocator_meta, IPL_VM);
-
-		size = sizeof(pcg_t) +
-		    (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
-		pool_init(&pcg_large_pool, size, CACHE_LINE_SIZE, 0, 0,
-		    "pcglarge", &pool_allocator_meta, IPL_VM);
+		pool_init(&pcgpool, sizeof(pcg_t), CACHE_LINE_SIZE, 0, 0,
+		    "cachegrp", &pool_allocator_meta, IPL_VM);
	}
 
	if (__predict_true(!cold)) {
@@ -1455,8 +1414,7 @@ pool_prime_page(struct pool *pp, void *s
	/*
	 * Color this page.
	 */
-	ph->ph_off = pp->pr_curcolor;
-	cp = (char *)cp + ph->ph_off;
+	cp = (char *)cp + pp->pr_curcolor;
	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
		pp->pr_curcolor = 0;
 
@@ -1475,7 +1433,14 @@ pool_prime_page(struct pool *pp, void *s
	pp->pr_nitems += n;
 
	if (pp->pr_roflags & PR_NOTOUCH) {
-		pr_item_notouch_init(pp, ph);
+		pool_item_freelist_t *freelist = PR_FREELIST(ph);
+		int i;
+
+		ph->ph_off = (char *)cp - (char *)storage;
+		ph->ph_firstfree = 0;
+		for (i = 0; i < n - 1; i++)
+			freelist[i] = i + 1;
+		freelist[n - 1] = PR_INDEX_EOL;
	} else {
		while (n--) {
			pi = (struct pool_item *)cp;
@@ -1886,7 +1851,7 @@ pool_print1(struct pool *pp, const char
 
 #define PR_GROUPLIST(pcg)						\
	(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);	\
-	for (i = 0; i < pcg->pcg_size; i++) {				\
+	for (i = 0; i < PCG_NOBJECTS; i++) {				\
		if (pcg->pcg_objects[i].pcgo_pa !=			\
		    POOL_PADDR_INVALID) {				\
			(*pr)("\t\t\t%p, 0x%llx\n",			\
@@ -2067,14 +2032,7 @@ pool_cache_bootstrap(pool_cache_t pc, si
		palloc = &pool_allocator_nointr;
	pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
 
-	/*
-	 * XXXAD hack to prevent IP input processing from blocking.
-	 */
-	if (ipl == IPL_SOFTNET) {
-		mutex_init(&pc->pc_lock, MUTEX_DEFAULT, IPL_VM);
-	} else {
-		mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
-	}
+	mutex_init(&pc->pc_lock, MUTEX_DEFAULT, pp->pr_ipl);
 
	if (ctor == NULL) {
		ctor = (int (*)(void *, void *, int))nullop;
	}
@@ -2096,24 +2054,12 @@ pool_cache_bootstrap(pool_cache_t pc, si
	pc->pc_nfull = 0;
	pc->pc_contended = 0;
	pc->pc_refcnt = 0;
-	pc->pc_freecheck = NULL;
-
-	if ((flags & PR_LARGECACHE) != 0) {
-		pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
-	} else {
-		pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
-	}
 
	/* Allocate per-CPU caches. */
	memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
	pc->pc_ncpu = 0;
-	if (ncpu < 2) {
-		/* XXX For sparc: boot CPU is not attached yet. */
-		pool_cache_cpu_init1(curcpu(), pc);
-	} else {
-		for (CPU_INFO_FOREACH(cii, ci)) {
-			pool_cache_cpu_init1(ci, pc);
-		}
+	for (CPU_INFO_FOREACH(cii, ci)) {
+		pool_cache_cpu_init1(ci, pc);
	}
 
	if (__predict_true(!cold)) {
@@ -2188,15 +2134,11 @@ static void
 pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
 {
	pool_cache_cpu_t *cc;
-	int index;
-
-	index = ci->ci_index;
-	KASSERT(index < MAXCPUS);
 
	KASSERT(((uintptr_t)pc->pc_cpus & (CACHE_LINE_SIZE - 1)) == 0);
 
-	if ((cc = pc->pc_cpus[index]) != NULL) {
-		KASSERT(cc->cc_cpuindex == index);
+	if ((cc = pc->pc_cpus[ci->ci_index]) != NULL) {
+		KASSERT(cc->cc_cpu == ci);
		return;
	}
 
@@ -2217,13 +2159,13 @@ pool_cache_cpu_init1
	cc->cc_ipl = pc->pc_pool.pr_ipl;
	cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
	cc->cc_cache = pc;
-	cc->cc_cpuindex = index;
+	cc->cc_cpu = ci;
	cc->cc_hits = 0;
	cc->cc_misses = 0;
	cc->cc_current = NULL;
	cc->cc_previous = NULL;
 
-	pc->pc_cpus[index] = cc;
+	pc->pc_cpus[ci->ci_index] = cc;
 }
 
 /*
@@ -2262,14 +2204,6 @@ pool_cache_reclaim(pool_cache_t pc)
	return pool_reclaim(&pc->pc_pool);
 }
 
-static void
-pool_cache_destruct_object1(pool_cache_t pc, void *object)
-{
-
-	(*pc->pc_dtor)(pc->pc_arg, object);
-	pool_put(&pc->pc_pool, object);
-}
-
 /*
  * pool_cache_destruct_object:
@@ -2280,9 +2214,8 @@ void
 pool_cache_destruct_object(pool_cache_t pc, void *object)
 {
 
-	FREECHECK_IN(&pc->pc_freecheck, object);
-
-	pool_cache_destruct_object1(pc, object);
+	(*pc->pc_dtor)(pc->pc_arg, object);
+	pool_put(&pc->pc_pool, object);
 }
 
 /*
@@ -2302,15 +2235,10 @@ pool_cache_invalidate_groups(pool_cache_
		for (i = 0; i < pcg->pcg_avail; i++) {
			object = pcg->pcg_objects[i].pcgo_va;
-			pool_cache_destruct_object1(pc, object);
+			pool_cache_destruct_object(pc, object);
		}
 
-		if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
-			pool_put(&pcg_large_pool, pcg);
-		} else {
-			KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
-			pool_put(&pcg_normal_pool, pcg);
-		}
+		pool_put(&pcgpool, pcg);
	}
 }
@@ -2374,19 +2302,33 @@ static inline pool_cache_cpu_t *
 pool_cache_cpu_enter(pool_cache_t pc, int *s)
 {
	pool_cache_cpu_t *cc;
+	struct cpu_info *ci;
 
	/*
	 * Prevent other users of the cache from accessing our
	 * CPU-local data.  To avoid touching shared state, we
	 * pull the necessary information from CPU local data.
	 */
-	crit_enter();
-	cc = pc->pc_cpus[curcpu()->ci_index];
+	ci = curcpu();
+	KASSERT(ci->ci_data.cpu_index < MAXCPUS);
+	cc = pc->pc_cpus[ci->ci_data.cpu_index];
	KASSERT(cc->cc_cache == pc);
-	if (cc->cc_ipl != IPL_NONE) {
+	if (cc->cc_ipl == IPL_NONE) {
+		crit_enter();
+	} else {
		*s = splraiseipl(cc->cc_iplcookie);
	}
+
+	/* Moved to another CPU before disabling preemption? */
+	if (__predict_false(ci != curcpu())) {
+		ci = curcpu();
+		cc = pc->pc_cpus[ci->ci_data.cpu_index];
+	}
+
+#ifdef DIAGNOSTIC
+	KASSERT(cc->cc_cpu == ci);
	KASSERT(((uintptr_t)cc & (CACHE_LINE_SIZE - 1)) == 0);
+#endif
 
	return cc;
 }
@@ -2396,10 +2338,11 @@ pool_cache_cpu_exit(pool_cache_cpu_t *cc
 {
 
	/* No longer need exclusive access to the per-CPU data. */
-	if (cc->cc_ipl != IPL_NONE) {
+	if (cc->cc_ipl == IPL_NONE) {
+		crit_exit();
+	} else {
		splx(*s);
	}
-	crit_exit();
 }
 
 #if __GNUC_PREREQ__(3, 0)
@@ -2450,7 +2393,7 @@ pool_cache_get_slow(pool_cache_cpu_t *cc
			pc->pc_emptygroups = cur;
			pc->pc_nempty++;
		}
-		KASSERT(pcg->pcg_avail == pcg->pcg_size);
+		KASSERT(pcg->pcg_avail == PCG_NOBJECTS);
		cc->cc_current = pcg;
		pc->pc_fullgroups = pcg->pcg_next;
		pc->pc_hits++;
@@ -2522,7 +2465,7 @@ pool_cache_get_paddr(pool_cache_t pc, in
			if (pap != NULL)
				*pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
-			KASSERT(pcg->pcg_avail <= pcg->pcg_size);
+			KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
			KASSERT(object != NULL);
			cc->cc_hits++;
			pool_cache_cpu_exit(cc, &s);
@@ -2562,7 +2505,6 @@ pool_cache_put_slow(pool_cache_cpu_t *cc
	pcg_t *pcg, *cur;
	uint64_t ncsw;
	pool_cache_t pc;
-	u_int nobj;
 
	pc = cc->cc_cache;
	cc->cc_misses++;
@@ -2595,7 +2537,7 @@ pool_cache_put_slow(pool_cache_cpu_t *cc
		 * group as cc_current and return.
		 */
		if ((cur = cc->cc_current) != NULL) {
-			KASSERT(cur->pcg_avail == pcg->pcg_size);
+			KASSERT(cur->pcg_avail == PCG_NOBJECTS);
			cur->pcg_next = pc->pc_fullgroups;
			pc->pc_fullgroups = cur;
			pc->pc_nfull++;
@@ -2622,18 +2564,16 @@ pool_cache_put_slow(pool_cache_cpu_t *cc
	 * If we can't allocate a new group, just throw the
	 * object away.
	 */
-	nobj = pc->pc_pcgsize;
-	if (nobj == PCG_NOBJECTS_LARGE) {
-		pcg = pool_get(&pcg_large_pool, PR_NOWAIT);
-	} else {
-		pcg = pool_get(&pcg_normal_pool, PR_NOWAIT);
-	}
+	pcg = pool_get(&pcgpool, PR_NOWAIT);
	if (pcg == NULL) {
		pool_cache_destruct_object(pc, object);
		return NULL;
	}
+#ifdef DIAGNOSTIC
+	memset(pcg, 0, sizeof(*pcg));
+#else
	pcg->pcg_avail = 0;
-	pcg->pcg_size = nobj;
+#endif
 
	/*
	 * Add the empty group to the cache and try again.
@@ -2666,7 +2606,9 @@ pool_cache_put_paddr(pool_cache_t pc, vo
	do {
		/* If the current group isn't full, release it there. */
		pcg = cc->cc_current;
-		if (pcg != NULL && pcg->pcg_avail < pcg->pcg_size) {
+		if (pcg != NULL && pcg->pcg_avail < PCG_NOBJECTS) {
+			KASSERT(pcg->pcg_objects[pcg->pcg_avail].pcgo_va
+			    == NULL);
			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
			pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
			pcg->pcg_avail++;
@@ -2724,7 +2666,7 @@ pool_cache_xcall(pool_cache_t pc)
	s = splvm();
	mutex_enter(&pc->pc_lock);
	if (cur != NULL) {
-		if (cur->pcg_avail == cur->pcg_size) {
+		if (cur->pcg_avail == PCG_NOBJECTS) {
			list = &pc->pc_fullgroups;
			pc->pc_nfull++;
		} else if (cur->pcg_avail == 0) {
@@ -2738,7 +2680,7 @@ pool_cache_xcall(pool_cache_t pc)
		*list = cur;
	}
	if (prev != NULL) {
-		if (prev->pcg_avail == prev->pcg_size) {
+		if (prev->pcg_avail == PCG_NOBJECTS) {
			list = &pc->pc_fullgroups;
			pc->pc_nfull++;
		} else if (prev->pcg_avail == 0) {
@@ -2918,143 +2860,3 @@ pool_page_free_nointr(struct pool *pp, v
 
	uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
 }
-
-#if defined(DDB)
-static bool
-pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
-{
-
-	return (uintptr_t)ph->ph_page <= addr &&
-	    addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
-}
-
-static bool
-pool_in_item(struct pool *pp, void *item, uintptr_t addr)
-{
-
-	return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
-}
-
-static bool
-pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
-{
-	int i;
-
-	if (pcg == NULL) {
-		return false;
-	}
-	for (i = 0; i < pcg->pcg_avail; i++) {
-		if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
-			return true;
-		}
-	}
-	return false;
-}
-
-static bool
-pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
-{
-
-	if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
-		unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
-		pool_item_bitmap_t *bitmap =
-		    ph->ph_bitmap + (idx / BITMAP_SIZE);
-		pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
-
-		return (*bitmap & mask) == 0;
-	} else {
-		struct pool_item *pi;
-
-		LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
-			if (pool_in_item(pp, pi, addr)) {
-				return false;
-			}
-		}
-		return true;
-	}
-}
-
-void
-pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
-{
-	struct pool *pp;
-
-	LIST_FOREACH(pp, &pool_head, pr_poollist) {
-		struct pool_item_header *ph;
-		uintptr_t item;
-		bool allocated = true;
-		bool incache = false;
-		bool incpucache = false;
-		char cpucachestr[32];
-
-		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
-			LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
-				if (pool_in_page(pp, ph, addr)) {
-					goto found;
-				}
-			}
-			LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
-				if (pool_in_page(pp, ph, addr)) {
-					allocated =
-					    pool_allocated(pp, ph, addr);
-					goto found;
-				}
-			}
-			LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
-				if (pool_in_page(pp, ph, addr)) {
-					allocated = false;
-					goto found;
-				}
-			}
-			continue;
-		} else {
-			ph = pr_find_pagehead_noalign(pp, (void *)addr);
-			if (ph == NULL || !pool_in_page(pp, ph, addr)) {
-				continue;
-			}
-			allocated = pool_allocated(pp, ph, addr);
-		}
-found:
-		if (allocated && pp->pr_cache) {
-			pool_cache_t pc = pp->pr_cache;
-			struct pool_cache_group *pcg;
-			int i;
-
-			for (pcg = pc->pc_fullgroups; pcg != NULL;
-			    pcg = pcg->pcg_next) {
-				if (pool_in_cg(pp, pcg, addr)) {
-					incache = true;
-					goto print;
-				}
-			}
-			for (i = 0; i < MAXCPUS; i++) {
-				pool_cache_cpu_t *cc;
-
-				if ((cc = pc->pc_cpus[i]) == NULL) {
-					continue;
-				}
-				if (pool_in_cg(pp, cc->cc_current, addr) ||
-				    pool_in_cg(pp, cc->cc_previous, addr)) {
-					struct cpu_info *ci =
-					    cpu_lookup_byindex(i);
-
-					incpucache = true;
-					snprintf(cpucachestr,
-					    sizeof(cpucachestr),
-					    "cached by CPU %u",
-					    (u_int)ci->ci_cpuid);
-					goto print;
-				}
-			}
-		}
-print:
-		item = (uintptr_t)ph->ph_page + ph->ph_off;
-		item = item + rounddown(addr - item, pp->pr_size);
-		(*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
-		    (void *)addr, item, (size_t)(addr - item),
-		    pp->pr_wchan,
-		    incpucache ? cpucachestr :
-		    incache ? "cached" : allocated ? "allocated" : "free");
-	}
-}
-#endif /* defined(DDB) */
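
As background for reading the hunks above: the two revisions track free items in PR_NOTOUCH pool pages differently. Revision 1.134 threads a linked freelist of uint8_t item indices through the page header (ph_firstfree, pr_item_notouch_get/put), while the 1.133.4.3 branch keeps one bit per item in uint32_t words and allocates with ffs32(), as the 1.134 comment "XXX it might be better to use a simple bitmap and ffs(3)" already suggested. What follows is a minimal user-space sketch of both schemes, not code from either revision; the names (NITEMS, freelist_get, bitmap_get, and so on) are invented for illustration, a single 32-bit word stands in for the per-page bitmap array, and POSIX ffs(3) stands in for the kernel's ffs32().

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <strings.h>            /* ffs(3) */

#define NITEMS 8                /* items per page in this toy example */

/* Scheme of rev 1.134: a linked freelist of item indices. */
#define INDEX_USED ((uint8_t)-1)
#define INDEX_EOL  ((uint8_t)-2)
static uint8_t freelist[NITEMS];
static uint8_t firstfree;

static void
freelist_init(void)
{
	int i;

	for (i = 0; i < NITEMS - 1; i++)
		freelist[i] = i + 1;    /* each slot names the next free one */
	freelist[NITEMS - 1] = INDEX_EOL;
	firstfree = 0;
}

static int
freelist_get(void)
{
	int idx = firstfree;

	assert(freelist[idx] != INDEX_USED);
	firstfree = freelist[idx];      /* unlink the head */
	freelist[idx] = INDEX_USED;
	return idx;
}

static void
freelist_put(int idx)
{
	assert(freelist[idx] == INDEX_USED);
	freelist[idx] = firstfree;      /* push back onto the head */
	firstfree = idx;
}

/* Scheme of the 1.133.4.3 branch: one bit per item, 1 meaning free. */
static uint32_t bitmap = (1U << NITEMS) - 1;

static int
bitmap_get(void)
{
	int bit = ffs(bitmap);          /* lowest set bit, 1-based; 0 if none */

	assert(bit != 0);               /* page must not be full */
	bitmap &= ~(1U << (bit - 1));
	return bit - 1;
}

static void
bitmap_put(int idx)
{
	assert((bitmap & (1U << idx)) == 0);
	bitmap |= 1U << idx;
}

int
main(void)
{
	int a, b;

	freelist_init();
	a = freelist_get();
	b = freelist_get();
	freelist_put(a);
	printf("freelist: got %d and %d, returned %d\n", a, b, a);

	a = bitmap_get();
	b = bitmap_get();
	bitmap_put(a);
	printf("bitmap:   got %d and %d, returned %d\n", a, b, a);
	return 0;
}

Both variants do constant-time put; the bitmap variant trades a short word scan on get for needing only one bit of metadata per item, never touching the items themselves.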