Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. =================================================================== RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v: warning: Unknown phrases like `commitid ...;' are present. retrieving revision 1.138.2.1 retrieving revision 1.155 diff -u -p -r1.138.2.1 -r1.155 --- src/sys/kern/subr_pool.c 2007/12/10 08:56:56 1.138.2.1 +++ src/sys/kern/subr_pool.c 2008/03/17 17:05:54 1.155 @@ -1,4 +1,4 @@ -/* $NetBSD: subr_pool.c,v 1.138.2.1 2007/12/10 08:56:56 yamt Exp $ */ +/* $NetBSD: subr_pool.c,v 1.155 2008/03/17 17:05:54 ad Exp $ */ /*- * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc. @@ -38,8 +38,9 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.138.2.1 2007/12/10 08:56:56 yamt Exp $"); +__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.155 2008/03/17 17:05:54 ad Exp $"); +#include "opt_ddb.h" #include "opt_pool.h" #include "opt_poollog.h" #include "opt_lockdebug.h" @@ -51,13 +52,13 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c, #include #include #include -#include #include #include #include #include #include #include +#include #include @@ -75,11 +76,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c, */ /* List of all pools */ -LIST_HEAD(,pool) pool_head = LIST_HEAD_INITIALIZER(pool_head); - -/* List of all caches. */ -LIST_HEAD(,pool_cache) pool_cache_head = - LIST_HEAD_INITIALIZER(pool_cache_head); +TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head); /* Private pool for page header structures */ #define PHPOOL_MAX 8 @@ -125,8 +122,9 @@ struct pool_item_header { SPLAY_ENTRY(pool_item_header) ph_node; /* Off-page page headers */ void * ph_page; /* this page's address */ - struct timeval ph_time; /* last referenced */ + uint32_t ph_time; /* last referenced */ uint16_t ph_nmissing; /* # of chunks in use */ + uint16_t ph_off; /* start offset in page */ union { /* !PR_NOTOUCH */ struct { @@ -135,13 +133,11 @@ struct pool_item_header { } phu_normal; /* PR_NOTOUCH */ struct { - uint16_t phu_off; /* start offset in page */ - pool_item_bitmap_t phu_bitmap[]; + pool_item_bitmap_t phu_bitmap[1]; } phu_notouch; } ph_u; }; #define ph_itemlist ph_u.phu_normal.phu_itemlist -#define ph_off ph_u.phu_notouch.phu_off #define ph_bitmap ph_u.phu_notouch.phu_bitmap struct pool_item { @@ -181,10 +177,18 @@ struct pool_item { * from it. */ -static struct pool pcgpool; +static struct pool pcg_normal_pool; +static struct pool pcg_large_pool; static struct pool cache_pool; static struct pool cache_cpu_pool; +/* List of all caches. 
*/ +TAILQ_HEAD(,pool_cache) pool_cache_head = + TAILQ_HEAD_INITIALIZER(pool_cache_head); + +int pool_cache_disable; + + static pool_cache_cpu_t *pool_cache_put_slow(pool_cache_cpu_t *, int *, void *, paddr_t); static pool_cache_cpu_t *pool_cache_get_slow(pool_cache_cpu_t *, int *, @@ -412,6 +416,24 @@ phtree_compare(struct pool_item_header * SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare); SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare); +static inline struct pool_item_header * +pr_find_pagehead_noalign(struct pool *pp, void *v) +{ + struct pool_item_header *ph, tmp; + + tmp.ph_page = (void *)(uintptr_t)v; + ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp); + if (ph == NULL) { + ph = SPLAY_ROOT(&pp->pr_phtree); + if (ph != NULL && phtree_compare(&tmp, ph) >= 0) { + ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph); + } + KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0); + } + + return ph; +} + /* * Return the pool page header based on item address. */ @@ -421,15 +443,7 @@ pr_find_pagehead(struct pool *pp, void * struct pool_item_header *ph, tmp; if ((pp->pr_roflags & PR_NOALIGN) != 0) { - tmp.ph_page = (void *)(uintptr_t)v; - ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp); - if (ph == NULL) { - ph = SPLAY_ROOT(&pp->pr_phtree); - if (ph != NULL && phtree_compare(&tmp, ph) >= 0) { - ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph); - } - KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0); - } + ph = pr_find_pagehead_noalign(pp, v); } else { void *page = (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask); @@ -615,9 +629,7 @@ void pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags, const char *wchan, struct pool_allocator *palloc, int ipl) { -#ifdef DEBUG struct pool *pp1; -#endif size_t trysize, phsize; int off, slack; @@ -626,7 +638,7 @@ pool_init(struct pool *pp, size_t size, * Check that the pool hasn't already been initialised and * added to the list of all pools. */ - LIST_FOREACH(pp1, &pool_head, pr_poollist) { + TAILQ_FOREACH(pp1, &pool_head, pr_poollist) { if (pp == pp1) panic("pool_init: pool %s already initialised", wchan); @@ -840,24 +852,38 @@ pool_init(struct pool *pp, size_t size, pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0, PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM); #endif - pool_init(&pcgpool, sizeof(pcg_t), CACHE_LINE_SIZE, 0, 0, - "cachegrp", &pool_allocator_meta, IPL_VM); + + size = sizeof(pcg_t) + + (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t); + pool_init(&pcg_normal_pool, size, CACHE_LINE_SIZE, 0, 0, + "pcgnormal", &pool_allocator_meta, IPL_VM); + + size = sizeof(pcg_t) + + (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t); + pool_init(&pcg_large_pool, size, CACHE_LINE_SIZE, 0, 0, + "pcglarge", &pool_allocator_meta, IPL_VM); } - if (__predict_true(!cold)) { - /* Insert into the list of all pools. */ + /* Insert into the list of all pools. */ + if (__predict_true(!cold)) mutex_enter(&pool_head_lock); - LIST_INSERT_HEAD(&pool_head, pp, pr_poollist); + TAILQ_FOREACH(pp1, &pool_head, pr_poollist) { + if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0) + break; + } + if (pp1 == NULL) + TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist); + else + TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist); + if (__predict_true(!cold)) mutex_exit(&pool_head_lock); /* Insert this into the list of pools using this allocator. 
*/ + if (__predict_true(!cold)) mutex_enter(&palloc->pa_lock); - TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list); + TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list); + if (__predict_true(!cold)) mutex_exit(&palloc->pa_lock); - } else { - LIST_INSERT_HEAD(&pool_head, pp, pr_poollist); - TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list); - } pool_reclaim_register(pp); } @@ -875,7 +901,7 @@ pool_destroy(struct pool *pp) mutex_enter(&pool_head_lock); while (pp->pr_refcnt != 0) cv_wait(&pool_busy, &pool_head_lock); - LIST_REMOVE(pp, pr_poollist); + TAILQ_REMOVE(&pool_head, pp, pr_poollist); if (drainpp == pp) drainpp = NULL; mutex_exit(&pool_head_lock); @@ -969,8 +995,9 @@ pool_get(struct pool *pp, int flags) #endif /* DIAGNOSTIC */ #ifdef LOCKDEBUG - if (flags & PR_WAITOK) - ASSERT_SLEEPABLE(NULL, "pool_get(PR_WAITOK)"); + if (flags & PR_WAITOK) { + ASSERT_SLEEPABLE(); + } #endif mutex_enter(&pp->pr_lock); @@ -1262,8 +1289,7 @@ pool_do_put(struct pool *pp, void *v, st if (ph->ph_nmissing == 0) { pp->pr_nidle++; if (pp->pr_npages > pp->pr_minpages && - (pp->pr_npages > pp->pr_maxpages || - pa_starved_p(pp->pr_alloc))) { + pp->pr_npages > pp->pr_maxpages) { pr_rmpage(pp, ph, pq); } else { LIST_REMOVE(ph, ph_pagelist); @@ -1274,8 +1300,11 @@ pool_do_put(struct pool *pp, void *v, st * be idle for some period of time before it can * be reclaimed by the pagedaemon. This minimizes * ping-pong'ing for memory. + * + * note for 64-bit time_t: truncating to 32-bit is not + * a problem for our usage. */ - getmicrotime(&ph->ph_time); + ph->ph_time = time_uptime; } pool_update_curpage(pp); } @@ -1427,7 +1456,7 @@ pool_prime_page(struct pool *pp, void *s LIST_INIT(&ph->ph_itemlist); ph->ph_page = storage; ph->ph_nmissing = 0; - getmicrotime(&ph->ph_time); + ph->ph_time = time_uptime; if ((pp->pr_roflags & PR_PHINPAGE) == 0) SPLAY_INSERT(phtree, &pp->pr_phtree, ph); @@ -1436,7 +1465,8 @@ pool_prime_page(struct pool *pp, void *s /* * Color this page. */ - cp = (char *)cp + pp->pr_curcolor; + ph->ph_off = pp->pr_curcolor; + cp = (char *)cp + ph->ph_off; if ((pp->pr_curcolor += align) > pp->pr_maxcolor) pp->pr_curcolor = 0; @@ -1587,7 +1617,7 @@ pool_reclaim(struct pool *pp) { struct pool_item_header *ph, *phnext; struct pool_pagelist pq; - struct timeval curtime, diff; + uint32_t curtime; bool klock; int rv; @@ -1624,7 +1654,7 @@ pool_reclaim(struct pool *pp) LIST_INIT(&pq); - getmicrotime(&curtime); + curtime = time_uptime; for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) { phnext = LIST_NEXT(ph, ph_pagelist); @@ -1634,8 +1664,7 @@ pool_reclaim(struct pool *pp) break; KASSERT(ph->ph_nmissing == 0); - timersub(&curtime, &ph->ph_time, &diff); - if (diff.tv_sec < pool_inactive_time + if (curtime - ph->ph_time < pool_inactive_time && !pa_starved_p(pp->pr_alloc)) continue; @@ -1681,7 +1710,7 @@ pool_drain_start(struct pool **ppp, uint { struct pool *pp; - KASSERT(!LIST_EMPTY(&pool_head)); + KASSERT(!TAILQ_EMPTY(&pool_head)); pp = NULL; @@ -1689,11 +1718,11 @@ pool_drain_start(struct pool **ppp, uint mutex_enter(&pool_head_lock); do { if (drainpp == NULL) { - drainpp = LIST_FIRST(&pool_head); + drainpp = TAILQ_FIRST(&pool_head); } if (drainpp != NULL) { pp = drainpp; - drainpp = LIST_NEXT(pp, pr_poollist); + drainpp = TAILQ_NEXT(pp, pr_poollist); } /* * Skip completely idle pools. 
We depend on at least @@ -1749,7 +1778,7 @@ pool_printall(const char *modif, void (* { struct pool *pp; - LIST_FOREACH(pp, &pool_head, pr_poollist) { + TAILQ_FOREACH(pp, &pool_head, pr_poollist) { pool_printit(pp, modif, pr); } } @@ -1776,10 +1805,8 @@ pool_print_pagelist(struct pool *pp, str #endif LIST_FOREACH(ph, pl, ph_pagelist) { - (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n", - ph->ph_page, ph->ph_nmissing, - (u_long)ph->ph_time.tv_sec, - (u_long)ph->ph_time.tv_usec); + (*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n", + ph->ph_page, ph->ph_nmissing, ph->ph_time); #ifdef DIAGNOSTIC if (!(pp->pr_roflags & PR_NOTOUCH)) { LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) { @@ -1866,7 +1893,7 @@ pool_print1(struct pool *pp, const char #define PR_GROUPLIST(pcg) \ (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \ - for (i = 0; i < PCG_NOBJECTS; i++) { \ + for (i = 0; i < pcg->pcg_size; i++) { \ if (pcg->pcg_objects[i].pcgo_pa != \ POOL_PADDR_INVALID) { \ (*pr)("\t\t\t%p, 0x%llx\n", \ @@ -2039,6 +2066,7 @@ pool_cache_bootstrap(pool_cache_t pc, si void *arg) { CPU_INFO_ITERATOR cii; + pool_cache_t pc1; struct cpu_info *ci; struct pool *pp; @@ -2078,10 +2106,16 @@ pool_cache_bootstrap(pool_cache_t pc, si pc->pc_refcnt = 0; pc->pc_freecheck = NULL; + if ((flags & PR_LARGECACHE) != 0) { + pc->pc_pcgsize = PCG_NOBJECTS_LARGE; + } else { + pc->pc_pcgsize = PCG_NOBJECTS_NORMAL; + } + /* Allocate per-CPU caches. */ memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus)); pc->pc_ncpu = 0; - if (ncpu == 0) { + if (ncpu < 2) { /* XXX For sparc: boot CPU is not attached yet. */ pool_cache_cpu_init1(curcpu(), pc); } else { @@ -2089,18 +2123,23 @@ pool_cache_bootstrap(pool_cache_t pc, si pool_cache_cpu_init1(ci, pc); } } - - if (__predict_true(!cold)) { - mutex_enter(&pp->pr_lock); - pp->pr_cache = pc; - mutex_exit(&pp->pr_lock); + + /* Add to list of all pools. */ + if (__predict_true(!cold)) mutex_enter(&pool_head_lock); - LIST_INSERT_HEAD(&pool_cache_head, pc, pc_cachelist); - mutex_exit(&pool_head_lock); - } else { - pp->pr_cache = pc; - LIST_INSERT_HEAD(&pool_cache_head, pc, pc_cachelist); + TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) { + if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0) + break; } + if (pc1 == NULL) + TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist); + else + TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist); + if (__predict_true(!cold)) + mutex_exit(&pool_head_lock); + + membar_sync(); + pp->pr_cache = pc; } /* @@ -2111,20 +2150,6 @@ pool_cache_bootstrap(pool_cache_t pc, si void pool_cache_destroy(pool_cache_t pc) { - - pool_cache_bootstrap_destroy(pc); - pool_put(&cache_pool, pc); -} - -/* - * pool_cache_bootstrap_destroy: - * - * Kernel-private version of pool_cache_destroy(). - * Destroy a pool cache initialized by pool_cache_bootstrap. - */ -void -pool_cache_bootstrap_destroy(pool_cache_t pc) -{ struct pool *pp = &pc->pc_pool; pool_cache_cpu_t *cc; pcg_t *pcg; @@ -2134,7 +2159,7 @@ pool_cache_bootstrap_destroy(pool_cache_ mutex_enter(&pool_head_lock); while (pc->pc_refcnt != 0) cv_wait(&pool_busy, &pool_head_lock); - LIST_REMOVE(pc, pc_cachelist); + TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist); mutex_exit(&pool_head_lock); /* First, invalidate the entire cache. */ @@ -2164,6 +2189,7 @@ pool_cache_bootstrap_destroy(pool_cache_ /* Finally, destroy it. 
*/ mutex_destroy(&pc->pc_lock); pool_destroy(pp); + pool_put(&cache_pool, pc); } /* @@ -2224,7 +2250,7 @@ pool_cache_cpu_init(struct cpu_info *ci) pool_cache_t pc; mutex_enter(&pool_head_lock); - LIST_FOREACH(pc, &pool_cache_head, pc_cachelist) { + TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) { pc->pc_refcnt++; mutex_exit(&pool_head_lock); @@ -2292,7 +2318,12 @@ pool_cache_invalidate_groups(pool_cache_ pool_cache_destruct_object1(pc, object); } - pool_put(&pcgpool, pcg); + if (pcg->pcg_size == PCG_NOBJECTS_LARGE) { + pool_put(&pcg_large_pool, pcg); + } else { + KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL); + pool_put(&pcg_normal_pool, pcg); + } } } @@ -2432,7 +2463,7 @@ pool_cache_get_slow(pool_cache_cpu_t *cc pc->pc_emptygroups = cur; pc->pc_nempty++; } - KASSERT(pcg->pcg_avail == PCG_NOBJECTS); + KASSERT(pcg->pcg_avail == pcg->pcg_size); cc->cc_current = pcg; pc->pc_fullgroups = pcg->pcg_next; pc->pc_hits++; @@ -2491,8 +2522,9 @@ pool_cache_get_paddr(pool_cache_t pc, in int s; #ifdef LOCKDEBUG - if (flags & PR_WAITOK) - ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)"); + if (flags & PR_WAITOK) { + ASSERT_SLEEPABLE(); + } #endif cc = pool_cache_cpu_enter(pc, &s); @@ -2503,8 +2535,10 @@ pool_cache_get_paddr(pool_cache_t pc, in object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va; if (pap != NULL) *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa; +#if defined(DIAGNOSTIC) pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL; - KASSERT(pcg->pcg_avail <= PCG_NOBJECTS); +#endif /* defined(DIAGNOSTIC) */ + KASSERT(pcg->pcg_avail <= pcg->pcg_size); KASSERT(object != NULL); cc->cc_hits++; pool_cache_cpu_exit(cc, &s); @@ -2544,6 +2578,7 @@ pool_cache_put_slow(pool_cache_cpu_t *cc pcg_t *pcg, *cur; uint64_t ncsw; pool_cache_t pc; + u_int nobj; pc = cc->cc_cache; cc->cc_misses++; @@ -2573,17 +2608,21 @@ pool_cache_put_slow(pool_cache_cpu_t *cc /* * If there's a empty group, release our full * group back to the cache. Install the empty - * group as cc_current and return. + * group and return. */ - if ((cur = cc->cc_current) != NULL) { - KASSERT(cur->pcg_avail == PCG_NOBJECTS); - cur->pcg_next = pc->pc_fullgroups; - pc->pc_fullgroups = cur; - pc->pc_nfull++; - } KASSERT(pcg->pcg_avail == 0); - cc->cc_current = pcg; pc->pc_emptygroups = pcg->pcg_next; + if (cc->cc_previous == NULL) { + cc->cc_previous = pcg; + } else { + if ((cur = cc->cc_current) != NULL) { + KASSERT(cur->pcg_avail == pcg->pcg_size); + cur->pcg_next = pc->pc_fullgroups; + pc->pc_fullgroups = cur; + pc->pc_nfull++; + } + cc->cc_current = pcg; + } pc->pc_hits++; pc->pc_nempty--; mutex_exit(&pc->pc_lock); @@ -2603,16 +2642,20 @@ pool_cache_put_slow(pool_cache_cpu_t *cc * If we can't allocate a new group, just throw the * object away. */ - pcg = pool_get(&pcgpool, PR_NOWAIT); + nobj = pc->pc_pcgsize; + if (pool_cache_disable) { + pcg = NULL; + } else if (nobj == PCG_NOBJECTS_LARGE) { + pcg = pool_get(&pcg_large_pool, PR_NOWAIT); + } else { + pcg = pool_get(&pcg_normal_pool, PR_NOWAIT); + } if (pcg == NULL) { pool_cache_destruct_object(pc, object); return NULL; } -#ifdef DIAGNOSTIC - memset(pcg, 0, sizeof(*pcg)); -#else pcg->pcg_avail = 0; -#endif + pcg->pcg_size = nobj; /* * Add the empty group to the cache and try again. @@ -2645,9 +2688,7 @@ pool_cache_put_paddr(pool_cache_t pc, vo do { /* If the current group isn't full, release it there. 
*/ pcg = cc->cc_current; - if (pcg != NULL && pcg->pcg_avail < PCG_NOBJECTS) { - KASSERT(pcg->pcg_objects[pcg->pcg_avail].pcgo_va - == NULL); + if (pcg != NULL && pcg->pcg_avail < pcg->pcg_size) { pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object; pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa; pcg->pcg_avail++; @@ -2705,7 +2746,7 @@ pool_cache_xcall(pool_cache_t pc) s = splvm(); mutex_enter(&pc->pc_lock); if (cur != NULL) { - if (cur->pcg_avail == PCG_NOBJECTS) { + if (cur->pcg_avail == cur->pcg_size) { list = &pc->pc_fullgroups; pc->pc_nfull++; } else if (cur->pcg_avail == 0) { @@ -2719,7 +2760,7 @@ pool_cache_xcall(pool_cache_t pc) *list = cur; } if (prev != NULL) { - if (prev->pcg_avail == PCG_NOBJECTS) { + if (prev->pcg_avail == prev->pcg_size) { list = &pc->pc_fullgroups; pc->pc_nfull++; } else if (prev->pcg_avail == 0) { @@ -2899,3 +2940,143 @@ pool_page_free_nointr(struct pool *pp, v uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v); } + +#if defined(DDB) +static bool +pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr) +{ + + return (uintptr_t)ph->ph_page <= addr && + addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz; +} + +static bool +pool_in_item(struct pool *pp, void *item, uintptr_t addr) +{ + + return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size; +} + +static bool +pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr) +{ + int i; + + if (pcg == NULL) { + return false; + } + for (i = 0; i < pcg->pcg_avail; i++) { + if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) { + return true; + } + } + return false; +} + +static bool +pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr) +{ + + if ((pp->pr_roflags & PR_NOTOUCH) != 0) { + unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr); + pool_item_bitmap_t *bitmap = + ph->ph_bitmap + (idx / BITMAP_SIZE); + pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK); + + return (*bitmap & mask) == 0; + } else { + struct pool_item *pi; + + LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) { + if (pool_in_item(pp, pi, addr)) { + return false; + } + } + return true; + } +} + +void +pool_whatis(uintptr_t addr, void (*pr)(const char *, ...)) +{ + struct pool *pp; + + TAILQ_FOREACH(pp, &pool_head, pr_poollist) { + struct pool_item_header *ph; + uintptr_t item; + bool allocated = true; + bool incache = false; + bool incpucache = false; + char cpucachestr[32]; + + if ((pp->pr_roflags & PR_PHINPAGE) != 0) { + LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) { + if (pool_in_page(pp, ph, addr)) { + goto found; + } + } + LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) { + if (pool_in_page(pp, ph, addr)) { + allocated = + pool_allocated(pp, ph, addr); + goto found; + } + } + LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) { + if (pool_in_page(pp, ph, addr)) { + allocated = false; + goto found; + } + } + continue; + } else { + ph = pr_find_pagehead_noalign(pp, (void *)addr); + if (ph == NULL || !pool_in_page(pp, ph, addr)) { + continue; + } + allocated = pool_allocated(pp, ph, addr); + } +found: + if (allocated && pp->pr_cache) { + pool_cache_t pc = pp->pr_cache; + struct pool_cache_group *pcg; + int i; + + for (pcg = pc->pc_fullgroups; pcg != NULL; + pcg = pcg->pcg_next) { + if (pool_in_cg(pp, pcg, addr)) { + incache = true; + goto print; + } + } + for (i = 0; i < MAXCPUS; i++) { + pool_cache_cpu_t *cc; + + if ((cc = pc->pc_cpus[i]) == NULL) { + continue; + } + if (pool_in_cg(pp, cc->cc_current, addr) || + pool_in_cg(pp, cc->cc_previous, addr)) { + 
struct cpu_info *ci = + cpu_lookup_byindex(i); + + incpucache = true; + snprintf(cpucachestr, + sizeof(cpucachestr), + "cached by CPU %u", + ci->ci_index); + goto print; + } + } + } +print: + item = (uintptr_t)ph->ph_page + ph->ph_off; + item = item + rounddown(addr - item, pp->pr_size); + (*pr)("%p is %p+%zu in POOL '%s' (%s)\n", + (void *)addr, item, (size_t)(addr - item), + pp->pr_wchan, + incpucache ? cpucachestr : + incache ? "cached" : allocated ? "allocated" : "free"); + } +} +#endif /* defined(DDB) */
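
Two notes on the changes above follow; each includes a small standalone userland sketch that is illustrative only and not part of the kernel sources.

The timekeeping change replaces the per-page "struct timeval ph_time" (filled by getmicrotime()) with a 32-bit snapshot of time_uptime, and replaces the timersub() comparison in pool_reclaim() with a plain unsigned subtraction. The diff's own comment notes that truncating to 32 bits is not a problem for this usage: with uint32_t arithmetic, "curtime - ph->ph_time" is computed modulo 2^32, so the elapsed-seconds test keeps working even across a wrap of the counter. A minimal sketch, assuming an illustrative pool_inactive_time of 10 seconds (the real value lives in subr_pool.c and is not part of this diff); page_is_reclaimable() and main() are hypothetical helpers for the demonstration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static const uint32_t pool_inactive_time = 10;	/* illustrative threshold, in seconds */

static int
page_is_reclaimable(uint32_t curtime, uint32_t ph_time)
{

	/* Unsigned subtraction is modulo 2^32, so a wrap of the counter is harmless. */
	return curtime - ph_time >= pool_inactive_time;
}

int
main(void)
{
	uint32_t then = UINT32_MAX - 4;	/* page last touched 5 seconds before the wrap */
	uint32_t now = then + 20;	/* 20 seconds later; wraps around to a small value */

	printf("elapsed=%" PRIu32 " reclaimable=%d\n",
	    now - then, page_is_reclaimable(now, then));
	return 0;
}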
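The cache-group change is the other piece worth a sketch. The diff retires the single fixed-size pcgpool in favour of pcg_normal_pool and pcg_large_pool, sizing each as sizeof(pcg_t) + (PCG_NOBJECTS_* - 1) * sizeof(pcgpair_t) and recording the capacity in the new pcg_size field that the KASSERTs and loops now consult instead of PCG_NOBJECTS. That sizing is the familiar "trailing array of one element" idiom, the same one the header change applies to phu_bitmap[1]. A rough userland sketch of the idiom; struct pair, struct group and the 15-object capacity are simplified stand-ins, not the kernel's definitions:

#include <stdio.h>
#include <stdlib.h>

struct pair {				/* stand-in for pcgpair_t */
	void		*va;
	unsigned long	 pa;
};

struct group {				/* stand-in for pcg_t */
	struct group	*next;
	unsigned int	 avail;		/* entries currently in use */
	unsigned int	 size;		/* capacity, cf. the new pcg_size field */
	struct pair	 objects[1];	/* really "size" entries long */
};

int
main(void)
{
	unsigned int nobj = 15;		/* arbitrary illustrative capacity */
	size_t sz = sizeof(struct group) + (nobj - 1) * sizeof(struct pair);
	struct group *g = malloc(sz);

	if (g == NULL)
		return 1;
	g->avail = 0;
	g->size = nobj;
	printf("a %u-object group needs %zu bytes\n", g->size, sz);
	free(g);
	return 0;
}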