Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. =================================================================== RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v: warning: Unknown phrases like `commitid ...;' are present. retrieving revision 1.52 retrieving revision 1.213 diff -u -p -r1.52 -r1.213 --- src/sys/kern/subr_pool.c 2001/05/09 23:46:03 1.52 +++ src/sys/kern/subr_pool.c 2017/11/09 15:53:40 1.213 @@ -1,12 +1,14 @@ -/* $NetBSD: subr_pool.c,v 1.52 2001/05/09 23:46:03 thorpej Exp $ */ +/* $NetBSD: subr_pool.c,v 1.213 2017/11/09 15:53:40 christos Exp $ */ /*- - * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc. + * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015 + * The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace - * Simulation Facility, NASA Ames Research Center. + * Simulation Facility, NASA Ames Research Center; by Andrew Doran, and by + * Maxime Villard. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -16,13 +18,6 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the NetBSD - * Foundation, Inc. and its contributors. - * 4. Neither the name of The NetBSD Foundation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED @@ -37,38 +32,83 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include "opt_pool.h" -#include "opt_poollog.h" +#include +__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.213 2017/11/09 15:53:40 christos Exp $"); + +#ifdef _KERNEL_OPT +#include "opt_ddb.h" #include "opt_lockdebug.h" +#endif #include #include +#include +#include #include #include #include -#include -#include +#include #include #include +#include +#include +#include +#include +#include -#include +#include /* * Pool resource management utility. * - * Memory is allocated in pages which are split into pieces according - * to the pool item size. Each page is kept on a list headed by `pr_pagelist' - * in the pool structure and the individual pool items are on a linked list - * headed by `ph_itemlist' in each page header. The memory for building - * the page list is either taken from the allocated pages themselves (for - * small pool items) or taken from an internal pool of page headers (`phpool'). + * Memory is allocated in pages which are split into pieces according to + * the pool item size. Each page is kept on one of three lists in the + * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages', + * for empty, full and partially-full pages respectively. The individual + * pool items are on a linked list headed by `ph_itemlist' in each page + * header. 
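
To make that layout concrete before the diff continues: a minimal userspace sketch of carving one page into equal items threaded on a per-page free list. Every name here (page_prime, struct page_hdr, PAGE_SZ) is invented for illustration; this is the shape of the idea, not the kernel code.

    #include <stdlib.h>

    #define PAGE_SZ 4096

    struct item { struct item *next; };      /* free items link through their own storage */

    struct page_hdr {
        struct page_hdr *next;               /* link on the empty/partial/full list */
        struct item *free;                   /* free chunks remaining in this page */
        unsigned nmissing;                   /* chunks currently handed out */
        char *base;                          /* the backing page */
    };

    /* Split one fresh page into itemsperpage chunks of `size` bytes
     * (size must be >= sizeof(struct item) and itemsperpage * size must
     * fit in PAGE_SZ) and push them all onto the page's own free list;
     * the new page would start life on the "empty" list. */
    static struct page_hdr *page_prime(size_t size, unsigned itemsperpage)
    {
        struct page_hdr *ph = malloc(sizeof(*ph));
        char *cp = malloc(PAGE_SZ);

        if (ph == NULL || cp == NULL)
            abort();
        ph->next = NULL;
        ph->base = cp;
        ph->free = NULL;
        ph->nmissing = 0;
        for (unsigned i = 0; i < itemsperpage; i++) {
            struct item *pi = (struct item *)(cp + i * size);
            pi->next = ph->free;
            ph->free = pi;
        }
        return ph;
    }
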
The memory for building the page list is either taken from + * the allocated pages themselves (for small pool items) or taken from + * an internal pool of page headers (`phpool'). */ -/* List of all pools */ -TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head); +/* List of all pools. Non static as needed by 'vmstat -i' */ +TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head); /* Private pool for page header structures */ -static struct pool phpool; +#define PHPOOL_MAX 8 +static struct pool phpool[PHPOOL_MAX]; +#define PHPOOL_FREELIST_NELEM(idx) \ + (((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx))) + +#ifdef POOL_SUBPAGE +/* Pool of subpages for use by normal pools. */ +static struct pool psppool; +#endif + +#ifdef POOL_REDZONE +# define POOL_REDZONE_SIZE 2 +static void pool_redzone_init(struct pool *, size_t); +static void pool_redzone_fill(struct pool *, void *); +static void pool_redzone_check(struct pool *, void *); +#else +# define pool_redzone_init(pp, sz) /* NOTHING */ +# define pool_redzone_fill(pp, ptr) /* NOTHING */ +# define pool_redzone_check(pp, ptr) /* NOTHING */ +#endif + +static void *pool_page_alloc_meta(struct pool *, int); +static void pool_page_free_meta(struct pool *, void *); + +/* allocator for pool metadata */ +struct pool_allocator pool_allocator_meta = { + .pa_alloc = pool_page_alloc_meta, + .pa_free = pool_page_free_meta, + .pa_pagesz = 0 +}; + +#define POOL_ALLOCATOR_BIG_BASE 13 +extern struct pool_allocator pool_allocator_big[]; +static int pool_bigidx(size_t); /* # of seconds to retain page after last use */ int pool_inactive_time = 10; @@ -76,32 +116,53 @@ int pool_inactive_time = 10; /* Next candidate for drainage (see pool_drain()) */ static struct pool *drainpp; -/* This spin lock protects both pool_head and drainpp. */ -struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER; +/* This lock protects both pool_head and drainpp. 
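
The PHPOOL_FREELIST_NELEM() macro above fixes the capacity of the eight header pools: index 0 serves ordinary list-backed headers, while index idx >= 1 serves PR_NOTOUCH headers whose trailing bitmap covers BITMAP_SIZE * 2^idx items. With the 32-bit pool_item_bitmap_t this gives capacities 0, 64, 128, ..., 4096, which pool_subsystem_init() later bakes into the pool names; checkable directly:

    #include <stdio.h>
    #include <stdint.h>
    #include <limits.h>

    #define PHPOOL_MAX 8
    #define BITMAP_SIZE (CHAR_BIT * sizeof(uint32_t))   /* 32 bits per word */
    #define PHPOOL_FREELIST_NELEM(idx) \
        (((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))

    int main(void)
    {
        /* Prints 0, 64, 128, 256, 512, 1024, 2048, 4096; the pools are
         * named after these numbers ("phpool-0", "phpool-64", ...). */
        for (int idx = 0; idx < PHPOOL_MAX; idx++)
            printf("idx %d: phpool-%d\n", idx,
                (int)PHPOOL_FREELIST_NELEM(idx));
        return 0;
    }
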
*/ +static kmutex_t pool_head_lock; +static kcondvar_t pool_busy; + +/* This lock protects initialization of a potentially shared pool allocator */ +static kmutex_t pool_allocator_lock; + +typedef uint32_t pool_item_bitmap_t; +#define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t)) +#define BITMAP_MASK (BITMAP_SIZE - 1) struct pool_item_header { /* Page headers */ - TAILQ_ENTRY(pool_item_header) - ph_pagelist; /* pool page list */ - TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */ LIST_ENTRY(pool_item_header) - ph_hashlist; /* Off-page page headers */ - int ph_nmissing; /* # of chunks in use */ - caddr_t ph_page; /* this page's address */ - struct timeval ph_time; /* last referenced */ + ph_pagelist; /* pool page list */ + SPLAY_ENTRY(pool_item_header) + ph_node; /* Off-page page headers */ + void * ph_page; /* this page's address */ + uint32_t ph_time; /* last referenced */ + uint16_t ph_nmissing; /* # of chunks in use */ + uint16_t ph_off; /* start offset in page */ + union { + /* !PR_NOTOUCH */ + struct { + LIST_HEAD(, pool_item) + phu_itemlist; /* chunk list for this page */ + } phu_normal; + /* PR_NOTOUCH */ + struct { + pool_item_bitmap_t phu_bitmap[1]; + } phu_notouch; + } ph_u; }; +#define ph_itemlist ph_u.phu_normal.phu_itemlist +#define ph_bitmap ph_u.phu_notouch.phu_bitmap struct pool_item { #ifdef DIAGNOSTIC - int pi_magic; + u_int pi_magic; #endif -#define PI_MAGIC 0xdeadbeef +#define PI_MAGIC 0xdeaddeadU /* Other entries use only this list entry */ - TAILQ_ENTRY(pool_item) pi_list; + LIST_ENTRY(pool_item) pi_list; }; -#define PR_HASH_INDEX(pp,addr) \ - (((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1)) +#define POOL_NEEDS_CATCHUP(pp) \ + ((pp)->pr_nitems < (pp)->pr_minitems) /* * Pool cache management. @@ -111,295 +172,372 @@ struct pool_item { * needless object construction/destruction; it is deferred until absolutely * necessary. * - * Caches are grouped into cache groups. Each cache group references - * up to 16 constructed objects. When a cache allocates an object - * from the pool, it calls the object's constructor and places it into - * a cache group. When a cache group frees an object back to the pool, - * it first calls the object's destructor. This allows the object to - * persist in constructed form while freed to the cache. - * - * Multiple caches may exist for each pool. This allows a single - * object type to have multiple constructed forms. The pool references - * each cache, so that when a pool is drained by the pagedaemon, it can - * drain each individual cache as well. Each time a cache is drained, - * the most idle cache group is freed to the pool in its entirety. + * Caches are grouped into cache groups. Each cache group references up + * to PCG_NUMOBJECTS constructed objects. When a cache allocates an + * object from the pool, it calls the object's constructor and places it + * into a cache group. When a cache group frees an object back to the + * pool, it first calls the object's destructor. This allows the object + * to persist in constructed form while freed to the cache. + * + * The pool references each cache, so that when a pool is drained by the + * pagedaemon, it can drain each individual cache as well. Each time a + * cache is drained, the most idle cache group is freed to the pool in + * its entirety. * * Pool caches are layed on top of pools. By layering them, we can avoid * the complexity of cache management for pools which would not benefit * from it. */ -/* The cache group pool. 
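
The ph_u union above lets one header type serve both page flavours: list-backed pages use phu_itemlist, and PR_NOTOUCH pages reuse the same bytes as the first word of a variable-length bitmap, which pool_subsystem_init() (further down) sizes with offsetof over the bitmap member. A compact illustration of that sizing trick with invented names; the variable array index inside offsetof relies on GCC/Clang's __builtin_offsetof, the same way the kernel uses it:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct hdr {
        void *page;
        uint16_t nmissing;
        union {
            struct { void *itemlist_head; } normal;    /* !PR_NOTOUCH */
            struct { uint32_t bitmap[1]; } notouch;    /* really variable length */
        } u;
    };

    /* Bytes to allocate so the trailing bitmap can track nelem items. */
    static size_t hdr_size(unsigned nelem)
    {
        unsigned nwords = (nelem + 31) / 32;           /* howmany(nelem, 32) */
        return offsetof(struct hdr, u.notouch.bitmap[nwords]);
    }

    int main(void)
    {
        printf("64 items: %zu bytes; 4096 items: %zu bytes\n",
            hdr_size(64), hdr_size(4096));
        return 0;
    }
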
*/ -static struct pool pcgpool; - -/* The pool cache group. */ -#define PCG_NOBJECTS 16 -struct pool_cache_group { - TAILQ_ENTRY(pool_cache_group) - pcg_list; /* link in the pool cache's group list */ - u_int pcg_avail; /* # available objects */ - /* pointers to the objects */ - void *pcg_objects[PCG_NOBJECTS]; -}; - -static void pool_cache_reclaim(struct pool_cache *); +static struct pool pcg_normal_pool; +static struct pool pcg_large_pool; +static struct pool cache_pool; +static struct pool cache_cpu_pool; + +pool_cache_t pnbuf_cache; /* pathname buffer cache */ + +/* List of all caches. */ +TAILQ_HEAD(,pool_cache) pool_cache_head = + TAILQ_HEAD_INITIALIZER(pool_cache_head); + +int pool_cache_disable; /* global disable for caching */ +static const pcg_t pcg_dummy; /* zero sized: always empty, yet always full */ + +static bool pool_cache_put_slow(pool_cache_cpu_t *, int, + void *); +static bool pool_cache_get_slow(pool_cache_cpu_t *, int, + void **, paddr_t *, int); +static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t); +static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *); +static void pool_cache_invalidate_cpu(pool_cache_t, u_int); +static void pool_cache_transfer(pool_cache_t); static int pool_catchup(struct pool *); -static int pool_prime_page(struct pool *, caddr_t, int); -static void *pool_page_alloc(unsigned long, int, int); -static void pool_page_free(void *, unsigned long, int); +static void pool_prime_page(struct pool *, void *, + struct pool_item_header *); +static void pool_update_curpage(struct pool *); + +static int pool_grow(struct pool *, int); +static void *pool_allocator_alloc(struct pool *, int); +static void pool_allocator_free(struct pool *, void *); +static void pool_print_pagelist(struct pool *, struct pool_pagelist *, + void (*)(const char *, ...) __printflike(1, 2)); static void pool_print1(struct pool *, const char *, - void (*)(const char *, ...)); + void (*)(const char *, ...) __printflike(1, 2)); -/* - * Pool log entry. An array of these is allocated in pool_init(). - */ -struct pool_log { - const char *pl_file; - long pl_line; - int pl_action; -#define PRLOG_GET 1 -#define PRLOG_PUT 2 - void *pl_addr; -}; +static int pool_chk_page(struct pool *, const char *, + struct pool_item_header *); -/* Number of entries in pool log buffers */ -#ifndef POOL_LOGSIZE -#define POOL_LOGSIZE 10 -#endif +static inline unsigned int +pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph, + const void *v) +{ + const char *cp = v; + unsigned int idx; -int pool_logsize = POOL_LOGSIZE; + KASSERT(pp->pr_roflags & PR_NOTOUCH); + idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size; + KASSERT(idx < pp->pr_itemsperpage); + return idx; +} -#ifdef DIAGNOSTIC -static __inline void -pr_log(struct pool *pp, void *v, int action, const char *file, long line) +static inline void +pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph, + void *obj) { - int n = pp->pr_curlogentry; - struct pool_log *pl; - - if ((pp->pr_roflags & PR_LOGGING) == 0) - return; + unsigned int idx = pr_item_notouch_index(pp, ph, obj); + pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE); + pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK); - /* - * Fill in the current entry. Wrap around and overwrite - * the oldest entry if necessary. 
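
pr_item_notouch_index() and pr_item_notouch_put(), added above, recover an item's bit from its bare address: subtract the page base and the colored start offset, divide by the item size, and set that bit (a set bit means free). The same arithmetic in standalone form, with invented names:

    #include <assert.h>
    #include <stdint.h>
    #include <stddef.h>

    #define BITMAP_SIZE 32u
    #define BITMAP_MASK (BITMAP_SIZE - 1)

    /* Mark the item at `obj` free again, PR_NOTOUCH style. `off` is the
     * colored offset of the first item in the page. */
    static void bitmap_put(uint32_t *bitmap, const char *page, unsigned off,
        size_t size, const void *obj)
    {
        unsigned idx = (unsigned)(((const char *)obj - page - off) / size);
        uint32_t mask = 1u << (idx & BITMAP_MASK);

        assert((bitmap[idx / BITMAP_SIZE] & mask) == 0);  /* else: double free */
        bitmap[idx / BITMAP_SIZE] |= mask;                /* bit set = free */
    }
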
- */ - pl = &pp->pr_log[n]; - pl->pl_file = file; - pl->pl_line = line; - pl->pl_action = action; - pl->pl_addr = v; - if (++n >= pp->pr_logsize) - n = 0; - pp->pr_curlogentry = n; + KASSERT((*bitmap & mask) == 0); + *bitmap |= mask; } -static void -pr_printlog(struct pool *pp, struct pool_item *pi, - void (*pr)(const char *, ...)) +static inline void * +pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph) { - int i = pp->pr_logsize; - int n = pp->pr_curlogentry; + pool_item_bitmap_t *bitmap = ph->ph_bitmap; + unsigned int idx; + int i; - if ((pp->pr_roflags & PR_LOGGING) == 0) - return; + for (i = 0; ; i++) { + int bit; - /* - * Print all entries in this pool's log. - */ - while (i-- > 0) { - struct pool_log *pl = &pp->pr_log[n]; - if (pl->pl_action != 0) { - if (pi == NULL || pi == pl->pl_addr) { - (*pr)("\tlog entry %d:\n", i); - (*pr)("\t\taction = %s, addr = %p\n", - pl->pl_action == PRLOG_GET ? "get" : "put", - pl->pl_addr); - (*pr)("\t\tfile: %s at line %lu\n", - pl->pl_file, pl->pl_line); - } + KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage); + bit = ffs32(bitmap[i]); + if (bit) { + pool_item_bitmap_t mask; + + bit--; + idx = (i * BITMAP_SIZE) + bit; + mask = 1 << bit; + KASSERT((bitmap[i] & mask) != 0); + bitmap[i] &= ~mask; + break; } - if (++n >= pp->pr_logsize) - n = 0; } + KASSERT(idx < pp->pr_itemsperpage); + return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size; } -static __inline void -pr_enter(struct pool *pp, const char *file, long line) +static inline void +pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph) { + pool_item_bitmap_t *bitmap = ph->ph_bitmap; + const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE); + int i; - if (__predict_false(pp->pr_entered_file != NULL)) { - printf("pool %s: reentrancy at file %s line %ld\n", - pp->pr_wchan, file, line); - printf(" previous entry at file %s line %ld\n", - pp->pr_entered_file, pp->pr_entered_line); - panic("pr_enter"); + for (i = 0; i < n; i++) { + bitmap[i] = (pool_item_bitmap_t)-1; } - - pp->pr_entered_file = file; - pp->pr_entered_line = line; } -static __inline void -pr_leave(struct pool *pp) +static inline int +phtree_compare(struct pool_item_header *a, struct pool_item_header *b) { - if (__predict_false(pp->pr_entered_file == NULL)) { - printf("pool %s not entered?\n", pp->pr_wchan); - panic("pr_leave"); - } + /* + * we consider pool_item_header with smaller ph_page bigger. + * (this unnatural ordering is for the benefit of pr_find_pagehead.) 
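
The matching allocation side, pr_item_notouch_get() above, scans the bitmap words for the first set bit, clears it, and turns the bit position back into an address. The same logic with GCC/Clang's __builtin_ffs standing in for the kernel's ffs32() (an assumed substitution; any find-first-set works):

    #include <stdio.h>
    #include <stdint.h>

    #define BITMAP_SIZE 32u

    /* Claim a free item: returns its index, or -1 if the page is full. */
    static int bitmap_get(uint32_t *bitmap, unsigned nwords)
    {
        for (unsigned i = 0; i < nwords; i++) {
            int bit = __builtin_ffs((int)bitmap[i]);   /* 1-based; 0 = none set */
            if (bit != 0) {
                bitmap[i] &= ~(1u << (bit - 1));       /* mark in use */
                return (int)(i * BITMAP_SIZE) + (bit - 1);
            }
        }
        return -1;
    }

    int main(void)
    {
        uint32_t map[2] = { 0xfffffffcu, 0xffffffffu };  /* items 0 and 1 taken */
        printf("%d %d\n", bitmap_get(map, 2), bitmap_get(map, 2)); /* 2 3 */
        return 0;
    }
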
+ */ - pp->pr_entered_file = NULL; - pp->pr_entered_line = 0; + if (a->ph_page < b->ph_page) + return (1); + else if (a->ph_page > b->ph_page) + return (-1); + else + return (0); } -static __inline void -pr_enter_check(struct pool *pp, void (*pr)(const char *, ...)) +SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare); +SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare); + +static inline struct pool_item_header * +pr_find_pagehead_noalign(struct pool *pp, void *v) { + struct pool_item_header *ph, tmp; + + tmp.ph_page = (void *)(uintptr_t)v; + ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp); + if (ph == NULL) { + ph = SPLAY_ROOT(&pp->pr_phtree); + if (ph != NULL && phtree_compare(&tmp, ph) >= 0) { + ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph); + } + KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0); + } - if (pp->pr_entered_file != NULL) - (*pr)("\n\tcurrently entered from file %s line %ld\n", - pp->pr_entered_file, pp->pr_entered_line); + return ph; } -#else -#define pr_log(pp, v, action, file, line) -#define pr_printlog(pp, pi, pr) -#define pr_enter(pp, file, line) -#define pr_leave(pp) -#define pr_enter_check(pp, pr) -#endif /* DIAGNOSTIC */ /* - * Return the pool page header based on page address. + * Return the pool page header based on item address. */ -static __inline struct pool_item_header * -pr_find_pagehead(struct pool *pp, caddr_t page) +static inline struct pool_item_header * +pr_find_pagehead(struct pool *pp, void *v) { - struct pool_item_header *ph; + struct pool_item_header *ph, tmp; - if ((pp->pr_roflags & PR_PHINPAGE) != 0) - return ((struct pool_item_header *)(page + pp->pr_phoffset)); + if ((pp->pr_roflags & PR_NOALIGN) != 0) { + ph = pr_find_pagehead_noalign(pp, v); + } else { + void *page = + (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask); - for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]); - ph != NULL; - ph = LIST_NEXT(ph, ph_hashlist)) { - if (ph->ph_page == page) - return (ph); + if ((pp->pr_roflags & PR_PHINPAGE) != 0) { + ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset); + } else { + tmp.ph_page = page; + ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp); + } + } + + KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) || + ((char *)ph->ph_page <= (char *)v && + (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz)); + return ph; +} + +static void +pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq) +{ + struct pool_item_header *ph; + + while ((ph = LIST_FIRST(pq)) != NULL) { + LIST_REMOVE(ph, ph_pagelist); + pool_allocator_free(pp, ph->ph_page); + if ((pp->pr_roflags & PR_PHINPAGE) == 0) + pool_put(pp->pr_phpool, ph); } - return (NULL); } /* * Remove a page from the pool. */ -static __inline void -pr_rmpage(struct pool *pp, struct pool_item_header *ph) +static inline void +pr_rmpage(struct pool *pp, struct pool_item_header *ph, + struct pool_pagelist *pq) { + KASSERT(mutex_owned(&pp->pr_lock)); + /* * If the page was idle, decrement the idle page count. */ if (ph->ph_nmissing == 0) { -#ifdef DIAGNOSTIC - if (pp->pr_nidle == 0) - panic("pr_rmpage: nidle inconsistent"); - if (pp->pr_nitems < pp->pr_itemsperpage) - panic("pr_rmpage: nitems inconsistent"); -#endif + KASSERT(pp->pr_nidle != 0); + KASSERTMSG((pp->pr_nitems >= pp->pr_itemsperpage), + "nitems=%u < itemsperpage=%u", + pp->pr_nitems, pp->pr_itemsperpage); pp->pr_nidle--; } pp->pr_nitems -= pp->pr_itemsperpage; /* - * Unlink a page from the pool and release it. + * Unlink the page from the pool and queue it for release. 
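
The payoff of the "unnatural" descending order declared above: for PR_NOALIGN pools an item address cannot be masked down to a page boundary, so pr_find_pagehead_noalign() must find the header with the greatest ph_page that is still <= the address, and under the reversed comparator that is exactly the successor of a failed SPLAY_FIND. The same query over a descending array, for intuition only:

    #include <stdio.h>
    #include <stdint.h>

    /* pages[] holds page base addresses in DESCENDING order, mirroring
     * the pool's reversed comparator. The owner of addr is the first
     * (hence greatest) base that is <= addr. */
    static uintptr_t owning_page(const uintptr_t *pages, int n, uintptr_t addr)
    {
        for (int i = 0; i < n; i++)
            if (pages[i] <= addr)
                return pages[i];       /* what SPLAY_NEXT lands on */
        return 0;                      /* no page owns addr */
    }

    int main(void)
    {
        const uintptr_t pages[] = { 0x3000, 0x2000, 0x1000 };
        printf("%#lx\n", (unsigned long)owning_page(pages, 3, 0x2468));
        /* prints 0x2000 */
        return 0;
    }
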
*/ - TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist); - (*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype); + LIST_REMOVE(ph, ph_pagelist); + if ((pp->pr_roflags & PR_PHINPAGE) == 0) + SPLAY_REMOVE(phtree, &pp->pr_phtree, ph); + LIST_INSERT_HEAD(pq, ph, ph_pagelist); + pp->pr_npages--; pp->pr_npagefree++; - if ((pp->pr_roflags & PR_PHINPAGE) == 0) { - int s; - LIST_REMOVE(ph, ph_hashlist); - s = splhigh(); - pool_put(&phpool, ph); - splx(s); - } - - if (pp->pr_curpage == ph) { - /* - * Find a new non-empty page header, if any. - * Start search from the page head, to increase the - * chance for "high water" pages to be freed. - */ - for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; - ph = TAILQ_NEXT(ph, ph_pagelist)) - if (TAILQ_FIRST(&ph->ph_itemlist) != NULL) - break; + pool_update_curpage(pp); +} - pp->pr_curpage = ph; - } +/* + * Initialize all the pools listed in the "pools" link set. + */ +void +pool_subsystem_init(void) +{ + size_t size; + int idx; + + mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE); + mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE); + cv_init(&pool_busy, "poolbusy"); + + /* + * Initialize private page header pool and cache magazine pool if we + * haven't done so yet. + */ + for (idx = 0; idx < PHPOOL_MAX; idx++) { + static char phpool_names[PHPOOL_MAX][6+1+6+1]; + int nelem; + size_t sz; + + nelem = PHPOOL_FREELIST_NELEM(idx); + snprintf(phpool_names[idx], sizeof(phpool_names[idx]), + "phpool-%d", nelem); + sz = sizeof(struct pool_item_header); + if (nelem) { + sz = offsetof(struct pool_item_header, + ph_bitmap[howmany(nelem, BITMAP_SIZE)]); + } + pool_init(&phpool[idx], sz, 0, 0, 0, + phpool_names[idx], &pool_allocator_meta, IPL_VM); + } +#ifdef POOL_SUBPAGE + pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0, + PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM); +#endif + + size = sizeof(pcg_t) + + (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t); + pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0, + "pcgnormal", &pool_allocator_meta, IPL_VM); + + size = sizeof(pcg_t) + + (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t); + pool_init(&pcg_large_pool, size, coherency_unit, 0, 0, + "pcglarge", &pool_allocator_meta, IPL_VM); + + pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit, + 0, 0, "pcache", &pool_allocator_meta, IPL_NONE); + + pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit, + 0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE); } /* * Initialize the given pool resource structure. * * We export this routine to allow other kernel parts to declare - * static pools that must be initialized before malloc() is available. + * static pools that must be initialized before kmem(9) is available. */ void pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags, - const char *wchan, size_t pagesz, - void *(*alloc)(unsigned long, int, int), - void (*release)(void *, unsigned long, int), - int mtype) + const char *wchan, struct pool_allocator *palloc, int ipl) { - int off, slack, i; + struct pool *pp1; + size_t trysize, phsize, prsize; + int off, slack; -#ifdef POOL_DIAGNOSTIC +#ifdef DEBUG + if (__predict_true(!cold)) + mutex_enter(&pool_head_lock); /* - * Always log if POOL_DIAGNOSTIC is defined. + * Check that the pool hasn't already been initialised and + * added to the list of all pools. 
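
Note the division of labour settled above: pr_rmpage() runs with pr_lock held and only unlinks headers onto the caller's pool_pagelist, while pr_pagelist_free() returns the pages to the backing allocator after the lock is dropped, since the allocator may block or take its own locks. The two-phase shape, reduced to pthreads with invented names:

    #include <pthread.h>
    #include <stdlib.h>

    struct page { struct page *next; void *mem; };

    static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct page *empty_pages;         /* protected by pool_lock */

    /* Phase 1: under the lock, just unlink victims onto a private list. */
    static struct page *collect_idle_pages(void)
    {
        struct page *pq = NULL, *ph;

        pthread_mutex_lock(&pool_lock);
        while ((ph = empty_pages) != NULL) {
            empty_pages = ph->next;
            ph->next = pq;
            pq = ph;
        }
        pthread_mutex_unlock(&pool_lock);
        return pq;
    }

    /* Phase 2: the lock is dropped, so calls that may block are now safe. */
    static void free_page_list(struct page *pq)
    {
        while (pq != NULL) {
            struct page *next = pq->next;
            free(pq->mem);
            free(pq);
            pq = next;
        }
    }

A caller chains them as free_page_list(collect_idle_pages()); pool_put() and pool_destroy() below do exactly this with pr_pagelist_free().
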
*/ - if (pool_logsize != 0) - flags |= PR_LOGGING; + TAILQ_FOREACH(pp1, &pool_head, pr_poollist) { + if (pp == pp1) + panic("%s: [%s] already initialised", __func__, + wchan); + } + if (__predict_true(!cold)) + mutex_exit(&pool_head_lock); #endif - /* - * Check arguments and construct default values. - */ - if (!powerof2(pagesz)) - panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz); - - if (alloc == NULL && release == NULL) { - alloc = pool_page_alloc; - release = pool_page_free; - pagesz = PAGE_SIZE; /* Rounds to PAGE_SIZE anyhow. */ - } else if ((alloc != NULL && release != NULL) == 0) { - /* If you specifiy one, must specify both. */ - panic("pool_init: must specify alloc and release together"); - } - - if (pagesz == 0) - pagesz = PAGE_SIZE; + if (palloc == NULL) + palloc = &pool_allocator_kmem; +#ifdef POOL_SUBPAGE + if (size > palloc->pa_pagesz) { + if (palloc == &pool_allocator_kmem) + palloc = &pool_allocator_kmem_fullpage; + else if (palloc == &pool_allocator_nointr) + palloc = &pool_allocator_nointr_fullpage; + } +#endif /* POOL_SUBPAGE */ + if (!cold) + mutex_enter(&pool_allocator_lock); + if (palloc->pa_refcnt++ == 0) { + if (palloc->pa_pagesz == 0) + palloc->pa_pagesz = PAGE_SIZE; + + TAILQ_INIT(&palloc->pa_list); + + mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM); + palloc->pa_pagemask = ~(palloc->pa_pagesz - 1); + palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1; + } + if (!cold) + mutex_exit(&pool_allocator_lock); if (align == 0) align = ALIGN(1); - if (size < sizeof(struct pool_item)) - size = sizeof(struct pool_item); - - size = ALIGN(size); - if (size > pagesz) - panic("pool_init: pool item size (%lu) too large", - (u_long)size); + prsize = size; + if ((flags & PR_NOTOUCH) == 0 && prsize < sizeof(struct pool_item)) + prsize = sizeof(struct pool_item); + + prsize = roundup(prsize, align); + KASSERTMSG((prsize <= palloc->pa_pagesz), + "%s: [%s] pool item size (%zu) larger than page size (%u)", + __func__, wchan, prsize, palloc->pa_pagesz); /* * Initialize the pool structure. */ - TAILQ_INIT(&pp->pr_pagelist); - TAILQ_INIT(&pp->pr_cachelist); + LIST_INIT(&pp->pr_emptypages); + LIST_INIT(&pp->pr_fullpages); + LIST_INIT(&pp->pr_partpages); + pp->pr_cache = NULL; pp->pr_curpage = NULL; pp->pr_npages = 0; pp->pr_minitems = 0; @@ -407,15 +545,10 @@ pool_init(struct pool *pp, size_t size, pp->pr_maxpages = UINT_MAX; pp->pr_roflags = flags; pp->pr_flags = 0; - pp->pr_size = size; + pp->pr_size = prsize; pp->pr_align = align; pp->pr_wchan = wchan; - pp->pr_mtype = mtype; - pp->pr_alloc = alloc; - pp->pr_free = release; - pp->pr_pagesz = pagesz; - pp->pr_pagemask = ~(pagesz - 1); - pp->pr_pageshift = ffs(pagesz) - 1; + pp->pr_alloc = palloc; pp->pr_nitems = 0; pp->pr_nout = 0; pp->pr_hardlimit = UINT_MAX; @@ -424,38 +557,74 @@ pool_init(struct pool *pp, size_t size, pp->pr_hardlimit_ratecap.tv_usec = 0; pp->pr_hardlimit_warning_last.tv_sec = 0; pp->pr_hardlimit_warning_last.tv_usec = 0; + pp->pr_drain_hook = NULL; + pp->pr_drain_hook_arg = NULL; + pp->pr_freecheck = NULL; + pool_redzone_init(pp, size); /* * Decide whether to put the page header off page to avoid - * wasting too large a part of the page. Off-page page headers - * go on a hash table, so we can match a returned item - * with its header based on the page address. - * We use 1/16 of the page size as the threshold (XXX: tune) + * wasting too large a part of the page or too big item. + * Off-page page headers go on a hash table, so we can match + * a returned item with its header based on the page address. 
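
A shared allocator's first pool computes pa_pagemask and pa_pageshift above from pa_pagesz, which must be a power of two; pr_find_pagehead() earlier recovers an item's page base with a single AND against that mask. Numerically:

    #include <stdio.h>
    #include <stdint.h>
    #include <strings.h>                 /* ffs() */

    int main(void)
    {
        unsigned pagesz = 4096;          /* must be a power of two */
        uintptr_t pagemask = ~((uintptr_t)pagesz - 1);
        int pageshift = ffs((int)pagesz) - 1;

        uintptr_t item = 0x12345abc;     /* some item inside a page */
        printf("page base %#lx, pageshift %d\n",
            (unsigned long)(item & pagemask), pageshift);
        /* prints: page base 0x12345000, pageshift 12 */
        return 0;
    }
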
+ * We use 1/16 of the page size and about 8 times of the item + * size as the threshold (XXX: tune) + * + * However, we'll put the header into the page if we can put + * it without wasting any items. + * + * Silently enforce `0 <= ioff < align'. */ - if (pp->pr_size < pagesz/16) { + pp->pr_itemoffset = ioff %= align; + /* See the comment below about reserved bytes. */ + trysize = palloc->pa_pagesz - ((align - ioff) % align); + phsize = ALIGN(sizeof(struct pool_item_header)); + if (pp->pr_roflags & PR_PHINPAGE || + ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 && + (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) || + trysize / pp->pr_size == (trysize - phsize) / pp->pr_size))) { /* Use the end of the page for the page header */ pp->pr_roflags |= PR_PHINPAGE; - pp->pr_phoffset = off = - pagesz - ALIGN(sizeof(struct pool_item_header)); + pp->pr_phoffset = off = palloc->pa_pagesz - phsize; } else { /* The page header will be taken from our page header pool */ pp->pr_phoffset = 0; - off = pagesz; - for (i = 0; i < PR_HASHTABSIZE; i++) { - LIST_INIT(&pp->pr_hashtab[i]); - } + off = palloc->pa_pagesz; + SPLAY_INIT(&pp->pr_phtree); } /* * Alignment is to take place at `ioff' within the item. This means * we must reserve up to `align - 1' bytes on the page to allow * appropriate positioning of each item. - * - * Silently enforce `0 <= ioff < align'. */ - pp->pr_itemoffset = ioff = ioff % align; pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size; KASSERT(pp->pr_itemsperpage != 0); + if ((pp->pr_roflags & PR_NOTOUCH)) { + int idx; + + for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx); + idx++) { + /* nothing */ + } + if (idx >= PHPOOL_MAX) { + /* + * if you see this panic, consider to tweak + * PHPOOL_MAX and PHPOOL_FREELIST_NELEM. + */ + panic("%s: [%s] too large itemsperpage(%d) for " + "PR_NOTOUCH", __func__, + pp->pr_wchan, pp->pr_itemsperpage); + } + pp->pr_phpool = &phpool[idx]; + } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) { + pp->pr_phpool = &phpool[0]; + } +#if defined(DIAGNOSTIC) + else { + pp->pr_phpool = NULL; + } +#endif /* * Use the slack between the chunks and the page header @@ -472,37 +641,32 @@ pool_init(struct pool *pp, size_t size, pp->pr_npagefree = 0; pp->pr_hiwat = 0; pp->pr_nidle = 0; + pp->pr_refcnt = 0; - if (flags & PR_LOGGING) { - if (kmem_map == NULL || - (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log), - M_TEMP, M_NOWAIT)) == NULL) - pp->pr_roflags &= ~PR_LOGGING; - pp->pr_curlogentry = 0; - pp->pr_logsize = pool_logsize; - } - - pp->pr_entered_file = NULL; - pp->pr_entered_line = 0; - - simple_lock_init(&pp->pr_slock); - - /* - * Initialize private page header pool and cache magazine pool if we - * haven't done so yet. - * XXX LOCKING. - */ - if (phpool.pr_size == 0) { - pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, - 0, "phpool", 0, 0, 0, 0); - pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0, - 0, "pcgpool", 0, 0, 0, 0); - } + mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl); + cv_init(&pp->pr_cv, wchan); + pp->pr_ipl = ipl; /* Insert into the list of all pools. 
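
The placement test above keeps the header inside the page when items are small (below both pagesz/16 and eight header sizes) or when the header fits in slack that item packing wastes anyway, i.e. trysize / size == (trysize - phsize) / size. Evaluating that predicate for a few item sizes, assuming a 4096-byte page, a 48-byte aligned header, and ioff == 0 (so trysize == pagesz):

    #include <stdio.h>
    #include <stddef.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        size_t pagesz = 4096, phsize = 48;
        size_t sizes[] = { 64, 512, 640, 1024 };

        for (int i = 0; i < 4; i++) {
            size_t sz = sizes[i];
            int inpage = sz < MIN(pagesz / 16, phsize << 3) ||
                pagesz / sz == (pagesz - phsize) / sz;
            printf("item %4zu: header %s, %zu items/page\n", sz,
                inpage ? "in page " : "off page",
                (inpage ? pagesz - phsize : pagesz) / sz);
        }
        /* 64 -> in page (small); 640 -> in page (free slack);
         * 512 and 1024 -> off page (the header would cost an item). */
        return 0;
    }
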
*/ - simple_lock(&pool_head_slock); - TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist); - simple_unlock(&pool_head_slock); + if (!cold) + mutex_enter(&pool_head_lock); + TAILQ_FOREACH(pp1, &pool_head, pr_poollist) { + if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0) + break; + } + if (pp1 == NULL) + TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist); + else + TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist); + if (!cold) + mutex_exit(&pool_head_lock); + + /* Insert this into the list of pools using this allocator. */ + if (!cold) + mutex_enter(&palloc->pa_lock); + TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list); + if (!cold) + mutex_exit(&palloc->pa_lock); } /* @@ -511,89 +675,125 @@ pool_init(struct pool *pp, size_t size, void pool_destroy(struct pool *pp) { + struct pool_pagelist pq; struct pool_item_header *ph; - struct pool_cache *pc; - - /* Destroy all caches for this pool. */ - while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL) - pool_cache_destroy(pc); - -#ifdef DIAGNOSTIC - if (pp->pr_nout != 0) { - pr_printlog(pp, NULL, printf); - panic("pool_destroy: pool busy: still out: %u\n", - pp->pr_nout); - } -#endif - - /* Remove all pages */ - if ((pp->pr_roflags & PR_STATIC) == 0) - while ((ph = pp->pr_pagelist.tqh_first) != NULL) - pr_rmpage(pp, ph); /* Remove from global pool list */ - simple_lock(&pool_head_slock); + mutex_enter(&pool_head_lock); + while (pp->pr_refcnt != 0) + cv_wait(&pool_busy, &pool_head_lock); TAILQ_REMOVE(&pool_head, pp, pr_poollist); - /* XXX Only clear this if we were drainpp? */ - drainpp = NULL; - simple_unlock(&pool_head_slock); + if (drainpp == pp) + drainpp = NULL; + mutex_exit(&pool_head_lock); + + /* Remove this pool from its allocator's list of pools. */ + mutex_enter(&pp->pr_alloc->pa_lock); + TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list); + mutex_exit(&pp->pr_alloc->pa_lock); + + mutex_enter(&pool_allocator_lock); + if (--pp->pr_alloc->pa_refcnt == 0) + mutex_destroy(&pp->pr_alloc->pa_lock); + mutex_exit(&pool_allocator_lock); + + mutex_enter(&pp->pr_lock); + + KASSERT(pp->pr_cache == NULL); + KASSERTMSG((pp->pr_nout == 0), + "%s: pool busy: still out: %u", __func__, pp->pr_nout); + KASSERT(LIST_EMPTY(&pp->pr_fullpages)); + KASSERT(LIST_EMPTY(&pp->pr_partpages)); + + /* Remove all pages */ + LIST_INIT(&pq); + while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL) + pr_rmpage(pp, ph, &pq); + + mutex_exit(&pp->pr_lock); + + pr_pagelist_free(pp, &pq); + cv_destroy(&pp->pr_cv); + mutex_destroy(&pp->pr_lock); +} - if ((pp->pr_roflags & PR_LOGGING) != 0) - free(pp->pr_log, M_TEMP); +void +pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg) +{ - if (pp->pr_roflags & PR_FREEHEADER) - free(pp, M_POOL); + /* XXX no locking -- must be used just after pool_init() */ + KASSERTMSG((pp->pr_drain_hook == NULL), + "%s: [%s] already set", __func__, pp->pr_wchan); + pp->pr_drain_hook = fn; + pp->pr_drain_hook_arg = arg; } +static struct pool_item_header * +pool_alloc_item_header(struct pool *pp, void *storage, int flags) +{ + struct pool_item_header *ph; + + if ((pp->pr_roflags & PR_PHINPAGE) != 0) + ph = (void *)((char *)storage + pp->pr_phoffset); + else + ph = pool_get(pp->pr_phpool, flags); + + return (ph); +} /* - * Grab an item from the pool; must be called at appropriate spl level + * Grab an item from the pool. 
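
pool_destroy() above cannot tear a pool down while pool_drain() still holds a reference, so it sleeps on the global pool_busy condvar until pr_refcnt reaches zero. The same handshake reduced to pthreads:

    #include <pthread.h>

    struct pool { unsigned refcnt; /* ... */ };

    static pthread_mutex_t head_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t busy = PTHREAD_COND_INITIALIZER;

    /* Drainer: drop the pin taken while working on the pool unlocked. */
    static void pool_unref(struct pool *pp)
    {
        pthread_mutex_lock(&head_lock);
        pp->refcnt--;
        pthread_cond_broadcast(&busy);
        pthread_mutex_unlock(&head_lock);
    }

    /* Destroyer: wait until no one holds a reference, then unlink. */
    static void pool_wait_unreferenced(struct pool *pp)
    {
        pthread_mutex_lock(&head_lock);
        while (pp->refcnt != 0)
            pthread_cond_wait(&busy, &head_lock);
        /* ... safe to remove pp from the global list here ... */
        pthread_mutex_unlock(&head_lock);
    }
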
*/ void * -_pool_get(struct pool *pp, int flags, const char *file, long line) +pool_get(struct pool *pp, int flags) { - void *v; struct pool_item *pi; struct pool_item_header *ph; + void *v; -#ifdef DIAGNOSTIC - if (__predict_false((pp->pr_roflags & PR_STATIC) && - (flags & PR_MALLOCOK))) { - pr_printlog(pp, NULL, printf); - panic("pool_get: static"); + KASSERTMSG((pp->pr_itemsperpage != 0), + "%s: [%s] pr_itemsperpage is zero, " + "pool not initialized?", __func__, pp->pr_wchan); + KASSERTMSG((!(cpu_intr_p() || cpu_softintr_p()) + || pp->pr_ipl != IPL_NONE || cold || panicstr != NULL), + "%s: [%s] is IPL_NONE, but called from interrupt context", + __func__, pp->pr_wchan); + if (flags & PR_WAITOK) { + ASSERT_SLEEPABLE(); } -#endif - - if (__predict_false(curproc == NULL && doing_shutdown == 0 && - (flags & PR_WAITOK) != 0)) - panic("pool_get: must have NOWAIT"); - - simple_lock(&pp->pr_slock); - pr_enter(pp, file, line); + mutex_enter(&pp->pr_lock); startover: /* * Check to see if we've reached the hard limit. If we have, * and we can wait, then wait until an item has been returned to * the pool. */ -#ifdef DIAGNOSTIC - if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) { - pr_leave(pp); - simple_unlock(&pp->pr_slock); - panic("pool_get: %s: crossed hard limit", pp->pr_wchan); - } -#endif + KASSERTMSG((pp->pr_nout <= pp->pr_hardlimit), + "%s: %s: crossed hard limit", __func__, pp->pr_wchan); if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) { + if (pp->pr_drain_hook != NULL) { + /* + * Since the drain hook is going to free things + * back to the pool, unlock, call the hook, re-lock, + * and check the hardlimit condition again. + */ + mutex_exit(&pp->pr_lock); + (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags); + mutex_enter(&pp->pr_lock); + if (pp->pr_nout < pp->pr_hardlimit) + goto startover; + } + if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) { /* * XXX: A warning isn't logged in this case. Should * it be? */ pp->pr_flags |= PR_WANTED; - pr_leave(pp); - ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock); - pr_enter(pp, file, line); + do { + cv_wait(&pp->pr_cv, &pp->pr_lock); + } while (pp->pr_flags & PR_WANTED); goto startover; } @@ -605,13 +805,10 @@ _pool_get(struct pool *pp, int flags, co &pp->pr_hardlimit_ratecap)) log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning); - if (flags & PR_URGENT) - panic("pool_get: urgent"); - pp->pr_nfail++; - pr_leave(pp); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); + KASSERT((flags & (PR_WAITOK|PR_NOWAIT)) == PR_NOWAIT); return (NULL); } @@ -622,151 +819,94 @@ _pool_get(struct pool *pp, int flags, co * has no items in its bucket. */ if ((ph = pp->pr_curpage) == NULL) { - void *v; + int error; -#ifdef DIAGNOSTIC - if (pp->pr_nitems != 0) { - simple_unlock(&pp->pr_slock); - printf("pool_get: %s: curpage NULL, nitems %u\n", - pp->pr_wchan, pp->pr_nitems); - panic("pool_get: nitems inconsistent\n"); - } -#endif + KASSERTMSG((pp->pr_nitems == 0), + "%s: [%s] curpage NULL, inconsistent nitems %u", + __func__, pp->pr_wchan, pp->pr_nitems); /* * Call the back-end page allocator for more memory. * Release the pool lock, as the back-end page allocator * may block. 
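
The hard-limit path above has a characteristic unlock/callback/relock shape: the drain hook must run without pr_lock held (it will call pool_put(), which takes that same lock), and after relocking the limit must be tested again because the state may have changed meanwhile. A compilable toy of the same choreography (all names invented):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct respool {
        pthread_mutex_t lock;
        unsigned nout, hardlimit;
        void (*drain_hook)(void *);      /* may return resources to us */
        void *hook_arg;
    };

    /* Take one slot, or fail at the hard limit after giving the owner's
     * drain hook one chance to free things up. */
    static bool respool_get(struct respool *pp)
    {
        bool ok;

        pthread_mutex_lock(&pp->lock);
        if (pp->nout == pp->hardlimit && pp->drain_hook != NULL) {
            pthread_mutex_unlock(&pp->lock);   /* hook re-enters the pool */
            (*pp->drain_hook)(pp->hook_arg);
            pthread_mutex_lock(&pp->lock);     /* then re-check the limit */
        }
        ok = pp->nout < pp->hardlimit;
        if (ok)
            pp->nout++;
        pthread_mutex_unlock(&pp->lock);
        return ok;
    }
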
*/ - pr_leave(pp); - simple_unlock(&pp->pr_slock); - v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype); - simple_lock(&pp->pr_slock); - pr_enter(pp, file, line); - - if (v == NULL) { + error = pool_grow(pp, flags); + if (error != 0) { /* - * We were unable to allocate a page, but - * we released the lock during allocation, - * so perhaps items were freed back to the - * pool. Check for this case. + * pool_grow aborts when another thread + * is allocating a new page. Retry if it + * waited for it. */ - if (pp->pr_curpage != NULL) + if (error == ERESTART) goto startover; - if (flags & PR_URGENT) - panic("pool_get: urgent"); - - if ((flags & PR_WAITOK) == 0) { - pp->pr_nfail++; - pr_leave(pp); - simple_unlock(&pp->pr_slock); - return (NULL); - } - /* - * Wait for items to be returned to this pool. - * - * XXX: we actually want to wait just until - * the page allocator has memory again. Depending - * on this pool's usage, we might get stuck here - * for a long time. - * - * XXX: maybe we should wake up once a second and - * try again? + * We were unable to allocate a page or item + * header, but we released the lock during + * allocation, so perhaps items were freed + * back to the pool. Check for this case. */ - pp->pr_flags |= PR_WANTED; - pr_leave(pp); - ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock); - pr_enter(pp, file, line); - goto startover; - } + if (pp->pr_curpage != NULL) + goto startover; - /* We have more memory; add it to the pool */ - if (pool_prime_page(pp, v, flags & PR_WAITOK) != 0) { - /* - * Probably, we don't allowed to wait and - * couldn't allocate a page header. - */ - (*pp->pr_free)(v, pp->pr_pagesz, pp->pr_mtype); pp->pr_nfail++; - pr_leave(pp); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); + KASSERT((flags & (PR_WAITOK|PR_NOWAIT)) == PR_NOWAIT); return (NULL); } - pp->pr_npagealloc++; /* Start the allocation process over. */ goto startover; } + if (pp->pr_roflags & PR_NOTOUCH) { + KASSERTMSG((ph->ph_nmissing < pp->pr_itemsperpage), + "%s: %s: page empty", __func__, pp->pr_wchan); + v = pr_item_notouch_get(pp, ph); + } else { + v = pi = LIST_FIRST(&ph->ph_itemlist); + if (__predict_false(v == NULL)) { + mutex_exit(&pp->pr_lock); + panic("%s: [%s] page empty", __func__, pp->pr_wchan); + } + KASSERTMSG((pp->pr_nitems > 0), + "%s: [%s] nitems %u inconsistent on itemlist", + __func__, pp->pr_wchan, pp->pr_nitems); + KASSERTMSG((pi->pi_magic == PI_MAGIC), + "%s: [%s] free list modified: " + "magic=%x; page %p; item addr %p", __func__, + pp->pr_wchan, pi->pi_magic, ph->ph_page, pi); - if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) { - pr_leave(pp); - simple_unlock(&pp->pr_slock); - panic("pool_get: %s: page empty", pp->pr_wchan); - } -#ifdef DIAGNOSTIC - if (__predict_false(pp->pr_nitems == 0)) { - pr_leave(pp); - simple_unlock(&pp->pr_slock); - printf("pool_get: %s: items on itemlist, nitems %u\n", - pp->pr_wchan, pp->pr_nitems); - panic("pool_get: nitems inconsistent\n"); - } -#endif - pr_log(pp, v, PRLOG_GET, file, line); - -#ifdef DIAGNOSTIC - if (__predict_false(pi->pi_magic != PI_MAGIC)) { - pr_printlog(pp, pi, printf); - panic("pool_get(%s): free list modified: magic=%x; page %p;" - " item addr %p\n", - pp->pr_wchan, pi->pi_magic, ph->ph_page, pi); + /* + * Remove from item list. + */ + LIST_REMOVE(pi, pi_list); } -#endif - - /* - * Remove from item list. 
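
The PI_MAGIC discipline enforced above: pool_do_put() stamps every freed item, and pool_get() verifies the stamp before unlinking, so a write through a dangling pointer into a free item is caught at the next allocation instead of silently corrupting the free list. In miniature:

    #include <stdio.h>
    #include <stdlib.h>

    #define PI_MAGIC 0xdeaddeadU

    struct item {
        unsigned magic;                /* valid only while the item is free */
        struct item *next;
    };

    static struct item *freelist;

    static void toy_put(void *v)
    {
        struct item *pi = v;

        pi->magic = PI_MAGIC;          /* stamp on free ... */
        pi->next = freelist;
        freelist = pi;
    }

    static void *toy_get(void)
    {
        struct item *pi = freelist;

        if (pi == NULL)
            return NULL;
        if (pi->magic != PI_MAGIC) {   /* ... check on allocate */
            fprintf(stderr, "free list modified: magic=%x item=%p\n",
                pi->magic, (void *)pi);
            abort();
        }
        freelist = pi->next;
        return pi;
    }
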
- */ - TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list); pp->pr_nitems--; pp->pr_nout++; if (ph->ph_nmissing == 0) { -#ifdef DIAGNOSTIC - if (__predict_false(pp->pr_nidle == 0)) - panic("pool_get: nidle inconsistent"); -#endif + KASSERT(pp->pr_nidle > 0); pp->pr_nidle--; + + /* + * This page was previously empty. Move it to the list of + * partially-full pages. This page is already curpage. + */ + LIST_REMOVE(ph, ph_pagelist); + LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist); } ph->ph_nmissing++; - if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) { -#ifdef DIAGNOSTIC - if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) { - pr_leave(pp); - simple_unlock(&pp->pr_slock); - panic("pool_get: %s: nmissing inconsistent", - pp->pr_wchan); - } -#endif + if (ph->ph_nmissing == pp->pr_itemsperpage) { + KASSERTMSG(((pp->pr_roflags & PR_NOTOUCH) || + LIST_EMPTY(&ph->ph_itemlist)), + "%s: [%s] nmissing (%u) inconsistent", __func__, + pp->pr_wchan, ph->ph_nmissing); /* - * Find a new non-empty page header, if any. - * Start search from the page head, to increase - * the chance for "high water" pages to be freed. - * - * Migrate empty pages to the end of the list. This - * will speed the update of curpage as pages become - * idle. Empty pages intermingled with idle pages - * is no big deal. As soon as a page becomes un-empty, - * it will move back to the head of the list. - */ - TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist); - TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist); - for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; - ph = TAILQ_NEXT(ph, ph_pagelist)) - if (TAILQ_FIRST(&ph->ph_itemlist) != NULL) - break; - - pp->pr_curpage = ph; + * This page is now full. Move it to the full list + * and select a new current page. + */ + LIST_REMOVE(ph, ph_pagelist); + LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist); + pool_update_curpage(pp); } pp->pr_nget++; @@ -775,7 +915,7 @@ _pool_get(struct pool *pp, int flags, co * If we have a low water mark and we are now below that low * water mark, add more items to the pool. */ - if (pp->pr_nitems < pp->pr_minitems && pool_catchup(pp) != 0) { + if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) { /* * XXX: Should we log a warning? Should we set up a timeout * to try again in a second or so? The latter could break @@ -783,8 +923,10 @@ _pool_get(struct pool *pp, int flags, co */ } - pr_leave(pp); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); + KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0); + FREECHECK_OUT(&pp->pr_freecheck, v); + pool_redzone_fill(pp, v); return (v); } @@ -792,54 +934,45 @@ _pool_get(struct pool *pp, int flags, co * Internal version of pool_put(). Pool is already locked/entered. 
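
Taken together, the relink branches above and their mirrors in pool_do_put() form a small state machine keyed on ph_nmissing: 0 means pr_emptypages, pr_itemsperpage means pr_fullpages, anything between means pr_partpages, and only the edge transitions move a page between lists. pool_update_curpage() then prefers a partial page before breaking into an empty one. As a pure function:

    /* Which of the pool's three page lists a header belongs on, as a
     * function of how many of its items are outstanding; pool_get() and
     * pool_do_put() relink a page only when this value changes. */
    enum pagelist { LIST_EMPTY, LIST_PARTIAL, LIST_FULL };

    static enum pagelist
    page_list_for(unsigned nmissing, unsigned itemsperpage)
    {
        if (nmissing == 0)
            return LIST_EMPTY;
        if (nmissing == itemsperpage)
            return LIST_FULL;
        return LIST_PARTIAL;
    }
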
*/ static void -pool_do_put(struct pool *pp, void *v, const char *file, long line) +pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq) { struct pool_item *pi = v; struct pool_item_header *ph; - caddr_t page; - int s; - - page = (caddr_t)((u_long)v & pp->pr_pagemask); -#ifdef DIAGNOSTIC - if (__predict_false(pp->pr_nout == 0)) { - printf("pool %s: putting with none out\n", - pp->pr_wchan); - panic("pool_put"); - } -#endif + KASSERT(mutex_owned(&pp->pr_lock)); + pool_redzone_check(pp, v); + FREECHECK_IN(&pp->pr_freecheck, v); + LOCKDEBUG_MEM_CHECK(v, pp->pr_size); - pr_log(pp, v, PRLOG_PUT, file, line); + KASSERTMSG((pp->pr_nout > 0), + "%s: [%s] putting with none out", __func__, pp->pr_wchan); - if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) { - pr_printlog(pp, NULL, printf); - panic("pool_put: %s: page header missing", pp->pr_wchan); + if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) { + panic("%s: [%s] page header missing", __func__, pp->pr_wchan); } -#ifdef LOCKDEBUG - /* - * Check if we're freeing a locked simple lock. - */ - simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size); -#endif - /* * Return to item list. */ + if (pp->pr_roflags & PR_NOTOUCH) { + pr_item_notouch_put(pp, ph, v); + } else { #ifdef DIAGNOSTIC - pi->pi_magic = PI_MAGIC; + pi->pi_magic = PI_MAGIC; #endif #ifdef DEBUG - { - int i, *ip = v; + { + int i, *ip = v; - for (i = 0; i < pp->pr_size / sizeof(int); i++) { - *ip++ = PI_MAGIC; + for (i = 0; i < pp->pr_size / sizeof(int); i++) { + *ip++ = PI_MAGIC; + } } - } #endif - TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list); + LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list); + } + KDASSERT(ph->ph_nmissing != 0); ph->ph_nmissing--; pp->pr_nput++; pp->pr_nitems++; @@ -851,159 +984,234 @@ pool_do_put(struct pool *pp, void *v, co if (pp->pr_flags & PR_WANTED) { pp->pr_flags &= ~PR_WANTED; - if (ph->ph_nmissing == 0) - pp->pr_nidle++; - wakeup((caddr_t)pp); - return; + cv_broadcast(&pp->pr_cv); } /* - * If this page is now complete, do one of two things: + * If this page is now empty, do one of two things: * - * (1) If we have more pages than the page high water - * mark, free the page back to the system. + * (1) If we have more pages than the page high water mark, + * free the page back to the system. ONLY CONSIDER + * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE + * CLAIM. * - * (2) Move it to the end of the page list, so that - * we minimize our chances of fragmenting the - * pool. Idle pages migrate to the end (along with - * completely empty pages, so that we find un-empty - * pages more quickly when we update curpage) of the - * list so they can be more easily swept up by - * the pagedaemon when pages are scarce. + * (2) Otherwise, move the page to the empty page list. + * + * Either way, select a new current page (so we use a partially-full + * page if one is available). */ if (ph->ph_nmissing == 0) { pp->pr_nidle++; - if (pp->pr_npages > pp->pr_maxpages) { - pr_rmpage(pp, ph); + if (pp->pr_npages > pp->pr_minpages && + pp->pr_npages > pp->pr_maxpages) { + pr_rmpage(pp, ph, pq); } else { - TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist); - TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist); + LIST_REMOVE(ph, ph_pagelist); + LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist); /* * Update the timestamp on the page. A page must * be idle for some period of time before it can * be reclaimed by the pagedaemon. This minimizes * ping-pong'ing for memory. 
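
The DEBUG branch above extends the magic idea to the whole object: a freed item is flooded with PI_MAGIC words, so a use-after-free reads back an unmistakable 0xdeaddead pattern rather than plausible stale data. The userspace equivalent:

    #include <stddef.h>

    #define PI_MAGIC 0xdeaddeadU

    /* Poison a freed object so dangling reads and writes are loud. */
    static void poison(void *v, size_t size)
    {
        unsigned *ip = v;

        for (size_t i = 0; i < size / sizeof(unsigned); i++)
            ip[i] = PI_MAGIC;
    }
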
- */ - s = splclock(); - ph->ph_time = mono_time; - splx(s); - - /* - * Update the current page pointer. Just look for - * the first page with any free items. * - * XXX: Maybe we want an option to look for the - * page with the fewest available items, to minimize - * fragmentation? + * note for 64-bit time_t: truncating to 32-bit is not + * a problem for our usage. */ - for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; - ph = TAILQ_NEXT(ph, ph_pagelist)) - if (TAILQ_FIRST(&ph->ph_itemlist) != NULL) - break; - - pp->pr_curpage = ph; + ph->ph_time = time_uptime; } + pool_update_curpage(pp); } + /* - * If the page has just become un-empty, move it to the head of - * the list, and make it the current page. The next allocation - * will get the item from this page, instead of further fragmenting - * the pool. + * If the page was previously completely full, move it to the + * partially-full list and make it the current page. The next + * allocation will get the item from this page, instead of + * further fragmenting the pool. */ else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) { - TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist); - TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist); + LIST_REMOVE(ph, ph_pagelist); + LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist); pp->pr_curpage = ph; } } -/* - * Return resource to the pool; must be called at appropriate spl level - */ void -_pool_put(struct pool *pp, void *v, const char *file, long line) +pool_put(struct pool *pp, void *v) { + struct pool_pagelist pq; - simple_lock(&pp->pr_slock); - pr_enter(pp, file, line); + LIST_INIT(&pq); - pool_do_put(pp, v, file, line); + mutex_enter(&pp->pr_lock); + pool_do_put(pp, v, &pq); + mutex_exit(&pp->pr_lock); - pr_leave(pp); - simple_unlock(&pp->pr_slock); + pr_pagelist_free(pp, &pq); } /* - * Add a page worth of items to the pool. + * pool_grow: grow a pool by a page. * - * Note, we must be called with the pool descriptor LOCKED. + * => called with pool locked. + * => unlock and relock the pool. + * => return with pool locked. */ + static int -pool_prime_page(struct pool *pp, caddr_t storage, int flags) +pool_grow(struct pool *pp, int flags) { - struct pool_item *pi; - struct pool_item_header *ph; - caddr_t cp = storage; - unsigned int align = pp->pr_align; - unsigned int ioff = pp->pr_itemoffset; - int s, n; - - if (((u_long)cp & (pp->pr_pagesz - 1)) != 0) - panic("pool_prime_page: %s: unaligned page", pp->pr_wchan); - - if ((pp->pr_roflags & PR_PHINPAGE) != 0) { - ph = (struct pool_item_header *)(cp + pp->pr_phoffset); - } else { - s = splhigh(); - ph = pool_get(&phpool, flags); - splx(s); - if (ph == NULL) - return (ENOMEM); - LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)], - ph, ph_hashlist); - } + struct pool_item_header *ph = NULL; + char *cp; + int error; /* - * Insert page header. + * If there's a pool_grow in progress, wait for it to complete + * and try again from the top. 
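
PR_GROWING (tested just below) makes growth single-flight: one thread allocates the page while the others either sleep on pr_cv and retry, or fail fast when they may not wait; the kernel reports those outcomes as ERESTART and EWOULDBLOCK. The same gate in pthreads, with a plain enum standing in for those errno values:

    #include <pthread.h>
    #include <stdbool.h>

    enum grow_result { GREW, RETRY /* ~ERESTART */, WOULDBLOCK /* ~EWOULDBLOCK */ };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
    static bool growing;

    /* Called and returns with `lock` held; drops it around the allocation. */
    static enum grow_result grow(bool canwait)
    {
        if (growing) {
            if (!canwait)
                return WOULDBLOCK;
            while (growing)                   /* someone else is on it */
                pthread_cond_wait(&cv, &lock);
            return RETRY;                     /* their new page may serve us */
        }
        growing = true;

        pthread_mutex_unlock(&lock);
        /* ... allocate and prime a new page here; may block ... */
        pthread_mutex_lock(&lock);

        growing = false;
        pthread_cond_broadcast(&cv);          /* wake the waiters */
        return GREW;
    }
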
*/ - TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist); - TAILQ_INIT(&ph->ph_itemlist); - ph->ph_page = storage; - ph->ph_nmissing = 0; - memset(&ph->ph_time, 0, sizeof(ph->ph_time)); + if (pp->pr_flags & PR_GROWING) { + if (flags & PR_WAITOK) { + do { + cv_wait(&pp->pr_cv, &pp->pr_lock); + } while (pp->pr_flags & PR_GROWING); + return ERESTART; + } else { + return EWOULDBLOCK; + } + } + pp->pr_flags |= PR_GROWING; - pp->pr_nidle++; + mutex_exit(&pp->pr_lock); + cp = pool_allocator_alloc(pp, flags); + if (__predict_true(cp != NULL)) { + ph = pool_alloc_item_header(pp, cp, flags); + } + if (__predict_false(cp == NULL || ph == NULL)) { + if (cp != NULL) { + pool_allocator_free(pp, cp); + } + mutex_enter(&pp->pr_lock); + error = ENOMEM; + goto out; + } - /* - * Color this page. - */ - cp = (caddr_t)(cp + pp->pr_curcolor); - if ((pp->pr_curcolor += align) > pp->pr_maxcolor) - pp->pr_curcolor = 0; + mutex_enter(&pp->pr_lock); + pool_prime_page(pp, cp, ph); + pp->pr_npagealloc++; + error = 0; +out: /* - * Adjust storage to apply aligment to `pr_itemoffset' in each item. + * If anyone was waiting for pool_grow, notify them that we + * may have just done it. */ - if (ioff != 0) - cp = (caddr_t)(cp + (align - ioff)); + KASSERT(pp->pr_flags & PR_GROWING); + pp->pr_flags &= ~PR_GROWING; + cv_broadcast(&pp->pr_cv); - /* - * Insert remaining chunks on the bucket list. - */ - n = pp->pr_itemsperpage; - pp->pr_nitems += n; + return error; +} - while (n--) { - pi = (struct pool_item *)cp; +/* + * Add N items to the pool. + */ +int +pool_prime(struct pool *pp, int n) +{ + int newpages; + int error = 0; - /* Insert on page list */ - TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list); -#ifdef DIAGNOSTIC - pi->pi_magic = PI_MAGIC; -#endif - cp = (caddr_t)(cp + pp->pr_size); - } + mutex_enter(&pp->pr_lock); + + newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; + + while (newpages-- > 0) { + error = pool_grow(pp, PR_NOWAIT); + if (error) { + break; + } + pp->pr_minpages++; + } + + if (pp->pr_minpages >= pp->pr_maxpages) + pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */ + + mutex_exit(&pp->pr_lock); + return error; +} + +/* + * Add a page worth of items to the pool. + * + * Note, we must be called with the pool descriptor LOCKED. + */ +static void +pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph) +{ + struct pool_item *pi; + void *cp = storage; + const unsigned int align = pp->pr_align; + const unsigned int ioff = pp->pr_itemoffset; + int n; + + KASSERT(mutex_owned(&pp->pr_lock)); + KASSERTMSG(((pp->pr_roflags & PR_NOALIGN) || + (((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) == 0)), + "%s: [%s] unaligned page: %p", __func__, pp->pr_wchan, cp); + + /* + * Insert page header. + */ + LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist); + LIST_INIT(&ph->ph_itemlist); + ph->ph_page = storage; + ph->ph_nmissing = 0; + ph->ph_time = time_uptime; + if ((pp->pr_roflags & PR_PHINPAGE) == 0) + SPLAY_INSERT(phtree, &pp->pr_phtree, ph); + + pp->pr_nidle++; + + /* + * Color this page. + */ + ph->ph_off = pp->pr_curcolor; + cp = (char *)cp + ph->ph_off; + if ((pp->pr_curcolor += align) > pp->pr_maxcolor) + pp->pr_curcolor = 0; + + /* + * Adjust storage to apply aligment to `pr_itemoffset' in each item. + */ + if (ioff != 0) + cp = (char *)cp + align - ioff; + + KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0); + + /* + * Insert remaining chunks on the bucket list. 
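
The coloring lines above stagger where items begin on successive pages: the cursor advances by the alignment each time a page is primed and wraps once it passes pr_maxcolor, so objects at the same in-page position land on different cache lines across pages. Assuming pr_maxcolor is the aligned leftover slack (its computation sits outside this hunk), the cursor walks like this:

    #include <stdio.h>

    int main(void)
    {
        unsigned align = 64;                 /* item alignment, one cache line */
        unsigned slack = 4096 - 60 * 64;     /* 60 64-byte items: 256 left over */
        unsigned maxcolor = (slack / align) * align;    /* assumed: 256 */
        unsigned curcolor = 0;

        for (int page = 0; page < 8; page++) {
            printf("page %d: first item at offset %u\n", page, curcolor);
            if ((curcolor += align) > maxcolor)          /* the kernel's test */
                curcolor = 0;
        }
        /* offsets cycle 0, 64, 128, 192, 256, 0, 64, 128 */
        return 0;
    }
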
+ */ + n = pp->pr_itemsperpage; + pp->pr_nitems += n; + + if (pp->pr_roflags & PR_NOTOUCH) { + pr_item_notouch_init(pp, ph); + } else { + while (n--) { + pi = (struct pool_item *)cp; + + KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0); + + /* Insert on page list */ + LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list); +#ifdef DIAGNOSTIC + pi->pi_magic = PI_MAGIC; +#endif + cp = (char *)cp + pp->pr_size; + + KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0); + } + } /* * If the pool was depleted, point at the new page. @@ -1013,69 +1221,48 @@ pool_prime_page(struct pool *pp, caddr_t if (++pp->pr_npages > pp->pr_hiwat) pp->pr_hiwat = pp->pr_npages; - - return (0); } /* * Used by pool_get() when nitems drops below the low water mark. This - * is used to catch up nitmes with the low water mark. + * is used to catch up pr_nitems with the low water mark. * * Note 1, we never wait for memory here, we let the caller decide what to do. * - * Note 2, this doesn't work with static pools. - * - * Note 3, we must be called with the pool already locked, and we return + * Note 2, we must be called with the pool already locked, and we return * with it locked. */ static int pool_catchup(struct pool *pp) { - caddr_t cp; int error = 0; - if (pp->pr_roflags & PR_STATIC) { - /* - * We dropped below the low water mark, and this is not a - * good thing. Log a warning. - * - * XXX: rate-limit this? - */ - printf("WARNING: static pool `%s' dropped below low water " - "mark\n", pp->pr_wchan); - return (0); - } - - while (pp->pr_nitems < pp->pr_minitems) { - /* - * Call the page back-end allocator for more memory. - * - * XXX: We never wait, so should we bother unlocking - * the pool descriptor? - */ - simple_unlock(&pp->pr_slock); - cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype); - simple_lock(&pp->pr_slock); - if (__predict_false(cp == NULL)) { - error = ENOMEM; + while (POOL_NEEDS_CATCHUP(pp)) { + error = pool_grow(pp, PR_NOWAIT); + if (error) { break; } - if ((error = pool_prime_page(pp, cp, PR_NOWAIT)) != 0) { - (*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype); - break; - } - pp->pr_npagealloc++; } + return error; +} + +static void +pool_update_curpage(struct pool *pp) +{ - return (error); + pp->pr_curpage = LIST_FIRST(&pp->pr_partpages); + if (pp->pr_curpage == NULL) { + pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages); + } + KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) || + (pp->pr_curpage != NULL && pp->pr_nitems > 0)); } void pool_setlowat(struct pool *pp, int n) { - int error; - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); pp->pr_minitems = n; pp->pr_minpages = (n == 0) @@ -1083,8 +1270,7 @@ pool_setlowat(struct pool *pp, int n) : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; /* Make sure we're caught up with the newly-set low water mark. */ - if ((pp->pr_nitems < pp->pr_minitems) && - (error = pool_catchup(pp)) != 0) { + if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) { /* * XXX: Should we log a warning? Should we set up a timeout * to try again in a second or so? The latter could break @@ -1092,27 +1278,27 @@ pool_setlowat(struct pool *pp, int n) */ } - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); } void pool_sethiwat(struct pool *pp, int n) { - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); pp->pr_maxpages = (n == 0) ? 
0 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); } void pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap) { - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); pp->pr_hardlimit = n; pp->pr_hardlimit_warning = warnmess; @@ -1128,194 +1314,201 @@ pool_sethardlimit(struct pool *pp, int n ? 0 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; - simple_unlock(&pp->pr_slock); -} - -/* - * Default page allocator. - */ -static void * -pool_page_alloc(unsigned long sz, int flags, int mtype) -{ - boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; - - return ((void *)uvm_km_alloc_poolpage(waitok)); -} - -static void -pool_page_free(void *v, unsigned long sz, int mtype) -{ - - uvm_km_free_poolpage((vaddr_t)v); + mutex_exit(&pp->pr_lock); } /* - * Alternate pool page allocator for pools that know they will - * never be accessed in interrupt context. - */ -void * -pool_page_alloc_nointr(unsigned long sz, int flags, int mtype) -{ - boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; - - return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object, - waitok)); -} - -void -pool_page_free_nointr(void *v, unsigned long sz, int mtype) -{ - - uvm_km_free_poolpage1(kernel_map, (vaddr_t)v); -} - - -/* * Release all complete pages that have not been used recently. + * + * Must not be called from interrupt context. */ -void -_pool_reclaim(struct pool *pp, const char *file, long line) +int +pool_reclaim(struct pool *pp) { struct pool_item_header *ph, *phnext; - struct pool_cache *pc; - struct timeval curtime; - int s; + struct pool_pagelist pq; + uint32_t curtime; + bool klock; + int rv; - if (pp->pr_roflags & PR_STATIC) - return; + KASSERT(!cpu_intr_p() && !cpu_softintr_p()); - if (simple_lock_try(&pp->pr_slock) == 0) - return; - pr_enter(pp, file, line); + if (pp->pr_drain_hook != NULL) { + /* + * The drain hook must be called with the pool unlocked. + */ + (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT); + } /* - * Reclaim items from the pool's caches. + * XXXSMP Because we do not want to cause non-MPSAFE code + * to block. */ - for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL; - pc = TAILQ_NEXT(pc, pc_poollist)) - pool_cache_reclaim(pc); + if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK || + pp->pr_ipl == IPL_SOFTSERIAL) { + KERNEL_LOCK(1, NULL); + klock = true; + } else + klock = false; - s = splclock(); - curtime = mono_time; - splx(s); + /* Reclaim items from the pool's cache (if any). */ + if (pp->pr_cache != NULL) + pool_cache_invalidate(pp->pr_cache); - for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) { - phnext = TAILQ_NEXT(ph, ph_pagelist); + if (mutex_tryenter(&pp->pr_lock) == 0) { + if (klock) { + KERNEL_UNLOCK_ONE(NULL); + } + return (0); + } + + LIST_INIT(&pq); + + curtime = time_uptime; + + for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) { + phnext = LIST_NEXT(ph, ph_pagelist); /* Check our minimum page claim */ if (pp->pr_npages <= pp->pr_minpages) break; - if (ph->ph_nmissing == 0) { - struct timeval diff; - timersub(&curtime, &ph->ph_time, &diff); - if (diff.tv_sec < pool_inactive_time) - continue; + KASSERT(ph->ph_nmissing == 0); + if (curtime - ph->ph_time < pool_inactive_time) + continue; - /* - * If freeing this page would put us below - * the low water mark, stop now. 
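
ph_time above is a uint32_t snapshot of time_uptime, and the age test curtime - ph_time < pool_inactive_time is unsigned subtraction, so it stays correct across a 32-bit wrap; that is why the 64-bit time_t note earlier can call the truncation harmless. Checkable in isolation:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t inactive = 10;              /* pool_inactive_time */

        /* Ordinary case: stamped 3 seconds ago -> keep the page. */
        uint32_t t1 = 1000, now1 = 1003;
        printf("%s\n", now1 - t1 < inactive ? "keep" : "reclaim");

        /* Wrapped case: stamped just before the counter rolled over,
         * really 7 seconds ago -> still computes 7, still kept. */
        uint32_t t2 = UINT32_MAX - 2, now2 = 4;
        printf("idle %u s: %s\n", now2 - t2,
            now2 - t2 < inactive ? "keep" : "reclaim");
        return 0;
    }
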
- */ - if ((pp->pr_nitems - pp->pr_itemsperpage) < - pp->pr_minitems) - break; + /* + * If freeing this page would put us below + * the low water mark, stop now. + */ + if ((pp->pr_nitems - pp->pr_itemsperpage) < + pp->pr_minitems) + break; - pr_rmpage(pp, ph); - } + pr_rmpage(pp, ph, &pq); } - pr_leave(pp); - simple_unlock(&pp->pr_slock); -} + mutex_exit(&pp->pr_lock); + + if (LIST_EMPTY(&pq)) + rv = 0; + else { + pr_pagelist_free(pp, &pq); + rv = 1; + } + if (klock) { + KERNEL_UNLOCK_ONE(NULL); + } + + return (rv); +} /* - * Drain pools, one at a time. + * Drain pools, one at a time. The drained pool is returned within ppp. * - * Note, we must never be called from an interrupt context. + * Note, must never be called from interrupt context. */ -void -pool_drain(void *arg) +bool +pool_drain(struct pool **ppp) { + bool reclaimed; struct pool *pp; - int s; - s = splvm(); - simple_lock(&pool_head_slock); + KASSERT(!TAILQ_EMPTY(&pool_head)); - if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL) - goto out; + pp = NULL; - pp = drainpp; - drainpp = TAILQ_NEXT(pp, pr_poollist); + /* Find next pool to drain, and add a reference. */ + mutex_enter(&pool_head_lock); + do { + if (drainpp == NULL) { + drainpp = TAILQ_FIRST(&pool_head); + } + if (drainpp != NULL) { + pp = drainpp; + drainpp = TAILQ_NEXT(pp, pr_poollist); + } + /* + * Skip completely idle pools. We depend on at least + * one pool in the system being active. + */ + } while (pp == NULL || pp->pr_npages == 0); + pp->pr_refcnt++; + mutex_exit(&pool_head_lock); + + /* Drain the cache (if any) and pool.. */ + reclaimed = pool_reclaim(pp); + + /* Finally, unlock the pool. */ + mutex_enter(&pool_head_lock); + pp->pr_refcnt--; + cv_broadcast(&pool_busy); + mutex_exit(&pool_head_lock); - pool_reclaim(pp); + if (ppp != NULL) + *ppp = pp; - out: - simple_unlock(&pool_head_slock); - splx(s); + return reclaimed; } - /* * Diagnostic helpers. */ + void -pool_print(struct pool *pp, const char *modif) +pool_printall(const char *modif, void (*pr)(const char *, ...)) { - int s; + struct pool *pp; - s = splvm(); - if (simple_lock_try(&pp->pr_slock) == 0) { - printf("pool %s is locked; try again later\n", - pp->pr_wchan); - splx(s); - return; + TAILQ_FOREACH(pp, &pool_head, pr_poollist) { + pool_printit(pp, modif, pr); } - pool_print1(pp, modif, printf); - simple_unlock(&pp->pr_slock); - splx(s); } void pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...)) { - int didlock = 0; if (pp == NULL) { (*pr)("Must specify a pool to print.\n"); return; } - /* - * Called from DDB; interrupts should be blocked, and all - * other processors should be paused. We can skip locking - * the pool in this case. - * - * We do a simple_lock_try() just to print the lock - * status, however. 
- */ - - if (simple_lock_try(&pp->pr_slock) == 0) - (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan); - else - didlock = 1; - pool_print1(pp, modif, pr); - - if (didlock) - simple_unlock(&pp->pr_slock); } static void -pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...)) +pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl, + void (*pr)(const char *, ...)) { struct pool_item_header *ph; - struct pool_cache *pc; - struct pool_cache_group *pcg; + struct pool_item *pi __diagused; + + LIST_FOREACH(ph, pl, ph_pagelist) { + (*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n", + ph->ph_page, ph->ph_nmissing, ph->ph_time); #ifdef DIAGNOSTIC - struct pool_item *pi; + if (!(pp->pr_roflags & PR_NOTOUCH)) { + LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) { + if (pi->pi_magic != PI_MAGIC) { + (*pr)("\t\t\titem %p, magic 0x%x\n", + pi, pi->pi_magic); + } + } + } #endif + } +} + +static void +pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...)) +{ + struct pool_item_header *ph; + pool_cache_t pc; + pcg_t *pcg; + pool_cache_cpu_t *cc; + uint64_t cpuhit, cpumiss; int i, print_log = 0, print_pagelist = 0, print_cache = 0; char c; @@ -1326,20 +1519,24 @@ pool_print1(struct pool *pp, const char print_pagelist = 1; if (c == 'c') print_cache = 1; - modif++; } - (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n", + if ((pc = pp->pr_cache) != NULL) { + (*pr)("POOL CACHE"); + } else { + (*pr)("POOL"); + } + + (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n", pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset, pp->pr_roflags); - (*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype); - (*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free); + (*pr)("\talloc %p\n", pp->pr_alloc); (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n", pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages); (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n", pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit); - (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n", + (*pr)("\tnget %lu, nfail %lu, nput %lu\n", pp->pr_nget, pp->pr_nfail, pp->pr_nput); (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n", pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle); @@ -1347,79 +1544,86 @@ pool_print1(struct pool *pp, const char if (print_pagelist == 0) goto skip_pagelist; - if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL) - (*pr)("\n\tpage list:\n"); - for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) { - (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n", - ph->ph_page, ph->ph_nmissing, - (u_long)ph->ph_time.tv_sec, - (u_long)ph->ph_time.tv_usec); -#ifdef DIAGNOSTIC - for (pi = TAILQ_FIRST(&ph->ph_itemlist); pi != NULL; - pi = TAILQ_NEXT(pi, pi_list)) { - if (pi->pi_magic != PI_MAGIC) { - (*pr)("\t\t\titem %p, magic 0x%x\n", - pi, pi->pi_magic); - } - } -#endif - } + if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL) + (*pr)("\n\tempty page list:\n"); + pool_print_pagelist(pp, &pp->pr_emptypages, pr); + if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL) + (*pr)("\n\tfull page list:\n"); + pool_print_pagelist(pp, &pp->pr_fullpages, pr); + if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL) + (*pr)("\n\tpartial-page list:\n"); + pool_print_pagelist(pp, &pp->pr_partpages, pr); + if (pp->pr_curpage == NULL) (*pr)("\tno current page\n"); else (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page); skip_pagelist: - if (print_log == 0) goto skip_log; (*pr)("\n"); - if ((pp->pr_roflags & PR_LOGGING) 
== 0) - (*pr)("\tno log\n"); - else - pr_printlog(pp, NULL, pr); skip_log: - if (print_cache == 0) - goto skip_cache; - - for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL; - pc = TAILQ_NEXT(pc, pc_poollist)) { - (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc, - pc->pc_allocfrom, pc->pc_freeto); - (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n", - pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems); - for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL; - pcg = TAILQ_NEXT(pcg, pcg_list)) { - (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); - for (i = 0; i < PCG_NOBJECTS; i++) - (*pr)("\t\t\t%p\n", pcg->pcg_objects[i]); +#define PR_GROUPLIST(pcg) \ + (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \ + for (i = 0; i < pcg->pcg_size; i++) { \ + if (pcg->pcg_objects[i].pcgo_pa != \ + POOL_PADDR_INVALID) { \ + (*pr)("\t\t\t%p, 0x%llx\n", \ + pcg->pcg_objects[i].pcgo_va, \ + (unsigned long long) \ + pcg->pcg_objects[i].pcgo_pa); \ + } else { \ + (*pr)("\t\t\t%p\n", \ + pcg->pcg_objects[i].pcgo_va); \ + } \ + } + + if (pc != NULL) { + cpuhit = 0; + cpumiss = 0; + for (i = 0; i < __arraycount(pc->pc_cpus); i++) { + if ((cc = pc->pc_cpus[i]) == NULL) + continue; + cpuhit += cc->cc_hits; + cpumiss += cc->cc_misses; + } + (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss); + (*pr)("\tcache layer hits %llu misses %llu\n", + pc->pc_hits, pc->pc_misses); + (*pr)("\tcache layer entry uncontended %llu contended %llu\n", + pc->pc_hits + pc->pc_misses - pc->pc_contended, + pc->pc_contended); + (*pr)("\tcache layer empty groups %u full groups %u\n", + pc->pc_nempty, pc->pc_nfull); + if (print_cache) { + (*pr)("\tfull cache groups:\n"); + for (pcg = pc->pc_fullgroups; pcg != NULL; + pcg = pcg->pcg_next) { + PR_GROUPLIST(pcg); + } + (*pr)("\tempty cache groups:\n"); + for (pcg = pc->pc_emptygroups; pcg != NULL; + pcg = pcg->pcg_next) { + PR_GROUPLIST(pcg); + } } } - - skip_cache: - - pr_enter_check(pp, pr); +#undef PR_GROUPLIST } -int -pool_chk(struct pool *pp, const char *label) +static int +pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph) { - struct pool_item_header *ph; - int r = 0; - - simple_lock(&pp->pr_slock); - - for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; - ph = TAILQ_NEXT(ph, ph_pagelist)) { - - struct pool_item *pi; - int n; - caddr_t page; + struct pool_item *pi; + void *page; + int n; - page = (caddr_t)((u_long)ph & pp->pr_pagemask); + if ((pp->pr_roflags & PR_NOALIGN) == 0) { + page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask); if (page != ph->ph_page && (pp->pr_roflags & PR_PHINPAGE) != 0) { if (label != NULL) @@ -1428,42 +1632,75 @@ pool_chk(struct pool *pp, const char *la " at page head addr %p (p %p)\n", pp, pp->pr_wchan, ph->ph_page, ph, page); - r++; - goto out; + return 1; } + } - for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0; - pi != NULL; - pi = TAILQ_NEXT(pi,pi_list), n++) { + if ((pp->pr_roflags & PR_NOTOUCH) != 0) + return 0; -#ifdef DIAGNOSTIC - if (pi->pi_magic != PI_MAGIC) { - if (label != NULL) - printf("%s: ", label); - printf("pool(%s): free list modified: magic=%x;" - " page %p; item ordinal %d;" - " addr %p (p %p)\n", - pp->pr_wchan, pi->pi_magic, ph->ph_page, - n, pi, page); - panic("pool"); - } -#endif - page = (caddr_t)((u_long)pi & pp->pr_pagemask); - if (page == ph->ph_page) - continue; + for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0; + pi != NULL; + pi = LIST_NEXT(pi,pi_list), n++) { +#ifdef DIAGNOSTIC + if (pi->pi_magic != PI_MAGIC) { if (label != NULL) printf("%s: ", 
label); - printf("pool(%p:%s): page inconsistency: page %p;" - " item ordinal %d; addr %p (p %p)\n", pp, - pp->pr_wchan, ph->ph_page, - n, pi, page); - r++; + printf("pool(%s): free list modified: magic=%x;" + " page %p; item ordinal %d; addr %p\n", + pp->pr_wchan, pi->pi_magic, ph->ph_page, + n, pi); + panic("pool"); + } +#endif + if ((pp->pr_roflags & PR_NOALIGN) != 0) { + continue; + } + page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask); + if (page == ph->ph_page) + continue; + + if (label != NULL) + printf("%s: ", label); + printf("pool(%p:%s): page inconsistency: page %p;" + " item ordinal %d; addr %p (p %p)\n", pp, + pp->pr_wchan, ph->ph_page, + n, pi, page); + return 1; + } + return 0; +} + + +int +pool_chk(struct pool *pp, const char *label) +{ + struct pool_item_header *ph; + int r = 0; + + mutex_enter(&pp->pr_lock); + LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) { + r = pool_chk_page(pp, label, ph); + if (r) { + goto out; + } + } + LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) { + r = pool_chk_page(pp, label, ph); + if (r) { goto out; } } + LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) { + r = pool_chk_page(pp, label, ph); + if (r) { + goto out; + } + } + out: - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); return (r); } @@ -1471,38 +1708,112 @@ out: * pool_cache_init: * * Initialize a pool cache. + */ +pool_cache_t +pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags, + const char *wchan, struct pool_allocator *palloc, int ipl, + int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg) +{ + pool_cache_t pc; + + pc = pool_get(&cache_pool, PR_WAITOK); + if (pc == NULL) + return NULL; + + pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan, + palloc, ipl, ctor, dtor, arg); + + return pc; +} + +/* + * pool_cache_bootstrap: * - * NOTE: If the pool must be protected from interrupts, we expect - * to be called at the appropriate interrupt priority level. + * Kernel-private version of pool_cache_init(). The caller + * provides initial storage. 
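+ *
+ * A hypothetical early-boot caller (names are illustrative only,
+ * not part of this file) might embed the storage statically:
+ *
+ *	static struct pool_cache foo_cache_store;
+ *
+ *	pool_cache_bootstrap(&foo_cache_store, sizeof(struct foo),
+ *	    0, 0, 0, "foocache", NULL, IPL_NONE, NULL, NULL, NULL);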
*/ void -pool_cache_init(struct pool_cache *pc, struct pool *pp, - int (*ctor)(void *, void *, int), - void (*dtor)(void *, void *), +pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align, + u_int align_offset, u_int flags, const char *wchan, + struct pool_allocator *palloc, int ipl, + int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg) { + CPU_INFO_ITERATOR cii; + pool_cache_t pc1; + struct cpu_info *ci; + struct pool *pp; + + pp = &pc->pc_pool; + if (palloc == NULL && ipl == IPL_NONE) { + if (size > PAGE_SIZE) { + int bigidx = pool_bigidx(size); - TAILQ_INIT(&pc->pc_grouplist); - simple_lock_init(&pc->pc_slock); + palloc = &pool_allocator_big[bigidx]; + } else + palloc = &pool_allocator_nointr; + } + pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl); + mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl); - pc->pc_allocfrom = NULL; - pc->pc_freeto = NULL; - pc->pc_pool = pp; + if (ctor == NULL) { + ctor = (int (*)(void *, void *, int))nullop; + } + if (dtor == NULL) { + dtor = (void (*)(void *, void *))nullop; + } + pc->pc_emptygroups = NULL; + pc->pc_fullgroups = NULL; + pc->pc_partgroups = NULL; pc->pc_ctor = ctor; pc->pc_dtor = dtor; pc->pc_arg = arg; - - pc->pc_hits = 0; + pc->pc_hits = 0; pc->pc_misses = 0; + pc->pc_nempty = 0; + pc->pc_npart = 0; + pc->pc_nfull = 0; + pc->pc_contended = 0; + pc->pc_refcnt = 0; + pc->pc_freecheck = NULL; + + if ((flags & PR_LARGECACHE) != 0) { + pc->pc_pcgsize = PCG_NOBJECTS_LARGE; + pc->pc_pcgpool = &pcg_large_pool; + } else { + pc->pc_pcgsize = PCG_NOBJECTS_NORMAL; + pc->pc_pcgpool = &pcg_normal_pool; + } - pc->pc_ngroups = 0; + /* Allocate per-CPU caches. */ + memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus)); + pc->pc_ncpu = 0; + if (ncpu < 2) { + /* XXX For sparc: boot CPU is not attached yet. */ + pool_cache_cpu_init1(curcpu(), pc); + } else { + for (CPU_INFO_FOREACH(cii, ci)) { + pool_cache_cpu_init1(ci, pc); + } + } - pc->pc_nitems = 0; + /* Add to list of all pools. */ + if (__predict_true(!cold)) + mutex_enter(&pool_head_lock); + TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) { + if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0) + break; + } + if (pc1 == NULL) + TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist); + else + TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist); + if (__predict_true(!cold)) + mutex_exit(&pool_head_lock); - simple_lock(&pp->pr_slock); - TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist); - simple_unlock(&pp->pr_slock); + membar_sync(); + pp->pr_cache = pc; } /* @@ -1511,154 +1822,136 @@ pool_cache_init(struct pool_cache *pc, s * Destroy a pool cache. */ void -pool_cache_destroy(struct pool_cache *pc) +pool_cache_destroy(pool_cache_t pc) { - struct pool *pp = pc->pc_pool; - - /* First, invalidate the entire cache. */ - pool_cache_invalidate(pc); - /* ...and remove it from the pool's cache list. */ - simple_lock(&pp->pr_slock); - TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist); - simple_unlock(&pp->pr_slock); + pool_cache_bootstrap_destroy(pc); + pool_put(&cache_pool, pc); } -static __inline void * -pcg_get(struct pool_cache_group *pcg) +/* + * pool_cache_bootstrap_destroy: + * + * Destroy a pool cache. 
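+ * Counterpart of pool_cache_bootstrap(): unlike pool_cache_destroy()
+ * above, it does not return the pool_cache itself to cache_pool,
+ * since that storage was provided by the caller.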
+ */ +void +pool_cache_bootstrap_destroy(pool_cache_t pc) { - void *object; - u_int idx; - - KASSERT(pcg->pcg_avail <= PCG_NOBJECTS); - KASSERT(pcg->pcg_avail != 0); - idx = --pcg->pcg_avail; + struct pool *pp = &pc->pc_pool; + u_int i; - KASSERT(pcg->pcg_objects[idx] != NULL); - object = pcg->pcg_objects[idx]; - pcg->pcg_objects[idx] = NULL; + /* Remove it from the global list. */ + mutex_enter(&pool_head_lock); + while (pc->pc_refcnt != 0) + cv_wait(&pool_busy, &pool_head_lock); + TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist); + mutex_exit(&pool_head_lock); - return (object); -} + /* First, invalidate the entire cache. */ + pool_cache_invalidate(pc); -static __inline void -pcg_put(struct pool_cache_group *pcg, void *object) -{ - u_int idx; + /* Disassociate it from the pool. */ + mutex_enter(&pp->pr_lock); + pp->pr_cache = NULL; + mutex_exit(&pp->pr_lock); - KASSERT(pcg->pcg_avail < PCG_NOBJECTS); - idx = pcg->pcg_avail++; + /* Destroy per-CPU data */ + for (i = 0; i < __arraycount(pc->pc_cpus); i++) + pool_cache_invalidate_cpu(pc, i); - KASSERT(pcg->pcg_objects[idx] == NULL); - pcg->pcg_objects[idx] = object; + /* Finally, destroy it. */ + mutex_destroy(&pc->pc_lock); + pool_destroy(pp); } /* - * pool_cache_get: + * pool_cache_cpu_init1: * - * Get an object from a pool cache. + * Called for each pool_cache whenever a new CPU is attached. */ -void * -pool_cache_get(struct pool_cache *pc, int flags) +static void +pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc) { - struct pool_cache_group *pcg; - void *object; + pool_cache_cpu_t *cc; + int index; - simple_lock(&pc->pc_slock); + index = ci->ci_index; - if ((pcg = pc->pc_allocfrom) == NULL) { - for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL; - pcg = TAILQ_NEXT(pcg, pcg_list)) { - if (pcg->pcg_avail != 0) { - pc->pc_allocfrom = pcg; - goto have_group; - } - } + KASSERT(index < __arraycount(pc->pc_cpus)); - /* - * No groups with any available objects. Allocate - * a new object, construct it, and return it to - * the caller. We will allocate a group, if necessary, - * when the object is freed back to the cache. - */ - pc->pc_misses++; - simple_unlock(&pc->pc_slock); - object = pool_get(pc->pc_pool, flags); - if (object != NULL && pc->pc_ctor != NULL) { - if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) { - pool_put(pc->pc_pool, object); - return (NULL); - } - } - return (object); + if ((cc = pc->pc_cpus[index]) != NULL) { + KASSERT(cc->cc_cpuindex == index); + return; } - have_group: - pc->pc_hits++; - pc->pc_nitems--; - object = pcg_get(pcg); - - if (pcg->pcg_avail == 0) - pc->pc_allocfrom = NULL; + /* + * The first CPU is 'free'. This needs to be the case for + * bootstrap - we may not be able to allocate yet. + */ + if (pc->pc_ncpu == 0) { + cc = &pc->pc_cpu0; + pc->pc_ncpu = 1; + } else { + mutex_enter(&pc->pc_lock); + pc->pc_ncpu++; + mutex_exit(&pc->pc_lock); + cc = pool_get(&cache_cpu_pool, PR_WAITOK); + } - simple_unlock(&pc->pc_slock); + cc->cc_ipl = pc->pc_pool.pr_ipl; + cc->cc_iplcookie = makeiplcookie(cc->cc_ipl); + cc->cc_cache = pc; + cc->cc_cpuindex = index; + cc->cc_hits = 0; + cc->cc_misses = 0; + cc->cc_current = __UNCONST(&pcg_dummy); + cc->cc_previous = __UNCONST(&pcg_dummy); - return (object); + pc->pc_cpus[index] = cc; } /* - * pool_cache_put: + * pool_cache_cpu_init: * - * Put an object back to the pool cache. + * Called whenever a new CPU is attached. 
*/ void -pool_cache_put(struct pool_cache *pc, void *object) +pool_cache_cpu_init(struct cpu_info *ci) { - struct pool_cache_group *pcg; + pool_cache_t pc; - simple_lock(&pc->pc_slock); + mutex_enter(&pool_head_lock); + TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) { + pc->pc_refcnt++; + mutex_exit(&pool_head_lock); - if ((pcg = pc->pc_freeto) == NULL) { - for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL; - pcg = TAILQ_NEXT(pcg, pcg_list)) { - if (pcg->pcg_avail != PCG_NOBJECTS) { - pc->pc_freeto = pcg; - goto have_group; - } - } + pool_cache_cpu_init1(ci, pc); - /* - * No empty groups to free the object to. Attempt to - * allocate one. - */ - simple_unlock(&pc->pc_slock); - pcg = pool_get(&pcgpool, PR_NOWAIT); - if (pcg != NULL) { - memset(pcg, 0, sizeof(*pcg)); - simple_lock(&pc->pc_slock); - pc->pc_ngroups++; - TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list); - if (pc->pc_freeto == NULL) - pc->pc_freeto = pcg; - goto have_group; - } - - /* - * Unable to allocate a cache group; destruct the object - * and free it back to the pool. - */ - pool_cache_destruct_object(pc, object); - return; + mutex_enter(&pool_head_lock); + pc->pc_refcnt--; + cv_broadcast(&pool_busy); } + mutex_exit(&pool_head_lock); +} + +/* + * pool_cache_reclaim: + * + * Reclaim memory from a pool cache. + */ +bool +pool_cache_reclaim(pool_cache_t pc) +{ - have_group: - pc->pc_nitems++; - pcg_put(pcg, object); + return pool_reclaim(&pc->pc_pool); +} - if (pcg->pcg_avail == PCG_NOBJECTS) - pc->pc_freeto = NULL; +static void +pool_cache_destruct_object1(pool_cache_t pc, void *object) +{ - simple_unlock(&pc->pc_slock); + (*pc->pc_dtor)(pc->pc_arg, object); + pool_put(&pc->pc_pool, object); } /* @@ -1668,45 +1961,39 @@ pool_cache_put(struct pool_cache *pc, vo * the pool. */ void -pool_cache_destruct_object(struct pool_cache *pc, void *object) +pool_cache_destruct_object(pool_cache_t pc, void *object) { - if (pc->pc_dtor != NULL) - (*pc->pc_dtor)(pc->pc_arg, object); - pool_put(pc->pc_pool, object); + FREECHECK_IN(&pc->pc_freecheck, object); + + pool_cache_destruct_object1(pc, object); } /* - * pool_cache_do_invalidate: + * pool_cache_invalidate_groups: * - * This internal function implements pool_cache_invalidate() and - * pool_cache_reclaim(). + * Invalidate a chain of groups and destruct all objects. 
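+ * The chain is expected to have been detached from its cache
+ * already, so the groups can be walked and freed without holding
+ * pc_lock.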
*/ static void -pool_cache_do_invalidate(struct pool_cache *pc, int free_groups, - void (*putit)(struct pool *, void *, const char *, long)) +pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg) { - struct pool_cache_group *pcg, *npcg; void *object; + pcg_t *next; + int i; + + for (; pcg != NULL; pcg = next) { + next = pcg->pcg_next; - for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL; - pcg = npcg) { - npcg = TAILQ_NEXT(pcg, pcg_list); - while (pcg->pcg_avail != 0) { - pc->pc_nitems--; - object = pcg_get(pcg); - if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg) - pc->pc_allocfrom = NULL; - if (pc->pc_dtor != NULL) - (*pc->pc_dtor)(pc->pc_arg, object); - (*putit)(pc->pc_pool, object, __FILE__, __LINE__); - } - if (free_groups) { - pc->pc_ngroups--; - TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list); - if (pc->pc_freeto == pcg) - pc->pc_freeto = NULL; - pool_put(&pcgpool, pcg); + for (i = 0; i < pcg->pcg_avail; i++) { + object = pcg->pcg_objects[i].pcgo_va; + pool_cache_destruct_object1(pc, object); + } + + if (pcg->pcg_size == PCG_NOBJECTS_LARGE) { + pool_put(&pcg_large_pool, pcg); + } else { + KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL); + pool_put(&pcg_normal_pool, pcg); } } } @@ -1715,27 +2002,1047 @@ pool_cache_do_invalidate(struct pool_cac * pool_cache_invalidate: * * Invalidate a pool cache (destruct and release all of the - * cached objects). + * cached objects). Does not reclaim objects from the pool. + * + * Note: For pool caches that provide constructed objects, there + * is an assumption that another level of synchronization is occurring + * between the input to the constructor and the cache invalidation. + * + * Invalidation is a costly process and should not be called from + * interrupt context. */ void -pool_cache_invalidate(struct pool_cache *pc) +pool_cache_invalidate(pool_cache_t pc) { + uint64_t where; + pcg_t *full, *empty, *part; + + KASSERT(!cpu_intr_p() && !cpu_softintr_p()); - simple_lock(&pc->pc_slock); - pool_cache_do_invalidate(pc, 0, _pool_put); - simple_unlock(&pc->pc_slock); + if (ncpu < 2 || !mp_online) { + /* + * We might be called early enough in the boot process + * for the CPU data structures to not be fully initialized. + * In this case, transfer the content of the local CPU's + * cache back into global cache as only this CPU is currently + * running. + */ + pool_cache_transfer(pc); + } else { + /* + * Signal all CPUs that they must transfer their local + * cache back to the global pool then wait for the xcall to + * complete. + */ + where = xc_broadcast(0, (xcfunc_t)pool_cache_transfer, + pc, NULL); + xc_wait(where); + } + + /* Empty pool caches, then invalidate objects */ + mutex_enter(&pc->pc_lock); + full = pc->pc_fullgroups; + empty = pc->pc_emptygroups; + part = pc->pc_partgroups; + pc->pc_fullgroups = NULL; + pc->pc_emptygroups = NULL; + pc->pc_partgroups = NULL; + pc->pc_nfull = 0; + pc->pc_nempty = 0; + pc->pc_npart = 0; + mutex_exit(&pc->pc_lock); + + pool_cache_invalidate_groups(pc, full); + pool_cache_invalidate_groups(pc, empty); + pool_cache_invalidate_groups(pc, part); } /* - * pool_cache_reclaim: + * pool_cache_invalidate_cpu: + * + * Invalidate all CPU-bound cached objects in pool cache, the CPU being + * identified by its associated index. + * It is caller's responsibility to ensure that no operation is + * taking place on this pool cache while doing this invalidation. 
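+ * (pool_cache_bootstrap_destroy() relies on this to drain each CPU's
+ * groups and to release the pool_cache_cpu structure itself.)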
+ * WARNING: as no inter-CPU locking is enforced, trying to invalidate + * pool cached objects from a CPU different from the one currently running + * may result in an undefined behaviour. + */ +static void +pool_cache_invalidate_cpu(pool_cache_t pc, u_int index) +{ + pool_cache_cpu_t *cc; + pcg_t *pcg; + + if ((cc = pc->pc_cpus[index]) == NULL) + return; + + if ((pcg = cc->cc_current) != &pcg_dummy) { + pcg->pcg_next = NULL; + pool_cache_invalidate_groups(pc, pcg); + } + if ((pcg = cc->cc_previous) != &pcg_dummy) { + pcg->pcg_next = NULL; + pool_cache_invalidate_groups(pc, pcg); + } + if (cc != &pc->pc_cpu0) + pool_put(&cache_cpu_pool, cc); + +} + +void +pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg) +{ + + pool_set_drain_hook(&pc->pc_pool, fn, arg); +} + +void +pool_cache_setlowat(pool_cache_t pc, int n) +{ + + pool_setlowat(&pc->pc_pool, n); +} + +void +pool_cache_sethiwat(pool_cache_t pc, int n) +{ + + pool_sethiwat(&pc->pc_pool, n); +} + +void +pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap) +{ + + pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap); +} + +static bool __noinline +pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp, + paddr_t *pap, int flags) +{ + pcg_t *pcg, *cur; + uint64_t ncsw; + pool_cache_t pc; + void *object; + + KASSERT(cc->cc_current->pcg_avail == 0); + KASSERT(cc->cc_previous->pcg_avail == 0); + + pc = cc->cc_cache; + cc->cc_misses++; + + /* + * Nothing was available locally. Try and grab a group + * from the cache. + */ + if (__predict_false(!mutex_tryenter(&pc->pc_lock))) { + ncsw = curlwp->l_ncsw; + mutex_enter(&pc->pc_lock); + pc->pc_contended++; + + /* + * If we context switched while locking, then + * our view of the per-CPU data is invalid: + * retry. + */ + if (curlwp->l_ncsw != ncsw) { + mutex_exit(&pc->pc_lock); + return true; + } + } + + if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) { + /* + * If there's a full group, release our empty + * group back to the cache. Install the full + * group as cc_current and return. + */ + if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) { + KASSERT(cur->pcg_avail == 0); + cur->pcg_next = pc->pc_emptygroups; + pc->pc_emptygroups = cur; + pc->pc_nempty++; + } + KASSERT(pcg->pcg_avail == pcg->pcg_size); + cc->cc_current = pcg; + pc->pc_fullgroups = pcg->pcg_next; + pc->pc_hits++; + pc->pc_nfull--; + mutex_exit(&pc->pc_lock); + return true; + } + + /* + * Nothing available locally or in cache. Take the slow + * path: fetch a new object from the pool and construct + * it. + */ + pc->pc_misses++; + mutex_exit(&pc->pc_lock); + splx(s); + + object = pool_get(&pc->pc_pool, flags); + *objectp = object; + if (__predict_false(object == NULL)) { + KASSERT((flags & (PR_WAITOK|PR_NOWAIT)) == PR_NOWAIT); + return false; + } + + if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) { + pool_put(&pc->pc_pool, object); + *objectp = NULL; + return false; + } + + KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) & + (pc->pc_pool.pr_align - 1)) == 0); + + if (pap != NULL) { +#ifdef POOL_VTOPHYS + *pap = POOL_VTOPHYS(object); +#else + *pap = POOL_PADDR_INVALID; +#endif + } + + FREECHECK_OUT(&pc->pc_freecheck, object); + pool_redzone_fill(&pc->pc_pool, object); + return false; +} + +/* + * pool_cache_get{,_paddr}: * - * Reclaim a pool cache for pool_reclaim(). + * Get an object from a pool cache (optionally returning + * the physical address of the object). 
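+ *
+ * A minimal sketch of the usual call pattern, for a hypothetical
+ * 'foo_cache' (not part of this file):
+ *
+ *	struct foo *foo;
+ *
+ *	foo = pool_cache_get(foo_cache, PR_WAITOK);
+ *	if (foo == NULL)
+ *		...even PR_WAITOK can fail if the constructor fails...
+ *	...use foo...
+ *	pool_cache_put(foo_cache, foo);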
+ */ +void * +pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap) +{ + pool_cache_cpu_t *cc; + pcg_t *pcg; + void *object; + int s; + + KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) || + (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL), + "%s: [%s] is IPL_NONE, but called from interrupt context", + __func__, pc->pc_pool.pr_wchan); + + if (flags & PR_WAITOK) { + ASSERT_SLEEPABLE(); + } + + /* Lock out interrupts and disable preemption. */ + s = splvm(); + while (/* CONSTCOND */ true) { + /* Try and allocate an object from the current group. */ + cc = pc->pc_cpus[curcpu()->ci_index]; + KASSERT(cc->cc_cache == pc); + pcg = cc->cc_current; + if (__predict_true(pcg->pcg_avail > 0)) { + object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va; + if (__predict_false(pap != NULL)) + *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa; +#if defined(DIAGNOSTIC) + pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL; + KASSERT(pcg->pcg_avail < pcg->pcg_size); + KASSERT(object != NULL); +#endif + cc->cc_hits++; + splx(s); + FREECHECK_OUT(&pc->pc_freecheck, object); + pool_redzone_fill(&pc->pc_pool, object); + return object; + } + + /* + * That failed. If the previous group isn't empty, swap + * it with the current group and allocate from there. + */ + pcg = cc->cc_previous; + if (__predict_true(pcg->pcg_avail > 0)) { + cc->cc_previous = cc->cc_current; + cc->cc_current = pcg; + continue; + } + + /* + * Can't allocate from either group: try the slow path. + * If get_slow() allocated an object for us, or if + * no more objects are available, it will return false. + * Otherwise, we need to retry. + */ + if (!pool_cache_get_slow(cc, s, &object, pap, flags)) + break; + } + + /* + * We would like to KASSERT(object || (flags & PR_NOWAIT)), but + * pool_cache_get can fail even in the PR_WAITOK case, if the + * constructor fails. + */ + return object; +} + +static bool __noinline +pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object) +{ + struct lwp *l = curlwp; + pcg_t *pcg, *cur; + uint64_t ncsw; + pool_cache_t pc; + + KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size); + KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size); + + pc = cc->cc_cache; + pcg = NULL; + cc->cc_misses++; + ncsw = l->l_ncsw; + + /* + * If there are no empty groups in the cache then allocate one + * while still unlocked. + */ + if (__predict_false(pc->pc_emptygroups == NULL)) { + if (__predict_true(!pool_cache_disable)) { + pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT); + } + /* + * If pool_get() blocked, then our view of + * the per-CPU data is invalid: retry. + */ + if (__predict_false(l->l_ncsw != ncsw)) { + if (pcg != NULL) { + pool_put(pc->pc_pcgpool, pcg); + } + return true; + } + if (__predict_true(pcg != NULL)) { + pcg->pcg_avail = 0; + pcg->pcg_size = pc->pc_pcgsize; + } + } + + /* Lock the cache. */ + if (__predict_false(!mutex_tryenter(&pc->pc_lock))) { + mutex_enter(&pc->pc_lock); + pc->pc_contended++; + + /* + * If we context switched while locking, then our view of + * the per-CPU data is invalid: retry. + */ + if (__predict_false(l->l_ncsw != ncsw)) { + mutex_exit(&pc->pc_lock); + if (pcg != NULL) { + pool_put(pc->pc_pcgpool, pcg); + } + return true; + } + } + + /* If there are no empty groups in the cache then allocate one. */ + if (pcg == NULL && pc->pc_emptygroups != NULL) { + pcg = pc->pc_emptygroups; + pc->pc_emptygroups = pcg->pcg_next; + pc->pc_nempty--; + } + + /* + * If there's a empty group, release our full group back + * to the cache. 
Install the empty group to the local CPU + * and return. + */ + if (pcg != NULL) { + KASSERT(pcg->pcg_avail == 0); + if (__predict_false(cc->cc_previous == &pcg_dummy)) { + cc->cc_previous = pcg; + } else { + cur = cc->cc_current; + if (__predict_true(cur != &pcg_dummy)) { + KASSERT(cur->pcg_avail == cur->pcg_size); + cur->pcg_next = pc->pc_fullgroups; + pc->pc_fullgroups = cur; + pc->pc_nfull++; + } + cc->cc_current = pcg; + } + pc->pc_hits++; + mutex_exit(&pc->pc_lock); + return true; + } + + /* + * Nothing available locally or in cache, and we didn't + * allocate an empty group. Take the slow path and destroy + * the object here and now. + */ + pc->pc_misses++; + mutex_exit(&pc->pc_lock); + splx(s); + pool_cache_destruct_object(pc, object); + + return false; +} + +/* + * pool_cache_put{,_paddr}: + * + * Put an object back to the pool cache (optionally caching the + * physical address of the object). + */ +void +pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa) +{ + pool_cache_cpu_t *cc; + pcg_t *pcg; + int s; + + KASSERT(object != NULL); + pool_redzone_check(&pc->pc_pool, object); + FREECHECK_IN(&pc->pc_freecheck, object); + + /* Lock out interrupts and disable preemption. */ + s = splvm(); + while (/* CONSTCOND */ true) { + /* If the current group isn't full, release it there. */ + cc = pc->pc_cpus[curcpu()->ci_index]; + KASSERT(cc->cc_cache == pc); + pcg = cc->cc_current; + if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) { + pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object; + pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa; + pcg->pcg_avail++; + cc->cc_hits++; + splx(s); + return; + } + + /* + * That failed. If the previous group isn't full, swap + * it with the current group and try again. + */ + pcg = cc->cc_previous; + if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) { + cc->cc_previous = cc->cc_current; + cc->cc_current = pcg; + continue; + } + + /* + * Can't free to either group: try the slow path. + * If put_slow() releases the object for us, it + * will return false. Otherwise we need to retry. + */ + if (!pool_cache_put_slow(cc, s, object)) + break; + } +} + +/* + * pool_cache_transfer: + * + * Transfer objects from the per-CPU cache to the global cache. + * Run within a cross-call thread. */ static void -pool_cache_reclaim(struct pool_cache *pc) +pool_cache_transfer(pool_cache_t pc) +{ + pool_cache_cpu_t *cc; + pcg_t *prev, *cur, **list; + int s; + + s = splvm(); + mutex_enter(&pc->pc_lock); + cc = pc->pc_cpus[curcpu()->ci_index]; + cur = cc->cc_current; + cc->cc_current = __UNCONST(&pcg_dummy); + prev = cc->cc_previous; + cc->cc_previous = __UNCONST(&pcg_dummy); + if (cur != &pcg_dummy) { + if (cur->pcg_avail == cur->pcg_size) { + list = &pc->pc_fullgroups; + pc->pc_nfull++; + } else if (cur->pcg_avail == 0) { + list = &pc->pc_emptygroups; + pc->pc_nempty++; + } else { + list = &pc->pc_partgroups; + pc->pc_npart++; + } + cur->pcg_next = *list; + *list = cur; + } + if (prev != &pcg_dummy) { + if (prev->pcg_avail == prev->pcg_size) { + list = &pc->pc_fullgroups; + pc->pc_nfull++; + } else if (prev->pcg_avail == 0) { + list = &pc->pc_emptygroups; + pc->pc_nempty++; + } else { + list = &pc->pc_partgroups; + pc->pc_npart++; + } + prev->pcg_next = *list; + *list = prev; + } + mutex_exit(&pc->pc_lock); + splx(s); +} + +/* + * Pool backend allocators. + * + * Each pool has a backend allocator that handles allocation, deallocation, + * and any additional draining that might be needed. 
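+ * An allocator is chosen via the 'palloc' argument to pool_init();
+ * passing NULL selects a suitable default (as pool_cache_bootstrap()
+ * above does for IPL_NONE pools, falling back to
+ * pool_allocator_nointr or pool_allocator_big[]).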
+ * + * We provide two standard allocators: + * + * pool_allocator_kmem - the default when no allocator is specified + * + * pool_allocator_nointr - used for pools that will not be accessed + * in interrupt context. + */ +void *pool_page_alloc(struct pool *, int); +void pool_page_free(struct pool *, void *); + +#ifdef POOL_SUBPAGE +struct pool_allocator pool_allocator_kmem_fullpage = { + .pa_alloc = pool_page_alloc, + .pa_free = pool_page_free, + .pa_pagesz = 0 +}; +#else +struct pool_allocator pool_allocator_kmem = { + .pa_alloc = pool_page_alloc, + .pa_free = pool_page_free, + .pa_pagesz = 0 +}; +#endif + +#ifdef POOL_SUBPAGE +struct pool_allocator pool_allocator_nointr_fullpage = { + .pa_alloc = pool_page_alloc, + .pa_free = pool_page_free, + .pa_pagesz = 0 +}; +#else +struct pool_allocator pool_allocator_nointr = { + .pa_alloc = pool_page_alloc, + .pa_free = pool_page_free, + .pa_pagesz = 0 +}; +#endif + +#ifdef POOL_SUBPAGE +void *pool_subpage_alloc(struct pool *, int); +void pool_subpage_free(struct pool *, void *); + +struct pool_allocator pool_allocator_kmem = { + .pa_alloc = pool_subpage_alloc, + .pa_free = pool_subpage_free, + .pa_pagesz = POOL_SUBPAGE +}; + +struct pool_allocator pool_allocator_nointr = { + .pa_alloc = pool_subpage_alloc, + .pa_free = pool_subpage_free, + .pa_pagesz = POOL_SUBPAGE +}; +#endif /* POOL_SUBPAGE */ + +struct pool_allocator pool_allocator_big[] = { + { + .pa_alloc = pool_page_alloc, + .pa_free = pool_page_free, + .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 0), + }, + { + .pa_alloc = pool_page_alloc, + .pa_free = pool_page_free, + .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 1), + }, + { + .pa_alloc = pool_page_alloc, + .pa_free = pool_page_free, + .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 2), + }, + { + .pa_alloc = pool_page_alloc, + .pa_free = pool_page_free, + .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 3), + }, + { + .pa_alloc = pool_page_alloc, + .pa_free = pool_page_free, + .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 4), + }, + { + .pa_alloc = pool_page_alloc, + .pa_free = pool_page_free, + .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 5), + }, + { + .pa_alloc = pool_page_alloc, + .pa_free = pool_page_free, + .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 6), + }, + { + .pa_alloc = pool_page_alloc, + .pa_free = pool_page_free, + .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 7), + } +}; + +static int +pool_bigidx(size_t size) +{ + int i; + + for (i = 0; i < __arraycount(pool_allocator_big); i++) { + if (1 << (i + POOL_ALLOCATOR_BIG_BASE) >= size) + return i; + } + panic("pool item size %zu too large, use a custom allocator", size); +} + +static void * +pool_allocator_alloc(struct pool *pp, int flags) +{ + struct pool_allocator *pa = pp->pr_alloc; + void *res; + + res = (*pa->pa_alloc)(pp, flags); + if (res == NULL && (flags & PR_WAITOK) == 0) { + /* + * We only run the drain hook here if PR_NOWAIT. + * In other cases, the hook will be run in + * pool_reclaim(). + */ + if (pp->pr_drain_hook != NULL) { + (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags); + res = (*pa->pa_alloc)(pp, flags); + } + } + return res; +} + +static void +pool_allocator_free(struct pool *pp, void *v) +{ + struct pool_allocator *pa = pp->pr_alloc; + + (*pa->pa_free)(pp, v); +} + +void * +pool_page_alloc(struct pool *pp, int flags) +{ + const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP; + vmem_addr_t va; + int ret; + + ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz, + vflags | VM_INSTANTFIT, &va); + + return ret ? 
NULL : (void *)va; +} + +void +pool_page_free(struct pool *pp, void *v) +{ + + uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz); +} + +static void * +pool_page_alloc_meta(struct pool *pp, int flags) +{ + const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP; + vmem_addr_t va; + int ret; + + ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz, + vflags | VM_INSTANTFIT, &va); + + return ret ? NULL : (void *)va; +} + +static void +pool_page_free_meta(struct pool *pp, void *v) +{ + + vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz); +} + +#ifdef POOL_REDZONE +#if defined(_LP64) +# define PRIME 0x9e37fffffffc0000UL +#else /* defined(_LP64) */ +# define PRIME 0x9e3779b1 +#endif /* defined(_LP64) */ +#define STATIC_BYTE 0xFE +CTASSERT(POOL_REDZONE_SIZE > 1); + +static inline uint8_t +pool_pattern_generate(const void *p) +{ + return (uint8_t)(((uintptr_t)p) * PRIME + >> ((sizeof(uintptr_t) - sizeof(uint8_t))) * CHAR_BIT); +} + +static void +pool_redzone_init(struct pool *pp, size_t requested_size) +{ + size_t nsz; + + if (pp->pr_roflags & PR_NOTOUCH) { + pp->pr_reqsize = 0; + pp->pr_redzone = false; + return; + } + + /* + * We may have extended the requested size earlier; check if + * there's naturally space in the padding for a red zone. + */ + if (pp->pr_size - requested_size >= POOL_REDZONE_SIZE) { + pp->pr_reqsize = requested_size; + pp->pr_redzone = true; + return; + } + + /* + * No space in the natural padding; check if we can extend a + * bit the size of the pool. + */ + nsz = roundup(pp->pr_size + POOL_REDZONE_SIZE, pp->pr_align); + if (nsz <= pp->pr_alloc->pa_pagesz) { + /* Ok, we can */ + pp->pr_size = nsz; + pp->pr_reqsize = requested_size; + pp->pr_redzone = true; + } else { + /* No space for a red zone... snif :'( */ + pp->pr_reqsize = 0; + pp->pr_redzone = false; + printf("pool redzone disabled for '%s'\n", pp->pr_wchan); + } +} + +static void +pool_redzone_fill(struct pool *pp, void *p) +{ + uint8_t *cp, pat; + const uint8_t *ep; + + if (!pp->pr_redzone) + return; + + cp = (uint8_t *)p + pp->pr_reqsize; + ep = cp + POOL_REDZONE_SIZE; + + /* + * We really don't want the first byte of the red zone to be '\0'; + * an off-by-one in a string may not be properly detected. + */ + pat = pool_pattern_generate(cp); + *cp = (pat == '\0') ? STATIC_BYTE: pat; + cp++; + + while (cp < ep) { + *cp = pool_pattern_generate(cp); + cp++; + } +} + +static void +pool_redzone_check(struct pool *pp, void *p) +{ + uint8_t *cp, pat, expected; + const uint8_t *ep; + + if (!pp->pr_redzone) + return; + + cp = (uint8_t *)p + pp->pr_reqsize; + ep = cp + POOL_REDZONE_SIZE; + + pat = pool_pattern_generate(cp); + expected = (pat == '\0') ? STATIC_BYTE: pat; + if (expected != *cp) { + panic("%s: %p: 0x%02x != 0x%02x\n", + __func__, cp, *cp, expected); + } + cp++; + + while (cp < ep) { + expected = pool_pattern_generate(cp); + if (*cp != expected) { + panic("%s: %p: 0x%02x != 0x%02x\n", + __func__, cp, *cp, expected); + } + cp++; + } +} + +#endif /* POOL_REDZONE */ + + +#ifdef POOL_SUBPAGE +/* Sub-page allocator, for machines with large hardware pages. 
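+ * Sub-pages are handed out as items of the internal 'psppool' pool.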
*/ +void * +pool_subpage_alloc(struct pool *pp, int flags) +{ + return pool_get(&psppool, flags); +} + +void +pool_subpage_free(struct pool *pp, void *v) +{ + pool_put(&psppool, v); +} + +#endif /* POOL_SUBPAGE */ + +#if defined(DDB) +static bool +pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr) +{ + + return (uintptr_t)ph->ph_page <= addr && + addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz; +} + +static bool +pool_in_item(struct pool *pp, void *item, uintptr_t addr) +{ + + return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size; +} + +static bool +pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr) +{ + int i; + + if (pcg == NULL) { + return false; + } + for (i = 0; i < pcg->pcg_avail; i++) { + if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) { + return true; + } + } + return false; +} + +static bool +pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr) +{ + + if ((pp->pr_roflags & PR_NOTOUCH) != 0) { + unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr); + pool_item_bitmap_t *bitmap = + ph->ph_bitmap + (idx / BITMAP_SIZE); + pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK); + + return (*bitmap & mask) == 0; + } else { + struct pool_item *pi; + + LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) { + if (pool_in_item(pp, pi, addr)) { + return false; + } + } + return true; + } +} + +void +pool_whatis(uintptr_t addr, void (*pr)(const char *, ...)) +{ + struct pool *pp; + + TAILQ_FOREACH(pp, &pool_head, pr_poollist) { + struct pool_item_header *ph; + uintptr_t item; + bool allocated = true; + bool incache = false; + bool incpucache = false; + char cpucachestr[32]; + + if ((pp->pr_roflags & PR_PHINPAGE) != 0) { + LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) { + if (pool_in_page(pp, ph, addr)) { + goto found; + } + } + LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) { + if (pool_in_page(pp, ph, addr)) { + allocated = + pool_allocated(pp, ph, addr); + goto found; + } + } + LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) { + if (pool_in_page(pp, ph, addr)) { + allocated = false; + goto found; + } + } + continue; + } else { + ph = pr_find_pagehead_noalign(pp, (void *)addr); + if (ph == NULL || !pool_in_page(pp, ph, addr)) { + continue; + } + allocated = pool_allocated(pp, ph, addr); + } +found: + if (allocated && pp->pr_cache) { + pool_cache_t pc = pp->pr_cache; + struct pool_cache_group *pcg; + int i; + + for (pcg = pc->pc_fullgroups; pcg != NULL; + pcg = pcg->pcg_next) { + if (pool_in_cg(pp, pcg, addr)) { + incache = true; + goto print; + } + } + for (i = 0; i < __arraycount(pc->pc_cpus); i++) { + pool_cache_cpu_t *cc; + + if ((cc = pc->pc_cpus[i]) == NULL) { + continue; + } + if (pool_in_cg(pp, cc->cc_current, addr) || + pool_in_cg(pp, cc->cc_previous, addr)) { + struct cpu_info *ci = + cpu_lookup(i); + + incpucache = true; + snprintf(cpucachestr, + sizeof(cpucachestr), + "cached by CPU %u", + ci->ci_index); + goto print; + } + } + } +print: + item = (uintptr_t)ph->ph_page + ph->ph_off; + item = item + rounddown(addr - item, pp->pr_size); + (*pr)("%p is %p+%zu in POOL '%s' (%s)\n", + (void *)addr, item, (size_t)(addr - item), + pp->pr_wchan, + incpucache ? cpucachestr : + incache ? "cached" : allocated ? 
"allocated" : "free"); + } +} +#endif /* defined(DDB) */ + +static int +pool_sysctl(SYSCTLFN_ARGS) +{ + struct pool_sysctl data; + struct pool *pp; + struct pool_cache *pc; + pool_cache_cpu_t *cc; + int error; + size_t i, written; + + if (oldp == NULL) { + *oldlenp = 0; + TAILQ_FOREACH(pp, &pool_head, pr_poollist) + *oldlenp += sizeof(data); + return 0; + } + + memset(&data, 0, sizeof(data)); + error = 0; + written = 0; + TAILQ_FOREACH(pp, &pool_head, pr_poollist) { + if (written + sizeof(data) > *oldlenp) + break; + strlcpy(data.pr_wchan, pp->pr_wchan, sizeof(data.pr_wchan)); + data.pr_pagesize = pp->pr_alloc->pa_pagesz; + data.pr_flags = pp->pr_roflags | pp->pr_flags; +#define COPY(field) data.field = pp->field + COPY(pr_size); + + COPY(pr_itemsperpage); + COPY(pr_nitems); + COPY(pr_nout); + COPY(pr_hardlimit); + COPY(pr_npages); + COPY(pr_minpages); + COPY(pr_maxpages); + + COPY(pr_nget); + COPY(pr_nfail); + COPY(pr_nput); + COPY(pr_npagealloc); + COPY(pr_npagefree); + COPY(pr_hiwat); + COPY(pr_nidle); +#undef COPY + + data.pr_cache_nmiss_pcpu = 0; + data.pr_cache_nhit_pcpu = 0; + if (pp->pr_cache) { + pc = pp->pr_cache; + data.pr_cache_meta_size = pc->pc_pcgsize; + data.pr_cache_nfull = pc->pc_nfull; + data.pr_cache_npartial = pc->pc_npart; + data.pr_cache_nempty = pc->pc_nempty; + data.pr_cache_ncontended = pc->pc_contended; + data.pr_cache_nmiss_global = pc->pc_misses; + data.pr_cache_nhit_global = pc->pc_hits; + for (i = 0; i < pc->pc_ncpu; ++i) { + cc = pc->pc_cpus[i]; + if (cc == NULL) + continue; + data.pr_cache_nmiss_pcpu += cc->cc_misses; + data.pr_cache_nhit_pcpu += cc->cc_hits; + } + } else { + data.pr_cache_meta_size = 0; + data.pr_cache_nfull = 0; + data.pr_cache_npartial = 0; + data.pr_cache_nempty = 0; + data.pr_cache_ncontended = 0; + data.pr_cache_nmiss_global = 0; + data.pr_cache_nhit_global = 0; + } + + error = sysctl_copyout(l, &data, oldp, sizeof(data)); + if (error) + break; + written += sizeof(data); + oldp = (char *)oldp + sizeof(data); + } + + *oldlenp = written; + return error; +} + +SYSCTL_SETUP(sysctl_pool_setup, "sysctl kern.pool setup") { + const struct sysctlnode *rnode = NULL; - simple_lock(&pc->pc_slock); - pool_cache_do_invalidate(pc, 1, pool_do_put); - simple_unlock(&pc->pc_slock); + sysctl_createv(clog, 0, NULL, &rnode, + CTLFLAG_PERMANENT, + CTLTYPE_STRUCT, "pool", + SYSCTL_DESCR("Get pool statistics"), + pool_sysctl, 0, NULL, 0, + CTL_KERN, CTL_CREATE, CTL_EOL); }