Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. =================================================================== RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v: warning: Unknown phrases like `commitid ...;' are present. retrieving revision 1.110.2.2 retrieving revision 1.129.12.5 diff -u -p -r1.110.2.2 -r1.129.12.5 --- src/sys/kern/subr_pool.c 2006/03/01 09:28:46 1.110.2.2 +++ src/sys/kern/subr_pool.c 2007/11/21 21:56:03 1.129.12.5 @@ -1,12 +1,12 @@ -/* $NetBSD: subr_pool.c,v 1.110.2.2 2006/03/01 09:28:46 yamt Exp $ */ +/* $NetBSD: subr_pool.c,v 1.129.12.5 2007/11/21 21:56:03 joerg Exp $ */ /*- - * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc. + * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace - * Simulation Facility, NASA Ames Research Center. + * Simulation Facility, NASA Ames Research Center, and by Andrew Doran. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -38,7 +38,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.110.2.2 2006/03/01 09:28:46 yamt Exp $"); +__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.129.12.5 2007/11/21 21:56:03 joerg Exp $"); #include "opt_pool.h" #include "opt_poollog.h" @@ -46,6 +46,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c, #include #include +#include #include #include #include @@ -53,6 +54,10 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c, #include #include #include +#include +#include +#include +#include #include @@ -72,22 +77,31 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c, /* List of all pools */ LIST_HEAD(,pool) pool_head = LIST_HEAD_INITIALIZER(pool_head); +/* List of all caches. */ +LIST_HEAD(,pool_cache) pool_cache_head = + LIST_HEAD_INITIALIZER(pool_cache_head); + /* Private pool for page header structures */ #define PHPOOL_MAX 8 static struct pool phpool[PHPOOL_MAX]; -#define PHPOOL_FREELIST_NELEM(idx) (((idx) == 0) ? 0 : (1 << (idx))) +#define PHPOOL_FREELIST_NELEM(idx) \ + (((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx))) #ifdef POOL_SUBPAGE /* Pool of subpages for use by normal pools. */ static struct pool psppool; #endif +static SLIST_HEAD(, pool_allocator) pa_deferinitq = + SLIST_HEAD_INITIALIZER(pa_deferinitq); + static void *pool_page_alloc_meta(struct pool *, int); static void pool_page_free_meta(struct pool *, void *); /* allocator for pool metadata */ -static struct pool_allocator pool_allocator_meta = { - pool_page_alloc_meta, pool_page_free_meta +struct pool_allocator pool_allocator_meta = { + pool_page_alloc_meta, pool_page_free_meta, + .pa_backingmapptr = &kmem_map, }; /* # of seconds to retain page after last use */ @@ -96,10 +110,13 @@ int pool_inactive_time = 10; /* Next candidate for drainage (see pool_drain()) */ static struct pool *drainpp; -/* This spin lock protects both pool_head and drainpp. */ -struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER; - -typedef uint8_t pool_item_freelist_t; +/* This lock protects both pool_head and drainpp. 
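+ *
+ * pool_busy pairs with pool_head_lock: pool_destroy() and
+ * pool_cache_destroy() wait on it while a pool's reference count is
+ * non-zero.  A sketch of the reference protocol, as used by
+ * pool_drain_start()/pool_drain_end() and pool_cache_cpu_init() below:
+ *
+ *	mutex_enter(&pool_head_lock);
+ *	pp->pr_refcnt++;		(pin pp across unlocked work)
+ *	mutex_exit(&pool_head_lock);
+ *	...
+ *	mutex_enter(&pool_head_lock);
+ *	pp->pr_refcnt--;
+ *	cv_broadcast(&pool_busy);	(wake any waiting destructor)
+ *	mutex_exit(&pool_head_lock);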
*/ +static kmutex_t pool_head_lock; +static kcondvar_t pool_busy; + +typedef uint32_t pool_item_bitmap_t; +#define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t)) +#define BITMAP_MASK (BITMAP_SIZE - 1) struct pool_item_header { /* Page headers */ @@ -107,8 +124,9 @@ struct pool_item_header { ph_pagelist; /* pool page list */ SPLAY_ENTRY(pool_item_header) ph_node; /* Off-page page headers */ - caddr_t ph_page; /* this page's address */ + void * ph_page; /* this page's address */ struct timeval ph_time; /* last referenced */ + uint16_t ph_nmissing; /* # of chunks in use */ union { /* !PR_NOTOUCH */ struct { @@ -117,27 +135,20 @@ struct pool_item_header { } phu_normal; /* PR_NOTOUCH */ struct { - uint16_t - phu_off; /* start offset in page */ - pool_item_freelist_t - phu_firstfree; /* first free item */ - /* - * XXX it might be better to use - * a simple bitmap and ffs(3) - */ + uint16_t phu_off; /* start offset in page */ + pool_item_bitmap_t phu_bitmap[]; } phu_notouch; } ph_u; - uint16_t ph_nmissing; /* # of chunks in use */ }; #define ph_itemlist ph_u.phu_normal.phu_itemlist #define ph_off ph_u.phu_notouch.phu_off -#define ph_firstfree ph_u.phu_notouch.phu_firstfree +#define ph_bitmap ph_u.phu_notouch.phu_bitmap struct pool_item { #ifdef DIAGNOSTIC u_int pi_magic; #endif -#define PI_MAGIC 0xdeadbeefU +#define PI_MAGIC 0xdeaddeadU /* Other entries use only this list entry */ LIST_ENTRY(pool_item) pi_list; }; @@ -153,38 +164,43 @@ struct pool_item { * needless object construction/destruction; it is deferred until absolutely * necessary. * - * Caches are grouped into cache groups. Each cache group references - * up to 16 constructed objects. When a cache allocates an object - * from the pool, it calls the object's constructor and places it into - * a cache group. When a cache group frees an object back to the pool, - * it first calls the object's destructor. This allows the object to - * persist in constructed form while freed to the cache. - * - * Multiple caches may exist for each pool. This allows a single - * object type to have multiple constructed forms. The pool references - * each cache, so that when a pool is drained by the pagedaemon, it can - * drain each individual cache as well. Each time a cache is drained, - * the most idle cache group is freed to the pool in its entirety. + * Caches are grouped into cache groups. Each cache group references up + * to PCG_NUMOBJECTS constructed objects. When a cache allocates an + * object from the pool, it calls the object's constructor and places it + * into a cache group. When a cache group frees an object back to the + * pool, it first calls the object's destructor. This allows the object + * to persist in constructed form while freed to the cache. + * + * The pool references each cache, so that when a pool is drained by the + * pagedaemon, it can drain each individual cache as well. Each time a + * cache is drained, the most idle cache group is freed to the pool in + * its entirety. * * Pool caches are layed on top of pools. By layering them, we can avoid * the complexity of cache management for pools which would not benefit * from it. */ -/* The cache group pool. 
*/ static struct pool pcgpool; +static struct pool cache_pool; +static struct pool cache_cpu_pool; -static void pool_cache_reclaim(struct pool_cache *, struct pool_pagelist *, - struct pool_cache_grouplist *); -static void pcg_grouplist_free(struct pool_cache_grouplist *); +static pool_cache_cpu_t *pool_cache_put_slow(pool_cache_cpu_t *, int *, + void *, paddr_t); +static pool_cache_cpu_t *pool_cache_get_slow(pool_cache_cpu_t *, int *, + void **, paddr_t *, int); +static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t); +static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *); +static void pool_cache_xcall(pool_cache_t); static int pool_catchup(struct pool *); -static void pool_prime_page(struct pool *, caddr_t, +static void pool_prime_page(struct pool *, void *, struct pool_item_header *); static void pool_update_curpage(struct pool *); -void *pool_allocator_alloc(struct pool *, int); -void pool_allocator_free(struct pool *, void *); +static int pool_grow(struct pool *, int); +static void *pool_allocator_alloc(struct pool *, int); +static void pool_allocator_free(struct pool *, void *); static void pool_print_pagelist(struct pool *, struct pool_pagelist *, void (*)(const char *, ...)); @@ -312,57 +328,83 @@ pr_enter_check(struct pool *pp, void (*p #define pr_enter_check(pp, pr) #endif /* POOL_DIAGNOSTIC */ -static inline int +static inline unsigned int pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph, const void *v) { const char *cp = v; - int idx; + unsigned int idx; KASSERT(pp->pr_roflags & PR_NOTOUCH); - idx = (cp - ph->ph_page - ph->ph_off) / pp->pr_size; + idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size; KASSERT(idx < pp->pr_itemsperpage); return idx; } -#define PR_FREELIST_ALIGN(p) \ - roundup((uintptr_t)(p), sizeof(pool_item_freelist_t)) -#define PR_FREELIST(ph) ((pool_item_freelist_t *)PR_FREELIST_ALIGN((ph) + 1)) -#define PR_INDEX_USED ((pool_item_freelist_t)-1) -#define PR_INDEX_EOL ((pool_item_freelist_t)-2) - static inline void pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph, void *obj) { - int idx = pr_item_notouch_index(pp, ph, obj); - pool_item_freelist_t *freelist = PR_FREELIST(ph); + unsigned int idx = pr_item_notouch_index(pp, ph, obj); + pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE); + pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK); - KASSERT(freelist[idx] == PR_INDEX_USED); - freelist[idx] = ph->ph_firstfree; - ph->ph_firstfree = idx; + KASSERT((*bitmap & mask) == 0); + *bitmap |= mask; } static inline void * pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph) { - int idx = ph->ph_firstfree; - pool_item_freelist_t *freelist = PR_FREELIST(ph); + pool_item_bitmap_t *bitmap = ph->ph_bitmap; + unsigned int idx; + int i; + + for (i = 0; ; i++) { + int bit; + + KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage); + bit = ffs32(bitmap[i]); + if (bit) { + pool_item_bitmap_t mask; + + bit--; + idx = (i * BITMAP_SIZE) + bit; + mask = 1 << bit; + KASSERT((bitmap[i] & mask) != 0); + bitmap[i] &= ~mask; + break; + } + } + KASSERT(idx < pp->pr_itemsperpage); + return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size; +} - KASSERT(freelist[idx] != PR_INDEX_USED); - ph->ph_firstfree = freelist[idx]; - freelist[idx] = PR_INDEX_USED; +static inline void +pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph) +{ + pool_item_bitmap_t *bitmap = ph->ph_bitmap; + const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE); + int i; - return ph->ph_page + 
ph->ph_off + idx * pp->pr_size; + for (i = 0; i < n; i++) { + bitmap[i] = (pool_item_bitmap_t)-1; + } } static inline int phtree_compare(struct pool_item_header *a, struct pool_item_header *b) { + + /* + * we consider pool_item_header with smaller ph_page bigger. + * (this unnatural ordering is for the benefit of pr_find_pagehead.) + */ + if (a->ph_page < b->ph_page) - return (-1); - else if (a->ph_page > b->ph_page) return (1); + else if (a->ph_page > b->ph_page) + return (-1); else return (0); } @@ -371,18 +413,38 @@ SPLAY_PROTOTYPE(phtree, pool_item_header SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare); /* - * Return the pool page header based on page address. + * Return the pool page header based on item address. */ static inline struct pool_item_header * -pr_find_pagehead(struct pool *pp, caddr_t page) +pr_find_pagehead(struct pool *pp, void *v) { struct pool_item_header *ph, tmp; - if ((pp->pr_roflags & PR_PHINPAGE) != 0) - return ((struct pool_item_header *)(page + pp->pr_phoffset)); + if ((pp->pr_roflags & PR_NOALIGN) != 0) { + tmp.ph_page = (void *)(uintptr_t)v; + ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp); + if (ph == NULL) { + ph = SPLAY_ROOT(&pp->pr_phtree); + if (ph != NULL && phtree_compare(&tmp, ph) >= 0) { + ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph); + } + KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0); + } + } else { + void *page = + (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask); + + if ((pp->pr_roflags & PR_PHINPAGE) != 0) { + ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset); + } else { + tmp.ph_page = page; + ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp); + } + } - tmp.ph_page = page; - ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp); + KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) || + ((char *)ph->ph_page <= (char *)v && + (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz)); return ph; } @@ -390,16 +452,12 @@ static void pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq) { struct pool_item_header *ph; - int s; while ((ph = LIST_FIRST(pq)) != NULL) { LIST_REMOVE(ph, ph_pagelist); pool_allocator_free(pp, ph->ph_page); - if ((pp->pr_roflags & PR_PHINPAGE) == 0) { - s = splvm(); + if ((pp->pr_roflags & PR_PHINPAGE) == 0) pool_put(pp->pr_phpool, ph); - splx(s); - } } } @@ -411,7 +469,7 @@ pr_rmpage(struct pool *pp, struct pool_i struct pool_pagelist *pq) { - LOCK_ASSERT(simple_lock_held(&pp->pr_slock)); + KASSERT(mutex_owned(&pp->pr_lock)); /* * If the page was idle, decrement the idle page count. @@ -442,19 +500,109 @@ pr_rmpage(struct pool *pp, struct pool_i pool_update_curpage(pp); } +static bool +pa_starved_p(struct pool_allocator *pa) +{ + + if (pa->pa_backingmap != NULL) { + return vm_map_starved_p(pa->pa_backingmap); + } + return false; +} + +static int +pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg) +{ + struct pool *pp = obj; + struct pool_allocator *pa = pp->pr_alloc; + + KASSERT(&pp->pr_reclaimerentry == ce); + pool_reclaim(pp); + if (!pa_starved_p(pa)) { + return CALLBACK_CHAIN_ABORT; + } + return CALLBACK_CHAIN_CONTINUE; +} + +static void +pool_reclaim_register(struct pool *pp) +{ + struct vm_map *map = pp->pr_alloc->pa_backingmap; + int s; + + if (map == NULL) { + return; + } + + s = splvm(); /* not necessary for INTRSAFE maps, but don't care. 
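+	 * pool_reclaim_callback() (above) is entered from this chain
+	 * when the backing map runs short of space; the walk continues
+	 * from pool to pool until pa_starved_p() reports that the map
+	 * has recovered.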
*/ + callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback, + &pp->pr_reclaimerentry, pp, pool_reclaim_callback); + splx(s); +} + +static void +pool_reclaim_unregister(struct pool *pp) +{ + struct vm_map *map = pp->pr_alloc->pa_backingmap; + int s; + + if (map == NULL) { + return; + } + + s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */ + callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback, + &pp->pr_reclaimerentry); + splx(s); +} + +static void +pa_reclaim_register(struct pool_allocator *pa) +{ + struct vm_map *map = *pa->pa_backingmapptr; + struct pool *pp; + + KASSERT(pa->pa_backingmap == NULL); + if (map == NULL) { + SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q); + return; + } + pa->pa_backingmap = map; + TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) { + pool_reclaim_register(pp); + } +} + /* * Initialize all the pools listed in the "pools" link set. */ void -link_pool_init(void) +pool_subsystem_init(void) { + struct pool_allocator *pa; __link_set_decl(pools, struct link_pool_init); struct link_pool_init * const *pi; + mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE); + cv_init(&pool_busy, "poolbusy"); + __link_set_foreach(pi, pools) pool_init((*pi)->pp, (*pi)->size, (*pi)->align, (*pi)->align_offset, (*pi)->flags, (*pi)->wchan, - (*pi)->palloc); + (*pi)->palloc, (*pi)->ipl); + + while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) { + KASSERT(pa->pa_backingmapptr != NULL); + KASSERT(*pa->pa_backingmapptr != NULL); + SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q); + pa_reclaim_register(pa); + } + + pool_init(&cache_pool, sizeof(struct pool_cache), CACHE_LINE_SIZE, + 0, 0, "pcache", &pool_allocator_nointr, IPL_NONE); + + pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), CACHE_LINE_SIZE, + 0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE); } /* @@ -465,14 +613,25 @@ link_pool_init(void) */ void pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags, - const char *wchan, struct pool_allocator *palloc) + const char *wchan, struct pool_allocator *palloc, int ipl) { - int off, slack; +#ifdef DEBUG + struct pool *pp1; +#endif size_t trysize, phsize; - int s; + int off, slack; - KASSERT((1UL << (CHAR_BIT * sizeof(pool_item_freelist_t))) - 2 >= - PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1)); +#ifdef DEBUG + /* + * Check that the pool hasn't already been initialised and + * added to the list of all pools. 
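+	 *
+	 * A static pool is set up exactly once, along the lines of
+	 * (illustrative "foo" names):
+	 *
+	 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
+	 *	    "foopl", &pool_allocator_nointr, IPL_NONE);
+	 *
+	 * so finding the pool already on the list indicates a bug in
+	 * the caller.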
+ */ + LIST_FOREACH(pp1, &pool_head, pr_poollist) { + if (pp == pp1) + panic("pool_init: pool %s already initialised", + wchan); + } +#endif #ifdef POOL_DIAGNOSTIC /* @@ -498,23 +657,26 @@ pool_init(struct pool *pp, size_t size, TAILQ_INIT(&palloc->pa_list); - simple_lock_init(&palloc->pa_slock); + mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM); palloc->pa_pagemask = ~(palloc->pa_pagesz - 1); palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1; + + if (palloc->pa_backingmapptr != NULL) { + pa_reclaim_register(palloc); + } palloc->pa_flags |= PA_INITIALIZED; } if (align == 0) align = ALIGN(1); - if (size < sizeof(struct pool_item)) + if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item)) size = sizeof(struct pool_item); size = roundup(size, align); #ifdef DIAGNOSTIC if (size > palloc->pa_pagesz) - panic("pool_init: pool item size (%lu) too large", - (u_long)size); + panic("pool_init: pool item size (%zu) too large", size); #endif /* @@ -523,7 +685,7 @@ pool_init(struct pool *pp, size_t size, LIST_INIT(&pp->pr_emptypages); LIST_INIT(&pp->pr_fullpages); LIST_INIT(&pp->pr_partpages); - LIST_INIT(&pp->pr_cachelist); + pp->pr_cache = NULL; pp->pr_curpage = NULL; pp->pr_npages = 0; pp->pr_minitems = 0; @@ -545,6 +707,7 @@ pool_init(struct pool *pp, size_t size, pp->pr_hardlimit_warning_last.tv_usec = 0; pp->pr_drain_hook = NULL; pp->pr_drain_hook_arg = NULL; + pp->pr_freecheck = NULL; /* * Decide whether to put the page header off page to avoid @@ -563,7 +726,7 @@ pool_init(struct pool *pp, size_t size, /* See the comment below about reserved bytes. */ trysize = palloc->pa_pagesz - ((align - ioff) % align); phsize = ALIGN(sizeof(struct pool_item_header)); - if ((pp->pr_roflags & PR_NOTOUCH) == 0 && + if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 && (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) || trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) { /* Use the end of the page for the page header */ @@ -623,6 +786,7 @@ pool_init(struct pool *pp, size_t size, pp->pr_npagefree = 0; pp->pr_hiwat = 0; pp->pr_nidle = 0; + pp->pr_refcnt = 0; #ifdef POOL_DIAGNOSTIC if (flags & PR_LOGGING) { @@ -638,7 +802,9 @@ pool_init(struct pool *pp, size_t size, pp->pr_entered_file = NULL; pp->pr_entered_line = 0; - simple_lock_init(&pp->pr_slock); + mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl); + cv_init(&pp->pr_cv, wchan); + pp->pr_ipl = ipl; /* * Initialize private page header pool and cache magazine pool if we @@ -657,31 +823,36 @@ pool_init(struct pool *pp, size_t size, "phpool-%d", nelem); sz = sizeof(struct pool_item_header); if (nelem) { - sz = PR_FREELIST_ALIGN(sz) - + nelem * sizeof(pool_item_freelist_t); + sz = offsetof(struct pool_item_header, + ph_bitmap[howmany(nelem, BITMAP_SIZE)]); } pool_init(&phpool[idx], sz, 0, 0, 0, - phpool_names[idx], &pool_allocator_meta); + phpool_names[idx], &pool_allocator_meta, IPL_VM); } #ifdef POOL_SUBPAGE pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0, - PR_RECURSIVE, "psppool", &pool_allocator_meta); + PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM); #endif - pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0, - 0, "pcgpool", &pool_allocator_meta); + pool_init(&pcgpool, sizeof(pcg_t), CACHE_LINE_SIZE, 0, 0, + "cachegrp", &pool_allocator_meta, IPL_VM); } - /* Insert into the list of all pools. */ - simple_lock(&pool_head_slock); - LIST_INSERT_HEAD(&pool_head, pp, pr_poollist); - simple_unlock(&pool_head_slock); + if (__predict_true(!cold)) { + /* Insert into the list of all pools. 
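+		 *
+		 * (While `cold' is set, only the boot CPU runs and
+		 * interrupts are off, so the unlocked updates in the
+		 * else branch are safe; the locks themselves may not
+		 * be usable that early.)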
*/ + mutex_enter(&pool_head_lock); + LIST_INSERT_HEAD(&pool_head, pp, pr_poollist); + mutex_exit(&pool_head_lock); + + /* Insert this into the list of pools using this allocator. */ + mutex_enter(&palloc->pa_lock); + TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list); + mutex_exit(&palloc->pa_lock); + } else { + LIST_INSERT_HEAD(&pool_head, pp, pr_poollist); + TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list); + } - /* Insert this into the list of pools using this allocator. */ - s = splvm(); - simple_lock(&palloc->pa_slock); - TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list); - simple_unlock(&palloc->pa_slock); - splx(s); + pool_reclaim_register(pp); } /* @@ -692,26 +863,25 @@ pool_destroy(struct pool *pp) { struct pool_pagelist pq; struct pool_item_header *ph; - int s; /* Remove from global pool list */ - simple_lock(&pool_head_slock); + mutex_enter(&pool_head_lock); + while (pp->pr_refcnt != 0) + cv_wait(&pool_busy, &pool_head_lock); LIST_REMOVE(pp, pr_poollist); if (drainpp == pp) drainpp = NULL; - simple_unlock(&pool_head_slock); + mutex_exit(&pool_head_lock); /* Remove this pool from its allocator's list of pools. */ - s = splvm(); - simple_lock(&pp->pr_alloc->pa_slock); + pool_reclaim_unregister(pp); + mutex_enter(&pp->pr_alloc->pa_lock); TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list); - simple_unlock(&pp->pr_alloc->pa_slock); - splx(s); + mutex_exit(&pp->pr_alloc->pa_lock); - s = splvm(); - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); - KASSERT(LIST_EMPTY(&pp->pr_cachelist)); + KASSERT(pp->pr_cache == NULL); #ifdef DIAGNOSTIC if (pp->pr_nout != 0) { @@ -729,8 +899,7 @@ pool_destroy(struct pool *pp) while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL) pr_rmpage(pp, ph, &pq); - simple_unlock(&pp->pr_slock); - splx(s); + mutex_exit(&pp->pr_lock); pr_pagelist_free(pp, &pq); @@ -738,6 +907,9 @@ pool_destroy(struct pool *pp) if ((pp->pr_roflags & PR_LOGGING) != 0) free(pp->pr_log, M_TEMP); #endif + + cv_destroy(&pp->pr_cv); + mutex_destroy(&pp->pr_lock); } void @@ -754,26 +926,20 @@ pool_set_drain_hook(struct pool *pp, voi } static struct pool_item_header * -pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags) +pool_alloc_item_header(struct pool *pp, void *storage, int flags) { struct pool_item_header *ph; - int s; - - LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0); if ((pp->pr_roflags & PR_PHINPAGE) != 0) - ph = (struct pool_item_header *) (storage + pp->pr_phoffset); - else { - s = splvm(); + ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset); + else ph = pool_get(pp->pr_phpool, flags); - splx(s); - } return (ph); } /* - * Grab an item from the pool; must be called at appropriate spl level + * Grab an item from the pool. */ void * #ifdef POOL_DIAGNOSTIC @@ -797,11 +963,10 @@ pool_get(struct pool *pp, int flags) #endif /* DIAGNOSTIC */ #ifdef LOCKDEBUG if (flags & PR_WAITOK) - simple_lock_only_held(NULL, "pool_get(PR_WAITOK)"); - SCHED_ASSERT_UNLOCKED(); + ASSERT_SLEEPABLE(NULL, "pool_get(PR_WAITOK)"); #endif - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); pr_enter(pp, file, line); startover: @@ -813,7 +978,7 @@ pool_get(struct pool *pp, int flags) #ifdef DIAGNOSTIC if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) { pr_leave(pp); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); panic("pool_get: %s: crossed hard limit", pp->pr_wchan); } #endif @@ -825,9 +990,9 @@ pool_get(struct pool *pp, int flags) * and check the hardlimit condition again. 
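		 *
		 * (The hook is installed with pool_set_drain_hook() and,
		 * as the call below shows, receives the registered
		 * argument and the PR_* flags:
		 *
		 *	void foo_drain(void *arg, int flags);
		 *
		 * with foo_drain an illustrative name.)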
*/ pr_leave(pp); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags); - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); pr_enter(pp, file, line); if (pp->pr_nout < pp->pr_hardlimit) goto startover; @@ -840,7 +1005,7 @@ pool_get(struct pool *pp, int flags) */ pp->pr_flags |= PR_WANTED; pr_leave(pp); - ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock); + cv_wait(&pp->pr_cv, &pp->pr_lock); pr_enter(pp, file, line); goto startover; } @@ -856,7 +1021,7 @@ pool_get(struct pool *pp, int flags) pp->pr_nfail++; pr_leave(pp); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); return (NULL); } @@ -867,9 +1032,11 @@ pool_get(struct pool *pp, int flags) * has no items in its bucket. */ if ((ph = pp->pr_curpage) == NULL) { + int error; + #ifdef DIAGNOSTIC if (pp->pr_nitems != 0) { - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); printf("pool_get: %s: curpage NULL, nitems %u\n", pp->pr_wchan, pp->pr_nitems); panic("pool_get: nitems inconsistent"); @@ -882,18 +1049,9 @@ pool_get(struct pool *pp, int flags) * may block. */ pr_leave(pp); - simple_unlock(&pp->pr_slock); - v = pool_allocator_alloc(pp, flags); - if (__predict_true(v != NULL)) - ph = pool_alloc_item_header(pp, v, flags); - - if (__predict_false(v == NULL || ph == NULL)) { - if (v != NULL) - pool_allocator_free(pp, v); - - simple_lock(&pp->pr_slock); - pr_enter(pp, file, line); - + error = pool_grow(pp, flags); + pr_enter(pp, file, line); + if (error != 0) { /* * We were unable to allocate a page or item * header, but we released the lock during @@ -903,33 +1061,12 @@ pool_get(struct pool *pp, int flags) if (pp->pr_curpage != NULL) goto startover; - if ((flags & PR_WAITOK) == 0) { - pp->pr_nfail++; - pr_leave(pp); - simple_unlock(&pp->pr_slock); - return (NULL); - } - - /* - * Wait for items to be returned to this pool. - * - * wake up once a second and try again, - * as the check in pool_cache_put_paddr() is racy. - */ - pp->pr_flags |= PR_WANTED; - /* PA_WANTED is already set on the allocator. */ + pp->pr_nfail++; pr_leave(pp); - ltsleep(pp, PSWP, pp->pr_wchan, hz, &pp->pr_slock); - pr_enter(pp, file, line); - goto startover; + mutex_exit(&pp->pr_lock); + return (NULL); } - /* We have more memory; add it to the pool */ - simple_lock(&pp->pr_slock); - pr_enter(pp, file, line); - pool_prime_page(pp, v, ph); - pp->pr_npagealloc++; - /* Start the allocation process over. 
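		 *
		 * (pool_grow() released and re-took pr_lock, so the pool
		 * may have changed while unlocked; re-evaluate from the
		 * top.)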
*/ goto startover; } @@ -937,7 +1074,7 @@ pool_get(struct pool *pp, int flags) #ifdef DIAGNOSTIC if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) { pr_leave(pp); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); panic("pool_get: %s: page empty", pp->pr_wchan); } #endif @@ -949,13 +1086,13 @@ pool_get(struct pool *pp, int flags) v = pi = LIST_FIRST(&ph->ph_itemlist); if (__predict_false(v == NULL)) { pr_leave(pp); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); panic("pool_get: %s: page empty", pp->pr_wchan); } #ifdef DIAGNOSTIC if (__predict_false(pp->pr_nitems == 0)) { pr_leave(pp); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); printf("pool_get: %s: items on itemlist, nitems %u\n", pp->pr_wchan, pp->pr_nitems); panic("pool_get: nitems inconsistent"); @@ -1002,7 +1139,7 @@ pool_get(struct pool *pp, int flags) if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 && !LIST_EMPTY(&ph->ph_itemlist))) { pr_leave(pp); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); panic("pool_get: %s: nmissing inconsistent", pp->pr_wchan); } @@ -1031,7 +1168,9 @@ pool_get(struct pool *pp, int flags) */ } - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); + KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0); + FREECHECK_OUT(&pp->pr_freecheck, v); return (v); } @@ -1043,13 +1182,10 @@ pool_do_put(struct pool *pp, void *v, st { struct pool_item *pi = v; struct pool_item_header *ph; - caddr_t page; - int s; - - LOCK_ASSERT(simple_lock_held(&pp->pr_slock)); - SCHED_ASSERT_UNLOCKED(); - page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask); + KASSERT(mutex_owned(&pp->pr_lock)); + FREECHECK_IN(&pp->pr_freecheck, v); + LOCKDEBUG_MEM_CHECK(v, pp->pr_size); #ifdef DIAGNOSTIC if (__predict_false(pp->pr_nout == 0)) { @@ -1059,18 +1195,11 @@ pool_do_put(struct pool *pp, void *v, st } #endif - if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) { + if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) { pr_printlog(pp, NULL, printf); panic("pool_put: %s: page header missing", pp->pr_wchan); } -#ifdef LOCKDEBUG - /* - * Check if we're freeing a locked simple lock. - */ - simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size); -#endif - /* * Return to item list. */ @@ -1106,7 +1235,7 @@ pool_do_put(struct pool *pp, void *v, st pp->pr_flags &= ~PR_WANTED; if (ph->ph_nmissing == 0) pp->pr_nidle++; - wakeup((caddr_t)pp); + cv_broadcast(&pp->pr_cv); return; } @@ -1127,7 +1256,7 @@ pool_do_put(struct pool *pp, void *v, st pp->pr_nidle++; if (pp->pr_npages > pp->pr_minpages && (pp->pr_npages > pp->pr_maxpages || - (pp->pr_alloc->pa_flags & PA_WANT) != 0)) { + pa_starved_p(pp->pr_alloc))) { pr_rmpage(pp, ph, pq); } else { LIST_REMOVE(ph, ph_pagelist); @@ -1139,9 +1268,7 @@ pool_do_put(struct pool *pp, void *v, st * be reclaimed by the pagedaemon. This minimizes * ping-pong'ing for memory. */ - s = splclock(); - ph->ph_time = mono_time; - splx(s); + getmicrotime(&ph->ph_time); } pool_update_curpage(pp); } @@ -1160,7 +1287,7 @@ pool_do_put(struct pool *pp, void *v, st } /* - * Return resource to the pool; must be called at appropriate spl level + * Return resource to the pool. 
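+ *
+ *	Releases an item obtained earlier with pool_get().  A typical
+ *	pairing, with an illustrative "foo" pool:
+ *
+ *		item = pool_get(&foo_pool, PR_WAITOK);
+ *		...
+ *		pool_put(&foo_pool, item);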
*/ #ifdef POOL_DIAGNOSTIC void @@ -1170,7 +1297,7 @@ _pool_put(struct pool *pp, void *v, cons LIST_INIT(&pq); - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); pr_enter(pp, file, line); pr_log(pp, v, PRLOG_PUT, file, line); @@ -1178,7 +1305,7 @@ _pool_put(struct pool *pp, void *v, cons pool_do_put(pp, v, &pq); pr_leave(pp); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); pr_pagelist_free(pp, &pq); } @@ -1192,9 +1319,9 @@ pool_put(struct pool *pp, void *v) LIST_INIT(&pq); - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); pool_do_put(pp, v, &pq); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); pr_pagelist_free(pp, &pq); } @@ -1204,43 +1331,64 @@ pool_put(struct pool *pp, void *v) #endif /* + * pool_grow: grow a pool by a page. + * + * => called with pool locked. + * => unlock and relock the pool. + * => return with pool locked. + */ + +static int +pool_grow(struct pool *pp, int flags) +{ + struct pool_item_header *ph = NULL; + char *cp; + + mutex_exit(&pp->pr_lock); + cp = pool_allocator_alloc(pp, flags); + if (__predict_true(cp != NULL)) { + ph = pool_alloc_item_header(pp, cp, flags); + } + if (__predict_false(cp == NULL || ph == NULL)) { + if (cp != NULL) { + pool_allocator_free(pp, cp); + } + mutex_enter(&pp->pr_lock); + return ENOMEM; + } + + mutex_enter(&pp->pr_lock); + pool_prime_page(pp, cp, ph); + pp->pr_npagealloc++; + return 0; +} + +/* * Add N items to the pool. */ int pool_prime(struct pool *pp, int n) { - struct pool_item_header *ph = NULL; - caddr_t cp; int newpages; + int error = 0; - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; while (newpages-- > 0) { - simple_unlock(&pp->pr_slock); - cp = pool_allocator_alloc(pp, PR_NOWAIT); - if (__predict_true(cp != NULL)) - ph = pool_alloc_item_header(pp, cp, PR_NOWAIT); - - if (__predict_false(cp == NULL || ph == NULL)) { - if (cp != NULL) - pool_allocator_free(pp, cp); - simple_lock(&pp->pr_slock); + error = pool_grow(pp, PR_NOWAIT); + if (error) { break; } - - simple_lock(&pp->pr_slock); - pool_prime_page(pp, cp, ph); - pp->pr_npagealloc++; pp->pr_minpages++; } if (pp->pr_minpages >= pp->pr_maxpages) pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */ - simple_unlock(&pp->pr_slock); - return (0); + mutex_exit(&pp->pr_lock); + return error; } /* @@ -1249,19 +1397,19 @@ pool_prime(struct pool *pp, int n) * Note, we must be called with the pool descriptor LOCKED. 
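 *
 * The canonical caller is pool_grow() above, which allocates the
 * backing page and its header with the pool unlocked and only then
 * re-takes pr_lock:
 *
 *	cp = pool_allocator_alloc(pp, flags);
 *	ph = pool_alloc_item_header(pp, cp, flags);
 *	mutex_enter(&pp->pr_lock);
 *	pool_prime_page(pp, cp, ph);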
*/ static void -pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph) +pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph) { struct pool_item *pi; - caddr_t cp = storage; - unsigned int align = pp->pr_align; - unsigned int ioff = pp->pr_itemoffset; + void *cp = storage; + const unsigned int align = pp->pr_align; + const unsigned int ioff = pp->pr_itemoffset; int n; - int s; - LOCK_ASSERT(simple_lock_held(&pp->pr_slock)); + KASSERT(mutex_owned(&pp->pr_lock)); #ifdef DIAGNOSTIC - if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0) + if ((pp->pr_roflags & PR_NOALIGN) == 0 && + ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0) panic("pool_prime_page: %s: unaligned page", pp->pr_wchan); #endif @@ -1272,9 +1420,7 @@ pool_prime_page(struct pool *pp, caddr_t LIST_INIT(&ph->ph_itemlist); ph->ph_page = storage; ph->ph_nmissing = 0; - s = splclock(); - ph->ph_time = mono_time; - splx(s); + getmicrotime(&ph->ph_time); if ((pp->pr_roflags & PR_PHINPAGE) == 0) SPLAY_INSERT(phtree, &pp->pr_phtree, ph); @@ -1283,7 +1429,7 @@ pool_prime_page(struct pool *pp, caddr_t /* * Color this page. */ - cp = (caddr_t)(cp + pp->pr_curcolor); + cp = (char *)cp + pp->pr_curcolor; if ((pp->pr_curcolor += align) > pp->pr_maxcolor) pp->pr_curcolor = 0; @@ -1291,7 +1437,9 @@ pool_prime_page(struct pool *pp, caddr_t * Adjust storage to apply aligment to `pr_itemoffset' in each item. */ if (ioff != 0) - cp = (caddr_t)(cp + (align - ioff)); + cp = (char *)cp + align - ioff; + + KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0); /* * Insert remaining chunks on the bucket list. @@ -1300,14 +1448,7 @@ pool_prime_page(struct pool *pp, caddr_t pp->pr_nitems += n; if (pp->pr_roflags & PR_NOTOUCH) { - pool_item_freelist_t *freelist = PR_FREELIST(ph); - int i; - - ph->ph_off = cp - storage; - ph->ph_firstfree = 0; - for (i = 0; i < n - 1; i++) - freelist[i] = i + 1; - freelist[n - 1] = PR_INDEX_EOL; + pr_item_notouch_init(pp, ph); } else { while (n--) { pi = (struct pool_item *)cp; @@ -1319,7 +1460,9 @@ pool_prime_page(struct pool *pp, caddr_t #ifdef DIAGNOSTIC pi->pi_magic = PI_MAGIC; #endif - cp = (caddr_t)(cp + pp->pr_size); + cp = (char *)cp + pp->pr_size; + + KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0); } } @@ -1345,34 +1488,15 @@ pool_prime_page(struct pool *pp, caddr_t static int pool_catchup(struct pool *pp) { - struct pool_item_header *ph = NULL; - caddr_t cp; int error = 0; while (POOL_NEEDS_CATCHUP(pp)) { - /* - * Call the page back-end allocator for more memory. - * - * XXX: We never wait, so should we bother unlocking - * the pool descriptor? 
- */ - simple_unlock(&pp->pr_slock); - cp = pool_allocator_alloc(pp, PR_NOWAIT); - if (__predict_true(cp != NULL)) - ph = pool_alloc_item_header(pp, cp, PR_NOWAIT); - if (__predict_false(cp == NULL || ph == NULL)) { - if (cp != NULL) - pool_allocator_free(pp, cp); - error = ENOMEM; - simple_lock(&pp->pr_slock); + error = pool_grow(pp, PR_NOWAIT); + if (error) { break; } - simple_lock(&pp->pr_slock); - pool_prime_page(pp, cp, ph); - pp->pr_npagealloc++; } - - return (error); + return error; } static void @@ -1389,7 +1513,7 @@ void pool_setlowat(struct pool *pp, int n) { - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); pp->pr_minitems = n; pp->pr_minpages = (n == 0) @@ -1405,27 +1529,27 @@ pool_setlowat(struct pool *pp, int n) */ } - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); } void pool_sethiwat(struct pool *pp, int n) { - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); pp->pr_maxpages = (n == 0) ? 0 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); } void pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap) { - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); pp->pr_hardlimit = n; pp->pr_hardlimit_warning = warnmess; @@ -1441,7 +1565,7 @@ pool_sethardlimit(struct pool *pp, int n ? 0 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); } /* @@ -1455,11 +1579,10 @@ pool_reclaim(struct pool *pp) #endif { struct pool_item_header *ph, *phnext; - struct pool_cache *pc; struct pool_pagelist pq; - struct pool_cache_grouplist pcgl; struct timeval curtime, diff; - int s; + bool klock; + int rv; if (pp->pr_drain_hook != NULL) { /* @@ -1468,22 +1591,33 @@ pool_reclaim(struct pool *pp) (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT); } - if (simple_lock_try(&pp->pr_slock) == 0) + /* + * XXXSMP Because mutexes at IPL_SOFTXXX are still spinlocks, + * and we are called from the pagedaemon without kernel_lock. + * Does not apply to IPL_SOFTBIO. + */ + if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK || + pp->pr_ipl == IPL_SOFTSERIAL) { + KERNEL_LOCK(1, NULL); + klock = true; + } else + klock = false; + + /* Reclaim items from the pool's cache (if any). */ + if (pp->pr_cache != NULL) + pool_cache_invalidate(pp->pr_cache); + + if (mutex_tryenter(&pp->pr_lock) == 0) { + if (klock) { + KERNEL_UNLOCK_ONE(NULL); + } return (0); + } pr_enter(pp, file, line); LIST_INIT(&pq); - LIST_INIT(&pcgl); - - /* - * Reclaim items from the pool's caches. - */ - LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist) - pool_cache_reclaim(pc, &pq, &pcgl); - s = splclock(); - curtime = mono_time; - splx(s); + getmicrotime(&curtime); for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) { phnext = LIST_NEXT(ph, ph_pagelist); @@ -1494,7 +1628,8 @@ pool_reclaim(struct pool *pp) KASSERT(ph->ph_nmissing == 0); timersub(&curtime, &ph->ph_time, &diff); - if (diff.tv_sec < pool_inactive_time) + if (diff.tv_sec < pool_inactive_time + && !pa_starved_p(pp->pr_alloc)) continue; /* @@ -1509,39 +1644,87 @@ pool_reclaim(struct pool *pp) } pr_leave(pp); - simple_unlock(&pp->pr_slock); - if (LIST_EMPTY(&pq) && LIST_EMPTY(&pcgl)) - return 0; + mutex_exit(&pp->pr_lock); - pr_pagelist_free(pp, &pq); - pcg_grouplist_free(&pcgl); - return (1); + if (LIST_EMPTY(&pq)) + rv = 0; + else { + pr_pagelist_free(pp, &pq); + rv = 1; + } + + if (klock) { + KERNEL_UNLOCK_ONE(NULL); + } + + return (rv); } /* - * Drain pools, one at a time. 
+ * Drain pools, one at a time. This is a two stage process; + * drain_start kicks off a cross call to drain CPU-level caches + * if the pool has an associated pool_cache. drain_end waits + * for those cross calls to finish, and then drains the cache + * (if any) and pool. * - * Note, we must never be called from an interrupt context. + * Note, must never be called from interrupt context. */ void -pool_drain(void *arg) +pool_drain_start(struct pool **ppp, uint64_t *wp) { struct pool *pp; - int s; + + KASSERT(!LIST_EMPTY(&pool_head)); pp = NULL; - s = splvm(); - simple_lock(&pool_head_slock); - if (drainpp == NULL) { - drainpp = LIST_FIRST(&pool_head); - } - if (drainpp) { - pp = drainpp; - drainpp = LIST_NEXT(pp, pr_poollist); + + /* Find next pool to drain, and add a reference. */ + mutex_enter(&pool_head_lock); + do { + if (drainpp == NULL) { + drainpp = LIST_FIRST(&pool_head); + } + if (drainpp != NULL) { + pp = drainpp; + drainpp = LIST_NEXT(pp, pr_poollist); + } + /* + * Skip completely idle pools. We depend on at least + * one pool in the system being active. + */ + } while (pp == NULL || pp->pr_npages == 0); + pp->pr_refcnt++; + mutex_exit(&pool_head_lock); + + /* If there is a pool_cache, drain CPU level caches. */ + *ppp = pp; + if (pp->pr_cache != NULL) { + *wp = xc_broadcast(0, (xcfunc_t)pool_cache_xcall, + pp->pr_cache, NULL); } - simple_unlock(&pool_head_slock); +} + +void +pool_drain_end(struct pool *pp, uint64_t where) +{ + + if (pp == NULL) + return; + + KASSERT(pp->pr_refcnt > 0); + + /* Wait for remote draining to complete. */ + if (pp->pr_cache != NULL) + xc_wait(where); + + /* Drain the cache (if any) and pool.. */ pool_reclaim(pp); - splx(s); + + /* Finally, unlock the pool. */ + mutex_enter(&pool_head_lock); + pp->pr_refcnt--; + cv_broadcast(&pool_busy); + mutex_exit(&pool_head_lock); } /* @@ -1550,18 +1733,8 @@ pool_drain(void *arg) void pool_print(struct pool *pp, const char *modif) { - int s; - s = splvm(); - if (simple_lock_try(&pp->pr_slock) == 0) { - printf("pool %s is locked; try again later\n", - pp->pr_wchan); - splx(s); - return; - } pool_print1(pp, modif, printf); - simple_unlock(&pp->pr_slock); - splx(s); } void @@ -1569,12 +1742,6 @@ pool_printall(const char *modif, void (* { struct pool *pp; - if (simple_lock_try(&pool_head_slock) == 0) { - (*pr)("WARNING: pool_head_slock is locked\n"); - } else { - simple_unlock(&pool_head_slock); - } - LIST_FOREACH(pp, &pool_head, pr_poollist) { pool_printit(pp, modif, pr); } @@ -1589,20 +1756,6 @@ pool_printit(struct pool *pp, const char return; } - /* - * Called from DDB; interrupts should be blocked, and all - * other processors should be paused. We can skip locking - * the pool in this case. - * - * We do a simple_lock_try() just to print the lock - * status, however. 
- */ - - if (simple_lock_try(&pp->pr_slock) == 0) - (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan); - else - simple_unlock(&pp->pr_slock); - pool_print1(pp, modif, pr); } @@ -1637,8 +1790,10 @@ static void pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...)) { struct pool_item_header *ph; - struct pool_cache *pc; - struct pool_cache_group *pcg; + pool_cache_t pc; + pcg_t *pcg; + pool_cache_cpu_t *cc; + uint64_t cpuhit, cpumiss; int i, print_log = 0, print_pagelist = 0, print_cache = 0; char c; @@ -1651,7 +1806,13 @@ pool_print1(struct pool *pp, const char print_cache = 1; } - (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n", + if ((pc = pp->pr_cache) != NULL) { + (*pr)("POOL CACHE"); + } else { + (*pr)("POOL"); + } + + (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n", pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset, pp->pr_roflags); (*pr)("\talloc %p\n", pp->pr_alloc); @@ -1660,7 +1821,7 @@ pool_print1(struct pool *pp, const char (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n", pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit); - (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n", + (*pr)("\tnget %lu, nfail %lu, nput %lu\n", pp->pr_nget, pp->pr_nfail, pp->pr_nput); (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n", pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle); @@ -1690,12 +1851,11 @@ pool_print1(struct pool *pp, const char (*pr)("\n"); if ((pp->pr_roflags & PR_LOGGING) == 0) (*pr)("\tno log\n"); - else + else { pr_printlog(pp, NULL, pr); + } skip_log: - if (print_cache == 0) - goto skip_cache; #define PR_GROUPLIST(pcg) \ (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \ @@ -1712,26 +1872,38 @@ pool_print1(struct pool *pp, const char } \ } - LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist) { - (*pr)("\tcache %p\n", pc); - (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n", - pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems); - (*pr)("\t full groups:\n"); - LIST_FOREACH(pcg, &pc->pc_fullgroups, pcg_list) { - PR_GROUPLIST(pcg); - } - (*pr)("\t partial groups:\n"); - LIST_FOREACH(pcg, &pc->pc_partgroups, pcg_list) { - PR_GROUPLIST(pcg); - } - (*pr)("\t empty groups:\n"); - LIST_FOREACH(pcg, &pc->pc_emptygroups, pcg_list) { - PR_GROUPLIST(pcg); + if (pc != NULL) { + cpuhit = 0; + cpumiss = 0; + for (i = 0; i < MAXCPUS; i++) { + if ((cc = pc->pc_cpus[i]) == NULL) + continue; + cpuhit += cc->cc_hits; + cpumiss += cc->cc_misses; + } + (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss); + (*pr)("\tcache layer hits %llu misses %llu\n", + pc->pc_hits, pc->pc_misses); + (*pr)("\tcache layer entry uncontended %llu contended %llu\n", + pc->pc_hits + pc->pc_misses - pc->pc_contended, + pc->pc_contended); + (*pr)("\tcache layer empty groups %u full groups %u\n", + pc->pc_nempty, pc->pc_nfull); + if (print_cache) { + (*pr)("\tfull cache groups:\n"); + for (pcg = pc->pc_fullgroups; pcg != NULL; + pcg = pcg->pcg_next) { + PR_GROUPLIST(pcg); + } + (*pr)("\tempty cache groups:\n"); + for (pcg = pc->pc_emptygroups; pcg != NULL; + pcg = pcg->pcg_next) { + PR_GROUPLIST(pcg); + } } } #undef PR_GROUPLIST - skip_cache: pr_enter_check(pp, pr); } @@ -1739,19 +1911,21 @@ static int pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph) { struct pool_item *pi; - caddr_t page; + void *page; int n; - page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask); - if (page != ph->ph_page && - (pp->pr_roflags & PR_PHINPAGE) != 0) { - if (label != NULL) 
- printf("%s: ", label); - printf("pool(%p:%s): page inconsistency: page %p;" - " at page head addr %p (p %p)\n", pp, - pp->pr_wchan, ph->ph_page, - ph, page); - return 1; + if ((pp->pr_roflags & PR_NOALIGN) == 0) { + page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask); + if (page != ph->ph_page && + (pp->pr_roflags & PR_PHINPAGE) != 0) { + if (label != NULL) + printf("%s: ", label); + printf("pool(%p:%s): page inconsistency: page %p;" + " at page head addr %p (p %p)\n", pp, + pp->pr_wchan, ph->ph_page, + ph, page); + return 1; + } } if ((pp->pr_roflags & PR_NOTOUCH) != 0) @@ -1766,15 +1940,16 @@ pool_chk_page(struct pool *pp, const cha if (label != NULL) printf("%s: ", label); printf("pool(%s): free list modified: magic=%x;" - " page %p; item ordinal %d;" - " addr %p (p %p)\n", + " page %p; item ordinal %d; addr %p\n", pp->pr_wchan, pi->pi_magic, ph->ph_page, - n, pi, page); + n, pi); panic("pool"); } #endif - page = - (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask); + if ((pp->pr_roflags & PR_NOALIGN) != 0) { + continue; + } + page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask); if (page == ph->ph_page) continue; @@ -1796,7 +1971,7 @@ pool_chk(struct pool *pp, const char *la struct pool_item_header *ph; int r = 0; - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) { r = pool_chk_page(pp, label, ph); if (r) { @@ -1817,7 +1992,7 @@ pool_chk(struct pool *pp, const char *la } out: - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); return (r); } @@ -1825,38 +2000,93 @@ out: * pool_cache_init: * * Initialize a pool cache. + */ +pool_cache_t +pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags, + const char *wchan, struct pool_allocator *palloc, int ipl, + int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg) +{ + pool_cache_t pc; + + pc = pool_get(&cache_pool, PR_WAITOK); + if (pc == NULL) + return NULL; + + pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan, + palloc, ipl, ctor, dtor, arg); + + return pc; +} + +/* + * pool_cache_bootstrap: * - * NOTE: If the pool must be protected from interrupts, we expect - * to be called at the appropriate interrupt priority level. + * Kernel-private version of pool_cache_init(). The caller + * provides initial storage. 
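+ *
+ *	Most callers want pool_cache_init() above instead; a typical
+ *	use, with illustrative "foo" names (ctor/dtor/arg may be NULL):
+ *
+ *		pc = pool_cache_init(sizeof(struct foo), 0, 0, 0,
+ *		    "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor,
+ *		    NULL);
+ *
+ *	With palloc == NULL and ipl == IPL_NONE, the nointr allocator
+ *	is selected below.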
*/ void -pool_cache_init(struct pool_cache *pc, struct pool *pp, - int (*ctor)(void *, void *, int), - void (*dtor)(void *, void *), +pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align, + u_int align_offset, u_int flags, const char *wchan, + struct pool_allocator *palloc, int ipl, + int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg) { + CPU_INFO_ITERATOR cii; + struct cpu_info *ci; + struct pool *pp; - LIST_INIT(&pc->pc_emptygroups); - LIST_INIT(&pc->pc_fullgroups); - LIST_INIT(&pc->pc_partgroups); - simple_lock_init(&pc->pc_slock); + pp = &pc->pc_pool; + if (palloc == NULL && ipl == IPL_NONE) + palloc = &pool_allocator_nointr; + pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl); - pc->pc_pool = pp; + mutex_init(&pc->pc_lock, MUTEX_DEFAULT, pp->pr_ipl); + if (ctor == NULL) { + ctor = (int (*)(void *, void *, int))nullop; + } + if (dtor == NULL) { + dtor = (void (*)(void *, void *))nullop; + } + + pc->pc_emptygroups = NULL; + pc->pc_fullgroups = NULL; + pc->pc_partgroups = NULL; pc->pc_ctor = ctor; pc->pc_dtor = dtor; pc->pc_arg = arg; - - pc->pc_hits = 0; + pc->pc_hits = 0; pc->pc_misses = 0; - - pc->pc_ngroups = 0; - - pc->pc_nitems = 0; - - simple_lock(&pp->pr_slock); - LIST_INSERT_HEAD(&pp->pr_cachelist, pc, pc_poollist); - simple_unlock(&pp->pr_slock); + pc->pc_nempty = 0; + pc->pc_npart = 0; + pc->pc_nfull = 0; + pc->pc_contended = 0; + pc->pc_refcnt = 0; + pc->pc_freecheck = NULL; + + /* Allocate per-CPU caches. */ + memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus)); + pc->pc_ncpu = 0; + if (ncpu == 0) { + /* XXX For sparc: boot CPU is not attached yet. */ + pool_cache_cpu_init1(curcpu(), pc); + } else { + for (CPU_INFO_FOREACH(cii, ci)) { + pool_cache_cpu_init1(ci, pc); + } + } + + if (__predict_true(!cold)) { + mutex_enter(&pp->pr_lock); + pp->pr_cache = pc; + mutex_exit(&pp->pr_lock); + mutex_enter(&pool_head_lock); + LIST_INSERT_HEAD(&pool_cache_head, pc, pc_cachelist); + mutex_exit(&pool_head_lock); + } else { + pp->pr_cache = pc; + LIST_INSERT_HEAD(&pool_cache_head, pc, pc_cachelist); + } } /* @@ -1865,297 +2095,618 @@ pool_cache_init(struct pool_cache *pc, s * Destroy a pool cache. */ void -pool_cache_destroy(struct pool_cache *pc) +pool_cache_destroy(pool_cache_t pc) { - struct pool *pp = pc->pc_pool; + struct pool *pp = &pc->pc_pool; + pool_cache_cpu_t *cc; + pcg_t *pcg; + int i; + + /* Remove it from the global list. */ + mutex_enter(&pool_head_lock); + while (pc->pc_refcnt != 0) + cv_wait(&pool_busy, &pool_head_lock); + LIST_REMOVE(pc, pc_cachelist); + mutex_exit(&pool_head_lock); /* First, invalidate the entire cache. */ pool_cache_invalidate(pc); - /* ...and remove it from the pool's cache list. */ - simple_lock(&pp->pr_slock); - LIST_REMOVE(pc, pc_poollist); - simple_unlock(&pp->pr_slock); + /* Disassociate it from the pool. */ + mutex_enter(&pp->pr_lock); + pp->pr_cache = NULL; + mutex_exit(&pp->pr_lock); + + /* Destroy per-CPU data */ + for (i = 0; i < MAXCPUS; i++) { + if ((cc = pc->pc_cpus[i]) == NULL) + continue; + if ((pcg = cc->cc_current) != NULL) { + pcg->pcg_next = NULL; + pool_cache_invalidate_groups(pc, pcg); + } + if ((pcg = cc->cc_previous) != NULL) { + pcg->pcg_next = NULL; + pool_cache_invalidate_groups(pc, pcg); + } + if (cc != &pc->pc_cpu0) + pool_put(&cache_cpu_pool, cc); + } + + /* Finally, destroy it. 
*/ + mutex_destroy(&pc->pc_lock); + pool_destroy(pp); + pool_put(&cache_pool, pc); } -static inline void * -pcg_get(struct pool_cache_group *pcg, paddr_t *pap) -{ - void *object; - u_int idx; +/* + * pool_cache_cpu_init1: + * + * Called for each pool_cache whenever a new CPU is attached. + */ +static void +pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc) +{ + pool_cache_cpu_t *cc; + int index; - KASSERT(pcg->pcg_avail <= PCG_NOBJECTS); - KASSERT(pcg->pcg_avail != 0); - idx = --pcg->pcg_avail; - - KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL); - object = pcg->pcg_objects[idx].pcgo_va; - if (pap != NULL) - *pap = pcg->pcg_objects[idx].pcgo_pa; - pcg->pcg_objects[idx].pcgo_va = NULL; + index = ci->ci_index; + + KASSERT(index < MAXCPUS); + KASSERT(((uintptr_t)pc->pc_cpus & (CACHE_LINE_SIZE - 1)) == 0); + + if ((cc = pc->pc_cpus[index]) != NULL) { + KASSERT(cc->cc_cpuindex == index); + return; + } - return (object); + /* + * The first CPU is 'free'. This needs to be the case for + * bootstrap - we may not be able to allocate yet. + */ + if (pc->pc_ncpu == 0) { + cc = &pc->pc_cpu0; + pc->pc_ncpu = 1; + } else { + mutex_enter(&pc->pc_lock); + pc->pc_ncpu++; + mutex_exit(&pc->pc_lock); + cc = pool_get(&cache_cpu_pool, PR_WAITOK); + } + + cc->cc_ipl = pc->pc_pool.pr_ipl; + cc->cc_iplcookie = makeiplcookie(cc->cc_ipl); + cc->cc_cache = pc; + cc->cc_cpuindex = index; + cc->cc_hits = 0; + cc->cc_misses = 0; + cc->cc_current = NULL; + cc->cc_previous = NULL; + + pc->pc_cpus[index] = cc; } -static inline void -pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa) +/* + * pool_cache_cpu_init: + * + * Called whenever a new CPU is attached. + */ +void +pool_cache_cpu_init(struct cpu_info *ci) { - u_int idx; + pool_cache_t pc; + + mutex_enter(&pool_head_lock); + LIST_FOREACH(pc, &pool_cache_head, pc_cachelist) { + pc->pc_refcnt++; + mutex_exit(&pool_head_lock); + + pool_cache_cpu_init1(ci, pc); + + mutex_enter(&pool_head_lock); + pc->pc_refcnt--; + cv_broadcast(&pool_busy); + } + mutex_exit(&pool_head_lock); +} - KASSERT(pcg->pcg_avail < PCG_NOBJECTS); - idx = pcg->pcg_avail++; +/* + * pool_cache_reclaim: + * + * Reclaim memory from a pool cache. + */ +bool +pool_cache_reclaim(pool_cache_t pc) +{ - KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL); - pcg->pcg_objects[idx].pcgo_va = object; - pcg->pcg_objects[idx].pcgo_pa = pa; + return pool_reclaim(&pc->pc_pool); } static void -pcg_grouplist_free(struct pool_cache_grouplist *pcgl) +pool_cache_destruct_object1(pool_cache_t pc, void *object) { - struct pool_cache_group *pcg; - int s; - s = splvm(); - while ((pcg = LIST_FIRST(pcgl)) != NULL) { - LIST_REMOVE(pcg, pcg_list); - pool_put(&pcgpool, pcg); - } - splx(s); + (*pc->pc_dtor)(pc->pc_arg, object); + pool_put(&pc->pc_pool, object); } /* - * pool_cache_get{,_paddr}: + * pool_cache_destruct_object: * - * Get an object from a pool cache (optionally returning - * the physical address of the object). + * Force destruction of an object and its release back into + * the pool. 
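+ *
+ *	Used when a constructed object cannot be cached; for example,
+ *	pool_cache_put_slow() resorts to this when no group is
+ *	available to hold the object.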
*/ -void * -pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap) +void +pool_cache_destruct_object(pool_cache_t pc, void *object) { - struct pool_cache_group *pcg; - void *object; -#ifdef LOCKDEBUG - if (flags & PR_WAITOK) - simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)"); -#endif + FREECHECK_IN(&pc->pc_freecheck, object); - simple_lock(&pc->pc_slock); + pool_cache_destruct_object1(pc, object); +} - pcg = LIST_FIRST(&pc->pc_partgroups); - if (pcg == NULL) { - pcg = LIST_FIRST(&pc->pc_fullgroups); - if (pcg != NULL) { - LIST_REMOVE(pcg, pcg_list); - LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list); - } - } - if (pcg == NULL) { +/* + * pool_cache_invalidate_groups: + * + * Invalidate a chain of groups and destruct all objects. + */ +static void +pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg) +{ + void *object; + pcg_t *next; + int i; - /* - * No groups with any available objects. Allocate - * a new object, construct it, and return it to - * the caller. We will allocate a group, if necessary, - * when the object is freed back to the cache. - */ - pc->pc_misses++; - simple_unlock(&pc->pc_slock); - object = pool_get(pc->pc_pool, flags); - if (object != NULL && pc->pc_ctor != NULL) { - if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) { - pool_put(pc->pc_pool, object); - return (NULL); - } - } - if (object != NULL && pap != NULL) { -#ifdef POOL_VTOPHYS - *pap = POOL_VTOPHYS(object); -#else - *pap = POOL_PADDR_INVALID; -#endif - } - return (object); - } + for (; pcg != NULL; pcg = next) { + next = pcg->pcg_next; - pc->pc_hits++; - pc->pc_nitems--; - object = pcg_get(pcg, pap); + for (i = 0; i < pcg->pcg_avail; i++) { + object = pcg->pcg_objects[i].pcgo_va; + pool_cache_destruct_object1(pc, object); + } - if (pcg->pcg_avail == 0) { - LIST_REMOVE(pcg, pcg_list); - LIST_INSERT_HEAD(&pc->pc_emptygroups, pcg, pcg_list); + pool_put(&pcgpool, pcg); } - simple_unlock(&pc->pc_slock); - - return (object); } /* - * pool_cache_put{,_paddr}: + * pool_cache_invalidate: * - * Put an object back to the pool cache (optionally caching the - * physical address of the object). + * Invalidate a pool cache (destruct and release all of the + * cached objects). Does not reclaim objects from the pool. 
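+ *
+ *	To return the now-idle backing pages to the system as well,
+ *	use pool_cache_reclaim(): pool_reclaim() invalidates a pool's
+ *	cache before freeing the pool's idle pages.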
*/ void -pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa) +pool_cache_invalidate(pool_cache_t pc) { - struct pool_cache_group *pcg; - int s; + pcg_t *full, *empty, *part; + + mutex_enter(&pc->pc_lock); + full = pc->pc_fullgroups; + empty = pc->pc_emptygroups; + part = pc->pc_partgroups; + pc->pc_fullgroups = NULL; + pc->pc_emptygroups = NULL; + pc->pc_partgroups = NULL; + pc->pc_nfull = 0; + pc->pc_nempty = 0; + pc->pc_npart = 0; + mutex_exit(&pc->pc_lock); + + pool_cache_invalidate_groups(pc, full); + pool_cache_invalidate_groups(pc, empty); + pool_cache_invalidate_groups(pc, part); +} + +void +pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg) +{ + + pool_set_drain_hook(&pc->pc_pool, fn, arg); +} + +void +pool_cache_setlowat(pool_cache_t pc, int n) +{ + + pool_setlowat(&pc->pc_pool, n); +} + +void +pool_cache_sethiwat(pool_cache_t pc, int n) +{ + + pool_sethiwat(&pc->pc_pool, n); +} + +void +pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap) +{ + + pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap); +} - if (__predict_false((pc->pc_pool->pr_flags & PR_WANTED) != 0)) { - goto destruct; +static inline pool_cache_cpu_t * +pool_cache_cpu_enter(pool_cache_t pc, int *s) +{ + pool_cache_cpu_t *cc; + + /* + * Prevent other users of the cache from accessing our + * CPU-local data. To avoid touching shared state, we + * pull the neccessary information from CPU local data. + */ + crit_enter(); + cc = pc->pc_cpus[curcpu()->ci_index]; + KASSERT(cc->cc_cache == pc); + if (cc->cc_ipl != IPL_NONE) { + *s = splraiseipl(cc->cc_iplcookie); } + KASSERT(((uintptr_t)cc & (CACHE_LINE_SIZE - 1)) == 0); - simple_lock(&pc->pc_slock); + return cc; +} - pcg = LIST_FIRST(&pc->pc_partgroups); - if (pcg == NULL) { - pcg = LIST_FIRST(&pc->pc_emptygroups); - if (pcg != NULL) { - LIST_REMOVE(pcg, pcg_list); - LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list); - } +static inline void +pool_cache_cpu_exit(pool_cache_cpu_t *cc, int *s) +{ + + /* No longer need exclusive access to the per-CPU data. */ + if (cc->cc_ipl != IPL_NONE) { + splx(*s); } - if (pcg == NULL) { + crit_exit(); +} + +#if __GNUC_PREREQ__(3, 0) +__attribute ((noinline)) +#endif +pool_cache_cpu_t * +pool_cache_get_slow(pool_cache_cpu_t *cc, int *s, void **objectp, + paddr_t *pap, int flags) +{ + pcg_t *pcg, *cur; + uint64_t ncsw; + pool_cache_t pc; + void *object; + + pc = cc->cc_cache; + cc->cc_misses++; + + /* + * Nothing was available locally. Try and grab a group + * from the cache. + */ + if (!mutex_tryenter(&pc->pc_lock)) { + ncsw = curlwp->l_ncsw; + mutex_enter(&pc->pc_lock); + pc->pc_contended++; /* - * No empty groups to free the object to. Attempt to - * allocate one. + * If we context switched while locking, then + * our view of the per-CPU data is invalid: + * retry. */ - simple_unlock(&pc->pc_slock); - s = splvm(); - pcg = pool_get(&pcgpool, PR_NOWAIT); - splx(s); - if (pcg == NULL) { -destruct: + if (curlwp->l_ncsw != ncsw) { + mutex_exit(&pc->pc_lock); + pool_cache_cpu_exit(cc, s); + return pool_cache_cpu_enter(pc, s); + } + } - /* - * Unable to allocate a cache group; destruct the object - * and free it back to the pool. - */ - pool_cache_destruct_object(pc, object); - return; + if ((pcg = pc->pc_fullgroups) != NULL) { + /* + * If there's a full group, release our empty + * group back to the cache. Install the full + * group as cc_current and return. 
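+		 * (Each CPU keeps two groups, cc_current and
+		 * cc_previous; the fast path in pool_cache_get_paddr()
+		 * tries both before falling back here, so pc_lock is
+		 * only taken once both are empty.)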
+ */ + if ((cur = cc->cc_current) != NULL) { + KASSERT(cur->pcg_avail == 0); + cur->pcg_next = pc->pc_emptygroups; + pc->pc_emptygroups = cur; + pc->pc_nempty++; } - memset(pcg, 0, sizeof(*pcg)); - simple_lock(&pc->pc_slock); - pc->pc_ngroups++; - LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list); + KASSERT(pcg->pcg_avail == PCG_NOBJECTS); + cc->cc_current = pcg; + pc->pc_fullgroups = pcg->pcg_next; + pc->pc_hits++; + pc->pc_nfull--; + mutex_exit(&pc->pc_lock); + return cc; + } + + /* + * Nothing available locally or in cache. Take the slow + * path: fetch a new object from the pool and construct + * it. + */ + pc->pc_misses++; + mutex_exit(&pc->pc_lock); + pool_cache_cpu_exit(cc, s); + + object = pool_get(&pc->pc_pool, flags); + *objectp = object; + if (object == NULL) + return NULL; + + if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) { + pool_put(&pc->pc_pool, object); + *objectp = NULL; + return NULL; } - pc->pc_nitems++; - pcg_put(pcg, object, pa); + KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) & + (pc->pc_pool.pr_align - 1)) == 0); - if (pcg->pcg_avail == PCG_NOBJECTS) { - LIST_REMOVE(pcg, pcg_list); - LIST_INSERT_HEAD(&pc->pc_fullgroups, pcg, pcg_list); + if (pap != NULL) { +#ifdef POOL_VTOPHYS + *pap = POOL_VTOPHYS(object); +#else + *pap = POOL_PADDR_INVALID; +#endif } - simple_unlock(&pc->pc_slock); + + FREECHECK_OUT(&pc->pc_freecheck, object); + return NULL; } /* - * pool_cache_destruct_object: + * pool_cache_get{,_paddr}: * - * Force destruction of an object and its release back into - * the pool. + * Get an object from a pool cache (optionally returning + * the physical address of the object). */ -void -pool_cache_destruct_object(struct pool_cache *pc, void *object) +void * +pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap) { + pool_cache_cpu_t *cc; + pcg_t *pcg; + void *object; + int s; - if (pc->pc_dtor != NULL) - (*pc->pc_dtor)(pc->pc_arg, object); - pool_put(pc->pc_pool, object); +#ifdef LOCKDEBUG + if (flags & PR_WAITOK) + ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)"); +#endif + + cc = pool_cache_cpu_enter(pc, &s); + do { + /* Try and allocate an object from the current group. */ + pcg = cc->cc_current; + if (pcg != NULL && pcg->pcg_avail > 0) { + object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va; + if (pap != NULL) + *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa; + pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL; + KASSERT(pcg->pcg_avail <= PCG_NOBJECTS); + KASSERT(object != NULL); + cc->cc_hits++; + pool_cache_cpu_exit(cc, &s); + FREECHECK_OUT(&pc->pc_freecheck, object); + return object; + } + + /* + * That failed. If the previous group isn't empty, swap + * it with the current group and allocate from there. + */ + pcg = cc->cc_previous; + if (pcg != NULL && pcg->pcg_avail > 0) { + cc->cc_previous = cc->cc_current; + cc->cc_current = pcg; + continue; + } + + /* + * Can't allocate from either group: try the slow path. + * If get_slow() allocated an object for us, or if + * no more objects are available, it will return NULL. + * Otherwise, we need to retry. 
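
The fast path of pool_cache_get_paddr() is just a LIFO pop from a small array: pcg_avail indexes one past the last live slot, so decrementing it yields the most recently freed, and therefore cache-warm, object. A self-contained sketch of that pop, with hypothetical names mirroring pcgo_va/pcgo_pa:

#include <assert.h>
#include <stddef.h>

#define NOBJECTS 16

struct objpair {			/* mirrors pcgo_va/pcgo_pa */
	void *va;
	unsigned long pa;
};

struct group {
	int avail;			/* number of valid slots */
	struct objpair objects[NOBJECTS];
};

/* Pop the most recently added object; NULL means the group is empty. */
static void *
group_pop(struct group *g, unsigned long *pap)
{
	void *va;

	if (g->avail == 0)
		return NULL;			/* caller takes the slow path */
	g->avail--;
	va = g->objects[g->avail].va;		/* LIFO: warmest object first */
	if (pap != NULL)
		*pap = g->objects[g->avail].pa;
	g->objects[g->avail].va = NULL;		/* poison the slot, as the KASSERTs expect */
	assert(va != NULL);
	return va;
}
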
+	 */
+		cc = pool_cache_get_slow(cc, &s, &object, pap, flags);
+	} while (cc != NULL);
+
+	return object;
 }
 
-static void
-pool_do_cache_invalidate_grouplist(struct pool_cache_grouplist *pcgsl,
-    struct pool_cache *pc, struct pool_pagelist *pq,
-    struct pool_cache_grouplist *pcgdl)
+#if __GNUC_PREREQ__(3, 0)
+__attribute ((noinline))
+#endif
+pool_cache_cpu_t *
+pool_cache_put_slow(pool_cache_cpu_t *cc, int *s, void *object, paddr_t pa)
 {
-	struct pool_cache_group *pcg, *npcg;
-	void *object;
+	pcg_t *pcg, *cur;
+	uint64_t ncsw;
+	pool_cache_t pc;
+
+	pc = cc->cc_cache;
+	cc->cc_misses++;
 
-	for (pcg = LIST_FIRST(pcgsl); pcg != NULL; pcg = npcg) {
-		npcg = LIST_NEXT(pcg, pcg_list);
-		while (pcg->pcg_avail != 0) {
-			pc->pc_nitems--;
-			object = pcg_get(pcg, NULL);
-			if (pc->pc_dtor != NULL)
-				(*pc->pc_dtor)(pc->pc_arg, object);
-			pool_do_put(pc->pc_pool, object, pq);
-		}
-		pc->pc_ngroups--;
-		LIST_REMOVE(pcg, pcg_list);
-		LIST_INSERT_HEAD(pcgdl, pcg, pcg_list);
+	/*
+	 * No free slots locally.  Try to grab an empty, unused
+	 * group from the cache.
+	 */
+	if (!mutex_tryenter(&pc->pc_lock)) {
+		ncsw = curlwp->l_ncsw;
+		mutex_enter(&pc->pc_lock);
+		pc->pc_contended++;
+
+		/*
+		 * If we context switched while locking, then
+		 * our view of the per-CPU data is invalid:
+		 * retry.
+		 */
+		if (curlwp->l_ncsw != ncsw) {
+			mutex_exit(&pc->pc_lock);
+			pool_cache_cpu_exit(cc, s);
+			return pool_cache_cpu_enter(pc, s);
+		}
 	}
-}
 
-static void
-pool_do_cache_invalidate(struct pool_cache *pc, struct pool_pagelist *pq,
-    struct pool_cache_grouplist *pcgl)
-{
+	if ((pcg = pc->pc_emptygroups) != NULL) {
+		/*
+		 * If there's an empty group, release our full
+		 * group back to the cache.  Install the empty
+		 * group as cc_current and return.
+		 */
+		if ((cur = cc->cc_current) != NULL) {
+			KASSERT(cur->pcg_avail == PCG_NOBJECTS);
+			cur->pcg_next = pc->pc_fullgroups;
+			pc->pc_fullgroups = cur;
+			pc->pc_nfull++;
+		}
+		KASSERT(pcg->pcg_avail == 0);
+		cc->cc_current = pcg;
+		pc->pc_emptygroups = pcg->pcg_next;
+		pc->pc_hits++;
+		pc->pc_nempty--;
+		mutex_exit(&pc->pc_lock);
+		return cc;
+	}
 
-	LOCK_ASSERT(simple_lock_held(&pc->pc_slock));
-	LOCK_ASSERT(simple_lock_held(&pc->pc_pool->pr_slock));
+	/*
+	 * Nothing available locally or in cache.  Take the
+	 * slow path and try to allocate a new group that we
+	 * can release to.
+	 */
+	pc->pc_misses++;
+	mutex_exit(&pc->pc_lock);
+	pool_cache_cpu_exit(cc, s);
 
-	pool_do_cache_invalidate_grouplist(&pc->pc_fullgroups, pc, pq, pcgl);
-	pool_do_cache_invalidate_grouplist(&pc->pc_partgroups, pc, pq, pcgl);
+	/*
+	 * If we can't allocate a new group, just throw the
+	 * object away.
+	 */
+	pcg = pool_get(&pcgpool, PR_NOWAIT);
+	if (pcg == NULL) {
+		pool_cache_destruct_object(pc, object);
+		return NULL;
+	}
+#ifdef DIAGNOSTIC
+	memset(pcg, 0, sizeof(*pcg));
+#else
+	pcg->pcg_avail = 0;
+#endif
 
-	KASSERT(LIST_EMPTY(&pc->pc_partgroups));
-	KASSERT(LIST_EMPTY(&pc->pc_fullgroups));
-	KASSERT(pc->pc_nitems == 0);
-}
+	/*
+	 * Add the empty group to the cache and try again.
+	 */
+	mutex_enter(&pc->pc_lock);
+	pcg->pcg_next = pc->pc_emptygroups;
+	pc->pc_emptygroups = pcg;
+	pc->pc_nempty++;
+	mutex_exit(&pc->pc_lock);
+
+	return pool_cache_cpu_enter(pc, s);
+}
 
 /*
- * pool_cache_invalidate:
+ * pool_cache_put{,_paddr}:
  *
- *	Invalidate a pool cache (destruct and release all of the
- *	cached objects).
+ *	Put an object back to the pool cache (optionally caching the
+ *	physical address of the object).
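
Both slow paths make the same exchange with the global depot: hand back the unusable local group (full when freeing, empty when allocating) and install one of the opposite kind. A sketch of the free-side exchange, assumed to run with the depot lock already held (hypothetical names; the kernel additionally maintains pc_nfull/pc_nempty counters):

struct group {
	struct group *next;
	int avail;
};

struct depot {				/* global lists, protected by pc_lock */
	struct group *full;
	struct group *empty;
};

/*
 * Give a full group to the depot and take an empty one in return;
 * returns NULL if the depot has no empty groups, in which case the
 * caller must allocate a fresh group or destruct the object.
 */
static struct group *
depot_exchange_full(struct depot *d, struct group *full)
{
	struct group *empty = d->empty;

	if (empty == NULL)
		return NULL;
	d->empty = empty->next;		/* unlink the replacement */
	if (full != NULL) {
		full->next = d->full;	/* donate our full group */
		d->full = full;
	}
	return empty;
}
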
*/ void -pool_cache_invalidate(struct pool_cache *pc) +pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa) { - struct pool_pagelist pq; - struct pool_cache_grouplist pcgl; - - LIST_INIT(&pq); - LIST_INIT(&pcgl); + pool_cache_cpu_t *cc; + pcg_t *pcg; + int s; - simple_lock(&pc->pc_slock); - simple_lock(&pc->pc_pool->pr_slock); + FREECHECK_IN(&pc->pc_freecheck, object); - pool_do_cache_invalidate(pc, &pq, &pcgl); + cc = pool_cache_cpu_enter(pc, &s); + do { + /* If the current group isn't full, release it there. */ + pcg = cc->cc_current; + if (pcg != NULL && pcg->pcg_avail < PCG_NOBJECTS) { + KASSERT(pcg->pcg_objects[pcg->pcg_avail].pcgo_va + == NULL); + pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object; + pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa; + pcg->pcg_avail++; + cc->cc_hits++; + pool_cache_cpu_exit(cc, &s); + return; + } - simple_unlock(&pc->pc_pool->pr_slock); - simple_unlock(&pc->pc_slock); + /* + * That failed. If the previous group is empty, swap + * it with the current group and try again. + */ + pcg = cc->cc_previous; + if (pcg != NULL && pcg->pcg_avail == 0) { + cc->cc_previous = cc->cc_current; + cc->cc_current = pcg; + continue; + } - pr_pagelist_free(pc->pc_pool, &pq); - pcg_grouplist_free(&pcgl); + /* + * Can't free to either group: try the slow path. + * If put_slow() releases the object for us, it + * will return NULL. Otherwise we need to retry. + */ + cc = pool_cache_put_slow(cc, &s, object, pa); + } while (cc != NULL); } /* - * pool_cache_reclaim: + * pool_cache_xcall: * - * Reclaim a pool cache for pool_reclaim(). + * Transfer objects from the per-CPU cache to the global cache. + * Run within a cross-call thread. */ static void -pool_cache_reclaim(struct pool_cache *pc, struct pool_pagelist *pq, - struct pool_cache_grouplist *pcgl) +pool_cache_xcall(pool_cache_t pc) { - - /* - * We're locking in the wrong order (normally pool_cache -> pool, - * but the pool is already locked when we get here), so we have - * to use trylock. If we can't lock the pool_cache, it's not really - * a big deal here. + pool_cache_cpu_t *cc; + pcg_t *prev, *cur, **list; + int s = 0; /* XXXgcc */ + + cc = pool_cache_cpu_enter(pc, &s); + cur = cc->cc_current; + cc->cc_current = NULL; + prev = cc->cc_previous; + cc->cc_previous = NULL; + pool_cache_cpu_exit(cc, &s); + + /* + * XXXSMP Go to splvm to prevent kernel_lock from being taken, + * because locks at IPL_SOFTXXX are still spinlocks. Does not + * apply to IPL_SOFTBIO. Cross-call threads do not take the + * kernel_lock. 
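
The filing step in pool_cache_xcall() classifies each detached per-CPU group purely by its fill level: full, empty, or partial. That classification reduces to the sketch below (hypothetical names; the hit/miss and nfull/nempty/npart counters are elided):

#define NOBJECTS 16

struct group {
	struct group *next;
	int avail;
};

struct depot {
	struct group *full, *empty, *part;
};

/* Prepend a group to the list matching its fill level. */
static void
depot_file(struct depot *d, struct group *g)
{
	struct group **list;

	if (g->avail == NOBJECTS)
		list = &d->full;
	else if (g->avail == 0)
		list = &d->empty;
	else
		list = &d->part;
	g->next = *list;
	*list = g;
}
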
*/ - if (simple_lock_try(&pc->pc_slock) == 0) - return; - - pool_do_cache_invalidate(pc, pq, pcgl); - - simple_unlock(&pc->pc_slock); + s = splvm(); + mutex_enter(&pc->pc_lock); + if (cur != NULL) { + if (cur->pcg_avail == PCG_NOBJECTS) { + list = &pc->pc_fullgroups; + pc->pc_nfull++; + } else if (cur->pcg_avail == 0) { + list = &pc->pc_emptygroups; + pc->pc_nempty++; + } else { + list = &pc->pc_partgroups; + pc->pc_npart++; + } + cur->pcg_next = *list; + *list = cur; + } + if (prev != NULL) { + if (prev->pcg_avail == PCG_NOBJECTS) { + list = &pc->pc_fullgroups; + pc->pc_nfull++; + } else if (prev->pcg_avail == 0) { + list = &pc->pc_emptygroups; + pc->pc_nempty++; + } else { + list = &pc->pc_partgroups; + pc->pc_npart++; + } + prev->pcg_next = *list; + *list = prev; + } + mutex_exit(&pc->pc_lock); + splx(s); } /* @@ -2177,10 +2728,12 @@ void pool_page_free(struct pool *, void #ifdef POOL_SUBPAGE struct pool_allocator pool_allocator_kmem_fullpage = { pool_page_alloc, pool_page_free, 0, + .pa_backingmapptr = &kmem_map, }; #else struct pool_allocator pool_allocator_kmem = { pool_page_alloc, pool_page_free, 0, + .pa_backingmapptr = &kmem_map, }; #endif @@ -2190,10 +2743,12 @@ void pool_page_free_nointr(struct pool * #ifdef POOL_SUBPAGE struct pool_allocator pool_allocator_nointr_fullpage = { pool_page_alloc_nointr, pool_page_free_nointr, 0, + .pa_backingmapptr = &kernel_map, }; #else struct pool_allocator pool_allocator_nointr = { pool_page_alloc_nointr, pool_page_free_nointr, 0, + .pa_backingmapptr = &kernel_map, }; #endif @@ -2203,6 +2758,7 @@ void pool_subpage_free(struct pool *, vo struct pool_allocator pool_allocator_kmem = { pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE, + .pa_backingmapptr = &kmem_map, }; void *pool_subpage_alloc_nointr(struct pool *, int); @@ -2210,131 +2766,43 @@ void pool_subpage_free_nointr(struct poo struct pool_allocator pool_allocator_nointr = { pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE, + .pa_backingmapptr = &kmem_map, }; #endif /* POOL_SUBPAGE */ -/* - * We have at least three different resources for the same allocation and - * each resource can be depleted. First, we have the ready elements in the - * pool. Then we have the resource (typically a vm_map) for this allocator. - * Finally, we have physical memory. Waiting for any of these can be - * unnecessary when any other is freed, but the kernel doesn't support - * sleeping on multiple wait channels, so we have to employ another strategy. - * - * The caller sleeps on the pool (so that it can be awakened when an item - * is returned to the pool), but we set PA_WANT on the allocator. When a - * page is returned to the allocator and PA_WANT is set, pool_allocator_free - * will wake up all sleeping pools belonging to this allocator. - * - * XXX Thundering herd. - */ -void * -pool_allocator_alloc(struct pool *org, int flags) +static void * +pool_allocator_alloc(struct pool *pp, int flags) { - struct pool_allocator *pa = org->pr_alloc; - struct pool *pp, *start; - int s, freed; + struct pool_allocator *pa = pp->pr_alloc; void *res; - LOCK_ASSERT(!simple_lock_held(&org->pr_slock)); - - do { - if ((res = (*pa->pa_alloc)(org, flags)) != NULL) - return (res); - if ((flags & PR_WAITOK) == 0) { - /* - * We only run the drain hookhere if PR_NOWAIT. - * In other cases, the hook will be run in - * pool_reclaim(). 
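
The allocator definitions above mix positional initializers for the leading members with a C99 designated initializer for pa_backingmapptr; any members left unnamed are implicitly zero-initialized. A standalone illustration of that initializer style, with hypothetical names in place of the pool_allocator fields:

#include <stddef.h>

struct allocator {
	void *(*alloc)(size_t);
	void (*free)(void *);
	int pagesz;
	void *backingmap;		/* stands in for pa_backingmapptr */
	int flags;			/* left unnamed: implicitly zeroed */
};

extern void *my_alloc(size_t);
extern void my_free(void *);
extern void *my_map;

/* First three members positionally, then one by designator. */
struct allocator example_allocator = {
	my_alloc, my_free, 0,
	.backingmap = &my_map,
};
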
- */ - if (org->pr_drain_hook != NULL) { - (*org->pr_drain_hook)(org->pr_drain_hook_arg, - flags); - if ((res = (*pa->pa_alloc)(org, flags)) != NULL) - return (res); - } - break; - } - + res = (*pa->pa_alloc)(pp, flags); + if (res == NULL && (flags & PR_WAITOK) == 0) { /* - * Drain all pools, that use this allocator. - * We do this to reclaim VA space. - * pa_alloc is responsible for waiting for - * physical memory. - * - * XXX We risk looping forever if start if someone - * calls pool_destroy on "start". But there is no - * other way to have potentially sleeping pool_reclaim, - * non-sleeping locks on pool_allocator, and some - * stirring of drained pools in the allocator. - * - * XXX Maybe we should use pool_head_slock for locking - * the allocators? + * We only run the drain hook here if PR_NOWAIT. + * In other cases, the hook will be run in + * pool_reclaim(). */ - freed = 0; - - s = splvm(); - simple_lock(&pa->pa_slock); - pp = start = TAILQ_FIRST(&pa->pa_list); - do { - TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list); - TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list); - simple_unlock(&pa->pa_slock); - freed = pool_reclaim(pp); - simple_lock(&pa->pa_slock); - } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start && - freed == 0); - - if (freed == 0) { - /* - * We set PA_WANT here, the caller will most likely - * sleep waiting for pages (if not, this won't hurt - * that much), and there is no way to set this in - * the caller without violating locking order. - */ - pa->pa_flags |= PA_WANT; + if (pp->pr_drain_hook != NULL) { + (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags); + res = (*pa->pa_alloc)(pp, flags); } - simple_unlock(&pa->pa_slock); - splx(s); - } while (freed); - return (NULL); + } + return res; } -void +static void pool_allocator_free(struct pool *pp, void *v) { struct pool_allocator *pa = pp->pr_alloc; - int s; - - LOCK_ASSERT(!simple_lock_held(&pp->pr_slock)); (*pa->pa_free)(pp, v); - - s = splvm(); - simple_lock(&pa->pa_slock); - if ((pa->pa_flags & PA_WANT) == 0) { - simple_unlock(&pa->pa_slock); - splx(s); - return; - } - - TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) { - simple_lock(&pp->pr_slock); - if ((pp->pr_flags & PR_WANTED) != 0) { - pp->pr_flags &= ~PR_WANTED; - wakeup(pp); - } - simple_unlock(&pp->pr_slock); - } - pa->pa_flags &= ~PA_WANT; - simple_unlock(&pa->pa_slock); - splx(s); } void * pool_page_alloc(struct pool *pp, int flags) { - boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; + bool waitok = (flags & PR_WAITOK) ? true : false; return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok)); } @@ -2349,7 +2817,7 @@ pool_page_free(struct pool *pp, void *v) static void * pool_page_alloc_meta(struct pool *pp, int flags) { - boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; + bool waitok = (flags & PR_WAITOK) ? true : false; return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok)); } @@ -2366,21 +2834,13 @@ pool_page_free_meta(struct pool *pp, voi void * pool_subpage_alloc(struct pool *pp, int flags) { - void *v; - int s; - s = splvm(); - v = pool_get(&psppool, flags); - splx(s); - return v; + return pool_get(&psppool, flags); } void pool_subpage_free(struct pool *pp, void *v) { - int s; - s = splvm(); pool_put(&psppool, v); - splx(s); } /* We don't provide a real nointr allocator. Maybe later. */ @@ -2401,7 +2861,7 @@ pool_subpage_free_nointr(struct pool *pp void * pool_page_alloc_nointr(struct pool *pp, int flags) { - boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; + bool waitok = (flags & PR_WAITOK) ? 
true : false; return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok)); }
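
The rewritten pool_allocator_alloc() collapses the old multi-pool drain loop into a single retry: if the backend fails and the caller cannot wait, run the pool's drain hook once and try the backend again; waiting callers rely on the backend itself to sleep for memory. A self-contained sketch of that shape (hypothetical names, not the kernel API):

#include <stdbool.h>
#include <stddef.h>

struct mypool {
	void *(*backend_alloc)(struct mypool *, bool canwait);
	void (*drain_hook)(void *, bool canwait);	/* may be NULL */
	void *drain_arg;
};

/*
 * Try the backend once; on failure without the wait flag, run the
 * drain hook (if any) and retry exactly once.
 */
static void *
allocator_alloc(struct mypool *pp, bool canwait)
{
	void *res = (*pp->backend_alloc)(pp, canwait);

	if (res == NULL && !canwait && pp->drain_hook != NULL) {
		(*pp->drain_hook)(pp->drain_arg, canwait);
		res = (*pp->backend_alloc)(pp, canwait);
	}
	return res;
}
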