===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v
retrieving revision 1.116.2.1
retrieving revision 1.128.2.4
diff -u -p -r1.116.2.1 -r1.128.2.4
--- src/sys/kern/subr_pool.c	2006/06/19 04:07:16	1.116.2.1
+++ src/sys/kern/subr_pool.c	2007/03/22 12:30:29	1.128.2.4
@@ -1,7 +1,7 @@
-/*	$NetBSD: subr_pool.c,v 1.116.2.1 2006/06/19 04:07:16 chap Exp $	*/
+/*	$NetBSD: subr_pool.c,v 1.128.2.4 2007/03/22 12:30:29 ad Exp $	*/
 
 /*-
- * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
+ * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -38,7 +38,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.116.2.1 2006/06/19 04:07:16 chap Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.128.2.4 2007/03/22 12:30:29 ad Exp $");
 
 #include "opt_pool.h"
 #include "opt_poollog.h"
@@ -53,6 +53,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,
 #include <sys/lock.h>
 #include <sys/pool.h>
 #include <sys/syslog.h>
+#include <sys/debug.h>
 
 #include <uvm/uvm.h>
 
@@ -100,8 +101,8 @@ int pool_inactive_time = 10;
 /* Next candidate for drainage (see pool_drain()) */
 static struct pool	*drainpp;
 
-/* This spin lock protects both pool_head and drainpp. */
-struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
+/* This lock protects both pool_head and drainpp. */
+static kmutex_t pool_head_lock;
 
 typedef uint8_t pool_item_freelist_t;
 
@@ -111,7 +112,7 @@ struct pool_item_header {
 				ph_pagelist;	/* pool page list */
 	SPLAY_ENTRY(pool_item_header)
 				ph_node;	/* Off-page page headers */
-	caddr_t			ph_page;	/* this page's address */
+	void *			ph_page;	/* this page's address */
 	struct timeval		ph_time;	/* last referenced */
 	union {
 		/* !PR_NOTOUCH */
@@ -183,7 +184,7 @@ static void pool_cache_reclaim(struct po
 static void	pcg_grouplist_free(struct pool_cache_grouplist *);
 
 static int	pool_catchup(struct pool *);
-static void	pool_prime_page(struct pool *, caddr_t,
+static void	pool_prime_page(struct pool *, void *,
 		    struct pool_item_header *);
 static void	pool_update_curpage(struct pool *);
 
@@ -325,7 +326,7 @@ pr_item_notouch_index(const struct pool 
 	int idx;
 
 	KASSERT(pp->pr_roflags & PR_NOTOUCH);
-	idx = (cp - ph->ph_page - ph->ph_off) / pp->pr_size;
+	idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
 	KASSERT(idx < pp->pr_itemsperpage);
 	return idx;
 }
@@ -358,16 +359,22 @@ pr_item_notouch_get(const struct pool *p
 	ph->ph_firstfree = freelist[idx];
 	freelist[idx] = PR_INDEX_USED;
 
-	return ph->ph_page + ph->ph_off + idx * pp->pr_size;
+	return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
 }
 
 static inline int
 phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
 {
+
+	/*
+	 * we consider pool_item_header with smaller ph_page bigger.
+	 * (this unnatural ordering is for the benefit of pr_find_pagehead.)
+	 */
+
 	if (a->ph_page < b->ph_page)
-		return (-1);
-	else if (a->ph_page > b->ph_page)
 		return (1);
+	else if (a->ph_page > b->ph_page)
+		return (-1);
 	else
 		return (0);
 }
 
 SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
 SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
 
 /*
- * Return the pool page header based on page address.
+ * Return the pool page header based on item address.
*/ static inline struct pool_item_header * -pr_find_pagehead(struct pool *pp, caddr_t page) +pr_find_pagehead(struct pool *pp, void *v) { struct pool_item_header *ph, tmp; - if ((pp->pr_roflags & PR_PHINPAGE) != 0) - return ((struct pool_item_header *)(page + pp->pr_phoffset)); + if ((pp->pr_roflags & PR_NOALIGN) != 0) { + tmp.ph_page = (void *)(uintptr_t)v; + ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp); + if (ph == NULL) { + ph = SPLAY_ROOT(&pp->pr_phtree); + if (ph != NULL && phtree_compare(&tmp, ph) >= 0) { + ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph); + } + KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0); + } + } else { + void *page = + (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask); + + if ((pp->pr_roflags & PR_PHINPAGE) != 0) { + ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset); + } else { + tmp.ph_page = page; + ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp); + } + } - tmp.ph_page = page; - ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp); + KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) || + ((char *)ph->ph_page <= (char *)v && + (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz)); return ph; } @@ -395,16 +422,12 @@ static void pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq) { struct pool_item_header *ph; - int s; while ((ph = LIST_FIRST(pq)) != NULL) { LIST_REMOVE(ph, ph_pagelist); pool_allocator_free(pp, ph->ph_page); - if ((pp->pr_roflags & PR_PHINPAGE) == 0) { - s = splvm(); + if ((pp->pr_roflags & PR_PHINPAGE) == 0) pool_put(pp->pr_phpool, ph); - splx(s); - } } } @@ -416,7 +439,7 @@ pr_rmpage(struct pool *pp, struct pool_i struct pool_pagelist *pq) { - LOCK_ASSERT(simple_lock_held(&pp->pr_slock)); + KASSERT(mutex_owned(&pp->pr_lock)); /* * If the page was idle, decrement the idle page count. 
@@ -447,14 +470,14 @@ pr_rmpage(struct pool *pp, struct pool_i pool_update_curpage(pp); } -static boolean_t +static bool pa_starved_p(struct pool_allocator *pa) { if (pa->pa_backingmap != NULL) { return vm_map_starved_p(pa->pa_backingmap); } - return FALSE; + return false; } static int @@ -530,10 +553,12 @@ pool_subsystem_init(void) __link_set_decl(pools, struct link_pool_init); struct link_pool_init * const *pi; + mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE); + __link_set_foreach(pi, pools) pool_init((*pi)->pp, (*pi)->size, (*pi)->align, (*pi)->align_offset, (*pi)->flags, (*pi)->wchan, - (*pi)->palloc); + (*pi)->palloc, (*pi)->ipl); while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) { KASSERT(pa->pa_backingmapptr != NULL); @@ -551,13 +576,13 @@ pool_subsystem_init(void) */ void pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags, - const char *wchan, struct pool_allocator *palloc) + const char *wchan, struct pool_allocator *palloc, int ipl) { #ifdef DEBUG struct pool *pp1; #endif size_t trysize, phsize; - int off, slack, s; + int off, slack; KASSERT((1UL << (CHAR_BIT * sizeof(pool_item_freelist_t))) - 2 >= PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1)); @@ -598,7 +623,7 @@ pool_init(struct pool *pp, size_t size, TAILQ_INIT(&palloc->pa_list); - simple_lock_init(&palloc->pa_slock); + mutex_init(&palloc->pa_lock, MUTEX_DRIVER, IPL_VM); palloc->pa_pagemask = ~(palloc->pa_pagesz - 1); palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1; @@ -611,14 +636,13 @@ pool_init(struct pool *pp, size_t size, if (align == 0) align = ALIGN(1); - if (size < sizeof(struct pool_item)) + if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item)) size = sizeof(struct pool_item); size = roundup(size, align); #ifdef DIAGNOSTIC if (size > palloc->pa_pagesz) - panic("pool_init: pool item size (%lu) too large", - (u_long)size); + panic("pool_init: pool item size (%zu) too large", size); #endif /* @@ -649,6 +673,7 @@ pool_init(struct pool *pp, size_t size, pp->pr_hardlimit_warning_last.tv_usec = 0; pp->pr_drain_hook = NULL; pp->pr_drain_hook_arg = NULL; + pp->pr_freecheck = NULL; /* * Decide whether to put the page header off page to avoid @@ -667,7 +692,7 @@ pool_init(struct pool *pp, size_t size, /* See the comment below about reserved bytes. 
*/ trysize = palloc->pa_pagesz - ((align - ioff) % align); phsize = ALIGN(sizeof(struct pool_item_header)); - if ((pp->pr_roflags & PR_NOTOUCH) == 0 && + if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 && (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) || trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) { /* Use the end of the page for the page header */ @@ -742,7 +767,9 @@ pool_init(struct pool *pp, size_t size, pp->pr_entered_file = NULL; pp->pr_entered_line = 0; - simple_lock_init(&pp->pr_slock); + mutex_init(&pp->pr_lock, MUTEX_DRIVER, ipl); + cv_init(&pp->pr_cv, wchan); + pp->pr_ipl = ipl; /* * Initialize private page header pool and cache magazine pool if we @@ -765,27 +792,31 @@ pool_init(struct pool *pp, size_t size, + nelem * sizeof(pool_item_freelist_t); } pool_init(&phpool[idx], sz, 0, 0, 0, - phpool_names[idx], &pool_allocator_meta); + phpool_names[idx], &pool_allocator_meta, IPL_VM); } #ifdef POOL_SUBPAGE pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0, - PR_RECURSIVE, "psppool", &pool_allocator_meta); + PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM); #endif pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0, - 0, "pcgpool", &pool_allocator_meta); + 0, "pcgpool", &pool_allocator_meta, IPL_VM); + } + + if (__predict_true(!cold)) { + /* Insert into the list of all pools. */ + mutex_enter(&pool_head_lock); + LIST_INSERT_HEAD(&pool_head, pp, pr_poollist); + mutex_exit(&pool_head_lock); + + /* Insert this into the list of pools using this allocator. */ + mutex_enter(&palloc->pa_lock); + TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list); + mutex_exit(&palloc->pa_lock); + } else { + LIST_INSERT_HEAD(&pool_head, pp, pr_poollist); + TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list); } - /* Insert into the list of all pools. */ - simple_lock(&pool_head_slock); - LIST_INSERT_HEAD(&pool_head, pp, pr_poollist); - simple_unlock(&pool_head_slock); - - /* Insert this into the list of pools using this allocator. */ - s = splvm(); - simple_lock(&palloc->pa_slock); - TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list); - simple_unlock(&palloc->pa_slock); - splx(s); pool_reclaim_register(pp); } @@ -797,25 +828,21 @@ pool_destroy(struct pool *pp) { struct pool_pagelist pq; struct pool_item_header *ph; - int s; /* Remove from global pool list */ - simple_lock(&pool_head_slock); + mutex_enter(&pool_head_lock); LIST_REMOVE(pp, pr_poollist); if (drainpp == pp) drainpp = NULL; - simple_unlock(&pool_head_slock); + mutex_exit(&pool_head_lock); /* Remove this pool from its allocator's list of pools. 
*/ pool_reclaim_unregister(pp); - s = splvm(); - simple_lock(&pp->pr_alloc->pa_slock); + mutex_enter(&pp->pr_alloc->pa_lock); TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list); - simple_unlock(&pp->pr_alloc->pa_slock); - splx(s); + mutex_exit(&pp->pr_alloc->pa_lock); - s = splvm(); - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); KASSERT(LIST_EMPTY(&pp->pr_cachelist)); @@ -835,8 +862,7 @@ pool_destroy(struct pool *pp) while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL) pr_rmpage(pp, ph, &pq); - simple_unlock(&pp->pr_slock); - splx(s); + mutex_exit(&pp->pr_lock); pr_pagelist_free(pp, &pq); @@ -844,6 +870,9 @@ pool_destroy(struct pool *pp) if ((pp->pr_roflags & PR_LOGGING) != 0) free(pp->pr_log, M_TEMP); #endif + + cv_destroy(&pp->pr_cv); + mutex_destroy(&pp->pr_lock); } void @@ -860,20 +889,14 @@ pool_set_drain_hook(struct pool *pp, voi } static struct pool_item_header * -pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags) +pool_alloc_item_header(struct pool *pp, void *storage, int flags) { struct pool_item_header *ph; - int s; - - LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0); if ((pp->pr_roflags & PR_PHINPAGE) != 0) - ph = (struct pool_item_header *) (storage + pp->pr_phoffset); - else { - s = splvm(); + ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset); + else ph = pool_get(pp->pr_phpool, flags); - splx(s); - } return (ph); } @@ -903,11 +926,10 @@ pool_get(struct pool *pp, int flags) #endif /* DIAGNOSTIC */ #ifdef LOCKDEBUG if (flags & PR_WAITOK) - simple_lock_only_held(NULL, "pool_get(PR_WAITOK)"); - SCHED_ASSERT_UNLOCKED(); + ASSERT_SLEEPABLE(NULL, "pool_get(PR_WAITOK)"); #endif - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); pr_enter(pp, file, line); startover: @@ -919,7 +941,7 @@ pool_get(struct pool *pp, int flags) #ifdef DIAGNOSTIC if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) { pr_leave(pp); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); panic("pool_get: %s: crossed hard limit", pp->pr_wchan); } #endif @@ -931,9 +953,9 @@ pool_get(struct pool *pp, int flags) * and check the hardlimit condition again. 
*/ pr_leave(pp); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags); - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); pr_enter(pp, file, line); if (pp->pr_nout < pp->pr_hardlimit) goto startover; @@ -946,7 +968,7 @@ pool_get(struct pool *pp, int flags) */ pp->pr_flags |= PR_WANTED; pr_leave(pp); - ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock); + cv_wait(&pp->pr_cv, &pp->pr_lock); pr_enter(pp, file, line); goto startover; } @@ -962,7 +984,7 @@ pool_get(struct pool *pp, int flags) pp->pr_nfail++; pr_leave(pp); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); return (NULL); } @@ -977,7 +999,7 @@ pool_get(struct pool *pp, int flags) #ifdef DIAGNOSTIC if (pp->pr_nitems != 0) { - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); printf("pool_get: %s: curpage NULL, nitems %u\n", pp->pr_wchan, pp->pr_nitems); panic("pool_get: nitems inconsistent"); @@ -1004,7 +1026,7 @@ pool_get(struct pool *pp, int flags) pp->pr_nfail++; pr_leave(pp); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); return (NULL); } @@ -1015,7 +1037,7 @@ pool_get(struct pool *pp, int flags) #ifdef DIAGNOSTIC if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) { pr_leave(pp); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); panic("pool_get: %s: page empty", pp->pr_wchan); } #endif @@ -1027,13 +1049,13 @@ pool_get(struct pool *pp, int flags) v = pi = LIST_FIRST(&ph->ph_itemlist); if (__predict_false(v == NULL)) { pr_leave(pp); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); panic("pool_get: %s: page empty", pp->pr_wchan); } #ifdef DIAGNOSTIC if (__predict_false(pp->pr_nitems == 0)) { pr_leave(pp); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); printf("pool_get: %s: items on itemlist, nitems %u\n", pp->pr_wchan, pp->pr_nitems); panic("pool_get: nitems inconsistent"); @@ -1080,7 +1102,7 @@ pool_get(struct pool *pp, int flags) if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 && !LIST_EMPTY(&ph->ph_itemlist))) { pr_leave(pp); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); panic("pool_get: %s: nmissing inconsistent", pp->pr_wchan); } @@ -1109,7 +1131,9 @@ pool_get(struct pool *pp, int flags) */ } - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); + KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0); + FREECHECK_OUT(&pp->pr_freecheck, v); return (v); } @@ -1121,12 +1145,9 @@ pool_do_put(struct pool *pp, void *v, st { struct pool_item *pi = v; struct pool_item_header *ph; - caddr_t page; - LOCK_ASSERT(simple_lock_held(&pp->pr_slock)); - SCHED_ASSERT_UNLOCKED(); - - page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask); + KASSERT(mutex_owned(&pp->pr_lock)); + FREECHECK_IN(&pp->pr_freecheck, v); #ifdef DIAGNOSTIC if (__predict_false(pp->pr_nout == 0)) { @@ -1136,18 +1157,11 @@ pool_do_put(struct pool *pp, void *v, st } #endif - if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) { + if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) { pr_printlog(pp, NULL, printf); panic("pool_put: %s: page header missing", pp->pr_wchan); } -#ifdef LOCKDEBUG - /* - * Check if we're freeing a locked simple lock. - */ - simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size); -#endif - /* * Return to item list. 
*/ @@ -1183,7 +1197,7 @@ pool_do_put(struct pool *pp, void *v, st pp->pr_flags &= ~PR_WANTED; if (ph->ph_nmissing == 0) pp->pr_nidle++; - wakeup((caddr_t)pp); + cv_broadcast(&pp->pr_cv); return; } @@ -1245,7 +1259,7 @@ _pool_put(struct pool *pp, void *v, cons LIST_INIT(&pq); - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); pr_enter(pp, file, line); pr_log(pp, v, PRLOG_PUT, file, line); @@ -1253,7 +1267,7 @@ _pool_put(struct pool *pp, void *v, cons pool_do_put(pp, v, &pq); pr_leave(pp); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); pr_pagelist_free(pp, &pq); } @@ -1267,9 +1281,9 @@ pool_put(struct pool *pp, void *v) LIST_INIT(&pq); - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); pool_do_put(pp, v, &pq); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); pr_pagelist_free(pp, &pq); } @@ -1292,7 +1306,7 @@ pool_grow(struct pool *pp, int flags) struct pool_item_header *ph = NULL; char *cp; - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); cp = pool_allocator_alloc(pp, flags); if (__predict_true(cp != NULL)) { ph = pool_alloc_item_header(pp, cp, flags); @@ -1301,11 +1315,11 @@ pool_grow(struct pool *pp, int flags) if (cp != NULL) { pool_allocator_free(pp, cp); } - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); return ENOMEM; } - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); pool_prime_page(pp, cp, ph); pp->pr_npagealloc++; return 0; @@ -1320,7 +1334,7 @@ pool_prime(struct pool *pp, int n) int newpages; int error = 0; - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; @@ -1335,7 +1349,7 @@ pool_prime(struct pool *pp, int n) if (pp->pr_minpages >= pp->pr_maxpages) pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */ - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); return error; } @@ -1345,18 +1359,19 @@ pool_prime(struct pool *pp, int n) * Note, we must be called with the pool descriptor LOCKED. */ static void -pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph) +pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph) { struct pool_item *pi; - caddr_t cp = storage; - unsigned int align = pp->pr_align; - unsigned int ioff = pp->pr_itemoffset; + void *cp = storage; + const unsigned int align = pp->pr_align; + const unsigned int ioff = pp->pr_itemoffset; int n; - LOCK_ASSERT(simple_lock_held(&pp->pr_slock)); + KASSERT(mutex_owned(&pp->pr_lock)); #ifdef DIAGNOSTIC - if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0) + if ((pp->pr_roflags & PR_NOALIGN) == 0 && + ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0) panic("pool_prime_page: %s: unaligned page", pp->pr_wchan); #endif @@ -1376,7 +1391,7 @@ pool_prime_page(struct pool *pp, caddr_t /* * Color this page. */ - cp = (caddr_t)(cp + pp->pr_curcolor); + cp = (char *)cp + pp->pr_curcolor; if ((pp->pr_curcolor += align) > pp->pr_maxcolor) pp->pr_curcolor = 0; @@ -1384,7 +1399,9 @@ pool_prime_page(struct pool *pp, caddr_t * Adjust storage to apply aligment to `pr_itemoffset' in each item. */ if (ioff != 0) - cp = (caddr_t)(cp + (align - ioff)); + cp = (char *)cp + align - ioff; + + KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0); /* * Insert remaining chunks on the bucket list. 
@@ -1396,7 +1413,7 @@ pool_prime_page(struct pool *pp, caddr_t pool_item_freelist_t *freelist = PR_FREELIST(ph); int i; - ph->ph_off = cp - storage; + ph->ph_off = (char *)cp - (char *)storage; ph->ph_firstfree = 0; for (i = 0; i < n - 1; i++) freelist[i] = i + 1; @@ -1412,7 +1429,9 @@ pool_prime_page(struct pool *pp, caddr_t #ifdef DIAGNOSTIC pi->pi_magic = PI_MAGIC; #endif - cp = (caddr_t)(cp + pp->pr_size); + cp = (char *)cp + pp->pr_size; + + KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0); } } @@ -1463,7 +1482,7 @@ void pool_setlowat(struct pool *pp, int n) { - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); pp->pr_minitems = n; pp->pr_minpages = (n == 0) @@ -1479,27 +1498,27 @@ pool_setlowat(struct pool *pp, int n) */ } - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); } void pool_sethiwat(struct pool *pp, int n) { - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); pp->pr_maxpages = (n == 0) ? 0 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); } void pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap) { - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); pp->pr_hardlimit = n; pp->pr_hardlimit_warning = warnmess; @@ -1515,7 +1534,7 @@ pool_sethardlimit(struct pool *pp, int n ? 0 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); } /* @@ -1541,7 +1560,7 @@ pool_reclaim(struct pool *pp) (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT); } - if (simple_lock_try(&pp->pr_slock) == 0) + if (mutex_tryenter(&pp->pr_lock) == 0) return (0); pr_enter(pp, file, line); @@ -1581,7 +1600,7 @@ pool_reclaim(struct pool *pp) } pr_leave(pp); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); if (LIST_EMPTY(&pq) && LIST_EMPTY(&pcgl)) return 0; @@ -1602,8 +1621,8 @@ pool_drain(void *arg) int s; pp = NULL; - s = splvm(); - simple_lock(&pool_head_slock); + s = splvm(); /* XXX why? */ + mutex_enter(&pool_head_lock); if (drainpp == NULL) { drainpp = LIST_FIRST(&pool_head); } @@ -1611,7 +1630,7 @@ pool_drain(void *arg) pp = drainpp; drainpp = LIST_NEXT(pp, pr_poollist); } - simple_unlock(&pool_head_slock); + mutex_exit(&pool_head_lock); if (pp) pool_reclaim(pp); splx(s); @@ -1623,18 +1642,14 @@ pool_drain(void *arg) void pool_print(struct pool *pp, const char *modif) { - int s; - s = splvm(); - if (simple_lock_try(&pp->pr_slock) == 0) { + if (mutex_tryenter(&pp->pr_lock) == 0) { printf("pool %s is locked; try again later\n", pp->pr_wchan); - splx(s); return; } pool_print1(pp, modif, printf); - simple_unlock(&pp->pr_slock); - splx(s); + mutex_exit(&pp->pr_lock); } void @@ -1642,10 +1657,10 @@ pool_printall(const char *modif, void (* { struct pool *pp; - if (simple_lock_try(&pool_head_slock) == 0) { + if (mutex_tryenter(&pool_head_lock) == 0) { (*pr)("WARNING: pool_head_slock is locked\n"); } else { - simple_unlock(&pool_head_slock); + mutex_exit(&pool_head_lock); } LIST_FOREACH(pp, &pool_head, pr_poollist) { @@ -1667,14 +1682,14 @@ pool_printit(struct pool *pp, const char * other processors should be paused. We can skip locking * the pool in this case. * - * We do a simple_lock_try() just to print the lock + * We do a mutex_tryenter() just to print the lock * status, however. 
*/ - if (simple_lock_try(&pp->pr_slock) == 0) + if (mutex_tryenter(&pp->pr_lock) == 0) (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan); else - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); pool_print1(pp, modif, pr); } @@ -1763,8 +1778,9 @@ pool_print1(struct pool *pp, const char (*pr)("\n"); if ((pp->pr_roflags & PR_LOGGING) == 0) (*pr)("\tno log\n"); - else + else { pr_printlog(pp, NULL, pr); + } skip_log: if (print_cache == 0) @@ -1812,19 +1828,21 @@ static int pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph) { struct pool_item *pi; - caddr_t page; + void *page; int n; - page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask); - if (page != ph->ph_page && - (pp->pr_roflags & PR_PHINPAGE) != 0) { - if (label != NULL) - printf("%s: ", label); - printf("pool(%p:%s): page inconsistency: page %p;" - " at page head addr %p (p %p)\n", pp, - pp->pr_wchan, ph->ph_page, - ph, page); - return 1; + if ((pp->pr_roflags & PR_NOALIGN) == 0) { + page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask); + if (page != ph->ph_page && + (pp->pr_roflags & PR_PHINPAGE) != 0) { + if (label != NULL) + printf("%s: ", label); + printf("pool(%p:%s): page inconsistency: page %p;" + " at page head addr %p (p %p)\n", pp, + pp->pr_wchan, ph->ph_page, + ph, page); + return 1; + } } if ((pp->pr_roflags & PR_NOTOUCH) != 0) @@ -1839,15 +1857,16 @@ pool_chk_page(struct pool *pp, const cha if (label != NULL) printf("%s: ", label); printf("pool(%s): free list modified: magic=%x;" - " page %p; item ordinal %d;" - " addr %p (p %p)\n", + " page %p; item ordinal %d; addr %p\n", pp->pr_wchan, pi->pi_magic, ph->ph_page, - n, pi, page); + n, pi); panic("pool"); } #endif - page = - (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask); + if ((pp->pr_roflags & PR_NOALIGN) != 0) { + continue; + } + page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask); if (page == ph->ph_page) continue; @@ -1869,7 +1888,7 @@ pool_chk(struct pool *pp, const char *la struct pool_item_header *ph; int r = 0; - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) { r = pool_chk_page(pp, label, ph); if (r) { @@ -1890,7 +1909,7 @@ pool_chk(struct pool *pp, const char *la } out: - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); return (r); } @@ -1912,7 +1931,7 @@ pool_cache_init(struct pool_cache *pc, s LIST_INIT(&pc->pc_emptygroups); LIST_INIT(&pc->pc_fullgroups); LIST_INIT(&pc->pc_partgroups); - simple_lock_init(&pc->pc_slock); + mutex_init(&pc->pc_lock, MUTEX_DRIVER, pp->pr_ipl); pc->pc_pool = pp; @@ -1927,9 +1946,12 @@ pool_cache_init(struct pool_cache *pc, s pc->pc_nitems = 0; - simple_lock(&pp->pr_slock); - LIST_INSERT_HEAD(&pp->pr_cachelist, pc, pc_poollist); - simple_unlock(&pp->pr_slock); + if (__predict_true(!cold)) { + mutex_enter(&pp->pr_lock); + LIST_INSERT_HEAD(&pp->pr_cachelist, pc, pc_poollist); + mutex_exit(&pp->pr_lock); + } else + LIST_INSERT_HEAD(&pp->pr_cachelist, pc, pc_poollist); } /* @@ -1946,9 +1968,11 @@ pool_cache_destroy(struct pool_cache *pc pool_cache_invalidate(pc); /* ...and remove it from the pool's cache list. 
*/ - simple_lock(&pp->pr_slock); + mutex_enter(&pp->pr_lock); LIST_REMOVE(pc, pc_poollist); - simple_unlock(&pp->pr_slock); + mutex_exit(&pp->pr_lock); + + mutex_destroy(&pc->pc_lock); } static inline void * @@ -1987,14 +2011,11 @@ static void pcg_grouplist_free(struct pool_cache_grouplist *pcgl) { struct pool_cache_group *pcg; - int s; - s = splvm(); while ((pcg = LIST_FIRST(pcgl)) != NULL) { LIST_REMOVE(pcg, pcg_list); pool_put(&pcgpool, pcg); } - splx(s); } /* @@ -2011,10 +2032,10 @@ pool_cache_get_paddr(struct pool_cache * #ifdef LOCKDEBUG if (flags & PR_WAITOK) - simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)"); + ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)"); #endif - simple_lock(&pc->pc_slock); + mutex_enter(&pc->pc_lock); pcg = LIST_FIRST(&pc->pc_partgroups); if (pcg == NULL) { @@ -2033,7 +2054,7 @@ pool_cache_get_paddr(struct pool_cache * * when the object is freed back to the cache. */ pc->pc_misses++; - simple_unlock(&pc->pc_slock); + mutex_exit(&pc->pc_lock); object = pool_get(pc->pc_pool, flags); if (object != NULL && pc->pc_ctor != NULL) { if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) { @@ -2041,6 +2062,8 @@ pool_cache_get_paddr(struct pool_cache * return (NULL); } } + KASSERT((((vaddr_t)object + pc->pc_pool->pr_itemoffset) & + (pc->pc_pool->pr_align - 1)) == 0); if (object != NULL && pap != NULL) { #ifdef POOL_VTOPHYS *pap = POOL_VTOPHYS(object); @@ -2048,6 +2071,8 @@ pool_cache_get_paddr(struct pool_cache * *pap = POOL_PADDR_INVALID; #endif } + + FREECHECK_OUT(&pc->pc_freecheck, object); return (object); } @@ -2059,8 +2084,11 @@ pool_cache_get_paddr(struct pool_cache * LIST_REMOVE(pcg, pcg_list); LIST_INSERT_HEAD(&pc->pc_emptygroups, pcg, pcg_list); } - simple_unlock(&pc->pc_slock); + mutex_exit(&pc->pc_lock); + KASSERT((((vaddr_t)object + pc->pc_pool->pr_itemoffset) & + (pc->pc_pool->pr_align - 1)) == 0); + FREECHECK_OUT(&pc->pc_freecheck, object); return (object); } @@ -2074,13 +2102,14 @@ void pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa) { struct pool_cache_group *pcg; - int s; + + FREECHECK_IN(&pc->pc_freecheck, object); if (__predict_false((pc->pc_pool->pr_flags & PR_WANTED) != 0)) { goto destruct; } - simple_lock(&pc->pc_slock); + mutex_enter(&pc->pc_lock); pcg = LIST_FIRST(&pc->pc_partgroups); if (pcg == NULL) { @@ -2096,10 +2125,8 @@ pool_cache_put_paddr(struct pool_cache * * No empty groups to free the object to. Attempt to * allocate one. 
*/ - simple_unlock(&pc->pc_slock); - s = splvm(); + mutex_exit(&pc->pc_lock); pcg = pool_get(&pcgpool, PR_NOWAIT); - splx(s); if (pcg == NULL) { destruct: @@ -2111,7 +2138,7 @@ destruct: return; } memset(pcg, 0, sizeof(*pcg)); - simple_lock(&pc->pc_slock); + mutex_enter(&pc->pc_lock); pc->pc_ngroups++; LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list); } @@ -2123,7 +2150,7 @@ destruct: LIST_REMOVE(pcg, pcg_list); LIST_INSERT_HEAD(&pc->pc_fullgroups, pcg, pcg_list); } - simple_unlock(&pc->pc_slock); + mutex_exit(&pc->pc_lock); } /* @@ -2169,8 +2196,8 @@ pool_do_cache_invalidate(struct pool_cac struct pool_cache_grouplist *pcgl) { - LOCK_ASSERT(simple_lock_held(&pc->pc_slock)); - LOCK_ASSERT(simple_lock_held(&pc->pc_pool->pr_slock)); + KASSERT(mutex_owned(&pc->pc_lock)); + KASSERT(mutex_owned(&pc->pc_pool->pr_lock)); pool_do_cache_invalidate_grouplist(&pc->pc_fullgroups, pc, pq, pcgl); pool_do_cache_invalidate_grouplist(&pc->pc_partgroups, pc, pq, pcgl); @@ -2195,13 +2222,13 @@ pool_cache_invalidate(struct pool_cache LIST_INIT(&pq); LIST_INIT(&pcgl); - simple_lock(&pc->pc_slock); - simple_lock(&pc->pc_pool->pr_slock); + mutex_enter(&pc->pc_lock); + mutex_enter(&pc->pc_pool->pr_lock); pool_do_cache_invalidate(pc, &pq, &pcgl); - simple_unlock(&pc->pc_pool->pr_slock); - simple_unlock(&pc->pc_slock); + mutex_exit(&pc->pc_pool->pr_lock); + mutex_exit(&pc->pc_lock); pr_pagelist_free(pc->pc_pool, &pq); pcg_grouplist_free(&pcgl); @@ -2223,12 +2250,12 @@ pool_cache_reclaim(struct pool_cache *pc * to use trylock. If we can't lock the pool_cache, it's not really * a big deal here. */ - if (simple_lock_try(&pc->pc_slock) == 0) + if (mutex_tryenter(&pc->pc_lock) == 0) return; pool_do_cache_invalidate(pc, pq, pcgl); - simple_unlock(&pc->pc_slock); + mutex_exit(&pc->pc_lock); } /* @@ -2298,8 +2325,6 @@ pool_allocator_alloc(struct pool *pp, in struct pool_allocator *pa = pp->pr_alloc; void *res; - LOCK_ASSERT(!simple_lock_held(&pp->pr_slock)); - res = (*pa->pa_alloc)(pp, flags); if (res == NULL && (flags & PR_WAITOK) == 0) { /* @@ -2320,15 +2345,13 @@ pool_allocator_free(struct pool *pp, voi { struct pool_allocator *pa = pp->pr_alloc; - LOCK_ASSERT(!simple_lock_held(&pp->pr_slock)); - (*pa->pa_free)(pp, v); } void * pool_page_alloc(struct pool *pp, int flags) { - boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; + bool waitok = (flags & PR_WAITOK) ? true : false; return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok)); } @@ -2343,7 +2366,7 @@ pool_page_free(struct pool *pp, void *v) static void * pool_page_alloc_meta(struct pool *pp, int flags) { - boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; + bool waitok = (flags & PR_WAITOK) ? true : false; return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok)); } @@ -2360,21 +2383,13 @@ pool_page_free_meta(struct pool *pp, voi void * pool_subpage_alloc(struct pool *pp, int flags) { - void *v; - int s; - s = splvm(); - v = pool_get(&psppool, flags); - splx(s); - return v; + return pool_get(&psppool, flags); } void pool_subpage_free(struct pool *pp, void *v) { - int s; - s = splvm(); pool_put(&psppool, v); - splx(s); } /* We don't provide a real nointr allocator. Maybe later. */ @@ -2395,7 +2410,7 @@ pool_subpage_free_nointr(struct pool *pp void * pool_page_alloc_nointr(struct pool *pp, int flags) { - boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; + bool waitok = (flags & PR_WAITOK) ? true : false; return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok)); }
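
The externally visible API change in this revision is the new final `ipl' argument to pool_init(): each pool now embeds its own kmutex_t (pr_lock, plus the pr_cv condvar) initialized at the interrupt priority level the caller names, replacing the per-pool simplelock and the splvm()/splx() pairs formerly needed around the allocator paths. A minimal kernel-context sketch of a caller update follows; the `foo' structure, the "foopl" wchan string, and the choice of pool_allocator_nointr are illustrative, not taken from this diff.

    #include <sys/pool.h>

    struct foo {
            int     f_val;
    };

    static struct pool foo_pool;        /* hypothetical example pool */

    void
    foo_subsystem_init(void)
    {
            /*
             * Before 1.128.2.4 there was no IPL argument; the pool
             * serialized itself with an internal simplelock:
             *
             *      pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
             *          "foopl", &pool_allocator_nointr);
             */

            /*
             * After: the caller states the highest IPL from which the
             * pool will be used; IPL_NONE for thread context only.
             */
            pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
                "foopl", &pool_allocator_nointr, IPL_NONE);
    }

Pools touched from interrupt context pass the highest relevant IPL instead, as the internal phpool/pcgpool/psppool initializations above now do with IPL_VM.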
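The blocking path converts mechanically throughout the file: ltsleep() on the pool address under pr_slock becomes cv_wait() on the new pr_cv under pr_lock, and the wakeup() in pool_do_put() becomes cv_broadcast(). The shape, simplified from the pool_get() and pool_do_put() hunks above (a sketch, not a verbatim excerpt):

    #include <sys/pool.h>

    /* Waiter side, as in the pool_get() hard-limit path. */
    static void
    example_wait_for_item(struct pool *pp)
    {
            mutex_enter(&pp->pr_lock);      /* was simple_lock(&pp->pr_slock) */
            while (pp->pr_nout == pp->pr_hardlimit) {
                    pp->pr_flags |= PR_WANTED;
                    /* was: ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock); */
                    cv_wait(&pp->pr_cv, &pp->pr_lock);
            }
            mutex_exit(&pp->pr_lock);
    }

    /* Waker side, as in pool_do_put(). */
    static void
    example_release_item(struct pool *pp)
    {
            mutex_enter(&pp->pr_lock);
            if (pp->pr_flags & PR_WANTED) {
                    pp->pr_flags &= ~PR_WANTED;
                    /* was: wakeup(pp); */
                    cv_broadcast(&pp->pr_cv);
            }
            mutex_exit(&pp->pr_lock);
    }

cv_wait() atomically drops pr_lock while asleep and reacquires it before returning, which is why the real pool_get() still re-tests its limits (the goto startover above) after waking.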
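The inverted phtree_compare() ordering ("smaller ph_page sorts bigger") exists for the PR_NOALIGN case of pr_find_pagehead(): after a failed SPLAY_FIND() on an item address, the splayed root is an in-order neighbor of that address, and at most one SPLAY_NEXT() step lands on the header with the greatest ph_page not above the item. A userland sketch of that lookup, assuming the BSD <sys/tree.h> SPLAY macros (available via libbsd on non-BSD systems) and illustrative 0x1000-byte pages:

    #include <sys/tree.h>
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct hdr {
            SPLAY_ENTRY(hdr) node;
            uintptr_t page;                 /* page base address */
    };

    static int
    hdr_cmp(struct hdr *a, struct hdr *b)
    {
            /* Smaller page sorts as "bigger", as in phtree_compare(). */
            if (a->page < b->page)
                    return 1;
            else if (a->page > b->page)
                    return -1;
            return 0;
    }

    static SPLAY_HEAD(htree, hdr) tree = SPLAY_INITIALIZER(&tree);
    SPLAY_PROTOTYPE(htree, hdr, node, hdr_cmp);
    SPLAY_GENERATE(htree, hdr, node, hdr_cmp);

    /* Find the header whose page covers item address v,
     * mirroring the PR_NOALIGN branch of pr_find_pagehead(). */
    static struct hdr *
    find(uintptr_t v)
    {
            struct hdr tmp, *h;

            tmp.page = v;
            h = SPLAY_FIND(htree, &tree, &tmp);
            if (h == NULL) {
                    h = SPLAY_ROOT(&tree);
                    if (h != NULL && hdr_cmp(&tmp, h) >= 0)
                            h = SPLAY_NEXT(htree, &tree, h);
            }
            return h;
    }

    int
    main(void)
    {
            static struct hdr h1 = { .page = 0x1000 };
            static struct hdr h2 = { .page = 0x2000 };

            SPLAY_INSERT(htree, &tree, &h1);
            SPLAY_INSERT(htree, &tree, &h2);

            assert(find(0x1500)->page == 0x1000);   /* inside first page */
            assert(find(0x2010)->page == 0x2000);   /* inside second page */
            printf("ok\n");
            return 0;
    }

With the natural (ascending) ordering, a failed find could leave the root on either side of the item with no single step guaranteed to reach the covering header; descending order plus the one conditional SPLAY_NEXT() gives the bounded lookup that the KASSERT in pr_find_pagehead() checks.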