Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v
rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v: warning: Unknown phrases like `commitid ...;' are present.
retrieving revision 1.122.2.3
retrieving revision 1.129.12.1
diff -u -p -r1.122.2.3 -r1.129.12.1
--- src/sys/kern/subr_pool.c	2007/01/19 20:49:54	1.122.2.3
+++ src/sys/kern/subr_pool.c	2007/09/03 16:48:49	1.129.12.1
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_pool.c,v 1.122.2.3 2007/01/19 20:49:54 ad Exp $	*/
+/*	$NetBSD: subr_pool.c,v 1.129.12.1 2007/09/03 16:48:49 jmcneill Exp $	*/
 
 /*-
  * Copyright (c) 1997, 1999, 2000, 2002 The NetBSD Foundation, Inc.
@@ -38,7 +38,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.122.2.3 2007/01/19 20:49:54 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.129.12.1 2007/09/03 16:48:49 jmcneill Exp $");
 
 #include "opt_pool.h"
 #include "opt_poollog.h"
@@ -112,7 +112,7 @@ struct pool_item_header {
 				ph_pagelist;	/* pool page list */
 	SPLAY_ENTRY(pool_item_header)
 				ph_node;	/* Off-page page headers */
-	caddr_t			ph_page;	/* this page's address */
+	void *			ph_page;	/* this page's address */
 	struct timeval		ph_time;	/* last referenced */
 	union {
 		/* !PR_NOTOUCH */
@@ -184,7 +184,7 @@ static void pool_cache_reclaim(struct po
 static void	pcg_grouplist_free(struct pool_cache_grouplist *);
 
 static int	pool_catchup(struct pool *);
-static void	pool_prime_page(struct pool *, caddr_t,
+static void	pool_prime_page(struct pool *, void *,
 		    struct pool_item_header *);
 static void	pool_update_curpage(struct pool *);
 
@@ -326,7 +326,7 @@ pr_item_notouch_index(const struct pool
 	int idx;
 
 	KASSERT(pp->pr_roflags & PR_NOTOUCH);
-	idx = (cp - ph->ph_page - ph->ph_off) / pp->pr_size;
+	idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
 	KASSERT(idx < pp->pr_itemsperpage);
 	return idx;
 }
@@ -359,7 +359,7 @@ pr_item_notouch_get(const struct pool *p
 	ph->ph_firstfree = freelist[idx];
 	freelist[idx] = PR_INDEX_USED;
 
-	return ph->ph_page + ph->ph_off + idx * pp->pr_size;
+	return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
 }
 
 static inline int
@@ -391,7 +391,7 @@ pr_find_pagehead(struct pool *pp, void *
 	struct pool_item_header *ph, tmp;
 
 	if ((pp->pr_roflags & PR_NOALIGN) != 0) {
-		tmp.ph_page = (caddr_t)(uintptr_t)v;
+		tmp.ph_page = (void *)(uintptr_t)v;
 		ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
 		if (ph == NULL) {
 			ph = SPLAY_ROOT(&pp->pr_phtree);
@@ -401,11 +401,11 @@ pr_find_pagehead(struct pool *pp, void *
 			KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
 		}
 	} else {
-		caddr_t page =
-		    (caddr_t)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
+		void *page =
+		    (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
 
 		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
-			ph = (void *)(page + pp->pr_phoffset);
+			ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
 		} else {
 			tmp.ph_page = page;
 			ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
@@ -413,8 +413,8 @@ pr_find_pagehead(struct pool *pp, void *
 	}
 
 	KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
-	    (ph->ph_page <= (char *)v &&
-	    (char *)v < ph->ph_page + pp->pr_alloc->pa_pagesz));
+	    ((char *)ph->ph_page <= (char *)v &&
+	    (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
 	return ph;
 }
 
@@ -474,14 +474,14 @@ pr_rmpage(struct pool *pp, struct pool_i
 	pool_update_curpage(pp);
 }
 
-static boolean_t
+static bool
 pa_starved_p(struct pool_allocator *pa)
 {
 
 	if (pa->pa_backingmap != NULL) {
 		return vm_map_starved_p(pa->pa_backingmap);
 	}
-	return FALSE;
+	return false;
 }
 
 static int
@@ -560,7 +560,7 @@ pool_subsystem_init(void)
 	__link_set_foreach(pi, pools)
 		pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
 		    (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
-		    (*pi)->palloc);
+		    (*pi)->palloc, (*pi)->ipl);
 
 	while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
 		KASSERT(pa->pa_backingmapptr != NULL);
@@ -578,7 +578,7 @@ pool_subsystem_init(void)
  */
 void
 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
-    const char *wchan, struct pool_allocator *palloc)
+    const char *wchan, struct pool_allocator *palloc, int ipl)
 {
 #ifdef DEBUG
 	struct pool *pp1;
@@ -792,14 +792,14 @@ pool_init(struct pool *pp, size_t size,
 			    + nelem * sizeof(pool_item_freelist_t);
 			}
 			pool_init(&phpool[idx], sz, 0, 0, 0,
-			    phpool_names[idx], &pool_allocator_meta);
+			    phpool_names[idx], &pool_allocator_meta, IPL_VM);
 		}
 #ifdef POOL_SUBPAGE
 		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
-		    PR_RECURSIVE, "psppool", &pool_allocator_meta);
+		    PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
 #endif
 		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
-		    0, "pcgpool", &pool_allocator_meta);
+		    0, "pcgpool", &pool_allocator_meta, IPL_VM);
 	}
 
 	/* Insert into the list of all pools. */
@@ -887,7 +887,7 @@ pool_set_drain_hook(struct pool *pp, voi
 }
 
 static struct pool_item_header *
-pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
+pool_alloc_item_header(struct pool *pp, void *storage, int flags)
 {
 	struct pool_item_header *ph;
 	int s;
@@ -895,7 +895,7 @@ pool_alloc_item_header(struct pool *pp,
 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
 
 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
-		ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
+		ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
 	else {
 		s = splvm();
 		ph = pool_get(pp->pr_phpool, flags);
@@ -1170,7 +1170,7 @@ pool_do_put(struct pool *pp, void *v, st
 	/*
 	 * Check if we're freeing a locked simple lock.
 	 */
-	simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
+	simple_lock_freecheck(pi, (char *)pi + pp->pr_size);
 #endif
 
 	/*
@@ -1208,7 +1208,7 @@ pool_do_put(struct pool *pp, void *v, st
 			pp->pr_flags &= ~PR_WANTED;
 			if (ph->ph_nmissing == 0)
 				pp->pr_nidle++;
-			wakeup((caddr_t)pp);
+			wakeup((void *)pp);
 			return;
 		}
 
@@ -1370,10 +1370,10 @@ pool_prime(struct pool *pp, int n)
  * Note, we must be called with the pool descriptor LOCKED.
  */
 static void
-pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
+pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
 {
 	struct pool_item *pi;
-	caddr_t cp = storage;
+	void *cp = storage;
 	const unsigned int align = pp->pr_align;
 	const unsigned int ioff = pp->pr_itemoffset;
 	int n;
@@ -1402,7 +1402,7 @@ pool_prime_page(struct pool *pp, caddr_t
 	/*
 	 * Color this page.
 	 */
-	cp = (caddr_t)(cp + pp->pr_curcolor);
+	cp = (char *)cp + pp->pr_curcolor;
 	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
 		pp->pr_curcolor = 0;
 
@@ -1410,7 +1410,7 @@ pool_prime_page(struct pool *pp, caddr_t
 	 * Adjust storage to apply aligment to `pr_itemoffset' in each item.
 	 */
 	if (ioff != 0)
-		cp = (caddr_t)(cp + (align - ioff));
+		cp = (char *)cp + align - ioff;
 
 	KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
 
@@ -1424,7 +1424,7 @@ pool_prime_page(struct pool *pp, caddr_t
 		pool_item_freelist_t *freelist = PR_FREELIST(ph);
 		int i;
 
-		ph->ph_off = cp - storage;
+		ph->ph_off = (char *)cp - (char *)storage;
 		ph->ph_firstfree = 0;
 		for (i = 0; i < n - 1; i++)
 			freelist[i] = i + 1;
@@ -1440,7 +1440,7 @@ pool_prime_page(struct pool *pp, caddr_t
 #ifdef DIAGNOSTIC
 		pi->pi_magic = PI_MAGIC;
 #endif
-		cp = (caddr_t)(cp + pp->pr_size);
+		cp = (char *)cp + pp->pr_size;
 
 		KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
 	}
@@ -1624,6 +1624,8 @@ pool_reclaim(struct pool *pp)
  * Drain pools, one at a time.
  *
  * Note, we must never be called from an interrupt context.
+ *
+ * XXX Pool can disappear while draining.
  */
 void
 pool_drain(void *arg)
@@ -1843,11 +1845,11 @@ static int
 pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
 {
 	struct pool_item *pi;
-	caddr_t page;
+	void *page;
 	int n;
 
 	if ((pp->pr_roflags & PR_NOALIGN) == 0) {
-		page = (caddr_t)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
+		page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
 		if (page != ph->ph_page &&
 		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
 			if (label != NULL)
@@ -1881,7 +1883,7 @@ pool_chk_page(struct pool *pp, const cha
 		if ((pp->pr_roflags & PR_NOALIGN) != 0) {
 			continue;
 		}
-		page = (caddr_t)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
+		page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
 		if (page == ph->ph_page)
 			continue;
 
@@ -2184,26 +2186,43 @@ pool_cache_destruct_object(struct pool_c
 	pool_put(pc->pc_pool, object);
 }
 
+/*
+ * pool_do_cache_invalidate_grouplist:
+ *
+ *	Invalidate a single grouplist and destruct all objects.
+ *	XXX This is too expensive.  We should swap the list then
+ *	unlock.
+ */
 static void
 pool_do_cache_invalidate_grouplist(struct pool_cache_grouplist *pcgsl,
     struct pool_cache *pc, struct pool_pagelist *pq,
    struct pool_cache_grouplist *pcgdl)
 {
-	struct pool_cache_group *pcg, *npcg;
+	struct pool_cache_group *pcg;
 	void *object;
 
-	for (pcg = LIST_FIRST(pcgsl); pcg != NULL; pcg = npcg) {
-		npcg = LIST_NEXT(pcg, pcg_list);
+	LOCK_ASSERT(simple_lock_held(&pc->pc_slock));
+	LOCK_ASSERT(simple_lock_held(&pc->pc_pool->pr_slock));
+
+	while ((pcg = LIST_FIRST(pcgsl)) != NULL) {
+		pc->pc_ngroups--;
+		LIST_REMOVE(pcg, pcg_list);
+		LIST_INSERT_HEAD(pcgdl, pcg, pcg_list);
+		pc->pc_nitems -= pcg->pcg_avail;
+		simple_unlock(&pc->pc_pool->pr_slock);
+		simple_unlock(&pc->pc_slock);
+
 		while (pcg->pcg_avail != 0) {
-			pc->pc_nitems--;
 			object = pcg_get(pcg, NULL);
 			if (pc->pc_dtor != NULL)
 				(*pc->pc_dtor)(pc->pc_arg, object);
+			simple_lock(&pc->pc_pool->pr_slock);
 			pool_do_put(pc->pc_pool, object, pq);
+			simple_unlock(&pc->pc_pool->pr_slock);
 		}
-		pc->pc_ngroups--;
-		LIST_REMOVE(pcg, pcg_list);
-		LIST_INSERT_HEAD(pcgdl, pcg, pcg_list);
+
+		simple_lock(&pc->pc_slock);
+		simple_lock(&pc->pc_pool->pr_slock);
 	}
 }
@@ -2371,7 +2390,7 @@ pool_allocator_free(struct pool *pp, voi
 void *
 pool_page_alloc(struct pool *pp, int flags)
 {
-	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
+	bool waitok = (flags & PR_WAITOK) ? true : false;
 
 	return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
 }
@@ -2386,7 +2405,7 @@ pool_page_free(struct pool *pp, void *v)
 static void *
 pool_page_alloc_meta(struct pool *pp, int flags)
 {
-	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
+	bool waitok = (flags & PR_WAITOK) ? true : false;
 
 	return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
 }
@@ -2438,7 +2457,7 @@ pool_subpage_free_nointr(struct pool *pp
 void *
 pool_page_alloc_nointr(struct pool *pp, int flags)
 {
-	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
+	bool waitok = (flags & PR_WAITOK) ? true : false;
 
 	return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
 }
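
The one change in this diff that ripples outside subr_pool.c is the new `ipl'
argument to pool_init().  As a hedged sketch only (not part of the diff): a
caller would be updated along the lines below, where `struct foo', `foo_pool'
and the wait channel "foopl" are hypothetical names, and IPL_NONE is the usual
choice for a pool never touched from interrupt context (the in-tree metadata
pools above pass IPL_VM instead).

	#include <sys/param.h>
	#include <sys/pool.h>

	struct foo { int f_dummy; };	/* illustrative item type */
	static struct pool foo_pool;	/* hypothetical pool */

	void
	foo_init(void)
	{
		/*
		 * Before this change the call ended at the allocator:
		 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
		 *	    "foopl", &pool_allocator_nointr);
		 * Now an interrupt priority level must be supplied as
		 * the final argument.
		 */
		pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
		    "foopl", &pool_allocator_nointr, IPL_NONE);
	}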