Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v
retrieving revision 1.45
retrieving revision 1.50
diff -u -p -r1.45 -r1.50
--- src/sys/kern/subr_pool.c	2000/12/07 20:16:56	1.45
+++ src/sys/kern/subr_pool.c	2001/01/29 02:38:02	1.50
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_pool.c,v 1.45 2000/12/07 20:16:56 thorpej Exp $	*/
+/*	$NetBSD: subr_pool.c,v 1.50 2001/01/29 02:38:02 enami Exp $	*/
 
 /*-
  * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
@@ -145,7 +145,7 @@ struct pool_cache_group {
 static void	pool_cache_reclaim(struct pool_cache *);
 
 static int	pool_catchup(struct pool *);
-static void	pool_prime_page(struct pool *, caddr_t);
+static int	pool_prime_page(struct pool *, caddr_t, int);
 static void	*pool_page_alloc(unsigned long, int, int);
 static void	pool_page_free(void *, unsigned long, int);
 
@@ -714,8 +714,18 @@ _pool_get(struct pool *pp, int flags, co
 		}
 
 		/* We have more memory; add it to the pool */
+		if (pool_prime_page(pp, v, flags & PR_WAITOK) != 0) {
+			/*
+			 * Most likely we weren't allowed to wait and
+			 * couldn't allocate a page header.
+			 */
+			(*pp->pr_free)(v, pp->pr_pagesz, pp->pr_mtype);
+			pp->pr_nfail++;
+			pr_leave(pp);
+			simple_unlock(&pp->pr_slock);
+			return (NULL);
+		}
 		pp->pr_npagealloc++;
-		pool_prime_page(pp, v);
 
 		/* Start the allocation process over. */
 		goto startover;
@@ -962,7 +972,7 @@ int
 pool_prime(struct pool *pp, int n, caddr_t storage)
 {
 	caddr_t cp;
-	int newnitems, newpages;
+	int error, newnitems, newpages;
 
 #ifdef DIAGNOSTIC
 	if (__predict_false(storage && !(pp->pr_roflags & PR_STATIC)))
@@ -992,8 +1002,14 @@ pool_prime(struct pool *pp, int n, caddr
 			return (ENOMEM);
 		}
 
+		if ((error = pool_prime_page(pp, cp, PR_NOWAIT)) != 0) {
+			if ((pp->pr_roflags & PR_STATIC) == 0)
+				(*pp->pr_free)(cp, pp->pr_pagesz,
+				    pp->pr_mtype);
+			simple_unlock(&pp->pr_slock);
+			return (error);
+		}
 		pp->pr_npagealloc++;
-		pool_prime_page(pp, cp);
 		pp->pr_minpages++;
 	}
 
@@ -1011,8 +1027,8 @@ pool_prime(struct pool *pp, int n, caddr
  *
  * Note, we must be called with the pool descriptor LOCKED.
  */
-static void
-pool_prime_page(struct pool *pp, caddr_t storage)
+static int
+pool_prime_page(struct pool *pp, caddr_t storage, int flags)
 {
 	struct pool_item *pi;
 	struct pool_item_header *ph;
@@ -1028,8 +1044,10 @@ pool_prime_page(struct pool *pp, caddr_t
 		ph = (struct pool_item_header *)(cp + pp->pr_phoffset);
 	} else {
 		s = splhigh();
-		ph = pool_get(&phpool, PR_URGENT);
+		ph = pool_get(&phpool, flags);
 		splx(s);
+		if (ph == NULL)
+			return (ENOMEM);
 		LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
 		    ph, ph_hashlist);
 	}
@@ -1083,6 +1101,8 @@ pool_prime_page(struct pool *pp, caddr_t
 
 	if (++pp->pr_npages > pp->pr_hiwat)
 		pp->pr_hiwat = pp->pr_npages;
+
+	return (0);
 }
 
 /*
@@ -1129,8 +1149,11 @@ pool_catchup(struct pool *pp)
 			error = ENOMEM;
 			break;
 		}
+		if ((error = pool_prime_page(pp, cp, PR_NOWAIT)) != 0) {
+			(*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
+			break;
+		}
 		pp->pr_npagealloc++;
-		pool_prime_page(pp, cp);
 	}
 
 	return (error);
@@ -1306,7 +1329,7 @@ pool_drain(void *arg)
 	struct pool *pp;
 	int s;
 
-	s = splimp();
+	s = splvm();
 	simple_lock(&pool_head_slock);
 	if (drainpp == NULL &&
 	    (drainpp = TAILQ_FIRST(&pool_head)) == NULL)
@@ -1331,7 +1354,7 @@ pool_print(struct pool *pp, const char *
 {
 	int s;
 
-	s = splimp();
+	s = splvm();
 	if (simple_lock_try(&pp->pr_slock) == 0) {
 		printf("pool %s is locked; try again later\n",
 		    pp->pr_wchan);
@@ -1455,6 +1478,8 @@ pool_print1(struct pool *pp, const char
 	     pc = TAILQ_NEXT(pc, pc_poollist)) {
 		(*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
 		    pc->pc_allocfrom, pc->pc_freeto);
+		(*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
+		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
 		for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
 		     pcg = TAILQ_NEXT(pcg, pcg_list)) {
 			(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
@@ -1557,6 +1582,13 @@ pool_cache_init(struct pool_cache *pc, s
 	pc->pc_dtor = dtor;
 	pc->pc_arg = arg;
 
+	pc->pc_hits = 0;
+	pc->pc_misses = 0;
+
+	pc->pc_ngroups = 0;
+
+	pc->pc_nitems = 0;
+
 	simple_lock(&pp->pr_slock);
 	TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
 	simple_unlock(&pp->pr_slock);
@@ -1638,6 +1670,7 @@ pool_cache_get(struct pool_cache *pc, in
 		 * the caller.  We will allocate a group, if necessary,
 		 * when the object is freed back to the cache.
 		 */
+		pc->pc_misses++;
 		simple_unlock(&pc->pc_slock);
 		object = pool_get(pc->pc_pool, flags);
 		if (object != NULL && pc->pc_ctor != NULL) {
@@ -1650,6 +1683,8 @@ pool_cache_get(struct pool_cache *pc, in
 	}
 
  have_group:
+	pc->pc_hits++;
+	pc->pc_nitems--;
 	object = pcg_get(pcg);
 
 	if (pcg->pcg_avail == 0)
@@ -1683,19 +1718,20 @@ pool_cache_put(struct pool_cache *pc, vo
 
 		/*
 		 * No empty groups to free the object to.  Attempt to
-		 * allocate one.  We don't unlock the cache here, since
-		 * we never block.
+		 * allocate one.
 		 */
+		simple_unlock(&pc->pc_slock);
 		pcg = pool_get(&pcgpool, PR_NOWAIT);
 		if (pcg != NULL) {
 			memset(pcg, 0, sizeof(*pcg));
+			simple_lock(&pc->pc_slock);
+			pc->pc_ngroups++;
 			TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
-			pc->pc_freeto = pcg;
+			if (pc->pc_freeto == NULL)
+				pc->pc_freeto = pcg;
 			goto have_group;
 		}
 
-		simple_unlock(&pc->pc_slock);
-
 		/*
 		 * Unable to allocate a cache group; destruct the object
 		 * and free it back to the pool.
@@ -1707,6 +1743,7 @@ pool_cache_put(struct pool_cache *pc, vo
 	}
 
  have_group:
+	pc->pc_nitems++;
 	pcg_put(pcg, object);
 
 	if (pcg->pcg_avail == PCG_NOBJECTS)
@@ -1732,6 +1769,7 @@ pool_cache_do_invalidate(struct pool_cac
 	     pcg = npcg) {
 		npcg = TAILQ_NEXT(pcg, pcg_list);
 		while (pcg->pcg_avail != 0) {
+			pc->pc_nitems--;
 			object = pcg_get(pcg);
 			if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
 				pc->pc_allocfrom = NULL;
@@ -1740,7 +1778,10 @@ pool_cache_do_invalidate(struct pool_cac
 			(*putit)(pc->pc_pool, object, __FILE__, __LINE__);
 		}
 		if (free_groups) {
+			pc->pc_ngroups--;
 			TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
+			if (pc->pc_freeto == pcg)
+				pc->pc_freeto = NULL;
 			pool_put(&pcgpool, pcg);
 		}
 	}
@@ -1770,13 +1811,7 @@ static void
 pool_cache_reclaim(struct pool_cache *pc)
 {
 
-	/*
-	 * We're locking in the opposite order (pool already
-	 * locked in pool_reclaim()), so use a try-lock instead.
-	 */
-
-	if (simple_lock_try(&pc->pc_slock) == 0)
-		return;
+	simple_lock(&pc->pc_slock);
 	pool_cache_do_invalidate(pc, 1, pool_do_put);
 	simple_unlock(&pc->pc_slock);
 }
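
===================================================================

The central change above is that pool_prime_page() can now fail: instead
of allocating its page header with PR_URGENT, it passes the caller's
flags down to pool_get() and returns ENOMEM when no header can be had.
Every caller is now responsible for freeing the backing page and
propagating the failure.  The sketch below shows the resulting caller
idiom; pool_add_page() is a hypothetical wrapper, not a function in the
tree, and pr_alloc is assumed to be the allocation hook paired with the
pr_free seen in the diff.

	/*
	 * Hypothetical illustration of the new caller contract:
	 * allocate a page, try to prime it into the pool, and on
	 * failure give the page back and report the error.
	 */
	int
	pool_add_page(struct pool *pp, int flags)
	{
		caddr_t v;
		int error;

		v = (caddr_t)(*pp->pr_alloc)(pp->pr_pagesz, flags,
		    pp->pr_mtype);
		if (v == NULL)
			return (ENOMEM);

		/* pool_prime_page() may now fail to get a page header. */
		error = pool_prime_page(pp, v, flags & PR_WAITOK);
		if (error != 0) {
			(*pp->pr_free)(v, pp->pr_pagesz, pp->pr_mtype);
			pp->pr_nfail++;
			return (error);
		}
		pp->pr_npagealloc++;
		return (0);
	}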
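A second, subtler change: pool_cache_put() now drops pc_slock around the
group allocation instead of holding it, and since other code may have run
while the lock was released, it re-checks pc_freeto and installs the new
group only if that slot is still empty.  The same idiom, reduced to a
self-contained helper (cache_add_group() is an illustrative name; the
diff itself relocks only on the success path):

	/*
	 * Unlock/relock idiom from pool_cache_put(): never hold a
	 * simple lock across an allocation that may enter the pool
	 * subsystem itself, and re-validate shared state afterwards.
	 */
	static struct pool_cache_group *
	cache_add_group(struct pool_cache *pc)
	{
		struct pool_cache_group *pcg;

		simple_unlock(&pc->pc_slock);
		pcg = pool_get(&pcgpool, PR_NOWAIT);
		simple_lock(&pc->pc_slock);
		if (pcg == NULL)
			return (NULL);

		memset(pcg, 0, sizeof(*pcg));
		pc->pc_ngroups++;
		TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);

		/*
		 * Someone else may have set pc_freeto while the lock
		 * was dropped; only claim the slot if it is still empty.
		 */
		if (pc->pc_freeto == NULL)
			pc->pc_freeto = pcg;
		return (pcg);
	}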
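Finally, the new pc_hits/pc_misses/pc_ngroups/pc_nitems counters are
updated alongside every pcg_get(), pcg_put(), and group insertion or
removal, so wherever pc_slock is held the counters must agree with the
group list.  A DIAGNOSTIC-style consistency check, not part of the diff,
makes that invariant explicit:

	/*
	 * Hypothetical sanity check: pc_nitems must equal the sum of
	 * pcg_avail over all groups, and pc_ngroups the length of
	 * pc_grouplist.  Call with pc_slock held.
	 */
	static void
	pool_cache_check(struct pool_cache *pc)
	{
		struct pool_cache_group *pcg;
		u_long nitems = 0, ngroups = 0;

		for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
		     pcg = TAILQ_NEXT(pcg, pcg_list)) {
			ngroups++;
			nitems += pcg->pcg_avail;
		}
		if (nitems != pc->pc_nitems || ngroups != pc->pc_ngroups)
			panic("pool_cache_check: counters out of sync");
	}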