version 1.46, 2000/12/07 21:30:07 |
version 1.54, 2001/05/10 02:19:32 |
Line 103 struct pool_item { |
|
Line 103 struct pool_item { |
|
#define PR_HASH_INDEX(pp,addr) \ |
#define PR_HASH_INDEX(pp,addr) \ |
(((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1)) |
(((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1)) |
|
|
|
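Both columns carry the same macro: PR_HASH_INDEX() shifts an address down by the pool's page shift and masks it into one of PR_HASHTABSIZE buckets, so every item on a page hashes to the bucket holding that page's pool_item_header. A minimal sketch of the lookup this supports, essentially the file's pr_find_pagehead() helper (treat the body as an illustration, not revision text):

    static struct pool_item_header *
    find_pagehead_sketch(struct pool *pp, void *v)
    {
        struct pool_item_header *ph;
        /* Mask the item address down to its page base first. */
        caddr_t page = (caddr_t)((u_long)v & pp->pr_pagemask);

        /* Walk the one bucket that page can hash to. */
        for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
             ph != NULL; ph = LIST_NEXT(ph, ph_hashlist))
            if (ph->ph_page == page)
                return (ph);
        return (NULL);
    }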
#define POOL_NEEDS_CATCHUP(pp) \ |
|
((pp)->pr_nitems < (pp)->pr_minitems) |
|
|
/* |
/* |
* Pool cache management. |
* Pool cache management. |
* |
* |
Line 145 struct pool_cache_group { |
|
Line 148 struct pool_cache_group { |
|
static void pool_cache_reclaim(struct pool_cache *); |
static void pool_cache_reclaim(struct pool_cache *); |
|
|
static int pool_catchup(struct pool *); |
static int pool_catchup(struct pool *); |
static void pool_prime_page(struct pool *, caddr_t); |
static int pool_prime_page(struct pool *, caddr_t, int); |
static void *pool_page_alloc(unsigned long, int, int); |
static void *pool_page_alloc(unsigned long, int, int); |
static void pool_page_free(void *, unsigned long, int); |
static void pool_page_free(void *, unsigned long, int); |
|
|
Line 153 static void pool_print1(struct pool *, c |
|
Line 156 static void pool_print1(struct pool *, c |
|
void (*)(const char *, ...)); |
void (*)(const char *, ...)); |
|
|
/* |
/* |
* Pool log entry. An array of these is allocated in pool_create(). |
* Pool log entry. An array of these is allocated in pool_init(). |
*/ |
*/ |
struct pool_log { |
struct pool_log { |
const char *pl_file; |
const char *pl_file; |
Line 344 pr_rmpage(struct pool *pp, struct pool_i |
|
Line 347 pr_rmpage(struct pool *pp, struct pool_i |
|
} |
} |
|
|
/* |
/* |
* Allocate and initialize a pool. |
|
*/ |
|
struct pool * |
|
pool_create(size_t size, u_int align, u_int ioff, int nitems, |
|
const char *wchan, size_t pagesz, |
|
void *(*alloc)(unsigned long, int, int), |
|
void (*release)(void *, unsigned long, int), |
|
int mtype) |
|
{ |
|
struct pool *pp; |
|
int flags; |
|
|
|
pp = (struct pool *)malloc(sizeof(*pp), M_POOL, M_NOWAIT); |
|
if (pp == NULL) |
|
return (NULL); |
|
|
|
flags = PR_FREEHEADER; |
|
pool_init(pp, size, align, ioff, flags, wchan, pagesz, |
|
alloc, release, mtype); |
|
|
|
if (nitems != 0) { |
|
if (pool_prime(pp, nitems, NULL) != 0) { |
|
pool_destroy(pp); |
|
return (NULL); |
|
} |
|
} |
|
|
|
return (pp); |
|
} |
|
|
|
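The whole pool_create() convenience wrapper is deleted in 1.54 (pool_prime() goes with it further down). A subsystem now embeds its own struct pool and calls pool_init()/pool_destroy() directly instead of receiving a malloc'd, optionally pre-primed descriptor. A before/after sketch; struct foo, the foo_pool names, and the "foopl" wait channel are made up for illustration:

    /* 1.46: descriptor allocated (and primed when nitems != 0) for you. */
    struct pool *foo_pool;

    foo_pool = pool_create(sizeof(struct foo), 0, 0, 0, "foopl",
        0, NULL, NULL, M_DEVBUF);

    /* 1.54: the caller owns the descriptor. */
    struct pool foo_pool_store;

    pool_init(&foo_pool_store, sizeof(struct foo), 0, 0, 0, "foopl",
        0, NULL, NULL, M_DEVBUF);

Code that used a nonzero nitems to pre-fill the pool can get much the same effect from pool_setlowat(), which calls pool_catchup() to reach the new minimum.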
/* |
|
* Initialize the given pool resource structure. |
* Initialize the given pool resource structure. |
* |
* |
* We export this routine to allow other kernel parts to declare |
* We export this routine to allow other kernel parts to declare |
Line 714 _pool_get(struct pool *pp, int flags, co |
|
Line 686 _pool_get(struct pool *pp, int flags, co |
|
} |
} |
|
|
/* We have more memory; add it to the pool */ |
/* We have more memory; add it to the pool */ |
|
if (pool_prime_page(pp, v, flags & PR_WAITOK) != 0) { |
|
/* |
|
* Probably we weren't allowed to wait and |
|
* couldn't allocate a page header. |
|
*/ |
|
(*pp->pr_free)(v, pp->pr_pagesz, pp->pr_mtype); |
|
pp->pr_nfail++; |
|
pr_leave(pp); |
|
simple_unlock(&pp->pr_slock); |
|
return (NULL); |
|
} |
pp->pr_npagealloc++; |
pp->pr_npagealloc++; |
pool_prime_page(pp, v); |
|
|
|
/* Start the allocation process over. */ |
/* Start the allocation process over. */ |
goto startover; |
goto startover; |
Line 796 _pool_get(struct pool *pp, int flags, co |
|
Line 778 _pool_get(struct pool *pp, int flags, co |
|
* If we have a low water mark and we are now below that low |
* If we have a low water mark and we are now below that low |
* water mark, add more items to the pool. |
* water mark, add more items to the pool. |
*/ |
*/ |
if (pp->pr_nitems < pp->pr_minitems && pool_catchup(pp) != 0) { |
if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) { |
/* |
/* |
* XXX: Should we log a warning? Should we set up a timeout |
* XXX: Should we log a warning? Should we set up a timeout |
* to try again in a second or so? The latter could break |
* to try again in a second or so? The latter could break |
Line 956 _pool_put(struct pool *pp, void *v, cons |
|
Line 938 _pool_put(struct pool *pp, void *v, cons |
|
} |
} |
|
|
/* |
/* |
* Add N items to the pool. |
|
*/ |
|
int |
|
pool_prime(struct pool *pp, int n, caddr_t storage) |
|
{ |
|
caddr_t cp; |
|
int newnitems, newpages; |
|
|
|
#ifdef DIAGNOSTIC |
|
if (__predict_false(storage && !(pp->pr_roflags & PR_STATIC))) |
|
panic("pool_prime: static"); |
|
/* !storage && static caught below */ |
|
#endif |
|
|
|
simple_lock(&pp->pr_slock); |
|
|
|
newnitems = pp->pr_minitems + n; |
|
newpages = |
|
roundup(newnitems, pp->pr_itemsperpage) / pp->pr_itemsperpage |
|
- pp->pr_minpages; |
|
|
|
while (newpages-- > 0) { |
|
if (pp->pr_roflags & PR_STATIC) { |
|
cp = storage; |
|
storage += pp->pr_pagesz; |
|
} else { |
|
simple_unlock(&pp->pr_slock); |
|
cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype); |
|
simple_lock(&pp->pr_slock); |
|
} |
|
|
|
if (cp == NULL) { |
|
simple_unlock(&pp->pr_slock); |
|
return (ENOMEM); |
|
} |
|
|
|
pp->pr_npagealloc++; |
|
pool_prime_page(pp, cp); |
|
pp->pr_minpages++; |
|
} |
|
|
|
pp->pr_minitems = newnitems; |
|
|
|
if (pp->pr_minpages >= pp->pr_maxpages) |
|
pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */ |
|
|
|
simple_unlock(&pp->pr_slock); |
|
return (0); |
|
} |
|
|
|
/* |
|
* Add a page worth of items to the pool. |
* Add a page worth of items to the pool. |
* |
* |
* Note, we must be called with the pool descriptor LOCKED. |
* Note, we must be called with the pool descriptor LOCKED. |
*/ |
*/ |
static void |
static int |
pool_prime_page(struct pool *pp, caddr_t storage) |
pool_prime_page(struct pool *pp, caddr_t storage, int flags) |
{ |
{ |
struct pool_item *pi; |
struct pool_item *pi; |
struct pool_item_header *ph; |
struct pool_item_header *ph; |
Line 1028 pool_prime_page(struct pool *pp, caddr_t |
|
Line 959 pool_prime_page(struct pool *pp, caddr_t |
|
ph = (struct pool_item_header *)(cp + pp->pr_phoffset); |
ph = (struct pool_item_header *)(cp + pp->pr_phoffset); |
} else { |
} else { |
s = splhigh(); |
s = splhigh(); |
ph = pool_get(&phpool, PR_URGENT); |
ph = pool_get(&phpool, flags); |
splx(s); |
splx(s); |
|
if (ph == NULL) |
|
return (ENOMEM); |
LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)], |
LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)], |
ph, ph_hashlist); |
ph, ph_hashlist); |
} |
} |
Line 1083 pool_prime_page(struct pool *pp, caddr_t |
|
Line 1016 pool_prime_page(struct pool *pp, caddr_t |
|
|
|
if (++pp->pr_npages > pp->pr_hiwat) |
if (++pp->pr_npages > pp->pr_hiwat) |
pp->pr_hiwat = pp->pr_npages; |
pp->pr_hiwat = pp->pr_npages; |
|
|
|
return (0); |
} |
} |
|
|
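pool_prime_page() can now fail (it returns ENOMEM when it cannot get a page header and the caller did not pass a waiting flag), so both call sites in this diff follow one contract: allocate the page first, then hand it over, and release the page yourself on failure. Condensed from the _pool_get() hunk above and the pool_catchup() hunk below:

    caddr_t cp;
    int error;

    cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype);
    if (cp == NULL)
        return (ENOMEM);

    if ((error = pool_prime_page(pp, cp, PR_NOWAIT)) != 0) {
        /* No page header; the page itself is ours to free. */
        (*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
        return (error);
    }
    pp->pr_npagealloc++;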
/* |
/* |
* Like pool_prime(), except this is used by pool_get() when nitems |
* Used by pool_get() when nitems drops below the low water mark. This |
* drops below the low water mark. This is used to catch up nitems |
* is used to catch up nitems with the low water mark. |
* with the low water mark. |
|
* |
* |
* Note 1: we never wait for memory here; we let the caller decide what to do. |
* Note 1: we never wait for memory here; we let the caller decide what to do. |
* |
* |
Line 1115 pool_catchup(struct pool *pp) |
|
Line 1049 pool_catchup(struct pool *pp) |
|
return (0); |
return (0); |
} |
} |
|
|
while (pp->pr_nitems < pp->pr_minitems) { |
while (POOL_NEEDS_CATCHUP(pp)) { |
/* |
/* |
* Call the page back-end allocator for more memory. |
* Call the page back-end allocator for more memory. |
* |
* |
Line 1129 pool_catchup(struct pool *pp) |
|
Line 1063 pool_catchup(struct pool *pp) |
|
error = ENOMEM; |
error = ENOMEM; |
break; |
break; |
} |
} |
|
if ((error = pool_prime_page(pp, cp, PR_NOWAIT)) != 0) { |
|
(*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype); |
|
break; |
|
} |
pp->pr_npagealloc++; |
pp->pr_npagealloc++; |
pool_prime_page(pp, cp); |
|
} |
} |
|
|
return (error); |
return (error); |
Line 1149 pool_setlowat(struct pool *pp, int n) |
|
Line 1086 pool_setlowat(struct pool *pp, int n) |
|
: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; |
: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; |
|
|
/* Make sure we're caught up with the newly-set low water mark. */ |
/* Make sure we're caught up with the newly-set low water mark. */ |
if ((pp->pr_nitems < pp->pr_minitems) && |
if (POOL_NEEDS_CATCHUP(pp) && (error = pool_catchup(pp)) != 0) { |
(error = pool_catchup(pp)) != 0) { |
|
/* |
/* |
* XXX: Should we log a warning? Should we set up a timeout |
* XXX: Should we log a warning? Should we set up a timeout |
* to try again in a second or so? The latter could break |
* to try again in a second or so? The latter could break |
Line 1306 pool_drain(void *arg) |
|
Line 1242 pool_drain(void *arg) |
|
struct pool *pp; |
struct pool *pp; |
int s; |
int s; |
|
|
s = splimp(); |
s = splvm(); |
simple_lock(&pool_head_slock); |
simple_lock(&pool_head_slock); |
|
|
if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL) |
if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL) |
Line 1331 pool_print(struct pool *pp, const char * |
|
Line 1267 pool_print(struct pool *pp, const char * |
|
{ |
{ |
int s; |
int s; |
|
|
s = splimp(); |
s = splvm(); |
if (simple_lock_try(&pp->pr_slock) == 0) { |
if (simple_lock_try(&pp->pr_slock) == 0) { |
printf("pool %s is locked; try again later\n", |
printf("pool %s is locked; try again later\n", |
pp->pr_wchan); |
pp->pr_wchan); |
Line 1455 pool_print1(struct pool *pp, const char |
|
Line 1391 pool_print1(struct pool *pp, const char |
|
pc = TAILQ_NEXT(pc, pc_poollist)) { |
pc = TAILQ_NEXT(pc, pc_poollist)) { |
(*pr)("\tcache %p: allocfrom %p freeto %p\n", pc, |
(*pr)("\tcache %p: allocfrom %p freeto %p\n", pc, |
pc->pc_allocfrom, pc->pc_freeto); |
pc->pc_allocfrom, pc->pc_freeto); |
|
(*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n", |
|
pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems); |
for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL; |
for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL; |
pcg = TAILQ_NEXT(pcg, pcg_list)) { |
pcg = TAILQ_NEXT(pcg, pcg_list)) { |
(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); |
(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); |
Line 1557 pool_cache_init(struct pool_cache *pc, s |
|
Line 1495 pool_cache_init(struct pool_cache *pc, s |
|
pc->pc_dtor = dtor; |
pc->pc_dtor = dtor; |
pc->pc_arg = arg; |
pc->pc_arg = arg; |
|
|
|
pc->pc_hits = 0; |
|
pc->pc_misses = 0; |
|
|
|
pc->pc_ngroups = 0; |
|
|
|
pc->pc_nitems = 0; |
|
|
simple_lock(&pp->pr_slock); |
simple_lock(&pp->pr_slock); |
TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist); |
TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist); |
simple_unlock(&pp->pr_slock); |
simple_unlock(&pp->pr_slock); |
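pool_cache_init() now zeroes four statistics fields. pc_hits and pc_misses are incremented in the pool_cache_get() hunks below, pc_ngroups and pc_nitems are maintained by the put/invalidate paths, and pool_print1() reports them all. A hypothetical diagnostic fragment (not from the source, assuming the counters are u_long as the %lu formats suggest):

    u_long total = pc->pc_hits + pc->pc_misses;

    if (total != 0)
        printf("cache %p: %lu%% hit rate (%lu hits, %lu misses)\n",
            pc, (pc->pc_hits * 100) / total, pc->pc_hits, pc->pc_misses);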
Line 1638 pool_cache_get(struct pool_cache *pc, in |
|
Line 1583 pool_cache_get(struct pool_cache *pc, in |
|
* the caller. We will allocate a group, if necessary, |
* the caller. We will allocate a group, if necessary, |
* when the object is freed back to the cache. |
* when the object is freed back to the cache. |
*/ |
*/ |
|
pc->pc_misses++; |
simple_unlock(&pc->pc_slock); |
simple_unlock(&pc->pc_slock); |
object = pool_get(pc->pc_pool, flags); |
object = pool_get(pc->pc_pool, flags); |
if (object != NULL && pc->pc_ctor != NULL) { |
if (object != NULL && pc->pc_ctor != NULL) { |
Line 1650 pool_cache_get(struct pool_cache *pc, in |
|
Line 1596 pool_cache_get(struct pool_cache *pc, in |
|
} |
} |
|
|
have_group: |
have_group: |
|
pc->pc_hits++; |
|
pc->pc_nitems--; |
object = pcg_get(pcg); |
object = pcg_get(pcg); |
|
|
if (pcg->pcg_avail == 0) |
if (pcg->pcg_avail == 0) |
Line 1683 pool_cache_put(struct pool_cache *pc, vo |
|
Line 1631 pool_cache_put(struct pool_cache *pc, vo |
|
|
|
/* |
/* |
* No empty groups to free the object to. Attempt to |
* No empty groups to free the object to. Attempt to |
* allocate one. We don't unlock the cache here, since |
* allocate one. |
* we never block. |
|
*/ |
*/ |
|
simple_unlock(&pc->pc_slock); |
pcg = pool_get(&pcgpool, PR_NOWAIT); |
pcg = pool_get(&pcgpool, PR_NOWAIT); |
if (pcg != NULL) { |
if (pcg != NULL) { |
memset(pcg, 0, sizeof(*pcg)); |
memset(pcg, 0, sizeof(*pcg)); |
|
simple_lock(&pc->pc_slock); |
|
pc->pc_ngroups++; |
TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list); |
TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list); |
pc->pc_freeto = pcg; |
if (pc->pc_freeto == NULL) |
|
pc->pc_freeto = pcg; |
goto have_group; |
goto have_group; |
} |
} |
|
|
simple_unlock(&pc->pc_slock); |
|
|
|
/* |
/* |
* Unable to allocate a cache group; destruct the object |
* Unable to allocate a cache group; destruct the object |
* and free it back to the pool. |
* and free it back to the pool. |
*/ |
*/ |
if (pc->pc_dtor != NULL) |
pool_cache_destruct_object(pc, object); |
(*pc->pc_dtor)(pc->pc_arg, object); |
|
pool_put(pc->pc_pool, object); |
|
return; |
return; |
} |
} |
|
|
have_group: |
have_group: |
|
pc->pc_nitems++; |
pcg_put(pcg, object); |
pcg_put(pcg, object); |
|
|
if (pcg->pcg_avail == PCG_NOBJECTS) |
if (pcg->pcg_avail == PCG_NOBJECTS) |
Line 1716 pool_cache_put(struct pool_cache *pc, vo |
|
Line 1664 pool_cache_put(struct pool_cache *pc, vo |
|
} |
} |
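The subtle change in pool_cache_put() is the locking around group allocation: 1.46 held pc_slock across pool_get(&pcgpool, PR_NOWAIT) on the grounds that it never blocks, while 1.54 drops the lock first. Because another context can slip in while the lock is down, pc_freeto is re-tested after relocking. The same lines as the hunk above, with explanatory comments added:

    simple_unlock(&pc->pc_slock);        /* don't hold pc_slock across pool_get() */
    pcg = pool_get(&pcgpool, PR_NOWAIT);
    if (pcg != NULL) {
        memset(pcg, 0, sizeof(*pcg));
        simple_lock(&pc->pc_slock);      /* cache state may have changed meanwhile */
        pc->pc_ngroups++;
        TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
        if (pc->pc_freeto == NULL)       /* only install if nobody else did */
            pc->pc_freeto = pcg;
        goto have_group;
    }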
|
|
/* |
/* |
|
* pool_cache_destruct_object: |
|
* |
|
* Force destruction of an object and its release back into |
|
* the pool. |
|
*/ |
|
void |
|
pool_cache_destruct_object(struct pool_cache *pc, void *object) |
|
{ |
|
|
|
if (pc->pc_dtor != NULL) |
|
(*pc->pc_dtor)(pc->pc_arg, object); |
|
pool_put(pc->pc_pool, object); |
|
} |
|
|
|
/* |
* pool_cache_do_invalidate: |
* pool_cache_do_invalidate: |
* |
* |
* This internal function implements pool_cache_invalidate() and |
* This internal function implements pool_cache_invalidate() and |
Line 1732 pool_cache_do_invalidate(struct pool_cac |
|
Line 1695 pool_cache_do_invalidate(struct pool_cac |
|
pcg = npcg) { |
pcg = npcg) { |
npcg = TAILQ_NEXT(pcg, pcg_list); |
npcg = TAILQ_NEXT(pcg, pcg_list); |
while (pcg->pcg_avail != 0) { |
while (pcg->pcg_avail != 0) { |
|
pc->pc_nitems--; |
object = pcg_get(pcg); |
object = pcg_get(pcg); |
if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg) |
if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg) |
pc->pc_allocfrom = NULL; |
pc->pc_allocfrom = NULL; |
Line 1740 pool_cache_do_invalidate(struct pool_cac |
|
Line 1704 pool_cache_do_invalidate(struct pool_cac |
|
(*putit)(pc->pc_pool, object, __FILE__, __LINE__); |
(*putit)(pc->pc_pool, object, __FILE__, __LINE__); |
} |
} |
if (free_groups) { |
if (free_groups) { |
|
pc->pc_ngroups--; |
TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list); |
TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list); |
if (pc->pc_freeto == pcg) |
if (pc->pc_freeto == pcg) |
pc->pc_freeto = NULL; |
pc->pc_freeto = NULL; |
|
|
pool_cache_reclaim(struct pool_cache *pc) |
pool_cache_reclaim(struct pool_cache *pc) |
{ |
{ |
|
|
simple_lock(&pc->pc_slock); |
/* |
|
* We're locking in the opposite order (pool already |
|
* locked in pool_reclaim()), so use a try-lock instead. |
|
*/ |
|
|
|
if (simple_lock_try(&pc->pc_slock) == 0) |
|
return; |
pool_cache_do_invalidate(pc, 1, pool_do_put); |
pool_cache_do_invalidate(pc, 1, pool_do_put); |
simple_unlock(&pc->pc_slock); |
simple_unlock(&pc->pc_slock); |
} |
} |
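On the try-lock in pool_cache_reclaim(): pool_reclaim() already holds the pool's pr_slock when it walks the pool's caches, which is the reverse of the usual take-the-cache-lock-first order, so blocking on pc_slock here could deadlock. Failing the try-lock is harmless; the cache is simply left alone until the next reclaim pass. In outline (an annotated restatement of the new column, not new code):

    /* Called from pool_reclaim() with pp->pr_slock already held. */
    if (simple_lock_try(&pc->pc_slock) == 0)
        return;    /* lock order would be inverted; skip this cache for now */
    pool_cache_do_invalidate(pc, 1, pool_do_put);
    simple_unlock(&pc->pc_slock);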