version 1.72, 2002/03/09 01:56:27 |
version 1.75, 2002/03/13 08:12:58 |
Line 1041 pool_put(struct pool *pp, void *v) |
|
Line 1041 pool_put(struct pool *pp, void *v) |
|
#endif |
#endif |
|
|
/* |
/* |
|
* Add N items to the pool. |
|
*/ |
|
int |
|
pool_prime(struct pool *pp, int n) |
|
{ |
|
struct pool_item_header *ph; |
|
caddr_t cp; |
|
int newpages; |
|
|
|
simple_lock(&pp->pr_slock); |
|
|
|
newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; |
|
|
|
while (newpages-- > 0) { |
|
simple_unlock(&pp->pr_slock); |
|
cp = pool_allocator_alloc(pp, PR_NOWAIT); |
|
if (__predict_true(cp != NULL)) |
|
ph = pool_alloc_item_header(pp, cp, PR_NOWAIT); |
|
simple_lock(&pp->pr_slock); |
|
|
|
if (__predict_false(cp == NULL || ph == NULL)) { |
|
if (cp != NULL) |
|
pool_allocator_free(pp, cp); |
|
break; |
|
} |
|
|
|
pool_prime_page(pp, cp, ph); |
|
pp->pr_npagealloc++; |
|
pp->pr_minpages++; |
|
} |
|
|
|
if (pp->pr_minpages >= pp->pr_maxpages) |
|
pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */ |
|
|
|
simple_unlock(&pp->pr_slock); |
|
return (0); |
|
} |
|
|
|
/*
 * Add a page worth of items to the pool.
 *
 * Note, we must be called with the pool descriptor LOCKED.
 */
Line 1120 pool_prime_page(struct pool *pp, caddr_t |
|
Line 1159 pool_prime_page(struct pool *pp, caddr_t |
|
 *
 * Note 1, we never wait for memory here, we let the caller decide what to do.
 *
 * Note 2, we must be called with the pool already locked, and we return
 * with it locked.
 */
static int |
static int |
Line 1160 pool_catchup(struct pool *pp) |
|
Line 1197 pool_catchup(struct pool *pp) |
|
void |
void |
pool_setlowat(struct pool *pp, int n) |
pool_setlowat(struct pool *pp, int n) |
{ |
{ |
int error; |
|
|
|
simple_lock(&pp->pr_slock); |
simple_lock(&pp->pr_slock); |
|
|
Line 1170 pool_setlowat(struct pool *pp, int n) |
|
Line 1206 pool_setlowat(struct pool *pp, int n) |
|
: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; |
: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; |
|
|
/* Make sure we're caught up with the newly-set low water mark. */ |
/* Make sure we're caught up with the newly-set low water mark. */ |
if (POOL_NEEDS_CATCHUP(pp) && (error = pool_catchup(pp) != 0)) { |
if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) { |
/* |
/* |
* XXX: Should we log a warning? Should we set up a timeout |
* XXX: Should we log a warning? Should we set up a timeout |
* to try again in a second or so? The latter could break |
* to try again in a second or so? The latter could break |
Line 1916 pool_allocator_alloc(struct pool *org, i |
|
Line 1952 pool_allocator_alloc(struct pool *org, i |
|
TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list); |
TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list); |
if (pp == org) |
if (pp == org) |
continue; |
continue; |
simple_unlock(&pa->pa_list); |
simple_unlock(&pa->pa_slock); |
freed = pool_reclaim(pp); |
freed = pool_reclaim(pp); |
simple_lock(&pa->pa_list); |
simple_lock(&pa->pa_slock); |
} while ((pp = TAILQ_FIRST(&pa->pa_list)) != start && |
} while ((pp = TAILQ_FIRST(&pa->pa_list)) != start && |
freed == 0); |
freed == 0); |
|
|