Diff between version 1.68 (2002/03/08 21:41:59) and version 1.74.2.1 (2002/03/12 07:53:25).
Hunks are shown unified: `-' lines are from 1.68, `+' lines are from 1.74.2.1, unmarked lines are context.

 /*	$NetBSD$	*/

 /*-
- * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
+ * Copyright (c) 1997, 1999, 2000, 2002 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
Line 580: pool_destroy(struct pool *pp)

 #endif

 	/* Remove all pages */
-	if ((pp->pr_roflags & PR_STATIC) == 0)
-		while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
-			pr_rmpage(pp, ph, NULL);
+	while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
+		pr_rmpage(pp, ph, NULL);

 	/* Remove from global pool list */
 	simple_lock(&pool_head_slock);
Line 645/644: pool_get(struct pool *pp, int flags)

 	void *v;

 #ifdef DIAGNOSTIC
-	if (__predict_false((pp->pr_roflags & PR_STATIC) &&
-	    (flags & PR_MALLOCOK))) {
-		pr_printlog(pp, NULL, printf);
-		panic("pool_get: static");
-	}
-
 	if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
 			    (flags & PR_WAITOK) != 0))
 		panic("pool_get: must have NOWAIT");
Line 878/871: pool_get(struct pool *pp, int flags)

 	pr_leave(pp);
 	simple_unlock(&pp->pr_slock);
+	KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
 	return (v);
 }
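The KASSERT added on pool_get()'s return path is the standard power-of-two alignment test: for an alignment A that is a power of two, an address p is aligned iff (p & (A - 1)) == 0. It tests (v + pr_itemoffset) rather than v because the alignment guarantee applies at the item offset, not at the raw item start. A minimal userland sketch of the test (illustrative names, not kernel code):

/*
 * Sketch: power-of-two alignment check as used by the new KASSERTs.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int
is_aligned(uintptr_t addr, uintptr_t offset, uintptr_t align)
{
	/* the mask trick is only valid for a power-of-two alignment */
	assert(align != 0 && (align & (align - 1)) == 0);
	return ((addr + offset) & (align - 1)) == 0;
}

int
main(void)
{
	printf("%d\n", is_aligned(0x1000, 0, 16));	/* 1 */
	printf("%d\n", is_aligned(0x1008, 8, 16));	/* 1: 0x1008 + 8 = 0x1010 */
	printf("%d\n", is_aligned(0x1004, 0, 16));	/* 0 */
	return 0;
}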
Line 966/960: pool_do_put(struct pool *pp, void *v)

 	 */
 	if (ph->ph_nmissing == 0) {
 		pp->pr_nidle++;
-		if (pp->pr_npages > pp->pr_maxpages) {
+		if (pp->pr_npages > pp->pr_maxpages ||
+		    (pp->pr_alloc->pa_flags & PA_WANT) != 0) {
 			pr_rmpage(pp, ph, NULL);
 		} else {
 			TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
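The widened condition changes the page-retention policy: an idle page is now returned to the back-end not only when the pool holds more pages than pr_maxpages, but also whenever the backing allocator has raised PA_WANT to signal memory pressure. A sketch of the policy as a standalone predicate (the function and PA_WANT_ constant are illustrative stand-ins, not the kernel's):

#define PA_WANT_	0x01	/* stand-in for the allocator's PA_WANT bit */

static int
idle_page_should_go_back(unsigned npages, unsigned maxpages,
    unsigned pa_flags)
{
	/* over the cache target, or the back-end is starved */
	return npages > maxpages || (pa_flags & PA_WANT_) != 0;
}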
Line 1096/1091: pool_prime_page(struct pool *pp, caddr_t storage, ...)

 {
 	struct pool_item *pi;
 	caddr_t cp = storage;
-	unsigned int align = pp->pr_align;
-	unsigned int ioff = pp->pr_itemoffset;
+	const unsigned int align = pp->pr_align;
+	const unsigned int ioff = pp->pr_itemoffset;
+	const unsigned int alignsize = roundup(pp->pr_size, align);
 	int n;

 #ifdef DIAGNOSTIC
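The new alignsize is the per-item stride: pr_size rounded up to the alignment, so that stepping through the page in alignsize increments lands every item on an aligned boundary, not just the first one. A sketch of the arithmetic, assuming the usual roundup() definition:

#include <stdio.h>

/* roundup(x, y): smallest multiple of y that is >= x (any y > 0). */
#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

int
main(void)
{
	printf("%u\n", roundup(20u, 16u));	/* 32: a 20-byte item gets a 32-byte slot */
	printf("%u\n", roundup(32u, 16u));	/* 32: already a multiple, no padding */
	return 0;
}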
Line 1133/1129: pool_prime_page(struct pool *pp, caddr_t storage, ...)

 	if (ioff != 0)
 		cp = (caddr_t)(cp + (align - ioff));

+	KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
+
 	/*
 	 * Insert remaining chunks on the bucket list.
 	 */
Line 1147/1145: pool_prime_page(struct pool *pp, caddr_t storage, ...)

 #ifdef DIAGNOSTIC
 	pi->pi_magic = PI_MAGIC;
 #endif
-	cp = (caddr_t)(cp + pp->pr_size);
+	cp = (caddr_t)(cp + alignsize);

+	KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
 }
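Advancing cp by alignsize instead of pr_size is what makes the trailing KASSERT hold for every item: the page starts aligned, and each step adds a multiple of the alignment. A userland sketch of the same carving, assuming posix_memalign() in place of the kernel's page allocator (illustrative, not the kernel's code):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

int
main(void)
{
	const size_t align = 16, size = 20;
	const size_t alignsize = (size + align - 1) & ~(align - 1);	/* 32 */
	char *page, *cp;

	if (posix_memalign((void **)&page, align, 4096) != 0)
		return 1;
	/* every chunk start stays aligned because alignsize % align == 0 */
	for (cp = page; cp + alignsize <= page + 4096; cp += alignsize)
		assert(((uintptr_t)cp & (align - 1)) == 0);
	free(page);
	return 0;
}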
Line 1166/1166: pool_prime_page(struct pool *pp, caddr_t storage, ...) (comment block above pool_catchup)

 /*
  *
  * Note 1, we never wait for memory here, we let the caller decide what to do.
  *
- * Note 2, this doesn't work with static pools.
- *
- * Note 3, we must be called with the pool already locked, and we return
+ * Note 2, we must be called with the pool already locked, and we return
  * with it locked.
  */
 static int
Line 1178/1176: pool_catchup(struct pool *pp)

 	caddr_t cp;
 	int error = 0;

-	if (pp->pr_roflags & PR_STATIC) {
-		/*
-		 * We dropped below the low water mark, and this is not a
-		 * good thing.  Log a warning.
-		 *
-		 * XXX: rate-limit this?
-		 */
-		printf("WARNING: static pool `%s' dropped below low water "
-		    "mark\n", pp->pr_wchan);
-		return (0);
-	}
-
 	while (POOL_NEEDS_CATCHUP(pp)) {
 		/*
 		 * Call the page back-end allocator for more memory.
Line 1291/1277: pool_reclaim(struct pool *pp)

 	struct pool_pagelist pq;
 	int s;

-	if (pp->pr_roflags & PR_STATIC)
-		return (0);
-
 	if (pp->pr_drain_hook != NULL) {
 		/*
 		 * The drain hook must be called with the pool unlocked.
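The rule in that comment, running the drain hook with the pool unlocked, is the usual way to keep a callback from deadlocking against the lock its caller holds. A pthreads sketch of the pattern (hypothetical types and names, not the kernel's):

#include <pthread.h>
#include <stddef.h>

struct pool_like {
	pthread_mutex_t	lock;
	void		(*drain_hook)(void *, int);
	void		*drain_arg;
};

static void
call_drain_hook(struct pool_like *p, int flags)
{
	void (*hook)(void *, int);
	void *arg;

	pthread_mutex_lock(&p->lock);
	hook = p->drain_hook;		/* snapshot under the lock */
	arg = p->drain_arg;
	pthread_mutex_unlock(&p->lock);

	if (hook != NULL)
		hook(arg, flags);	/* no locks held here */
}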
Line 1720/1703: pool_cache_get(struct pool_cache *pc, int flags)

 			return (NULL);
 		}
 	}

+	KASSERT((((vaddr_t)object + pc->pc_pool->pr_itemoffset) &
+	    (pc->pc_pool->pr_align - 1)) == 0);
 	return (object);
 }
Line 1733/1718: pool_cache_get(struct pool_cache *pc, int flags)

 	simple_unlock(&pc->pc_slock);

+	KASSERT((((vaddr_t)object + pc->pc_pool->pr_itemoffset) &
+	    (pc->pc_pool->pr_align - 1)) == 0);
 	return (object);
 }
Line 1977/1964: pool_allocator_alloc(struct pool *org, int flags)

 		TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
 		if (pp == org)
 			continue;
-		simple_unlock(&pa->pa_list);
+		simple_unlock(&pa->pa_slock);
 		freed = pool_reclaim(pp);
-		simple_lock(&pa->pa_list);
+		simple_lock(&pa->pa_slock);
 	} while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
 	    freed == 0);
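This hunk fixes a wrong-argument bug: the code was "locking" &pa->pa_list, the list head, rather than &pa->pa_slock, the allocator's lock. A plausible reason it went unnoticed (our reading, not stated in the diff) is that simple_lock() can expand to nothing on kernels built without the relevant lock-debugging options, so the bad argument was never type-checked. A real function restores that checking; sketch with hypothetical types:

struct simplelock_like { volatile int locked; };

static void
checked_simple_lock(struct simplelock_like *l)
{
	while (__sync_lock_test_and_set(&l->locked, 1))
		;	/* spin until the previous holder releases */
}

static void
checked_simple_unlock(struct simplelock_like *l)
{
	__sync_lock_release(&l->locked);
}

int
main(void)
{
	struct simplelock_like sl = { 0 };

	checked_simple_lock(&sl);
	checked_simple_unlock(&sl);
	/* passing a list head instead of &sl would now fail to compile */
	return 0;
}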
Line 2020/2007: pool_allocator_free(struct pool *pp, void *v)

 			pp->pr_flags &= ~PR_WANTED;
 			wakeup(pp);
 		}
+		simple_unlock(&pp->pr_slock);
 	}
 	pa->pa_flags &= ~PA_WANT;
 	simple_unlock(&pa->pa_slock);
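The added simple_unlock(&pp->pr_slock) closes a lock leak: the per-pool lock taken inside the wakeup loop was previously never released, leaving each visited pool locked. A pthreads sketch of the corrected, balanced loop shape (illustrative names):

#include <pthread.h>
#include <stddef.h>

struct pool_like {
	pthread_mutex_t	lock;
	pthread_cond_t	cv;
	int		wanted;
	struct pool_like *next;
};

static void
wake_starved(struct pool_like *head)
{
	struct pool_like *p;

	for (p = head; p != NULL; p = p->next) {
		pthread_mutex_lock(&p->lock);
		if (p->wanted) {
			p->wanted = 0;
			pthread_cond_broadcast(&p->cv);
		}
		pthread_mutex_unlock(&p->lock);	/* the line the fix adds */
	}
}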