Diff of subr_pool.c: revision 1.191 (2012/01/27 19:48:40) versus revision 1.192 (2012/01/28 00:00:06).
Line 531 pr_rmpage(struct pool *pp, struct pool_i |
|
Line 531 pr_rmpage(struct pool *pp, struct pool_i |
|
void |
void |
pool_subsystem_init(void) |
pool_subsystem_init(void) |
{ |
{ |
int idx; |
|
size_t size; |
size_t size; |
|
int idx; |
|
|
mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE); |
mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE); |
mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE); |
mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE); |
Line 2718 void pool_page_free(struct pool *, void |
|
Line 2718 void pool_page_free(struct pool *, void |
|
|
|
#ifdef POOL_SUBPAGE |
#ifdef POOL_SUBPAGE |
struct pool_allocator pool_allocator_kmem_fullpage = { |
struct pool_allocator pool_allocator_kmem_fullpage = { |
pool_page_alloc, pool_page_free, 0 |
.pa_alloc = pool_page_alloc, |
|
.pa_free = pool_page_free, |
|
.pa_pagesz = 0 |
}; |
}; |
#else |
#else |
struct pool_allocator pool_allocator_kmem = { |
struct pool_allocator pool_allocator_kmem = { |
Line 2733 void pool_page_free_nointr(struct pool * |
|
Line 2735 void pool_page_free_nointr(struct pool * |
|
|
|
#ifdef POOL_SUBPAGE |
#ifdef POOL_SUBPAGE |
struct pool_allocator pool_allocator_nointr_fullpage = { |
struct pool_allocator pool_allocator_nointr_fullpage = { |
pool_page_alloc_nointr, pool_page_free_nointr, 0, |
.pa_alloc = pool_page_alloc_nointr, |
|
.pa_free = pool_page_free_nointr, |
|
.pa_pagesz = 0 |
}; |
}; |
#else |
#else |
struct pool_allocator pool_allocator_nointr = { |
struct pool_allocator pool_allocator_nointr = { |
Line 2755 void *pool_subpage_alloc_nointr(struct p |
|
Line 2759 void *pool_subpage_alloc_nointr(struct p |
|
void pool_subpage_free_nointr(struct pool *, void *); |
void pool_subpage_free_nointr(struct pool *, void *); |
|
|
struct pool_allocator pool_allocator_nointr = { |
struct pool_allocator pool_allocator_nointr = { |
pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE, |
.pa_alloc = pool_subpage_alloc, |
|
.pa_free = pool_subpage_free, |
|
.pa_pagesz = POOL_SUBPAGE |
}; |
}; |
#endif /* POOL_SUBPAGE */ |
#endif /* POOL_SUBPAGE */ |
|
|
Line 2791 pool_allocator_free(struct pool *pp, voi |
|
Line 2797 pool_allocator_free(struct pool *pp, voi |
|
void * |
void * |
pool_page_alloc(struct pool *pp, int flags) |
pool_page_alloc(struct pool *pp, int flags) |
{ |
{ |
bool waitok = (flags & PR_WAITOK) ? true : false; |
const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP; |
int rc; |
|
vmem_addr_t va; |
vmem_addr_t va; |
|
int ret; |
|
|
rc = uvm_km_kmem_alloc(kmem_va_arena, |
ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz, |
pp->pr_alloc->pa_pagesz, |
vflags | VM_INSTANTFIT, &va); |
((waitok ? VM_SLEEP : VM_NOSLEEP) | VM_INSTANTFIT), &va); |
|
|
|
if (rc != 0) |
return ret ? NULL : (void *)va; |
return NULL; |
|
else |
|
return (void *)va; |
|
} |
} |
|
|
void |
void |
Line 2815 pool_page_free(struct pool *pp, void *v) |
|
Line 2817 pool_page_free(struct pool *pp, void *v) |
|
static void * |
static void * |
pool_page_alloc_meta(struct pool *pp, int flags) |
pool_page_alloc_meta(struct pool *pp, int flags) |
{ |
{ |
bool waitok = (flags & PR_WAITOK) ? true : false; |
const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP; |
int rc; |
vmem_addr_t va; |
vmem_addr_t addr; |
int ret; |
|
|
rc = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz, |
ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz, |
(waitok ? VM_SLEEP : VM_NOSLEEP) | VM_INSTANTFIT, &addr); |
vflags | VM_INSTANTFIT, &va); |
|
|
if (rc != 0) |
return ret ? NULL : (void *)va; |
return 0; |
|
else |
|
return (void *)addr; |
|
} |
} |
|
|
static void |
static void |
pool_page_free_meta(struct pool *pp, void *v) |
pool_page_free_meta(struct pool *pp, void *v) |
{ |
{ |
|
|
vmem_free(kmem_meta_arena, (vmem_addr_t)v, |
vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz); |
pp->pr_alloc->pa_pagesz); |
|
} |
} |
|
|
#ifdef POOL_SUBPAGE |
#ifdef POOL_SUBPAGE |