--- version 1.92, 2014/04/02 18:09:10
+++ version 1.92.6.2, 2016/03/19 11:30:31
|
|
 #include <sys/cdefs.h>
 __KERNEL_RCSID(0, "$NetBSD$");

-#if defined(_KERNEL)
+#if defined(_KERNEL) && defined(_KERNEL_OPT)
 #include "opt_ddb.h"
-#endif /* defined(_KERNEL) */
+#endif /* defined(_KERNEL) && defined(_KERNEL_OPT) */

 #include <sys/param.h>
 #include <sys/hash.h>

@@ -194 +195 @@
 static LIST_HEAD(, vmem_btag) vmem_btag_freelist;
 static size_t vmem_btag_freelist_count = 0;
 static struct pool vmem_btag_pool;
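The guard change above is a build fix: opt_*.h headers are generated by config(1) and exist only in kernel builds that also define _KERNEL_OPT, so testing _KERNEL alone would break configurations (for example a userland or rump build of this file) that lack generated option headers. That rationale is inferred, not stated in the diff; the resulting idiom is:

    #if defined(_KERNEL) && defined(_KERNEL_OPT)
    #include "opt_ddb.h"    /* may define the DDB kernel option */
    #endif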
|
|
|
+static void
+vmem_kick_pdaemon(void)
+{
+#if defined(_KERNEL)
+	mutex_spin_enter(&uvm_fpageqlock);
+	uvm_kick_pdaemon();
+	mutex_spin_exit(&uvm_fpageqlock);
+#endif
+}
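The new vmem_kick_pdaemon() helper centralizes the wake-the-pagedaemon sequence that later hunks delete from individual call sites: uvm_kick_pdaemon() runs under the uvm_fpageqlock spin mutex, exactly as the open-coded copies did, and the #if defined(_KERNEL) guard reduces the helper to an empty stub elsewhere. A runnable sketch (a hypothetical userland harness, not part of the diff) showing that the guarded body compiles away cleanly without kernel headers:

    #include <stdio.h>

    /* _KERNEL is deliberately left undefined: the guarded body is never
     * compiled, so no kernel headers or symbols are required. */
    static void
    vmem_kick_pdaemon(void)
    {
    #if defined(_KERNEL)
        mutex_spin_enter(&uvm_fpageqlock);
        uvm_kick_pdaemon();
        mutex_spin_exit(&uvm_fpageqlock);
    #endif
    }

    int
    main(void)
    {
        vmem_kick_pdaemon();    /* an empty call in this harness */
        printf("built and ran without kernel headers\n");
        return 0;
    }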
|
|
 /* ---- boundary tag */

-static int bt_refill(vmem_t *vm, vm_flag_t flags);
+static int bt_refill(vmem_t *vm);

 static void *
 pool_page_alloc_vmem_meta(struct pool *pp, int flags)

@@ -226 +237 @@ struct pool_allocator pool_allocator_vmem_meta
 };

 static int
-bt_refill(vmem_t *vm, vm_flag_t flags)
+bt_refill(vmem_t *vm)
 {
 	bt_t *bt;

-	KASSERT(flags & VM_NOSLEEP);
-
 	VMEM_LOCK(vm);
 	if (vm->vm_nfreetags > BT_MINRESERVE) {
 		VMEM_UNLOCK(vm);

@@ -270 +279 @@ bt_refill(vmem_t *vm, vm_flag_t flags)
 	VMEM_UNLOCK(vm);

 	if (kmem_meta_arena != NULL) {
-		bt_refill(kmem_arena, (flags & ~VM_FITMASK)
-		    | VM_INSTANTFIT | VM_POPULATING);
-		bt_refill(kmem_va_meta_arena, (flags & ~VM_FITMASK)
-		    | VM_INSTANTFIT | VM_POPULATING);
-		bt_refill(kmem_meta_arena, (flags & ~VM_FITMASK)
-		    | VM_INSTANTFIT | VM_POPULATING);
+		(void)bt_refill(kmem_arena);
+		(void)bt_refill(kmem_va_meta_arena);
+		(void)bt_refill(kmem_meta_arena);
 	}

 	return 0;
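With the vm_flag_t argument gone (and the KASSERT(flags & VM_NOSLEEP) with it), bt_refill() always performs a non-sleeping top-up of the arena's boundary-tag reserve and reports failure through its return value, so the recursive kmem meta-arena refills no longer need the VM_FITMASK/VM_POPULATING flag surgery. A runnable userland analogue (hypothetical, heavily simplified names) of the keep-the-reserve-topped-up pattern:

    #include <stdio.h>
    #include <stdlib.h>

    #define MINRESERVE 4    /* stands in for BT_MINRESERVE */

    struct tag { struct tag *next; };
    static struct tag *freelist;
    static int nfree;

    /* Top up the reserve without sleeping; non-zero tells the caller to
     * retry later, mirroring bt_refill()'s contract. */
    static int
    refill(void)
    {
        while (nfree <= MINRESERVE) {
            struct tag *t = malloc(sizeof(*t)); /* ~ pool_get(PR_NOWAIT) */
            if (t == NULL)
                return 1;
            t->next = freelist;
            freelist = t;
            nfree++;
        }
        return 0;
    }

    int
    main(void)
    {
        printf("refill %s; %d tags in reserve\n",
            refill() ? "failed" : "ok", nfree);
        return 0;
    }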
@@ -288 +294 @@ bt_alloc(vmem_t *vm, vm_flag_t flags)
 	VMEM_LOCK(vm);
 	while (vm->vm_nfreetags <= BT_MINRESERVE && (flags & VM_POPULATING) == 0) {
 		VMEM_UNLOCK(vm);
-		if (bt_refill(vm, VM_NOSLEEP | VM_INSTANTFIT)) {
-			return NULL;
+		if (bt_refill(vm)) {
+			if ((flags & VM_NOSLEEP) != 0) {
+				return NULL;
+			}
+
+			/*
+			 * It would be nice to wait for something specific here
+			 * but there are multiple ways that a retry could
+			 * succeed and we can't wait for multiple things
+			 * simultaneously. So we'll just sleep for an arbitrary
+			 * short period of time and retry regardless.
+			 * This should be a very rare case.
+			 */
+
+			vmem_kick_pdaemon();
+			kpause("btalloc", false, 1, NULL);
 		}
 		VMEM_LOCK(vm);
 	}
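This hunk carries the behavioral change: when bt_refill() fails and the caller did not pass VM_NOSLEEP, bt_alloc() now kicks the pagedaemon and kpause()s for one tick before retrying instead of returning NULL. A runnable userland analogue of the retry loop (hypothetical names; usleep() stands in for the one-tick kpause()):

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static int attempts;

    /* Fail twice to exercise the retry path, then succeed. */
    static int
    try_refill(void)
    {
        return ++attempts < 3;
    }

    static void *
    alloc_with_retry(bool can_sleep)
    {
        while (try_refill() != 0) {
            if (!can_sleep)
                return NULL;    /* the VM_NOSLEEP early return */
            /* the kernel kicks the pagedaemon, then kpause()s a tick */
            usleep(10000);
        }
        return &attempts;       /* stand-in for the real allocation */
    }

    int
    main(void)
    {
        printf("%s after %d attempts\n",
            alloc_with_retry(true) != NULL ? "succeeded" : "gave up",
            attempts);
        return 0;
    }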
@@ -940 +960 @@ vmem_init(vmem_t *vm, const char *name,
 #if defined(_KERNEL)
 	if (flags & VM_BOOTSTRAP) {
-		bt_refill(vm, VM_NOSLEEP);
+		bt_refill(vm);
 	}

 	mutex_enter(&vmem_list_lock);

 	/* XXX */

 	if ((flags & VM_SLEEP) != 0) {
-#if defined(_KERNEL)
-		mutex_spin_enter(&uvm_fpageqlock);
-		uvm_kick_pdaemon();
-		mutex_spin_exit(&uvm_fpageqlock);
-#endif
+		vmem_kick_pdaemon();
 		VMEM_LOCK(vm);
 		VMEM_CONDVAR_WAIT(vm);
 		VMEM_UNLOCK(vm);
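Taken together, the sleep path at each call site collapses to a single call, with the #if defined(_KERNEL) guard now living inside vmem_kick_pdaemon(). Reconstructed from the hunk above, the new-version code reads:

    if ((flags & VM_SLEEP) != 0) {
        vmem_kick_pdaemon();    /* guard is inside the helper now */
        VMEM_LOCK(vm);
        VMEM_CONDVAR_WAIT(vm);  /* sleep until resources are freed */
        VMEM_UNLOCK(vm);
    }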