Diff of uvm_map.c between version 1.204 (2005/06/28 05:25:42) and version 1.204.2.4 (2007/09/03 14:47:08).

Line 92 __KERNEL_RCSID(0, "$NetBSD$");
#include <sys/shm.h> |
#include <sys/shm.h> |
#endif |
#endif |
|
|
#define UVM_MAP_C |
|
#include <uvm/uvm.h> |
#include <uvm/uvm.h> |
#undef RB_AUGMENT |
#undef RB_AUGMENT |
#define RB_AUGMENT(x) uvm_rb_augment(x) |
#define RB_AUGMENT(x) uvm_rb_augment(x) |
Line 101 __KERNEL_RCSID(0, "$NetBSD$"); |
|
Line 100 __KERNEL_RCSID(0, "$NetBSD$"); |
|
#include <uvm/uvm_ddb.h> |
#include <uvm/uvm_ddb.h> |
#endif |
#endif |
|
|
version 1.204:

#ifndef UVMMAP_NOCOUNTERS
#include <sys/device.h>
struct evcnt map_ubackmerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
    "uvmmap", "ubackmerge");
struct evcnt map_uforwmerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
    "uvmmap", "uforwmerge");
struct evcnt map_ubimerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
    "uvmmap", "ubimerge");
struct evcnt map_unomerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
    "uvmmap", "unomerge");
struct evcnt map_kbackmerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
    "uvmmap", "kbackmerge");
struct evcnt map_kforwmerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
    "uvmmap", "kforwmerge");
struct evcnt map_kbimerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
    "uvmmap", "kbimerge");
struct evcnt map_knomerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
    "uvmmap", "knomerge");
struct evcnt uvm_map_call = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
    "uvmmap", "map_call");
struct evcnt uvm_mlk_call = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
    "uvmmap", "mlk_call");
struct evcnt uvm_mlk_hint = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
    "uvmmap", "mlk_hint");

EVCNT_ATTACH_STATIC(map_ubackmerge);
EVCNT_ATTACH_STATIC(map_uforwmerge);
EVCNT_ATTACH_STATIC(map_ubimerge);
EVCNT_ATTACH_STATIC(map_unomerge);
EVCNT_ATTACH_STATIC(map_kbackmerge);
EVCNT_ATTACH_STATIC(map_kforwmerge);
EVCNT_ATTACH_STATIC(map_kbimerge);
EVCNT_ATTACH_STATIC(map_knomerge);
EVCNT_ATTACH_STATIC(uvm_map_call);
EVCNT_ATTACH_STATIC(uvm_mlk_call);
EVCNT_ATTACH_STATIC(uvm_mlk_hint);

#define UVMCNT_INCR(ev)		ev.ev_count++
#define UVMCNT_DECR(ev)		ev.ev_count--
#else
#define UVMCNT_INCR(ev)
#define UVMCNT_DECR(ev)
#endif

version 1.204.2.4:

#if defined(UVMMAP_NOCOUNTERS)

#define UVMMAP_EVCNT_DEFINE(name)	/* nothing */
#define UVMMAP_EVCNT_INCR(ev)		/* nothing */
#define UVMMAP_EVCNT_DECR(ev)		/* nothing */

#else /* defined(UVMMAP_NOCOUNTERS) */

#include <sys/evcnt.h>
#define UVMMAP_EVCNT_DEFINE(name) \
struct evcnt uvmmap_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
    "uvmmap", #name); \
EVCNT_ATTACH_STATIC(uvmmap_evcnt_##name);
#define UVMMAP_EVCNT_INCR(ev)		uvmmap_evcnt_##ev.ev_count++
#define UVMMAP_EVCNT_DECR(ev)		uvmmap_evcnt_##ev.ev_count--

#endif /* defined(UVMMAP_NOCOUNTERS) */
|
|
|
UVMMAP_EVCNT_DEFINE(ubackmerge) |
|
UVMMAP_EVCNT_DEFINE(uforwmerge) |
|
UVMMAP_EVCNT_DEFINE(ubimerge) |
|
UVMMAP_EVCNT_DEFINE(unomerge) |
|
UVMMAP_EVCNT_DEFINE(kbackmerge) |
|
UVMMAP_EVCNT_DEFINE(kforwmerge) |
|
UVMMAP_EVCNT_DEFINE(kbimerge) |
|
UVMMAP_EVCNT_DEFINE(knomerge) |
|
UVMMAP_EVCNT_DEFINE(map_call) |
|
UVMMAP_EVCNT_DEFINE(mlk_call) |
|
UVMMAP_EVCNT_DEFINE(mlk_hint) |
|
|
|
UVMMAP_EVCNT_DEFINE(uke_alloc) |
|
UVMMAP_EVCNT_DEFINE(uke_free) |
|
UVMMAP_EVCNT_DEFINE(ukh_alloc) |
|
UVMMAP_EVCNT_DEFINE(ukh_free) |
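/*
 * Illustrative expansion (not part of either revision): with the
 * UVMMAP_EVCNT_DEFINE macro above, UVMMAP_EVCNT_DEFINE(ubackmerge)
 * produces, in the !UVMMAP_NOCOUNTERS case:
 *
 *	struct evcnt uvmmap_evcnt_ubackmerge =
 *	    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "uvmmap", "ubackmerge");
 *	EVCNT_ATTACH_STATIC(uvmmap_evcnt_ubackmerge);
 *
 * and UVMMAP_EVCNT_INCR(ubackmerge) expands to
 * uvmmap_evcnt_ubackmerge.ev_count++.
 */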
|
|
const char vmmapbsy[] = "vmmapbsy"; |
const char vmmapbsy[] = "vmmapbsy"; |
|
|
Line 152 const char vmmapbsy[] = "vmmapbsy"; |
|
Line 142 const char vmmapbsy[] = "vmmapbsy"; |
|
*/ |
*/ |
|
|
POOL_INIT(uvm_vmspace_pool, sizeof(struct vmspace), 0, 0, 0, "vmsppl", |
POOL_INIT(uvm_vmspace_pool, sizeof(struct vmspace), 0, 0, 0, "vmsppl", |
&pool_allocator_nointr); |
&pool_allocator_nointr, IPL_NONE); |
|
|
/* |
/* |
* pool for dynamically-allocated map entries. |
* pool for dynamically-allocated map entries. |
*/ |
*/ |
|
|
POOL_INIT(uvm_map_entry_pool, sizeof(struct vm_map_entry), 0, 0, 0, "vmmpepl", |
POOL_INIT(uvm_map_entry_pool, sizeof(struct vm_map_entry), 0, 0, 0, "vmmpepl", |
&pool_allocator_nointr); |
&pool_allocator_nointr, IPL_NONE); |
|
|
MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures"); |
MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures"); |
MALLOC_DEFINE(M_VMPMAP, "VM pmap", "VM pmap"); |
MALLOC_DEFINE(M_VMPMAP, "VM pmap", "VM pmap"); |
Line 184 vaddr_t uvm_maxkaddr; |
|
Line 174 vaddr_t uvm_maxkaddr; |
|
* for the vm_map. |
* for the vm_map. |
*/ |
*/ |
extern struct vm_map *pager_map; /* XXX */ |
extern struct vm_map *pager_map; /* XXX */ |
|
#define VM_MAP_USE_KMAPENT_FLAGS(flags) \ |
|
(((flags) & VM_MAP_INTRSAFE) != 0) |
#define VM_MAP_USE_KMAPENT(map) \ |
#define VM_MAP_USE_KMAPENT(map) \ |
(((map)->flags & VM_MAP_INTRSAFE) || (map) == kernel_map) |
(VM_MAP_USE_KMAPENT_FLAGS((map)->flags) || (map) == kernel_map) |
|
|
/* |
/* |
* UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging |
* UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging |
Line 209 extern struct vm_map *pager_map; /* XXX |
|
Line 201 extern struct vm_map *pager_map; /* XXX |
|
* => map must be locked |
* => map must be locked |
*/ |
*/ |
#define uvm_map_entry_link(map, after_where, entry) do { \ |
#define uvm_map_entry_link(map, after_where, entry) do { \ |
KASSERT(entry->start < entry->end); \ |
uvm_mapent_check(entry); \ |
(map)->nentries++; \ |
(map)->nentries++; \ |
(entry)->prev = (after_where); \ |
(entry)->prev = (after_where); \ |
(entry)->next = (after_where)->next; \ |
(entry)->next = (after_where)->next; \ |
Line 224 extern struct vm_map *pager_map; /* XXX |
|
Line 216 extern struct vm_map *pager_map; /* XXX |
|
* => map must be locked |
* => map must be locked |
*/ |
*/ |
#define uvm_map_entry_unlink(map, entry) do { \ |
#define uvm_map_entry_unlink(map, entry) do { \ |
|
KASSERT((entry) != (map)->first_free); \ |
|
KASSERT((entry) != (map)->hint); \ |
|
uvm_mapent_check(entry); \ |
(map)->nentries--; \ |
(map)->nentries--; \ |
(entry)->next->prev = (entry)->prev; \ |
(entry)->next->prev = (entry)->prev; \ |
(entry)->prev->next = (entry)->next; \ |
(entry)->prev->next = (entry)->next; \ |
Line 236 extern struct vm_map *pager_map; /* XXX |
|
Line 231 extern struct vm_map *pager_map; /* XXX |
|
* => map need not be locked (protected by hint_lock). |
* => map need not be locked (protected by hint_lock). |
*/ |
*/ |
#define SAVE_HINT(map,check,value) do { \ |
#define SAVE_HINT(map,check,value) do { \ |
simple_lock(&(map)->hint_lock); \ |
mutex_enter(&(map)->hint_lock); \ |
if ((map)->hint == (check)) \ |
if ((map)->hint == (check)) \ |
(map)->hint = (value); \ |
(map)->hint = (value); \ |
simple_unlock(&(map)->hint_lock); \ |
mutex_exit(&(map)->hint_lock); \ |
} while (/*CONSTCOND*/ 0) |
} while (/*CONSTCOND*/ 0) |
|
|
/* |
/* |
|
* clear_hints: ensure that hints don't point to the entry. |
|
* |
|
* => map must be write-locked. |
|
*/ |
|
static void |
|
clear_hints(struct vm_map *map, struct vm_map_entry *ent) |
|
{ |
|
|
|
SAVE_HINT(map, ent, ent->prev); |
|
if (map->first_free == ent) { |
|
map->first_free = ent->prev; |
|
} |
|
} |
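/*
 * For reference (a sketch, not a quoted hunk): uvm_map_replace() later in
 * this diff now calls this helper instead of open-coding the hint and
 * first_free updates:
 *
 *	clear_hints(map, oldent);
 *	uvm_map_entry_unlink(map, oldent);
 */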
|
|
|
/* |
* VM_MAP_RANGE_CHECK: check and correct range |
* VM_MAP_RANGE_CHECK: check and correct range |
* |
* |
* => map must at least be read locked |
* => map must at least be read locked |
Line 269 static struct vm_map_entry * |
|
Line 279 static struct vm_map_entry * |
|
struct uvm_mapent_reservation *); |
struct uvm_mapent_reservation *); |
static void uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *); |
static void uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *); |
static void uvm_mapent_free(struct vm_map_entry *); |
static void uvm_mapent_free(struct vm_map_entry *); |
|
#if defined(DEBUG) |
|
static void _uvm_mapent_check(const struct vm_map_entry *, const char *, |
|
int); |
|
#define uvm_mapent_check(map) _uvm_mapent_check(map, __FILE__, __LINE__) |
|
#else /* defined(DEBUG) */ |
|
#define uvm_mapent_check(e) /* nothing */ |
|
#endif /* defined(DEBUG) */ |
static struct vm_map_entry * |
static struct vm_map_entry * |
uvm_kmapent_alloc(struct vm_map *, int); |
uvm_kmapent_alloc(struct vm_map *, int); |
static void uvm_kmapent_free(struct vm_map_entry *); |
static void uvm_kmapent_free(struct vm_map_entry *); |
|
static vsize_t uvm_kmapent_overhead(vsize_t); |
|
|
static void uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *); |
static void uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *); |
static void uvm_map_reference_amap(struct vm_map_entry *, int); |
static void uvm_map_reference_amap(struct vm_map_entry *, int); |
static int uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int, |
static int uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int, |
struct vm_map_entry *); |
struct vm_map_entry *); |
static void uvm_map_unreference_amap(struct vm_map_entry *, int); |
static void uvm_map_unreference_amap(struct vm_map_entry *, int); |
|
|
int _uvm_tree_sanity(struct vm_map *, const char *); |
int _uvm_map_sanity(struct vm_map *); |
|
int _uvm_tree_sanity(struct vm_map *); |
static vsize_t uvm_rb_subtree_space(const struct vm_map_entry *); |
static vsize_t uvm_rb_subtree_space(const struct vm_map_entry *); |
|
|
static __inline int |
static inline int |
uvm_compare(const struct vm_map_entry *a, const struct vm_map_entry *b) |
uvm_compare(const struct vm_map_entry *a, const struct vm_map_entry *b) |
{ |
{ |
|
|
Line 293 uvm_compare(const struct vm_map_entry *a |
|
Line 313 uvm_compare(const struct vm_map_entry *a |
|
return (0); |
return (0); |
} |
} |
|
|
static __inline void |
static inline void |
uvm_rb_augment(struct vm_map_entry *entry) |
uvm_rb_augment(struct vm_map_entry *entry) |
{ |
{ |
|
|
Line 304 RB_PROTOTYPE(uvm_tree, vm_map_entry, rb_ |
|
Line 324 RB_PROTOTYPE(uvm_tree, vm_map_entry, rb_ |
|
|
|
RB_GENERATE(uvm_tree, vm_map_entry, rb_entry, uvm_compare); |
RB_GENERATE(uvm_tree, vm_map_entry, rb_entry, uvm_compare); |
|
|
static __inline vsize_t |
static inline vsize_t |
uvm_rb_space(const struct vm_map *map, const struct vm_map_entry *entry) |
uvm_rb_space(const struct vm_map *map, const struct vm_map_entry *entry) |
{ |
{ |
/* XXX map is not used */ |
/* XXX map is not used */ |
Line 334 uvm_rb_subtree_space(const struct vm_map |
|
Line 354 uvm_rb_subtree_space(const struct vm_map |
|
return (space); |
return (space); |
} |
} |
|
|
static __inline void |
static inline void |
uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry) |
uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry) |
{ |
{ |
/* We need to traverse to the very top */ |
/* We need to traverse to the very top */ |
Line 374 uvm_rb_remove(struct vm_map *map, struct |
|
Line 394 uvm_rb_remove(struct vm_map *map, struct |
|
uvm_rb_fixup(map, parent); |
uvm_rb_fixup(map, parent); |
} |
} |
|
|
version 1.204:

#ifdef DEBUG
int uvm_debug_check_rbtree = 0;
#define uvm_tree_sanity(x,y) \
	if (uvm_debug_check_rbtree) \
		_uvm_tree_sanity(x,y)
#else
#define uvm_tree_sanity(x,y)
#endif

version 1.204.2.4:

#if defined(DEBUG)
int uvm_debug_check_map = 0;
int uvm_debug_check_rbtree = 0;
#define uvm_map_check(map, name) \
	_uvm_map_check((map), (name), __FILE__, __LINE__)
static void
_uvm_map_check(struct vm_map *map, const char *name,
    const char *file, int line)
{

	if ((uvm_debug_check_map && _uvm_map_sanity(map)) ||
	    (uvm_debug_check_rbtree && _uvm_tree_sanity(map))) {
		panic("uvm_map_check failed: \"%s\" map=%p (%s:%d)",
		    name, map, file, line);
	}
}
#else /* defined(DEBUG) */
#define uvm_map_check(map, name)	/* nothing */
#endif /* defined(DEBUG) */
|
|
|
#if defined(DEBUG) || defined(DDB) |
int |
int |
_uvm_tree_sanity(struct vm_map *map, const char *name) |
_uvm_map_sanity(struct vm_map *map) |
|
{ |
|
bool first_free_found = false; |
|
bool hint_found = false; |
|
const struct vm_map_entry *e; |
|
|
|
e = &map->header; |
|
for (;;) { |
|
if (map->first_free == e) { |
|
first_free_found = true; |
|
} else if (!first_free_found && e->next->start > e->end) { |
|
printf("first_free %p should be %p\n", |
|
map->first_free, e); |
|
return -1; |
|
} |
|
if (map->hint == e) { |
|
hint_found = true; |
|
} |
|
|
|
e = e->next; |
|
if (e == &map->header) { |
|
break; |
|
} |
|
} |
|
if (!first_free_found) { |
|
printf("stale first_free\n"); |
|
return -1; |
|
} |
|
if (!hint_found) { |
|
printf("stale hint\n"); |
|
return -1; |
|
} |
|
return 0; |
|
} |
|
|
|
int |
|
_uvm_tree_sanity(struct vm_map *map) |
{ |
{ |
struct vm_map_entry *tmp, *trtmp; |
struct vm_map_entry *tmp, *trtmp; |
int n = 0, i = 1; |
int n = 0, i = 1; |
|
|
RB_FOREACH(tmp, uvm_tree, &map->rbhead) { |
RB_FOREACH(tmp, uvm_tree, &map->rbhead) { |
if (tmp->ownspace != uvm_rb_space(map, tmp)) { |
if (tmp->ownspace != uvm_rb_space(map, tmp)) { |
printf("%s: %d/%d ownspace %lx != %lx %s\n", |
printf("%d/%d ownspace %lx != %lx %s\n", |
name, n + 1, map->nentries, |
n + 1, map->nentries, |
(ulong)tmp->ownspace, (ulong)uvm_rb_space(map, tmp), |
(ulong)tmp->ownspace, (ulong)uvm_rb_space(map, tmp), |
tmp->next == &map->header ? "(last)" : ""); |
tmp->next == &map->header ? "(last)" : ""); |
goto error; |
goto error; |
Line 401 _uvm_tree_sanity(struct vm_map *map, con |
|
Line 469 _uvm_tree_sanity(struct vm_map *map, con |
|
trtmp = NULL; |
trtmp = NULL; |
RB_FOREACH(tmp, uvm_tree, &map->rbhead) { |
RB_FOREACH(tmp, uvm_tree, &map->rbhead) { |
if (tmp->space != uvm_rb_subtree_space(tmp)) { |
if (tmp->space != uvm_rb_subtree_space(tmp)) { |
printf("%s: space %lx != %lx\n", |
printf("space %lx != %lx\n", |
name, (ulong)tmp->space, |
(ulong)tmp->space, |
(ulong)uvm_rb_subtree_space(tmp)); |
(ulong)uvm_rb_subtree_space(tmp)); |
goto error; |
goto error; |
} |
} |
if (trtmp != NULL && trtmp->start >= tmp->start) { |
if (trtmp != NULL && trtmp->start >= tmp->start) { |
printf("%s: corrupt: 0x%lx >= 0x%lx\n", |
printf("corrupt: 0x%lx >= 0x%lx\n", |
name, trtmp->start, tmp->start); |
trtmp->start, tmp->start); |
goto error; |
goto error; |
} |
} |
n++; |
n++; |
Line 417 _uvm_tree_sanity(struct vm_map *map, con |
|
Line 485 _uvm_tree_sanity(struct vm_map *map, con |
|
} |
} |
|
|
if (n != map->nentries) { |
if (n != map->nentries) { |
printf("%s: nentries: %d vs %d\n", |
printf("nentries: %d vs %d\n", n, map->nentries); |
name, n, map->nentries); |
|
goto error; |
goto error; |
} |
} |
|
|
Line 426 _uvm_tree_sanity(struct vm_map *map, con |
|
Line 493 _uvm_tree_sanity(struct vm_map *map, con |
|
tmp = tmp->next, i++) { |
tmp = tmp->next, i++) { |
trtmp = RB_FIND(uvm_tree, &map->rbhead, tmp); |
trtmp = RB_FIND(uvm_tree, &map->rbhead, tmp); |
if (trtmp != tmp) { |
if (trtmp != tmp) { |
printf("%s: lookup: %d: %p - %p: %p\n", |
printf("lookup: %d: %p - %p: %p\n", i, tmp, trtmp, |
name, i, tmp, trtmp, |
|
RB_PARENT(tmp, rb_entry)); |
RB_PARENT(tmp, rb_entry)); |
goto error; |
goto error; |
} |
} |
Line 435 _uvm_tree_sanity(struct vm_map *map, con |
|
Line 501 _uvm_tree_sanity(struct vm_map *map, con |
|
|
|
return (0); |
return (0); |
error: |
error: |
#if defined(DDB) && __GNUC__ < 4 |
|
/* handy breakpoint location for error case */ |
|
__asm(".globl treesanity_label\ntreesanity_label:"); |
|
#endif |
|
return (-1); |
return (-1); |
} |
} |
|
#endif /* defined(DEBUG) || defined(DDB) */ |
|
|
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
static struct vm_map *uvm_kmapent_map(struct vm_map_entry *); |
static struct vm_map *uvm_kmapent_map(struct vm_map_entry *); |
#endif |
#endif |
|
|
/* |
/* |
|
* vm_map_lock: acquire an exclusive (write) lock on a map. |
|
* |
|
* => Note that "intrsafe" maps use only exclusive, spin locks. |
|
* |
|
* => The locking protocol provides for guaranteed upgrade from shared -> |
|
* exclusive by whichever thread currently has the map marked busy. |
|
* See "LOCKING PROTOCOL NOTES" in uvm_map.h. This is horrible; among |
|
* other problems, it defeats any fairness guarantees provided by RW |
|
* locks. |
|
*/ |
|
|
|
void |
|
vm_map_lock(struct vm_map *map) |
|
{ |
|
|
|
if ((map->flags & VM_MAP_INTRSAFE) != 0) { |
|
mutex_spin_enter(&map->mutex); |
|
return; |
|
} |
|
|
|
for (;;) { |
|
rw_enter(&map->lock, RW_WRITER); |
|
if (map->busy == NULL) |
|
break; |
|
KASSERT(map->busy != curlwp); |
|
mutex_enter(&map->misc_lock); |
|
rw_exit(&map->lock); |
|
cv_wait(&map->cv, &map->misc_lock); |
|
mutex_exit(&map->misc_lock); |
|
} |
|
|
|
map->timestamp++; |
|
} |
|
|
|
/* |
|
* vm_map_lock_try: try to lock a map, failing if it is already locked. |
|
*/ |
|
|
|
bool |
|
vm_map_lock_try(struct vm_map *map) |
|
{ |
|
|
|
if ((map->flags & VM_MAP_INTRSAFE) != 0) |
|
return mutex_tryenter(&map->mutex); |
|
if (!rw_tryenter(&map->lock, RW_WRITER)) |
|
return false; |
|
if (map->busy != NULL) { |
|
rw_exit(&map->lock); |
|
return false; |
|
} |
|
|
|
map->timestamp++; |
|
return true; |
|
} |
|
|
|
/* |
|
* vm_map_unlock: release an exclusive lock on a map. |
|
*/ |
|
|
|
void |
|
vm_map_unlock(struct vm_map *map) |
|
{ |
|
|
|
if ((map->flags & VM_MAP_INTRSAFE) != 0) |
|
mutex_spin_exit(&map->mutex); |
|
else { |
|
KASSERT(rw_write_held(&map->lock)); |
|
rw_exit(&map->lock); |
|
} |
|
} |
|
|
|
/* |
|
* vm_map_upgrade: upgrade a shared lock to an exclusive lock. |
|
* |
|
* => the caller must hold the map busy |
|
*/ |
|
|
|
void |
|
vm_map_upgrade(struct vm_map *map) |
|
{ |
|
|
|
KASSERT(rw_read_held(&map->lock)); |
|
KASSERT(map->busy == curlwp); |
|
|
|
if (rw_tryupgrade(&map->lock)) |
|
return; |
|
|
|
rw_exit(&map->lock); |
|
rw_enter(&map->lock, RW_WRITER); |
|
} |
|
|
|
/* |
|
* vm_map_unbusy: mark the map as unbusy, and wake any waiters that |
|
* want an exclusive lock. |
|
*/ |
|
|
|
void |
|
vm_map_unbusy(struct vm_map *map) |
|
{ |
|
|
|
KASSERT(rw_lock_held(&map->lock)); |
|
KASSERT(map->busy == curlwp); |
|
|
|
/* |
|
* Safe to clear 'busy' and 'waiters' with only a read lock held: |
|
* |
|
* o they can only be set with a write lock held |
|
* o writers are blocked out with a read or write hold |
|
* o at any time, only one thread owns the set of values |
|
*/ |
|
map->busy = NULL; |
|
mutex_enter(&map->misc_lock); |
|
cv_broadcast(&map->cv); |
|
mutex_exit(&map->misc_lock); |
|
} |
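/*
 * Usage sketch (an illustration, not a hunk from either revision) for the
 * rewritten locking interface above, on a map without VM_MAP_INTRSAFE:
 *
 *	vm_map_lock(map);	exclusive (write) lock; may sleep until no
 *				other thread holds the map busy
 *	... modify the entry list ...
 *	vm_map_unlock(map);
 *
 *	if (vm_map_lock_try(map)) {	non-blocking attempt
 *		...
 *		vm_map_unlock(map);
 *	}
 */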
|
|
|
/* |
* uvm_mapent_alloc: allocate a map entry |
* uvm_mapent_alloc: allocate a map entry |
*/ |
*/ |
|
|
Line 486 uvm_mapent_alloc_split(struct vm_map *ma |
|
Line 666 uvm_mapent_alloc_split(struct vm_map *ma |
|
(old_entry->flags & UVM_MAP_QUANTUM) || !UMR_EMPTY(umr)); |
(old_entry->flags & UVM_MAP_QUANTUM) || !UMR_EMPTY(umr)); |
|
|
if (old_entry->flags & UVM_MAP_QUANTUM) { |
if (old_entry->flags & UVM_MAP_QUANTUM) { |
int s; |
|
struct vm_map_kernel *vmk = vm_map_to_kernel(map); |
struct vm_map_kernel *vmk = vm_map_to_kernel(map); |
|
|
s = splvm(); |
mutex_spin_enter(&uvm_kentry_lock); |
simple_lock(&uvm.kentry_lock); |
|
me = vmk->vmk_merged_entries; |
me = vmk->vmk_merged_entries; |
KASSERT(me); |
KASSERT(me); |
vmk->vmk_merged_entries = me->next; |
vmk->vmk_merged_entries = me->next; |
simple_unlock(&uvm.kentry_lock); |
mutex_spin_exit(&uvm_kentry_lock); |
splx(s); |
|
KASSERT(me->flags & UVM_MAP_QUANTUM); |
KASSERT(me->flags & UVM_MAP_QUANTUM); |
} else { |
} else { |
me = uvm_mapent_alloc(map, flags); |
me = uvm_mapent_alloc(map, flags); |
Line 540 uvm_mapent_free_merged(struct vm_map *ma |
|
Line 717 uvm_mapent_free_merged(struct vm_map *ma |
|
* keep this entry for later splitting. |
* keep this entry for later splitting. |
*/ |
*/ |
struct vm_map_kernel *vmk; |
struct vm_map_kernel *vmk; |
int s; |
|
|
|
KASSERT(VM_MAP_IS_KERNEL(map)); |
KASSERT(VM_MAP_IS_KERNEL(map)); |
KASSERT(!VM_MAP_USE_KMAPENT(map) || |
KASSERT(!VM_MAP_USE_KMAPENT(map) || |
(me->flags & UVM_MAP_KERNEL)); |
(me->flags & UVM_MAP_KERNEL)); |
|
|
vmk = vm_map_to_kernel(map); |
vmk = vm_map_to_kernel(map); |
s = splvm(); |
mutex_spin_enter(&uvm_kentry_lock); |
simple_lock(&uvm.kentry_lock); |
|
me->next = vmk->vmk_merged_entries; |
me->next = vmk->vmk_merged_entries; |
vmk->vmk_merged_entries = me; |
vmk->vmk_merged_entries = me; |
simple_unlock(&uvm.kentry_lock); |
mutex_spin_exit(&uvm_kentry_lock); |
splx(s); |
|
} else { |
} else { |
uvm_mapent_free(me); |
uvm_mapent_free(me); |
} |
} |
Line 562 uvm_mapent_free_merged(struct vm_map *ma |
|
Line 736 uvm_mapent_free_merged(struct vm_map *ma |
|
* uvm_mapent_copy: copy a map entry, preserving flags |
* uvm_mapent_copy: copy a map entry, preserving flags |
*/ |
*/ |
|
|
static __inline void |
static inline void |
uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst) |
uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst) |
{ |
{ |
|
|
Line 571 uvm_mapent_copy(struct vm_map_entry *src |
|
Line 745 uvm_mapent_copy(struct vm_map_entry *src |
|
} |
} |
|
|
/* |
/* |
|
* uvm_mapent_overhead: calculate maximum kva overhead necessary for |
|
* map entries. |
|
* |
|
* => size and flags are the same as uvm_km_suballoc's ones. |
|
*/ |
|
|
|
vsize_t |
|
uvm_mapent_overhead(vsize_t size, int flags) |
|
{ |
|
|
|
if (VM_MAP_USE_KMAPENT_FLAGS(flags)) { |
|
return uvm_kmapent_overhead(size); |
|
} |
|
return 0; |
|
} |
|
|
|
#if defined(DEBUG) |
|
static void |
|
_uvm_mapent_check(const struct vm_map_entry *entry, const char *file, int line) |
|
{ |
|
|
|
if (entry->start >= entry->end) { |
|
goto bad; |
|
} |
|
if (UVM_ET_ISOBJ(entry)) { |
|
if (entry->object.uvm_obj == NULL) { |
|
goto bad; |
|
} |
|
} else if (UVM_ET_ISSUBMAP(entry)) { |
|
if (entry->object.sub_map == NULL) { |
|
goto bad; |
|
} |
|
} else { |
|
if (entry->object.uvm_obj != NULL || |
|
entry->object.sub_map != NULL) { |
|
goto bad; |
|
} |
|
} |
|
if (!UVM_ET_ISOBJ(entry)) { |
|
if (entry->offset != 0) { |
|
goto bad; |
|
} |
|
} |
|
|
|
return; |
|
|
|
bad: |
|
panic("%s: bad entry %p (%s:%d)", __func__, entry, file, line); |
|
} |
|
#endif /* defined(DEBUG) */ |
|
|
|
/* |
* uvm_map_entry_unwire: unwire a map entry |
* uvm_map_entry_unwire: unwire a map entry |
* |
* |
* => map should be locked by caller |
* => map should be locked by caller |
*/ |
*/ |
|
|
static __inline void |
static inline void |
uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry) |
uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry) |
{ |
{ |
|
|
Line 588 uvm_map_entry_unwire(struct vm_map *map, |
|
Line 814 uvm_map_entry_unwire(struct vm_map *map, |
|
/* |
/* |
* wrapper for calling amap_ref() |
* wrapper for calling amap_ref() |
*/ |
*/ |
static __inline void |
static inline void |
uvm_map_reference_amap(struct vm_map_entry *entry, int flags) |
uvm_map_reference_amap(struct vm_map_entry *entry, int flags) |
{ |
{ |
|
|
Line 600 uvm_map_reference_amap(struct vm_map_ent |
|
Line 826 uvm_map_reference_amap(struct vm_map_ent |
|
/* |
/* |
* wrapper for calling amap_unref() |
* wrapper for calling amap_unref() |
*/ |
*/ |
static __inline void |
static inline void |
uvm_map_unreference_amap(struct vm_map_entry *entry, int flags) |
uvm_map_unreference_amap(struct vm_map_entry *entry, int flags) |
{ |
{ |
|
|
Line 634 uvm_map_init(void) |
|
Line 860 uvm_map_init(void) |
|
|
|
/* |
/* |
* initialize the global lock for kernel map entry. |
* initialize the global lock for kernel map entry. |
* |
|
* XXX is it worth to have per-map lock instead? |
|
*/ |
*/ |
|
|
simple_lock_init(&uvm.kentry_lock); |
mutex_init(&uvm_kentry_lock, MUTEX_DRIVER, IPL_VM); |
} |
} |
|
|
/* |
/* |
Line 646 uvm_map_init(void) |
|
Line 870 uvm_map_init(void) |
|
*/ |
*/ |
|
|
/* |
/* |
|
* uvm_mapent_splitadj: adjust map entries for splitting, after uvm_mapent_copy. |
|
*/ |
|
|
|
static void |
|
uvm_mapent_splitadj(struct vm_map_entry *entry1, struct vm_map_entry *entry2, |
|
vaddr_t splitat) |
|
{ |
|
vaddr_t adj; |
|
|
|
KASSERT(entry1->start < splitat); |
|
KASSERT(splitat < entry1->end); |
|
|
|
adj = splitat - entry1->start; |
|
entry1->end = entry2->start = splitat; |
|
|
|
if (entry1->aref.ar_amap) { |
|
amap_splitref(&entry1->aref, &entry2->aref, adj); |
|
} |
|
if (UVM_ET_ISSUBMAP(entry1)) { |
|
/* ... unlikely to happen, but play it safe */ |
|
uvm_map_reference(entry1->object.sub_map); |
|
} else if (UVM_ET_ISOBJ(entry1)) { |
|
KASSERT(entry1->object.uvm_obj != NULL); /* suppress coverity */ |
|
entry2->offset += adj; |
|
if (entry1->object.uvm_obj->pgops && |
|
entry1->object.uvm_obj->pgops->pgo_reference) |
|
entry1->object.uvm_obj->pgops->pgo_reference( |
|
entry1->object.uvm_obj); |
|
} |
|
} |
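/*
 * For reference (a sketch, not a quoted hunk): the clip functions below
 * hand the split bookkeeping to this helper, i.e.
 *
 *	uvm_mapent_splitadj(new_entry, entry, start);	in uvm_map_clip_start
 *	uvm_mapent_splitadj(entry, new_entry, end);	in uvm_map_clip_end
 *
 * replacing the open-coded offset/amap/object-reference adjustments that
 * version 1.204 carried in each clip function.
 */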
|
|
|
/* |
* uvm_map_clip_start: ensure that the entry begins at or after |
* uvm_map_clip_start: ensure that the entry begins at or after |
* the starting address, if it doesn't we split the entry. |
* the starting address, if it doesn't we split the entry. |
* |
* |
Line 659 uvm_map_clip_start(struct vm_map *map, s |
|
Line 915 uvm_map_clip_start(struct vm_map *map, s |
|
vaddr_t start, struct uvm_mapent_reservation *umr) |
vaddr_t start, struct uvm_mapent_reservation *umr) |
{ |
{ |
struct vm_map_entry *new_entry; |
struct vm_map_entry *new_entry; |
vaddr_t new_adj; |
|
|
|
/* uvm_map_simplify_entry(map, entry); */ /* XXX */ |
/* uvm_map_simplify_entry(map, entry); */ /* XXX */ |
|
|
uvm_tree_sanity(map, "clip_start entry"); |
uvm_map_check(map, "clip_start entry"); |
|
uvm_mapent_check(entry); |
|
|
/* |
/* |
* Split off the front portion. note that we must insert the new |
* Split off the front portion. note that we must insert the new |
Line 672 uvm_map_clip_start(struct vm_map *map, s |
|
Line 928 uvm_map_clip_start(struct vm_map *map, s |
|
*/ |
*/ |
new_entry = uvm_mapent_alloc_split(map, entry, 0, umr); |
new_entry = uvm_mapent_alloc_split(map, entry, 0, umr); |
uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */ |
uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */ |
|
version 1.204:

	new_entry->end = start;
	new_adj = start - new_entry->start;
	if (entry->object.uvm_obj)
		entry->offset += new_adj;	/* shift start over */

	/* Does not change order for the RB tree */
	entry->start = start;

	if (new_entry->aref.ar_amap) {
		amap_splitref(&new_entry->aref, &entry->aref, new_adj);
	}

	uvm_map_entry_link(map, entry->prev, new_entry);

	if (UVM_ET_ISSUBMAP(entry)) {
		/* ... unlikely to happen, but play it safe */
		uvm_map_reference(new_entry->object.sub_map);
	} else {
		if (UVM_ET_ISOBJ(entry) &&
		    entry->object.uvm_obj->pgops &&
		    entry->object.uvm_obj->pgops->pgo_reference)
			entry->object.uvm_obj->pgops->pgo_reference(
			    entry->object.uvm_obj);
	}

	uvm_tree_sanity(map, "clip_start leave");
}

version 1.204.2.4:

	uvm_mapent_splitadj(new_entry, entry, start);

	uvm_map_entry_link(map, entry->prev, new_entry);

	uvm_map_check(map, "clip_start leave");
}
|
|
/* |
/* |
|
|
uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end, |
uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end, |
struct uvm_mapent_reservation *umr) |
struct uvm_mapent_reservation *umr) |
{ |
{ |
struct vm_map_entry * new_entry; |
struct vm_map_entry *new_entry; |
vaddr_t new_adj; /* #bytes we move start forward */ |
|
|
|
uvm_tree_sanity(map, "clip_end entry"); |
uvm_map_check(map, "clip_end entry"); |
|
uvm_mapent_check(entry); |
|
|
/* |
/* |
* Create a new entry and insert it |
* Create a new entry and insert it |
Line 725 uvm_map_clip_end(struct vm_map *map, str |
|
Line 958 uvm_map_clip_end(struct vm_map *map, str |
|
*/ |
*/ |
new_entry = uvm_mapent_alloc_split(map, entry, 0, umr); |
new_entry = uvm_mapent_alloc_split(map, entry, 0, umr); |
uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */ |
uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */ |
|
version 1.204:

	new_entry->start = entry->end = end;
	new_adj = end - entry->start;
	if (new_entry->object.uvm_obj)
		new_entry->offset += new_adj;

	if (entry->aref.ar_amap)
		amap_splitref(&entry->aref, &new_entry->aref, new_adj);

	uvm_rb_fixup(map, entry);

	uvm_map_entry_link(map, entry, new_entry);

	if (UVM_ET_ISSUBMAP(entry)) {
		/* ... unlikely to happen, but play it safe */
		uvm_map_reference(new_entry->object.sub_map);
	} else {
		if (UVM_ET_ISOBJ(entry) &&
		    entry->object.uvm_obj->pgops &&
		    entry->object.uvm_obj->pgops->pgo_reference)
			entry->object.uvm_obj->pgops->pgo_reference(
			    entry->object.uvm_obj);
	}

	uvm_tree_sanity(map, "clip_end leave");
}

version 1.204.2.4:

	uvm_mapent_splitadj(entry, new_entry, end);

	uvm_map_entry_link(map, entry, new_entry);

	uvm_map_check(map, "clip_end leave");
}

static void
vm_map_drain(struct vm_map *map, uvm_flag_t flags)
{

	if (!VM_MAP_IS_KERNEL(map)) {
		return;
	}

	uvm_km_va_drain(map, flags);
}
|
|
|
|
/* |
/* |
* M A P - m a i n e n t r y p o i n t |
* M A P - m a i n e n t r y p o i n t |
*/ |
*/ |
Line 862 uvm_map_prepare(struct vm_map *map, vadd |
|
Line 1084 uvm_map_prepare(struct vm_map *map, vadd |
|
|
|
KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0); |
KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0); |
|
|
uvm_tree_sanity(map, "map entry"); |
uvm_map_check(map, "map entry"); |
|
|
/* |
/* |
* check sanity of protection code |
* check sanity of protection code |
Line 879 uvm_map_prepare(struct vm_map *map, vadd |
|
Line 1101 uvm_map_prepare(struct vm_map *map, vadd |
|
*/ |
*/ |
|
|
retry: |
retry: |
if (vm_map_lock_try(map) == FALSE) { |
if (vm_map_lock_try(map) == false) { |
if (flags & UVM_FLAG_TRYLOCK) { |
if (flags & UVM_FLAG_TRYLOCK) { |
return EAGAIN; |
return EAGAIN; |
} |
} |
vm_map_lock(map); /* could sleep here */ |
vm_map_lock(map); /* could sleep here */ |
} |
} |
version 1.204:

	if ((prev_entry = uvm_map_findspace(map, start, size, &start,
	    uobj, uoffset, align, flags)) == NULL) {
		unsigned int timestamp;

		if ((flags & UVM_FLAG_WAITVA) == 0) {
			UVMHIST_LOG(maphist,"<- uvm_map_findspace failed!",
			    0,0,0,0);
			vm_map_unlock(map);
			return ENOMEM;
		}
		timestamp = map->timestamp;
		UVMHIST_LOG(maphist,"waiting va timestamp=0x%x",
		    timestamp,0,0,0);
		simple_lock(&map->flags_lock);
		map->flags |= VM_MAP_WANTVA;
		simple_unlock(&map->flags_lock);
		vm_map_unlock(map);

		/*
		 * wait until someone does unmap.
		 * XXX fragile locking
		 */

		simple_lock(&map->flags_lock);
		while ((map->flags & VM_MAP_WANTVA) != 0 &&
		   map->timestamp == timestamp) {
			ltsleep(&map->header, PVM, "vmmapva", 0,
			    &map->flags_lock);
		}
		simple_unlock(&map->flags_lock);
		goto retry;
	}

version 1.204.2.4:

	prev_entry = uvm_map_findspace(map, start, size, &start,
	    uobj, uoffset, align, flags);
	if (prev_entry == NULL) {
		unsigned int timestamp;

		timestamp = map->timestamp;
		UVMHIST_LOG(maphist,"waiting va timestamp=0x%x",
		    timestamp,0,0,0);
		map->flags |= VM_MAP_WANTVA;
		vm_map_unlock(map);

		/*
		 * try to reclaim kva and wait until someone does unmap.
		 * fragile locking here, so we awaken every second to
		 * recheck the condition.
		 */

		vm_map_drain(map, flags);

		mutex_enter(&map->misc_lock);
		while ((map->flags & VM_MAP_WANTVA) != 0 &&
		   map->timestamp == timestamp) {
			if ((flags & UVM_FLAG_WAITVA) == 0) {
				mutex_exit(&map->misc_lock);
				UVMHIST_LOG(maphist,
				    "<- uvm_map_findspace failed!", 0,0,0,0);
				return ENOMEM;
			} else {
				cv_timedwait(&map->cv, &map->misc_lock, hz);
			}
		}
		mutex_exit(&map->misc_lock);
		goto retry;
	}
|
|
|
|
uvm_maxkaddr = pmap_growkernel(start + size); |
uvm_maxkaddr = pmap_growkernel(start + size); |
#endif |
#endif |
|
|
UVMCNT_INCR(uvm_map_call); |
UVMMAP_EVCNT_INCR(map_call); |
|
|
/* |
/* |
* if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER |
* if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER |
Line 998 uvm_map_enter(struct vm_map *map, const |
|
Line 1222 uvm_map_enter(struct vm_map *map, const |
|
map, start, size, flags); |
map, start, size, flags); |
UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0); |
UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0); |
|
|
|
KASSERT(map->hint == prev_entry); /* bimerge case assumes this */ |
|
|
if (flags & UVM_FLAG_QUANTUM) { |
if (flags & UVM_FLAG_QUANTUM) { |
KASSERT(new_entry); |
KASSERT(new_entry); |
KASSERT(new_entry->flags & UVM_MAP_QUANTUM); |
KASSERT(new_entry->flags & UVM_MAP_QUANTUM); |
Line 1051 uvm_map_enter(struct vm_map *map, const |
|
Line 1277 uvm_map_enter(struct vm_map *map, const |
|
} |
} |
|
|
if (kmap) |
if (kmap) |
UVMCNT_INCR(map_kbackmerge); |
UVMMAP_EVCNT_INCR(kbackmerge); |
else |
else |
UVMCNT_INCR(map_ubackmerge); |
UVMMAP_EVCNT_INCR(ubackmerge); |
UVMHIST_LOG(maphist," starting back merge", 0, 0, 0, 0); |
UVMHIST_LOG(maphist," starting back merge", 0, 0, 0, 0); |
|
|
/* |
/* |
Line 1067 uvm_map_enter(struct vm_map *map, const |
|
Line 1293 uvm_map_enter(struct vm_map *map, const |
|
prev_entry->end += size; |
prev_entry->end += size; |
uvm_rb_fixup(map, prev_entry); |
uvm_rb_fixup(map, prev_entry); |
|
|
uvm_tree_sanity(map, "map backmerged"); |
uvm_map_check(map, "map backmerged"); |
|
|
UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0); |
UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0); |
merged++; |
merged++; |
|
|
|
|
if (merged) { |
if (merged) { |
if (kmap) { |
if (kmap) { |
UVMCNT_DECR(map_kbackmerge); |
UVMMAP_EVCNT_DECR(kbackmerge); |
UVMCNT_INCR(map_kbimerge); |
UVMMAP_EVCNT_INCR(kbimerge); |
} else { |
} else { |
UVMCNT_DECR(map_ubackmerge); |
UVMMAP_EVCNT_DECR(ubackmerge); |
UVMCNT_INCR(map_ubimerge); |
UVMMAP_EVCNT_INCR(ubimerge); |
} |
} |
} else { |
} else { |
if (kmap) |
if (kmap) |
UVMCNT_INCR(map_kforwmerge); |
UVMMAP_EVCNT_INCR(kforwmerge); |
else |
else |
UVMCNT_INCR(map_uforwmerge); |
UVMMAP_EVCNT_INCR(uforwmerge); |
} |
} |
UVMHIST_LOG(maphist," starting forward merge", 0, 0, 0, 0); |
UVMHIST_LOG(maphist," starting forward merge", 0, 0, 0, 0); |
|
|
|
|
prev_entry->next->offset = uoffset; |
prev_entry->next->offset = uoffset; |
} |
} |
|
|
uvm_tree_sanity(map, "map forwardmerged"); |
uvm_map_check(map, "map forwardmerged"); |
|
|
UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0); |
UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0); |
merged++; |
merged++; |
|
|
if (!merged) { |
if (!merged) { |
UVMHIST_LOG(maphist," allocating new map entry", 0, 0, 0, 0); |
UVMHIST_LOG(maphist," allocating new map entry", 0, 0, 0, 0); |
if (kmap) |
if (kmap) |
UVMCNT_INCR(map_knomerge); |
UVMMAP_EVCNT_INCR(knomerge); |
else |
else |
UVMCNT_INCR(map_unomerge); |
UVMMAP_EVCNT_INCR(unomerge); |
|
|
/* |
/* |
* allocate new entry and link it in. |
* allocate new entry and link it in. |
|
|
vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ? |
vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ? |
UVM_AMAP_CHUNK << PAGE_SHIFT : 0; |
UVM_AMAP_CHUNK << PAGE_SHIFT : 0; |
struct vm_amap *amap = amap_alloc(size, to_add, |
struct vm_amap *amap = amap_alloc(size, to_add, |
(flags & UVM_FLAG_NOWAIT) ? M_NOWAIT : M_WAITOK); |
(flags & UVM_FLAG_NOWAIT)); |
if (__predict_false(amap == NULL)) { |
if (__predict_false(amap == NULL)) { |
error = ENOMEM; |
error = ENOMEM; |
goto done; |
goto done; |
|
|
* => return value is true if address is in the returned entry |
* => return value is true if address is in the returned entry |
*/ |
*/ |
|
|
boolean_t |
bool |
uvm_map_lookup_entry(struct vm_map *map, vaddr_t address, |
uvm_map_lookup_entry(struct vm_map *map, vaddr_t address, |
struct vm_map_entry **entry /* OUT */) |
struct vm_map_entry **entry /* OUT */) |
{ |
{ |
struct vm_map_entry *cur; |
struct vm_map_entry *cur; |
boolean_t use_tree = FALSE; |
bool use_tree = false; |
UVMHIST_FUNC("uvm_map_lookup_entry"); |
UVMHIST_FUNC("uvm_map_lookup_entry"); |
UVMHIST_CALLED(maphist); |
UVMHIST_CALLED(maphist); |
|
|
Line 1319 uvm_map_lookup_entry(struct vm_map *map, |
|
Line 1545 uvm_map_lookup_entry(struct vm_map *map, |
|
* list, or from the hint. |
* list, or from the hint. |
*/ |
*/ |
|
|
simple_lock(&map->hint_lock); |
mutex_enter(&map->hint_lock); |
cur = map->hint; |
cur = map->hint; |
simple_unlock(&map->hint_lock); |
mutex_exit(&map->hint_lock); |
|
|
if (cur == &map->header) |
if (cur == &map->header) |
cur = cur->next; |
cur = cur->next; |
|
|
UVMCNT_INCR(uvm_mlk_call); |
UVMMAP_EVCNT_INCR(mlk_call); |
if (address >= cur->start) { |
if (address >= cur->start) { |
|
|
/* |
/* |
Line 1342 uvm_map_lookup_entry(struct vm_map *map, |
|
Line 1568 uvm_map_lookup_entry(struct vm_map *map, |
|
*/ |
*/ |
|
|
if (cur != &map->header && cur->end > address) { |
if (cur != &map->header && cur->end > address) { |
UVMCNT_INCR(uvm_mlk_hint); |
UVMMAP_EVCNT_INCR(mlk_hint); |
*entry = cur; |
*entry = cur; |
UVMHIST_LOG(maphist,"<- got it via hint (0x%x)", |
UVMHIST_LOG(maphist,"<- got it via hint (0x%x)", |
cur, 0, 0, 0); |
cur, 0, 0, 0); |
return (TRUE); |
uvm_mapent_check(*entry); |
|
return (true); |
} |
} |
|
|
if (map->nentries > 30) |
if (map->nentries > 30) |
use_tree = TRUE; |
use_tree = true; |
} else { |
} else { |
|
|
/* |
/* |
* invalid hint. use tree. |
* invalid hint. use tree. |
*/ |
*/ |
use_tree = TRUE; |
use_tree = true; |
} |
} |
|
|
uvm_tree_sanity(map, __func__); |
uvm_map_check(map, __func__); |
|
|
if (use_tree) { |
if (use_tree) { |
struct vm_map_entry *prev = &map->header; |
struct vm_map_entry *prev = &map->header; |
|
|
cur, 0, 0, 0); |
cur, 0, 0, 0); |
KDASSERT((*entry)->start <= address); |
KDASSERT((*entry)->start <= address); |
KDASSERT(address < (*entry)->end); |
KDASSERT(address < (*entry)->end); |
return (TRUE); |
uvm_mapent_check(*entry); |
|
return (true); |
} |
} |
break; |
break; |
} |
} |
|
|
KDASSERT((*entry) == &map->header || (*entry)->end <= address); |
KDASSERT((*entry) == &map->header || (*entry)->end <= address); |
KDASSERT((*entry)->next == &map->header || |
KDASSERT((*entry)->next == &map->header || |
address < (*entry)->next->start); |
address < (*entry)->next->start); |
return (FALSE); |
return (false); |
} |
} |
|
|
/* |
/* |
Line 1498 uvm_map_findspace(struct vm_map *map, va |
|
Line 1726 uvm_map_findspace(struct vm_map *map, va |
|
KASSERT((align & (align - 1)) == 0); |
KASSERT((align & (align - 1)) == 0); |
KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0); |
KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0); |
|
|
uvm_tree_sanity(map, "map_findspace entry"); |
uvm_map_check(map, "map_findspace entry"); |
|
|
/* |
/* |
* remember the original hint. if we are aligning, then we |
* remember the original hint. if we are aligning, then we |
Line 1834 uvm_unmap_remove(struct vm_map *map, vad |
|
Line 2062 uvm_unmap_remove(struct vm_map *map, vad |
|
map, start, end, 0); |
map, start, end, 0); |
VM_MAP_RANGE_CHECK(map, start, end); |
VM_MAP_RANGE_CHECK(map, start, end); |
|
|
uvm_tree_sanity(map, "unmap_remove entry"); |
uvm_map_check(map, "unmap_remove entry"); |
|
|
/* |
/* |
* find first entry |
* find first entry |
*/ |
*/ |
|
|
if (uvm_map_lookup_entry(map, start, &first_entry) == TRUE) { |
if (uvm_map_lookup_entry(map, start, &first_entry) == true) { |
/* clip and go... */ |
/* clip and go... */ |
entry = first_entry; |
entry = first_entry; |
UVM_MAP_CLIP_START(map, entry, start, umr); |
UVM_MAP_CLIP_START(map, entry, start, umr); |
Line 1854 uvm_unmap_remove(struct vm_map *map, vad |
|
Line 2082 uvm_unmap_remove(struct vm_map *map, vad |
|
* Save the free space hint |
* Save the free space hint |
*/ |
*/ |
|
|
if (map->first_free->start >= start) |
if (map->first_free != &map->header && map->first_free->start >= start) |
map->first_free = entry->prev; |
map->first_free = entry->prev; |
|
|
/* |
/* |
Line 2017 uvm_unmap_remove(struct vm_map *map, vad |
|
Line 2245 uvm_unmap_remove(struct vm_map *map, vad |
|
pmap_update(vm_map_pmap(map)); |
pmap_update(vm_map_pmap(map)); |
} |
} |
|
|
uvm_tree_sanity(map, "unmap_remove leave"); |
uvm_map_check(map, "unmap_remove leave"); |
|
|
/* |
/* |
* now we've cleaned up the map and are ready for the caller to drop |
* now we've cleaned up the map and are ready for the caller to drop |
Line 2027 uvm_unmap_remove(struct vm_map *map, vad |
|
Line 2255 uvm_unmap_remove(struct vm_map *map, vad |
|
*entry_list = first_entry; |
*entry_list = first_entry; |
UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0); |
UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0); |
|
|
simple_lock(&map->flags_lock); |
|
if (map->flags & VM_MAP_WANTVA) { |
if (map->flags & VM_MAP_WANTVA) { |
|
mutex_enter(&map->misc_lock); |
map->flags &= ~VM_MAP_WANTVA; |
map->flags &= ~VM_MAP_WANTVA; |
wakeup(&map->header); |
cv_broadcast(&map->cv); |
|
mutex_exit(&map->misc_lock); |
} |
} |
simple_unlock(&map->flags_lock); |
|
} |
} |
|
|
/* |
/* |
|
|
uvm_map_reserve(struct vm_map *map, vsize_t size, |
uvm_map_reserve(struct vm_map *map, vsize_t size, |
vaddr_t offset /* hint for pmap_prefer */, |
vaddr_t offset /* hint for pmap_prefer */, |
vsize_t align /* alignment hint */, |
vsize_t align /* alignment hint */, |
vaddr_t *raddr /* IN:hint, OUT: reserved VA */) |
vaddr_t *raddr /* IN:hint, OUT: reserved VA */, |
|
uvm_flag_t flags /* UVM_FLAG_FIXED or 0 */) |
{ |
{ |
UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist); |
UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist); |
|
|
Line 2105 uvm_map_reserve(struct vm_map *map, vsiz |
|
Line 2334 uvm_map_reserve(struct vm_map *map, vsiz |
|
map,size,offset,raddr); |
map,size,offset,raddr); |
|
|
size = round_page(size); |
size = round_page(size); |
if (*raddr < vm_map_min(map)) |
|
*raddr = vm_map_min(map); /* hint */ |
|
|
|
/* |
/* |
* reserve some virtual space. |
* reserve some virtual space. |
Line 2114 uvm_map_reserve(struct vm_map *map, vsiz |
|
Line 2341 uvm_map_reserve(struct vm_map *map, vsiz |
|
|
|
if (uvm_map(map, raddr, size, NULL, offset, 0, |
if (uvm_map(map, raddr, size, NULL, offset, 0, |
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, |
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, |
UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) { |
UVM_ADV_RANDOM, UVM_FLAG_NOMERGE|flags)) != 0) { |
UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0); |
UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0); |
return (FALSE); |
return (false); |
} |
} |
|
|
UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0); |
UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0); |
return (TRUE); |
return (true); |
} |
} |
|
|
/* |
/* |
Line 2128 uvm_map_reserve(struct vm_map *map, vsiz |
|
Line 2355 uvm_map_reserve(struct vm_map *map, vsiz |
|
* real mappings. |
* real mappings. |
* |
* |
* => caller must WRITE-LOCK the map |
* => caller must WRITE-LOCK the map |
* => we return TRUE if replacement was a success |
* => we return true if replacement was a success |
* => we expect the newents chain to have nnewents entrys on it and |
* => we expect the newents chain to have nnewents entrys on it and |
* we expect newents->prev to point to the last entry on the list |
* we expect newents->prev to point to the last entry on the list |
* => note newents is allowed to be NULL |
* => note newents is allowed to be NULL |
Line 2140 uvm_map_replace(struct vm_map *map, vadd |
|
Line 2367 uvm_map_replace(struct vm_map *map, vadd |
|
{ |
{ |
struct vm_map_entry *oldent, *last; |
struct vm_map_entry *oldent, *last; |
|
|
uvm_tree_sanity(map, "map_replace entry"); |
uvm_map_check(map, "map_replace entry"); |
|
|
/* |
/* |
* first find the blank map entry at the specified address |
* first find the blank map entry at the specified address |
*/ |
*/ |
|
|
if (!uvm_map_lookup_entry(map, start, &oldent)) { |
if (!uvm_map_lookup_entry(map, start, &oldent)) { |
return (FALSE); |
return (false); |
} |
} |
|
|
/* |
/* |
* check to make sure we have a proper blank entry |
* check to make sure we have a proper blank entry |
*/ |
*/ |
|
|
|
if (end < oldent->end && !VM_MAP_USE_KMAPENT(map)) { |
|
UVM_MAP_CLIP_END(map, oldent, end, NULL); |
|
} |
if (oldent->start != start || oldent->end != end || |
if (oldent->start != start || oldent->end != end || |
oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) { |
oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) { |
return (FALSE); |
return (false); |
} |
} |
|
|
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
Line 2229 uvm_map_replace(struct vm_map *map, vadd |
|
Line 2459 uvm_map_replace(struct vm_map *map, vadd |
|
} |
} |
} |
} |
} else { |
} else { |
|
|
/* critical: flush stale hints out of map */ |
|
SAVE_HINT(map, map->hint, oldent->prev); |
|
if (map->first_free == oldent) |
|
map->first_free = oldent->prev; |
|
|
|
/* NULL list of new entries: just remove the old one */ |
/* NULL list of new entries: just remove the old one */ |
|
clear_hints(map, oldent); |
uvm_map_entry_unlink(map, oldent); |
uvm_map_entry_unlink(map, oldent); |
} |
} |
|
|
uvm_tree_sanity(map, "map_replace leave"); |
uvm_map_check(map, "map_replace leave"); |
|
|
/* |
/* |
* now we can free the old blank entry, unlock the map and return. |
* now we can free the old blank entry and return. |
*/ |
*/ |
|
|
uvm_mapent_free(oldent); |
uvm_mapent_free(oldent); |
return (TRUE); |
return (true); |
} |
} |
|
|
/* |
/* |
Line 2282 uvm_map_extract(struct vm_map *srcmap, v |
|
Line 2507 uvm_map_extract(struct vm_map *srcmap, v |
|
len,0); |
len,0); |
UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0); |
UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0); |
|
|
uvm_tree_sanity(srcmap, "map_extract src enter"); |
uvm_map_check(srcmap, "map_extract src enter"); |
uvm_tree_sanity(dstmap, "map_extract dst enter"); |
uvm_map_check(dstmap, "map_extract dst enter"); |
|
|
/* |
/* |
* step 0: sanity check: start must be on a page boundary, length |
* step 0: sanity check: start must be on a page boundary, length |
Line 2299 uvm_map_extract(struct vm_map *srcmap, v |
|
Line 2524 uvm_map_extract(struct vm_map *srcmap, v |
|
* step 1: reserve space in the target map for the extracted area |
* step 1: reserve space in the target map for the extracted area |
*/ |
*/ |
|
|
version 1.204:

	dstaddr = vm_map_min(dstmap);
	if (uvm_map_reserve(dstmap, len, start, 0, &dstaddr) == FALSE)
		return (ENOMEM);
	*dstaddrp = dstaddr;	/* pass address back to caller */
	UVMHIST_LOG(maphist, " dstaddr=0x%x", dstaddr,0,0,0);

version 1.204.2.4:

	if ((flags & UVM_EXTRACT_RESERVED) == 0) {
		dstaddr = vm_map_min(dstmap);
		if (!uvm_map_reserve(dstmap, len, start, 0, &dstaddr, 0))
			return (ENOMEM);
		*dstaddrp = dstaddr;	/* pass address back to caller */
		UVMHIST_LOG(maphist, " dstaddr=0x%x", dstaddr,0,0,0);
	} else {
		dstaddr = *dstaddrp;
	}
|
|
/* |
/* |
* step 2: setup for the extraction process loop by init'ing the |
* step 2: setup for the extraction process loop by init'ing the |
Line 2371 uvm_map_extract(struct vm_map *srcmap, v |
|
Line 2600 uvm_map_extract(struct vm_map *srcmap, v |
|
|
|
/* clear needs_copy (allow chunking) */ |
/* clear needs_copy (allow chunking) */ |
if (UVM_ET_ISNEEDSCOPY(entry)) { |
if (UVM_ET_ISNEEDSCOPY(entry)) { |
amap_copy(srcmap, entry, M_NOWAIT, TRUE, start, end); |
amap_copy(srcmap, entry, |
|
AMAP_COPY_NOWAIT|AMAP_COPY_NOMERGE, start, end); |
if (UVM_ET_ISNEEDSCOPY(entry)) { /* failed? */ |
if (UVM_ET_ISNEEDSCOPY(entry)) { /* failed? */ |
error = ENOMEM; |
error = ENOMEM; |
goto bad; |
goto bad; |
Line 2462 uvm_map_extract(struct vm_map *srcmap, v |
|
Line 2692 uvm_map_extract(struct vm_map *srcmap, v |
|
* 0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7 |
* 0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7 |
*/ |
*/ |
|
|
if (srcmap == dstmap || vm_map_lock_try(dstmap) == TRUE) { |
if (srcmap == dstmap || vm_map_lock_try(dstmap) == true) { |
copy_ok = 1; |
copy_ok = 1; |
if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain, |
if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain, |
nchain)) { |
nchain)) { |
Line 2488 uvm_map_extract(struct vm_map *srcmap, v |
|
Line 2718 uvm_map_extract(struct vm_map *srcmap, v |
|
/* purge possible stale hints from srcmap */ |
/* purge possible stale hints from srcmap */ |
if (flags & UVM_EXTRACT_REMOVE) { |
if (flags & UVM_EXTRACT_REMOVE) { |
SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev); |
SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev); |
if (srcmap->first_free->start >= start) |
if (srcmap->first_free != &srcmap->header && |
|
srcmap->first_free->start >= start) |
srcmap->first_free = orig_entry->prev; |
srcmap->first_free = orig_entry->prev; |
} |
} |
|
|
Line 2554 uvm_map_extract(struct vm_map *srcmap, v |
|
Line 2785 uvm_map_extract(struct vm_map *srcmap, v |
|
nchain); |
nchain); |
vm_map_unlock(dstmap); |
vm_map_unlock(dstmap); |
|
|
if (error == FALSE) { |
if (error == false) { |
error = EIO; |
error = EIO; |
goto bad2; |
goto bad2; |
} |
} |
} |
} |
|
|
uvm_tree_sanity(srcmap, "map_extract src leave"); |
uvm_map_check(srcmap, "map_extract src leave"); |
uvm_tree_sanity(dstmap, "map_extract dst leave"); |
uvm_map_check(dstmap, "map_extract dst leave"); |
|
|
return (0); |
return (0); |
|
|
Line 2575 bad2: /* src already unlocked */ |
|
Line 2806 bad2: /* src already unlocked */ |
|
uvm_unmap_detach(chain, |
uvm_unmap_detach(chain, |
(flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0); |
(flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0); |
|
|
uvm_tree_sanity(srcmap, "map_extract src err leave"); |
uvm_map_check(srcmap, "map_extract src err leave"); |
uvm_tree_sanity(dstmap, "map_extract dst err leave"); |
uvm_map_check(dstmap, "map_extract dst err leave"); |
|
|
uvm_unmap(dstmap, dstaddr, dstaddr+len); /* ??? */ |
if ((flags & UVM_EXTRACT_RESERVED) == 0) { |
|
uvm_unmap(dstmap, dstaddr, dstaddr+len); /* ??? */ |
|
} |
return (error); |
return (error); |
} |
} |
|
|
Line 2652 uvm_map_setup_kernel(struct vm_map_kerne |
|
Line 2885 uvm_map_setup_kernel(struct vm_map_kerne |
|
{ |
{ |
|
|
uvm_map_setup(&map->vmk_map, vmin, vmax, flags); |
uvm_map_setup(&map->vmk_map, vmin, vmax, flags); |
|
callback_head_init(&map->vmk_reclaim_callback, IPL_VM); |
LIST_INIT(&map->vmk_kentry_free); |
LIST_INIT(&map->vmk_kentry_free); |
map->vmk_merged_entries = NULL; |
map->vmk_merged_entries = NULL; |
} |
} |
Line 2670 uvm_map_setup_kernel(struct vm_map_kerne |
|
Line 2903 uvm_map_setup_kernel(struct vm_map_kerne |
|
|
|
int |
int |
uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end, |
uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end, |
vm_prot_t new_prot, boolean_t set_max) |
vm_prot_t new_prot, bool set_max) |
{ |
{ |
struct vm_map_entry *current, *entry; |
struct vm_map_entry *current, *entry; |
int error = 0; |
int error = 0; |
Line 2716 uvm_map_protect(struct vm_map *map, vadd |
|
Line 2949 uvm_map_protect(struct vm_map *map, vadd |
|
goto out; |
goto out; |
} |
} |
} |
} |
|
|
current = current->next; |
current = current->next; |
} |
} |
|
|
Line 2769 uvm_map_protect(struct vm_map *map, vadd |
|
Line 3003 uvm_map_protect(struct vm_map *map, vadd |
|
old_prot == VM_PROT_NONE && |
old_prot == VM_PROT_NONE && |
new_prot != VM_PROT_NONE) { |
new_prot != VM_PROT_NONE) { |
if (uvm_map_pageable(map, entry->start, |
if (uvm_map_pageable(map, entry->start, |
entry->end, FALSE, |
entry->end, false, |
UVM_LK_ENTER|UVM_LK_EXIT) != 0) { |
UVM_LK_ENTER|UVM_LK_EXIT) != 0) { |
|
|
/* |
/* |
Line 2903 uvm_map_advice(struct vm_map *map, vaddr |
|
Line 3137 uvm_map_advice(struct vm_map *map, vaddr |
|
* |
* |
* => wires map entries. should not be used for transient page locking. |
* => wires map entries. should not be used for transient page locking. |
* for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()). |
* for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()). |
* => regions sepcified as not pageable require lock-down (wired) memory |
* => regions specified as not pageable require lock-down (wired) memory |
* and page tables. |
* and page tables. |
* => map must never be read-locked |
* => map must never be read-locked |
* => if islocked is TRUE, map is already write-locked |
* => if islocked is true, map is already write-locked |
* => we always unlock the map, since we must downgrade to a read-lock |
* => we always unlock the map, since we must downgrade to a read-lock |
* to call uvm_fault_wire() |
* to call uvm_fault_wire() |
* => XXXCDC: check this and try and clean it up. |
* => XXXCDC: check this and try and clean it up. |
Line 2914 uvm_map_advice(struct vm_map *map, vaddr |
|
Line 3148 uvm_map_advice(struct vm_map *map, vaddr |
|
|
|
int |
int |
uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end, |
uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end, |
boolean_t new_pageable, int lockflags) |
bool new_pageable, int lockflags) |
{ |
{ |
struct vm_map_entry *entry, *start_entry, *failed_entry; |
struct vm_map_entry *entry, *start_entry, *failed_entry; |
int rv; |
int rv; |
Line 2938 uvm_map_pageable(struct vm_map *map, vad |
|
Line 3172 uvm_map_pageable(struct vm_map *map, vad |
|
* making any changes. |
* making any changes. |
*/ |
*/ |
|
|
if (uvm_map_lookup_entry(map, start, &start_entry) == FALSE) { |
if (uvm_map_lookup_entry(map, start, &start_entry) == false) { |
if ((lockflags & UVM_LK_EXIT) == 0) |
if ((lockflags & UVM_LK_EXIT) == 0) |
vm_map_unlock(map); |
vm_map_unlock(map); |
|
|
Line 3027 uvm_map_pageable(struct vm_map *map, vad |
|
Line 3261 uvm_map_pageable(struct vm_map *map, vad |
|
if (UVM_ET_ISNEEDSCOPY(entry) && |
if (UVM_ET_ISNEEDSCOPY(entry) && |
((entry->max_protection & VM_PROT_WRITE) || |
((entry->max_protection & VM_PROT_WRITE) || |
(entry->object.uvm_obj == NULL))) { |
(entry->object.uvm_obj == NULL))) { |
amap_copy(map, entry, M_WAITOK, TRUE, |
amap_copy(map, entry, 0, start, end); |
start, end); |
|
/* XXXCDC: wait OK? */ |
/* XXXCDC: wait OK? */ |
} |
} |
} |
} |
Line 3078 uvm_map_pageable(struct vm_map *map, vad |
|
Line 3311 uvm_map_pageable(struct vm_map *map, vad |
|
while (entry != &map->header && entry->start < end) { |
while (entry != &map->header && entry->start < end) { |
if (entry->wired_count == 1) { |
if (entry->wired_count == 1) { |
rv = uvm_fault_wire(map, entry->start, entry->end, |
rv = uvm_fault_wire(map, entry->start, entry->end, |
VM_FAULT_WIREMAX, entry->max_protection); |
entry->max_protection, 1); |
if (rv) { |
if (rv) { |
|
|
/* |
/* |
Line 3195 uvm_map_pageable_all(struct vm_map *map, |
|
Line 3428 uvm_map_pageable_all(struct vm_map *map, |
|
if (VM_MAPENT_ISWIRED(entry)) |
if (VM_MAPENT_ISWIRED(entry)) |
uvm_map_entry_unwire(map, entry); |
uvm_map_entry_unwire(map, entry); |
} |
} |
vm_map_modflags(map, 0, VM_MAP_WIREFUTURE); |
map->flags &= ~VM_MAP_WIREFUTURE; |
vm_map_unlock(map); |
vm_map_unlock(map); |
UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0); |
UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0); |
return 0; |
return 0; |
Line 3207 uvm_map_pageable_all(struct vm_map *map, |
|
Line 3440 uvm_map_pageable_all(struct vm_map *map, |
|
* must wire all future mappings; remember this. |
* must wire all future mappings; remember this. |
*/ |
*/ |
|
|
vm_map_modflags(map, VM_MAP_WIREFUTURE, 0); |
map->flags |= VM_MAP_WIREFUTURE; |
} |
} |
|
|
if ((flags & MCL_CURRENT) == 0) { |
if ((flags & MCL_CURRENT) == 0) { |
Line 3285 uvm_map_pageable_all(struct vm_map *map, |
|
Line 3518 uvm_map_pageable_all(struct vm_map *map, |
|
if (UVM_ET_ISNEEDSCOPY(entry) && |
if (UVM_ET_ISNEEDSCOPY(entry) && |
((entry->max_protection & VM_PROT_WRITE) || |
((entry->max_protection & VM_PROT_WRITE) || |
(entry->object.uvm_obj == NULL))) { |
(entry->object.uvm_obj == NULL))) { |
amap_copy(map, entry, M_WAITOK, TRUE, |
amap_copy(map, entry, 0, entry->start, |
entry->start, entry->end); |
entry->end); |
/* XXXCDC: wait OK? */ |
/* XXXCDC: wait OK? */ |
} |
} |
} |
} |
Line 3309 uvm_map_pageable_all(struct vm_map *map, |
|
Line 3542 uvm_map_pageable_all(struct vm_map *map, |
|
entry = entry->next) { |
entry = entry->next) { |
if (entry->wired_count == 1) { |
if (entry->wired_count == 1) { |
rv = uvm_fault_wire(map, entry->start, entry->end, |
rv = uvm_fault_wire(map, entry->start, entry->end, |
VM_FAULT_WIREMAX, entry->max_protection); |
entry->max_protection, 1); |
if (rv) { |
if (rv) { |
|
|
/* |
/* |
Line 3417 uvm_map_clean(struct vm_map *map, vaddr_ |
|
Line 3650 uvm_map_clean(struct vm_map *map, vaddr_ |
|
|
|
vm_map_lock_read(map); |
vm_map_lock_read(map); |
VM_MAP_RANGE_CHECK(map, start, end); |
VM_MAP_RANGE_CHECK(map, start, end); |
if (uvm_map_lookup_entry(map, start, &entry) == FALSE) { |
if (uvm_map_lookup_entry(map, start, &entry) == false) { |
vm_map_unlock_read(map); |
vm_map_unlock_read(map); |
return EFAULT; |
return EFAULT; |
} |
} |
Line 3563 uvm_map_clean(struct vm_map *map, vaddr_ |
|
Line 3796 uvm_map_clean(struct vm_map *map, vaddr_ |
|
* => map must be read or write locked by caller. |
* => map must be read or write locked by caller. |
*/ |
*/ |
|
|
boolean_t |
bool |
uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end, |
uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end, |
vm_prot_t protection) |
vm_prot_t protection) |
{ |
{ |
Line 3571 uvm_map_checkprot(struct vm_map *map, va |
|
Line 3804 uvm_map_checkprot(struct vm_map *map, va |
|
struct vm_map_entry *tmp_entry; |
struct vm_map_entry *tmp_entry; |
|
|
if (!uvm_map_lookup_entry(map, start, &tmp_entry)) { |
if (!uvm_map_lookup_entry(map, start, &tmp_entry)) { |
return (FALSE); |
return (false); |
} |
} |
entry = tmp_entry; |
entry = tmp_entry; |
while (start < end) { |
while (start < end) { |
if (entry == &map->header) { |
if (entry == &map->header) { |
return (FALSE); |
return (false); |
} |
} |
|
|
/* |
/* |
Line 3584 uvm_map_checkprot(struct vm_map *map, va |
|
Line 3817 uvm_map_checkprot(struct vm_map *map, va |
|
*/ |
*/ |
|
|
if (start < entry->start) { |
if (start < entry->start) { |
return (FALSE); |
return (false); |
} |
} |
|
|
/* |
/* |
Line 3592 uvm_map_checkprot(struct vm_map *map, va |
|
Line 3825 uvm_map_checkprot(struct vm_map *map, va |
|
*/ |
*/ |
|
|
if ((entry->protection & protection) != protection) { |
if ((entry->protection & protection) != protection) { |
return (FALSE); |
return (false); |
} |
} |
start = entry->end; |
start = entry->end; |
entry = entry->next; |
entry = entry->next; |
} |
} |
return (TRUE); |
return (true); |
} |
} |
|
|
/* |
/* |
Line 3654 uvmspace_init(struct vmspace *vm, struct |
|
Line 3887 uvmspace_init(struct vmspace *vm, struct |
|
void |
void |
uvmspace_share(struct proc *p1, struct proc *p2) |
uvmspace_share(struct proc *p1, struct proc *p2) |
{ |
{ |
struct simplelock *slock = &p1->p_vmspace->vm_map.ref_lock; |
|
|
|
|
uvmspace_addref(p1->p_vmspace); |
p2->p_vmspace = p1->p_vmspace; |
p2->p_vmspace = p1->p_vmspace; |
simple_lock(slock); |
|
p1->p_vmspace->vm_refcnt++; |
|
simple_unlock(slock); |
|
} |
} |
|
|
/* |
/* |
Line 3729 uvmspace_exec(struct lwp *l, vaddr_t sta |
|
Line 3959 uvmspace_exec(struct lwp *l, vaddr_t sta |
|
* when a process execs another program image. |
* when a process execs another program image. |
*/ |
*/ |
|
|
vm_map_modflags(map, 0, VM_MAP_WIREFUTURE); |
map->flags &= ~VM_MAP_WIREFUTURE; |
|
|
/* |
/* |
* now unmap the old program |
* now unmap the old program |
Line 3769 uvmspace_exec(struct lwp *l, vaddr_t sta |
|
Line 3999 uvmspace_exec(struct lwp *l, vaddr_t sta |
|
} |
} |
|
|
/* |
/* |
|
 * uvmspace_addref: add a reference to a vmspace. |
|
*/ |
|
|
|
void |
|
uvmspace_addref(struct vmspace *vm) |
|
{ |
|
struct vm_map *map = &vm->vm_map; |
|
|
|
KASSERT((map->flags & VM_MAP_DYING) == 0); |
|
|
|
mutex_enter(&map->misc_lock); |
|
KASSERT(vm->vm_refcnt > 0); |
|
vm->vm_refcnt++; |
|
mutex_exit(&map->misc_lock); |
|
} |
|
|
|
/* |
* uvmspace_free: free a vmspace data structure |
* uvmspace_free: free a vmspace data structure |
*/ |
*/ |
|
|
Line 3782 uvmspace_free(struct vmspace *vm) |
|
Line 4029 uvmspace_free(struct vmspace *vm) |
|
UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist); |
UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist); |
|
|
UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0); |
UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0); |
simple_lock(&map->ref_lock); |
mutex_enter(&map->misc_lock); |
n = --vm->vm_refcnt; |
n = --vm->vm_refcnt; |
simple_unlock(&map->ref_lock); |
mutex_exit(&map->misc_lock); |
if (n > 0) |
if (n > 0) |
return; |
return; |
|
|
Line 3808 uvmspace_free(struct vmspace *vm) |
|
Line 4055 uvmspace_free(struct vmspace *vm) |
|
} |
} |
KASSERT(map->nentries == 0); |
KASSERT(map->nentries == 0); |
KASSERT(map->size == 0); |
KASSERT(map->size == 0); |
|
mutex_destroy(&map->misc_lock); |
|
mutex_destroy(&map->hint_lock); |
|
mutex_destroy(&map->mutex); |
|
rw_destroy(&map->lock); |
pmap_destroy(map->pmap); |
pmap_destroy(map->pmap); |
pool_put(&uvm_vmspace_pool, vm); |
pool_put(&uvm_vmspace_pool, vm); |
} |
} |
Line 3836 uvmspace_fork(struct vmspace *vm1) |
|
Line 4087 uvmspace_fork(struct vmspace *vm1) |
|
|
|
vm2 = uvmspace_alloc(vm_map_min(old_map), vm_map_max(old_map)); |
vm2 = uvmspace_alloc(vm_map_min(old_map), vm_map_max(old_map)); |
memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy, |
memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy, |
(caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy); |
(char *) (vm1 + 1) - (char *) &vm1->vm_startcopy); |
new_map = &vm2->vm_map; /* XXX */ |
new_map = &vm2->vm_map; /* XXX */ |
|
|
old_entry = old_map->header.next; |
old_entry = old_map->header.next; |
Line 3880 uvmspace_fork(struct vmspace *vm1) |
|
Line 4131 uvmspace_fork(struct vmspace *vm1) |
|
|
|
if (UVM_ET_ISNEEDSCOPY(old_entry)) { |
if (UVM_ET_ISNEEDSCOPY(old_entry)) { |
/* get our own amap, clears needs_copy */ |
/* get our own amap, clears needs_copy */ |
amap_copy(old_map, old_entry, M_WAITOK, FALSE, |
amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK, |
0, 0); |
0, 0); |
/* XXXCDC: WAITOK??? */ |
/* XXXCDC: WAITOK??? */ |
} |
} |
Line 3979 uvmspace_fork(struct vmspace *vm1) |
|
Line 4230 uvmspace_fork(struct vmspace *vm1) |
|
AMAP_SHARED) != 0 || |
AMAP_SHARED) != 0 || |
VM_MAPENT_ISWIRED(old_entry)) { |
VM_MAPENT_ISWIRED(old_entry)) { |
|
|
amap_copy(new_map, new_entry, M_WAITOK, |
amap_copy(new_map, new_entry, |
FALSE, 0, 0); |
AMAP_COPY_NOCHUNK, 0, 0); |
/* XXXCDC: M_WAITOK ... ok? */ |
/* XXXCDC: M_WAITOK ... ok? */ |
} |
} |
} |
} |
Line 4055 uvmspace_fork(struct vmspace *vm1) |
|
Line 4306 uvmspace_fork(struct vmspace *vm1) |
|
* in-kernel map entry allocation. |
* in-kernel map entry allocation. |
*/ |
*/ |
|
|
int ukh_alloc, ukh_free; |
|
int uke_alloc, uke_free; |
|
|
|
struct uvm_kmapent_hdr { |
struct uvm_kmapent_hdr { |
LIST_ENTRY(uvm_kmapent_hdr) ukh_listq; |
LIST_ENTRY(uvm_kmapent_hdr) ukh_listq; |
int ukh_nused; |
int ukh_nused; |
Line 4085 uvm_kmapent_map(struct vm_map_entry *ent |
|
Line 4333 uvm_kmapent_map(struct vm_map_entry *ent |
|
} |
} |
#endif |
#endif |
|
|
static __inline struct vm_map_entry * |
static inline struct vm_map_entry * |
uvm_kmapent_get(struct uvm_kmapent_hdr *ukh) |
uvm_kmapent_get(struct uvm_kmapent_hdr *ukh) |
{ |
{ |
struct vm_map_entry *entry; |
struct vm_map_entry *entry; |
Line 4107 uvm_kmapent_get(struct uvm_kmapent_hdr * |
|
Line 4355 uvm_kmapent_get(struct uvm_kmapent_hdr * |
|
return entry; |
return entry; |
} |
} |
|
|
static __inline void |
static inline void |
uvm_kmapent_put(struct uvm_kmapent_hdr *ukh, struct vm_map_entry *entry) |
uvm_kmapent_put(struct uvm_kmapent_hdr *ukh, struct vm_map_entry *entry) |
{ |
{ |
|
|
Line 4141 uvm_kmapent_alloc(struct vm_map *map, in |
|
Line 4389 uvm_kmapent_alloc(struct vm_map *map, in |
|
vaddr_t va; |
vaddr_t va; |
int error; |
int error; |
int i; |
int i; |
int s; |
|
|
|
KDASSERT(UVM_KMAPENT_CHUNK > 2); |
KDASSERT(UVM_KMAPENT_CHUNK > 2); |
KDASSERT(kernel_map != NULL); |
KDASSERT(kernel_map != NULL); |
KASSERT(vm_map_pmap(map) == pmap_kernel()); |
KASSERT(vm_map_pmap(map) == pmap_kernel()); |
|
|
uke_alloc++; |
UVMMAP_EVCNT_INCR(uke_alloc); |
entry = NULL; |
entry = NULL; |
again: |
again: |
/* |
/* |
* try to grab an entry from freelist. |
* try to grab an entry from freelist. |
*/ |
*/ |
s = splvm(); |
mutex_spin_enter(&uvm_kentry_lock); |
simple_lock(&uvm.kentry_lock); |
|
ukh = LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free); |
ukh = LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free); |
if (ukh) { |
if (ukh) { |
entry = uvm_kmapent_get(ukh); |
entry = uvm_kmapent_get(ukh); |
if (ukh->ukh_nused == UVM_KMAPENT_CHUNK) |
if (ukh->ukh_nused == UVM_KMAPENT_CHUNK) |
LIST_REMOVE(ukh, ukh_listq); |
LIST_REMOVE(ukh, ukh_listq); |
} |
} |
simple_unlock(&uvm.kentry_lock); |
mutex_spin_exit(&uvm_kentry_lock); |
splx(s); |
|
|
|
if (entry) |
if (entry) |
return entry; |
return entry; |
|
|
} |
} |
KASSERT(ukh->ukh_nused == 2); |
KASSERT(ukh->ukh_nused == 2); |
|
|
s = splvm(); |
mutex_spin_enter(&uvm_kentry_lock); |
simple_lock(&uvm.kentry_lock); |
|
LIST_INSERT_HEAD(&vm_map_to_kernel(map)->vmk_kentry_free, |
LIST_INSERT_HEAD(&vm_map_to_kernel(map)->vmk_kentry_free, |
ukh, ukh_listq); |
ukh, ukh_listq); |
simple_unlock(&uvm.kentry_lock); |
mutex_spin_exit(&uvm_kentry_lock); |
splx(s); |
|
|
|
/* |
/* |
* return second entry. |
* return second entry. |
|
|
|
|
entry = &ukh->ukh_entries[1]; |
entry = &ukh->ukh_entries[1]; |
entry->flags = UVM_MAP_KERNEL; |
entry->flags = UVM_MAP_KERNEL; |
ukh_alloc++; |
UVMMAP_EVCNT_INCR(ukh_alloc); |
return entry; |
return entry; |
} |
} |
|
|
Line 4245 uvm_kmapent_free(struct vm_map_entry *en |
|
Line 4488 uvm_kmapent_free(struct vm_map_entry *en |
|
vaddr_t va; |
vaddr_t va; |
paddr_t pa; |
paddr_t pa; |
struct vm_map_entry *deadentry; |
struct vm_map_entry *deadentry; |
int s; |
|
|
|
uke_free++; |
UVMMAP_EVCNT_INCR(uke_free); |
ukh = UVM_KHDR_FIND(entry); |
ukh = UVM_KHDR_FIND(entry); |
map = ukh->ukh_map; |
map = ukh->ukh_map; |
|
|
s = splvm(); |
mutex_spin_enter(&uvm_kentry_lock); |
simple_lock(&uvm.kentry_lock); |
|
uvm_kmapent_put(ukh, entry); |
uvm_kmapent_put(ukh, entry); |
if (ukh->ukh_nused > 1) { |
if (ukh->ukh_nused > 1) { |
if (ukh->ukh_nused == UVM_KMAPENT_CHUNK - 1) |
if (ukh->ukh_nused == UVM_KMAPENT_CHUNK - 1) |
LIST_INSERT_HEAD( |
LIST_INSERT_HEAD( |
&vm_map_to_kernel(map)->vmk_kentry_free, |
&vm_map_to_kernel(map)->vmk_kentry_free, |
ukh, ukh_listq); |
ukh, ukh_listq); |
simple_unlock(&uvm.kentry_lock); |
mutex_spin_exit(&uvm_kentry_lock); |
splx(s); |
|
return; |
return; |
} |
} |
|
|
Line 4272 uvm_kmapent_free(struct vm_map_entry *en |
|
Line 4512 uvm_kmapent_free(struct vm_map_entry *en |
|
|
|
if (LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free) == ukh && |
if (LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free) == ukh && |
LIST_NEXT(ukh, ukh_listq) == NULL) { |
LIST_NEXT(ukh, ukh_listq) == NULL) { |
simple_unlock(&uvm.kentry_lock); |
mutex_spin_exit(&uvm_kentry_lock); |
splx(s); |
|
return; |
return; |
} |
} |
LIST_REMOVE(ukh, ukh_listq); |
LIST_REMOVE(ukh, ukh_listq); |
simple_unlock(&uvm.kentry_lock); |
mutex_spin_exit(&uvm_kentry_lock); |
splx(s); |
|
|
|
KASSERT(ukh->ukh_nused == 1); |
KASSERT(ukh->ukh_nused == 1); |
|
|
Line 4307 uvm_kmapent_free(struct vm_map_entry *en |
|
Line 4545 uvm_kmapent_free(struct vm_map_entry *en |
|
vm_map_unlock(map); |
vm_map_unlock(map); |
pg = PHYS_TO_VM_PAGE(pa); |
pg = PHYS_TO_VM_PAGE(pa); |
uvm_pagefree(pg); |
uvm_pagefree(pg); |
ukh_free++; |
UVMMAP_EVCNT_INCR(ukh_free); |
|
} |
|
|
|
static vsize_t |
|
uvm_kmapent_overhead(vsize_t size) |
|
{ |
|
|
|
/* |
|
* - the max number of unmerged entries is howmany(size, PAGE_SIZE) |
|
* as the min allocation unit is PAGE_SIZE. |
|
* - UVM_KMAPENT_CHUNK "kmapent"s are allocated from a page. |
|
 * one of them is used to map the page itself. |
|
*/ |
|
|
|
return howmany(howmany(size, PAGE_SIZE), (UVM_KMAPENT_CHUNK - 1)) * |
|
PAGE_SIZE; |
} |
} |
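/*
 * Illustrative note (not part of the original source; values assumed
 * for the example only): with PAGE_SIZE == 4096 and, say,
 * UVM_KMAPENT_CHUNK == 8, wiring a 1MB (1048576-byte) range may need
 * up to howmany(1048576, 4096) == 256 unmerged entries.  Each entry
 * page yields UVM_KMAPENT_CHUNK - 1 == 7 usable entries (one entry
 * maps the page itself), so the reserved overhead would be
 * howmany(256, 7) * PAGE_SIZE == 37 * 4096 == 151552 bytes.
 */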
|
|
/* |
/* |
Line 4375 uvm_mapent_trymerge(struct vm_map *map, |
|
Line 4628 uvm_mapent_trymerge(struct vm_map *map, |
|
struct vm_map_entry *prev; |
struct vm_map_entry *prev; |
vsize_t size; |
vsize_t size; |
int merged = 0; |
int merged = 0; |
boolean_t copying; |
bool copying; |
int newetype; |
int newetype; |
|
|
if (VM_MAP_USE_KMAPENT(map)) { |
if (VM_MAP_USE_KMAPENT(map)) { |
Line 4420 uvm_mapent_trymerge(struct vm_map *map, |
|
Line 4673 uvm_mapent_trymerge(struct vm_map *map, |
|
} |
} |
|
|
entry->end = next->end; |
entry->end = next->end; |
|
clear_hints(map, next); |
uvm_map_entry_unlink(map, next); |
uvm_map_entry_unlink(map, next); |
if (copying) { |
if (copying) { |
entry->aref = next->aref; |
entry->aref = next->aref; |
entry->etype &= ~UVM_ET_NEEDSCOPY; |
entry->etype &= ~UVM_ET_NEEDSCOPY; |
} |
} |
uvm_tree_sanity(map, "trymerge forwardmerge"); |
uvm_map_check(map, "trymerge forwardmerge"); |
uvm_mapent_free_merged(map, next); |
uvm_mapent_free_merged(map, next); |
merged++; |
merged++; |
} |
} |
Line 4460 uvm_mapent_trymerge(struct vm_map *map, |
|
Line 4714 uvm_mapent_trymerge(struct vm_map *map, |
|
} |
} |
|
|
entry->start = prev->start; |
entry->start = prev->start; |
|
clear_hints(map, prev); |
uvm_map_entry_unlink(map, prev); |
uvm_map_entry_unlink(map, prev); |
if (copying) { |
if (copying) { |
entry->aref = prev->aref; |
entry->aref = prev->aref; |
entry->etype &= ~UVM_ET_NEEDSCOPY; |
entry->etype &= ~UVM_ET_NEEDSCOPY; |
} |
} |
uvm_tree_sanity(map, "trymerge backmerge"); |
uvm_map_check(map, "trymerge backmerge"); |
uvm_mapent_free_merged(map, prev); |
uvm_mapent_free_merged(map, prev); |
merged++; |
merged++; |
} |
} |
Line 4485 uvm_mapent_trymerge(struct vm_map *map, |
|
Line 4740 uvm_mapent_trymerge(struct vm_map *map, |
|
*/ |
*/ |
|
|
void |
void |
uvm_map_printit(struct vm_map *map, boolean_t full, |
uvm_map_printit(struct vm_map *map, bool full, |
void (*pr)(const char *, ...)) |
void (*pr)(const char *, ...)) |
{ |
{ |
struct vm_map_entry *entry; |
struct vm_map_entry *entry; |
Line 4521 uvm_map_printit(struct vm_map *map, bool |
|
Line 4776 uvm_map_printit(struct vm_map *map, bool |
|
*/ |
*/ |
|
|
void |
void |
uvm_object_printit(struct uvm_object *uobj, boolean_t full, |
uvm_object_printit(struct uvm_object *uobj, bool full, |
void (*pr)(const char *, ...)) |
void (*pr)(const char *, ...)) |
{ |
{ |
struct vm_page *pg; |
struct vm_page *pg; |
Line 4554 uvm_object_printit(struct uvm_object *uo |
|
Line 4809 uvm_object_printit(struct uvm_object *uo |
|
* uvm_page_printit: actually print the page |
* uvm_page_printit: actually print the page |
*/ |
*/ |
|
|
static const char page_flagbits[] = |
static const char page_flagbits[] = UVM_PGFLAGBITS; |
"\20\1BUSY\2WANTED\3TABLED\4CLEAN\5PAGEOUT\6RELEASED\7FAKE\10RDONLY" |
static const char page_pqflagbits[] = UVM_PQFLAGBITS; |
"\11ZERO\15PAGER1"; |
|
static const char page_pqflagbits[] = |
|
"\20\1FREE\2INACTIVE\3ACTIVE\5ANON\6AOBJ"; |
|
|
|
void |
void |
uvm_page_printit(struct vm_page *pg, boolean_t full, |
uvm_page_printit(struct vm_page *pg, bool full, |
void (*pr)(const char *, ...)) |
void (*pr)(const char *, ...)) |
{ |
{ |
struct vm_page *tpg; |
struct vm_page *tpg; |
Line 4621 uvm_page_printit(struct vm_page *pg, boo |
|
Line 4873 uvm_page_printit(struct vm_page *pg, boo |
|
int color = VM_PGCOLOR_BUCKET(pg); |
int color = VM_PGCOLOR_BUCKET(pg); |
pgl = &uvm.page_free[fl].pgfl_buckets[color].pgfl_queues[ |
pgl = &uvm.page_free[fl].pgfl_buckets[color].pgfl_queues[ |
((pg)->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN]; |
((pg)->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN]; |
} else if (pg->pqflags & PQ_INACTIVE) { |
|
pgl = &uvm.page_inactive; |
|
} else if (pg->pqflags & PQ_ACTIVE) { |
|
pgl = &uvm.page_active; |
|
} else { |
} else { |
pgl = NULL; |
pgl = NULL; |
} |
} |
Line 4642 uvm_page_printit(struct vm_page *pg, boo |
|
Line 4890 uvm_page_printit(struct vm_page *pg, boo |
|
(*pr)(" >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n"); |
(*pr)(" >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n"); |
} |
} |
} |
} |
|
|
|
/* |
|
 * uvm_page_printall - print a summary of all managed pages |
|
*/ |
|
|
|
void |
|
uvm_page_printall(void (*pr)(const char *, ...)) |
|
{ |
|
unsigned i; |
|
struct vm_page *pg; |
|
|
|
(*pr)("%18s %4s %4s %18s %18s" |
|
#ifdef UVM_PAGE_TRKOWN |
|
" OWNER" |
|
#endif |
|
"\n", "PAGE", "FLAG", "PQ", "UOBJECT", "UANON"); |
|
for (i = 0; i < vm_nphysseg; i++) { |
|
for (pg = vm_physmem[i].pgs; pg <= vm_physmem[i].lastpg; pg++) { |
|
(*pr)("%18p %04x %04x %18p %18p", |
|
pg, pg->flags, pg->pqflags, pg->uobject, |
|
pg->uanon); |
|
#ifdef UVM_PAGE_TRKOWN |
|
if (pg->flags & PG_BUSY) |
|
(*pr)(" %d [%s]", pg->owner, pg->owner_tag); |
|
#endif |
|
(*pr)("\n"); |
|
} |
|
} |
|
} |
|
|
#endif |
#endif |
|
|
|
/* |
|
* uvm_map_create: create map |
|
*/ |
|
|
|
struct vm_map * |
|
uvm_map_create(pmap_t pmap, vaddr_t vmin, vaddr_t vmax, int flags) |
|
{ |
|
struct vm_map *result; |
|
|
|
MALLOC(result, struct vm_map *, sizeof(struct vm_map), |
|
M_VMMAP, M_WAITOK); |
|
uvm_map_setup(result, vmin, vmax, flags); |
|
result->pmap = pmap; |
|
return(result); |
|
} |
|
|
|
/* |
|
* uvm_map_setup: init map |
|
* |
|
* => map must not be in service yet. |
|
*/ |
|
|
|
void |
|
uvm_map_setup(struct vm_map *map, vaddr_t vmin, vaddr_t vmax, int flags) |
|
{ |
|
int ipl; |
|
|
|
RB_INIT(&map->rbhead); |
|
map->header.next = map->header.prev = &map->header; |
|
map->nentries = 0; |
|
map->size = 0; |
|
map->ref_count = 1; |
|
vm_map_setmin(map, vmin); |
|
vm_map_setmax(map, vmax); |
|
map->flags = flags; |
|
map->first_free = &map->header; |
|
map->hint = &map->header; |
|
map->timestamp = 0; |
|
map->busy = NULL; |
|
|
|
if ((flags & VM_MAP_INTRSAFE) != 0) { |
|
ipl = IPL_VM; |
|
} else { |
|
ipl = IPL_NONE; |
|
} |
|
|
|
rw_init(&map->lock); |
|
cv_init(&map->cv, "vm_map"); |
|
mutex_init(&map->misc_lock, MUTEX_DRIVER, ipl); |
|
mutex_init(&map->mutex, MUTEX_DRIVER, ipl); |
|
|
|
/* |
|
* The hint lock can get acquired with the pagequeue |
|
 * lock held, so it must be at IPL_VM. |
|
*/ |
|
mutex_init(&map->hint_lock, MUTEX_DRIVER, IPL_VM); |
|
} |
|
|
|
|
|
/* |
|
* U N M A P - m a i n e n t r y p o i n t |
|
*/ |
|
|
|
/* |
|
 * uvm_unmap1: remove mappings from a vm_map (from "start" up to "end") |
|
* |
|
* => caller must check alignment and size |
|
* => map must be unlocked (we will lock it) |
|
* => flags is UVM_FLAG_QUANTUM or 0. |
|
*/ |
|
|
|
void |
|
uvm_unmap1(struct vm_map *map, vaddr_t start, vaddr_t end, int flags) |
|
{ |
|
struct vm_map_entry *dead_entries; |
|
struct uvm_mapent_reservation umr; |
|
UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist); |
|
|
|
UVMHIST_LOG(maphist, " (map=0x%x, start=0x%x, end=0x%x)", |
|
map, start, end, 0); |
|
/* |
|
 * work now done by helper functions. wipe the pmaps and then |
|
* detach from the dead entries... |
|
*/ |
|
uvm_mapent_reserve(map, &umr, 2, flags); |
|
vm_map_lock(map); |
|
uvm_unmap_remove(map, start, end, &dead_entries, &umr, flags); |
|
vm_map_unlock(map); |
|
uvm_mapent_unreserve(map, &umr); |
|
|
|
if (dead_entries != NULL) |
|
uvm_unmap_detach(dead_entries, 0); |
|
|
|
UVMHIST_LOG(maphist, "<- done", 0,0,0,0); |
|
} |
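/*
 * Usage sketch (not from the original source): a caller passes a
 * page-aligned range and must not hold the map lock, e.g.
 *
 *	uvm_unmap1(kernel_map, va, va + round_page(len), 0);
 *
 * where "va" and "len" are hypothetical example variables.  The plain
 * uvm_unmap() interface is presumably a thin wrapper that calls
 * uvm_unmap1() with flags == 0, matching the UVMHIST_FUNC name above.
 */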
|
|
|
|
|
/* |
|
* uvm_map_reference: add reference to a map |
|
* |
|
* => map need not be locked (we use misc_lock). |
|
*/ |
|
|
|
void |
|
uvm_map_reference(struct vm_map *map) |
|
{ |
|
mutex_enter(&map->misc_lock); |
|
map->ref_count++; |
|
mutex_exit(&map->misc_lock); |
|
} |
|
|
|
struct vm_map_kernel * |
|
vm_map_to_kernel(struct vm_map *map) |
|
{ |
|
|
|
KASSERT(VM_MAP_IS_KERNEL(map)); |
|
|
|
return (struct vm_map_kernel *)map; |
|
} |
|
|
|
bool |
|
vm_map_starved_p(struct vm_map *map) |
|
{ |
|
|
|
if ((map->flags & VM_MAP_WANTVA) != 0) { |
|
return true; |
|
} |
|
/* XXX */ |
|
if ((vm_map_max(map) - vm_map_min(map)) / 16 * 15 < map->size) { |
|
return true; |
|
} |
|
return false; |
|
} |
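/*
 * Illustrative note (not part of the original source): the XXX check
 * above treats the map as starved once more than 15/16 of its virtual
 * address range is in use.  For example (hypothetical numbers), a
 * kernel submap spanning 256MB would be reported starved once
 * map->size exceeds 240MB.
 */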