/* diff header: version 1.76.6.2 (2017/02/05 13:41:01) vs. version 1.77 (2015/03/21 13:11:14) */
|
|
 * UVM and pmap(9) may use uvm_page_locked_p() to assert whether the
 * page owner's lock is acquired.
 *
 * A page can be in one of four life states:
 *
 *  o free
 *    => pageq.list is entry on global free page queue
 *
 *  o owned by a uvm_object
 *    => pageq.queue is entry on wired page queue, if any
 *    => listq.queue is entry on list of pages in object
 *    => uanon is NULL or the vm_anon to which it has been O->A loaned
 *    => uobject is owner
 *
 *  o owned by a vm_anon
 *    => pageq is unused (XXX correct?)
/* (diff navigation marker: the struct vm_page definition begins at line 294 of the full header) */
/* physical-memory segment lookup strategies (presumably selected by
 * VM_PHYSSEG_STRAT -- confirm against the machine-dependent config) */
#define VM_PSTRAT_BSEARCH	2
#define VM_PSTRAT_BIGFIRST	3
|
|
|
/* |
|
* vm_physseg: describes one segment of physical memory |
|
*/ |
|
struct vm_physseg { |
|
paddr_t start; /* PF# of first page in segment */ |
|
paddr_t end; /* (PF# of last page in segment) + 1 */ |
|
paddr_t avail_start; /* PF# of first free page in segment */ |
|
paddr_t avail_end; /* (PF# of last free page in segment) +1 */ |
|
struct vm_page *pgs; /* vm_page structures (from start) */ |
|
struct vm_page *lastpg; /* vm_page structure for end */ |
|
int free_list; /* which free list they belong on */ |
|
u_int start_hint; /* start looking for free pages here */ |
|
/* protected by uvm_fpageqlock */ |
|
#ifdef __HAVE_PMAP_PHYSSEG |
|
struct pmap_physseg pmseg; /* pmap specific (MD) data */ |
|
#endif |
|
}; |
|
|
#ifdef _KERNEL

extern bool vm_page_zero_enable;
|
|
/*
 * physical memory config is stored in vm_physmem.
 */
|
|
#define VM_PHYSMEM_PTR(i) (&vm_physmem[i]) |
|
#if VM_PHYSSEG_MAX == 1 |
|
#define VM_PHYSMEM_PTR_SWAP(i, j) /* impossible */ |
|
#else |
|
#define VM_PHYSMEM_PTR_SWAP(i, j) \ |
|
do { vm_physmem[(i)] = vm_physmem[(j)]; } while (0) |
|
#endif |
|
|
|
extern struct vm_physseg vm_physmem[VM_PHYSSEG_MAX]; |
|
extern int vm_nphysseg; |
|
|
|
/* |
* prototypes: the following prototypes define the interface to pages |
* prototypes: the following prototypes define the interface to pages |
*/ |
*/ |
|
|
Line 333 bool uvm_page_locked_p(struct vm_page *) |
|
Line 366 bool uvm_page_locked_p(struct vm_page *) |
|
|
|
int uvm_page_lookup_freelist(struct vm_page *); |
int uvm_page_lookup_freelist(struct vm_page *); |
|
|
|
int vm_physseg_find(paddr_t, int *); |
struct vm_page *uvm_phys_to_vm_page(paddr_t); |
struct vm_page *uvm_phys_to_vm_page(paddr_t); |
paddr_t uvm_vm_page_to_phys(const struct vm_page *); |
paddr_t uvm_vm_page_to_phys(const struct vm_page *); |
|
|
#if !defined(PMAP_STEAL_MEMORY) |
|
bool uvm_page_physget(paddr_t *); |
|
#endif |
|
|
|
/* |
/* |
* macros |
* macros |
*/ |
*/ |