version 1.59, 2010/02/06 12:10:59 |
version 1.59.2.28, 2010/08/11 09:50:01 |
Line 185 struct vm_page { |
|
Line 185 struct vm_page { |
|
|
|
#define UVM_PGFLAGBITS \ |
#define UVM_PGFLAGBITS \ |
"\20\1BUSY\2WANTED\3TABLED\4CLEAN\5PAGEOUT\6RELEASED\7FAKE\10RDONLY" \ |
"\20\1BUSY\2WANTED\3TABLED\4CLEAN\5PAGEOUT\6RELEASED\7FAKE\10RDONLY" \ |
"\11ZERO\15PAGER1" |
"\11ZERO\12DIRECT\15PAGER1" |
|
|
#define PQ_FREE 0x0001 /* page is on free list */ |
#define PQ_FREE 0x0001 /* page is on free list */ |
#define PQ_ANON 0x0002 /* page is part of an anon, rather |
#define PQ_ANON 0x0002 /* page is part of an anon, rather |
Line 194 struct vm_page { |
|
Line 194 struct vm_page { |
|
uvm_object */ |
uvm_object */ |
#define PQ_SWAPBACKED (PQ_ANON|PQ_AOBJ) |
#define PQ_SWAPBACKED (PQ_ANON|PQ_AOBJ) |
#define PQ_READAHEAD 0x0008 /* read-ahead but has not been "hit" yet */ |
#define PQ_READAHEAD 0x0008 /* read-ahead but has not been "hit" yet */ |
|
#define PQ_FIXED 0x0010 /* resident page (never paged out) */ |
|
|
#define PQ_PRIVATE1 0x0100 |
#define PQ_PRIVATE1 0x0100 |
#define PQ_PRIVATE2 0x0200 |
#define PQ_PRIVATE2 0x0200 |
Line 232 struct vm_page { |
|
Line 233 struct vm_page { |
|
struct vm_physseg { |
struct vm_physseg { |
paddr_t start; /* PF# of first page in segment */ |
paddr_t start; /* PF# of first page in segment */ |
paddr_t end; /* (PF# of last page in segment) + 1 */ |
paddr_t end; /* (PF# of last page in segment) + 1 */ |
|
|
|
/* memory properties */ |
paddr_t avail_start; /* PF# of first free page in segment */ |
paddr_t avail_start; /* PF# of first free page in segment */ |
paddr_t avail_end; /* (PF# of last free page in segment) +1 */ |
paddr_t avail_end; /* (PF# of last free page in segment) +1 */ |
int free_list; /* which free list they belong on */ |
int free_list; /* which free list they belong on */ |
struct vm_page *pgs; /* vm_page structures (from start) */ |
struct vm_page *pgs; /* vm_page structures (from start) */ |
struct vm_page *lastpg; /* vm_page structure for end */ |
struct vm_page *endpg; /* vm_page structure for end */ |
|
|
#ifdef __HAVE_PMAP_PHYSSEG |
#ifdef __HAVE_PMAP_PHYSSEG |
struct pmap_physseg pmseg; /* pmap specific (MD) data */ |
struct pmap_physseg pmseg; /* pmap specific (MD) data */ |
#endif |
#endif |
|
SIMPLEQ_ENTRY(vm_physseg) list; |
|
|
|
/* device properties */ |
|
int prot; /* protection of device region */ |
|
int flags; /* XXXUEBS BUS_SPACE_MAP_* */ |
}; |
}; |
|
|
#ifdef _KERNEL |
#ifdef _KERNEL |
Line 254 extern bool vm_page_zero_enable; |
|
Line 263 extern bool vm_page_zero_enable; |
|
* physical memory config is stored in vm_physmem. |
* physical memory config is stored in vm_physmem. |
*/ |
*/ |
|
|
extern struct vm_physseg vm_physmem[VM_PHYSSEG_MAX]; |
#define VM_PHYSMEM_PTR(i) (vm_physmem_ptrs[i]) |
extern int vm_nphysseg; |
#define VM_PHYSDEV_PTR(i) (vm_physdev_ptrs[i]) |
|
|
|
extern struct vm_physseg *vm_physmem_ptrs[VM_PHYSSEG_MAX]; |
|
extern int vm_nphysmem; |
|
#ifdef XIP |
|
extern struct vm_physseg *vm_physdev_ptrs[VM_PHYSSEG_MAX]; |
|
extern int vm_nphysdev; |
|
#endif |
|
|
|
#define vm_nphysseg vm_nphysmem /* XXX backward compat */ |
|
|
/* |
/* |
* prototypes: the following prototypes define the interface to pages |
* prototypes: the following prototypes define the interface to pages |
Line 289 bool uvm_pageismanaged(paddr_t); |
|
Line 307 bool uvm_pageismanaged(paddr_t); |
|
|
|
int uvm_page_lookup_freelist(struct vm_page *); |
int uvm_page_lookup_freelist(struct vm_page *); |
|
|
static struct vm_page *PHYS_TO_VM_PAGE(paddr_t); |
int vm_physseg_find(paddr_t, int *); |
static int vm_physseg_find(paddr_t, int *); |
struct vm_page *uvm_phys_to_vm_page(paddr_t); |
|
paddr_t uvm_vm_page_to_phys(const struct vm_page *); |
|
#ifdef XIP |
|
int vm_physseg_find_device(paddr_t, int *); |
|
#endif |
|
|
/* |
/* |
* macros |
* macros |
Line 298 static int vm_physseg_find(paddr_t, int |
|
Line 320 static int vm_physseg_find(paddr_t, int |
|
|
|
#define UVM_PAGE_TREE_PENALTY 4 /* XXX: a guess */ |
#define UVM_PAGE_TREE_PENALTY 4 /* XXX: a guess */ |
|
|
#define VM_PAGE_TO_PHYS(entry) ((entry)->phys_addr) |
#define VM_PAGE_TO_PHYS(entry) uvm_vm_page_to_phys(entry) |
|
|
|
#ifdef __HAVE_VM_PAGE_MD |
|
#define VM_PAGE_TO_MD(pg) (&(pg)->mdpage) |
|
#endif |
|
|
/* |
/* |
* Compute the page color bucket for a given page. |
* Compute the page color bucket for a given page. |
Line 306 static int vm_physseg_find(paddr_t, int |
|
Line 332 static int vm_physseg_find(paddr_t, int |
|
#define VM_PGCOLOR_BUCKET(pg) \ |
#define VM_PGCOLOR_BUCKET(pg) \ |
(atop(VM_PAGE_TO_PHYS((pg))) & uvmexp.colormask) |
(atop(VM_PAGE_TO_PHYS((pg))) & uvmexp.colormask) |
|
|
/* |
#define PHYS_TO_VM_PAGE(pa) uvm_phys_to_vm_page(pa) |
* when VM_PHYSSEG_MAX is 1, we can simplify these functions |
|
*/ |
|
|
|
#if VM_PHYSSEG_MAX == 1 |
|
static inline int vm_physseg_find_contig(struct vm_physseg *, int, paddr_t, int *); |
|
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH) |
|
static inline int vm_physseg_find_bsearch(struct vm_physseg *, int, paddr_t, int *); |
|
#else |
|
static inline int vm_physseg_find_linear(struct vm_physseg *, int, paddr_t, int *); |
|
#endif |
|
|
|
/*
 * vm_physseg_find: find the vm_physseg structure that contains the
 * physical page frame "pframe".  Returns the index of the matching
 * segment (-1 if no segment contains pframe) and, when "offp" is
 * non-NULL, stores the page's offset within that segment through it.
 * The lookup strategy is selected at compile time from the platform's
 * VM_PHYSSEG configuration.
 */
static inline int
vm_physseg_find(paddr_t pframe, int *offp)
{

#if VM_PHYSSEG_MAX == 1
	/* only one segment can ever exist: simple range test */
	return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	/* binary search of the segment table */
	return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp);
#else
	/* linear scan of the segment table */
	return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp);
#endif
}
|
|
|
#if VM_PHYSSEG_MAX == 1

/*
 * vm_physseg_find_contig: lookup for the single-segment ("contig")
 * configuration.  With VM_PHYSSEG_MAX == 1 only segs[0] can exist, so
 * membership reduces to one range test.  On success the segment index
 * (always 0) is returned and, if "offp" is non-NULL, the page-frame
 * offset within the segment is stored through it; -1 is returned when
 * pframe lies outside the segment.
 */
static inline int
vm_physseg_find_contig(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
{

	/* 'contig' case: guard-clause form of the range check */
	if (pframe < segs[0].start || pframe >= segs[0].end)
		return -1;

	if (offp != NULL)
		*offp = pframe - segs[0].start;
	return 0;
}
|
|
|
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

/*
 * vm_physseg_find_bsearch: binary search of the segment table for the
 * segment containing page frame "pframe".  Returns the segment index
 * and stores the page's offset within the segment through "offp" (when
 * non-NULL), or returns -1 if no segment contains pframe.
 *
 * NOTE(review): presumably segs[] is sorted by start address and the
 * segments are non-overlapping — that ordering is what the
 * VM_PSTRAT_BSEARCH strategy relies on; confirm against the code that
 * loads the table.
 */
static inline int
vm_physseg_find_bsearch(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
{
	/* binary search for it */
	u_int start, len, try;

	/*
	 * if try is too large (thus target is less than try) we reduce
	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
	 *
	 * if the try is too small (thus target is greater than try) then
	 * we set the new start to be (try + 1). this means we need to
	 * reduce the length to (round(len/2) - 1).
	 *
	 * note "adjust" below which takes advantage of the fact that
	 * (round(len/2) - 1) == trunc((len - 1) / 2)
	 * for any value of len we may have
	 */

	for (start = 0, len = nsegs ; len != 0 ; len = len / 2) {
		try = start + (len / 2);	/* try in the middle */

		/* start past our try? */
		if (pframe >= segs[try].start) {
			/* was try correct? */
			if (pframe < segs[try].end) {
				if (offp)
					*offp = pframe - segs[try].start;
				return(try);            /* got it */
			}
			start = try + 1;	/* next time, start here */
			len--;			/* "adjust" */
		} else {
			/*
			 * pframe before try, just reduce length of
			 * region, done in "for" loop
			 */
		}
	}
	return(-1);
}
|
|
|
#else |
|
|
|
static inline int |
|
vm_physseg_find_linear(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp) |
|
{ |
|
/* linear search for it */ |
|
int lcv; |
|
|
|
for (lcv = 0; lcv < nsegs; lcv++) { |
|
if (pframe >= segs[lcv].start && |
|
pframe < segs[lcv].end) { |
|
if (offp) |
|
*offp = pframe - segs[lcv].start; |
|
return(lcv); /* got it */ |
|
} |
|
} |
|
return(-1); |
|
} |
|
#endif |
|
|
|
|
|
/* |
|
* PHYS_TO_VM_PAGE: find vm_page for a PA. used by MI code to get vm_pages |
|
* back from an I/O mapping (ugh!). used in some MD code as well. |
|
*/ |
|
static inline struct vm_page * |
|
PHYS_TO_VM_PAGE(paddr_t pa) |
|
{ |
|
paddr_t pf = atop(pa); |
|
int off; |
|
int psi; |
|
|
|
psi = vm_physseg_find(pf, &off); |
|
if (psi != -1) |
|
return(&vm_physmem[psi].pgs[off]); |
|
return(NULL); |
|
} |
|
|
|
#define VM_PAGE_IS_FREE(entry) ((entry)->pqflags & PQ_FREE) |
#define VM_PAGE_IS_FREE(entry) ((entry)->pqflags & PQ_FREE) |
#define VM_FREE_PAGE_TO_CPU(pg) ((struct uvm_cpu *)((uintptr_t)pg->offset)) |
#define VM_FREE_PAGE_TO_CPU(pg) ((struct uvm_cpu *)((uintptr_t)pg->offset)) |