version 1.36, 2010/01/30 12:06:20 |
version 1.36.2.23, 2010/09/26 06:38:36 |
|
|
#include <sys/cdefs.h> |
#include <sys/cdefs.h> |
__KERNEL_RCSID(0, "$NetBSD$"); |
__KERNEL_RCSID(0, "$NetBSD$"); |
|
|
|
#include "opt_xip.h" |
|
|
#include <sys/param.h> |
#include <sys/param.h> |
#include <sys/systm.h> |
#include <sys/systm.h> |
#include <sys/proc.h> |
#include <sys/proc.h> |
Line 48 __KERNEL_RCSID(0, "$NetBSD$"); |
|
Line 50 __KERNEL_RCSID(0, "$NetBSD$"); |
|
#include <sys/kauth.h> |
#include <sys/kauth.h> |
#include <sys/fstrans.h> |
#include <sys/fstrans.h> |
#include <sys/buf.h> |
#include <sys/buf.h> |
|
#include <sys/once.h> |
|
|
#include <miscfs/genfs/genfs.h> |
#include <miscfs/genfs/genfs.h> |
#include <miscfs/genfs/genfs_node.h> |
#include <miscfs/genfs/genfs_node.h> |
Line 56 __KERNEL_RCSID(0, "$NetBSD$"); |
|
Line 59 __KERNEL_RCSID(0, "$NetBSD$"); |
|
#include <uvm/uvm.h> |
#include <uvm/uvm.h> |
#include <uvm/uvm_pager.h> |
#include <uvm/uvm_pager.h> |
|
|
|
static int genfs_do_getpages(void *); |
|
#ifdef XIP |
|
static int genfs_do_getpages_xip(void *); |
|
#endif |
static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *, |
static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *, |
off_t, enum uio_rw); |
off_t, enum uio_rw); |
static void genfs_dio_iodone(struct buf *); |
static void genfs_dio_iodone(struct buf *); |
|
|
static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw, |
static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw, |
void (*)(struct buf *)); |
void (*)(struct buf *)); |
static inline void genfs_rel_pages(struct vm_page **, int); |
static void genfs_rel_pages(struct vm_page **, int); |
|
static void genfs_markdirty(struct vnode *); |
|
|
int genfs_maxdio = MAXPHYS; |
int genfs_maxdio = MAXPHYS; |
|
|
static inline void |
static void |
genfs_rel_pages(struct vm_page **pgs, int npages) |
genfs_rel_pages(struct vm_page **pgs, int npages) |
{ |
{ |
int i; |
int i; |
Line 85 genfs_rel_pages(struct vm_page **pgs, in |
|
Line 93 genfs_rel_pages(struct vm_page **pgs, in |
|
mutex_exit(&uvm_pageqlock); |
mutex_exit(&uvm_pageqlock); |
} |
} |
|
|
|
static void |
|
genfs_markdirty(struct vnode *vp) |
|
{ |
|
struct genfs_node * const gp = VTOG(vp); |
|
|
|
KASSERT(mutex_owned(&vp->v_interlock)); |
|
gp->g_dirtygen++; |
|
if ((vp->v_iflag & VI_ONWORKLST) == 0) { |
|
vn_syncer_add_to_worklist(vp, filedelay); |
|
} |
|
if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) { |
|
vp->v_iflag |= VI_WRMAPDIRTY; |
|
} |
|
} |
|
|
/* |
/* |
* generic VM getpages routine. |
* generic VM getpages routine. |
* Return PG_BUSY pages for the given range, |
* Return PG_BUSY pages for the given range, |
Line 94 genfs_rel_pages(struct vm_page **pgs, in |
|
Line 117 genfs_rel_pages(struct vm_page **pgs, in |
|
/*
 * genfs_getpages: generic VM getpages entry point.
 *
 * When the kernel is built with XIP support, execute-in-place vnodes
 * (VV_XIP) are routed to the XIP-specific implementation; everything
 * else takes the ordinary genfs_do_getpages() path.
 */
int
genfs_getpages(void *v)
{
#ifdef XIP
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ * const ap = v;

	if ((ap->a_vp->v_vflag & VV_XIP) != 0)
		return genfs_do_getpages_xip(v);
#endif
	return genfs_do_getpages(v);
}
|
|
|
static int |
|
genfs_do_getpages(void *v) |
|
{ |
struct vop_getpages_args /* { |
struct vop_getpages_args /* { |
struct vnode *a_vp; |
struct vnode *a_vp; |
voff_t a_offset; |
voff_t a_offset; |
|
|
} |
} |
} |
} |
|
|
if (memwrite) { |
|
gp->g_dirtygen++; |
|
if ((vp->v_iflag & VI_ONWORKLST) == 0) { |
|
vn_syncer_add_to_worklist(vp, filedelay); |
|
} |
|
if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) { |
|
vp->v_iflag |= VI_WRMAPDIRTY; |
|
} |
|
} |
|
|
|
/* |
/* |
* For PGO_LOCKED requests, just return whatever's in memory. |
* For PGO_LOCKED requests, just return whatever's in memory. |
*/ |
*/ |
|
|
genfs_node_unlock(vp); |
genfs_node_unlock(vp); |
} |
} |
error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0); |
error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0); |
|
if (error == 0 && memwrite) { |
|
genfs_markdirty(vp); |
|
} |
goto out_err; |
goto out_err; |
} |
} |
mutex_exit(&uobj->vmobjlock); |
mutex_exit(&uobj->vmobjlock); |
|
|
} |
} |
} |
} |
mutex_exit(&uvm_pageqlock); |
mutex_exit(&uvm_pageqlock); |
|
if (memwrite) { |
|
genfs_markdirty(vp); |
|
} |
mutex_exit(&uobj->vmobjlock); |
mutex_exit(&uobj->vmobjlock); |
if (ap->a_m != NULL) { |
if (ap->a_m != NULL) { |
memcpy(ap->a_m, &pgs[ridx], |
memcpy(ap->a_m, &pgs[ridx], |
|
|
out_err: |
out_err: |
if (has_trans) |
if (has_trans) |
fstrans_done(vp->v_mount); |
fstrans_done(vp->v_mount); |
return (error); |
return error; |
|
} |
|
|
|
#ifdef XIP |
|
static struct uvm_object xip_zero_obj; |
|
static struct vm_page *xip_zero_page; |
|
|
|
/*
 * xip_zero_page_init: allocate and wire the single zero-filled page
 * used to back unallocated (hole) blocks of XIP vnodes.  Invoked
 * exactly once via RUN_ONCE() from genfs_do_getpages_xip().
 *
 * Returns 0 (the RUN_ONCE protocol requires an int return).
 */
static int
xip_zero_page_init(void)
{

	/* Private uvm_object whose only purpose is to own the zero page. */
	UVM_OBJ_INIT(&xip_zero_obj, NULL, 0);
	xip_zero_page = uvm_pagealloc(&xip_zero_obj, 0, NULL, UVM_PGA_ZERO);
	/* NOTE(review): allocation failure is only asserted, not handled. */
	KASSERT(xip_zero_page != NULL);
	/* Wire the page so it can never be paged out. */
	uvm_pagewire(xip_zero_page);
	return 0;
}
} |
|
|
/* |
/* |
|
* genfs_do_getpages_xip |
|
* Return "direct pages" of XIP vnode. The block addresses of XIP |
|
* vnode pages are returned back to the VM fault handler as the |
|
* actually mapped physical addresses. |
|
* |
|
* XXX Should be merged into genfs_do_getpages() after |
|
* XXX genfs_do_getpages() and genfs_do_io() are merged. |
|
*/ |
|
/*
 * genfs_do_getpages_xip:
 *	Return "direct pages" of an XIP vnode.  The pages returned map
 *	the backing device's physical memory directly, so the VM fault
 *	handler ends up mapping the medium itself.
 *
 * Locking: on entry the caller holds uobj->vmobjlock iff PGO_LOCKED is
 * set in a_flags; the lock is dropped around VOP_BMAP() and re-taken
 * for the page-flag fixup pass below, restoring the caller's state on
 * return.
 */
static int
genfs_do_getpages_xip(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ * const ap = v;

	struct vnode * const vp = ap->a_vp;
	int *npagesp = ap->a_count;
	const off_t offset = ap->a_offset;
	struct vm_page **pps = ap->a_m;
	struct uvm_object * const uobj = &vp->v_uobj;
	const int flags = ap->a_flags;

	int error;
	off_t eof, sbkoff, ebkoff, off;
	int npages;
	int fs_bshift, fs_bsize, dev_bshift, dev_bsize;
	int i;

	UVMHIST_FUNC("genfs_do_getpages_xip"); UVMHIST_CALLED(ubchist);

	KASSERT((vp->v_vflag & VV_XIP) != 0);

	/* Clamp the request so it does not run past end-of-file. */
	GOP_SIZE(vp, vp->v_size, &eof, GOP_SIZE_MEM);
	npages = MIN(*npagesp, round_page(eof - offset) >> PAGE_SHIFT);

	fs_bshift = vp->v_mount->mnt_fs_bshift;
	fs_bsize = 1 << fs_bshift;
	dev_bshift = vp->v_mount->mnt_dev_bshift;
	dev_bsize = 1 << dev_bshift;

	/* Filesystem-block-aligned bounds of the request (logging only). */
	sbkoff = offset & ~(fs_bsize - 1);
	ebkoff = ((offset + PAGE_SIZE * npages) + (fs_bsize - 1)) &
	    ~(fs_bsize - 1);

	UVMHIST_LOG(ubchist, "xip npages=%d sbkoff=%lx ebkoff=%lx",
	    npages, (long)sbkoff, (long)ebkoff, 0);

	/* Drop the object lock (if held) across VOP_BMAP() calls. */
	if ((flags & PGO_LOCKED) == 0)
		mutex_exit(&uobj->vmobjlock);

	off = offset;
	for (i = 0; i < npages; i++) {
		daddr_t lbn, blkno;
		int run;
		struct vnode *devvp;

		/* Logical block containing this page. */
		lbn = (off & ~(fs_bsize - 1)) >> fs_bshift;

		/* NOTE(review): BMAP failure is only asserted, not handled. */
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		KASSERT(error == 0);
		UVMHIST_LOG(ubchist, "xip VOP_BMAP: lbn=%ld blkno=%ld run=%d",
		    (long)lbn, (long)blkno, run, 0);

		/*
		 * XIP page metadata assignment
		 * - Unallocated block is redirected to the dedicated zero'ed
		 *   page.
		 * - Assume that struct vm_page *[] array of this segment is
		 *   allocated and linearly ordered by physical address.
		 */
		if (blkno < 0) {
			static ONCE_DECL(xip_zero_page_inited);

			RUN_ONCE(&xip_zero_page_inited, xip_zero_page_init);
			pps[i] = xip_zero_page;
		} else {
			struct vm_physseg *seg;
			daddr_t seg_off;
			struct vm_page *pg;

			seg = devvp->v_physseg;
			KASSERT(seg != NULL);
			/* bus_space_mmap cookie -> paddr_t */
			seg_off = (blkno << dev_bshift) +
			    (off - (lbn << fs_bshift));
			KASSERT((seg_off & PAGE_MASK) == 0);
			pg = seg->pgs + (seg_off >> PAGE_SHIFT);
			KASSERT(pg->phys_addr ==
			    (seg->start << PAGE_SHIFT) + seg_off);

			pps[i] = pg;
		}

		UVMHIST_LOG(ubchist, "xip pgs %d => phys_addr=0x%lx (%p)",
		    i,
		    (long)pps[i]->phys_addr,
		    pps[i],
		    0);

		off += PAGE_SIZE;
	}

	/* Re-take the object lock for the flag fixup pass. */
	if ((flags & PGO_LOCKED) == 0)
		mutex_enter(&uobj->vmobjlock);
	KASSERT(mutex_owned(&uobj->vmobjlock));

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pps[i];

		if (pg == xip_zero_page) {
			/* Shared zero page: never busied or re-owned. */
		} else {
			/* Device pages must be read-only, clean and wired. */
			KASSERT((pg->flags & PG_BUSY) == 0);
			KASSERT((pg->flags & PG_RDONLY) != 0);
			KASSERT((pg->flags & PG_CLEAN) != 0);
			KASSERT((pg->pqflags & PQ_FIXED) != 0);
			pg->flags |= PG_BUSY;
			pg->flags &= ~PG_FAKE;
			pg->uobject = &vp->v_uobj;
		}
	}

	/* Restore the caller's lock state (unlocked unless PGO_LOCKED). */
	if ((flags & PGO_LOCKED) == 0)
		mutex_exit(&uobj->vmobjlock);

	/* Report how many pages were actually returned. */
	*npagesp = npages;

	return 0;
}
|
#endif |
|
|
|
/* |
* generic VM putpages routine. |
* generic VM putpages routine. |
* Write the given range of pages to backing store. |
* Write the given range of pages to backing store. |
* |
* |
|
|
dirtygen = gp->g_dirtygen; |
dirtygen = gp->g_dirtygen; |
freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED; |
freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED; |
if (by_list) { |
if (by_list) { |
curmp.uobject = uobj; |
curmp.flags = PG_MARKER; |
curmp.offset = (voff_t)-1; |
endmp.flags = PG_MARKER; |
curmp.flags = PG_BUSY; |
|
endmp.uobject = uobj; |
|
endmp.offset = (voff_t)-1; |
|
endmp.flags = PG_BUSY; |
|
pg = TAILQ_FIRST(&uobj->memq); |
pg = TAILQ_FIRST(&uobj->memq); |
TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq.queue); |
TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq.queue); |
} else { |
} else { |
|
|
* if the current page is not interesting, move on to the next. |
* if the current page is not interesting, move on to the next. |
*/ |
*/ |
|
|
KASSERT(pg == NULL || pg->uobject == uobj); |
KASSERT(pg == NULL || pg->uobject == uobj || |
|
(pg->flags & PG_MARKER) != 0); |
KASSERT(pg == NULL || |
KASSERT(pg == NULL || |
(pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 || |
(pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 || |
(pg->flags & PG_BUSY) != 0); |
(pg->flags & (PG_BUSY|PG_MARKER)) != 0); |
if (by_list) { |
if (by_list) { |
if (pg == &endmp) { |
if (pg == &endmp) { |
break; |
break; |
} |
} |
|
if (pg->flags & PG_MARKER) { |
|
pg = TAILQ_NEXT(pg, listq.queue); |
|
continue; |
|
} |
if (pg->offset < startoff || pg->offset >= endoff || |
if (pg->offset < startoff || pg->offset >= endoff || |
pg->flags & (PG_RELEASED|PG_PAGEOUT)) { |
pg->flags & (PG_RELEASED|PG_PAGEOUT)) { |
if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) { |
if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) { |
|
|
(vp->v_iflag & VI_ONWORKLST) != 0) { |
(vp->v_iflag & VI_ONWORKLST) != 0) { |
#if defined(DEBUG) |
#if defined(DEBUG) |
TAILQ_FOREACH(pg, &uobj->memq, listq.queue) { |
TAILQ_FOREACH(pg, &uobj->memq, listq.queue) { |
|
if ((pg->flags & PG_MARKER) != 0) { |
|
continue; |
|
} |
if ((pg->flags & PG_CLEAN) == 0) { |
if ((pg->flags & PG_CLEAN) == 0) { |
printf("%s: %p: !CLEAN\n", __func__, pg); |
printf("%s: %p: !CLEAN\n", __func__, pg); |
} |
} |
Line 1463 genfs_compat_getpages(void *v) |
|
Line 1662 genfs_compat_getpages(void *v) |
|
orignpages = *ap->a_count; |
orignpages = *ap->a_count; |
pgs = ap->a_m; |
pgs = ap->a_m; |
|
|
if (memwrite && (vp->v_iflag & VI_ONWORKLST) == 0) { |
|
vn_syncer_add_to_worklist(vp, filedelay); |
|
} |
|
if (ap->a_flags & PGO_LOCKED) { |
if (ap->a_flags & PGO_LOCKED) { |
uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m, |
uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m, |
UFP_NOWAIT|UFP_NOALLOC| (memwrite ? UFP_NORDONLY : 0)); |
UFP_NOWAIT|UFP_NOALLOC| (memwrite ? UFP_NORDONLY : 0)); |
|
|
return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0); |
error = ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0; |
|
if (error == 0 && memwrite) { |
|
genfs_markdirty(vp); |
|
} |
|
return error; |
} |
} |
if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) { |
if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) { |
mutex_exit(&uobj->vmobjlock); |
mutex_exit(&uobj->vmobjlock); |
return (EINVAL); |
return EINVAL; |
} |
} |
if ((ap->a_flags & PGO_SYNCIO) == 0) { |
if ((ap->a_flags & PGO_SYNCIO) == 0) { |
mutex_exit(&uobj->vmobjlock); |
mutex_exit(&uobj->vmobjlock); |
Line 1523 genfs_compat_getpages(void *v) |
|
Line 1723 genfs_compat_getpages(void *v) |
|
uvm_page_unbusy(pgs, npages); |
uvm_page_unbusy(pgs, npages); |
} |
} |
mutex_exit(&uvm_pageqlock); |
mutex_exit(&uvm_pageqlock); |
|
if (error == 0 && memwrite) { |
|
genfs_markdirty(vp); |
|
} |
mutex_exit(&uobj->vmobjlock); |
mutex_exit(&uobj->vmobjlock); |
return (error); |
return error; |
} |
} |
|
|
int |
int |