--- version 1.30.2.7, 2002/04/01 07:49:19
+++ version 1.30.2.8, 2002/11/11 22:16:57
@@ ... @@
 	int blkoffset, error, flags, ioflag, resid, size, xfersize;
 	int bsize, aflag;
 	int ubc_alloc_flags;
+	int extended = 0;
 	void *win;
 	vsize_t bytelen;
 	boolean_t async;
@@ ... @@
 	ubc_alloc_flags = UBC_WRITE;
 	while (uio->uio_resid > 0) {
+		boolean_t extending;	/* if we're extending a whole block */
+		off_t newoff;
+
 		oldoff = uio->uio_offset;
 		blkoffset = blkoff(fs, uio->uio_offset);
 		bytelen = MIN(fs->fs_bsize - blkoffset, uio->uio_resid);
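Aside, unchanged by this revision: each pass of the loop handles at most one filesystem block. blkoff() yields the offset within the current block, and the MIN() clamps the window to the rest of that block or the rest of the request, whichever is smaller. A standalone sketch of the same arithmetic, with made-up values standing in for fs->fs_bsize and the uio fields:

	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		int64_t bsize = 8192;	/* stand-in for fs->fs_bsize */
		int64_t offset = 12000;	/* stand-in for uio->uio_offset */
		int64_t resid = 20000;	/* stand-in for uio->uio_resid */

		while (resid > 0) {
			int64_t blkoffset = offset % bsize;	/* blkoff() */
			int64_t bytelen = bsize - blkoffset;	/* rest of block */

			if (bytelen > resid)
				bytelen = resid;		/* the MIN() clamp */
			printf("copy %lld bytes at offset %lld\n",
			    (long long)bytelen, (long long)offset);
			offset += bytelen;
			resid -= bytelen;
		}
		return 0;
	}

The first pass copies only up to the next block boundary; every later pass starts block-aligned, so at most the first and last windows are partial.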
@@ ... @@
 		 * since the new blocks will be inaccessible until the write
 		 * is complete.
 		 */
-		if (uio->uio_offset < preallocoff ||
-		    uio->uio_offset >= endallocoff) {
+		extending = uio->uio_offset >= preallocoff &&
+		    uio->uio_offset < endallocoff;
+		if (!extending) {
 			error = ufs_balloc_range(vp, uio->uio_offset, bytelen,
 			    cred, aflag);
 			if (error) {
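The rewritten test is the old one with its negation named: by De Morgan's law, (off < pre || off >= end) equals !(off >= pre && off < end), and the in-range case is exactly what later code needs to know as "extending". A small check of the equivalence (the pre/end bounds are arbitrary):

	#include <assert.h>
	#include <stdbool.h>

	int
	main(void)
	{
		long pre = 4096, end = 8192;	/* arbitrary bounds */
		long off;

		for (off = 0; off < 12288; off += 512) {
			bool old_form = off < pre || off >= end;
			bool extending = off >= pre && off < end;

			/* both revisions take the balloc path together */
			assert(old_form == !extending);
		}
		return 0;
	}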
@@ ... @@
 		win = ubc_alloc(&vp->v_uobj, uio->uio_offset, &bytelen,
 		    ubc_alloc_flags);
 		error = uiomove(win, bytelen, uio);
-		ubc_release(win, 0);
-		if (error) {
-			break;
+		if (error && extending) {
+			/*
+			 * if we haven't initialized the pages yet,
+			 * do it now. it's safe to use memset here
+			 * because we just mapped the pages above.
+			 */
+			memset(win, 0, bytelen);
 		}
+		ubc_release(win, 0);

 		/*
 		 * update UVM's notion of the size now that we've
 		 * copied the data into the vnode's pages.
+		 *
+		 * we should update the size even when uiomove failed.
+		 * otherwise ffs_truncate can't flush soft update states.
 		 */
-		if (vp->v_size < uio->uio_offset) {
-			uvm_vnp_setsize(vp, uio->uio_offset);
+		newoff = oldoff + bytelen;
+		if (vp->v_size < newoff) {
+			uvm_vnp_setsize(vp, newoff);
+			extended = 1;
 		}
+
+		if (error) {
+			break;
+		}
 	}

 	/*
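The two fixes in the hunk above hang together: the error break is deferred so that the window can be zeroed first (the pages were freshly created and never written, so memset over the still-mapped window is safe) and so that the vnode size is raised to cover the allocated pages even on failure, which the new comment says ffs_truncate needs in order to flush soft-update state. A minimal model of the reordered tail of the loop, with a plain buffer standing in for the ubc_alloc() window and invented names throughout:

	#include <stdio.h>
	#include <string.h>

	struct vnode_model {
		long v_size;
	};

	/* hypothetical stand-in for one pass of the write loop's tail */
	static int
	loop_tail(struct vnode_model *vp, char *win, size_t bytelen,
	    long oldoff, int error, int extending, int *extended)
	{
		long newoff = oldoff + (long)bytelen;

		if (error && extending) {
			/* freshly created pages: zero before releasing */
			memset(win, 0, bytelen);
		}
		/* ubc_release(win, 0) would happen here */

		/* raise the size even on error, as the comment requires */
		if (vp->v_size < newoff) {
			vp->v_size = newoff;	/* uvm_vnp_setsize() analogue */
			*extended = 1;
		}
		return error;			/* nonzero: caller breaks */
	}

	int
	main(void)
	{
		struct vnode_model vp = { 0 };
		char win[8192];
		int extended = 0;

		/* simulate a failed uiomove() while extending the file */
		loop_tail(&vp, win, sizeof(win), 0, 1, 1, &extended);
		printf("v_size=%ld extended=%d\n", vp.v_size, extended);
		return 0;
	}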
@@ ... @@
 		if (uio->uio_offset + xfersize > ip->i_ffs_size) {
 			ip->i_ffs_size = uio->uio_offset + xfersize;
 			uvm_vnp_setsize(vp, ip->i_ffs_size);
+			extended = 1;
 		}
 		size = BLKSIZE(fs, ip, lbn) - bp->b_resid;
 		if (xfersize > size)
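Context for the unchanged clamp above: BLKSIZE() is fragment-aware, so the last block of a file can be shorter than a full fs_bsize, and subtracting bp->b_resid discounts whatever the buffer layer failed to transfer, so xfersize never exceeds the bytes actually backed by the buffer. A simplified model of the fragment rounding; the parameters are illustrative, not real FFS geometry:

	#include <stdio.h>

	/*
	 * Simplified stand-in for BLKSIZE(fs, ip, lbn): full-size blocks
	 * except the last, which rounds the file's tail up to whole
	 * fragments.
	 */
	static long
	blk_size(long filesize, long bsize, long fsize, long lbn)
	{
		long last = filesize / bsize;	/* last logical block */

		if (lbn < last)
			return bsize;
		return ((filesize % bsize) + fsize - 1) / fsize * fsize;
	}

	int
	main(void)
	{
		long bsize = 8192, fsize = 1024, filesize = 20000;
		long lbn;

		for (lbn = 0; lbn <= filesize / bsize; lbn++)
			printf("lbn %ld: %ld bytes\n", lbn,
			    blk_size(filesize, bsize, fsize, lbn));
		return 0;
	}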
@@ ... @@
 	ip->i_flag |= IN_CHANGE | IN_UPDATE;
 	if (resid > uio->uio_resid && ap->a_cred && ap->a_cred->cr_uid != 0)
 		ip->i_ffs_mode &= ~(ISUID | ISGID);
+	if (resid > uio->uio_resid)
+		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
 	if (error) {
 		(void) VOP_TRUNCATE(vp, osize, ioflag & IO_SYNC, ap->a_cred,
 		    uio->uio_procp);
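The new VN_KNOTE call is the point of all the `extended` bookkeeping: if the write consumed any bytes (resid > uio->uio_resid), kqueue watchers get NOTE_WRITE, with NOTE_EXTEND or'd in when the file grew. A sketch of how the hint word is built; the flag values match the usual <sys/event.h> definitions but treat them as illustrative:

	#include <stdio.h>

	#define NOTE_WRITE	0x0002	/* data contents changed */
	#define NOTE_EXTEND	0x0004	/* size increased */

	int
	main(void)
	{
		long resid = 4096;	/* bytes originally requested */
		long uio_resid = 0;	/* bytes left untransferred */
		int extended = 1;	/* latched when the file grew */

		if (resid > uio_resid) {
			int hint = NOTE_WRITE | (extended ? NOTE_EXTEND : 0);

			/* VN_KNOTE(vp, hint) would wake kevent() watchers */
			printf("knote hint = 0x%x\n", hint);
		}
		return 0;
	}

Note that `extended` is latched, never cleared, so a single notification covers a write that grew the file in any of its loop iterations, in either the UBC path or the buffer-cache path above.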