--- ufs_inode.c	version 1.22.2.8, 2002/02/28 04:15:30
+++ ufs_inode.c	version 1.23, 2001/04/18 03:48:23
(lines marked "-" appear only in 1.22.2.8, lines marked "+" only in 1.23, unmarked lines in both)

 *      @(#)ufs_inode.c 8.9 (Berkeley) 5/14/95
 */

-#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD$");

#include "opt_quota.h"

#include <sys/param.h>

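A note on the hunk above: the two lines present only in 1.22.2.8 are the standard NetBSD kernel ident boilerplate. <sys/cdefs.h> supplies __KERNEL_RCSID(), which embeds the CVS revision string in the compiled object so the source revision can be read back out of a kernel binary. A minimal sketch of how the pair looks in a checked-out file where CVS keyword expansion has filled in the "$NetBSD$" string; the expanded ID below is a hypothetical example, not taken from the repository:

        #include <sys/cdefs.h>
        /* hypothetical expanded keyword, for illustration only */
        __KERNEL_RCSID(0, "$NetBSD: ufs_inode.c,v 1.23 2001/04/18 03:48:23 user Exp $");
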
         */
        if (ip->i_ffs_mode == 0)
                goto out;
-       if (ip->i_ffs_effnlink == 0 && DOINGSOFTDEP(vp))
-               softdep_releasefile(ip);

        if (ip->i_ffs_nlink <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
#ifdef QUOTA

                if (ip->i_ffs_size != 0) {
                        error = VOP_TRUNCATE(vp, (off_t)0, 0, NOCRED, p);
                }
-               /*
-                * Setting the mode to zero needs to wait for the inode
-                * to be written just as does a change to the link count.
-                * So, rather than creating a new entry point to do the
-                * same thing, we just use softdep_change_linkcnt().
-                */
                ip->i_ffs_rdev = 0;
                mode = ip->i_ffs_mode;
                ip->i_ffs_mode = 0;
                ip->i_flag |= IN_CHANGE | IN_UPDATE;
-               if (DOINGSOFTDEP(vp))
-                       softdep_change_linkcnt(ip);
                VOP_VFREE(vp, ip->i_number, mode);
        }
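
A note on the inode-deactivation hunk above (this is the body of ufs_inactive()): the calls present only in 1.22.2.8, softdep_releasefile() and softdep_change_linkcnt(), fire only when the filesystem was mounted with soft dependencies. For reference, a sketch of the guard they sit behind, assuming the usual definition of DOINGSOFTDEP() in the UFS headers of this era (verify against the real header before relying on it):

        /* assumed definition, not part of the diff: true only for softdep mounts */
        #define DOINGSOFTDEP(vp)        ((vp)->v_mount->mnt_flag & MNT_SOFTDEP)

When that guard is false the extra calls are skipped, so on a non-softdep mount the two revisions behave the same here.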

@@ version 1.22.2.8 line 179, version 1.23 line 166 @@ ufs_balloc_range(vp, off, len, cred, flags)

        struct ucred *cred;
        int flags;
{
-       off_t oldeof, neweof, oldeob, neweob, pagestart;
+       off_t oldeof, neweof, oldeob, neweob, oldpagestart, pagestart;
        struct uvm_object *uobj;
-       struct genfs_node *gp = VTOG(vp);
-       int i, delta, error, npages;
+       int i, delta, error, npages1, npages2;
        int bshift = vp->v_mount->mnt_fs_bshift;
        int bsize = 1 << bshift;
        int ppb = MAX(bsize >> PAGE_SHIFT, 1);
-       struct vm_page *pgs[ppb];
+       struct vm_page *pgs1[ppb], *pgs2[ppb];
        UVMHIST_FUNC("ufs_balloc_range"); UVMHIST_CALLED(ubchist);
        UVMHIST_LOG(ubchist, "vp %p off 0x%x len 0x%x u_size 0x%x",
-           vp, off, len, vp->v_size);
+           vp, off, len, vp->v_uvm.u_size);

-       oldeof = vp->v_size;
-       GOP_SIZE(vp, oldeof, &oldeob);
+       oldeof = vp->v_uvm.u_size;
+       error = VOP_SIZE(vp, oldeof, &oldeob);
+       if (error) {
+               return error;
+       }

-       neweof = MAX(vp->v_size, off + len);
-       GOP_SIZE(vp, neweof, &neweob);
+       neweof = MAX(vp->v_uvm.u_size, off + len);
+       error = VOP_SIZE(vp, neweof, &neweob);
+       if (error) {
+               return error;
+       }

        error = 0;
-       uobj = &vp->v_uobj;
-       pgs[0] = NULL;
+       uobj = &vp->v_uvm.u_obj;
+       pgs1[0] = pgs2[0] = NULL;

        /*
-        * read or create pages covering the range of the allocation and
-        * keep them locked until the new block is allocated, so there
-        * will be no window where the old contents of the new block are
-        * visible to racing threads.
+        * if the last block in the file is not a full block (ie. it is a
+        * fragment), and this allocation is causing the fragment to change
+        * size (either to expand the fragment or promote it to a full block),
+        * cache the old last block (at its new size).
         */

-       pagestart = trunc_page(off) & ~(bsize - 1);
-       npages = MIN(ppb, (round_page(neweob) - pagestart) >> PAGE_SHIFT);
-       memset(pgs, 0, npages * sizeof(struct vm_page *));
-       simple_lock(&uobj->vmobjlock);
-       error = VOP_GETPAGES(vp, pagestart, pgs, &npages, 0,
-           VM_PROT_READ, 0, PGO_SYNCIO|PGO_PASTEOF);
-       if (error) {
-               return error;
-       }
-       simple_lock(&uobj->vmobjlock);
-       uvm_lock_pageq();
-       for (i = 0; i < npages; i++) {
-               UVMHIST_LOG(ubchist, "got pgs[%d] %p", i, pgs[i],0,0);
-               KASSERT((pgs[i]->flags & PG_RELEASED) == 0);
-               pgs[i]->flags &= ~PG_CLEAN;
-               uvm_pageactivate(pgs[i]);
-       }
-       uvm_unlock_pageq();
-       simple_unlock(&uobj->vmobjlock);
+       oldpagestart = trunc_page(oldeof) & ~(bsize - 1);
+       if ((oldeob & (bsize - 1)) != 0 && oldeob != neweob) {
+               npages1 = MIN(ppb, (round_page(neweob) - oldpagestart) >>
+                   PAGE_SHIFT);
+               memset(pgs1, 0, npages1 * sizeof(struct vm_page *));
+               simple_lock(&uobj->vmobjlock);
+               error = VOP_GETPAGES(vp, oldpagestart, pgs1, &npages1,
+                   0, VM_PROT_READ, 0, PGO_SYNCIO|PGO_PASTEOF);
+               if (error) {
+                       goto out;
+               }
+               simple_lock(&uobj->vmobjlock);
+               uvm_lock_pageq();
+               for (i = 0; i < npages1; i++) {
+                       UVMHIST_LOG(ubchist, "got pgs1[%d] %p", i, pgs1[i],0,0);
+                       KASSERT((pgs1[i]->flags & PG_RELEASED) == 0);
+                       pgs1[i]->flags &= ~PG_CLEAN;
+                       uvm_pageactivate(pgs1[i]);
+               }
+               uvm_unlock_pageq();
+               simple_unlock(&uobj->vmobjlock);
+       }
+
+       /*
+        * cache the new range as well. this will create zeroed pages
+        * where the new block will be and keep them locked until the
+        * new block is allocated, so there will be no window where
+        * the old contents of the new block is visible to racing threads.
+        */
+
+       pagestart = trunc_page(off) & ~(bsize - 1);
+       if (pagestart != oldpagestart || pgs1[0] == NULL) {
+               npages2 = MIN(ppb, (round_page(neweob) - pagestart) >>
+                   PAGE_SHIFT);
+               memset(pgs2, 0, npages2 * sizeof(struct vm_page *));
+               simple_lock(&uobj->vmobjlock);
+               error = VOP_GETPAGES(vp, pagestart, pgs2, &npages2, 0,
+                   VM_PROT_READ, 0, PGO_SYNCIO|PGO_PASTEOF);
+               if (error) {
+                       goto out;
+               }
+               simple_lock(&uobj->vmobjlock);
+               uvm_lock_pageq();
+               for (i = 0; i < npages2; i++) {
+                       UVMHIST_LOG(ubchist, "got pgs2[%d] %p", i, pgs2[i],0,0);
+                       KASSERT((pgs2[i]->flags & PG_RELEASED) == 0);
+                       pgs2[i]->flags &= ~PG_CLEAN;
+                       uvm_pageactivate(pgs2[i]);
+               }
+               uvm_unlock_pageq();
+               simple_unlock(&uobj->vmobjlock);
+       }

        /*
         * adjust off to be block-aligned.
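
Both sides of the ufs_balloc_range() hunk above rely on the same alignment arithmetic: ppb is the number of VM pages per filesystem block, and the start of the busied page range is the target offset rounded down first to a page and then to a block boundary. The 1.23 side additionally checks whether the old end of backing store falls inside a block (meaning the last block is a fragment) and whether this allocation changes it; this reading assumes VOP_SIZE()/GOP_SIZE() return the byte size rounded up to the end of the last allocated fragment or block. A small standalone sketch of those computations with assumed sizes (4 KB pages, 8 KB blocks) and made-up offsets, illustrative only and not part of either revision:

        /* assumed geometry: 4 KB pages (PAGE_SHIFT 12), 8 KB filesystem blocks */
        #include <stdio.h>

        #define PAGE_SHIFT      12
        #define PAGE_SIZE       (1L << PAGE_SHIFT)

        int
        main(void)
        {
                long bshift = 13, bsize = 1L << bshift;         /* 8192 */
                long ppb = bsize >> PAGE_SHIFT;                 /* pages per block: 2 */
                long off = 20000;                               /* sample write offset */

                /* trunc_page(off) & ~(bsize - 1): round down to a block boundary */
                long pagestart = (off & ~(PAGE_SIZE - 1)) & ~(bsize - 1);

                /* sample: old backing ends in a 3 KB fragment, new backing is 8 KB */
                long oldeob = 3072, neweob = 8192;
                int frag_changing = (oldeob & (bsize - 1)) != 0 && oldeob != neweob;

                printf("ppb=%ld pagestart=%ld frag_changing=%d\n",
                    ppb, pagestart, frag_changing);
                return 0;
        }

Under those assumptions a write at offset 20000 busies the pages of the 8 KB block starting at byte 16384, and a 3 KB fragment growing into a full block is exactly the case where the 1.23 code takes the extra pgs1[] pass to cache the old last block.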

@@ version 1.22.2.8 line 240, version 1.23 line 265 @@ ufs_balloc_range(vp, off, len, cred, flags)

         * now allocate the range.
         */

-       lockmgr(&gp->g_glock, LK_EXCLUSIVE, NULL);
-       error = GOP_ALLOC(vp, off, len, flags, cred);
-       lockmgr(&gp->g_glock, LK_RELEASE, NULL);
+       lockmgr(&vp->v_glock, LK_EXCLUSIVE, NULL);
+       error = VOP_BALLOCN(vp, off, len, cred, flags);
+       lockmgr(&vp->v_glock, LK_RELEASE, NULL);

        /*
-        * clear PG_RDONLY on any pages we are holding
-        * (since they now have backing store) and unbusy them.
+        * unbusy any pages we are holding.
+        * if we got an error, free any pages we created past the old eob.
         */

+out:
        simple_lock(&uobj->vmobjlock);
-       for (i = 0; i < npages; i++) {
-               pgs[i]->flags &= ~PG_RDONLY;
-               if (error) {
-                       pgs[i]->flags |= PG_RELEASED;
-               }
-       }
        if (error) {
-               uvm_lock_pageq();
-               uvm_page_unbusy(pgs, npages);
-               uvm_unlock_pageq();
-       } else {
-               uvm_page_unbusy(pgs, npages);
+               (void) (uobj->pgops->pgo_flush)(uobj, round_page(oldeob), 0,
+                   PGO_FREE);
+       }
+       if (pgs1[0] != NULL) {
+               uvm_page_unbusy(pgs1, npages1);
+
+               /*
+                * The data in the frag might be moving to a new disk location.
+                * We need to flush pages to the new disk locations.
+                */
+
+               if ((flags & B_SYNC) != 0)
+                       (*uobj->pgops->pgo_flush)(uobj, oldeof & ~(bsize - 1),
+                           MIN((oldeof + bsize) & ~(bsize - 1), neweof),
+                           PGO_CLEANIT | PGO_SYNCIO);
+       }
+       if (pgs2[0] != NULL) {
+               uvm_page_unbusy(pgs2, npages2);
        }
        simple_unlock(&uobj->vmobjlock);
        return error;
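
The cleanup paths above also differ: 1.22.2.8 clears PG_RDONLY on the pages it holds (they now have backing store) and marks them PG_RELEASED on error, while 1.23 frees any pages created past the old end of backing store on error and, for a synchronous request (B_SYNC), writes the cached old last block out so its data reaches whatever new disk address the fragment may have moved to. The flushed range is the old EOF rounded down and up to block boundaries, clamped to the new EOF. A worked example of that range under the same assumed sizes as the earlier sketch, again illustrative only:

        /* assumed: 8 KB blocks; the file sizes are made-up samples */
        #include <stdio.h>

        int
        main(void)
        {
                long bsize = 8192;
                long oldeof = 3000, neweof = 20000;

                long start = oldeof & ~(bsize - 1);             /* 0 */
                long end = (oldeof + bsize) & ~(bsize - 1);     /* 8192 */
                if (end > neweof)
                        end = neweof;                           /* MIN(..., neweof) */

                /* the PGO_CLEANIT|PGO_SYNCIO flush covers [start, end) */
                printf("flush range [%ld, %ld)\n", start, end);
                return 0;
        }

So with an old EOF of 3000 bytes the flush covers bytes 0 through 8191, the block that held the old last fragment.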