Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. =================================================================== RCS file: /ftp/cvs/cvsroot/src/sys/ufs/ufs/ufs_dirhash.c,v rcsdiff: /ftp/cvs/cvsroot/src/sys/ufs/ufs/ufs_dirhash.c,v: warning: Unknown phrases like `commitid ...;' are present. retrieving revision 1.8 retrieving revision 1.34.22.1 diff -u -p -r1.8 -r1.34.22.1 --- src/sys/ufs/ufs/ufs_dirhash.c 2006/01/13 00:50:25 1.8 +++ src/sys/ufs/ufs/ufs_dirhash.c 2013/06/23 06:18:40 1.34.22.1 @@ -1,4 +1,4 @@ -/* $NetBSD: ufs_dirhash.c,v 1.8 2006/01/13 00:50:25 yamt Exp $ */ +/* $NetBSD: ufs_dirhash.c,v 1.34.22.1 2013/06/23 06:18:40 tls Exp $ */ /* * Copyright (c) 2001, 2002 Ian Dowse. All rights reserved. @@ -27,6 +27,9 @@ * $FreeBSD: src/sys/ufs/ufs/ufs_dirhash.c,v 1.3.2.8 2004/12/08 11:54:13 dwmalone Exp $ */ +#include +__KERNEL_RCSID(0, "$NetBSD: ufs_dirhash.c,v 1.34.22.1 2013/06/23 06:18:40 tls Exp $"); + /* * This implements a hash-based lookup scheme for UFS directories. */ @@ -34,7 +37,7 @@ #include #include #include -#include +#include #include #include #include @@ -43,8 +46,8 @@ #include #include #include +#include -#include #include #include #include @@ -57,12 +60,10 @@ #define OFSFMT(ip) ((ip)->i_ump->um_maxsymlinklen <= 0) #define BLKFREE2IDX(n) ((n) > DH_NFSTATS ? DH_NFSTATS : (n)) -static MALLOC_DEFINE(M_DIRHASH, "UFS dirhash", "UFS directory hash tables"); - -static int ufs_dirhashminblks = 5; -static int ufs_dirhashmaxmem = 2 * 1024 * 1024; -static int ufs_dirhashmem; -static int ufs_dirhashcheck = 0; +static u_int ufs_dirhashminblks = 5; +static u_int ufs_dirhashmaxmem = 2 * 1024 * 1024; +static u_int ufs_dirhashmem; +static u_int ufs_dirhashcheck = 0; static int ufsdirhash_hash(struct dirhash *dh, const char *name, int namelen); static void ufsdirhash_adjfree(struct dirhash *dh, doff_t offset, int diff, @@ -74,19 +75,36 @@ static doff_t ufsdirhash_getprev(struct int dirblksiz); static int ufsdirhash_recycle(int wanted); -POOL_INIT(ufsdirhash_pool, DH_NBLKOFF * sizeof(daddr_t), 0, 0, 0, "ufsdirhash", - &pool_allocator_nointr); +static pool_cache_t ufsdirhashblk_cache; +static pool_cache_t ufsdirhash_cache; -#define DIRHASHLIST_LOCK() do { } while (0) -#define DIRHASHLIST_UNLOCK() do { } while (0) -#define DIRHASH_LOCK(dh) do { } while (0) -#define DIRHASH_UNLOCK(dh) do { } while (0) -#define DIRHASH_BLKALLOC_WAITOK() pool_get(&ufsdirhash_pool, PR_WAITOK) -#define DIRHASH_BLKFREE(ptr) pool_put(&ufsdirhash_pool, ptr) +#define DIRHASHLIST_LOCK() mutex_enter(&ufsdirhash_lock) +#define DIRHASHLIST_UNLOCK() mutex_exit(&ufsdirhash_lock) +#define DIRHASH_LOCK(dh) mutex_enter(&(dh)->dh_lock) +#define DIRHASH_UNLOCK(dh) mutex_exit(&(dh)->dh_lock) +#define DIRHASH_BLKALLOC() \ + pool_cache_get(ufsdirhashblk_cache, PR_NOWAIT) +#define DIRHASH_BLKFREE(ptr) \ + pool_cache_put(ufsdirhashblk_cache, ptr) /* Dirhash list; recently-used entries are near the tail. */ static TAILQ_HEAD(, dirhash) ufsdirhash_list; +/* Protects: ufsdirhash_list, `dh_list' field, ufs_dirhashmem. */ +static kmutex_t ufsdirhash_lock; + +static struct sysctllog *ufsdirhash_sysctl_log; + +/* + * Locking order: + * ufsdirhash_lock + * dh_lock + * + * The dh_lock mutex should be acquired either via the inode lock, or via + * ufsdirhash_lock. Only the owner of the inode may free the associated + * dirhash, but anything can steal its memory and set dh_hash to NULL. + */ + /* * Attempt to build up a hash table for the directory contents in * inode 'ip'. 
Returns 0 on success, or -1 of the operation failed. @@ -122,13 +140,13 @@ ufsdirhash_build(struct inode *ip) } /* Don't hash removed directories. */ - if (ip->i_ffs_effnlink == 0) + if (ip->i_nlink == 0) return (-1); vp = ip->i_vnode; /* Allocate 50% more entries than this dir size could ever need. */ KASSERT(ip->i_size >= dirblksiz); - nslots = ip->i_size / DIRECTSIZ(1); + nslots = ip->i_size / UFS_DIRECTSIZ(1); nslots = (nslots * 3 + 1) / 2; narrays = howmany(nslots, DH_NBLKOFF); nslots = narrays * DH_NBLKOFF; @@ -138,39 +156,39 @@ ufsdirhash_build(struct inode *ip) memreqd = sizeof(*dh) + narrays * sizeof(*dh->dh_hash) + narrays * DH_NBLKOFF * sizeof(**dh->dh_hash) + nblocks * sizeof(*dh->dh_blkfree); - DIRHASHLIST_LOCK(); - if (memreqd + ufs_dirhashmem > ufs_dirhashmaxmem) { - DIRHASHLIST_UNLOCK(); + + while (atomic_add_int_nv(&ufs_dirhashmem, memreqd) > + ufs_dirhashmaxmem) { + atomic_add_int(&ufs_dirhashmem, -memreqd); if (memreqd > ufs_dirhashmaxmem / 2) return (-1); - /* Try to free some space. */ if (ufsdirhash_recycle(memreqd) != 0) return (-1); - /* Enough was freed, and list has been locked. */ + else + DIRHASHLIST_UNLOCK(); } - ufs_dirhashmem += memreqd; - DIRHASHLIST_UNLOCK(); /* * Use non-blocking mallocs so that we will revert to a linear * lookup on failure rather than potentially blocking forever. */ - MALLOC(dh, struct dirhash *, sizeof *dh, M_DIRHASH, M_NOWAIT | M_ZERO); + dh = pool_cache_get(ufsdirhash_cache, PR_NOWAIT); if (dh == NULL) { - DIRHASHLIST_LOCK(); - ufs_dirhashmem -= memreqd; - DIRHASHLIST_UNLOCK(); + atomic_add_int(&ufs_dirhashmem, -memreqd); return (-1); } - MALLOC(dh->dh_hash, doff_t **, narrays * sizeof(dh->dh_hash[0]), - M_DIRHASH, M_NOWAIT | M_ZERO); - MALLOC(dh->dh_blkfree, u_int8_t *, nblocks * sizeof(dh->dh_blkfree[0]), - M_DIRHASH, M_NOWAIT); + memset(dh, 0, sizeof(*dh)); + mutex_init(&dh->dh_lock, MUTEX_DEFAULT, IPL_NONE); + DIRHASH_LOCK(dh); + dh->dh_hashsz = narrays * sizeof(dh->dh_hash[0]); + dh->dh_hash = kmem_zalloc(dh->dh_hashsz, KM_NOSLEEP); + dh->dh_blkfreesz = nblocks * sizeof(dh->dh_blkfree[0]); + dh->dh_blkfree = kmem_zalloc(dh->dh_blkfreesz, KM_NOSLEEP); if (dh->dh_hash == NULL || dh->dh_blkfree == NULL) goto fail; for (i = 0; i < narrays; i++) { - if ((dh->dh_hash[i] = DIRHASH_BLKALLOC_WAITOK()) == NULL) + if ((dh->dh_hash[i] = DIRHASH_BLKALLOC()) == NULL) goto fail; for (j = 0; j < DH_NBLKOFF; j++) dh->dh_hash[i][j] = DIRHASH_EMPTY; @@ -196,13 +214,13 @@ ufsdirhash_build(struct inode *ip) while (pos < ip->i_size) { if ((curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) != 0) { - preempt(1); + preempt(); } /* If necessary, get the next directory block. */ if ((pos & bmask) == 0) { if (bp != NULL) - brelse(bp); - if (UFS_BLKATOFF(vp, (off_t)pos, NULL, &bp) != 0) + brelse(bp, 0); + if (ufs_blkatoff(vp, (off_t)pos, NULL, &bp, false) != 0) goto fail; } @@ -211,7 +229,7 @@ ufsdirhash_build(struct inode *ip) if (ep->d_reclen == 0 || ep->d_reclen > dirblksiz - (pos & (dirblksiz - 1))) { /* Corrupted directory. 
*/ - brelse(bp); + brelse(bp, 0); goto fail; } if (ep->d_ino != 0) { @@ -221,34 +239,35 @@ ufsdirhash_build(struct inode *ip) slot = WRAPINCR(slot, dh->dh_hlen); dh->dh_hused++; DH_ENTRY(dh, slot) = pos; - ufsdirhash_adjfree(dh, pos, -DIRSIZ(0, ep, needswap), + ufsdirhash_adjfree(dh, pos, -UFS_DIRSIZ(0, ep, needswap), dirblksiz); } pos += ep->d_reclen; } if (bp != NULL) - brelse(bp); + brelse(bp, 0); DIRHASHLIST_LOCK(); TAILQ_INSERT_TAIL(&ufsdirhash_list, dh, dh_list); dh->dh_onlist = 1; + DIRHASH_UNLOCK(dh); DIRHASHLIST_UNLOCK(); return (0); fail: + DIRHASH_UNLOCK(dh); if (dh->dh_hash != NULL) { for (i = 0; i < narrays; i++) if (dh->dh_hash[i] != NULL) DIRHASH_BLKFREE(dh->dh_hash[i]); - FREE(dh->dh_hash, M_DIRHASH); + kmem_free(dh->dh_hash, dh->dh_hashsz); } if (dh->dh_blkfree != NULL) - FREE(dh->dh_blkfree, M_DIRHASH); - FREE(dh, M_DIRHASH); + kmem_free(dh->dh_blkfree, dh->dh_blkfreesz); + mutex_destroy(&dh->dh_lock); + pool_cache_put(ufsdirhash_cache, dh); ip->i_dirhash = NULL; - DIRHASHLIST_LOCK(); - ufs_dirhashmem -= memreqd; - DIRHASHLIST_UNLOCK(); + atomic_add_int(&ufs_dirhashmem, -memreqd); return (-1); } @@ -263,31 +282,30 @@ ufsdirhash_free(struct inode *ip) if ((dh = ip->i_dirhash) == NULL) return; - DIRHASHLIST_LOCK(); - DIRHASH_LOCK(dh); - if (dh->dh_onlist) - TAILQ_REMOVE(&ufsdirhash_list, dh, dh_list); - DIRHASH_UNLOCK(dh); - DIRHASHLIST_UNLOCK(); - /* The dirhash pointed to by 'dh' is exclusively ours now. */ + if (dh->dh_onlist) { + DIRHASHLIST_LOCK(); + if (dh->dh_onlist) + TAILQ_REMOVE(&ufsdirhash_list, dh, dh_list); + DIRHASHLIST_UNLOCK(); + } + /* The dirhash pointed to by 'dh' is exclusively ours now. */ mem = sizeof(*dh); if (dh->dh_hash != NULL) { for (i = 0; i < dh->dh_narrays; i++) DIRHASH_BLKFREE(dh->dh_hash[i]); - FREE(dh->dh_hash, M_DIRHASH); - FREE(dh->dh_blkfree, M_DIRHASH); - mem += dh->dh_narrays * sizeof(*dh->dh_hash) + - dh->dh_narrays * DH_NBLKOFF * sizeof(**dh->dh_hash) + - dh->dh_nblk * sizeof(*dh->dh_blkfree); + kmem_free(dh->dh_hash, dh->dh_hashsz); + kmem_free(dh->dh_blkfree, dh->dh_blkfreesz); + mem += dh->dh_hashsz; + mem += dh->dh_narrays * DH_NBLKOFF * sizeof(**dh->dh_hash); + mem += dh->dh_nblk * sizeof(*dh->dh_blkfree); } - FREE(dh, M_DIRHASH); + mutex_destroy(&dh->dh_lock); + pool_cache_put(ufsdirhash_cache, dh); ip->i_dirhash = NULL; - DIRHASHLIST_LOCK(); - ufs_dirhashmem -= mem; - DIRHASHLIST_UNLOCK(); + atomic_add_int(&ufs_dirhashmem, -mem); } /* @@ -298,7 +316,7 @@ ufsdirhash_free(struct inode *ip) * If successful, the directory offset is stored in *offp, and a * pointer to a struct buf containing the entry is stored in *bpp. If * prevoffp is non-NULL, the offset of the previous entry within - * the DIRBLKSIZ-sized block is stored in *prevoffp (if the entry + * the UFS_DIRBLKSIZ-sized block is stored in *prevoffp (if the entry * is the first in a block, the start of the block is used). */ int @@ -316,13 +334,14 @@ ufsdirhash_lookup(struct inode *ip, cons if ((dh = ip->i_dirhash) == NULL) return (EJUSTRETURN); + /* * Move this dirhash towards the end of the list if it has a - * score higher than the next entry, and acquire the dh_mtx. + * score higher than the next entry, and acquire the dh_lock. * Optimise the case where it's already the last by performing * an unlocked read of the TAILQ_NEXT pointer. * - * In both cases, end up holding just dh_mtx. + * In both cases, end up holding just dh_lock. 
*/ if (TAILQ_NEXT(dh, dh_list) != NULL) { DIRHASHLIST_LOCK(); @@ -389,33 +408,36 @@ restart: slot = WRAPINCR(slot, dh->dh_hlen)) { if (offset == DIRHASH_DEL) continue; - DIRHASH_UNLOCK(dh); if (offset < 0 || offset >= ip->i_size) panic("ufsdirhash_lookup: bad offset in hash array"); if ((offset & ~bmask) != blkoff) { if (bp != NULL) - brelse(bp); + brelse(bp, 0); blkoff = offset & ~bmask; - if (UFS_BLKATOFF(vp, (off_t)blkoff, NULL, &bp) != 0) + if (ufs_blkatoff(vp, (off_t)blkoff, + NULL, &bp, false) != 0) { + DIRHASH_UNLOCK(dh); return (EJUSTRETURN); + } } - dp = (struct direct *)(bp->b_data + (offset & bmask)); + dp = (struct direct *)((char *)bp->b_data + (offset & bmask)); if (dp->d_reclen == 0 || dp->d_reclen > dirblksiz - (offset & (dirblksiz - 1))) { /* Corrupted directory. */ - brelse(bp); + DIRHASH_UNLOCK(dh); + brelse(bp, 0); return (EJUSTRETURN); } if (dp->d_namlen == namelen && - bcmp(dp->d_name, name, namelen) == 0) { + memcmp(dp->d_name, name, namelen) == 0) { /* Found. Get the prev offset if needed. */ if (prevoffp != NULL) { if (offset & (dirblksiz - 1)) { prevoff = ufsdirhash_getprev(dp, offset, dirblksiz); if (prevoff == -1) { - brelse(bp); + brelse(bp, 0); return (EJUSTRETURN); } } else @@ -426,18 +448,18 @@ restart: /* Check for sequential access, and update offset. */ if (dh->dh_seqopt == 0 && dh->dh_seqoff == offset) dh->dh_seqopt = 1; - dh->dh_seqoff = offset + DIRSIZ(0, dp, needswap); + dh->dh_seqoff = offset + UFS_DIRSIZ(0, dp, needswap); + DIRHASH_UNLOCK(dh); *bpp = bp; *offp = offset; return (0); } - DIRHASH_LOCK(dh); if (dh->dh_hash == NULL) { DIRHASH_UNLOCK(dh); if (bp != NULL) - brelse(bp); + brelse(bp, 0); ufsdirhash_free(ip); return (EJUSTRETURN); } @@ -452,7 +474,7 @@ restart: } DIRHASH_UNLOCK(dh); if (bp != NULL) - brelse(bp); + brelse(bp, 0); return (ENOENT); } @@ -461,7 +483,7 @@ restart: * the offset of the directory entry that begins the free space. * This will either be the offset of an existing entry that has free * space at the end, or the offset of an entry with d_ino == 0 at - * the start of a DIRBLKSIZ block. + * the start of a UFS_DIRBLKSIZ block. * * To use the space, the caller may need to compact existing entries in * the directory. The total number of bytes in all of the entries involved @@ -485,6 +507,7 @@ ufsdirhash_findfree(struct inode *ip, in if ((dh = ip->i_dirhash) == NULL) return (-1); + DIRHASH_LOCK(dh); if (dh->dh_hash == NULL) { DIRHASH_UNLOCK(dh); @@ -504,24 +527,27 @@ ufsdirhash_findfree(struct inode *ip, in KASSERT(dirblock < dh->dh_nblk && dh->dh_blkfree[dirblock] >= howmany(slotneeded, DIRALIGN)); - DIRHASH_UNLOCK(dh); pos = dirblock * dirblksiz; - error = UFS_BLKATOFF(ip->i_vnode, (off_t)pos, (void *)&dp, &bp); - if (error) + error = ufs_blkatoff(ip->i_vnode, (off_t)pos, (void *)&dp, &bp, false); + if (error) { + DIRHASH_UNLOCK(dh); return (-1); + } /* Find the first entry with free space. 
*/ for (i = 0; i < dirblksiz; ) { if (dp->d_reclen == 0) { - brelse(bp); + DIRHASH_UNLOCK(dh); + brelse(bp, 0); return (-1); } - if (dp->d_ino == 0 || dp->d_reclen > DIRSIZ(0, dp, needswap)) + if (dp->d_ino == 0 || dp->d_reclen > UFS_DIRSIZ(0, dp, needswap)) break; i += dp->d_reclen; dp = (struct direct *)((char *)dp + dp->d_reclen); } if (i > dirblksiz) { - brelse(bp); + DIRHASH_UNLOCK(dh); + brelse(bp, 0); return (-1); } slotstart = pos + i; @@ -531,21 +557,24 @@ ufsdirhash_findfree(struct inode *ip, in while (i < dirblksiz && freebytes < slotneeded) { freebytes += dp->d_reclen; if (dp->d_ino != 0) - freebytes -= DIRSIZ(0, dp, needswap); + freebytes -= UFS_DIRSIZ(0, dp, needswap); if (dp->d_reclen == 0) { - brelse(bp); + DIRHASH_UNLOCK(dh); + brelse(bp, 0); return (-1); } i += dp->d_reclen; dp = (struct direct *)((char *)dp + dp->d_reclen); } if (i > dirblksiz) { - brelse(bp); + DIRHASH_UNLOCK(dh); + brelse(bp, 0); return (-1); } if (freebytes < slotneeded) panic("ufsdirhash_findfree: free mismatch"); - brelse(bp); + DIRHASH_UNLOCK(dh); + brelse(bp, 0); *slotsize = pos + i - slotstart; return (slotstart); } @@ -563,6 +592,7 @@ ufsdirhash_enduseful(struct inode *ip) if ((dh = ip->i_dirhash) == NULL) return (-1); + DIRHASH_LOCK(dh); if (dh->dh_hash == NULL) { DIRHASH_UNLOCK(dh); @@ -597,6 +627,7 @@ ufsdirhash_add(struct inode *ip, struct if ((dh = ip->i_dirhash) == NULL) return; + DIRHASH_LOCK(dh); if (dh->dh_hash == NULL) { DIRHASH_UNLOCK(dh); @@ -624,7 +655,7 @@ ufsdirhash_add(struct inode *ip, struct DH_ENTRY(dh, slot) = offset; /* Update the per-block summary info. */ - ufsdirhash_adjfree(dh, offset, -DIRSIZ(0, dirp, needswap), dirblksiz); + ufsdirhash_adjfree(dh, offset, -UFS_DIRSIZ(0, dirp, needswap), dirblksiz); DIRHASH_UNLOCK(dh); } @@ -643,6 +674,7 @@ ufsdirhash_remove(struct inode *ip, stru if ((dh = ip->i_dirhash) == NULL) return; + DIRHASH_LOCK(dh); if (dh->dh_hash == NULL) { DIRHASH_UNLOCK(dh); @@ -658,7 +690,7 @@ ufsdirhash_remove(struct inode *ip, stru ufsdirhash_delslot(dh, slot); /* Update the per-block summary info. */ - ufsdirhash_adjfree(dh, offset, DIRSIZ(0, dirp, needswap), dirblksiz); + ufsdirhash_adjfree(dh, offset, UFS_DIRSIZ(0, dirp, needswap), dirblksiz); DIRHASH_UNLOCK(dh); } @@ -692,7 +724,7 @@ ufsdirhash_move(struct inode *ip, struct /* * Inform dirhash that the directory has grown by one block that - * begins at offset (i.e. the new length is offset + DIRBLKSIZ). + * begins at offset (i.e. the new length is offset + UFS_DIRBLKSIZ). */ void ufsdirhash_newblk(struct inode *ip, doff_t offset) @@ -739,6 +771,7 @@ ufsdirhash_dirtrunc(struct inode *ip, do if ((dh = ip->i_dirhash) == NULL) return; + DIRHASH_LOCK(dh); if (dh->dh_hash == NULL) { DIRHASH_UNLOCK(dh); @@ -799,6 +832,7 @@ ufsdirhash_checkblock(struct inode *ip, return; if ((dh = ip->i_dirhash) == NULL) return; + DIRHASH_LOCK(dh); if (dh->dh_hash == NULL) { DIRHASH_UNLOCK(dh); @@ -834,7 +868,7 @@ ufsdirhash_checkblock(struct inode *ip, /* Check that the entry exists (will panic if it doesn't). */ ufsdirhash_findslot(dh, dp->d_name, dp->d_namlen, offset + i); - nfree += dp->d_reclen - DIRSIZ(0, dp, needswap); + nfree += dp->d_reclen - UFS_DIRSIZ(0, dp, needswap); } if (i != dirblksiz) panic("ufsdirhash_checkblock: bad dir end"); @@ -875,7 +909,7 @@ ufsdirhash_hash(struct dirhash *dh, cons * by the value specified by `diff'. 
* * The caller must ensure we have exclusive access to `dh'; normally - * that means that dh_mtx should be held, but this is also called + * that means that dh_lock should be held, but this is also called * from ufsdirhash_build() where exclusive access can be assumed. */ static void @@ -883,6 +917,8 @@ ufsdirhash_adjfree(struct dirhash *dh, d { int block, i, nfidx, ofidx; + KASSERT(mutex_owned(&dh->dh_lock)); + /* Update the per-block summary info. */ block = offset / dirblksiz; KASSERT(block < dh->dh_nblk && block < dh->dh_dirblks); @@ -919,6 +955,8 @@ ufsdirhash_findslot(struct dirhash *dh, { int slot; + KASSERT(mutex_owned(&dh->dh_lock)); + /* Find the entry. */ KASSERT(dh->dh_hused < dh->dh_hlen); slot = ufsdirhash_hash(dh, name, namelen); @@ -941,6 +979,8 @@ ufsdirhash_delslot(struct dirhash *dh, i { int i; + KASSERT(mutex_owned(&dh->dh_lock)); + /* Mark the entry as deleted. */ DH_ENTRY(dh, slot) = DIRHASH_DEL; @@ -960,7 +1000,7 @@ ufsdirhash_delslot(struct dirhash *dh, i /* * Given a directory entry and its offset, find the offset of the - * previous entry in the same DIRBLKSIZ-sized block. Returns an + * previous entry in the same UFS_DIRBLKSIZ-sized block. Returns an * offset, or -1 if there is no previous entry in the block or some * other problem occurred. */ @@ -1002,6 +1042,7 @@ ufsdirhash_recycle(int wanted) doff_t **hash; u_int8_t *blkfree; int i, mem, narrays; + size_t hashsz, blkfreesz; DIRHASHLIST_LOCK(); while (wanted + ufs_dirhashmem > ufs_dirhashmaxmem) { @@ -1024,8 +1065,10 @@ ufsdirhash_recycle(int wanted) TAILQ_REMOVE(&ufsdirhash_list, dh, dh_list); dh->dh_onlist = 0; hash = dh->dh_hash; + hashsz = dh->dh_hashsz; dh->dh_hash = NULL; blkfree = dh->dh_blkfree; + blkfreesz = dh->dh_blkfreesz; dh->dh_blkfree = NULL; narrays = dh->dh_narrays; mem = narrays * sizeof(*dh->dh_hash) + @@ -1035,87 +1078,94 @@ ufsdirhash_recycle(int wanted) /* Unlock everything, free the detached memory. */ DIRHASH_UNLOCK(dh); DIRHASHLIST_UNLOCK(); + for (i = 0; i < narrays; i++) DIRHASH_BLKFREE(hash[i]); - FREE(hash, M_DIRHASH); - FREE(blkfree, M_DIRHASH); + kmem_free(hash, hashsz); + kmem_free(blkfree, blkfreesz); /* Account for the returned memory, and repeat if necessary. */ DIRHASHLIST_LOCK(); - ufs_dirhashmem -= mem; + atomic_add_int(&ufs_dirhashmem, -mem); } - /* Success; return with list locked. */ + /* Success. 
*/ return (0); } -void -ufsdirhash_init() -{ -#ifdef _LKM - pool_init(&ufsdirhash_pool, DH_NBLKOFF * sizeof(daddr_t), 0, 0, 0, - "ufsdirhash", &pool_allocator_nointr); -#endif - TAILQ_INIT(&ufsdirhash_list); -} - -void -ufsdirhash_done(void) -{ - KASSERT(TAILQ_EMPTY(&ufsdirhash_list)); -#ifdef _LKM - pool_destroy(&ufsdirhash_pool); -#endif -} - -SYSCTL_SETUP(sysctl_vfs_ufs_setup, "sysctl vfs.ufs.dirhash subtree setup") +static void +ufsdirhash_sysctl_init(void) { const struct sysctlnode *rnode, *cnode; - sysctl_createv(clog, 0, NULL, &rnode, + sysctl_createv(&ufsdirhash_sysctl_log, 0, NULL, &rnode, CTLFLAG_PERMANENT, CTLTYPE_NODE, "vfs", NULL, NULL, 0, NULL, 0, CTL_VFS, CTL_EOL); - sysctl_createv(clog, 0, &rnode, &rnode, + sysctl_createv(&ufsdirhash_sysctl_log, 0, &rnode, &rnode, CTLFLAG_PERMANENT, CTLTYPE_NODE, "ufs", SYSCTL_DESCR("ufs"), NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL); - sysctl_createv(clog, 0, &rnode, &rnode, + sysctl_createv(&ufsdirhash_sysctl_log, 0, &rnode, &rnode, CTLFLAG_PERMANENT, CTLTYPE_NODE, "dirhash", SYSCTL_DESCR("dirhash"), NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL); - sysctl_createv(clog, 0, &rnode, &cnode, + sysctl_createv(&ufsdirhash_sysctl_log, 0, &rnode, &cnode, CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "minblocks", SYSCTL_DESCR("minimum hashed directory size in blocks"), NULL, 0, &ufs_dirhashminblks, 0, CTL_CREATE, CTL_EOL); - sysctl_createv(clog, 0, &rnode, &cnode, + sysctl_createv(&ufsdirhash_sysctl_log, 0, &rnode, &cnode, CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "maxmem", SYSCTL_DESCR("maximum dirhash memory usage"), NULL, 0, &ufs_dirhashmaxmem, 0, CTL_CREATE, CTL_EOL); - sysctl_createv(clog, 0, &rnode, &cnode, + sysctl_createv(&ufsdirhash_sysctl_log, 0, &rnode, &cnode, CTLFLAG_PERMANENT|CTLFLAG_READONLY, CTLTYPE_INT, "memused", SYSCTL_DESCR("current dirhash memory usage"), NULL, 0, &ufs_dirhashmem, 0, CTL_CREATE, CTL_EOL); - sysctl_createv(clog, 0, &rnode, &cnode, + sysctl_createv(&ufsdirhash_sysctl_log, 0, &rnode, &cnode, CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "docheck", SYSCTL_DESCR("enable extra sanity checks"), NULL, 0, &ufs_dirhashcheck, 0, CTL_CREATE, CTL_EOL); } + +void +ufsdirhash_init(void) +{ + + mutex_init(&ufsdirhash_lock, MUTEX_DEFAULT, IPL_NONE); + ufsdirhashblk_cache = pool_cache_init(DH_NBLKOFF * sizeof(daddr_t), 0, + 0, 0, "dirhashblk", NULL, IPL_NONE, NULL, NULL, NULL); + ufsdirhash_cache = pool_cache_init(sizeof(struct dirhash), 0, + 0, 0, "dirhash", NULL, IPL_NONE, NULL, NULL, NULL); + TAILQ_INIT(&ufsdirhash_list); + ufsdirhash_sysctl_init(); +} + +void +ufsdirhash_done(void) +{ + + KASSERT(TAILQ_EMPTY(&ufsdirhash_list)); + pool_cache_destroy(ufsdirhashblk_cache); + pool_cache_destroy(ufsdirhash_cache); + mutex_destroy(&ufsdirhash_lock); + sysctl_teardown(&ufsdirhash_sysctl_log); +}
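
The rewritten ufsdirhash_build() above drops the list-lock bookkeeping for ufs_dirhashmem in favour of lock-free accounting: it optimistically adds memreqd with atomic_add_int_nv(), and if the new total exceeds ufs_dirhashmaxmem it backs the reservation out and either gives up or asks ufsdirhash_recycle() to make room before retrying. The following is a minimal userland C11 sketch of that reserve-then-roll-back pattern, not the kernel code itself; the names mem_used, mem_max, reserve_mem and try_recycle are hypothetical stand-ins for ufs_dirhashmem, ufs_dirhashmaxmem and the recycler.

/*
 * Illustrative userland sketch (C11 atomics) of the reserve/rollback
 * accounting pattern used by the new ufsdirhash_build(): add first,
 * then check the cap, and back the reservation out on failure.
 * All names here are hypothetical, not part of the kernel source.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint mem_used;                          /* like ufs_dirhashmem */
static const unsigned int mem_max = 2 * 1024 * 1024;  /* like ufs_dirhashmaxmem */

/* Pretend to reclaim memory from old entries; returns true if any was freed. */
static bool
try_recycle(unsigned int wanted)
{
	(void)wanted;
	return false;	/* nothing to recycle in this toy example */
}

static bool
reserve_mem(unsigned int memreqd)
{
	/* atomic_fetch_add returns the old value; add memreqd for the new total. */
	while (atomic_fetch_add(&mem_used, memreqd) + memreqd > mem_max) {
		/* Over the cap: roll the reservation back before deciding. */
		atomic_fetch_sub(&mem_used, memreqd);
		if (memreqd > mem_max / 2)
			return false;		/* request unreasonably large */
		if (!try_recycle(memreqd))
			return false;		/* could not make room */
		/* Some memory was freed; retry the reservation. */
	}
	return true;
}

int
main(void)
{
	if (reserve_mem(64 * 1024))
		printf("reserved, in use: %u bytes\n", atomic_load(&mem_used));
	else
		printf("over budget, falling back to linear lookup\n");
	return 0;
}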
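
The new locking comment near the top of the diff states that only the owner of the inode may free its dirhash, while anything may steal the hash memory and set dh_hash to NULL; this is why every consumer in the diff takes dh_lock and re-checks dh_hash before touching it. Below is a small pthread-based sketch of that validate-under-lock protocol, assuming hypothetical names (cache_obj, lookup, steal); it illustrates the idea only and is not NetBSD kernel code.

/*
 * Illustrative pthread sketch of the "steal" protocol described in the
 * diff's locking comment: a reclaimer may free the hash memory and set
 * the pointer to NULL, so consumers must take the per-object lock and
 * re-check the pointer before every use.  All names are hypothetical.
 */
#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

struct cache_obj {
	pthread_mutex_t lock;	/* like dh_lock */
	int *table;		/* like dh_hash; NULL once stolen */
	size_t tablesz;
};

/* Consumer: use the table only while holding the lock and after re-checking. */
static int
lookup(struct cache_obj *obj, size_t idx, int *out)
{
	int rv = -1;

	pthread_mutex_lock(&obj->lock);
	if (obj->table != NULL && idx < obj->tablesz) {
		*out = obj->table[idx];
		rv = 0;
	}
	pthread_mutex_unlock(&obj->lock);
	return rv;		/* -1 means "fall back to the slow path" */
}

/* Reclaimer: steal the memory, but leave the object itself to its owner. */
static void
steal(struct cache_obj *obj)
{
	int *table;

	pthread_mutex_lock(&obj->lock);
	table = obj->table;
	obj->table = NULL;
	pthread_mutex_unlock(&obj->lock);
	free(table);		/* free outside the lock, as the diff does */
}

int
main(void)
{
	struct cache_obj obj = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };
	int v;

	obj.table = calloc(16, sizeof(int));
	if (obj.table == NULL)
		return 1;
	obj.tablesz = 16;
	printf("before steal: %d\n", lookup(&obj, 3, &v));	/* 0: hit */
	steal(&obj);
	printf("after steal: %d\n", lookup(&obj, 3, &v));	/* -1: slow path */
	return 0;
}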