Annotation of src/sys/kern/vfs_cache.c, Revision 1.67
1.67 ! chs 1: /* $NetBSD: vfs_cache.c,v 1.66 2006/10/25 18:56:38 christos Exp $ */
1.6 cgd 2:
1.1 cgd 3: /*
1.5 mycroft 4: * Copyright (c) 1989, 1993
5: * The Regents of the University of California. All rights reserved.
1.1 cgd 6: *
7: * Redistribution and use in source and binary forms, with or without
8: * modification, are permitted provided that the following conditions
9: * are met:
10: * 1. Redistributions of source code must retain the above copyright
11: * notice, this list of conditions and the following disclaimer.
12: * 2. Redistributions in binary form must reproduce the above copyright
13: * notice, this list of conditions and the following disclaimer in the
14: * documentation and/or other materials provided with the distribution.
1.51 agc 15: * 3. Neither the name of the University nor the names of its contributors
1.1 cgd 16: * may be used to endorse or promote products derived from this software
17: * without specific prior written permission.
18: *
19: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29: * SUCH DAMAGE.
30: *
1.10 mycroft 31: * @(#)vfs_cache.c 8.3 (Berkeley) 8/22/94
1.1 cgd 32: */
1.32 lukem 33:
34: #include <sys/cdefs.h>
1.67 ! chs 35: __KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.66 2006/10/25 18:56:38 christos Exp $");
1.1 cgd 36:
1.28 chs 37: #include "opt_ddb.h"
1.29 fvdl 38: #include "opt_revcache.h"
1.28 chs 39:
1.4 mycroft 40: #include <sys/param.h>
41: #include <sys/systm.h>
42: #include <sys/time.h>
43: #include <sys/mount.h>
44: #include <sys/vnode.h>
45: #include <sys/namei.h>
46: #include <sys/errno.h>
47: #include <sys/malloc.h>
1.18 thorpej 48: #include <sys/pool.h>
1.39 pk 49: #include <sys/lock.h>
1.1 cgd 50:
1.66 christos 51: #define NAMECACHE_ENTER_REVERSE
1.1 cgd 52: /*
53: * Name caching works as follows:
54: *
55: * Names found by directory scans are retained in a cache
56: * for future reference. It is managed LRU, so frequently
57: * used names will hang around. Cache is indexed by hash value
1.20 jdolecek 58: * obtained from (dvp, name) where dvp refers to the directory
1.1 cgd 59: * containing name.
60: *
61: * For simplicity (and economy of storage), names longer than
62: * a maximum length of NCHNAMLEN are not cached; they occur
63: * infrequently in any case, and are almost never of interest.
64: *
65: * Upon reaching the last segment of a path, if the reference
66: * is for DELETE, or NOCACHE is set (rewrite), and the
67: * name is located in the cache, it will be dropped.
1.20 jdolecek 68: * The entry is dropped also when it was not possible to lock
69: * the cached vnode, either because vget() failed or the generation
70: * number has changed while waiting for the lock.
1.1 cgd 71: */
72:
73: /*
74: * Structures associated with name cacheing.
75: */
1.9 mycroft 76: LIST_HEAD(nchashhead, namecache) *nchashtbl;
1.1 cgd 77: u_long nchash; /* size of hash table - 1 */
78: long numcache; /* number of cache entries allocated */
1.49 yamt 79: #define NCHASH(cnp, dvp) \
80: (((cnp)->cn_hash ^ ((uintptr_t)(dvp) >> 3)) & nchash)
1.19 sommerfe 81:
82: LIST_HEAD(ncvhashhead, namecache) *ncvhashtbl;
83: u_long ncvhash; /* size of hash table - 1 */
1.48 yamt 84: #define NCVHASH(vp) (((uintptr_t)(vp) >> 3) & ncvhash)
1.19 sommerfe 85:
1.9 mycroft 86: TAILQ_HEAD(, namecache) nclruhead; /* LRU chain */
1.1 cgd 87: struct nchstats nchstats; /* cache effectiveness statistics */
88:
1.56 simonb 89: POOL_INIT(namecache_pool, sizeof(struct namecache), 0, 0, 0, "ncachepl",
90: &pool_allocator_nointr);
1.38 thorpej 91:
92: MALLOC_DEFINE(M_CACHE, "namecache", "Dynamically allocated cache entries");
1.18 thorpej 93:
1.7 chopps 94: int doingcache = 1; /* 1 => enable the cache */
1.1 cgd 95:
1.39 pk 96: /* A single lock to protect cache insertion, removal and lookup */
97: static struct simplelock namecache_slock = SIMPLELOCK_INITIALIZER;
98:
1.46 yamt 99: static void cache_remove(struct namecache *);
100: static void cache_free(struct namecache *);
1.63 perry 101: static inline struct namecache *cache_lookup_entry(
1.55 yamt 102: const struct vnode *, const struct componentname *);
1.46 yamt 103:
/*
 * Unlink a cache entry from every list it is on: the LRU chain, the
 * forward hash, the reverse (vnode) hash, and the per-vnode lists.
 * The entry itself is not freed; callers follow up with cache_free().
 *
 * Must be called with namecache_slock held.
 */
static void
cache_remove(struct namecache *ncp)
{

	LOCK_ASSERT(simple_lock_held(&namecache_slock));

	ncp->nc_dvp = NULL;
	ncp->nc_vp = NULL;

	/* Always on the LRU chain; unconditionally remove. */
	TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
	/*
	 * Membership on the remaining lists is optional; a NULL le_prev
	 * is the convention used throughout this file to mean "not on
	 * the list" (see nchreinit(), which clears it the same way).
	 */
	if (ncp->nc_hash.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_hash);
		ncp->nc_hash.le_prev = NULL;
	}
	if (ncp->nc_vhash.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vhash);
		ncp->nc_vhash.le_prev = NULL;
	}
	if (ncp->nc_vlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vlist);
		ncp->nc_vlist.le_prev = NULL;
	}
	if (ncp->nc_dvlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_dvlist);
		ncp->nc_dvlist.le_prev = NULL;
	}
}
131:
/*
 * Return an (already unlinked) cache entry to the pool and drop the
 * allocation count.  NOTE(review): numcache is decremented without
 * namecache_slock held at some call sites' unlock points — presumably
 * callers hold the lock here; confirm against each caller.
 */
static void
cache_free(struct namecache *ncp)
{

	pool_put(&namecache_pool, ncp);
	numcache--;
}
139:
1.63 perry 140: static inline struct namecache *
1.55 yamt 141: cache_lookup_entry(const struct vnode *dvp, const struct componentname *cnp)
142: {
143: struct nchashhead *ncpp;
144: struct namecache *ncp;
145:
146: LOCK_ASSERT(simple_lock_held(&namecache_slock));
147:
148: ncpp = &nchashtbl[NCHASH(cnp, dvp)];
149:
150: LIST_FOREACH(ncp, ncpp, nc_hash) {
151: if (ncp->nc_dvp == dvp &&
152: ncp->nc_nlen == cnp->cn_namelen &&
153: !memcmp(ncp->nc_name, cnp->cn_nameptr, (u_int)ncp->nc_nlen))
154: break;
155: }
156:
157: return ncp;
158: }
159:
1.1 cgd 160: /*
161: * Look for a the name in the cache. We don't do this
162: * if the segment name is long, simply so the cache can avoid
163: * holding long names (which would either waste space, or
164: * add greatly to the complexity).
165: *
166: * Lookup is called with ni_dvp pointing to the directory to search,
167: * ni_ptr pointing to the name of the entry being sought, ni_namelen
168: * tells the length of the name, and ni_hash contains a hash of
1.20 jdolecek 169: * the name. If the lookup succeeds, the vnode is locked, stored in ni_vp
170: * and a status of zero is returned. If the locking fails for whatever
171: * reason, the vnode is unlocked and the error is returned to caller.
172: * If the lookup determines that the name does not exist (negative cacheing),
173: * a status of ENOENT is returned. If the lookup fails, a status of -1
174: * is returned.
1.1 cgd 175: */
1.5 mycroft 176: int
1.34 enami 177: cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
1.1 cgd 178: {
1.23 augustss 179: struct namecache *ncp;
1.20 jdolecek 180: struct vnode *vp;
1.36 thorpej 181: int error;
1.1 cgd 182:
1.8 cgd 183: if (!doingcache) {
184: cnp->cn_flags &= ~MAKEENTRY;
1.34 enami 185: *vpp = NULL;
1.20 jdolecek 186: return (-1);
1.8 cgd 187: }
1.39 pk 188:
1.5 mycroft 189: if (cnp->cn_namelen > NCHNAMLEN) {
1.53 yamt 190: /* XXXSMP - updating stats without lock; do we care? */
1.1 cgd 191: nchstats.ncs_long++;
1.5 mycroft 192: cnp->cn_flags &= ~MAKEENTRY;
1.53 yamt 193: goto fail;
1.1 cgd 194: }
1.53 yamt 195: simple_lock(&namecache_slock);
1.55 yamt 196: ncp = cache_lookup_entry(dvp, cnp);
197: if (ncp == NULL) {
1.1 cgd 198: nchstats.ncs_miss++;
1.39 pk 199: goto fail_wlock;
1.1 cgd 200: }
1.9 mycroft 201: if ((cnp->cn_flags & MAKEENTRY) == 0) {
1.1 cgd 202: nchstats.ncs_badhits++;
1.20 jdolecek 203: goto remove;
1.1 cgd 204: } else if (ncp->nc_vp == NULL) {
1.11 mycroft 205: /*
206: * Restore the ISWHITEOUT flag saved earlier.
207: */
1.50 yamt 208: cnp->cn_flags |= ncp->nc_flags;
1.21 mycroft 209: if (cnp->cn_nameiop != CREATE ||
210: (cnp->cn_flags & ISLASTCN) == 0) {
1.1 cgd 211: nchstats.ncs_neghits++;
212: /*
213: * Move this slot to end of LRU chain,
214: * if not already there.
215: */
1.37 matt 216: if (TAILQ_NEXT(ncp, nc_lru) != 0) {
1.9 mycroft 217: TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
218: TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
1.1 cgd 219: }
1.39 pk 220: simple_unlock(&namecache_slock);
1.1 cgd 221: return (ENOENT);
1.20 jdolecek 222: } else {
1.15 fvdl 223: nchstats.ncs_badhits++;
1.20 jdolecek 224: goto remove;
225: }
226: }
227:
228: vp = ncp->nc_vp;
1.52 yamt 229:
230: /*
231: * Move this slot to end of LRU chain, if not already there.
232: */
233: if (TAILQ_NEXT(ncp, nc_lru) != 0) {
234: TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
235: TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
236: }
237:
1.60 yamt 238: error = vget(vp, LK_NOWAIT);
1.53 yamt 239:
1.60 yamt 240: /* Release the name cache mutex while we get reference to the vnode */
1.39 pk 241: simple_unlock(&namecache_slock);
242:
1.52 yamt 243: #ifdef DEBUG
244: /*
245: * since we released namecache_slock,
246: * we can't use this pointer any more.
247: */
248: ncp = NULL;
249: #endif /* DEBUG */
250:
1.60 yamt 251: if (error) {
252: KASSERT(error == EBUSY);
1.53 yamt 253: /*
254: * this vnode is being cleaned out.
255: */
256: nchstats.ncs_falsehits++; /* XXX badhits? */
257: goto fail;
258: }
259:
1.20 jdolecek 260: if (vp == dvp) { /* lookup on "." */
261: error = 0;
262: } else if (cnp->cn_flags & ISDOTDOT) {
263: VOP_UNLOCK(dvp, 0);
1.60 yamt 264: error = vn_lock(vp, LK_EXCLUSIVE);
1.67 ! chs 265: vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
1.1 cgd 266: } else {
1.60 yamt 267: error = vn_lock(vp, LK_EXCLUSIVE);
1.20 jdolecek 268: }
269:
270: /*
1.54 yamt 271: * Check that the lock succeeded.
1.20 jdolecek 272: */
1.47 yamt 273: if (error) {
1.39 pk 274: /* XXXSMP - updating stats without lock; do we care? */
1.53 yamt 275: nchstats.ncs_badhits++;
1.31 chs 276: *vpp = NULL;
277: return (-1);
1.20 jdolecek 278: }
279:
1.52 yamt 280: /* XXXSMP - updating stats without lock; do we care? */
1.20 jdolecek 281: nchstats.ncs_goodhits++;
282: *vpp = vp;
283: return (0);
1.1 cgd 284:
1.20 jdolecek 285: remove:
1.1 cgd 286: /*
287: * Last component and we are renaming or deleting,
288: * the cache entry is invalid, or otherwise don't
289: * want cache entry to exist.
290: */
1.55 yamt 291: cache_remove(ncp);
292: cache_free(ncp);
1.39 pk 293:
294: fail_wlock:
295: simple_unlock(&namecache_slock);
1.53 yamt 296: fail:
1.34 enami 297: *vpp = NULL;
1.20 jdolecek 298: return (-1);
1.1 cgd 299: }
300:
1.61 yamt 301: int
302: cache_lookup_raw(struct vnode *dvp, struct vnode **vpp,
303: struct componentname *cnp)
304: {
305: struct namecache *ncp;
306: struct vnode *vp;
307: int error;
308:
309: if (!doingcache) {
310: cnp->cn_flags &= ~MAKEENTRY;
311: *vpp = NULL;
312: return (-1);
313: }
314:
315: if (cnp->cn_namelen > NCHNAMLEN) {
316: /* XXXSMP - updating stats without lock; do we care? */
317: nchstats.ncs_long++;
318: cnp->cn_flags &= ~MAKEENTRY;
319: goto fail;
320: }
321: simple_lock(&namecache_slock);
322: ncp = cache_lookup_entry(dvp, cnp);
323: if (ncp == NULL) {
324: nchstats.ncs_miss++;
325: goto fail_wlock;
326: }
327: /*
328: * Move this slot to end of LRU chain,
329: * if not already there.
330: */
331: if (TAILQ_NEXT(ncp, nc_lru) != 0) {
332: TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
333: TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
334: }
335:
336: vp = ncp->nc_vp;
337: if (vp == NULL) {
338: /*
339: * Restore the ISWHITEOUT flag saved earlier.
340: */
341: cnp->cn_flags |= ncp->nc_flags;
342: nchstats.ncs_neghits++;
343: simple_unlock(&namecache_slock);
344: return (ENOENT);
345: }
346:
347: error = vget(vp, LK_NOWAIT);
348:
349: /* Release the name cache mutex while we get reference to the vnode */
350: simple_unlock(&namecache_slock);
351:
352: if (error) {
353: KASSERT(error == EBUSY);
354: /*
355: * this vnode is being cleaned out.
356: */
357: nchstats.ncs_falsehits++; /* XXX badhits? */
358: goto fail;
359: }
360:
361: *vpp = vp;
362:
363: return 0;
364:
365: fail_wlock:
366: simple_unlock(&namecache_slock);
367: fail:
368: *vpp = NULL;
369: return -1;
370: }
371:
1.1 cgd 372: /*
1.19 sommerfe 373: * Scan cache looking for name of directory entry pointing at vp.
374: *
375: * Fill in dvpp.
376: *
377: * If bufp is non-NULL, also place the name in the buffer which starts
378: * at bufp, immediately before *bpp, and move bpp backwards to point
379: * at the start of it. (Yes, this is a little baroque, but it's done
380: * this way to cater to the whims of getcwd).
381: *
382: * Returns 0 on success, -1 on cache miss, positive errno on failure.
383: */
/*
 * Scan cache looking for name of directory entry pointing at vp.
 *
 * Fill in dvpp.
 *
 * If bufp is non-NULL, also place the name in the buffer which starts
 * at bufp, immediately before *bpp, and move bpp backwards to point
 * at the start of it. (Yes, this is a little baroque, but it's done
 * this way to cater to the whims of getcwd).
 *
 * Returns 0 on success, -1 on cache miss, positive errno on failure.
 */
int
cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp)
{
	struct namecache *ncp;
	struct vnode *dvp;
	struct ncvhashhead *nvcpp;
	char *bp;

	if (!doingcache)
		goto out;

	nvcpp = &ncvhashtbl[NCVHASH(vp)];

	simple_lock(&namecache_slock);
	LIST_FOREACH(ncp, nvcpp, nc_vhash) {
		if (ncp->nc_vp == vp &&
		    (dvp = ncp->nc_dvp) != NULL &&
		    dvp != vp) {	/* avoid pesky . entries.. */

#ifdef DIAGNOSTIC
			/* cache_enter() never creates reverse entries
			 * for "." or ".."; seeing one means corruption. */
			if (ncp->nc_nlen == 1 &&
			    ncp->nc_name[0] == '.')
				panic("cache_revlookup: found entry for .");

			if (ncp->nc_nlen == 2 &&
			    ncp->nc_name[0] == '.' &&
			    ncp->nc_name[1] == '.')
				panic("cache_revlookup: found entry for ..");
#endif
			nchstats.ncs_revhits++;

			if (bufp) {
				/* Copy the name backwards into the buffer. */
				bp = *bpp;
				bp -= ncp->nc_nlen;
				if (bp <= bufp) {
					/* No room (<= keeps one byte free). */
					*dvpp = NULL;
					simple_unlock(&namecache_slock);
					return (ERANGE);
				}
				memcpy(bp, ncp->nc_name, ncp->nc_nlen);
				*bpp = bp;
			}

			/* XXX MP: how do we know dvp won't evaporate? */
			*dvpp = dvp;
			simple_unlock(&namecache_slock);
			return (0);
		}
	}
	nchstats.ncs_revmiss++;
	simple_unlock(&namecache_slock);
 out:
	*dvpp = NULL;
	return (-1);
}
439:
440: /*
1.1 cgd 441: * Add an entry to the cache
442: */
1.13 christos 443: void
1.34 enami 444: cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
1.1 cgd 445: {
1.23 augustss 446: struct namecache *ncp;
1.59 yamt 447: struct namecache *oncp;
1.23 augustss 448: struct nchashhead *ncpp;
449: struct ncvhashhead *nvcpp;
1.1 cgd 450:
1.5 mycroft 451: #ifdef DIAGNOSTIC
452: if (cnp->cn_namelen > NCHNAMLEN)
453: panic("cache_enter: name too long");
454: #endif
1.1 cgd 455: if (!doingcache)
456: return;
457: /*
458: * Free the cache slot at head of lru chain.
459: */
1.39 pk 460: simple_lock(&namecache_slock);
1.58 yamt 461:
1.59 yamt 462: if (numcache < numvnodes) {
1.39 pk 463: numcache++;
464: simple_unlock(&namecache_slock);
1.18 thorpej 465: ncp = pool_get(&namecache_pool, PR_WAITOK);
1.27 chs 466: memset(ncp, 0, sizeof(*ncp));
1.39 pk 467: simple_lock(&namecache_slock);
1.27 chs 468: } else if ((ncp = TAILQ_FIRST(&nclruhead)) != NULL) {
1.46 yamt 469: cache_remove(ncp);
1.39 pk 470: } else {
471: simple_unlock(&namecache_slock);
1.1 cgd 472: return;
1.39 pk 473: }
1.57 pk 474:
1.59 yamt 475: /*
476: * Concurrent lookups in the same directory may race for a
477: * cache entry. if there's a duplicated entry, free it.
478: */
479: oncp = cache_lookup_entry(dvp, cnp);
480: if (oncp) {
481: cache_remove(oncp);
482: cache_free(oncp);
483: }
484: KASSERT(cache_lookup_entry(dvp, cnp) == NULL);
485:
1.34 enami 486: /* Grab the vnode we just found. */
1.5 mycroft 487: ncp->nc_vp = vp;
1.47 yamt 488: if (vp == NULL) {
1.11 mycroft 489: /*
490: * For negative hits, save the ISWHITEOUT flag so we can
491: * restore it later when the cache entry is used again.
492: */
1.50 yamt 493: ncp->nc_flags = cnp->cn_flags & ISWHITEOUT;
1.11 mycroft 494: }
1.34 enami 495: /* Fill in cache info. */
1.5 mycroft 496: ncp->nc_dvp = dvp;
1.46 yamt 497: LIST_INSERT_HEAD(&dvp->v_dnclist, ncp, nc_dvlist);
498: if (vp)
499: LIST_INSERT_HEAD(&vp->v_nclist, ncp, nc_vlist);
1.5 mycroft 500: ncp->nc_nlen = cnp->cn_namelen;
1.17 perry 501: memcpy(ncp->nc_name, cnp->cn_nameptr, (unsigned)ncp->nc_nlen);
1.9 mycroft 502: TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
1.30 chs 503: ncpp = &nchashtbl[NCHASH(cnp, dvp)];
1.9 mycroft 504: LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
1.19 sommerfe 505:
1.34 enami 506: ncp->nc_vhash.le_prev = NULL;
507: ncp->nc_vhash.le_next = NULL;
508:
1.19 sommerfe 509: /*
510: * Create reverse-cache entries (used in getcwd) for directories.
1.66 christos 511: * (and in linux procfs exe node)
1.19 sommerfe 512: */
1.33 enami 513: if (vp != NULL &&
514: vp != dvp &&
1.29 fvdl 515: #ifndef NAMECACHE_ENTER_REVERSE
1.33 enami 516: vp->v_type == VDIR &&
1.29 fvdl 517: #endif
1.33 enami 518: (ncp->nc_nlen > 2 ||
519: (ncp->nc_nlen > 1 && ncp->nc_name[1] != '.') ||
520: (/* ncp->nc_nlen > 0 && */ ncp->nc_name[0] != '.'))) {
1.30 chs 521: nvcpp = &ncvhashtbl[NCVHASH(vp)];
1.19 sommerfe 522: LIST_INSERT_HEAD(nvcpp, ncp, nc_vhash);
523: }
1.39 pk 524: simple_unlock(&namecache_slock);
1.1 cgd 525: }
526:
527: /*
528: * Name cache initialization, from vfs_init() when we are booting
529: */
1.13 christos 530: void
1.34 enami 531: nchinit(void)
1.1 cgd 532: {
533:
1.9 mycroft 534: TAILQ_INIT(&nclruhead);
1.26 ad 535: nchashtbl =
536: hashinit(desiredvnodes, HASH_LIST, M_CACHE, M_WAITOK, &nchash);
537: ncvhashtbl =
1.29 fvdl 538: #ifdef NAMECACHE_ENTER_REVERSE
539: hashinit(desiredvnodes, HASH_LIST, M_CACHE, M_WAITOK, &ncvhash);
540: #else
1.26 ad 541: hashinit(desiredvnodes/8, HASH_LIST, M_CACHE, M_WAITOK, &ncvhash);
1.29 fvdl 542: #endif
1.30 chs 543: }
544:
545: /*
546: * Name cache reinitialization, for when the maximum number of vnodes increases.
547: */
/*
 * Name cache reinitialization, for when the maximum number of vnodes
 * increases.  New, larger hash tables are allocated up front (sleeping
 * allocation, so done before taking the lock); then, under the lock,
 * the table pointers are swapped and every entry still chained on an
 * old table is unhooked.  Entries are NOT rehashed into the new
 * tables — they simply lose their hash linkage (le_prev = NULL marks
 * "not on a list", matching cache_remove()) and will be re-entered on
 * future lookups.
 */
void
nchreinit(void)
{
	struct namecache *ncp;
	struct nchashhead *oldhash1, *hash1;
	struct ncvhashhead *oldhash2, *hash2;
	u_long i, oldmask1, oldmask2, mask1, mask2;

	hash1 = hashinit(desiredvnodes, HASH_LIST, M_CACHE, M_WAITOK, &mask1);
	hash2 =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, M_CACHE, M_WAITOK, &mask2);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, M_CACHE, M_WAITOK, &mask2);
#endif
	simple_lock(&namecache_slock);
	oldhash1 = nchashtbl;
	oldmask1 = nchash;
	nchashtbl = hash1;
	nchash = mask1;
	oldhash2 = ncvhashtbl;
	oldmask2 = ncvhash;
	ncvhashtbl = hash2;
	ncvhash = mask2;
	/* Unhook every entry from the old forward-hash chains. */
	for (i = 0; i <= oldmask1; i++) {
		while ((ncp = LIST_FIRST(&oldhash1[i])) != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
	}
	/* Likewise for the reverse-hash chains. */
	for (i = 0; i <= oldmask2; i++) {
		while ((ncp = LIST_FIRST(&oldhash2[i])) != NULL) {
			LIST_REMOVE(ncp, nc_vhash);
			ncp->nc_vhash.le_prev = NULL;
		}
	}
	simple_unlock(&namecache_slock);
	hashdone(oldhash1, M_CACHE);
	hashdone(oldhash2, M_CACHE);
}
588:
589: /*
590: * Cache flush, a particular vnode; called when a vnode is renamed to
591: * hide entries that would now be invalid
592: */
1.13 christos 593: void
1.55 yamt 594: cache_purge1(struct vnode *vp, const struct componentname *cnp, int flags)
1.1 cgd 595: {
1.46 yamt 596: struct namecache *ncp, *ncnext;
1.1 cgd 597:
1.39 pk 598: simple_lock(&namecache_slock);
1.55 yamt 599: if (flags & PURGE_PARENTS) {
600: for (ncp = LIST_FIRST(&vp->v_nclist); ncp != NULL;
601: ncp = ncnext) {
602: ncnext = LIST_NEXT(ncp, nc_vlist);
603: cache_remove(ncp);
604: cache_free(ncp);
605: }
606: }
607: if (flags & PURGE_CHILDREN) {
608: for (ncp = LIST_FIRST(&vp->v_dnclist); ncp != NULL;
609: ncp = ncnext) {
610: ncnext = LIST_NEXT(ncp, nc_dvlist);
611: cache_remove(ncp);
612: cache_free(ncp);
613: }
1.46 yamt 614: }
1.55 yamt 615: if (cnp != NULL) {
616: ncp = cache_lookup_entry(vp, cnp);
617: if (ncp) {
618: cache_remove(ncp);
619: cache_free(ncp);
620: }
1.46 yamt 621: }
1.39 pk 622: simple_unlock(&namecache_slock);
1.1 cgd 623: }
624:
625: /*
626: * Cache flush, a whole filesystem; called when filesys is umounted to
1.27 chs 627: * remove entries that would now be invalid.
1.1 cgd 628: */
1.13 christos 629: void
1.34 enami 630: cache_purgevfs(struct mount *mp)
1.1 cgd 631: {
1.23 augustss 632: struct namecache *ncp, *nxtcp;
1.1 cgd 633:
1.39 pk 634: simple_lock(&namecache_slock);
1.27 chs 635: for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
636: nxtcp = TAILQ_NEXT(ncp, nc_lru);
1.5 mycroft 637: if (ncp->nc_dvp == NULL || ncp->nc_dvp->v_mount != mp) {
1.1 cgd 638: continue;
1.5 mycroft 639: }
1.34 enami 640: /* Free the resources we had. */
1.46 yamt 641: cache_remove(ncp);
642: cache_free(ncp);
1.1 cgd 643: }
1.39 pk 644: simple_unlock(&namecache_slock);
1.1 cgd 645: }
1.19 sommerfe 646:
1.28 chs 647: #ifdef DDB
648: void
649: namecache_print(struct vnode *vp, void (*pr)(const char *, ...))
650: {
651: struct vnode *dvp = NULL;
652: struct namecache *ncp;
653:
654: TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
1.47 yamt 655: if (ncp->nc_vp == vp) {
1.28 chs 656: (*pr)("name %.*s\n", ncp->nc_nlen, ncp->nc_name);
657: dvp = ncp->nc_dvp;
658: }
659: }
660: if (dvp == NULL) {
661: (*pr)("name not found\n");
662: return;
663: }
664: vp = dvp;
665: TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
1.47 yamt 666: if (ncp->nc_vp == vp) {
1.28 chs 667: (*pr)("parent %.*s\n", ncp->nc_nlen, ncp->nc_name);
668: }
669: }
670: }
671: #endif
CVSweb <webmaster@jp.NetBSD.org>