Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v
rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v: warning: Unknown phrases like `commitid ...;' are present.
retrieving revision 1.137.2.6
retrieving revision 1.158
diff -u -p -r1.137.2.6 -r1.158
--- src/sys/kern/subr_pool.c	2007/12/28 15:06:20	1.137.2.6
+++ src/sys/kern/subr_pool.c	2008/04/27 11:37:48	1.158
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_pool.c,v 1.137.2.6 2007/12/28 15:06:20 ad Exp $	*/
+/*	$NetBSD: subr_pool.c,v 1.158 2008/04/27 11:37:48 ad Exp $	*/
 
 /*-
  * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc.
@@ -38,7 +38,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.137.2.6 2007/12/28 15:06:20 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.158 2008/04/27 11:37:48 ad Exp $");
 
 #include "opt_ddb.h"
 #include "opt_pool.h"
@@ -52,7 +52,6 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -123,7 +122,7 @@ struct pool_item_header {
 	SPLAY_ENTRY(pool_item_header)
 				ph_node;	/* Off-page page headers */
 	void *			ph_page;	/* this page's address */
-	struct timeval		ph_time;	/* last referenced */
+	uint32_t		ph_time;	/* last referenced */
 	uint16_t		ph_nmissing;	/* # of chunks in use */
 	uint16_t		ph_off;		/* start offset in page */
 	union {
@@ -613,10 +612,10 @@ pool_subsystem_init(void)
 		pa_reclaim_register(pa);
 	}
 
-	pool_init(&cache_pool, sizeof(struct pool_cache), CACHE_LINE_SIZE,
+	pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
 	    0, 0, "pcache", &pool_allocator_nointr, IPL_NONE);
 
-	pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), CACHE_LINE_SIZE,
+	pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
 	    0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE);
 }
@@ -815,14 +814,7 @@ pool_init(struct pool *pp, size_t size,
 	pp->pr_entered_file = NULL;
 	pp->pr_entered_line = 0;
 
-	/*
-	 * XXXAD hack to prevent IP input processing from blocking.
-	 */
-	if (ipl == IPL_SOFTNET) {
-		mutex_init(&pp->pr_lock, MUTEX_DEFAULT, IPL_VM);
-	} else {
-		mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
-	}
+	mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
 
 	cv_init(&pp->pr_cv, wchan);
 	pp->pr_ipl = ipl;
@@ -856,12 +848,12 @@ pool_init(struct pool *pp, size_t size,
 
 		size = sizeof(pcg_t) +
 		    (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
-		pool_init(&pcg_normal_pool, size, CACHE_LINE_SIZE, 0, 0,
+		pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
 		    "pcgnormal", &pool_allocator_meta, IPL_VM);
 
 		size = sizeof(pcg_t) +
 		    (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
-		pool_init(&pcg_large_pool, size, CACHE_LINE_SIZE, 0, 0,
+		pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
 		    "pcglarge", &pool_allocator_meta, IPL_VM);
 	}
 
@@ -996,8 +988,9 @@ pool_get(struct pool *pp, int flags)
 #endif /* DIAGNOSTIC */
 
 #ifdef LOCKDEBUG
-	if (flags & PR_WAITOK)
-		ASSERT_SLEEPABLE(NULL, "pool_get(PR_WAITOK)");
+	if (flags & PR_WAITOK) {
+		ASSERT_SLEEPABLE();
+	}
 #endif
 
 	mutex_enter(&pp->pr_lock);
@@ -1289,8 +1282,7 @@ pool_do_put(struct pool *pp, void *v, st
 	if (ph->ph_nmissing == 0) {
 		pp->pr_nidle++;
 		if (pp->pr_npages > pp->pr_minpages &&
-		    (pp->pr_npages > pp->pr_maxpages ||
-		     pa_starved_p(pp->pr_alloc))) {
+		    pp->pr_npages > pp->pr_maxpages) {
 			pr_rmpage(pp, ph, pq);
 		} else {
 			LIST_REMOVE(ph, ph_pagelist);
@@ -1301,8 +1293,11 @@ pool_do_put(struct pool *pp, void *v, st
 			 * be idle for some period of time before it can
 			 * be reclaimed by the pagedaemon.  This minimizes
 			 * ping-pong'ing for memory.
+			 *
+			 * note for 64-bit time_t: truncating to 32-bit is not
+			 * a problem for our usage.
 			 */
-			getmicrotime(&ph->ph_time);
+			ph->ph_time = time_uptime;
 		}
 		pool_update_curpage(pp);
 	}
@@ -1454,7 +1449,7 @@ pool_prime_page(struct pool *pp, void *s
 	LIST_INIT(&ph->ph_itemlist);
 	ph->ph_page = storage;
 	ph->ph_nmissing = 0;
-	getmicrotime(&ph->ph_time);
+	ph->ph_time = time_uptime;
 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
 		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
 
@@ -1615,7 +1610,7 @@ pool_reclaim(struct pool *pp)
 {
 	struct pool_item_header *ph, *phnext;
 	struct pool_pagelist pq;
-	struct timeval curtime, diff;
+	uint32_t curtime;
 	bool klock;
 	int rv;
 
@@ -1627,9 +1622,8 @@ pool_reclaim(struct pool *pp)
 	}
 
 	/*
-	 * XXXSMP Because mutexes at IPL_SOFTXXX are still spinlocks,
-	 * and we are called from the pagedaemon without kernel_lock.
-	 * Does not apply to IPL_SOFTBIO.
+	 * XXXSMP Because we do not want to cause non-MPSAFE code
+	 * to block.
 	 */
 	if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
 	    pp->pr_ipl == IPL_SOFTSERIAL) {
@@ -1652,7 +1646,7 @@ pool_reclaim(struct pool *pp)
 
 	LIST_INIT(&pq);
 
-	getmicrotime(&curtime);
+	curtime = time_uptime;
 
 	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
 		phnext = LIST_NEXT(ph, ph_pagelist);
@@ -1662,8 +1656,7 @@ pool_reclaim(struct pool *pp)
 			break;
 
 		KASSERT(ph->ph_nmissing == 0);
-		timersub(&curtime, &ph->ph_time, &diff);
-		if (diff.tv_sec < pool_inactive_time
+		if (curtime - ph->ph_time < pool_inactive_time
 		    && !pa_starved_p(pp->pr_alloc))
 			continue;
 
@@ -1804,10 +1797,8 @@ pool_print_pagelist(struct pool *pp, str
 #endif
 
 	LIST_FOREACH(ph, pl, ph_pagelist) {
-		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
-		    ph->ph_page, ph->ph_nmissing,
-		    (u_long)ph->ph_time.tv_sec,
-		    (u_long)ph->ph_time.tv_usec);
+		(*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
+		    ph->ph_page, ph->ph_nmissing, ph->ph_time);
#ifdef DIAGNOSTIC
 		if (!(pp->pr_roflags & PR_NOTOUCH)) {
 			LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
@@ -2075,15 +2066,7 @@ pool_cache_bootstrap(pool_cache_t pc, si
 	if (palloc == NULL && ipl == IPL_NONE)
 		palloc = &pool_allocator_nointr;
 	pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
-
-	/*
-	 * XXXAD hack to prevent IP input processing from blocking.
-	 */
-	if (ipl == IPL_SOFTNET) {
-		mutex_init(&pc->pc_lock, MUTEX_DEFAULT, IPL_VM);
-	} else {
-		mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
-	}
+	mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
 
 	if (ctor == NULL) {
 		ctor = (int (*)(void *, void *, int))nullop;
@@ -2207,7 +2190,6 @@ pool_cache_cpu_init1(struct cpu_info *ci
 
 	index = ci->ci_index;
 	KASSERT(index < MAXCPUS);
-	KASSERT(((uintptr_t)pc->pc_cpus & (CACHE_LINE_SIZE - 1)) == 0);
 
 	if ((cc = pc->pc_cpus[index]) != NULL) {
 		KASSERT(cc->cc_cpuindex == index);
@@ -2394,13 +2376,12 @@ pool_cache_cpu_enter(pool_cache_t pc, in
 	 * CPU-local data.  To avoid touching shared state, we
 	 * pull the neccessary information from CPU local data.
 	 */
-	crit_enter();
+	KPREEMPT_DISABLE();
 	cc = pc->pc_cpus[curcpu()->ci_index];
 	KASSERT(cc->cc_cache == pc);
 	if (cc->cc_ipl != IPL_NONE) {
 		*s = splraiseipl(cc->cc_iplcookie);
 	}
-	KASSERT(((uintptr_t)cc & (CACHE_LINE_SIZE - 1)) == 0);
 
 	return cc;
 }
@@ -2413,7 +2394,7 @@ pool_cache_cpu_exit(pool_cache_cpu_t *cc
 	if (cc->cc_ipl != IPL_NONE) {
 		splx(*s);
 	}
-	crit_exit();
+	KPREEMPT_ENABLE();
 }
 
 #if __GNUC_PREREQ__(3, 0)
@@ -2523,8 +2504,9 @@ pool_cache_get_paddr(pool_cache_t pc, in
 	int s;
 
 #ifdef LOCKDEBUG
-	if (flags & PR_WAITOK)
-		ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)");
+	if (flags & PR_WAITOK) {
+		ASSERT_SLEEPABLE();
+	}
 #endif
 
 	cc = pool_cache_cpu_enter(pc, &s);
@@ -2535,7 +2517,9 @@ pool_cache_get_paddr(pool_cache_t pc, in
 			object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
 			if (pap != NULL)
 				*pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
+#if defined(DIAGNOSTIC)
 			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
+#endif /* defined(DIAGNOSTIC) */
 			KASSERT(pcg->pcg_avail <= pcg->pcg_size);
 			KASSERT(object != NULL);
 			cc->cc_hits++;
@@ -2641,7 +2625,9 @@ pool_cache_put_slow(pool_cache_cpu_t *cc
 	 * object away.
 	 */
 	nobj = pc->pc_pcgsize;
-	if (nobj == PCG_NOBJECTS_LARGE) {
+	if (pool_cache_disable) {
+		pcg = NULL;
+	} else if (nobj == PCG_NOBJECTS_LARGE) {
 		pcg = pool_get(&pcg_large_pool, PR_NOWAIT);
 	} else {
 		pcg = pool_get(&pcg_normal_pool, PR_NOWAIT);
@@ -3060,7 +3046,7 @@ found:
 				snprintf(cpucachestr,
 				    sizeof(cpucachestr),
 				    "cached by CPU %u",
-				    (u_int)ci->ci_cpuid);
+				    ci->ci_index);
 				goto print;
 			}
 		}
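
A note on the time-handling change above: replacing the struct timeval/getmicrotime() timestamp with a uint32_t snapshot of time_uptime works because pool_reclaim() only ever looks at the difference of two timestamps. With both operands unsigned 32-bit, "curtime - ph->ph_time" is computed modulo 2^32 and therefore still yields the page's idle age even if the truncated uptime wraps between the page going idle and the reclaim pass, which is what the new "truncating to 32-bit is not a problem" comment is saying. The following userland sketch is illustrative only and not part of the diff; pool_inactive_time here is a stand-in for the kernel tunable of the same name, and page_is_fresh() merely mirrors the shape of the new test in pool_reclaim():

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for the kernel's pool_inactive_time tunable (seconds). */
	static const uint32_t pool_inactive_time = 10;

	/*
	 * Same shape as the new test in pool_reclaim(): unsigned
	 * subtraction yields the idle age modulo 2^32, so the result
	 * is correct across a wrap of the truncated uptime counter.
	 */
	static int
	page_is_fresh(uint32_t curtime, uint32_t ph_time)
	{
		return curtime - ph_time < pool_inactive_time;
	}

	int
	main(void)
	{
		/* Page went idle 3 seconds before the 32-bit counter wrapped. */
		uint32_t ph_time = UINT32_MAX - 2;

		printf("%d\n", page_is_fresh(0, ph_time));	/* 1: age is 3s */
		printf("%d\n", page_is_fresh(17, ph_time));	/* 0: age is 20s */
		return 0;
	}

The old timersub()/tv_sec comparison kept a full struct timeval in every page header and needed a call per page examined; a single unsigned compare is cheaper and remains correct for any idle period shorter than 2^32 seconds, roughly 136 years.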