Annotation of src/sys/kern/subr_pool.c, Revision 1.72
1.72 ! thorpej 1: /* $NetBSD: subr_pool.c,v 1.71 2002/03/09 01:37:19 thorpej Exp $ */
1.1 pk 2:
3: /*-
1.43 thorpej 4: * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
1.1 pk 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 8: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9: * Simulation Facility, NASA Ames Research Center.
1.1 pk 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
1.13 christos 21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
1.1 pk 23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
1.64 lukem 39:
40: #include <sys/cdefs.h>
1.72 ! thorpej 41: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.71 2002/03/09 01:37:19 thorpej Exp $");
1.24 scottr 42:
1.25 thorpej 43: #include "opt_pool.h"
1.24 scottr 44: #include "opt_poollog.h"
1.28 thorpej 45: #include "opt_lockdebug.h"
1.1 pk 46:
47: #include <sys/param.h>
48: #include <sys/systm.h>
49: #include <sys/proc.h>
50: #include <sys/errno.h>
51: #include <sys/kernel.h>
52: #include <sys/malloc.h>
53: #include <sys/lock.h>
54: #include <sys/pool.h>
1.20 thorpej 55: #include <sys/syslog.h>
1.3 pk 56:
57: #include <uvm/uvm.h>
58:
1.1 pk 59: /*
60: * Pool resource management utility.
1.3 pk 61: *
62: * Memory is allocated in pages which are split into pieces according
63: * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
64: * in the pool structure and the individual pool items are on a linked list
65: * headed by `ph_itemlist' in each page header. The memory for building
66: * the page list is either taken from the allocated pages themselves (for
67: * small pool items) or taken from an internal pool of page headers (`phpool').
1.1 pk 68: */
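/*
 * Editorial sketch (not part of the original source): minimal use of
 * the interface described above, for a hypothetical "struct foo".
 * An interrupt-safe pool would also need spl protection around the
 * pool_get()/pool_put() calls.
 */
#if 0
static struct pool foo_pool;

void
foo_init(void)
{

	/* One pool item per struct foo; default back-end allocator. */
	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
}

struct foo *
foo_alloc(void)
{

	/* PR_WAITOK: sleep until an item can be provided. */
	return (pool_get(&foo_pool, PR_WAITOK));
}

void
foo_free(struct foo *fp)
{

	pool_put(&foo_pool, fp);
}
#endif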
69:
1.3 pk 70: /* List of all pools */
1.5 thorpej 71: TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
1.3 pk 72:
73: /* Private pool for page header structures */
74: static struct pool phpool;
75:
1.62 bjh21 76: #ifdef POOL_SUBPAGE
77: /* Pool of subpages for use by normal pools. */
78: static struct pool psppool;
79: #endif
80:
1.3 pk 81: /* # of seconds to retain page after last use */
82: int pool_inactive_time = 10;
83:
84: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 85: static struct pool *drainpp;
86:
87: /* This spin lock protects both pool_head and drainpp. */
88: struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
1.3 pk 89:
90: struct pool_item_header {
91: /* Page headers */
92: TAILQ_ENTRY(pool_item_header)
93: ph_pagelist; /* pool page list */
94: TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
95: LIST_ENTRY(pool_item_header)
96: ph_hashlist; /* Off-page page headers */
97: int ph_nmissing; /* # of chunks in use */
98: caddr_t ph_page; /* this page's address */
99: struct timeval ph_time; /* last referenced */
100: };
1.61 chs 101: TAILQ_HEAD(pool_pagelist,pool_item_header);
1.3 pk 102:
1.1 pk 103: struct pool_item {
1.3 pk 104: #ifdef DIAGNOSTIC
105: int pi_magic;
1.33 chs 106: #endif
1.25 thorpej 107: #define PI_MAGIC 0xdeadbeef
1.3 pk 108: /* Other entries use only this list entry */
109: TAILQ_ENTRY(pool_item) pi_list;
110: };
111:
1.25 thorpej 112: #define PR_HASH_INDEX(pp,addr) \
1.66 thorpej 113: (((u_long)(addr) >> (pp)->pr_alloc->pa_pageshift) & \
114: (PR_HASHTABSIZE - 1))
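/*
 * Editorial note: assuming 4 KB pool pages (pa_pageshift == 12) and a
 * PR_HASHTABSIZE of 8, an item at 0xc12345ab belongs to the page at
 * 0xc1234000, which hashes to bucket (0xc1234 & 7) == 4.
 */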
1.3 pk 115:
1.53 thorpej 116: #define POOL_NEEDS_CATCHUP(pp) \
117: ((pp)->pr_nitems < (pp)->pr_minitems)
118:
1.43 thorpej 119: /*
120: * Pool cache management.
121: *
122: * Pool caches provide a way for constructed objects to be cached by the
123: * pool subsystem. This can lead to performance improvements by avoiding
124: 	 * needless object construction/destruction; destruction is deferred
125: 	 * until absolutely necessary.
126: *
127: * Caches are grouped into cache groups. Each cache group references
128: * up to 16 constructed objects. When a cache allocates an object
129: * from the pool, it calls the object's constructor and places it into
130: * a cache group. When a cache group frees an object back to the pool,
131: * it first calls the object's destructor. This allows the object to
132: * persist in constructed form while freed to the cache.
133: *
134: * Multiple caches may exist for each pool. This allows a single
135: * object type to have multiple constructed forms. The pool references
136: * each cache, so that when a pool is drained by the pagedaemon, it can
137: * drain each individual cache as well. Each time a cache is drained,
138: * the most idle cache group is freed to the pool in its entirety.
139: *
140: 	 * Pool caches are laid on top of pools. By layering them, we can avoid
141: * the complexity of cache management for pools which would not benefit
142: * from it.
143: */
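/*
 * Editorial sketch (not part of the original source): layering a cache
 * over a pool so objects keep their constructed state between uses.
 * "struct foo", foo_ctor() and foo_dtor() are hypothetical.
 */
#if 0
static struct pool foo_pool;
static struct pool_cache foo_cache;

static int
foo_ctor(void *arg, void *object, int flags)
{
	struct foo *fp = object;

	/* Expensive one-time setup happens here, not on every get. */
	memset(fp, 0, sizeof(*fp));
	return (0);
}

static void
foo_dtor(void *arg, void *object)
{

	/* Undo whatever foo_ctor() set up. */
}

void
foo_init(void)
{

	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
}

/*
 * pool_cache_get() returns an already-constructed object when the
 * cache can satisfy the request; pool_cache_put() returns the object
 * to the cache without destructing it.
 */
struct foo *
foo_alloc(void)
{

	return (pool_cache_get(&foo_cache, PR_WAITOK));
}

void
foo_free(struct foo *fp)
{

	pool_cache_put(&foo_cache, fp);
}
#endif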
144:
145: /* The cache group pool. */
146: static struct pool pcgpool;
147:
148: /* The pool cache group. */
149: #define PCG_NOBJECTS 16
150: struct pool_cache_group {
151: TAILQ_ENTRY(pool_cache_group)
152: pcg_list; /* link in the pool cache's group list */
153: u_int pcg_avail; /* # available objects */
154: /* pointers to the objects */
155: void *pcg_objects[PCG_NOBJECTS];
156: };
1.3 pk 157:
1.43 thorpej 158: static void pool_cache_reclaim(struct pool_cache *);
1.3 pk 159:
1.42 thorpej 160: static int pool_catchup(struct pool *);
1.55 thorpej 161: static void pool_prime_page(struct pool *, caddr_t,
162: struct pool_item_header *);
1.66 thorpej 163:
164: void *pool_allocator_alloc(struct pool *, int);
165: void pool_allocator_free(struct pool *, void *);
1.3 pk 166:
1.42 thorpej 167: static void pool_print1(struct pool *, const char *,
168: void (*)(const char *, ...));
1.3 pk 169:
170: /*
1.52 thorpej 171: * Pool log entry. An array of these is allocated in pool_init().
1.3 pk 172: */
173: struct pool_log {
174: const char *pl_file;
175: long pl_line;
176: int pl_action;
1.25 thorpej 177: #define PRLOG_GET 1
178: #define PRLOG_PUT 2
1.3 pk 179: void *pl_addr;
1.1 pk 180: };
181:
1.3 pk 182: /* Number of entries in pool log buffers */
1.17 thorpej 183: #ifndef POOL_LOGSIZE
184: #define POOL_LOGSIZE 10
185: #endif
186:
187: int pool_logsize = POOL_LOGSIZE;
1.1 pk 188:
1.59 thorpej 189: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 190: static __inline void
191: pr_log(struct pool *pp, void *v, int action, const char *file, long line)
1.3 pk 192: {
193: int n = pp->pr_curlogentry;
194: struct pool_log *pl;
195:
1.20 thorpej 196: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 197: return;
198:
199: /*
200: * Fill in the current entry. Wrap around and overwrite
201: * the oldest entry if necessary.
202: */
203: pl = &pp->pr_log[n];
204: pl->pl_file = file;
205: pl->pl_line = line;
206: pl->pl_action = action;
207: pl->pl_addr = v;
208: if (++n >= pp->pr_logsize)
209: n = 0;
210: pp->pr_curlogentry = n;
211: }
212:
213: static void
1.42 thorpej 214: pr_printlog(struct pool *pp, struct pool_item *pi,
215: void (*pr)(const char *, ...))
1.3 pk 216: {
217: int i = pp->pr_logsize;
218: int n = pp->pr_curlogentry;
219:
1.20 thorpej 220: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 221: return;
222:
223: /*
224: * Print all entries in this pool's log.
225: */
226: while (i-- > 0) {
227: struct pool_log *pl = &pp->pr_log[n];
228: if (pl->pl_action != 0) {
1.25 thorpej 229: if (pi == NULL || pi == pl->pl_addr) {
230: (*pr)("\tlog entry %d:\n", i);
231: (*pr)("\t\taction = %s, addr = %p\n",
232: pl->pl_action == PRLOG_GET ? "get" : "put",
233: pl->pl_addr);
234: (*pr)("\t\tfile: %s at line %lu\n",
235: pl->pl_file, pl->pl_line);
236: }
1.3 pk 237: }
238: if (++n >= pp->pr_logsize)
239: n = 0;
240: }
241: }
1.25 thorpej 242:
1.42 thorpej 243: static __inline void
244: pr_enter(struct pool *pp, const char *file, long line)
1.25 thorpej 245: {
246:
1.34 thorpej 247: if (__predict_false(pp->pr_entered_file != NULL)) {
1.25 thorpej 248: printf("pool %s: reentrancy at file %s line %ld\n",
249: pp->pr_wchan, file, line);
250: printf(" previous entry at file %s line %ld\n",
251: pp->pr_entered_file, pp->pr_entered_line);
252: panic("pr_enter");
253: }
254:
255: pp->pr_entered_file = file;
256: pp->pr_entered_line = line;
257: }
258:
1.42 thorpej 259: static __inline void
260: pr_leave(struct pool *pp)
1.25 thorpej 261: {
262:
1.34 thorpej 263: if (__predict_false(pp->pr_entered_file == NULL)) {
1.25 thorpej 264: printf("pool %s not entered?\n", pp->pr_wchan);
265: panic("pr_leave");
266: }
267:
268: pp->pr_entered_file = NULL;
269: pp->pr_entered_line = 0;
270: }
271:
1.42 thorpej 272: static __inline void
273: pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
1.25 thorpej 274: {
275:
276: if (pp->pr_entered_file != NULL)
277: (*pr)("\n\tcurrently entered from file %s line %ld\n",
278: pp->pr_entered_file, pp->pr_entered_line);
279: }
1.3 pk 280: #else
1.25 thorpej 281: #define pr_log(pp, v, action, file, line)
282: #define pr_printlog(pp, pi, pr)
283: #define pr_enter(pp, file, line)
284: #define pr_leave(pp)
285: #define pr_enter_check(pp, pr)
1.59 thorpej 286: #endif /* POOL_DIAGNOSTIC */
1.3 pk 287:
288: /*
289: * Return the pool page header based on page address.
290: */
1.42 thorpej 291: static __inline struct pool_item_header *
292: pr_find_pagehead(struct pool *pp, caddr_t page)
1.3 pk 293: {
294: struct pool_item_header *ph;
295:
1.20 thorpej 296: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.3 pk 297: return ((struct pool_item_header *)(page + pp->pr_phoffset));
298:
299: for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
300: ph != NULL;
301: ph = LIST_NEXT(ph, ph_hashlist)) {
302: if (ph->ph_page == page)
303: return (ph);
304: }
305: return (NULL);
306: }
307:
308: /*
309: * Remove a page from the pool.
310: */
1.42 thorpej 311: static __inline void
1.61 chs 312: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
313: struct pool_pagelist *pq)
1.3 pk 314: {
1.61 chs 315: int s;
1.3 pk 316:
317: /*
1.7 thorpej 318: * If the page was idle, decrement the idle page count.
1.3 pk 319: */
1.6 thorpej 320: if (ph->ph_nmissing == 0) {
321: #ifdef DIAGNOSTIC
322: if (pp->pr_nidle == 0)
323: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 324: if (pp->pr_nitems < pp->pr_itemsperpage)
325: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 326: #endif
327: pp->pr_nidle--;
328: }
1.7 thorpej 329:
1.20 thorpej 330: pp->pr_nitems -= pp->pr_itemsperpage;
331:
1.7 thorpej 332: /*
1.61 chs 333: * Unlink a page from the pool and release it (or queue it for release).
1.7 thorpej 334: */
335: TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
1.61 chs 336: if (pq) {
337: TAILQ_INSERT_HEAD(pq, ph, ph_pagelist);
338: } else {
1.66 thorpej 339: pool_allocator_free(pp, ph->ph_page);
1.61 chs 340: if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
341: LIST_REMOVE(ph, ph_hashlist);
342: s = splhigh();
343: pool_put(&phpool, ph);
344: splx(s);
345: }
346: }
1.7 thorpej 347: pp->pr_npages--;
348: pp->pr_npagefree++;
1.6 thorpej 349:
1.3 pk 350: if (pp->pr_curpage == ph) {
351: /*
352: * Find a new non-empty page header, if any.
353: * Start search from the page head, to increase the
354: * chance for "high water" pages to be freed.
355: */
1.61 chs 356: TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
1.3 pk 357: if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
358: break;
359:
360: pp->pr_curpage = ph;
1.21 thorpej 361: }
1.3 pk 362: }
363:
364: /*
365: * Initialize the given pool resource structure.
366: *
367: * We export this routine to allow other kernel parts to declare
368: * static pools that must be initialized before malloc() is available.
369: */
370: void
1.42 thorpej 371: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.66 thorpej 372: const char *wchan, struct pool_allocator *palloc)
1.3 pk 373: {
1.16 briggs 374: int off, slack, i;
1.3 pk 375:
1.25 thorpej 376: #ifdef POOL_DIAGNOSTIC
377: /*
378: * Always log if POOL_DIAGNOSTIC is defined.
379: */
380: if (pool_logsize != 0)
381: flags |= PR_LOGGING;
382: #endif
383:
1.66 thorpej 384: #ifdef POOL_SUBPAGE
385: /*
386: * XXX We don't provide a real `nointr' back-end
387: * yet; all sub-pages come from a kmem back-end.
388: * maybe some day...
389: */
390: if (palloc == NULL) {
391: extern struct pool_allocator pool_allocator_kmem_subpage;
392: palloc = &pool_allocator_kmem_subpage;
393: }
1.3 pk 394: /*
1.66 thorpej 395: * We'll assume any user-specified back-end allocator
396: 	 * will deal with sub-pages, or simply doesn't care.
1.3 pk 397: */
1.66 thorpej 398: #else
399: if (palloc == NULL)
400: palloc = &pool_allocator_kmem;
401: #endif /* POOL_SUBPAGE */
402: if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
403: if (palloc->pa_pagesz == 0) {
1.62 bjh21 404: #ifdef POOL_SUBPAGE
1.66 thorpej 405: if (palloc == &pool_allocator_kmem)
406: palloc->pa_pagesz = PAGE_SIZE;
407: else
408: palloc->pa_pagesz = POOL_SUBPAGE;
1.62 bjh21 409: #else
1.66 thorpej 410: palloc->pa_pagesz = PAGE_SIZE;
411: #endif /* POOL_SUBPAGE */
412: }
413:
414: TAILQ_INIT(&palloc->pa_list);
415:
416: simple_lock_init(&palloc->pa_slock);
417: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
418: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
419: palloc->pa_flags |= PA_INITIALIZED;
1.4 thorpej 420: }
1.3 pk 421:
422: if (align == 0)
423: align = ALIGN(1);
1.14 thorpej 424:
425: if (size < sizeof(struct pool_item))
426: size = sizeof(struct pool_item);
1.3 pk 427:
1.35 pk 428: size = ALIGN(size);
1.66 thorpej 429: #ifdef DIAGNOSTIC
430: if (size > palloc->pa_pagesz)
1.35 pk 431: panic("pool_init: pool item size (%lu) too large",
432: (u_long)size);
1.66 thorpej 433: #endif
1.35 pk 434:
1.3 pk 435: /*
436: * Initialize the pool structure.
437: */
438: TAILQ_INIT(&pp->pr_pagelist);
1.43 thorpej 439: TAILQ_INIT(&pp->pr_cachelist);
1.3 pk 440: pp->pr_curpage = NULL;
441: pp->pr_npages = 0;
442: pp->pr_minitems = 0;
443: pp->pr_minpages = 0;
444: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 445: pp->pr_roflags = flags;
446: pp->pr_flags = 0;
1.35 pk 447: pp->pr_size = size;
1.3 pk 448: pp->pr_align = align;
449: pp->pr_wchan = wchan;
1.66 thorpej 450: pp->pr_alloc = palloc;
1.20 thorpej 451: pp->pr_nitems = 0;
452: pp->pr_nout = 0;
453: pp->pr_hardlimit = UINT_MAX;
454: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 455: pp->pr_hardlimit_ratecap.tv_sec = 0;
456: pp->pr_hardlimit_ratecap.tv_usec = 0;
457: pp->pr_hardlimit_warning_last.tv_sec = 0;
458: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 459: pp->pr_drain_hook = NULL;
460: pp->pr_drain_hook_arg = NULL;
1.3 pk 461:
462: /*
463: * Decide whether to put the page header off page to avoid
464: * wasting too large a part of the page. Off-page page headers
465: * go on a hash table, so we can match a returned item
466: * with its header based on the page address.
467: * We use 1/16 of the page size as the threshold (XXX: tune)
468: */
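	/*
	 * Editorial example: with a 4096-byte page the threshold is 256
	 * bytes, so a 128-byte item keeps its header in-page while a
	 * 1024-byte item gets an off-page header from phpool.
	 */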
1.66 thorpej 469: if (pp->pr_size < palloc->pa_pagesz/16) {
1.3 pk 470: /* Use the end of the page for the page header */
1.20 thorpej 471: pp->pr_roflags |= PR_PHINPAGE;
1.66 thorpej 472: pp->pr_phoffset = off = palloc->pa_pagesz -
473: ALIGN(sizeof(struct pool_item_header));
1.2 pk 474: } else {
1.3 pk 475: /* The page header will be taken from our page header pool */
476: pp->pr_phoffset = 0;
1.66 thorpej 477: off = palloc->pa_pagesz;
1.16 briggs 478: for (i = 0; i < PR_HASHTABSIZE; i++) {
479: LIST_INIT(&pp->pr_hashtab[i]);
480: }
1.2 pk 481: }
1.1 pk 482:
1.3 pk 483: /*
484: * Alignment is to take place at `ioff' within the item. This means
485: * we must reserve up to `align - 1' bytes on the page to allow
486: * appropriate positioning of each item.
487: *
488: * Silently enforce `0 <= ioff < align'.
489: */
490: pp->pr_itemoffset = ioff = ioff % align;
491: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 492: KASSERT(pp->pr_itemsperpage != 0);
1.3 pk 493:
494: /*
495: * Use the slack between the chunks and the page header
496: * for "cache coloring".
497: */
498: slack = off - pp->pr_itemsperpage * pp->pr_size;
499: pp->pr_maxcolor = (slack / align) * align;
500: pp->pr_curcolor = 0;
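	/*
	 * Editorial example: a 320-byte item with align == 64 and
	 * ioff == 0 on a 4096-byte page (off-page header, so off ==
	 * 4096) gives itemsperpage == 12, slack == 4096 - 12*320 == 256
	 * and maxcolor == 256; successive pages start their items at
	 * offsets 0, 64, 128, 192, 256 and then wrap back to 0.
	 */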
501:
502: pp->pr_nget = 0;
503: pp->pr_nfail = 0;
504: pp->pr_nput = 0;
505: pp->pr_npagealloc = 0;
506: pp->pr_npagefree = 0;
1.1 pk 507: pp->pr_hiwat = 0;
1.8 thorpej 508: pp->pr_nidle = 0;
1.3 pk 509:
1.59 thorpej 510: #ifdef POOL_DIAGNOSTIC
1.25 thorpej 511: if (flags & PR_LOGGING) {
512: if (kmem_map == NULL ||
513: (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
514: M_TEMP, M_NOWAIT)) == NULL)
1.20 thorpej 515: pp->pr_roflags &= ~PR_LOGGING;
1.3 pk 516: pp->pr_curlogentry = 0;
517: pp->pr_logsize = pool_logsize;
518: }
1.59 thorpej 519: #endif
1.25 thorpej 520:
521: pp->pr_entered_file = NULL;
522: pp->pr_entered_line = 0;
1.3 pk 523:
1.21 thorpej 524: simple_lock_init(&pp->pr_slock);
1.1 pk 525:
1.3 pk 526: /*
1.43 thorpej 527: * Initialize private page header pool and cache magazine pool if we
528: * haven't done so yet.
1.23 thorpej 529: * XXX LOCKING.
1.3 pk 530: */
531: if (phpool.pr_size == 0) {
1.62 bjh21 532: #ifdef POOL_SUBPAGE
533: pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
1.66 thorpej 534: "phpool", &pool_allocator_kmem);
1.62 bjh21 535: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
1.66 thorpej 536: PR_RECURSIVE, "psppool", &pool_allocator_kmem);
1.62 bjh21 537: #else
1.3 pk 538: pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
1.66 thorpej 539: 0, "phpool", NULL);
1.62 bjh21 540: #endif
1.43 thorpej 541: pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
1.66 thorpej 542: 0, "pcgpool", NULL);
1.1 pk 543: }
544:
1.23 thorpej 545: /* Insert into the list of all pools. */
546: simple_lock(&pool_head_slock);
547: TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
548: simple_unlock(&pool_head_slock);
1.66 thorpej 549:
550: /* Insert this into the list of pools using this allocator. */
551: simple_lock(&palloc->pa_slock);
552: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
553: simple_unlock(&palloc->pa_slock);
1.1 pk 554: }
555:
556: /*
557:  * De-commission a pool resource.
558: */
559: void
1.42 thorpej 560: pool_destroy(struct pool *pp)
1.1 pk 561: {
1.3 pk 562: struct pool_item_header *ph;
1.43 thorpej 563: struct pool_cache *pc;
564:
1.66 thorpej 565: /* Locking order: pool_allocator -> pool */
566: simple_lock(&pp->pr_alloc->pa_slock);
567: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
568: simple_unlock(&pp->pr_alloc->pa_slock);
569:
1.43 thorpej 570: /* Destroy all caches for this pool. */
571: while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
572: pool_cache_destroy(pc);
1.3 pk 573:
574: #ifdef DIAGNOSTIC
1.20 thorpej 575: if (pp->pr_nout != 0) {
1.25 thorpej 576: pr_printlog(pp, NULL, printf);
1.20 thorpej 577: panic("pool_destroy: pool busy: still out: %u\n",
578: pp->pr_nout);
1.3 pk 579: }
580: #endif
1.1 pk 581:
1.3 pk 582: /* Remove all pages */
1.70 thorpej 583: while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
584: pr_rmpage(pp, ph, NULL);
1.3 pk 585:
586: /* Remove from global pool list */
1.23 thorpej 587: simple_lock(&pool_head_slock);
1.3 pk 588: TAILQ_REMOVE(&pool_head, pp, pr_poollist);
1.61 chs 589: if (drainpp == pp) {
590: drainpp = NULL;
591: }
1.23 thorpej 592: simple_unlock(&pool_head_slock);
1.3 pk 593:
1.59 thorpej 594: #ifdef POOL_DIAGNOSTIC
1.20 thorpej 595: if ((pp->pr_roflags & PR_LOGGING) != 0)
1.3 pk 596: free(pp->pr_log, M_TEMP);
1.59 thorpej 597: #endif
1.1 pk 598: }
599:
1.68 thorpej 600: void
601: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
602: {
603:
604: /* XXX no locking -- must be used just after pool_init() */
605: #ifdef DIAGNOSTIC
606: if (pp->pr_drain_hook != NULL)
607: panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
608: #endif
609: pp->pr_drain_hook = fn;
610: pp->pr_drain_hook_arg = arg;
611: }
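/*
 * Editorial sketch (not part of the original source): registering a
 * drain hook right after pool_init().  When the pool hits its hard
 * limit or is reclaimed, the hook is asked to release items the
 * subsystem is hoarding.  foo_drain() and foo_flush_freelist() are
 * hypothetical.
 */
#if 0
static struct pool foo_pool;

static void
foo_drain(void *arg, int flags)
{
	struct foo_softc *sc = arg;

	/* Return privately cached items to the pool via pool_put(). */
	foo_flush_freelist(sc, (flags & PR_WAITOK) != 0);
}

void
foo_attach(struct foo_softc *sc)
{

	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
	pool_set_drain_hook(&foo_pool, foo_drain, sc);
}
#endif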
612:
1.55 thorpej 613: static __inline struct pool_item_header *
614: pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
615: {
616: struct pool_item_header *ph;
617: int s;
618:
619: LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
620:
621: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
622: ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
623: else {
624: s = splhigh();
625: ph = pool_get(&phpool, flags);
626: splx(s);
627: }
628:
629: return (ph);
630: }
1.1 pk 631:
632: /*
1.3 pk 633: * Grab an item from the pool; must be called at appropriate spl level
1.1 pk 634: */
1.3 pk 635: void *
1.59 thorpej 636: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 637: _pool_get(struct pool *pp, int flags, const char *file, long line)
1.56 sommerfe 638: #else
639: pool_get(struct pool *pp, int flags)
640: #endif
1.1 pk 641: {
642: struct pool_item *pi;
1.3 pk 643: struct pool_item_header *ph;
1.55 thorpej 644: void *v;
1.1 pk 645:
1.2 pk 646: #ifdef DIAGNOSTIC
1.37 sommerfe 647: if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
648: (flags & PR_WAITOK) != 0))
1.3 pk 649: panic("pool_get: must have NOWAIT");
1.58 thorpej 650:
651: #ifdef LOCKDEBUG
652: if (flags & PR_WAITOK)
653: simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
1.56 sommerfe 654: #endif
1.58 thorpej 655: #endif /* DIAGNOSTIC */
1.1 pk 656:
1.21 thorpej 657: simple_lock(&pp->pr_slock);
1.25 thorpej 658: pr_enter(pp, file, line);
1.20 thorpej 659:
660: startover:
661: /*
662: * Check to see if we've reached the hard limit. If we have,
663: * and we can wait, then wait until an item has been returned to
664: * the pool.
665: */
666: #ifdef DIAGNOSTIC
1.34 thorpej 667: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.25 thorpej 668: pr_leave(pp);
1.21 thorpej 669: simple_unlock(&pp->pr_slock);
1.20 thorpej 670: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
671: }
672: #endif
1.34 thorpej 673: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 674: if (pp->pr_drain_hook != NULL) {
675: /*
676: * Since the drain hook is going to free things
677: * back to the pool, unlock, call the hook, re-lock,
678: * and check the hardlimit condition again.
679: */
680: pr_leave(pp);
681: simple_unlock(&pp->pr_slock);
682: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
683: simple_lock(&pp->pr_slock);
684: pr_enter(pp, file, line);
685: if (pp->pr_nout < pp->pr_hardlimit)
686: goto startover;
687: }
688:
1.29 sommerfe 689: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 690: /*
691: * XXX: A warning isn't logged in this case. Should
692: * it be?
693: */
694: pp->pr_flags |= PR_WANTED;
1.25 thorpej 695: pr_leave(pp);
1.40 sommerfe 696: ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
1.25 thorpej 697: pr_enter(pp, file, line);
1.20 thorpej 698: goto startover;
699: }
1.31 thorpej 700:
701: /*
702: * Log a message that the hard limit has been hit.
703: */
704: if (pp->pr_hardlimit_warning != NULL &&
705: ratecheck(&pp->pr_hardlimit_warning_last,
706: &pp->pr_hardlimit_ratecap))
707: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 708:
709: pp->pr_nfail++;
710:
1.25 thorpej 711: pr_leave(pp);
1.21 thorpej 712: simple_unlock(&pp->pr_slock);
1.20 thorpej 713: return (NULL);
714: }
715:
1.3 pk 716: /*
717: * The convention we use is that if `curpage' is not NULL, then
718: * it points at a non-empty bucket. In particular, `curpage'
719: * never points at a page header which has PR_PHINPAGE set and
720: * has no items in its bucket.
721: */
1.20 thorpej 722: if ((ph = pp->pr_curpage) == NULL) {
723: #ifdef DIAGNOSTIC
724: if (pp->pr_nitems != 0) {
1.21 thorpej 725: simple_unlock(&pp->pr_slock);
1.20 thorpej 726: printf("pool_get: %s: curpage NULL, nitems %u\n",
727: pp->pr_wchan, pp->pr_nitems);
728: panic("pool_get: nitems inconsistent\n");
729: }
730: #endif
731:
1.21 thorpej 732: /*
733: * Call the back-end page allocator for more memory.
734: * Release the pool lock, as the back-end page allocator
735: * may block.
736: */
1.25 thorpej 737: pr_leave(pp);
1.21 thorpej 738: simple_unlock(&pp->pr_slock);
1.66 thorpej 739: v = pool_allocator_alloc(pp, flags);
1.55 thorpej 740: if (__predict_true(v != NULL))
741: ph = pool_alloc_item_header(pp, v, flags);
1.21 thorpej 742: simple_lock(&pp->pr_slock);
1.25 thorpej 743: pr_enter(pp, file, line);
1.15 pk 744:
1.55 thorpej 745: if (__predict_false(v == NULL || ph == NULL)) {
746: if (v != NULL)
1.66 thorpej 747: pool_allocator_free(pp, v);
1.55 thorpej 748:
1.21 thorpej 749: /*
1.55 thorpej 750: * We were unable to allocate a page or item
751: * header, but we released the lock during
752: * allocation, so perhaps items were freed
753: * back to the pool. Check for this case.
1.21 thorpej 754: */
755: if (pp->pr_curpage != NULL)
756: goto startover;
1.15 pk 757:
1.3 pk 758: if ((flags & PR_WAITOK) == 0) {
759: pp->pr_nfail++;
1.25 thorpej 760: pr_leave(pp);
1.21 thorpej 761: simple_unlock(&pp->pr_slock);
1.1 pk 762: return (NULL);
1.3 pk 763: }
764:
1.15 pk 765: /*
766: * Wait for items to be returned to this pool.
1.21 thorpej 767: *
1.20 thorpej 768: * XXX: maybe we should wake up once a second and
769: * try again?
1.15 pk 770: */
1.1 pk 771: pp->pr_flags |= PR_WANTED;
1.66 thorpej 772: /* PA_WANTED is already set on the allocator. */
1.25 thorpej 773: pr_leave(pp);
1.40 sommerfe 774: ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
1.25 thorpej 775: pr_enter(pp, file, line);
1.20 thorpej 776: goto startover;
1.1 pk 777: }
1.3 pk 778:
1.15 pk 779: /* We have more memory; add it to the pool */
1.55 thorpej 780: pool_prime_page(pp, v, ph);
1.15 pk 781: pp->pr_npagealloc++;
782:
1.20 thorpej 783: /* Start the allocation process over. */
784: goto startover;
1.3 pk 785: }
786:
1.34 thorpej 787: if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
1.25 thorpej 788: pr_leave(pp);
1.21 thorpej 789: simple_unlock(&pp->pr_slock);
1.3 pk 790: panic("pool_get: %s: page empty", pp->pr_wchan);
1.21 thorpej 791: }
1.20 thorpej 792: #ifdef DIAGNOSTIC
1.34 thorpej 793: if (__predict_false(pp->pr_nitems == 0)) {
1.25 thorpej 794: pr_leave(pp);
1.21 thorpej 795: simple_unlock(&pp->pr_slock);
1.20 thorpej 796: printf("pool_get: %s: items on itemlist, nitems %u\n",
797: pp->pr_wchan, pp->pr_nitems);
798: panic("pool_get: nitems inconsistent\n");
799: }
1.65 enami 800: #endif
1.56 sommerfe 801:
1.65 enami 802: #ifdef POOL_DIAGNOSTIC
1.3 pk 803: pr_log(pp, v, PRLOG_GET, file, line);
1.65 enami 804: #endif
1.3 pk 805:
1.65 enami 806: #ifdef DIAGNOSTIC
1.34 thorpej 807: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
1.25 thorpej 808: pr_printlog(pp, pi, printf);
1.3 pk 809: panic("pool_get(%s): free list modified: magic=%x; page %p;"
810: " item addr %p\n",
811: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
812: }
813: #endif
814:
815: /*
816: * Remove from item list.
817: */
818: TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
1.20 thorpej 819: pp->pr_nitems--;
820: pp->pr_nout++;
1.6 thorpej 821: if (ph->ph_nmissing == 0) {
822: #ifdef DIAGNOSTIC
1.34 thorpej 823: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 824: panic("pool_get: nidle inconsistent");
825: #endif
826: pp->pr_nidle--;
827: }
1.3 pk 828: ph->ph_nmissing++;
829: if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
1.21 thorpej 830: #ifdef DIAGNOSTIC
1.34 thorpej 831: if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
1.25 thorpej 832: pr_leave(pp);
1.21 thorpej 833: simple_unlock(&pp->pr_slock);
834: panic("pool_get: %s: nmissing inconsistent",
835: pp->pr_wchan);
836: }
837: #endif
1.3 pk 838: /*
839: * Find a new non-empty page header, if any.
840: * Start search from the page head, to increase
841: * the chance for "high water" pages to be freed.
842: *
1.21 thorpej 843: * Migrate empty pages to the end of the list. This
844: * will speed the update of curpage as pages become
845: * idle. Empty pages intermingled with idle pages
846: 		 * are no big deal. As soon as a page becomes un-empty,
847: * it will move back to the head of the list.
1.3 pk 848: */
849: TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
1.21 thorpej 850: TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
1.61 chs 851: TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
1.3 pk 852: if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
853: break;
854:
855: pp->pr_curpage = ph;
1.1 pk 856: }
1.3 pk 857:
858: pp->pr_nget++;
1.20 thorpej 859:
860: /*
861: * If we have a low water mark and we are now below that low
862: * water mark, add more items to the pool.
863: */
1.53 thorpej 864: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 865: /*
866: * XXX: Should we log a warning? Should we set up a timeout
867: * to try again in a second or so? The latter could break
868: * a caller's assumptions about interrupt protection, etc.
869: */
870: }
871:
1.25 thorpej 872: pr_leave(pp);
1.21 thorpej 873: simple_unlock(&pp->pr_slock);
1.1 pk 874: return (v);
875: }
876:
877: /*
1.43 thorpej 878: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 879: */
1.43 thorpej 880: static void
1.56 sommerfe 881: pool_do_put(struct pool *pp, void *v)
1.1 pk 882: {
883: struct pool_item *pi = v;
1.3 pk 884: struct pool_item_header *ph;
885: caddr_t page;
1.21 thorpej 886: int s;
1.3 pk 887:
1.61 chs 888: LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
889:
1.66 thorpej 890: page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
1.1 pk 891:
1.30 thorpej 892: #ifdef DIAGNOSTIC
1.34 thorpej 893: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 894: printf("pool %s: putting with none out\n",
895: pp->pr_wchan);
896: panic("pool_put");
897: }
898: #endif
1.3 pk 899:
1.34 thorpej 900: if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
1.25 thorpej 901: pr_printlog(pp, NULL, printf);
1.3 pk 902: panic("pool_put: %s: page header missing", pp->pr_wchan);
903: }
1.28 thorpej 904:
905: #ifdef LOCKDEBUG
906: /*
907: * Check if we're freeing a locked simple lock.
908: */
909: simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
910: #endif
1.3 pk 911:
912: /*
913: * Return to item list.
914: */
1.2 pk 915: #ifdef DIAGNOSTIC
1.3 pk 916: pi->pi_magic = PI_MAGIC;
917: #endif
1.32 chs 918: #ifdef DEBUG
919: {
920: int i, *ip = v;
921:
922: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
923: *ip++ = PI_MAGIC;
924: }
925: }
926: #endif
927:
1.3 pk 928: TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
929: ph->ph_nmissing--;
930: pp->pr_nput++;
1.20 thorpej 931: pp->pr_nitems++;
932: pp->pr_nout--;
1.3 pk 933:
934: /* Cancel "pool empty" condition if it exists */
935: if (pp->pr_curpage == NULL)
936: pp->pr_curpage = ph;
937:
938: if (pp->pr_flags & PR_WANTED) {
939: pp->pr_flags &= ~PR_WANTED;
1.15 pk 940: if (ph->ph_nmissing == 0)
941: pp->pr_nidle++;
1.3 pk 942: wakeup((caddr_t)pp);
943: return;
944: }
945:
946: /*
1.21 thorpej 947: * If this page is now complete, do one of two things:
948: *
949: * (1) If we have more pages than the page high water
950: * mark, free the page back to the system.
951: *
952: * (2) Move it to the end of the page list, so that
953: * we minimize our chances of fragmenting the
954: * pool. Idle pages migrate to the end (along with
955: * completely empty pages, so that we find un-empty
956: * pages more quickly when we update curpage) of the
957: * list so they can be more easily swept up by
958: * the pagedaemon when pages are scarce.
1.3 pk 959: */
960: if (ph->ph_nmissing == 0) {
1.6 thorpej 961: pp->pr_nidle++;
1.71 thorpej 962: if (pp->pr_npages > pp->pr_maxpages ||
963: (pp->pr_alloc->pa_flags & PA_WANT) != 0) {
1.61 chs 964: pr_rmpage(pp, ph, NULL);
1.3 pk 965: } else {
966: TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
967: TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
968:
1.21 thorpej 969: /*
970: * Update the timestamp on the page. A page must
971: * be idle for some period of time before it can
972: * be reclaimed by the pagedaemon. This minimizes
973: * ping-pong'ing for memory.
974: */
975: s = splclock();
976: ph->ph_time = mono_time;
977: splx(s);
978:
979: /*
980: * Update the current page pointer. Just look for
981: * the first page with any free items.
982: *
983: * XXX: Maybe we want an option to look for the
984: * page with the fewest available items, to minimize
985: * fragmentation?
986: */
1.61 chs 987: TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
1.3 pk 988: if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
989: break;
1.1 pk 990:
1.3 pk 991: pp->pr_curpage = ph;
1.1 pk 992: }
993: }
1.21 thorpej 994: /*
995: * If the page has just become un-empty, move it to the head of
996: * the list, and make it the current page. The next allocation
997: * will get the item from this page, instead of further fragmenting
998: * the pool.
999: */
1000: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1001: TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
1002: TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
1003: pp->pr_curpage = ph;
1004: }
1.43 thorpej 1005: }
1006:
1007: /*
1008: * Return resource to the pool; must be called at appropriate spl level
1009: */
1.59 thorpej 1010: #ifdef POOL_DIAGNOSTIC
1.43 thorpej 1011: void
1012: _pool_put(struct pool *pp, void *v, const char *file, long line)
1013: {
1014:
1015: simple_lock(&pp->pr_slock);
1016: pr_enter(pp, file, line);
1017:
1.56 sommerfe 1018: pr_log(pp, v, PRLOG_PUT, file, line);
1019:
1020: pool_do_put(pp, v);
1.21 thorpej 1021:
1.25 thorpej 1022: pr_leave(pp);
1.21 thorpej 1023: simple_unlock(&pp->pr_slock);
1.1 pk 1024: }
1.57 sommerfe 1025: #undef pool_put
1.59 thorpej 1026: #endif /* POOL_DIAGNOSTIC */
1.1 pk 1027:
1.56 sommerfe 1028: void
1029: pool_put(struct pool *pp, void *v)
1030: {
1031:
1032: simple_lock(&pp->pr_slock);
1033:
1034: pool_do_put(pp, v);
1035:
1036: simple_unlock(&pp->pr_slock);
1037: }
1.57 sommerfe 1038:
1.59 thorpej 1039: #ifdef POOL_DIAGNOSTIC
1.57 sommerfe 1040: #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1.56 sommerfe 1041: #endif
1.55 thorpej 1042:
1043: /*
1.3 pk 1044: * Add a page worth of items to the pool.
1.21 thorpej 1045: *
1046: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1047: */
1.55 thorpej 1048: static void
1049: pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
1.3 pk 1050: {
1051: struct pool_item *pi;
1052: caddr_t cp = storage;
1053: unsigned int align = pp->pr_align;
1054: unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1055: int n;
1.36 pk 1056:
1.66 thorpej 1057: #ifdef DIAGNOSTIC
1058: if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36 pk 1059: panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66 thorpej 1060: #endif
1.3 pk 1061:
1.55 thorpej 1062: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1.3 pk 1063: LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
1.55 thorpej 1064: ph, ph_hashlist);
1.3 pk 1065:
1066: /*
1067: * Insert page header.
1068: */
1069: TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
1070: TAILQ_INIT(&ph->ph_itemlist);
1071: ph->ph_page = storage;
1072: ph->ph_nmissing = 0;
1.21 thorpej 1073: memset(&ph->ph_time, 0, sizeof(ph->ph_time));
1.3 pk 1074:
1.6 thorpej 1075: pp->pr_nidle++;
1076:
1.3 pk 1077: /*
1078: * Color this page.
1079: */
1080: cp = (caddr_t)(cp + pp->pr_curcolor);
1081: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1082: pp->pr_curcolor = 0;
1083:
1084: /*
1085: 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1086: */
1087: if (ioff != 0)
1088: cp = (caddr_t)(cp + (align - ioff));
1089:
1090: /*
1091: * Insert remaining chunks on the bucket list.
1092: */
1093: n = pp->pr_itemsperpage;
1.20 thorpej 1094: pp->pr_nitems += n;
1.3 pk 1095:
1096: while (n--) {
1097: pi = (struct pool_item *)cp;
1098:
1099: /* Insert on page list */
1100: TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
1101: #ifdef DIAGNOSTIC
1102: pi->pi_magic = PI_MAGIC;
1103: #endif
1104: cp = (caddr_t)(cp + pp->pr_size);
1105: }
1106:
1107: /*
1108: * If the pool was depleted, point at the new page.
1109: */
1110: if (pp->pr_curpage == NULL)
1111: pp->pr_curpage = ph;
1112:
1113: if (++pp->pr_npages > pp->pr_hiwat)
1114: pp->pr_hiwat = pp->pr_npages;
1115: }
1116:
1.20 thorpej 1117: /*
1.52 thorpej 1118: * Used by pool_get() when nitems drops below the low water mark. This
1119:  * is used to catch nitems up with the low water mark.
1.20 thorpej 1120: *
1.21 thorpej 1121: * Note 1, we never wait for memory here, we let the caller decide what to do.
1.20 thorpej 1122: *
1123: * Note 2, this doesn't work with static pools.
1124: *
1125: * Note 3, we must be called with the pool already locked, and we return
1126: * with it locked.
1127: */
1128: static int
1.42 thorpej 1129: pool_catchup(struct pool *pp)
1.20 thorpej 1130: {
1.55 thorpej 1131: struct pool_item_header *ph;
1.20 thorpej 1132: caddr_t cp;
1133: int error = 0;
1134:
1.54 thorpej 1135: while (POOL_NEEDS_CATCHUP(pp)) {
1.20 thorpej 1136: /*
1.21 thorpej 1137: * Call the page back-end allocator for more memory.
1138: *
1139: * XXX: We never wait, so should we bother unlocking
1140: * the pool descriptor?
1.20 thorpej 1141: */
1.21 thorpej 1142: simple_unlock(&pp->pr_slock);
1.66 thorpej 1143: cp = pool_allocator_alloc(pp, PR_NOWAIT);
1.55 thorpej 1144: if (__predict_true(cp != NULL))
1145: ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1.21 thorpej 1146: simple_lock(&pp->pr_slock);
1.55 thorpej 1147: if (__predict_false(cp == NULL || ph == NULL)) {
1148: if (cp != NULL)
1.66 thorpej 1149: pool_allocator_free(pp, cp);
1.20 thorpej 1150: error = ENOMEM;
1151: break;
1152: }
1.55 thorpej 1153: pool_prime_page(pp, cp, ph);
1.26 thorpej 1154: pp->pr_npagealloc++;
1.20 thorpej 1155: }
1156:
1157: return (error);
1158: }
1159:
1.3 pk 1160: void
1.42 thorpej 1161: pool_setlowat(struct pool *pp, int n)
1.3 pk 1162: {
1.20 thorpej 1163: int error;
1.15 pk 1164:
1.21 thorpej 1165: simple_lock(&pp->pr_slock);
1166:
1.3 pk 1167: pp->pr_minitems = n;
1.15 pk 1168: pp->pr_minpages = (n == 0)
1169: ? 0
1.18 thorpej 1170: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1171:
1172: /* Make sure we're caught up with the newly-set low water mark. */
1.53 thorpej 1173: 	if (POOL_NEEDS_CATCHUP(pp) && (error = pool_catchup(pp)) != 0) {
1.20 thorpej 1174: /*
1175: * XXX: Should we log a warning? Should we set up a timeout
1176: * to try again in a second or so? The latter could break
1177: * a caller's assumptions about interrupt protection, etc.
1178: */
1179: }
1.21 thorpej 1180:
1181: simple_unlock(&pp->pr_slock);
1.3 pk 1182: }
1183:
1184: void
1.42 thorpej 1185: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1186: {
1.15 pk 1187:
1.21 thorpej 1188: simple_lock(&pp->pr_slock);
1189:
1.15 pk 1190: pp->pr_maxpages = (n == 0)
1191: ? 0
1.18 thorpej 1192: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1193:
1194: simple_unlock(&pp->pr_slock);
1.3 pk 1195: }
1196:
1.20 thorpej 1197: void
1.42 thorpej 1198: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1199: {
1200:
1.21 thorpej 1201: simple_lock(&pp->pr_slock);
1.20 thorpej 1202:
1203: pp->pr_hardlimit = n;
1204: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1205: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1206: pp->pr_hardlimit_warning_last.tv_sec = 0;
1207: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1208:
1209: /*
1.21 thorpej 1210: * In-line version of pool_sethiwat(), because we don't want to
1211: * release the lock.
1.20 thorpej 1212: */
1213: pp->pr_maxpages = (n == 0)
1214: ? 0
1215: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1216:
1217: simple_unlock(&pp->pr_slock);
1.20 thorpej 1218: }
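/*
 * Editorial sketch (not part of the original source): typical tuning
 * calls, made once after pool_init().  The numbers are illustrative.
 */
#if 0
	/* Keep at least 16 items allocated at all times. */
	pool_setlowat(&foo_pool, 16);
	/* Let pool_reclaim() free idle pages beyond 128 items' worth. */
	pool_sethiwat(&foo_pool, 128);
	/* Refuse to hand out more than 1024 items; warn once a minute. */
	pool_sethardlimit(&foo_pool, 1024,
	    "WARNING: foo_pool limit reached", 60);
#endif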
1.3 pk 1219:
1220: /*
1221: * Release all complete pages that have not been used recently.
1222: */
1.66 thorpej 1223: int
1.59 thorpej 1224: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 1225: _pool_reclaim(struct pool *pp, const char *file, long line)
1.56 sommerfe 1226: #else
1227: pool_reclaim(struct pool *pp)
1228: #endif
1.3 pk 1229: {
1230: struct pool_item_header *ph, *phnext;
1.43 thorpej 1231: struct pool_cache *pc;
1.21 thorpej 1232: struct timeval curtime;
1.61 chs 1233: struct pool_pagelist pq;
1.21 thorpej 1234: int s;
1.3 pk 1235:
1.68 thorpej 1236: if (pp->pr_drain_hook != NULL) {
1237: /*
1238: * The drain hook must be called with the pool unlocked.
1239: */
1240: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1241: }
1242:
1.21 thorpej 1243: if (simple_lock_try(&pp->pr_slock) == 0)
1.66 thorpej 1244: return (0);
1.25 thorpej 1245: pr_enter(pp, file, line);
1.68 thorpej 1246:
1.61 chs 1247: TAILQ_INIT(&pq);
1.3 pk 1248:
1.43 thorpej 1249: /*
1250: * Reclaim items from the pool's caches.
1251: */
1.61 chs 1252: TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1.43 thorpej 1253: pool_cache_reclaim(pc);
1254:
1.21 thorpej 1255: s = splclock();
1256: curtime = mono_time;
1257: splx(s);
1258:
1.3 pk 1259: for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
1260: phnext = TAILQ_NEXT(ph, ph_pagelist);
1261:
1262: /* Check our minimum page claim */
1263: if (pp->pr_npages <= pp->pr_minpages)
1264: break;
1265:
1266: if (ph->ph_nmissing == 0) {
1267: struct timeval diff;
1268: timersub(&curtime, &ph->ph_time, &diff);
1269: if (diff.tv_sec < pool_inactive_time)
1270: continue;
1.21 thorpej 1271:
1272: /*
1273: * If freeing this page would put us below
1274: * the low water mark, stop now.
1275: */
1276: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1277: pp->pr_minitems)
1278: break;
1279:
1.61 chs 1280: pr_rmpage(pp, ph, &pq);
1.3 pk 1281: }
1282: }
1283:
1.25 thorpej 1284: pr_leave(pp);
1.21 thorpej 1285: simple_unlock(&pp->pr_slock);
1.66 thorpej 1286: if (TAILQ_EMPTY(&pq))
1287: return (0);
1288:
1.61 chs 1289: while ((ph = TAILQ_FIRST(&pq)) != NULL) {
1290: TAILQ_REMOVE(&pq, ph, ph_pagelist);
1.66 thorpej 1291: pool_allocator_free(pp, ph->ph_page);
1.61 chs 1292: if (pp->pr_roflags & PR_PHINPAGE) {
1293: continue;
1294: }
1295: LIST_REMOVE(ph, ph_hashlist);
1296: s = splhigh();
1297: pool_put(&phpool, ph);
1298: splx(s);
1299: }
1.66 thorpej 1300:
1301: return (1);
1.3 pk 1302: }
1303:
1304: /*
1305: * Drain pools, one at a time.
1.21 thorpej 1306: *
1307: * Note, we must never be called from an interrupt context.
1.3 pk 1308: */
1309: void
1.42 thorpej 1310: pool_drain(void *arg)
1.3 pk 1311: {
1312: struct pool *pp;
1.23 thorpej 1313: int s;
1.3 pk 1314:
1.61 chs 1315: pp = NULL;
1.49 thorpej 1316: s = splvm();
1.23 thorpej 1317: simple_lock(&pool_head_slock);
1.61 chs 1318: if (drainpp == NULL) {
1319: drainpp = TAILQ_FIRST(&pool_head);
1320: }
1321: if (drainpp) {
1322: pp = drainpp;
1323: drainpp = TAILQ_NEXT(pp, pr_poollist);
1324: }
1325: simple_unlock(&pool_head_slock);
1.63 chs 1326: pool_reclaim(pp);
1.61 chs 1327: splx(s);
1.3 pk 1328: }
1329:
1330: /*
1331: * Diagnostic helpers.
1332: */
1333: void
1.42 thorpej 1334: pool_print(struct pool *pp, const char *modif)
1.21 thorpej 1335: {
1336: int s;
1337:
1.49 thorpej 1338: s = splvm();
1.25 thorpej 1339: if (simple_lock_try(&pp->pr_slock) == 0) {
1340: printf("pool %s is locked; try again later\n",
1341: pp->pr_wchan);
1342: splx(s);
1343: return;
1344: }
1345: pool_print1(pp, modif, printf);
1.21 thorpej 1346: simple_unlock(&pp->pr_slock);
1347: splx(s);
1348: }
1349:
1.25 thorpej 1350: void
1.42 thorpej 1351: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1352: {
1353: int didlock = 0;
1354:
1355: if (pp == NULL) {
1356: (*pr)("Must specify a pool to print.\n");
1357: return;
1358: }
1359:
1360: /*
1361: * Called from DDB; interrupts should be blocked, and all
1362: * other processors should be paused. We can skip locking
1363: * the pool in this case.
1364: *
1365: * We do a simple_lock_try() just to print the lock
1366: * status, however.
1367: */
1368:
1369: if (simple_lock_try(&pp->pr_slock) == 0)
1370: (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1371: else
1372: didlock = 1;
1373:
1374: pool_print1(pp, modif, pr);
1375:
1376: if (didlock)
1377: simple_unlock(&pp->pr_slock);
1378: }
1379:
1.21 thorpej 1380: static void
1.42 thorpej 1381: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1382: {
1.25 thorpej 1383: struct pool_item_header *ph;
1.44 thorpej 1384: struct pool_cache *pc;
1385: struct pool_cache_group *pcg;
1.25 thorpej 1386: #ifdef DIAGNOSTIC
1387: struct pool_item *pi;
1388: #endif
1.44 thorpej 1389: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1390: char c;
1391:
1392: while ((c = *modif++) != '\0') {
1393: if (c == 'l')
1394: print_log = 1;
1395: if (c == 'p')
1396: print_pagelist = 1;
1.44 thorpej 1397: if (c == 'c')
1398: print_cache = 1;
1400: }
1401:
1402: (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1403: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1404: pp->pr_roflags);
1.66 thorpej 1405: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1406: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1407: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1408: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1409: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1410:
1411: (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1412: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1413: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1414: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1415:
1416: if (print_pagelist == 0)
1417: goto skip_pagelist;
1418:
1419: if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
1420: (*pr)("\n\tpage list:\n");
1421: for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
1422: (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1423: ph->ph_page, ph->ph_nmissing,
1424: (u_long)ph->ph_time.tv_sec,
1425: (u_long)ph->ph_time.tv_usec);
1426: #ifdef DIAGNOSTIC
1.61 chs 1427: TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1.25 thorpej 1428: if (pi->pi_magic != PI_MAGIC) {
1429: (*pr)("\t\t\titem %p, magic 0x%x\n",
1430: pi, pi->pi_magic);
1431: }
1432: }
1433: #endif
1434: }
1435: if (pp->pr_curpage == NULL)
1436: (*pr)("\tno current page\n");
1437: else
1438: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1439:
1440: skip_pagelist:
1441:
1442: if (print_log == 0)
1443: goto skip_log;
1444:
1445: (*pr)("\n");
1446: if ((pp->pr_roflags & PR_LOGGING) == 0)
1447: (*pr)("\tno log\n");
1448: else
1449: pr_printlog(pp, NULL, pr);
1.3 pk 1450:
1.25 thorpej 1451: skip_log:
1.44 thorpej 1452:
1453: if (print_cache == 0)
1454: goto skip_cache;
1455:
1.61 chs 1456: TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1.44 thorpej 1457: (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
1458: pc->pc_allocfrom, pc->pc_freeto);
1.48 thorpej 1459: (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1460: pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1.61 chs 1461: TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.44 thorpej 1462: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
1463: for (i = 0; i < PCG_NOBJECTS; i++)
1464: (*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
1465: }
1466: }
1467:
1468: skip_cache:
1.3 pk 1469:
1.25 thorpej 1470: pr_enter_check(pp, pr);
1.3 pk 1471: }
1472:
1473: int
1.42 thorpej 1474: pool_chk(struct pool *pp, const char *label)
1.3 pk 1475: {
1476: struct pool_item_header *ph;
1477: int r = 0;
1478:
1.21 thorpej 1479: simple_lock(&pp->pr_slock);
1.3 pk 1480:
1.61 chs 1481: TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) {
1.3 pk 1482: struct pool_item *pi;
1483: int n;
1484: caddr_t page;
1485:
1.66 thorpej 1486: page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
1.20 thorpej 1487: if (page != ph->ph_page &&
1488: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1.3 pk 1489: if (label != NULL)
1490: printf("%s: ", label);
1.16 briggs 1491: printf("pool(%p:%s): page inconsistency: page %p;"
1492: " at page head addr %p (p %p)\n", pp,
1.3 pk 1493: pp->pr_wchan, ph->ph_page,
1494: ph, page);
1495: r++;
1496: goto out;
1497: }
1498:
1499: for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1500: pi != NULL;
1501: pi = TAILQ_NEXT(pi,pi_list), n++) {
1502:
1503: #ifdef DIAGNOSTIC
1504: if (pi->pi_magic != PI_MAGIC) {
1505: if (label != NULL)
1506: printf("%s: ", label);
1507: printf("pool(%s): free list modified: magic=%x;"
1508: " page %p; item ordinal %d;"
1509: " addr %p (p %p)\n",
1510: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1511: n, pi, page);
1512: panic("pool");
1513: }
1514: #endif
1.66 thorpej 1515: page =
1516: (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
1.3 pk 1517: if (page == ph->ph_page)
1518: continue;
1519:
1520: if (label != NULL)
1521: printf("%s: ", label);
1.16 briggs 1522: printf("pool(%p:%s): page inconsistency: page %p;"
1523: " item ordinal %d; addr %p (p %p)\n", pp,
1.3 pk 1524: pp->pr_wchan, ph->ph_page,
1525: n, pi, page);
1526: r++;
1527: goto out;
1528: }
1529: }
1530: out:
1.21 thorpej 1531: simple_unlock(&pp->pr_slock);
1.3 pk 1532: return (r);
1.43 thorpej 1533: }
1534:
1535: /*
1536: * pool_cache_init:
1537: *
1538: * Initialize a pool cache.
1539: *
1540: * NOTE: If the pool must be protected from interrupts, we expect
1541: * to be called at the appropriate interrupt priority level.
1542: */
1543: void
1544: pool_cache_init(struct pool_cache *pc, struct pool *pp,
1545: int (*ctor)(void *, void *, int),
1546: void (*dtor)(void *, void *),
1547: void *arg)
1548: {
1549:
1550: TAILQ_INIT(&pc->pc_grouplist);
1551: simple_lock_init(&pc->pc_slock);
1552:
1553: pc->pc_allocfrom = NULL;
1554: pc->pc_freeto = NULL;
1555: pc->pc_pool = pp;
1556:
1557: pc->pc_ctor = ctor;
1558: pc->pc_dtor = dtor;
1559: pc->pc_arg = arg;
1560:
1.48 thorpej 1561: pc->pc_hits = 0;
1562: pc->pc_misses = 0;
1563:
1564: pc->pc_ngroups = 0;
1565:
1566: pc->pc_nitems = 0;
1567:
1.43 thorpej 1568: simple_lock(&pp->pr_slock);
1569: TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
1570: simple_unlock(&pp->pr_slock);
1571: }
1572:
1573: /*
1574: * pool_cache_destroy:
1575: *
1576: * Destroy a pool cache.
1577: */
1578: void
1579: pool_cache_destroy(struct pool_cache *pc)
1580: {
1581: struct pool *pp = pc->pc_pool;
1582:
1583: /* First, invalidate the entire cache. */
1584: pool_cache_invalidate(pc);
1585:
1586: /* ...and remove it from the pool's cache list. */
1587: simple_lock(&pp->pr_slock);
1588: TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
1589: simple_unlock(&pp->pr_slock);
1590: }
1591:
1592: static __inline void *
1593: pcg_get(struct pool_cache_group *pcg)
1594: {
1595: void *object;
1596: u_int idx;
1597:
1598: KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1.45 thorpej 1599: KASSERT(pcg->pcg_avail != 0);
1.43 thorpej 1600: idx = --pcg->pcg_avail;
1601:
1602: KASSERT(pcg->pcg_objects[idx] != NULL);
1603: object = pcg->pcg_objects[idx];
1604: pcg->pcg_objects[idx] = NULL;
1605:
1606: return (object);
1607: }
1608:
1609: static __inline void
1610: pcg_put(struct pool_cache_group *pcg, void *object)
1611: {
1612: u_int idx;
1613:
1614: KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1615: idx = pcg->pcg_avail++;
1616:
1617: KASSERT(pcg->pcg_objects[idx] == NULL);
1618: pcg->pcg_objects[idx] = object;
1619: }
1620:
1621: /*
1622: * pool_cache_get:
1623: *
1624: * Get an object from a pool cache.
1625: */
1626: void *
1627: pool_cache_get(struct pool_cache *pc, int flags)
1628: {
1629: struct pool_cache_group *pcg;
1630: void *object;
1.58 thorpej 1631:
1632: #ifdef LOCKDEBUG
1633: if (flags & PR_WAITOK)
1634: simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
1635: #endif
1.43 thorpej 1636:
1637: simple_lock(&pc->pc_slock);
1638:
1639: if ((pcg = pc->pc_allocfrom) == NULL) {
1.61 chs 1640: TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.43 thorpej 1641: if (pcg->pcg_avail != 0) {
1642: pc->pc_allocfrom = pcg;
1643: goto have_group;
1644: }
1645: }
1646:
1647: /*
1648: * No groups with any available objects. Allocate
1649: * a new object, construct it, and return it to
1650: * the caller. We will allocate a group, if necessary,
1651: * when the object is freed back to the cache.
1652: */
1.48 thorpej 1653: pc->pc_misses++;
1.43 thorpej 1654: simple_unlock(&pc->pc_slock);
1655: object = pool_get(pc->pc_pool, flags);
1656: if (object != NULL && pc->pc_ctor != NULL) {
1657: if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1658: pool_put(pc->pc_pool, object);
1659: return (NULL);
1660: }
1661: }
1662: return (object);
1663: }
1664:
1665: have_group:
1.48 thorpej 1666: pc->pc_hits++;
1667: pc->pc_nitems--;
1.43 thorpej 1668: object = pcg_get(pcg);
1669:
1670: if (pcg->pcg_avail == 0)
1671: pc->pc_allocfrom = NULL;
1.45 thorpej 1672:
1.43 thorpej 1673: simple_unlock(&pc->pc_slock);
1674:
1675: return (object);
1676: }
1677:
1678: /*
1679: * pool_cache_put:
1680: *
1681: * Put an object back to the pool cache.
1682: */
1683: void
1684: pool_cache_put(struct pool_cache *pc, void *object)
1685: {
1686: struct pool_cache_group *pcg;
1.60 thorpej 1687: int s;
1.43 thorpej 1688:
1689: simple_lock(&pc->pc_slock);
1690:
1691: if ((pcg = pc->pc_freeto) == NULL) {
1.61 chs 1692: TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.43 thorpej 1693: if (pcg->pcg_avail != PCG_NOBJECTS) {
1694: pc->pc_freeto = pcg;
1695: goto have_group;
1696: }
1697: }
1698:
1699: /*
1700: * No empty groups to free the object to. Attempt to
1.47 thorpej 1701: * allocate one.
1.43 thorpej 1702: */
1.47 thorpej 1703: simple_unlock(&pc->pc_slock);
1.60 thorpej 1704: s = splvm();
1.43 thorpej 1705: pcg = pool_get(&pcgpool, PR_NOWAIT);
1.60 thorpej 1706: splx(s);
1.43 thorpej 1707: if (pcg != NULL) {
1708: memset(pcg, 0, sizeof(*pcg));
1.47 thorpej 1709: simple_lock(&pc->pc_slock);
1.48 thorpej 1710: pc->pc_ngroups++;
1.43 thorpej 1711: TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1.47 thorpej 1712: if (pc->pc_freeto == NULL)
1713: pc->pc_freeto = pcg;
1.43 thorpej 1714: goto have_group;
1715: }
1716:
1717: /*
1718: * Unable to allocate a cache group; destruct the object
1719: * and free it back to the pool.
1720: */
1.51 thorpej 1721: pool_cache_destruct_object(pc, object);
1.43 thorpej 1722: return;
1723: }
1724:
1725: have_group:
1.48 thorpej 1726: pc->pc_nitems++;
1.43 thorpej 1727: pcg_put(pcg, object);
1728:
1729: if (pcg->pcg_avail == PCG_NOBJECTS)
1730: pc->pc_freeto = NULL;
1731:
1732: simple_unlock(&pc->pc_slock);
1.51 thorpej 1733: }
1734:
1735: /*
1736: * pool_cache_destruct_object:
1737: *
1738: * Force destruction of an object and its release back into
1739: * the pool.
1740: */
1741: void
1742: pool_cache_destruct_object(struct pool_cache *pc, void *object)
1743: {
1744:
1745: if (pc->pc_dtor != NULL)
1746: (*pc->pc_dtor)(pc->pc_arg, object);
1747: pool_put(pc->pc_pool, object);
1.43 thorpej 1748: }
1749:
1750: /*
1751: * pool_cache_do_invalidate:
1752: *
1753: * This internal function implements pool_cache_invalidate() and
1754: * pool_cache_reclaim().
1755: */
1756: static void
1757: pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
1.56 sommerfe 1758: void (*putit)(struct pool *, void *))
1.43 thorpej 1759: {
1760: struct pool_cache_group *pcg, *npcg;
1761: void *object;
1.60 thorpej 1762: int s;
1.43 thorpej 1763:
1764: for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1765: pcg = npcg) {
1766: npcg = TAILQ_NEXT(pcg, pcg_list);
1767: while (pcg->pcg_avail != 0) {
1.48 thorpej 1768: pc->pc_nitems--;
1.43 thorpej 1769: object = pcg_get(pcg);
1.45 thorpej 1770: if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
1771: pc->pc_allocfrom = NULL;
1.43 thorpej 1772: if (pc->pc_dtor != NULL)
1773: (*pc->pc_dtor)(pc->pc_arg, object);
1.56 sommerfe 1774: (*putit)(pc->pc_pool, object);
1.43 thorpej 1775: }
1776: if (free_groups) {
1.48 thorpej 1777: pc->pc_ngroups--;
1.43 thorpej 1778: TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
1.46 thorpej 1779: if (pc->pc_freeto == pcg)
1780: pc->pc_freeto = NULL;
1.60 thorpej 1781: s = splvm();
1.43 thorpej 1782: pool_put(&pcgpool, pcg);
1.60 thorpej 1783: splx(s);
1.43 thorpej 1784: }
1785: }
1786: }
1787:
1788: /*
1789: * pool_cache_invalidate:
1790: *
1791: * Invalidate a pool cache (destruct and release all of the
1792: * cached objects).
1793: */
1794: void
1795: pool_cache_invalidate(struct pool_cache *pc)
1796: {
1797:
1798: simple_lock(&pc->pc_slock);
1.56 sommerfe 1799: pool_cache_do_invalidate(pc, 0, pool_put);
1.43 thorpej 1800: simple_unlock(&pc->pc_slock);
1801: }
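
/*
 * Illustrative note (using the hypothetical "frob_cache" from the
 * sketch after pool_cache_put() above): a subsystem that must ensure
 * no stale constructed objects remain cached, e.g. before tearing
 * its state down, simply calls:
 *
 *	pool_cache_invalidate(&frob_cache);
 *
 * Destroying the cache outright is done with pool_cache_destroy(),
 * which performs this invalidation internally.
 */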
1802:
1803: /*
1804: * pool_cache_reclaim:
1805: *
1806: * Reclaim a pool cache for pool_reclaim().
1807: */
1808: static void
1809: pool_cache_reclaim(struct pool_cache *pc)
1810: {
1811:
1.47 thorpej 1812: simple_lock(&pc->pc_slock);
1.43 thorpej 1813: pool_cache_do_invalidate(pc, 1, pool_do_put);
1814: simple_unlock(&pc->pc_slock);
1.3 pk 1815: }
1.66 thorpej 1816:
1817: /*
1818: * Pool backend allocators.
1819: *
1820: * Each pool has a backend allocator that handles allocation, deallocation,
1821: * and any additional draining that might be needed.
1822: *
1823: * We provide two standard allocators:
1824: *
1825: * pool_allocator_kmem - the default when no allocator is specified
1826: *
1827: * pool_allocator_nointr - used for pools that will not be accessed
1828: * in interrupt context.
1829: */
1830: void *pool_page_alloc(struct pool *, int);
1831: void pool_page_free(struct pool *, void *);
1832:
1833: struct pool_allocator pool_allocator_kmem = {
1834: pool_page_alloc, pool_page_free, 0,
1835: };
1836:
1837: void *pool_page_alloc_nointr(struct pool *, int);
1838: void pool_page_free_nointr(struct pool *, void *);
1839:
1840: struct pool_allocator pool_allocator_nointr = {
1841: pool_page_alloc_nointr, pool_page_free_nointr, 0,
1842: };
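
/*
 * A pool may also supply its own allocator.  The sketch below is
 * illustrative only (the "mysub" names are hypothetical): a subsystem
 * that owns a dedicated submap could route a pool's page-level
 * allocations through it by defining an allocator in the same shape
 * as the two standard ones above and passing its address to
 * pool_init():
 *
 *	void *
 *	mysub_page_alloc(struct pool *pp, int flags)
 *	{
 *		boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
 *
 *		return ((void *)uvm_km_alloc_poolpage1(mysub_map,
 *		    NULL, waitok));
 *	}
 *
 *	void
 *	mysub_page_free(struct pool *pp, void *v)
 *	{
 *
 *		uvm_km_free_poolpage1(mysub_map, (vaddr_t)v);
 *	}
 *
 *	struct pool_allocator mysub_allocator = {
 *		mysub_page_alloc, mysub_page_free, 0,
 *	};
 *
 * (The NULL object argument assumes an intrsafe submap with no
 * backing uvm_object.)  All pools sharing this allocator then also
 * share its PA_WANT accounting and drain behavior, described below.
 */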
1843:
1844: #ifdef POOL_SUBPAGE
1845: void *pool_subpage_alloc(struct pool *, int);
1846: void pool_subpage_free(struct pool *, void *);
1847:
1848: struct pool_allocator pool_allocator_kmem_subpage = {
1849: pool_subpage_alloc, pool_subpage_free, 0,
1850: };
1851: #endif /* POOL_SUBPAGE */
1852:
1853: /*
1854: * We have at least three different resources for the same allocation and
1855: * each resource can be depleted. First, we have the ready elements in the
1856: * pool. Then we have the resource (typically a vm_map) for this allocator.
1857:  * Finally, we have physical memory.  A sleep waiting for one of these can
1858:  * become needless once a resource of another kind is freed, but the kernel
1859:  * doesn't support sleeping on multiple wait channels, so we have to employ
 * another strategy.
1860: *
1861: * The caller sleeps on the pool (so that it can be awakened when an item
1862: * is returned to the pool), but we set PA_WANT on the allocator. When a
1863: * page is returned to the allocator and PA_WANT is set, pool_allocator_free
1864: * will wake up all sleeping pools belonging to this allocator.
1865: *
1866: * XXX Thundering herd.
1867: */
1868: void *
1869: pool_allocator_alloc(struct pool *org, int flags)
1870: {
1871: struct pool_allocator *pa = org->pr_alloc;
1872: struct pool *pp, *start;
1873: int s, freed;
1874: void *res;
1875:
1876: do {
1877: if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
1878: return (res);
1.68 thorpej 1879: if ((flags & PR_WAITOK) == 0) {
1880: /*
1881: 			 * We only run the drain hook here if PR_NOWAIT.
1882: * In other cases, the hook will be run in
1883: * pool_reclaim().
1884: */
1885: if (org->pr_drain_hook != NULL) {
1886: (*org->pr_drain_hook)(org->pr_drain_hook_arg,
1887: flags);
1888: if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
1889: return (res);
1890: }
1.66 thorpej 1891: break;
1.68 thorpej 1892: }
1.66 thorpej 1893:
1894: /*
1895: * Drain all pools, except "org", that use this
1896: * allocator. We do this to reclaim VA space.
1897: * pa_alloc is responsible for waiting for
1898: * physical memory.
1899: *
1900: 		 * XXX We risk looping forever if someone calls
1901: 		 * pool_destroy() on "start".  But there is no other
1902: 		 * way to have a potentially sleeping pool_reclaim(),
1903: * non-sleeping locks on pool_allocator, and some
1904: * stirring of drained pools in the allocator.
1.68 thorpej 1905: *
1906: * XXX Maybe we should use pool_head_slock for locking
1907: * the allocators?
1.66 thorpej 1908: */
1909: freed = 0;
1910:
1911: s = splvm();
1912: simple_lock(&pa->pa_slock);
1913: pp = start = TAILQ_FIRST(&pa->pa_list);
1914: do {
1915: TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
1916: TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
1917: if (pp == org)
1918: continue;
1919: 			simple_unlock(&pa->pa_slock);
1920: 			freed = pool_reclaim(pp);
1921: 			simple_lock(&pa->pa_slock);
1922: } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
1923: freed == 0);
1924:
1925: if (freed == 0) {
1926: /*
1927: * We set PA_WANT here, the caller will most likely
1928: * sleep waiting for pages (if not, this won't hurt
1929: * that much), and there is no way to set this in
1930: * the caller without violating locking order.
1931: */
1932: pa->pa_flags |= PA_WANT;
1933: }
1934: simple_unlock(&pa->pa_slock);
1935: splx(s);
1936: } while (freed);
1937: return (NULL);
1938: }
1939:
1940: void
1941: pool_allocator_free(struct pool *pp, void *v)
1942: {
1943: struct pool_allocator *pa = pp->pr_alloc;
1944: int s;
1945:
1946: (*pa->pa_free)(pp, v);
1947:
1948: s = splvm();
1949: simple_lock(&pa->pa_slock);
1950: if ((pa->pa_flags & PA_WANT) == 0) {
1951: simple_unlock(&pa->pa_slock);
1952: splx(s);
1953: return;
1954: }
1955:
1956: TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
1957: simple_lock(&pp->pr_slock);
1958: if ((pp->pr_flags & PR_WANTED) != 0) {
1959: pp->pr_flags &= ~PR_WANTED;
1960: wakeup(pp);
1961: }
1.69 thorpej 1962: simple_unlock(&pp->pr_slock);
1.66 thorpej 1963: }
1964: pa->pa_flags &= ~PA_WANT;
1965: simple_unlock(&pa->pa_slock);
1966: splx(s);
1967: }
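
/*
 * For reference, the waiter's side of the handshake above lives in
 * pool_get() earlier in this file.  Schematically (a sketch with
 * details elided, not a verbatim quote), a pool that fails to get a
 * page and is allowed to wait does:
 *
 *	pp->pr_flags |= PR_WANTED;
 *	simple_unlock(&pp->pr_slock);
 *	tsleep((caddr_t)pp, PSWP, pp->pr_wchan, 0);
 *	simple_lock(&pp->pr_slock);
 *
 * while pool_allocator_alloc() sets PA_WANT on the allocator.  The
 * wakeup(pp) above thus pairs with a tsleep() that uses the pool
 * itself as the wait channel.
 */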
1968:
1969: void *
1970: pool_page_alloc(struct pool *pp, int flags)
1971: {
1972: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
1973:
1974: return ((void *) uvm_km_alloc_poolpage(waitok));
1975: }
1976:
1977: void
1978: pool_page_free(struct pool *pp, void *v)
1979: {
1980:
1981: uvm_km_free_poolpage((vaddr_t) v);
1982: }
1983:
1984: #ifdef POOL_SUBPAGE
1985: /* Sub-page allocator, for machines with large hardware pages. */
1986: void *
1987: pool_subpage_alloc(struct pool *pp, int flags)
1988: {
1989:
1990: return (pool_get(&psppool, flags));
1991: }
1992:
1993: void
1994: pool_subpage_free(struct pool *pp, void *v)
1995: {
1996:
1997: pool_put(&psppool, v);
1998: }
1999:
2000: /* We don't provide a real nointr allocator. Maybe later. */
2001: void *
2002: pool_page_alloc_nointr(struct pool *pp, int flags)
2003: {
2004:
2005: return (pool_subpage_alloc(pp, flags));
2006: }
2007:
2008: void
2009: pool_page_free_nointr(struct pool *pp, void *v)
2010: {
2011:
2012: pool_subpage_free(pp, v);
2013: }
2014: #else
2015: void *
2016: pool_page_alloc_nointr(struct pool *pp, int flags)
2017: {
2018: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2019:
2020: return ((void *) uvm_km_alloc_poolpage1(kernel_map,
2021: uvm.kernel_object, waitok));
2022: }
2023:
2024: void
2025: pool_page_free_nointr(struct pool *pp, void *v)
2026: {
2027:
2028: uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
2029: }
2030: #endif /* POOL_SUBPAGE */