Annotation of src/sys/kern/subr_pool.c, Revision 1.68
1.68 ! thorpej 1: /* $NetBSD: subr_pool.c,v 1.67 2002/03/08 20:51:26 thorpej Exp $ */
1.1 pk 2:
3: /*-
1.43 thorpej 4: * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
1.1 pk 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 8: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9: * Simulation Facility, NASA Ames Research Center.
1.1 pk 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
1.13 christos 21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
1.1 pk 23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
1.64 lukem 39:
40: #include <sys/cdefs.h>
1.68 ! thorpej 41: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.67 2002/03/08 20:51:26 thorpej Exp $");
1.24 scottr 42:
1.25 thorpej 43: #include "opt_pool.h"
1.24 scottr 44: #include "opt_poollog.h"
1.28 thorpej 45: #include "opt_lockdebug.h"
1.1 pk 46:
47: #include <sys/param.h>
48: #include <sys/systm.h>
49: #include <sys/proc.h>
50: #include <sys/errno.h>
51: #include <sys/kernel.h>
52: #include <sys/malloc.h>
53: #include <sys/lock.h>
54: #include <sys/pool.h>
1.20 thorpej 55: #include <sys/syslog.h>
1.3 pk 56:
57: #include <uvm/uvm.h>
58:
1.1 pk 59: /*
60: * Pool resource management utility.
1.3 pk 61: *
62: * Memory is allocated in pages which are split into pieces according
63: * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
64: * in the pool structure and the individual pool items are on a linked list
65: * headed by `ph_itemlist' in each page header. The memory for building
66: * the page list is either taken from the allocated pages themselves (for
67: * small pool items) or taken from an internal pool of page headers (`phpool').
1.1 pk 68: */
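/*
 * Illustrative sketch (hypothetical client code, not part of this
 * file): a subsystem declares a pool for its fixed-size objects,
 * initializes it once, and gets/puts items at the appropriate spl
 * level.  The names `foo', `foo_pool' and "foopl" are assumptions
 * made up for the example; passing a NULL allocator selects the
 * default back-end.
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	... use f ...
 *	pool_put(&foo_pool, f);
 */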
69:
1.3 pk 70: /* List of all pools */
1.5 thorpej 71: TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
1.3 pk 72:
73: /* Private pool for page header structures */
74: static struct pool phpool;
75:
1.62 bjh21 76: #ifdef POOL_SUBPAGE
77: /* Pool of subpages for use by normal pools. */
78: static struct pool psppool;
79: #endif
80:
1.3 pk 81: /* # of seconds to retain page after last use */
82: int pool_inactive_time = 10;
83:
84: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 85: static struct pool *drainpp;
86:
87: /* This spin lock protects both pool_head and drainpp. */
88: struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
1.3 pk 89:
90: struct pool_item_header {
91: /* Page headers */
92: TAILQ_ENTRY(pool_item_header)
93: ph_pagelist; /* pool page list */
94: TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
95: LIST_ENTRY(pool_item_header)
96: ph_hashlist; /* Off-page page headers */
97: int ph_nmissing; /* # of chunks in use */
98: caddr_t ph_page; /* this page's address */
99: struct timeval ph_time; /* last referenced */
100: };
1.61 chs 101: TAILQ_HEAD(pool_pagelist,pool_item_header);
1.3 pk 102:
1.1 pk 103: struct pool_item {
1.3 pk 104: #ifdef DIAGNOSTIC
105: int pi_magic;
1.33 chs 106: #endif
1.25 thorpej 107: #define PI_MAGIC 0xdeadbeef
1.3 pk 108: /* Other entries use only this list entry */
109: TAILQ_ENTRY(pool_item) pi_list;
110: };
111:
1.25 thorpej 112: #define PR_HASH_INDEX(pp,addr) \
1.66 thorpej 113: (((u_long)(addr) >> (pp)->pr_alloc->pa_pageshift) & \
114: (PR_HASHTABSIZE - 1))
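
/*
 * Worked example (assuming a 4096-byte page, i.e. pa_pageshift == 12,
 * and PR_HASHTABSIZE == 8): an item at 0xc1234560 lies on the page at
 * 0xc1234000, and PR_HASH_INDEX yields (0xc1234560 >> 12) & 7 == 4,
 * so its off-page header is looked up on chain 4 of pr_hashtab.
 */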
1.3 pk 115:
1.53 thorpej 116: #define POOL_NEEDS_CATCHUP(pp) \
117: ((pp)->pr_nitems < (pp)->pr_minitems)
118:
1.43 thorpej 119: /*
120: * Pool cache management.
121: *
122: * Pool caches provide a way for constructed objects to be cached by the
123: * pool subsystem. This can lead to performance improvements by avoiding
 124: * needless object construction/destruction; destruction is deferred
 125: * until absolutely necessary.
126: *
127: * Caches are grouped into cache groups. Each cache group references
128: * up to 16 constructed objects. When a cache allocates an object
129: * from the pool, it calls the object's constructor and places it into
130: * a cache group. When a cache group frees an object back to the pool,
131: * it first calls the object's destructor. This allows the object to
132: * persist in constructed form while freed to the cache.
133: *
134: * Multiple caches may exist for each pool. This allows a single
135: * object type to have multiple constructed forms. The pool references
136: * each cache, so that when a pool is drained by the pagedaemon, it can
137: * drain each individual cache as well. Each time a cache is drained,
138: * the most idle cache group is freed to the pool in its entirety.
139: *
 140: * Pool caches are laid on top of pools. By layering them, we can avoid
141: * the complexity of cache management for pools which would not benefit
142: * from it.
143: */
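/*
 * Illustrative sketch (hypothetical client code): layering a cache
 * over a pool so objects keep their constructed form while cached.
 * foo_ctor()/foo_dtor() are assumed to match the constructor and
 * destructor signatures taken by pool_cache_init() below.
 *
 *	static struct pool foo_pool;
 *	static struct pool_cache foo_cache;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *
 *	struct foo *f = pool_cache_get(&foo_cache, PR_WAITOK);
 *	... f may arrive already constructed, from the cache ...
 *	pool_cache_put(&foo_cache, f);
 */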
144:
145: /* The cache group pool. */
146: static struct pool pcgpool;
147:
148: /* The pool cache group. */
149: #define PCG_NOBJECTS 16
150: struct pool_cache_group {
151: TAILQ_ENTRY(pool_cache_group)
152: pcg_list; /* link in the pool cache's group list */
153: u_int pcg_avail; /* # available objects */
154: /* pointers to the objects */
155: void *pcg_objects[PCG_NOBJECTS];
156: };
1.3 pk 157:
1.43 thorpej 158: static void pool_cache_reclaim(struct pool_cache *);
1.3 pk 159:
1.42 thorpej 160: static int pool_catchup(struct pool *);
1.55 thorpej 161: static void pool_prime_page(struct pool *, caddr_t,
162: struct pool_item_header *);
1.66 thorpej 163:
164: void *pool_allocator_alloc(struct pool *, int);
165: void pool_allocator_free(struct pool *, void *);
1.3 pk 166:
1.42 thorpej 167: static void pool_print1(struct pool *, const char *,
168: void (*)(const char *, ...));
1.3 pk 169:
170: /*
1.52 thorpej 171: * Pool log entry. An array of these is allocated in pool_init().
1.3 pk 172: */
173: struct pool_log {
174: const char *pl_file;
175: long pl_line;
176: int pl_action;
1.25 thorpej 177: #define PRLOG_GET 1
178: #define PRLOG_PUT 2
1.3 pk 179: void *pl_addr;
1.1 pk 180: };
181:
1.3 pk 182: /* Number of entries in pool log buffers */
1.17 thorpej 183: #ifndef POOL_LOGSIZE
184: #define POOL_LOGSIZE 10
185: #endif
186:
187: int pool_logsize = POOL_LOGSIZE;
1.1 pk 188:
1.59 thorpej 189: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 190: static __inline void
191: pr_log(struct pool *pp, void *v, int action, const char *file, long line)
1.3 pk 192: {
193: int n = pp->pr_curlogentry;
194: struct pool_log *pl;
195:
1.20 thorpej 196: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 197: return;
198:
199: /*
200: * Fill in the current entry. Wrap around and overwrite
201: * the oldest entry if necessary.
202: */
203: pl = &pp->pr_log[n];
204: pl->pl_file = file;
205: pl->pl_line = line;
206: pl->pl_action = action;
207: pl->pl_addr = v;
208: if (++n >= pp->pr_logsize)
209: n = 0;
210: pp->pr_curlogentry = n;
211: }
212:
213: static void
1.42 thorpej 214: pr_printlog(struct pool *pp, struct pool_item *pi,
215: void (*pr)(const char *, ...))
1.3 pk 216: {
217: int i = pp->pr_logsize;
218: int n = pp->pr_curlogentry;
219:
1.20 thorpej 220: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 221: return;
222:
223: /*
224: * Print all entries in this pool's log.
225: */
226: while (i-- > 0) {
227: struct pool_log *pl = &pp->pr_log[n];
228: if (pl->pl_action != 0) {
1.25 thorpej 229: if (pi == NULL || pi == pl->pl_addr) {
230: (*pr)("\tlog entry %d:\n", i);
231: (*pr)("\t\taction = %s, addr = %p\n",
232: pl->pl_action == PRLOG_GET ? "get" : "put",
233: pl->pl_addr);
234: (*pr)("\t\tfile: %s at line %lu\n",
235: pl->pl_file, pl->pl_line);
236: }
1.3 pk 237: }
238: if (++n >= pp->pr_logsize)
239: n = 0;
240: }
241: }
1.25 thorpej 242:
1.42 thorpej 243: static __inline void
244: pr_enter(struct pool *pp, const char *file, long line)
1.25 thorpej 245: {
246:
1.34 thorpej 247: if (__predict_false(pp->pr_entered_file != NULL)) {
1.25 thorpej 248: printf("pool %s: reentrancy at file %s line %ld\n",
249: pp->pr_wchan, file, line);
250: printf(" previous entry at file %s line %ld\n",
251: pp->pr_entered_file, pp->pr_entered_line);
252: panic("pr_enter");
253: }
254:
255: pp->pr_entered_file = file;
256: pp->pr_entered_line = line;
257: }
258:
1.42 thorpej 259: static __inline void
260: pr_leave(struct pool *pp)
1.25 thorpej 261: {
262:
1.34 thorpej 263: if (__predict_false(pp->pr_entered_file == NULL)) {
1.25 thorpej 264: printf("pool %s not entered?\n", pp->pr_wchan);
265: panic("pr_leave");
266: }
267:
268: pp->pr_entered_file = NULL;
269: pp->pr_entered_line = 0;
270: }
271:
1.42 thorpej 272: static __inline void
273: pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
1.25 thorpej 274: {
275:
276: if (pp->pr_entered_file != NULL)
277: (*pr)("\n\tcurrently entered from file %s line %ld\n",
278: pp->pr_entered_file, pp->pr_entered_line);
279: }
1.3 pk 280: #else
1.25 thorpej 281: #define pr_log(pp, v, action, file, line)
282: #define pr_printlog(pp, pi, pr)
283: #define pr_enter(pp, file, line)
284: #define pr_leave(pp)
285: #define pr_enter_check(pp, pr)
1.59 thorpej 286: #endif /* POOL_DIAGNOSTIC */
1.3 pk 287:
288: /*
289: * Return the pool page header based on page address.
290: */
1.42 thorpej 291: static __inline struct pool_item_header *
292: pr_find_pagehead(struct pool *pp, caddr_t page)
1.3 pk 293: {
294: struct pool_item_header *ph;
295:
1.20 thorpej 296: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.3 pk 297: return ((struct pool_item_header *)(page + pp->pr_phoffset));
298:
299: for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
300: ph != NULL;
301: ph = LIST_NEXT(ph, ph_hashlist)) {
302: if (ph->ph_page == page)
303: return (ph);
304: }
305: return (NULL);
306: }
307:
308: /*
309: * Remove a page from the pool.
310: */
1.42 thorpej 311: static __inline void
1.61 chs 312: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
313: struct pool_pagelist *pq)
1.3 pk 314: {
1.61 chs 315: int s;
1.3 pk 316:
317: /*
1.7 thorpej 318: * If the page was idle, decrement the idle page count.
1.3 pk 319: */
1.6 thorpej 320: if (ph->ph_nmissing == 0) {
321: #ifdef DIAGNOSTIC
322: if (pp->pr_nidle == 0)
323: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 324: if (pp->pr_nitems < pp->pr_itemsperpage)
325: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 326: #endif
327: pp->pr_nidle--;
328: }
1.7 thorpej 329:
1.20 thorpej 330: pp->pr_nitems -= pp->pr_itemsperpage;
331:
1.7 thorpej 332: /*
1.61 chs 333: * Unlink a page from the pool and release it (or queue it for release).
1.7 thorpej 334: */
335: TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
1.61 chs 336: if (pq) {
337: TAILQ_INSERT_HEAD(pq, ph, ph_pagelist);
338: } else {
1.66 thorpej 339: pool_allocator_free(pp, ph->ph_page);
1.61 chs 340: if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
341: LIST_REMOVE(ph, ph_hashlist);
342: s = splhigh();
343: pool_put(&phpool, ph);
344: splx(s);
345: }
346: }
1.7 thorpej 347: pp->pr_npages--;
348: pp->pr_npagefree++;
1.6 thorpej 349:
1.3 pk 350: if (pp->pr_curpage == ph) {
351: /*
352: * Find a new non-empty page header, if any.
353: * Start search from the page head, to increase the
354: * chance for "high water" pages to be freed.
355: */
1.61 chs 356: TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
1.3 pk 357: if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
358: break;
359:
360: pp->pr_curpage = ph;
1.21 thorpej 361: }
1.3 pk 362: }
363:
364: /*
365: * Initialize the given pool resource structure.
366: *
367: * We export this routine to allow other kernel parts to declare
368: * static pools that must be initialized before malloc() is available.
369: */
370: void
1.42 thorpej 371: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.66 thorpej 372: const char *wchan, struct pool_allocator *palloc)
1.3 pk 373: {
1.16 briggs 374: int off, slack, i;
1.3 pk 375:
1.25 thorpej 376: #ifdef POOL_DIAGNOSTIC
377: /*
378: * Always log if POOL_DIAGNOSTIC is defined.
379: */
380: if (pool_logsize != 0)
381: flags |= PR_LOGGING;
382: #endif
383:
1.66 thorpej 384: #ifdef POOL_SUBPAGE
385: /*
386: * XXX We don't provide a real `nointr' back-end
387: * yet; all sub-pages come from a kmem back-end.
388: * maybe some day...
389: */
390: if (palloc == NULL) {
391: extern struct pool_allocator pool_allocator_kmem_subpage;
392: palloc = &pool_allocator_kmem_subpage;
393: }
1.3 pk 394: /*
1.66 thorpej 395: * We'll assume any user-specified back-end allocator
 396: * will deal with sub-pages, or simply doesn't care.
1.3 pk 397: */
1.66 thorpej 398: #else
399: if (palloc == NULL)
400: palloc = &pool_allocator_kmem;
401: #endif /* POOL_SUBPAGE */
402: if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
403: if (palloc->pa_pagesz == 0) {
1.62 bjh21 404: #ifdef POOL_SUBPAGE
1.66 thorpej 405: if (palloc == &pool_allocator_kmem)
406: palloc->pa_pagesz = PAGE_SIZE;
407: else
408: palloc->pa_pagesz = POOL_SUBPAGE;
1.62 bjh21 409: #else
1.66 thorpej 410: palloc->pa_pagesz = PAGE_SIZE;
411: #endif /* POOL_SUBPAGE */
412: }
413:
414: TAILQ_INIT(&palloc->pa_list);
415:
416: simple_lock_init(&palloc->pa_slock);
417: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
418: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
419: palloc->pa_flags |= PA_INITIALIZED;
1.4 thorpej 420: }
1.3 pk 421:
422: if (align == 0)
423: align = ALIGN(1);
1.14 thorpej 424:
425: if (size < sizeof(struct pool_item))
426: size = sizeof(struct pool_item);
1.3 pk 427:
1.35 pk 428: size = ALIGN(size);
1.66 thorpej 429: #ifdef DIAGNOSTIC
430: if (size > palloc->pa_pagesz)
1.35 pk 431: panic("pool_init: pool item size (%lu) too large",
432: (u_long)size);
1.66 thorpej 433: #endif
1.35 pk 434:
1.3 pk 435: /*
436: * Initialize the pool structure.
437: */
438: TAILQ_INIT(&pp->pr_pagelist);
1.43 thorpej 439: TAILQ_INIT(&pp->pr_cachelist);
1.3 pk 440: pp->pr_curpage = NULL;
441: pp->pr_npages = 0;
442: pp->pr_minitems = 0;
443: pp->pr_minpages = 0;
444: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 445: pp->pr_roflags = flags;
446: pp->pr_flags = 0;
1.35 pk 447: pp->pr_size = size;
1.3 pk 448: pp->pr_align = align;
449: pp->pr_wchan = wchan;
1.66 thorpej 450: pp->pr_alloc = palloc;
1.20 thorpej 451: pp->pr_nitems = 0;
452: pp->pr_nout = 0;
453: pp->pr_hardlimit = UINT_MAX;
454: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 455: pp->pr_hardlimit_ratecap.tv_sec = 0;
456: pp->pr_hardlimit_ratecap.tv_usec = 0;
457: pp->pr_hardlimit_warning_last.tv_sec = 0;
458: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 ! thorpej 459: pp->pr_drain_hook = NULL;
! 460: pp->pr_drain_hook_arg = NULL;
1.3 pk 461:
462: /*
463: * Decide whether to put the page header off page to avoid
464: * wasting too large a part of the page. Off-page page headers
465: * go on a hash table, so we can match a returned item
466: * with its header based on the page address.
467: * We use 1/16 of the page size as the threshold (XXX: tune)
468: */
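/*
 * For example, with a 4096-byte page this keeps the header in the
 * page for items smaller than 256 bytes, and takes it from `phpool'
 * for anything larger.
 */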
1.66 thorpej 469: if (pp->pr_size < palloc->pa_pagesz/16) {
1.3 pk 470: /* Use the end of the page for the page header */
1.20 thorpej 471: pp->pr_roflags |= PR_PHINPAGE;
1.66 thorpej 472: pp->pr_phoffset = off = palloc->pa_pagesz -
473: ALIGN(sizeof(struct pool_item_header));
1.2 pk 474: } else {
1.3 pk 475: /* The page header will be taken from our page header pool */
476: pp->pr_phoffset = 0;
1.66 thorpej 477: off = palloc->pa_pagesz;
1.16 briggs 478: for (i = 0; i < PR_HASHTABSIZE; i++) {
479: LIST_INIT(&pp->pr_hashtab[i]);
480: }
1.2 pk 481: }
1.1 pk 482:
1.3 pk 483: /*
484: * Alignment is to take place at `ioff' within the item. This means
485: * we must reserve up to `align - 1' bytes on the page to allow
486: * appropriate positioning of each item.
487: *
488: * Silently enforce `0 <= ioff < align'.
489: */
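/*
 * Worked example: with align == 16 and ioff == 8, the expression
 * (align - ioff) % align reserves 8 bytes, so byte 8 of the first
 * item on each (page-aligned) page lands on a 16-byte boundary.
 */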
490: pp->pr_itemoffset = ioff = ioff % align;
491: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 492: KASSERT(pp->pr_itemsperpage != 0);
1.3 pk 493:
494: /*
495: * Use the slack between the chunks and the page header
496: * for "cache coloring".
497: */
498: slack = off - pp->pr_itemsperpage * pp->pr_size;
499: pp->pr_maxcolor = (slack / align) * align;
500: pp->pr_curcolor = 0;
501:
502: pp->pr_nget = 0;
503: pp->pr_nfail = 0;
504: pp->pr_nput = 0;
505: pp->pr_npagealloc = 0;
506: pp->pr_npagefree = 0;
1.1 pk 507: pp->pr_hiwat = 0;
1.8 thorpej 508: pp->pr_nidle = 0;
1.3 pk 509:
1.59 thorpej 510: #ifdef POOL_DIAGNOSTIC
1.25 thorpej 511: if (flags & PR_LOGGING) {
512: if (kmem_map == NULL ||
513: (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
514: M_TEMP, M_NOWAIT)) == NULL)
1.20 thorpej 515: pp->pr_roflags &= ~PR_LOGGING;
1.3 pk 516: pp->pr_curlogentry = 0;
517: pp->pr_logsize = pool_logsize;
518: }
1.59 thorpej 519: #endif
1.25 thorpej 520:
521: pp->pr_entered_file = NULL;
522: pp->pr_entered_line = 0;
1.3 pk 523:
1.21 thorpej 524: simple_lock_init(&pp->pr_slock);
1.1 pk 525:
1.3 pk 526: /*
1.43 thorpej 527: * Initialize private page header pool and cache magazine pool if we
528: * haven't done so yet.
1.23 thorpej 529: * XXX LOCKING.
1.3 pk 530: */
531: if (phpool.pr_size == 0) {
1.62 bjh21 532: #ifdef POOL_SUBPAGE
533: pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
1.66 thorpej 534: "phpool", &pool_allocator_kmem);
1.62 bjh21 535: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
1.66 thorpej 536: PR_RECURSIVE, "psppool", &pool_allocator_kmem);
1.62 bjh21 537: #else
1.3 pk 538: pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
1.66 thorpej 539: 0, "phpool", NULL);
1.62 bjh21 540: #endif
1.43 thorpej 541: pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
1.66 thorpej 542: 0, "pcgpool", NULL);
1.1 pk 543: }
544:
1.23 thorpej 545: /* Insert into the list of all pools. */
546: simple_lock(&pool_head_slock);
547: TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
548: simple_unlock(&pool_head_slock);
1.66 thorpej 549:
550: /* Insert this into the list of pools using this allocator. */
551: simple_lock(&palloc->pa_slock);
552: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
553: simple_unlock(&palloc->pa_slock);
1.1 pk 554: }
555:
556: /*
 557: * De-commission a pool resource.
558: */
559: void
1.42 thorpej 560: pool_destroy(struct pool *pp)
1.1 pk 561: {
1.3 pk 562: struct pool_item_header *ph;
1.43 thorpej 563: struct pool_cache *pc;
564:
1.66 thorpej 565: /* Locking order: pool_allocator -> pool */
566: simple_lock(&pp->pr_alloc->pa_slock);
567: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
568: simple_unlock(&pp->pr_alloc->pa_slock);
569:
1.43 thorpej 570: /* Destroy all caches for this pool. */
571: while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
572: pool_cache_destroy(pc);
1.3 pk 573:
574: #ifdef DIAGNOSTIC
1.20 thorpej 575: if (pp->pr_nout != 0) {
1.25 thorpej 576: pr_printlog(pp, NULL, printf);
1.20 thorpej 577: panic("pool_destroy: pool busy: still out: %u\n",
578: pp->pr_nout);
1.3 pk 579: }
580: #endif
1.1 pk 581:
1.3 pk 582: /* Remove all pages */
1.20 thorpej 583: if ((pp->pr_roflags & PR_STATIC) == 0)
1.61 chs 584: while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
585: pr_rmpage(pp, ph, NULL);
1.3 pk 586:
587: /* Remove from global pool list */
1.23 thorpej 588: simple_lock(&pool_head_slock);
1.3 pk 589: TAILQ_REMOVE(&pool_head, pp, pr_poollist);
1.61 chs 590: if (drainpp == pp) {
591: drainpp = NULL;
592: }
1.23 thorpej 593: simple_unlock(&pool_head_slock);
1.3 pk 594:
1.59 thorpej 595: #ifdef POOL_DIAGNOSTIC
1.20 thorpej 596: if ((pp->pr_roflags & PR_LOGGING) != 0)
1.3 pk 597: free(pp->pr_log, M_TEMP);
1.59 thorpej 598: #endif
1.1 pk 599: }
600:
1.68 ! thorpej 601: void
! 602: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
! 603: {
! 604:
! 605: /* XXX no locking -- must be used just after pool_init() */
! 606: #ifdef DIAGNOSTIC
! 607: if (pp->pr_drain_hook != NULL)
! 608: panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
! 609: #endif
! 610: pp->pr_drain_hook = fn;
! 611: pp->pr_drain_hook_arg = arg;
! 612: }
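
/*
 * Illustrative sketch (hypothetical client code): a subsystem that
 * keeps a private cache of pool items can register a drain hook
 * right after pool_init(), so the pool can ask for that memory back
 * when its hard limit is hit or when it is reclaimed:
 *
 *	static void
 *	foo_drain(void *arg, int flags)
 *	{
 *		... pool_put() any privately cached items ...
 *	}
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
 */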
! 613:
1.55 thorpej 614: static __inline struct pool_item_header *
615: pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
616: {
617: struct pool_item_header *ph;
618: int s;
619:
620: LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
621:
622: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
623: ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
624: else {
625: s = splhigh();
626: ph = pool_get(&phpool, flags);
627: splx(s);
628: }
629:
630: return (ph);
631: }
1.1 pk 632:
633: /*
1.3 pk 634: * Grab an item from the pool; must be called at appropriate spl level
1.1 pk 635: */
1.3 pk 636: void *
1.59 thorpej 637: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 638: _pool_get(struct pool *pp, int flags, const char *file, long line)
1.56 sommerfe 639: #else
640: pool_get(struct pool *pp, int flags)
641: #endif
1.1 pk 642: {
643: struct pool_item *pi;
1.3 pk 644: struct pool_item_header *ph;
1.55 thorpej 645: void *v;
1.1 pk 646:
1.2 pk 647: #ifdef DIAGNOSTIC
1.34 thorpej 648: if (__predict_false((pp->pr_roflags & PR_STATIC) &&
649: (flags & PR_MALLOCOK))) {
1.25 thorpej 650: pr_printlog(pp, NULL, printf);
1.2 pk 651: panic("pool_get: static");
1.3 pk 652: }
1.2 pk 653:
1.37 sommerfe 654: if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
655: (flags & PR_WAITOK) != 0))
1.3 pk 656: panic("pool_get: must have NOWAIT");
1.58 thorpej 657:
658: #ifdef LOCKDEBUG
659: if (flags & PR_WAITOK)
660: simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
1.56 sommerfe 661: #endif
1.58 thorpej 662: #endif /* DIAGNOSTIC */
1.1 pk 663:
1.21 thorpej 664: simple_lock(&pp->pr_slock);
1.25 thorpej 665: pr_enter(pp, file, line);
1.20 thorpej 666:
667: startover:
668: /*
669: * Check to see if we've reached the hard limit. If we have,
670: * and we can wait, then wait until an item has been returned to
671: * the pool.
672: */
673: #ifdef DIAGNOSTIC
1.34 thorpej 674: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.25 thorpej 675: pr_leave(pp);
1.21 thorpej 676: simple_unlock(&pp->pr_slock);
1.20 thorpej 677: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
678: }
679: #endif
1.34 thorpej 680: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 ! thorpej 681: if (pp->pr_drain_hook != NULL) {
! 682: /*
! 683: * Since the drain hook is going to free things
! 684: * back to the pool, unlock, call the hook, re-lock,
! 685: * and check the hardlimit condition again.
! 686: */
! 687: pr_leave(pp);
! 688: simple_unlock(&pp->pr_slock);
! 689: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
! 690: simple_lock(&pp->pr_slock);
! 691: pr_enter(pp, file, line);
! 692: if (pp->pr_nout < pp->pr_hardlimit)
! 693: goto startover;
! 694: }
! 695:
1.29 sommerfe 696: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 697: /*
698: * XXX: A warning isn't logged in this case. Should
699: * it be?
700: */
701: pp->pr_flags |= PR_WANTED;
1.25 thorpej 702: pr_leave(pp);
1.40 sommerfe 703: ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
1.25 thorpej 704: pr_enter(pp, file, line);
1.20 thorpej 705: goto startover;
706: }
1.31 thorpej 707:
708: /*
709: * Log a message that the hard limit has been hit.
710: */
711: if (pp->pr_hardlimit_warning != NULL &&
712: ratecheck(&pp->pr_hardlimit_warning_last,
713: &pp->pr_hardlimit_ratecap))
714: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 715:
716: pp->pr_nfail++;
717:
1.25 thorpej 718: pr_leave(pp);
1.21 thorpej 719: simple_unlock(&pp->pr_slock);
1.20 thorpej 720: return (NULL);
721: }
722:
1.3 pk 723: /*
724: * The convention we use is that if `curpage' is not NULL, then
725: * it points at a non-empty bucket. In particular, `curpage'
726: * never points at a page header which has PR_PHINPAGE set and
727: * has no items in its bucket.
728: */
1.20 thorpej 729: if ((ph = pp->pr_curpage) == NULL) {
730: #ifdef DIAGNOSTIC
731: if (pp->pr_nitems != 0) {
1.21 thorpej 732: simple_unlock(&pp->pr_slock);
1.20 thorpej 733: printf("pool_get: %s: curpage NULL, nitems %u\n",
734: pp->pr_wchan, pp->pr_nitems);
735: panic("pool_get: nitems inconsistent\n");
736: }
737: #endif
738:
1.21 thorpej 739: /*
740: * Call the back-end page allocator for more memory.
741: * Release the pool lock, as the back-end page allocator
742: * may block.
743: */
1.25 thorpej 744: pr_leave(pp);
1.21 thorpej 745: simple_unlock(&pp->pr_slock);
1.66 thorpej 746: v = pool_allocator_alloc(pp, flags);
1.55 thorpej 747: if (__predict_true(v != NULL))
748: ph = pool_alloc_item_header(pp, v, flags);
1.21 thorpej 749: simple_lock(&pp->pr_slock);
1.25 thorpej 750: pr_enter(pp, file, line);
1.15 pk 751:
1.55 thorpej 752: if (__predict_false(v == NULL || ph == NULL)) {
753: if (v != NULL)
1.66 thorpej 754: pool_allocator_free(pp, v);
1.55 thorpej 755:
1.21 thorpej 756: /*
1.55 thorpej 757: * We were unable to allocate a page or item
758: * header, but we released the lock during
759: * allocation, so perhaps items were freed
760: * back to the pool. Check for this case.
1.21 thorpej 761: */
762: if (pp->pr_curpage != NULL)
763: goto startover;
1.15 pk 764:
1.3 pk 765: if ((flags & PR_WAITOK) == 0) {
766: pp->pr_nfail++;
1.25 thorpej 767: pr_leave(pp);
1.21 thorpej 768: simple_unlock(&pp->pr_slock);
1.1 pk 769: return (NULL);
1.3 pk 770: }
771:
1.15 pk 772: /*
773: * Wait for items to be returned to this pool.
1.21 thorpej 774: *
1.20 thorpej 775: * XXX: maybe we should wake up once a second and
776: * try again?
1.15 pk 777: */
1.1 pk 778: pp->pr_flags |= PR_WANTED;
1.66 thorpej 779: /* PA_WANTED is already set on the allocator. */
1.25 thorpej 780: pr_leave(pp);
1.40 sommerfe 781: ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
1.25 thorpej 782: pr_enter(pp, file, line);
1.20 thorpej 783: goto startover;
1.1 pk 784: }
1.3 pk 785:
1.15 pk 786: /* We have more memory; add it to the pool */
1.55 thorpej 787: pool_prime_page(pp, v, ph);
1.15 pk 788: pp->pr_npagealloc++;
789:
1.20 thorpej 790: /* Start the allocation process over. */
791: goto startover;
1.3 pk 792: }
793:
1.34 thorpej 794: if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
1.25 thorpej 795: pr_leave(pp);
1.21 thorpej 796: simple_unlock(&pp->pr_slock);
1.3 pk 797: panic("pool_get: %s: page empty", pp->pr_wchan);
1.21 thorpej 798: }
1.20 thorpej 799: #ifdef DIAGNOSTIC
1.34 thorpej 800: if (__predict_false(pp->pr_nitems == 0)) {
1.25 thorpej 801: pr_leave(pp);
1.21 thorpej 802: simple_unlock(&pp->pr_slock);
1.20 thorpej 803: printf("pool_get: %s: items on itemlist, nitems %u\n",
804: pp->pr_wchan, pp->pr_nitems);
805: panic("pool_get: nitems inconsistent\n");
806: }
1.65 enami 807: #endif
1.56 sommerfe 808:
1.65 enami 809: #ifdef POOL_DIAGNOSTIC
1.3 pk 810: pr_log(pp, v, PRLOG_GET, file, line);
1.65 enami 811: #endif
1.3 pk 812:
1.65 enami 813: #ifdef DIAGNOSTIC
1.34 thorpej 814: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
1.25 thorpej 815: pr_printlog(pp, pi, printf);
1.3 pk 816: panic("pool_get(%s): free list modified: magic=%x; page %p;"
817: " item addr %p\n",
818: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
819: }
820: #endif
821:
822: /*
823: * Remove from item list.
824: */
825: TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
1.20 thorpej 826: pp->pr_nitems--;
827: pp->pr_nout++;
1.6 thorpej 828: if (ph->ph_nmissing == 0) {
829: #ifdef DIAGNOSTIC
1.34 thorpej 830: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 831: panic("pool_get: nidle inconsistent");
832: #endif
833: pp->pr_nidle--;
834: }
1.3 pk 835: ph->ph_nmissing++;
836: if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
1.21 thorpej 837: #ifdef DIAGNOSTIC
1.34 thorpej 838: if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
1.25 thorpej 839: pr_leave(pp);
1.21 thorpej 840: simple_unlock(&pp->pr_slock);
841: panic("pool_get: %s: nmissing inconsistent",
842: pp->pr_wchan);
843: }
844: #endif
1.3 pk 845: /*
846: * Find a new non-empty page header, if any.
847: * Start search from the page head, to increase
848: * the chance for "high water" pages to be freed.
849: *
1.21 thorpej 850: * Migrate empty pages to the end of the list. This
851: * will speed the update of curpage as pages become
852: * idle. Empty pages intermingled with idle pages
853: * is no big deal. As soon as a page becomes un-empty,
854: * it will move back to the head of the list.
1.3 pk 855: */
856: TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
1.21 thorpej 857: TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
1.61 chs 858: TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
1.3 pk 859: if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
860: break;
861:
862: pp->pr_curpage = ph;
1.1 pk 863: }
1.3 pk 864:
865: pp->pr_nget++;
1.20 thorpej 866:
867: /*
868: * If we have a low water mark and we are now below that low
869: * water mark, add more items to the pool.
870: */
1.53 thorpej 871: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 872: /*
873: * XXX: Should we log a warning? Should we set up a timeout
874: * to try again in a second or so? The latter could break
875: * a caller's assumptions about interrupt protection, etc.
876: */
877: }
878:
1.25 thorpej 879: pr_leave(pp);
1.21 thorpej 880: simple_unlock(&pp->pr_slock);
1.1 pk 881: return (v);
882: }
883:
884: /*
1.43 thorpej 885: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 886: */
1.43 thorpej 887: static void
1.56 sommerfe 888: pool_do_put(struct pool *pp, void *v)
1.1 pk 889: {
890: struct pool_item *pi = v;
1.3 pk 891: struct pool_item_header *ph;
892: caddr_t page;
1.21 thorpej 893: int s;
1.3 pk 894:
1.61 chs 895: LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
896:
1.66 thorpej 897: page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
1.1 pk 898:
1.30 thorpej 899: #ifdef DIAGNOSTIC
1.34 thorpej 900: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 901: printf("pool %s: putting with none out\n",
902: pp->pr_wchan);
903: panic("pool_put");
904: }
905: #endif
1.3 pk 906:
1.34 thorpej 907: if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
1.25 thorpej 908: pr_printlog(pp, NULL, printf);
1.3 pk 909: panic("pool_put: %s: page header missing", pp->pr_wchan);
910: }
1.28 thorpej 911:
912: #ifdef LOCKDEBUG
913: /*
914: * Check if we're freeing a locked simple lock.
915: */
916: simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
917: #endif
1.3 pk 918:
919: /*
920: * Return to item list.
921: */
1.2 pk 922: #ifdef DIAGNOSTIC
1.3 pk 923: pi->pi_magic = PI_MAGIC;
924: #endif
1.32 chs 925: #ifdef DEBUG
926: {
927: int i, *ip = v;
928:
929: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
930: *ip++ = PI_MAGIC;
931: }
932: }
933: #endif
934:
1.3 pk 935: TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
936: ph->ph_nmissing--;
937: pp->pr_nput++;
1.20 thorpej 938: pp->pr_nitems++;
939: pp->pr_nout--;
1.3 pk 940:
941: /* Cancel "pool empty" condition if it exists */
942: if (pp->pr_curpage == NULL)
943: pp->pr_curpage = ph;
944:
945: if (pp->pr_flags & PR_WANTED) {
946: pp->pr_flags &= ~PR_WANTED;
1.15 pk 947: if (ph->ph_nmissing == 0)
948: pp->pr_nidle++;
1.3 pk 949: wakeup((caddr_t)pp);
950: return;
951: }
952:
953: /*
1.21 thorpej 954: * If this page is now complete, do one of two things:
955: *
956: * (1) If we have more pages than the page high water
957: * mark, free the page back to the system.
958: *
959: * (2) Move it to the end of the page list, so that
960: * we minimize our chances of fragmenting the
961: * pool. Idle pages migrate to the end (along with
962: * completely empty pages, so that we find un-empty
963: * pages more quickly when we update curpage) of the
964: * list so they can be more easily swept up by
965: * the pagedaemon when pages are scarce.
1.3 pk 966: */
967: if (ph->ph_nmissing == 0) {
1.6 thorpej 968: pp->pr_nidle++;
1.3 pk 969: if (pp->pr_npages > pp->pr_maxpages) {
1.61 chs 970: pr_rmpage(pp, ph, NULL);
1.3 pk 971: } else {
972: TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
973: TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
974:
1.21 thorpej 975: /*
976: * Update the timestamp on the page. A page must
977: * be idle for some period of time before it can
978: * be reclaimed by the pagedaemon. This minimizes
979: * ping-pong'ing for memory.
980: */
981: s = splclock();
982: ph->ph_time = mono_time;
983: splx(s);
984:
985: /*
986: * Update the current page pointer. Just look for
987: * the first page with any free items.
988: *
989: * XXX: Maybe we want an option to look for the
990: * page with the fewest available items, to minimize
991: * fragmentation?
992: */
1.61 chs 993: TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
1.3 pk 994: if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
995: break;
1.1 pk 996:
1.3 pk 997: pp->pr_curpage = ph;
1.1 pk 998: }
999: }
1.21 thorpej 1000: /*
1001: * If the page has just become un-empty, move it to the head of
1002: * the list, and make it the current page. The next allocation
1003: * will get the item from this page, instead of further fragmenting
1004: * the pool.
1005: */
1006: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1007: TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
1008: TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
1009: pp->pr_curpage = ph;
1010: }
1.43 thorpej 1011: }
1012:
1013: /*
1014: * Return resource to the pool; must be called at appropriate spl level
1015: */
1.59 thorpej 1016: #ifdef POOL_DIAGNOSTIC
1.43 thorpej 1017: void
1018: _pool_put(struct pool *pp, void *v, const char *file, long line)
1019: {
1020:
1021: simple_lock(&pp->pr_slock);
1022: pr_enter(pp, file, line);
1023:
1.56 sommerfe 1024: pr_log(pp, v, PRLOG_PUT, file, line);
1025:
1026: pool_do_put(pp, v);
1.21 thorpej 1027:
1.25 thorpej 1028: pr_leave(pp);
1.21 thorpej 1029: simple_unlock(&pp->pr_slock);
1.1 pk 1030: }
1.57 sommerfe 1031: #undef pool_put
1.59 thorpej 1032: #endif /* POOL_DIAGNOSTIC */
1.1 pk 1033:
1.56 sommerfe 1034: void
1035: pool_put(struct pool *pp, void *v)
1036: {
1037:
1038: simple_lock(&pp->pr_slock);
1039:
1040: pool_do_put(pp, v);
1041:
1042: simple_unlock(&pp->pr_slock);
1043: }
1.57 sommerfe 1044:
1.59 thorpej 1045: #ifdef POOL_DIAGNOSTIC
1.57 sommerfe 1046: #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1.56 sommerfe 1047: #endif
1048:
1.1 pk 1049: /*
1.55 thorpej 1050: * Add N items to the pool.
1051: */
1052: int
1053: pool_prime(struct pool *pp, int n)
1054: {
1055: struct pool_item_header *ph;
1056: caddr_t cp;
1057: int newpages, error = 0;
1058:
1059: simple_lock(&pp->pr_slock);
1060:
1061: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1062:
1063: while (newpages-- > 0) {
1064: simple_unlock(&pp->pr_slock);
1.66 thorpej 1065: cp = pool_allocator_alloc(pp, PR_NOWAIT);
1.55 thorpej 1066: if (__predict_true(cp != NULL))
1067: ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1068: simple_lock(&pp->pr_slock);
1069:
1070: if (__predict_false(cp == NULL || ph == NULL)) {
1071: error = ENOMEM;
1072: if (cp != NULL)
1.66 thorpej 1073: pool_allocator_free(pp, cp);
1.55 thorpej 1074: break;
1075: }
1076:
1077: pool_prime_page(pp, cp, ph);
1078: pp->pr_npagealloc++;
1079: pp->pr_minpages++;
1080: }
1081:
1082: if (pp->pr_minpages >= pp->pr_maxpages)
1083: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1084:
1085: simple_unlock(&pp->pr_slock);
 1086: return (error);
1087: }
1088:
1089: /*
1.3 pk 1090: * Add a page worth of items to the pool.
1.21 thorpej 1091: *
1092: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1093: */
1.55 thorpej 1094: static void
1095: pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
1.3 pk 1096: {
1097: struct pool_item *pi;
1098: caddr_t cp = storage;
1099: unsigned int align = pp->pr_align;
1100: unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1101: int n;
1.36 pk 1102:
1.66 thorpej 1103: #ifdef DIAGNOSTIC
1104: if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36 pk 1105: panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66 thorpej 1106: #endif
1.3 pk 1107:
1.55 thorpej 1108: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1.3 pk 1109: LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
1.55 thorpej 1110: ph, ph_hashlist);
1.3 pk 1111:
1112: /*
1113: * Insert page header.
1114: */
1115: TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
1116: TAILQ_INIT(&ph->ph_itemlist);
1117: ph->ph_page = storage;
1118: ph->ph_nmissing = 0;
1.21 thorpej 1119: memset(&ph->ph_time, 0, sizeof(ph->ph_time));
1.3 pk 1120:
1.6 thorpej 1121: pp->pr_nidle++;
1122:
1.3 pk 1123: /*
1124: * Color this page.
1125: */
1126: cp = (caddr_t)(cp + pp->pr_curcolor);
1127: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1128: pp->pr_curcolor = 0;
1129:
1130: /*
 1131: * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1132: */
1133: if (ioff != 0)
1134: cp = (caddr_t)(cp + (align - ioff));
1135:
1136: /*
1137: * Insert remaining chunks on the bucket list.
1138: */
1139: n = pp->pr_itemsperpage;
1.20 thorpej 1140: pp->pr_nitems += n;
1.3 pk 1141:
1142: while (n--) {
1143: pi = (struct pool_item *)cp;
1144:
1145: /* Insert on page list */
1146: TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
1147: #ifdef DIAGNOSTIC
1148: pi->pi_magic = PI_MAGIC;
1149: #endif
1150: cp = (caddr_t)(cp + pp->pr_size);
1151: }
1152:
1153: /*
1154: * If the pool was depleted, point at the new page.
1155: */
1156: if (pp->pr_curpage == NULL)
1157: pp->pr_curpage = ph;
1158:
1159: if (++pp->pr_npages > pp->pr_hiwat)
1160: pp->pr_hiwat = pp->pr_npages;
1161: }
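
/*
 * Sketch of the page layout this produces for a pool with an
 * in-page header (PR_PHINPAGE):
 *
 *	+-------+----------+------+-----+------+-------+-------------+
 *	| color | ioff pad | item | ... | item | slack | page header |
 *	+-------+----------+------+-----+------+-------+-------------+
 *	ph_page                                        ph_page + pr_phoffset
 *
 * The color shift (0 .. pr_maxcolor) and the remaining slack trade
 * off against each other; off-page-header pools look the same but
 * run items all the way to ph_page + pa_pagesz.
 */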
1162:
1.20 thorpej 1163: /*
1.52 thorpej 1164: * Used by pool_get() when nitems drops below the low water mark. This
 1165: * is used to catch nitems up to the low water mark.
1.20 thorpej 1166: *
1.21 thorpej 1167: * Note 1, we never wait for memory here, we let the caller decide what to do.
1.20 thorpej 1168: *
1169: * Note 2, this doesn't work with static pools.
1170: *
1171: * Note 3, we must be called with the pool already locked, and we return
1172: * with it locked.
1173: */
1174: static int
1.42 thorpej 1175: pool_catchup(struct pool *pp)
1.20 thorpej 1176: {
1.55 thorpej 1177: struct pool_item_header *ph;
1.20 thorpej 1178: caddr_t cp;
1179: int error = 0;
1180:
1181: if (pp->pr_roflags & PR_STATIC) {
1182: /*
1183: * We dropped below the low water mark, and this is not a
1184: * good thing. Log a warning.
1.21 thorpej 1185: *
1186: * XXX: rate-limit this?
1.20 thorpej 1187: */
1188: printf("WARNING: static pool `%s' dropped below low water "
1189: "mark\n", pp->pr_wchan);
1190: return (0);
1191: }
1192:
1.54 thorpej 1193: while (POOL_NEEDS_CATCHUP(pp)) {
1.20 thorpej 1194: /*
1.21 thorpej 1195: * Call the page back-end allocator for more memory.
1196: *
1197: * XXX: We never wait, so should we bother unlocking
1198: * the pool descriptor?
1.20 thorpej 1199: */
1.21 thorpej 1200: simple_unlock(&pp->pr_slock);
1.66 thorpej 1201: cp = pool_allocator_alloc(pp, PR_NOWAIT);
1.55 thorpej 1202: if (__predict_true(cp != NULL))
1203: ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1.21 thorpej 1204: simple_lock(&pp->pr_slock);
1.55 thorpej 1205: if (__predict_false(cp == NULL || ph == NULL)) {
1206: if (cp != NULL)
1.66 thorpej 1207: pool_allocator_free(pp, cp);
1.20 thorpej 1208: error = ENOMEM;
1209: break;
1210: }
1.55 thorpej 1211: pool_prime_page(pp, cp, ph);
1.26 thorpej 1212: pp->pr_npagealloc++;
1.20 thorpej 1213: }
1214:
1215: return (error);
1216: }
1217:
1.3 pk 1218: void
1.42 thorpej 1219: pool_setlowat(struct pool *pp, int n)
1.3 pk 1220: {
1.20 thorpej 1221: int error;
1.15 pk 1222:
1.21 thorpej 1223: simple_lock(&pp->pr_slock);
1224:
1.3 pk 1225: pp->pr_minitems = n;
1.15 pk 1226: pp->pr_minpages = (n == 0)
1227: ? 0
1.18 thorpej 1228: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1229:
1230: /* Make sure we're caught up with the newly-set low water mark. */
1.53 thorpej 1231: if (POOL_NEEDS_CATCHUP(pp) && (error = pool_catchup(pp)) != 0) {
1.20 thorpej 1232: /*
1233: * XXX: Should we log a warning? Should we set up a timeout
1234: * to try again in a second or so? The latter could break
1235: * a caller's assumptions about interrupt protection, etc.
1236: */
1237: }
1.21 thorpej 1238:
1239: simple_unlock(&pp->pr_slock);
1.3 pk 1240: }
1241:
1242: void
1.42 thorpej 1243: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1244: {
1.15 pk 1245:
1.21 thorpej 1246: simple_lock(&pp->pr_slock);
1247:
1.15 pk 1248: pp->pr_maxpages = (n == 0)
1249: ? 0
1.18 thorpej 1250: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1251:
1252: simple_unlock(&pp->pr_slock);
1.3 pk 1253: }
1254:
1.20 thorpej 1255: void
1.42 thorpej 1256: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1257: {
1258:
1.21 thorpej 1259: simple_lock(&pp->pr_slock);
1.20 thorpej 1260:
1261: pp->pr_hardlimit = n;
1262: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1263: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1264: pp->pr_hardlimit_warning_last.tv_sec = 0;
1265: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1266:
1267: /*
1.21 thorpej 1268: * In-line version of pool_sethiwat(), because we don't want to
1269: * release the lock.
1.20 thorpej 1270: */
1271: pp->pr_maxpages = (n == 0)
1272: ? 0
1273: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1274:
1275: simple_unlock(&pp->pr_slock);
1.20 thorpej 1276: }
1.3 pk 1277:
1278: /*
1279: * Release all complete pages that have not been used recently.
1280: */
1.66 thorpej 1281: int
1.59 thorpej 1282: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 1283: _pool_reclaim(struct pool *pp, const char *file, long line)
1.56 sommerfe 1284: #else
1285: pool_reclaim(struct pool *pp)
1286: #endif
1.3 pk 1287: {
1288: struct pool_item_header *ph, *phnext;
1.43 thorpej 1289: struct pool_cache *pc;
1.21 thorpej 1290: struct timeval curtime;
1.61 chs 1291: struct pool_pagelist pq;
1.21 thorpej 1292: int s;
1.3 pk 1293:
1.20 thorpej 1294: if (pp->pr_roflags & PR_STATIC)
1.66 thorpej 1295: return (0);
1.3 pk 1296:
1.68 ! thorpej 1297: if (pp->pr_drain_hook != NULL) {
! 1298: /*
! 1299: * The drain hook must be called with the pool unlocked.
! 1300: */
! 1301: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
! 1302: }
! 1303:
1.21 thorpej 1304: if (simple_lock_try(&pp->pr_slock) == 0)
1.66 thorpej 1305: return (0);
1.25 thorpej 1306: pr_enter(pp, file, line);
1.68 ! thorpej 1307:
1.61 chs 1308: TAILQ_INIT(&pq);
1.3 pk 1309:
1.43 thorpej 1310: /*
1311: * Reclaim items from the pool's caches.
1312: */
1.61 chs 1313: TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1.43 thorpej 1314: pool_cache_reclaim(pc);
1315:
1.21 thorpej 1316: s = splclock();
1317: curtime = mono_time;
1318: splx(s);
1319:
1.3 pk 1320: for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
1321: phnext = TAILQ_NEXT(ph, ph_pagelist);
1322:
1323: /* Check our minimum page claim */
1324: if (pp->pr_npages <= pp->pr_minpages)
1325: break;
1326:
1327: if (ph->ph_nmissing == 0) {
1328: struct timeval diff;
1329: timersub(&curtime, &ph->ph_time, &diff);
1330: if (diff.tv_sec < pool_inactive_time)
1331: continue;
1.21 thorpej 1332:
1333: /*
1334: * If freeing this page would put us below
1335: * the low water mark, stop now.
1336: */
1337: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1338: pp->pr_minitems)
1339: break;
1340:
1.61 chs 1341: pr_rmpage(pp, ph, &pq);
1.3 pk 1342: }
1343: }
1344:
1.25 thorpej 1345: pr_leave(pp);
1.21 thorpej 1346: simple_unlock(&pp->pr_slock);
1.66 thorpej 1347: if (TAILQ_EMPTY(&pq))
1348: return (0);
1349:
1.61 chs 1350: while ((ph = TAILQ_FIRST(&pq)) != NULL) {
1351: TAILQ_REMOVE(&pq, ph, ph_pagelist);
1.66 thorpej 1352: pool_allocator_free(pp, ph->ph_page);
1.61 chs 1353: if (pp->pr_roflags & PR_PHINPAGE) {
1354: continue;
1355: }
1356: LIST_REMOVE(ph, ph_hashlist);
1357: s = splhigh();
1358: pool_put(&phpool, ph);
1359: splx(s);
1360: }
1.66 thorpej 1361:
1362: return (1);
1.3 pk 1363: }
1364:
1365: /*
1366: * Drain pools, one at a time.
1.21 thorpej 1367: *
1368: * Note, we must never be called from an interrupt context.
1.3 pk 1369: */
1370: void
1.42 thorpej 1371: pool_drain(void *arg)
1.3 pk 1372: {
1373: struct pool *pp;
1.23 thorpej 1374: int s;
1.3 pk 1375:
1.61 chs 1376: pp = NULL;
1.49 thorpej 1377: s = splvm();
1.23 thorpej 1378: simple_lock(&pool_head_slock);
1.61 chs 1379: if (drainpp == NULL) {
1380: drainpp = TAILQ_FIRST(&pool_head);
1381: }
1382: if (drainpp) {
1383: pp = drainpp;
1384: drainpp = TAILQ_NEXT(pp, pr_poollist);
1385: }
1386: simple_unlock(&pool_head_slock);
1.63 chs 1387: if (pp != NULL) pool_reclaim(pp);
1.61 chs 1388: splx(s);
1.3 pk 1389: }
1390:
1391: /*
1392: * Diagnostic helpers.
1393: */
1394: void
1.42 thorpej 1395: pool_print(struct pool *pp, const char *modif)
1.21 thorpej 1396: {
1397: int s;
1398:
1.49 thorpej 1399: s = splvm();
1.25 thorpej 1400: if (simple_lock_try(&pp->pr_slock) == 0) {
1401: printf("pool %s is locked; try again later\n",
1402: pp->pr_wchan);
1403: splx(s);
1404: return;
1405: }
1406: pool_print1(pp, modif, printf);
1.21 thorpej 1407: simple_unlock(&pp->pr_slock);
1408: splx(s);
1409: }
1410:
1.25 thorpej 1411: void
1.42 thorpej 1412: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1413: {
1414: int didlock = 0;
1415:
1416: if (pp == NULL) {
1417: (*pr)("Must specify a pool to print.\n");
1418: return;
1419: }
1420:
1421: /*
1422: * Called from DDB; interrupts should be blocked, and all
1423: * other processors should be paused. We can skip locking
1424: * the pool in this case.
1425: *
1426: * We do a simple_lock_try() just to print the lock
1427: * status, however.
1428: */
1429:
1430: if (simple_lock_try(&pp->pr_slock) == 0)
1431: (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1432: else
1433: didlock = 1;
1434:
1435: pool_print1(pp, modif, pr);
1436:
1437: if (didlock)
1438: simple_unlock(&pp->pr_slock);
1439: }
1440:
1.21 thorpej 1441: static void
1.42 thorpej 1442: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1443: {
1.25 thorpej 1444: struct pool_item_header *ph;
1.44 thorpej 1445: struct pool_cache *pc;
1446: struct pool_cache_group *pcg;
1.25 thorpej 1447: #ifdef DIAGNOSTIC
1448: struct pool_item *pi;
1449: #endif
1.44 thorpej 1450: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1451: char c;
1452:
1453: while ((c = *modif++) != '\0') {
1454: if (c == 'l')
1455: print_log = 1;
1456: if (c == 'p')
1457: print_pagelist = 1;
1.44 thorpej 1458: if (c == 'c')
1459: print_cache = 1;
1461: }
1462:
1463: (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1464: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1465: pp->pr_roflags);
1.66 thorpej 1466: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1467: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1468: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1469: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1470: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1471:
1472: (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1473: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1474: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1475: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1476:
1477: if (print_pagelist == 0)
1478: goto skip_pagelist;
1479:
1480: if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
1481: (*pr)("\n\tpage list:\n");
1482: for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
1483: (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1484: ph->ph_page, ph->ph_nmissing,
1485: (u_long)ph->ph_time.tv_sec,
1486: (u_long)ph->ph_time.tv_usec);
1487: #ifdef DIAGNOSTIC
1.61 chs 1488: TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1.25 thorpej 1489: if (pi->pi_magic != PI_MAGIC) {
1490: (*pr)("\t\t\titem %p, magic 0x%x\n",
1491: pi, pi->pi_magic);
1492: }
1493: }
1494: #endif
1495: }
1496: if (pp->pr_curpage == NULL)
1497: (*pr)("\tno current page\n");
1498: else
1499: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1500:
1501: skip_pagelist:
1502:
1503: if (print_log == 0)
1504: goto skip_log;
1505:
1506: (*pr)("\n");
1507: if ((pp->pr_roflags & PR_LOGGING) == 0)
1508: (*pr)("\tno log\n");
1509: else
1510: pr_printlog(pp, NULL, pr);
1.3 pk 1511:
1.25 thorpej 1512: skip_log:
1.44 thorpej 1513:
1514: if (print_cache == 0)
1515: goto skip_cache;
1516:
1.61 chs 1517: TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1.44 thorpej 1518: (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
1519: pc->pc_allocfrom, pc->pc_freeto);
1.48 thorpej 1520: (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1521: pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1.61 chs 1522: TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.44 thorpej 1523: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
1524: for (i = 0; i < PCG_NOBJECTS; i++)
1525: (*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
1526: }
1527: }
1528:
1529: skip_cache:
1.3 pk 1530:
1.25 thorpej 1531: pr_enter_check(pp, pr);
1.3 pk 1532: }
1533:
1534: int
1.42 thorpej 1535: pool_chk(struct pool *pp, const char *label)
1.3 pk 1536: {
1537: struct pool_item_header *ph;
1538: int r = 0;
1539:
1.21 thorpej 1540: simple_lock(&pp->pr_slock);
1.3 pk 1541:
1.61 chs 1542: TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) {
1.3 pk 1543: struct pool_item *pi;
1544: int n;
1545: caddr_t page;
1546:
1.66 thorpej 1547: page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
1.20 thorpej 1548: if (page != ph->ph_page &&
1549: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1.3 pk 1550: if (label != NULL)
1551: printf("%s: ", label);
1.16 briggs 1552: printf("pool(%p:%s): page inconsistency: page %p;"
1553: " at page head addr %p (p %p)\n", pp,
1.3 pk 1554: pp->pr_wchan, ph->ph_page,
1555: ph, page);
1556: r++;
1557: goto out;
1558: }
1559:
1560: for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1561: pi != NULL;
1562: pi = TAILQ_NEXT(pi,pi_list), n++) {
1563:
1564: #ifdef DIAGNOSTIC
1565: if (pi->pi_magic != PI_MAGIC) {
1566: if (label != NULL)
1567: printf("%s: ", label);
1568: printf("pool(%s): free list modified: magic=%x;"
1569: " page %p; item ordinal %d;"
1570: " addr %p (p %p)\n",
1571: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1572: n, pi, page);
1573: panic("pool");
1574: }
1575: #endif
1.66 thorpej 1576: page =
1577: (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
1.3 pk 1578: if (page == ph->ph_page)
1579: continue;
1580:
1581: if (label != NULL)
1582: printf("%s: ", label);
1.16 briggs 1583: printf("pool(%p:%s): page inconsistency: page %p;"
1584: " item ordinal %d; addr %p (p %p)\n", pp,
1.3 pk 1585: pp->pr_wchan, ph->ph_page,
1586: n, pi, page);
1587: r++;
1588: goto out;
1589: }
1590: }
1591: out:
1.21 thorpej 1592: simple_unlock(&pp->pr_slock);
1.3 pk 1593: return (r);
1.43 thorpej 1594: }
1595:
1596: /*
1597: * pool_cache_init:
1598: *
1599: * Initialize a pool cache.
1600: *
1601: * NOTE: If the pool must be protected from interrupts, we expect
1602: * to be called at the appropriate interrupt priority level.
1603: */
1604: void
1605: pool_cache_init(struct pool_cache *pc, struct pool *pp,
1606: int (*ctor)(void *, void *, int),
1607: void (*dtor)(void *, void *),
1608: void *arg)
1609: {
1610:
1611: TAILQ_INIT(&pc->pc_grouplist);
1612: simple_lock_init(&pc->pc_slock);
1613:
1614: pc->pc_allocfrom = NULL;
1615: pc->pc_freeto = NULL;
1616: pc->pc_pool = pp;
1617:
1618: pc->pc_ctor = ctor;
1619: pc->pc_dtor = dtor;
1620: pc->pc_arg = arg;
1621:
1.48 thorpej 1622: pc->pc_hits = 0;
1623: pc->pc_misses = 0;
1624:
1625: pc->pc_ngroups = 0;
1626:
1627: pc->pc_nitems = 0;
1628:
1.43 thorpej 1629: simple_lock(&pp->pr_slock);
1630: TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
1631: simple_unlock(&pp->pr_slock);
1632: }
1633:
1634: /*
1635: * pool_cache_destroy:
1636: *
1637: * Destroy a pool cache.
1638: */
1639: void
1640: pool_cache_destroy(struct pool_cache *pc)
1641: {
1642: struct pool *pp = pc->pc_pool;
1643:
1644: /* First, invalidate the entire cache. */
1645: pool_cache_invalidate(pc);
1646:
1647: /* ...and remove it from the pool's cache list. */
1648: simple_lock(&pp->pr_slock);
1649: TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
1650: simple_unlock(&pp->pr_slock);
1651: }
1652:
1653: static __inline void *
1654: pcg_get(struct pool_cache_group *pcg)
1655: {
1656: void *object;
1657: u_int idx;
1658:
1659: KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1.45 thorpej 1660: KASSERT(pcg->pcg_avail != 0);
1.43 thorpej 1661: idx = --pcg->pcg_avail;
1662:
1663: KASSERT(pcg->pcg_objects[idx] != NULL);
1664: object = pcg->pcg_objects[idx];
1665: pcg->pcg_objects[idx] = NULL;
1666:
1667: return (object);
1668: }
1669:
1670: static __inline void
1671: pcg_put(struct pool_cache_group *pcg, void *object)
1672: {
1673: u_int idx;
1674:
1675: KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1676: idx = pcg->pcg_avail++;
1677:
1678: KASSERT(pcg->pcg_objects[idx] == NULL);
1679: pcg->pcg_objects[idx] = object;
1680: }
1681:
1682: /*
1683: * pool_cache_get:
1684: *
1685: * Get an object from a pool cache.
1686: */
1687: void *
1688: pool_cache_get(struct pool_cache *pc, int flags)
1689: {
1690: struct pool_cache_group *pcg;
1691: void *object;
1.58 thorpej 1692:
1693: #ifdef LOCKDEBUG
1694: if (flags & PR_WAITOK)
1695: simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
1696: #endif
1.43 thorpej 1697:
1698: simple_lock(&pc->pc_slock);
1699:
1700: if ((pcg = pc->pc_allocfrom) == NULL) {
1.61 chs 1701: TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.43 thorpej 1702: if (pcg->pcg_avail != 0) {
1703: pc->pc_allocfrom = pcg;
1704: goto have_group;
1705: }
1706: }
1707:
1708: /*
1709: * No groups with any available objects. Allocate
1710: * a new object, construct it, and return it to
1711: * the caller. We will allocate a group, if necessary,
1712: * when the object is freed back to the cache.
1713: */
1.48 thorpej 1714: pc->pc_misses++;
1.43 thorpej 1715: simple_unlock(&pc->pc_slock);
1716: object = pool_get(pc->pc_pool, flags);
1717: if (object != NULL && pc->pc_ctor != NULL) {
1718: if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1719: pool_put(pc->pc_pool, object);
1720: return (NULL);
1721: }
1722: }
1723: return (object);
1724: }
1725:
1726: have_group:
1.48 thorpej 1727: pc->pc_hits++;
1728: pc->pc_nitems--;
1.43 thorpej 1729: object = pcg_get(pcg);
1730:
1731: if (pcg->pcg_avail == 0)
1732: pc->pc_allocfrom = NULL;
1.45 thorpej 1733:
1.43 thorpej 1734: simple_unlock(&pc->pc_slock);
1735:
1736: return (object);
1737: }
1738:
1739: /*
1740: * pool_cache_put:
1741: *
1742: * Put an object back to the pool cache.
1743: */
1744: void
1745: pool_cache_put(struct pool_cache *pc, void *object)
1746: {
1747: struct pool_cache_group *pcg;
1.60 thorpej 1748: int s;
1.43 thorpej 1749:
1750: simple_lock(&pc->pc_slock);
1751:
1752: if ((pcg = pc->pc_freeto) == NULL) {
1.61 chs 1753: TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1.43 thorpej 1754: if (pcg->pcg_avail != PCG_NOBJECTS) {
1755: pc->pc_freeto = pcg;
1756: goto have_group;
1757: }
1758: }
1759:
1760: /*
 1761: * No group with room to free the object to. Attempt to
1.47 thorpej 1762: * allocate one.
1.43 thorpej 1763: */
1.47 thorpej 1764: simple_unlock(&pc->pc_slock);
1.60 thorpej 1765: s = splvm();
1.43 thorpej 1766: pcg = pool_get(&pcgpool, PR_NOWAIT);
1.60 thorpej 1767: splx(s);
1.43 thorpej 1768: if (pcg != NULL) {
1769: memset(pcg, 0, sizeof(*pcg));
1.47 thorpej 1770: simple_lock(&pc->pc_slock);
1.48 thorpej 1771: pc->pc_ngroups++;
1.43 thorpej 1772: TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1.47 thorpej 1773: if (pc->pc_freeto == NULL)
1774: pc->pc_freeto = pcg;
1.43 thorpej 1775: goto have_group;
1776: }
1777:
1778: /*
1779: * Unable to allocate a cache group; destruct the object
1780: * and free it back to the pool.
1781: */
1.51 thorpej 1782: pool_cache_destruct_object(pc, object);
1.43 thorpej 1783: return;
1784: }
1785:
1786: have_group:
1.48 thorpej 1787: pc->pc_nitems++;
1.43 thorpej 1788: pcg_put(pcg, object);
1789:
1790: if (pcg->pcg_avail == PCG_NOBJECTS)
1791: pc->pc_freeto = NULL;
1792:
1793: simple_unlock(&pc->pc_slock);
1.51 thorpej 1794: }
1795:
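/*
 * Example (illustration only, compiled out): a minimal sketch of how a
 * subsystem might layer a pool_cache over a pool so that objects keep
 * their constructed state across free/alloc cycles.  The "foo" type and
 * its ctor/dtor are hypothetical; only the pool/pool_cache calls are
 * part of this interface.
 */
#ifdef notdef
struct foo {
	int	f_state;		/* expensive-to-initialize state */
};

static struct pool foo_pool;
static struct pool_cache foo_cache;

static int
foo_ctor(void *arg, void *object, int flags)
{
	struct foo *f = object;

	f->f_state = 0;			/* construction happens once */
	return (0);			/* non-zero fails pool_cache_get() */
}

static void
foo_dtor(void *arg, void *object)
{

	/* Undo whatever foo_ctor() did. */
}

void
foo_init(void)
{

	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
}

void
foo_use(void)
{
	struct foo *f;

	f = pool_cache_get(&foo_cache, PR_WAITOK);  /* arrives constructed */
	/* ... use f ... */
	pool_cache_put(&foo_cache, f);	/* cached while still constructed */
}
#endif /* notdef */
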
1796: /*
1797: * pool_cache_destruct_object:
1798: *
1799: * Force destruction of an object and its release back into
1800: * the pool.
1801: */
1802: void
1803: pool_cache_destruct_object(struct pool_cache *pc, void *object)
1804: {
1805:
1806: if (pc->pc_dtor != NULL)
1807: (*pc->pc_dtor)(pc->pc_arg, object);
1808: pool_put(pc->pc_pool, object);
1.43 thorpej 1809: }
1810:
1811: /*
1812: * pool_cache_do_invalidate:
1813: *
1814: * This internal function implements pool_cache_invalidate() and
1815: * pool_cache_reclaim().
1816: */
1817: static void
1818: pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
1.56 sommerfe 1819: void (*putit)(struct pool *, void *))
1.43 thorpej 1820: {
1821: struct pool_cache_group *pcg, *npcg;
1822: void *object;
1.60 thorpej 1823: int s;
1.43 thorpej 1824:
1825: for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1826: pcg = npcg) {
1827: npcg = TAILQ_NEXT(pcg, pcg_list);
1828: while (pcg->pcg_avail != 0) {
1.48 thorpej 1829: pc->pc_nitems--;
1.43 thorpej 1830: object = pcg_get(pcg);
1.45 thorpej 1831: if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
1832: pc->pc_allocfrom = NULL;
1.43 thorpej 1833: if (pc->pc_dtor != NULL)
1834: (*pc->pc_dtor)(pc->pc_arg, object);
1.56 sommerfe 1835: (*putit)(pc->pc_pool, object);
1.43 thorpej 1836: }
1837: if (free_groups) {
1.48 thorpej 1838: pc->pc_ngroups--;
1.43 thorpej 1839: TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
1.46 thorpej 1840: if (pc->pc_freeto == pcg)
1841: pc->pc_freeto = NULL;
1.60 thorpej 1842: s = splvm();
1.43 thorpej 1843: pool_put(&pcgpool, pcg);
1.60 thorpej 1844: splx(s);
1.43 thorpej 1845: }
1846: }
1847: }
1848:
1849: /*
1850: * pool_cache_invalidate:
1851: *
1852: * Invalidate a pool cache (destruct and release all of the
1853: * cached objects).
1854: */
1855: void
1856: pool_cache_invalidate(struct pool_cache *pc)
1857: {
1858:
1859: simple_lock(&pc->pc_slock);
1.56 sommerfe 1860: pool_cache_do_invalidate(pc, 0, pool_put);
1.43 thorpej 1861: simple_unlock(&pc->pc_slock);
1862: }
1863:
1864: /*
1865: * pool_cache_reclaim:
1866: *
1867: * Reclaim a pool cache for pool_reclaim().
1868: */
1869: static void
1870: pool_cache_reclaim(struct pool_cache *pc)
1871: {
1872:
1.47 thorpej 1873: simple_lock(&pc->pc_slock);
1.43 thorpej 1874: pool_cache_do_invalidate(pc, 1, pool_do_put);
1875: simple_unlock(&pc->pc_slock);
1.3 pk 1876: }
1.66 thorpej 1877:
1878: /*
1879: * Pool backend allocators.
1880: *
1881: * Each pool has a backend allocator that handles allocation, deallocation,
1882: * and any additional draining that might be needed.
1883: *
1884: * We provide two standard allocators:
1885: *
1886: * pool_allocator_kmem - the default when no allocator is specified
1887: *
1888: * pool_allocator_nointr - used for pools that will not be accessed
1889: * in interrupt context.
1890: */
1891: void *pool_page_alloc(struct pool *, int);
1892: void pool_page_free(struct pool *, void *);
1893:
1894: struct pool_allocator pool_allocator_kmem = {
1895: pool_page_alloc, pool_page_free, 0,
1896: };
1897:
1898: void *pool_page_alloc_nointr(struct pool *, int);
1899: void pool_page_free_nointr(struct pool *, void *);
1900:
1901: struct pool_allocator pool_allocator_nointr = {
1902: pool_page_alloc_nointr, pool_page_free_nointr, 0,
1903: };
1904:
1905: #ifdef POOL_SUBPAGE
1906: void *pool_subpage_alloc(struct pool *, int);
1907: void pool_subpage_free(struct pool *, void *);
1908:
1909: struct pool_allocator pool_allocator_kmem_subpage = {
1910: pool_subpage_alloc, pool_subpage_free, 0,
1911: };
1912: #endif /* POOL_SUBPAGE */
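
/*
 * Example (illustration only, compiled out): a minimal sketch of how a
 * pool selects its backend allocator.  Passing NULL to pool_init() gets
 * pool_allocator_kmem, as noted above; a pool with special VA needs can
 * supply its own.  The "mycl" names are hypothetical.
 */
#ifdef notdef
void	*mycl_page_alloc(struct pool *, int);
void	mycl_page_free(struct pool *, void *);

struct pool_allocator mycl_allocator = {
	mycl_page_alloc, mycl_page_free, 0,	/* 0 => default page size */
};

static struct pool mycl_pool;

void
mycl_init(void)
{

	/* Back this pool with the custom allocator... */
	pool_init(&mycl_pool, 128, 0, 0, 0, "myclpl", &mycl_allocator);

	/* ...whereas passing NULL here would select pool_allocator_kmem. */
}
#endif /* notdef */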
1913:
1914: /*
1915: * We have at least three different resources for the same allocation and
1916: * each resource can be depleted. First, we have the ready elements in the
1917: * pool. Then we have the resource (typically a vm_map) for this allocator.
1918:  * Finally, we have physical memory.  Waiting on any one of these can
1919:  * become unnecessary when any other is freed, but the kernel doesn't
1920:  * support sleeping on multiple wait channels, so we employ another strategy.
1921: *
1922: * The caller sleeps on the pool (so that it can be awakened when an item
1923: * is returned to the pool), but we set PA_WANT on the allocator. When a
1924: * page is returned to the allocator and PA_WANT is set, pool_allocator_free
1925: * will wake up all sleeping pools belonging to this allocator.
1926: *
1927: * XXX Thundering herd.
1928: */
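/*
 * pool_allocator_alloc:
 *
 *	Allocate a page for a pool.  If the first attempt fails,
 *	drain other pools that share this allocator before retrying
 *	or, for PR_NOWAIT callers, giving up.
 */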
1929: void *
1930: pool_allocator_alloc(struct pool *org, int flags)
1931: {
1932: struct pool_allocator *pa = org->pr_alloc;
1933: struct pool *pp, *start;
1934: int s, freed;
1935: void *res;
1936:
1937: do {
1938: if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
1939: return (res);
1.68 ! thorpej 1940: if ((flags & PR_WAITOK) == 0) {
! 1941: /*
! 1942: 			 * We only run the drain hook here if PR_NOWAIT.
! 1943: * In other cases, the hook will be run in
! 1944: * pool_reclaim().
! 1945: */
! 1946: if (org->pr_drain_hook != NULL) {
! 1947: (*org->pr_drain_hook)(org->pr_drain_hook_arg,
! 1948: flags);
! 1949: if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
! 1950: return (res);
! 1951: }
1.66 thorpej 1952: break;
1.68 ! thorpej 1953: }
1.66 thorpej 1954:
1955: /*
1956: * Drain all pools, except "org", that use this
1957: * allocator. We do this to reclaim VA space.
1958: * pa_alloc is responsible for waiting for
1959: * physical memory.
1960: *
1961: 		 * XXX We risk looping forever if someone
1962: * calls pool_destroy on "start". But there is no
1963: * other way to have potentially sleeping pool_reclaim,
1964: * non-sleeping locks on pool_allocator, and some
1965: * stirring of drained pools in the allocator.
1.68 ! thorpej 1966: *
! 1967: * XXX Maybe we should use pool_head_slock for locking
! 1968: * the allocators?
1.66 thorpej 1969: */
1970: freed = 0;
1971:
1972: s = splvm();
1973: simple_lock(&pa->pa_slock);
1974: pp = start = TAILQ_FIRST(&pa->pa_list);
1975: do {
1976: TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
1977: TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
1978: if (pp == org)
1979: continue;
1980: 			simple_unlock(&pa->pa_slock);
1981: 			freed = pool_reclaim(pp);
1982: 			simple_lock(&pa->pa_slock);
1983: } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
1984: freed == 0);
1985:
1986: if (freed == 0) {
1987: /*
1988: * We set PA_WANT here, the caller will most likely
1989: * sleep waiting for pages (if not, this won't hurt
1990: * that much), and there is no way to set this in
1991: * the caller without violating locking order.
1992: */
1993: pa->pa_flags |= PA_WANT;
1994: }
1995: simple_unlock(&pa->pa_slock);
1996: splx(s);
1997: } while (freed);
1998: return (NULL);
1999: }
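
/*
 * Example (illustration only, compiled out): the PR_NOWAIT path above
 * runs a pool's drain hook directly.  A subsystem registers such a
 * hook with pool_set_drain_hook(); the reclaim function and the pool
 * below are hypothetical (mycl_pool is the pool from the earlier
 * allocator sketch).
 */
#ifdef notdef
extern struct pool mycl_pool;

static void
mycl_drain(void *arg, int flags)
{

	/* Release whatever cached resources can be given back. */
}

void
mycl_attach_hook(void)
{

	pool_set_drain_hook(&mycl_pool, mycl_drain, NULL);
}
#endif /* notdef */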
2000:
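/*
 * pool_allocator_free:
 *
 *	Return a page to the allocator.  If PA_WANT is set, wake up
 *	all pools sleeping on this allocator, per the strategy
 *	described above pool_allocator_alloc().
 */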
2001: void
2002: pool_allocator_free(struct pool *pp, void *v)
2003: {
2004: struct pool_allocator *pa = pp->pr_alloc;
2005: int s;
2006:
2007: (*pa->pa_free)(pp, v);
2008:
2009: s = splvm();
2010: simple_lock(&pa->pa_slock);
2011: if ((pa->pa_flags & PA_WANT) == 0) {
2012: simple_unlock(&pa->pa_slock);
2013: splx(s);
2014: return;
2015: }
2016:
2017: TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
2018: simple_lock(&pp->pr_slock);
2019: if ((pp->pr_flags & PR_WANTED) != 0) {
2020: pp->pr_flags &= ~PR_WANTED;
2021: wakeup(pp);
2022: }
2023: }
2024: pa->pa_flags &= ~PA_WANT;
2025: simple_unlock(&pa->pa_slock);
2026: splx(s);
2027: }
2028:
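/*
 * pool_page_alloc, pool_page_free:
 *
 *	Back-end page allocation for pool_allocator_kmem, using the
 *	uvm_km pool-page primitives.
 */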
2029: void *
2030: pool_page_alloc(struct pool *pp, int flags)
2031: {
2032: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2033:
2034: return ((void *) uvm_km_alloc_poolpage(waitok));
2035: }
2036:
2037: void
2038: pool_page_free(struct pool *pp, void *v)
2039: {
2040:
2041: uvm_km_free_poolpage((vaddr_t) v);
2042: }
2043:
2044: #ifdef POOL_SUBPAGE
2045: /* Sub-page allocator, for machines with large hardware pages. */
2046: void *
2047: pool_subpage_alloc(struct pool *pp, int flags)
2048: {
2049:
2050: return (pool_get(&psppool, flags));
2051: }
2052:
2053: void
2054: pool_subpage_free(struct pool *pp, void *v)
2055: {
2056:
2057: pool_put(&psppool, v);
2058: }
2059:
2060: /* We don't provide a real nointr allocator. Maybe later. */
2061: void *
2062: pool_page_alloc_nointr(struct pool *pp, int flags)
2063: {
2064:
2065: return (pool_subpage_alloc(pp, flags));
2066: }
2067:
2068: void
2069: pool_page_free_nointr(struct pool *pp, void *v)
2070: {
2071:
2072: pool_subpage_free(pp, v);
2073: }
2074: #else
2075: void *
2076: pool_page_alloc_nointr(struct pool *pp, int flags)
2077: {
2078: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2079:
2080: return ((void *) uvm_km_alloc_poolpage1(kernel_map,
2081: uvm.kernel_object, waitok));
2082: }
2083:
2084: void
2085: pool_page_free_nointr(struct pool *pp, void *v)
2086: {
2087:
2088: uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
2089: }
2090: #endif /* POOL_SUBPAGE */