Annotation of src/sys/kern/subr_pool.c, Revision 1.40
1.40 ! sommerfe 1: /* $NetBSD: subr_pool.c,v 1.39 2000/06/27 17:41:34 mrg Exp $ */
1.1 pk 2:
3: /*-
1.20 thorpej 4: * Copyright (c) 1997, 1999 The NetBSD Foundation, Inc.
1.1 pk 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 8: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9: * Simulation Facility, NASA Ames Research Center.
1.1 pk 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
1.13 christos 21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
1.1 pk 23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
1.24 scottr 39:
1.25 thorpej 40: #include "opt_pool.h"
1.24 scottr 41: #include "opt_poollog.h"
1.28 thorpej 42: #include "opt_lockdebug.h"
1.1 pk 43:
44: #include <sys/param.h>
45: #include <sys/systm.h>
46: #include <sys/proc.h>
47: #include <sys/errno.h>
48: #include <sys/kernel.h>
49: #include <sys/malloc.h>
50: #include <sys/lock.h>
51: #include <sys/pool.h>
1.20 thorpej 52: #include <sys/syslog.h>
1.3 pk 53:
54: #include <uvm/uvm.h>
55:
1.1 pk 56: /*
57: * Pool resource management utility.
1.3 pk 58: *
59: * Memory is allocated in pages which are split into pieces according
60: * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
61: * in the pool structure and the individual pool items are on a linked list
62: * headed by `ph_itemlist' in each page header. The memory for building
63: * the page list is either taken from the allocated pages themselves (for
64: * small pool items) or taken from an internal pool of page headers (`phpool').
1.1 pk 65: */
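                  /*
                   * Illustrative usage sketch (added commentary, not part of the
                   * original source; `foo_pool' and `struct foo' are hypothetical):
                   *
                   *	static struct pool foo_pool;
                   *
                   *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
                   *	    "foopl", 0, NULL, NULL, M_DEVBUF);
                   *	...
                   *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
                   *	...
                   *	pool_put(&foo_pool, f);
                   *
                   * Passing NULL for both alloc and release selects the default
                   * page allocator and PAGE_SIZE pages (see pool_init() below).
                   */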
66:
1.3 pk 67: /* List of all pools */
1.5 thorpej 68: TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
1.3 pk 69:
70: /* Private pool for page header structures */
71: static struct pool phpool;
72:
73: /* # of seconds to retain page after last use */
74: int pool_inactive_time = 10;
75:
76: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 77: static struct pool *drainpp;
78:
79: /* This spin lock protects both pool_head and drainpp. */
80: struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
1.3 pk 81:
82: struct pool_item_header {
83: /* Page headers */
84: TAILQ_ENTRY(pool_item_header)
85: ph_pagelist; /* pool page list */
86: TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
87: LIST_ENTRY(pool_item_header)
88: ph_hashlist; /* Off-page page headers */
89: int ph_nmissing; /* # of chunks in use */
90: caddr_t ph_page; /* this page's address */
91: struct timeval ph_time; /* last referenced */
92: };
93:
1.1 pk 94: struct pool_item {
1.3 pk 95: #ifdef DIAGNOSTIC
96: int pi_magic;
1.33 chs 97: #endif
1.25 thorpej 98: #define PI_MAGIC 0xdeadbeef
1.3 pk 99: /* Other entries use only this list entry */
100: TAILQ_ENTRY(pool_item) pi_list;
101: };
102:
103:
1.25 thorpej 104: #define PR_HASH_INDEX(pp,addr) \
1.3 pk 105: (((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))
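                  /*
                   * E.g. (illustrative, assuming a 4KB page size, so
                   * pr_pageshift == 12): an item at 0x12345678 lies in the
                   * page at 0x12345000 and thus hashes to bucket
                   * (0x12345 & (PR_HASHTABSIZE - 1)).
                   */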
106:
107:
108:
109: static struct pool_item_header
110: *pr_find_pagehead __P((struct pool *, caddr_t));
111: static void pr_rmpage __P((struct pool *, struct pool_item_header *));
1.20 thorpej 112: static int pool_catchup __P((struct pool *));
1.21 thorpej 113: static void pool_prime_page __P((struct pool *, caddr_t));
1.3 pk 114: static void *pool_page_alloc __P((unsigned long, int, int));
115: static void pool_page_free __P((void *, unsigned long, int));
116:
1.25 thorpej 117: static void pool_print1 __P((struct pool *, const char *,
118: void (*)(const char *, ...)));
1.3 pk 119:
120: /*
                  121:  * Pool log entry. An array of these is allocated in pool_init().
122: */
123: struct pool_log {
124: const char *pl_file;
125: long pl_line;
126: int pl_action;
1.25 thorpej 127: #define PRLOG_GET 1
128: #define PRLOG_PUT 2
1.3 pk 129: void *pl_addr;
1.1 pk 130: };
131:
1.3 pk 132: /* Number of entries in pool log buffers */
1.17 thorpej 133: #ifndef POOL_LOGSIZE
134: #define POOL_LOGSIZE 10
135: #endif
136:
137: int pool_logsize = POOL_LOGSIZE;
1.1 pk 138:
1.25 thorpej 139: #ifdef DIAGNOSTIC
1.3 pk 140: static void pr_log __P((struct pool *, void *, int, const char *, long));
1.25 thorpej 141: static void pr_printlog __P((struct pool *, struct pool_item *,
142: void (*)(const char *, ...)));
143: static void pr_enter __P((struct pool *, const char *, long));
144: static void pr_leave __P((struct pool *));
145: static void pr_enter_check __P((struct pool *,
146: void (*)(const char *, ...)));
1.3 pk 147:
148: static __inline__ void
149: pr_log(pp, v, action, file, line)
150: struct pool *pp;
151: void *v;
152: int action;
153: const char *file;
154: long line;
155: {
156: int n = pp->pr_curlogentry;
157: struct pool_log *pl;
158:
1.20 thorpej 159: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 160: return;
161:
162: /*
163: * Fill in the current entry. Wrap around and overwrite
164: * the oldest entry if necessary.
165: */
166: pl = &pp->pr_log[n];
167: pl->pl_file = file;
168: pl->pl_line = line;
169: pl->pl_action = action;
170: pl->pl_addr = v;
171: if (++n >= pp->pr_logsize)
172: n = 0;
173: pp->pr_curlogentry = n;
174: }
175:
176: static void
1.25 thorpej 177: pr_printlog(pp, pi, pr)
1.3 pk 178: struct pool *pp;
1.25 thorpej 179: struct pool_item *pi;
180: void (*pr) __P((const char *, ...));
1.3 pk 181: {
182: int i = pp->pr_logsize;
183: int n = pp->pr_curlogentry;
184:
1.20 thorpej 185: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 186: return;
187:
188: /*
189: * Print all entries in this pool's log.
190: */
191: while (i-- > 0) {
192: struct pool_log *pl = &pp->pr_log[n];
193: if (pl->pl_action != 0) {
1.25 thorpej 194: if (pi == NULL || pi == pl->pl_addr) {
195: (*pr)("\tlog entry %d:\n", i);
196: (*pr)("\t\taction = %s, addr = %p\n",
197: pl->pl_action == PRLOG_GET ? "get" : "put",
198: pl->pl_addr);
199: (*pr)("\t\tfile: %s at line %lu\n",
200: pl->pl_file, pl->pl_line);
201: }
1.3 pk 202: }
203: if (++n >= pp->pr_logsize)
204: n = 0;
205: }
206: }
1.25 thorpej 207:
208: static __inline__ void
209: pr_enter(pp, file, line)
210: struct pool *pp;
211: const char *file;
212: long line;
213: {
214:
1.34 thorpej 215: if (__predict_false(pp->pr_entered_file != NULL)) {
1.25 thorpej 216: printf("pool %s: reentrancy at file %s line %ld\n",
217: pp->pr_wchan, file, line);
218: printf(" previous entry at file %s line %ld\n",
219: pp->pr_entered_file, pp->pr_entered_line);
220: panic("pr_enter");
221: }
222:
223: pp->pr_entered_file = file;
224: pp->pr_entered_line = line;
225: }
226:
227: static __inline__ void
228: pr_leave(pp)
229: struct pool *pp;
230: {
231:
1.34 thorpej 232: if (__predict_false(pp->pr_entered_file == NULL)) {
1.25 thorpej 233: printf("pool %s not entered?\n", pp->pr_wchan);
234: panic("pr_leave");
235: }
236:
237: pp->pr_entered_file = NULL;
238: pp->pr_entered_line = 0;
239: }
240:
241: static __inline__ void
242: pr_enter_check(pp, pr)
243: struct pool *pp;
244: void (*pr) __P((const char *, ...));
245: {
246:
247: if (pp->pr_entered_file != NULL)
248: (*pr)("\n\tcurrently entered from file %s line %ld\n",
249: pp->pr_entered_file, pp->pr_entered_line);
250: }
1.3 pk 251: #else
1.25 thorpej 252: #define pr_log(pp, v, action, file, line)
253: #define pr_printlog(pp, pi, pr)
254: #define pr_enter(pp, file, line)
255: #define pr_leave(pp)
256: #define pr_enter_check(pp, pr)
257: #endif /* DIAGNOSTIC */
1.3 pk 258:
259: /*
260: * Return the pool page header based on page address.
261: */
262: static __inline__ struct pool_item_header *
263: pr_find_pagehead(pp, page)
264: struct pool *pp;
265: caddr_t page;
266: {
267: struct pool_item_header *ph;
268:
1.20 thorpej 269: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.3 pk 270: return ((struct pool_item_header *)(page + pp->pr_phoffset));
271:
272: for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
273: ph != NULL;
274: ph = LIST_NEXT(ph, ph_hashlist)) {
275: if (ph->ph_page == page)
276: return (ph);
277: }
278: return (NULL);
279: }
280:
281: /*
282: * Remove a page from the pool.
283: */
284: static __inline__ void
285: pr_rmpage(pp, ph)
286: struct pool *pp;
287: struct pool_item_header *ph;
288: {
289:
290: /*
1.7 thorpej 291: * If the page was idle, decrement the idle page count.
1.3 pk 292: */
1.6 thorpej 293: if (ph->ph_nmissing == 0) {
294: #ifdef DIAGNOSTIC
295: if (pp->pr_nidle == 0)
296: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 297: if (pp->pr_nitems < pp->pr_itemsperpage)
298: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 299: #endif
300: pp->pr_nidle--;
301: }
1.7 thorpej 302:
1.20 thorpej 303: pp->pr_nitems -= pp->pr_itemsperpage;
304:
1.7 thorpej 305: /*
306: * Unlink a page from the pool and release it.
307: */
308: TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
309: (*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
310: pp->pr_npages--;
311: pp->pr_npagefree++;
1.6 thorpej 312:
1.22 chs 313: if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
1.27 pk 314: int s;
1.22 chs 315: LIST_REMOVE(ph, ph_hashlist);
1.27 pk 316: s = splhigh();
1.22 chs 317: pool_put(&phpool, ph);
1.27 pk 318: splx(s);
1.22 chs 319: }
320:
1.3 pk 321: if (pp->pr_curpage == ph) {
322: /*
323: * Find a new non-empty page header, if any.
324: * Start search from the page head, to increase the
325: * chance for "high water" pages to be freed.
326: */
327: for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
328: ph = TAILQ_NEXT(ph, ph_pagelist))
329: if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
330: break;
331:
332: pp->pr_curpage = ph;
1.21 thorpej 333: }
1.3 pk 334: }
335:
336: /*
337: * Allocate and initialize a pool.
338: */
1.1 pk 339: struct pool *
1.3 pk 340: pool_create(size, align, ioff, nitems, wchan, pagesz, alloc, release, mtype)
1.1 pk 341: size_t size;
1.3 pk 342: u_int align;
343: u_int ioff;
1.1 pk 344: int nitems;
1.21 thorpej 345: const char *wchan;
1.3 pk 346: size_t pagesz;
347: void *(*alloc) __P((unsigned long, int, int));
348: void (*release) __P((void *, unsigned long, int));
1.1 pk 349: int mtype;
350: {
351: struct pool *pp;
1.3 pk 352: int flags;
1.1 pk 353:
1.3 pk 354: pp = (struct pool *)malloc(sizeof(*pp), M_POOL, M_NOWAIT);
355: if (pp == NULL)
1.1 pk 356: return (NULL);
1.3 pk 357:
358: flags = PR_FREEHEADER;
359: pool_init(pp, size, align, ioff, flags, wchan, pagesz,
360: alloc, release, mtype);
361:
362: if (nitems != 0) {
363: if (pool_prime(pp, nitems, NULL) != 0) {
364: pool_destroy(pp);
365: return (NULL);
366: }
1.1 pk 367: }
368:
1.3 pk 369: return (pp);
370: }
371:
372: /*
373: * Initialize the given pool resource structure.
374: *
375: * We export this routine to allow other kernel parts to declare
376: * static pools that must be initialized before malloc() is available.
377: */
378: void
379: pool_init(pp, size, align, ioff, flags, wchan, pagesz, alloc, release, mtype)
380: struct pool *pp;
381: size_t size;
382: u_int align;
383: u_int ioff;
384: int flags;
1.21 thorpej 385: const char *wchan;
1.3 pk 386: size_t pagesz;
387: void *(*alloc) __P((unsigned long, int, int));
388: void (*release) __P((void *, unsigned long, int));
389: int mtype;
390: {
1.16 briggs 391: int off, slack, i;
1.3 pk 392:
1.25 thorpej 393: #ifdef POOL_DIAGNOSTIC
394: /*
395: * Always log if POOL_DIAGNOSTIC is defined.
396: */
397: if (pool_logsize != 0)
398: flags |= PR_LOGGING;
399: #endif
400:
1.3 pk 401: /*
402: * Check arguments and construct default values.
403: */
1.36 pk 404: if (!powerof2(pagesz))
1.3 pk 405: panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);
406:
1.4 thorpej 407: if (alloc == NULL && release == NULL) {
1.3 pk 408: alloc = pool_page_alloc;
409: release = pool_page_free;
1.4 thorpej 410: pagesz = PAGE_SIZE; /* Rounds to PAGE_SIZE anyhow. */
411: } else if ((alloc != NULL && release != NULL) == 0) {
412: /* If you specifiy one, must specify both. */
413: panic("pool_init: must specify alloc and release together");
414: }
415:
1.3 pk 416: if (pagesz == 0)
417: pagesz = PAGE_SIZE;
418:
419: if (align == 0)
420: align = ALIGN(1);
1.14 thorpej 421:
422: if (size < sizeof(struct pool_item))
423: size = sizeof(struct pool_item);
1.3 pk 424:
1.35 pk 425: size = ALIGN(size);
426: if (size >= pagesz)
427: panic("pool_init: pool item size (%lu) too large",
428: (u_long)size);
429:
1.3 pk 430: /*
431: * Initialize the pool structure.
432: */
433: TAILQ_INIT(&pp->pr_pagelist);
434: pp->pr_curpage = NULL;
435: pp->pr_npages = 0;
436: pp->pr_minitems = 0;
437: pp->pr_minpages = 0;
438: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 439: pp->pr_roflags = flags;
440: pp->pr_flags = 0;
1.35 pk 441: pp->pr_size = size;
1.3 pk 442: pp->pr_align = align;
443: pp->pr_wchan = wchan;
444: pp->pr_mtype = mtype;
445: pp->pr_alloc = alloc;
446: pp->pr_free = release;
447: pp->pr_pagesz = pagesz;
448: pp->pr_pagemask = ~(pagesz - 1);
449: pp->pr_pageshift = ffs(pagesz) - 1;
1.20 thorpej 450: pp->pr_nitems = 0;
451: pp->pr_nout = 0;
452: pp->pr_hardlimit = UINT_MAX;
453: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 454: pp->pr_hardlimit_ratecap.tv_sec = 0;
455: pp->pr_hardlimit_ratecap.tv_usec = 0;
456: pp->pr_hardlimit_warning_last.tv_sec = 0;
457: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.3 pk 458:
459: /*
460: * Decide whether to put the page header off page to avoid
461: * wasting too large a part of the page. Off-page page headers
462: * go on a hash table, so we can match a returned item
463: * with its header based on the page address.
464: * We use 1/16 of the page size as the threshold (XXX: tune)
465: */
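                  	/*
                  	 * E.g. with 4KB pages this keeps the header in-page for
                  	 * items smaller than 256 bytes (4096/16) and takes it
                  	 * from phpool for anything larger.
                  	 */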
466: if (pp->pr_size < pagesz/16) {
467: /* Use the end of the page for the page header */
1.20 thorpej 468: pp->pr_roflags |= PR_PHINPAGE;
1.3 pk 469: pp->pr_phoffset = off =
470: pagesz - ALIGN(sizeof(struct pool_item_header));
1.2 pk 471: } else {
1.3 pk 472: /* The page header will be taken from our page header pool */
473: pp->pr_phoffset = 0;
474: off = pagesz;
1.16 briggs 475: for (i = 0; i < PR_HASHTABSIZE; i++) {
476: LIST_INIT(&pp->pr_hashtab[i]);
477: }
1.2 pk 478: }
1.1 pk 479:
1.3 pk 480: /*
481: * Alignment is to take place at `ioff' within the item. This means
482: * we must reserve up to `align - 1' bytes on the page to allow
483: * appropriate positioning of each item.
484: *
485: * Silently enforce `0 <= ioff < align'.
486: */
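                  	/*
                  	 * E.g. (illustrative) with align == 32 and ioff == 8, each
                  	 * item is placed so that (item address + 8) is a multiple
                  	 * of 32; (align - ioff) % align == 24 bytes of the item
                  	 * area are sacrificed to establish this.
                  	 */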
487: pp->pr_itemoffset = ioff = ioff % align;
488: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
489:
490: /*
491: * Use the slack between the chunks and the page header
492: * for "cache coloring".
493: */
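                  	/*
                  	 * E.g. successive pages start their item area at offsets 0,
                  	 * align, 2*align, ... up to pr_maxcolor (see the pr_curcolor
                  	 * arithmetic in pool_prime_page()), so the same item index
                  	 * on different pages lands on different cache lines.
                  	 */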
494: slack = off - pp->pr_itemsperpage * pp->pr_size;
495: pp->pr_maxcolor = (slack / align) * align;
496: pp->pr_curcolor = 0;
497:
498: pp->pr_nget = 0;
499: pp->pr_nfail = 0;
500: pp->pr_nput = 0;
501: pp->pr_npagealloc = 0;
502: pp->pr_npagefree = 0;
1.1 pk 503: pp->pr_hiwat = 0;
1.8 thorpej 504: pp->pr_nidle = 0;
1.3 pk 505:
1.25 thorpej 506: if (flags & PR_LOGGING) {
507: if (kmem_map == NULL ||
508: (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
509: M_TEMP, M_NOWAIT)) == NULL)
1.20 thorpej 510: pp->pr_roflags &= ~PR_LOGGING;
1.3 pk 511: pp->pr_curlogentry = 0;
512: pp->pr_logsize = pool_logsize;
513: }
1.25 thorpej 514:
515: pp->pr_entered_file = NULL;
516: pp->pr_entered_line = 0;
1.3 pk 517:
1.21 thorpej 518: simple_lock_init(&pp->pr_slock);
1.1 pk 519:
1.3 pk 520: /*
521: * Initialize private page header pool if we haven't done so yet.
1.23 thorpej 522: * XXX LOCKING.
1.3 pk 523: */
524: if (phpool.pr_size == 0) {
525: pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
526: 0, "phpool", 0, 0, 0, 0);
1.1 pk 527: }
528:
1.23 thorpej 529: /* Insert into the list of all pools. */
530: simple_lock(&pool_head_slock);
531: TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
532: simple_unlock(&pool_head_slock);
1.1 pk 533: }
534:
535: /*
                  536:  * De-commission a pool resource.
537: */
538: void
539: pool_destroy(pp)
540: struct pool *pp;
541: {
1.3 pk 542: struct pool_item_header *ph;
543:
544: #ifdef DIAGNOSTIC
1.20 thorpej 545: if (pp->pr_nout != 0) {
1.25 thorpej 546: pr_printlog(pp, NULL, printf);
1.20 thorpej 547: panic("pool_destroy: pool busy: still out: %u\n",
548: pp->pr_nout);
1.3 pk 549: }
550: #endif
1.1 pk 551:
1.3 pk 552: /* Remove all pages */
1.20 thorpej 553: if ((pp->pr_roflags & PR_STATIC) == 0)
1.3 pk 554: while ((ph = pp->pr_pagelist.tqh_first) != NULL)
555: pr_rmpage(pp, ph);
556:
557: /* Remove from global pool list */
1.23 thorpej 558: simple_lock(&pool_head_slock);
1.3 pk 559: TAILQ_REMOVE(&pool_head, pp, pr_poollist);
1.23 thorpej 560: /* XXX Only clear this if we were drainpp? */
1.3 pk 561: drainpp = NULL;
1.23 thorpej 562: simple_unlock(&pool_head_slock);
1.3 pk 563:
1.20 thorpej 564: if ((pp->pr_roflags & PR_LOGGING) != 0)
1.3 pk 565: free(pp->pr_log, M_TEMP);
1.2 pk 566:
1.20 thorpej 567: if (pp->pr_roflags & PR_FREEHEADER)
1.3 pk 568: free(pp, M_POOL);
1.1 pk 569: }
570:
571:
572: /*
1.3 pk 573: * Grab an item from the pool; must be called at appropriate spl level
1.1 pk 574: */
1.3 pk 575: void *
576: _pool_get(pp, flags, file, line)
577: struct pool *pp;
578: int flags;
579: const char *file;
580: long line;
1.1 pk 581: {
582: void *v;
583: struct pool_item *pi;
1.3 pk 584: struct pool_item_header *ph;
1.1 pk 585:
1.2 pk 586: #ifdef DIAGNOSTIC
1.34 thorpej 587: if (__predict_false((pp->pr_roflags & PR_STATIC) &&
588: (flags & PR_MALLOCOK))) {
1.25 thorpej 589: pr_printlog(pp, NULL, printf);
1.2 pk 590: panic("pool_get: static");
1.3 pk 591: }
1.2 pk 592: #endif
593:
1.37 sommerfe 594: if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
595: (flags & PR_WAITOK) != 0))
1.3 pk 596: panic("pool_get: must have NOWAIT");
1.1 pk 597:
1.21 thorpej 598: simple_lock(&pp->pr_slock);
1.25 thorpej 599: pr_enter(pp, file, line);
1.20 thorpej 600:
601: startover:
602: /*
603: * Check to see if we've reached the hard limit. If we have,
604: * and we can wait, then wait until an item has been returned to
605: * the pool.
606: */
607: #ifdef DIAGNOSTIC
1.34 thorpej 608: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.25 thorpej 609: pr_leave(pp);
1.21 thorpej 610: simple_unlock(&pp->pr_slock);
1.20 thorpej 611: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
612: }
613: #endif
1.34 thorpej 614: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.29 sommerfe 615: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 616: /*
617: * XXX: A warning isn't logged in this case. Should
618: * it be?
619: */
620: pp->pr_flags |= PR_WANTED;
1.25 thorpej 621: pr_leave(pp);
1.40 ! sommerfe 622: ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
1.25 thorpej 623: pr_enter(pp, file, line);
1.20 thorpej 624: goto startover;
625: }
1.31 thorpej 626:
627: /*
628: * Log a message that the hard limit has been hit.
629: */
630: if (pp->pr_hardlimit_warning != NULL &&
631: ratecheck(&pp->pr_hardlimit_warning_last,
632: &pp->pr_hardlimit_ratecap))
633: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 634:
635: if (flags & PR_URGENT)
636: panic("pool_get: urgent");
637:
638: pp->pr_nfail++;
639:
1.25 thorpej 640: pr_leave(pp);
1.21 thorpej 641: simple_unlock(&pp->pr_slock);
1.20 thorpej 642: return (NULL);
643: }
644:
1.3 pk 645: /*
646: * The convention we use is that if `curpage' is not NULL, then
647: * it points at a non-empty bucket. In particular, `curpage'
648: * never points at a page header which has PR_PHINPAGE set and
649: * has no items in its bucket.
650: */
1.20 thorpej 651: if ((ph = pp->pr_curpage) == NULL) {
1.15 pk 652: void *v;
653:
1.20 thorpej 654: #ifdef DIAGNOSTIC
655: if (pp->pr_nitems != 0) {
1.21 thorpej 656: simple_unlock(&pp->pr_slock);
1.20 thorpej 657: printf("pool_get: %s: curpage NULL, nitems %u\n",
658: pp->pr_wchan, pp->pr_nitems);
659: panic("pool_get: nitems inconsistent\n");
660: }
661: #endif
662:
1.21 thorpej 663: /*
664: * Call the back-end page allocator for more memory.
665: * Release the pool lock, as the back-end page allocator
666: * may block.
667: */
1.25 thorpej 668: pr_leave(pp);
1.21 thorpej 669: simple_unlock(&pp->pr_slock);
670: v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);
671: simple_lock(&pp->pr_slock);
1.25 thorpej 672: pr_enter(pp, file, line);
1.15 pk 673:
1.21 thorpej 674: if (v == NULL) {
675: /*
676: * We were unable to allocate a page, but
677: * we released the lock during allocation,
678: * so perhaps items were freed back to the
679: * pool. Check for this case.
680: */
681: if (pp->pr_curpage != NULL)
682: goto startover;
1.15 pk 683:
1.3 pk 684: if (flags & PR_URGENT)
685: panic("pool_get: urgent");
1.21 thorpej 686:
1.3 pk 687: if ((flags & PR_WAITOK) == 0) {
688: pp->pr_nfail++;
1.25 thorpej 689: pr_leave(pp);
1.21 thorpej 690: simple_unlock(&pp->pr_slock);
1.1 pk 691: return (NULL);
1.3 pk 692: }
693:
1.15 pk 694: /*
695: * Wait for items to be returned to this pool.
1.21 thorpej 696: *
1.15 pk 697: * XXX: we actually want to wait just until
698: * the page allocator has memory again. Depending
699: * on this pool's usage, we might get stuck here
700: * for a long time.
1.20 thorpej 701: *
702: * XXX: maybe we should wake up once a second and
703: * try again?
1.15 pk 704: */
1.1 pk 705: pp->pr_flags |= PR_WANTED;
1.25 thorpej 706: pr_leave(pp);
1.40 ! sommerfe 707: ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
1.25 thorpej 708: pr_enter(pp, file, line);
1.20 thorpej 709: goto startover;
1.1 pk 710: }
1.3 pk 711:
1.15 pk 712: /* We have more memory; add it to the pool */
713: pp->pr_npagealloc++;
714: pool_prime_page(pp, v);
715:
1.20 thorpej 716: /* Start the allocation process over. */
717: goto startover;
1.3 pk 718: }
719:
1.34 thorpej 720: if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
1.25 thorpej 721: pr_leave(pp);
1.21 thorpej 722: simple_unlock(&pp->pr_slock);
1.3 pk 723: panic("pool_get: %s: page empty", pp->pr_wchan);
1.21 thorpej 724: }
1.20 thorpej 725: #ifdef DIAGNOSTIC
1.34 thorpej 726: if (__predict_false(pp->pr_nitems == 0)) {
1.25 thorpej 727: pr_leave(pp);
1.21 thorpej 728: simple_unlock(&pp->pr_slock);
1.20 thorpej 729: printf("pool_get: %s: items on itemlist, nitems %u\n",
730: pp->pr_wchan, pp->pr_nitems);
731: panic("pool_get: nitems inconsistent\n");
732: }
733: #endif
1.3 pk 734: pr_log(pp, v, PRLOG_GET, file, line);
735:
736: #ifdef DIAGNOSTIC
1.34 thorpej 737: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
1.25 thorpej 738: pr_printlog(pp, pi, printf);
1.3 pk 739: panic("pool_get(%s): free list modified: magic=%x; page %p;"
740: " item addr %p\n",
741: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
742: }
743: #endif
744:
745: /*
746: * Remove from item list.
747: */
748: TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
1.20 thorpej 749: pp->pr_nitems--;
750: pp->pr_nout++;
1.6 thorpej 751: if (ph->ph_nmissing == 0) {
752: #ifdef DIAGNOSTIC
1.34 thorpej 753: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 754: panic("pool_get: nidle inconsistent");
755: #endif
756: pp->pr_nidle--;
757: }
1.3 pk 758: ph->ph_nmissing++;
759: if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
1.21 thorpej 760: #ifdef DIAGNOSTIC
1.34 thorpej 761: if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
1.25 thorpej 762: pr_leave(pp);
1.21 thorpej 763: simple_unlock(&pp->pr_slock);
764: panic("pool_get: %s: nmissing inconsistent",
765: pp->pr_wchan);
766: }
767: #endif
1.3 pk 768: /*
769: * Find a new non-empty page header, if any.
770: * Start search from the page head, to increase
771: * the chance for "high water" pages to be freed.
772: *
1.21 thorpej 773: * Migrate empty pages to the end of the list. This
774: * will speed the update of curpage as pages become
775: * idle. Empty pages intermingled with idle pages
776: * is no big deal. As soon as a page becomes un-empty,
777: * it will move back to the head of the list.
1.3 pk 778: */
779: TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
1.21 thorpej 780: TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
781: for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
782: ph = TAILQ_NEXT(ph, ph_pagelist))
1.3 pk 783: if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
784: break;
785:
786: pp->pr_curpage = ph;
1.1 pk 787: }
1.3 pk 788:
789: pp->pr_nget++;
1.20 thorpej 790:
791: /*
792: * If we have a low water mark and we are now below that low
793: * water mark, add more items to the pool.
794: */
795: if (pp->pr_nitems < pp->pr_minitems && pool_catchup(pp) != 0) {
796: /*
797: * XXX: Should we log a warning? Should we set up a timeout
798: * to try again in a second or so? The latter could break
799: * a caller's assumptions about interrupt protection, etc.
800: */
801: }
802:
1.25 thorpej 803: pr_leave(pp);
1.21 thorpej 804: simple_unlock(&pp->pr_slock);
1.1 pk 805: return (v);
806: }
807:
808: /*
1.3 pk 809: * Return resource to the pool; must be called at appropriate spl level
1.1 pk 810: */
1.3 pk 811: void
812: _pool_put(pp, v, file, line)
813: struct pool *pp;
814: void *v;
815: const char *file;
816: long line;
1.1 pk 817: {
818: struct pool_item *pi = v;
1.3 pk 819: struct pool_item_header *ph;
820: caddr_t page;
1.21 thorpej 821: int s;
1.3 pk 822:
823: page = (caddr_t)((u_long)v & pp->pr_pagemask);
1.1 pk 824:
1.21 thorpej 825: simple_lock(&pp->pr_slock);
1.25 thorpej 826: pr_enter(pp, file, line);
1.30 thorpej 827:
828: #ifdef DIAGNOSTIC
1.34 thorpej 829: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 830: printf("pool %s: putting with none out\n",
831: pp->pr_wchan);
832: panic("pool_put");
833: }
834: #endif
1.3 pk 835:
836: pr_log(pp, v, PRLOG_PUT, file, line);
837:
1.34 thorpej 838: if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
1.25 thorpej 839: pr_printlog(pp, NULL, printf);
1.3 pk 840: panic("pool_put: %s: page header missing", pp->pr_wchan);
841: }
1.28 thorpej 842:
843: #ifdef LOCKDEBUG
844: /*
845: * Check if we're freeing a locked simple lock.
846: */
847: simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
848: #endif
1.3 pk 849:
850: /*
851: * Return to item list.
852: */
1.2 pk 853: #ifdef DIAGNOSTIC
1.3 pk 854: pi->pi_magic = PI_MAGIC;
855: #endif
1.32 chs 856: #ifdef DEBUG
857: {
858: int i, *ip = v;
859:
860: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
861: *ip++ = PI_MAGIC;
862: }
863: }
864: #endif
865:
1.3 pk 866: TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
867: ph->ph_nmissing--;
868: pp->pr_nput++;
1.20 thorpej 869: pp->pr_nitems++;
870: pp->pr_nout--;
1.3 pk 871:
872: /* Cancel "pool empty" condition if it exists */
873: if (pp->pr_curpage == NULL)
874: pp->pr_curpage = ph;
875:
876: if (pp->pr_flags & PR_WANTED) {
877: pp->pr_flags &= ~PR_WANTED;
1.15 pk 878: if (ph->ph_nmissing == 0)
879: pp->pr_nidle++;
1.25 thorpej 880: pr_leave(pp);
1.21 thorpej 881: simple_unlock(&pp->pr_slock);
1.3 pk 882: wakeup((caddr_t)pp);
883: return;
884: }
885:
886: /*
1.21 thorpej 887: * If this page is now complete, do one of two things:
888: *
889: * (1) If we have more pages than the page high water
890: * mark, free the page back to the system.
891: *
892: * (2) Move it to the end of the page list, so that
893: * we minimize our chances of fragmenting the
894: * pool. Idle pages migrate to the end (along with
895: * completely empty pages, so that we find un-empty
896: * pages more quickly when we update curpage) of the
897: * list so they can be more easily swept up by
898: * the pagedaemon when pages are scarce.
1.3 pk 899: */
900: if (ph->ph_nmissing == 0) {
1.6 thorpej 901: pp->pr_nidle++;
1.3 pk 902: if (pp->pr_npages > pp->pr_maxpages) {
903: pr_rmpage(pp, ph);
904: } else {
905: TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
906: TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
907:
1.21 thorpej 908: /*
909: * Update the timestamp on the page. A page must
910: * be idle for some period of time before it can
911: * be reclaimed by the pagedaemon. This minimizes
912: * ping-pong'ing for memory.
913: */
914: s = splclock();
915: ph->ph_time = mono_time;
916: splx(s);
917:
918: /*
919: * Update the current page pointer. Just look for
920: * the first page with any free items.
921: *
922: * XXX: Maybe we want an option to look for the
923: * page with the fewest available items, to minimize
924: * fragmentation?
925: */
1.3 pk 926: for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
927: ph = TAILQ_NEXT(ph, ph_pagelist))
928: if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
929: break;
1.1 pk 930:
1.3 pk 931: pp->pr_curpage = ph;
1.1 pk 932: }
933: }
1.21 thorpej 934: /*
935: * If the page has just become un-empty, move it to the head of
936: * the list, and make it the current page. The next allocation
937: * will get the item from this page, instead of further fragmenting
938: * the pool.
939: */
940: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
941: TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
942: TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
943: pp->pr_curpage = ph;
944: }
945:
1.25 thorpej 946: pr_leave(pp);
1.21 thorpej 947: simple_unlock(&pp->pr_slock);
1.3 pk 948:
1.1 pk 949: }
950:
951: /*
1.3 pk 952: * Add N items to the pool.
1.1 pk 953: */
954: int
1.2 pk 955: pool_prime(pp, n, storage)
1.1 pk 956: struct pool *pp;
957: int n;
1.2 pk 958: caddr_t storage;
1.1 pk 959: {
1.3 pk 960: caddr_t cp;
961: int newnitems, newpages;
1.2 pk 962:
963: #ifdef DIAGNOSTIC
1.34 thorpej 964: if (__predict_false(storage && !(pp->pr_roflags & PR_STATIC)))
1.2 pk 965: panic("pool_prime: static");
966: /* !storage && static caught below */
967: #endif
1.1 pk 968:
1.21 thorpej 969: simple_lock(&pp->pr_slock);
970:
1.3 pk 971: newnitems = pp->pr_minitems + n;
972: newpages =
1.18 thorpej 973: roundup(newnitems, pp->pr_itemsperpage) / pp->pr_itemsperpage
1.3 pk 974: - pp->pr_minpages;
975:
976: while (newpages-- > 0) {
1.20 thorpej 977: if (pp->pr_roflags & PR_STATIC) {
1.3 pk 978: cp = storage;
979: storage += pp->pr_pagesz;
980: } else {
1.21 thorpej 981: simple_unlock(&pp->pr_slock);
1.3 pk 982: cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype);
1.21 thorpej 983: simple_lock(&pp->pr_slock);
1.3 pk 984: }
1.2 pk 985:
1.3 pk 986: if (cp == NULL) {
1.21 thorpej 987: simple_unlock(&pp->pr_slock);
1.1 pk 988: return (ENOMEM);
989: }
990:
1.26 thorpej 991: pp->pr_npagealloc++;
1.3 pk 992: pool_prime_page(pp, cp);
993: pp->pr_minpages++;
1.1 pk 994: }
1.3 pk 995:
996: pp->pr_minitems = newnitems;
997:
998: if (pp->pr_minpages >= pp->pr_maxpages)
999: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1000:
1.21 thorpej 1001: simple_unlock(&pp->pr_slock);
1.1 pk 1002: return (0);
1003: }
1.3 pk 1004:
1005: /*
1006: * Add a page worth of items to the pool.
1.21 thorpej 1007: *
1008: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1009: */
1.21 thorpej 1010: static void
1.3 pk 1011: pool_prime_page(pp, storage)
1012: struct pool *pp;
1013: caddr_t storage;
1014: {
1015: struct pool_item *pi;
1016: struct pool_item_header *ph;
1017: caddr_t cp = storage;
1018: unsigned int align = pp->pr_align;
1019: unsigned int ioff = pp->pr_itemoffset;
1.27 pk 1020: int s, n;
1.36 pk 1021:
1022: if (((u_long)cp & (pp->pr_pagesz - 1)) != 0)
1023: panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.3 pk 1024:
1.20 thorpej 1025: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
1.3 pk 1026: ph = (struct pool_item_header *)(cp + pp->pr_phoffset);
1027: } else {
1.27 pk 1028: s = splhigh();
1.3 pk 1029: ph = pool_get(&phpool, PR_URGENT);
1.27 pk 1030: splx(s);
1.3 pk 1031: LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
1032: ph, ph_hashlist);
1033: }
1034:
1035: /*
1036: * Insert page header.
1037: */
1038: TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
1039: TAILQ_INIT(&ph->ph_itemlist);
1040: ph->ph_page = storage;
1041: ph->ph_nmissing = 0;
1.21 thorpej 1042: memset(&ph->ph_time, 0, sizeof(ph->ph_time));
1.3 pk 1043:
1.6 thorpej 1044: pp->pr_nidle++;
1045:
1.3 pk 1046: /*
1047: * Color this page.
1048: */
1049: cp = (caddr_t)(cp + pp->pr_curcolor);
1050: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1051: pp->pr_curcolor = 0;
1052:
1053: /*
                 1054: 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1055: */
1056: if (ioff != 0)
1057: cp = (caddr_t)(cp + (align - ioff));
1058:
1059: /*
1060: * Insert remaining chunks on the bucket list.
1061: */
1062: n = pp->pr_itemsperpage;
1.20 thorpej 1063: pp->pr_nitems += n;
1.3 pk 1064:
1065: while (n--) {
1066: pi = (struct pool_item *)cp;
1067:
1068: /* Insert on page list */
1069: TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
1070: #ifdef DIAGNOSTIC
1071: pi->pi_magic = PI_MAGIC;
1072: #endif
1073: cp = (caddr_t)(cp + pp->pr_size);
1074: }
1075:
1076: /*
1077: * If the pool was depleted, point at the new page.
1078: */
1079: if (pp->pr_curpage == NULL)
1080: pp->pr_curpage = ph;
1081:
1082: if (++pp->pr_npages > pp->pr_hiwat)
1083: pp->pr_hiwat = pp->pr_npages;
1084: }
1085:
1.20 thorpej 1086: /*
1087: * Like pool_prime(), except this is used by pool_get() when nitems
                 1088:  * drops below the low water mark. This is used to catch up nitems
1089: * with the low water mark.
1090: *
1.21 thorpej 1091: * Note 1, we never wait for memory here, we let the caller decide what to do.
1.20 thorpej 1092: *
1093: * Note 2, this doesn't work with static pools.
1094: *
1095: * Note 3, we must be called with the pool already locked, and we return
1096: * with it locked.
1097: */
1098: static int
1099: pool_catchup(pp)
1100: struct pool *pp;
1101: {
1102: caddr_t cp;
1103: int error = 0;
1104:
1105: if (pp->pr_roflags & PR_STATIC) {
1106: /*
1107: * We dropped below the low water mark, and this is not a
1108: * good thing. Log a warning.
1.21 thorpej 1109: *
1110: * XXX: rate-limit this?
1.20 thorpej 1111: */
1112: printf("WARNING: static pool `%s' dropped below low water "
1113: "mark\n", pp->pr_wchan);
1114: return (0);
1115: }
1116:
1.21 thorpej 1117: while (pp->pr_nitems < pp->pr_minitems) {
1.20 thorpej 1118: /*
1.21 thorpej 1119: * Call the page back-end allocator for more memory.
1120: *
1121: * XXX: We never wait, so should we bother unlocking
1122: * the pool descriptor?
1.20 thorpej 1123: */
1.21 thorpej 1124: simple_unlock(&pp->pr_slock);
1.20 thorpej 1125: cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype);
1.21 thorpej 1126: simple_lock(&pp->pr_slock);
1.34 thorpej 1127: if (__predict_false(cp == NULL)) {
1.20 thorpej 1128: error = ENOMEM;
1129: break;
1130: }
1.26 thorpej 1131: pp->pr_npagealloc++;
1.20 thorpej 1132: pool_prime_page(pp, cp);
1133: }
1134:
1135: return (error);
1136: }
1137:
1.3 pk 1138: void
1139: pool_setlowat(pp, n)
1140: pool_handle_t pp;
1141: int n;
1142: {
1.20 thorpej 1143: int error;
1.15 pk 1144:
1.21 thorpej 1145: simple_lock(&pp->pr_slock);
1146:
1.3 pk 1147: pp->pr_minitems = n;
1.15 pk 1148: pp->pr_minpages = (n == 0)
1149: ? 0
1.18 thorpej 1150: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1151:
1152: /* Make sure we're caught up with the newly-set low water mark. */
1.21 thorpej 1153: if ((error = pool_catchup(pp)) != 0) {
1.20 thorpej 1154: /*
1155: * XXX: Should we log a warning? Should we set up a timeout
1156: * to try again in a second or so? The latter could break
1157: * a caller's assumptions about interrupt protection, etc.
1158: */
1159: }
1.21 thorpej 1160:
1161: simple_unlock(&pp->pr_slock);
1.3 pk 1162: }
1163:
1164: void
1165: pool_sethiwat(pp, n)
1166: pool_handle_t pp;
1167: int n;
1168: {
1.15 pk 1169:
1.21 thorpej 1170: simple_lock(&pp->pr_slock);
1171:
1.15 pk 1172: pp->pr_maxpages = (n == 0)
1173: ? 0
1.18 thorpej 1174: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1175:
1176: simple_unlock(&pp->pr_slock);
1.3 pk 1177: }
1178:
1.20 thorpej 1179: void
1180: pool_sethardlimit(pp, n, warnmess, ratecap)
1181: pool_handle_t pp;
1182: int n;
1183: const char *warnmess;
1184: int ratecap;
1185: {
1186:
1.21 thorpej 1187: simple_lock(&pp->pr_slock);
1.20 thorpej 1188:
1189: pp->pr_hardlimit = n;
1190: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1191: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1192: pp->pr_hardlimit_warning_last.tv_sec = 0;
1193: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1194:
1195: /*
1.21 thorpej 1196: * In-line version of pool_sethiwat(), because we don't want to
1197: * release the lock.
1.20 thorpej 1198: */
1199: pp->pr_maxpages = (n == 0)
1200: ? 0
1201: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1202:
1203: simple_unlock(&pp->pr_slock);
1.20 thorpej 1204: }
1.3 pk 1205:
1206: /*
1207: * Default page allocator.
1208: */
1209: static void *
1210: pool_page_alloc(sz, flags, mtype)
1211: unsigned long sz;
1212: int flags;
1213: int mtype;
1214: {
1.11 thorpej 1215: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
1.3 pk 1216:
1.11 thorpej 1217: return ((void *)uvm_km_alloc_poolpage(waitok));
1.3 pk 1218: }
1219:
1220: static void
1221: pool_page_free(v, sz, mtype)
1222: void *v;
1223: unsigned long sz;
1224: int mtype;
1225: {
1226:
1.10 eeh 1227: uvm_km_free_poolpage((vaddr_t)v);
1.3 pk 1228: }
1.12 thorpej 1229:
1230: /*
1231: * Alternate pool page allocator for pools that know they will
1232: * never be accessed in interrupt context.
1233: */
1234: void *
1235: pool_page_alloc_nointr(sz, flags, mtype)
1236: unsigned long sz;
1237: int flags;
1238: int mtype;
1239: {
1240: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
1241:
1242: return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
1243: waitok));
1244: }
1245:
1246: void
1247: pool_page_free_nointr(v, sz, mtype)
1248: void *v;
1249: unsigned long sz;
1250: int mtype;
1251: {
1252:
1253: uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
1254: }
1255:
1.3 pk 1256:
1257: /*
1258: * Release all complete pages that have not been used recently.
1259: */
1260: void
1.25 thorpej 1261: _pool_reclaim(pp, file, line)
1.3 pk 1262: pool_handle_t pp;
1.25 thorpej 1263: const char *file;
1264: long line;
1.3 pk 1265: {
1266: struct pool_item_header *ph, *phnext;
1.21 thorpej 1267: struct timeval curtime;
1268: int s;
1.3 pk 1269:
1.20 thorpej 1270: if (pp->pr_roflags & PR_STATIC)
1.3 pk 1271: return;
1272:
1.21 thorpej 1273: if (simple_lock_try(&pp->pr_slock) == 0)
1.3 pk 1274: return;
1.25 thorpej 1275: pr_enter(pp, file, line);
1.3 pk 1276:
1.21 thorpej 1277: s = splclock();
1278: curtime = mono_time;
1279: splx(s);
1280:
1.3 pk 1281: for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
1282: phnext = TAILQ_NEXT(ph, ph_pagelist);
1283:
1284: /* Check our minimum page claim */
1285: if (pp->pr_npages <= pp->pr_minpages)
1286: break;
1287:
1288: if (ph->ph_nmissing == 0) {
1289: struct timeval diff;
1290: timersub(&curtime, &ph->ph_time, &diff);
1291: if (diff.tv_sec < pool_inactive_time)
1292: continue;
1.21 thorpej 1293:
1294: /*
1295: * If freeing this page would put us below
1296: * the low water mark, stop now.
1297: */
1298: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1299: pp->pr_minitems)
1300: break;
1301:
1.3 pk 1302: pr_rmpage(pp, ph);
1303: }
1304: }
1305:
1.25 thorpej 1306: pr_leave(pp);
1.21 thorpej 1307: simple_unlock(&pp->pr_slock);
1.3 pk 1308: }
1309:
1310:
1311: /*
1312: * Drain pools, one at a time.
1.21 thorpej 1313: *
1314: * Note, we must never be called from an interrupt context.
1.3 pk 1315: */
1316: void
1317: pool_drain(arg)
1318: void *arg;
1319: {
1320: struct pool *pp;
1.23 thorpej 1321: int s;
1.3 pk 1322:
1.23 thorpej 1323: s = splimp();
1324: simple_lock(&pool_head_slock);
1325:
1326: if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL)
1327: goto out;
1.3 pk 1328:
1329: pp = drainpp;
1330: drainpp = TAILQ_NEXT(pp, pr_poollist);
1331:
1332: pool_reclaim(pp);
1.23 thorpej 1333:
1334: out:
1335: simple_unlock(&pool_head_slock);
1.3 pk 1336: splx(s);
1337: }
1338:
1339:
1340: /*
1341: * Diagnostic helpers.
1342: */
1343: void
1.25 thorpej 1344: pool_print(pp, modif)
1.3 pk 1345: struct pool *pp;
1.25 thorpej 1346: const char *modif;
1.21 thorpej 1347: {
1348: int s;
1349:
1350: s = splimp();
1.25 thorpej 1351: if (simple_lock_try(&pp->pr_slock) == 0) {
1352: printf("pool %s is locked; try again later\n",
1353: pp->pr_wchan);
1354: splx(s);
1355: return;
1356: }
1357: pool_print1(pp, modif, printf);
1.21 thorpej 1358: simple_unlock(&pp->pr_slock);
1359: splx(s);
1360: }
1361:
1.25 thorpej 1362: void
1363: pool_printit(pp, modif, pr)
1364: struct pool *pp;
1365: const char *modif;
1366: void (*pr) __P((const char *, ...));
1367: {
1368: int didlock = 0;
1369:
1370: if (pp == NULL) {
1371: (*pr)("Must specify a pool to print.\n");
1372: return;
1373: }
1374:
1375: /*
1376: * Called from DDB; interrupts should be blocked, and all
1377: * other processors should be paused. We can skip locking
1378: * the pool in this case.
1379: *
1380: * We do a simple_lock_try() just to print the lock
1381: * status, however.
1382: */
1383:
1384: if (simple_lock_try(&pp->pr_slock) == 0)
1385: (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1386: else
1387: didlock = 1;
1388:
1389: pool_print1(pp, modif, pr);
1390:
1391: if (didlock)
1392: simple_unlock(&pp->pr_slock);
1393: }
1394:
1.21 thorpej 1395: static void
1.25 thorpej 1396: pool_print1(pp, modif, pr)
1.21 thorpej 1397: struct pool *pp;
1.25 thorpej 1398: const char *modif;
1399: void (*pr) __P((const char *, ...));
1.3 pk 1400: {
1.25 thorpej 1401: struct pool_item_header *ph;
1402: #ifdef DIAGNOSTIC
1403: struct pool_item *pi;
1404: #endif
1405: int print_log = 0, print_pagelist = 0;
1406: char c;
1407:
1408: while ((c = *modif++) != '\0') {
1409: if (c == 'l')
1410: print_log = 1;
1411: if (c == 'p')
1412: print_pagelist = 1;
1414: }
1415:
1416: (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1417: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1418: pp->pr_roflags);
1419: (*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype);
1420: (*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free);
1421: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1422: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1423: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1424: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1425:
1426: (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1427: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1428: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1429: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1430:
1431: if (print_pagelist == 0)
1432: goto skip_pagelist;
1433:
1434: if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
1435: (*pr)("\n\tpage list:\n");
1436: for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
1437: (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1438: ph->ph_page, ph->ph_nmissing,
1439: (u_long)ph->ph_time.tv_sec,
1440: (u_long)ph->ph_time.tv_usec);
1441: #ifdef DIAGNOSTIC
1442: for (pi = TAILQ_FIRST(&ph->ph_itemlist); pi != NULL;
1443: pi = TAILQ_NEXT(pi, pi_list)) {
1444: if (pi->pi_magic != PI_MAGIC) {
1445: (*pr)("\t\t\titem %p, magic 0x%x\n",
1446: pi, pi->pi_magic);
1447: }
1448: }
1449: #endif
1450: }
1451: if (pp->pr_curpage == NULL)
1452: (*pr)("\tno current page\n");
1453: else
1454: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1455:
1456: skip_pagelist:
1457:
1458: if (print_log == 0)
1459: goto skip_log;
1460:
1461: (*pr)("\n");
1462: if ((pp->pr_roflags & PR_LOGGING) == 0)
1463: (*pr)("\tno log\n");
1464: else
1465: pr_printlog(pp, NULL, pr);
1.3 pk 1466:
1.25 thorpej 1467: skip_log:
1.3 pk 1468:
1.25 thorpej 1469: pr_enter_check(pp, pr);
1.3 pk 1470: }
1471:
1472: int
1473: pool_chk(pp, label)
1474: struct pool *pp;
1475: char *label;
1476: {
1477: struct pool_item_header *ph;
1478: int r = 0;
1479:
1.21 thorpej 1480: simple_lock(&pp->pr_slock);
1.3 pk 1481:
1482: for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
1483: ph = TAILQ_NEXT(ph, ph_pagelist)) {
1484:
1485: struct pool_item *pi;
1486: int n;
1487: caddr_t page;
1488:
1489: page = (caddr_t)((u_long)ph & pp->pr_pagemask);
1.20 thorpej 1490: if (page != ph->ph_page &&
1491: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1.3 pk 1492: if (label != NULL)
1493: printf("%s: ", label);
1.16 briggs 1494: printf("pool(%p:%s): page inconsistency: page %p;"
1495: " at page head addr %p (p %p)\n", pp,
1.3 pk 1496: pp->pr_wchan, ph->ph_page,
1497: ph, page);
1498: r++;
1499: goto out;
1500: }
1501:
1502: for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1503: pi != NULL;
1504: pi = TAILQ_NEXT(pi,pi_list), n++) {
1505:
1506: #ifdef DIAGNOSTIC
1507: if (pi->pi_magic != PI_MAGIC) {
1508: if (label != NULL)
1509: printf("%s: ", label);
1510: printf("pool(%s): free list modified: magic=%x;"
1511: " page %p; item ordinal %d;"
1512: " addr %p (p %p)\n",
1513: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1514: n, pi, page);
1515: panic("pool");
1516: }
1517: #endif
1518: page = (caddr_t)((u_long)pi & pp->pr_pagemask);
1519: if (page == ph->ph_page)
1520: continue;
1521:
1522: if (label != NULL)
1523: printf("%s: ", label);
1.16 briggs 1524: printf("pool(%p:%s): page inconsistency: page %p;"
1525: " item ordinal %d; addr %p (p %p)\n", pp,
1.3 pk 1526: pp->pr_wchan, ph->ph_page,
1527: n, pi, page);
1528: r++;
1529: goto out;
1530: }
1531: }
1532: out:
1.21 thorpej 1533: simple_unlock(&pp->pr_slock);
1.3 pk 1534: return (r);
1535: }