Annotation of src/sys/kern/subr_pool.c, Revision 1.34
1.34 ! thorpej 1: /* $NetBSD: subr_pool.c,v 1.33 2000/04/13 00:44:19 chs Exp $ */
1.1 pk 2:
3: /*-
1.20 thorpej 4: * Copyright (c) 1997, 1999 The NetBSD Foundation, Inc.
1.1 pk 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 8: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9: * Simulation Facility, NASA Ames Research Center.
1.1 pk 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
1.13 christos 21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
1.1 pk 23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
1.24 scottr 39:
1.25 thorpej 40: #include "opt_pool.h"
1.24 scottr 41: #include "opt_poollog.h"
1.28 thorpej 42: #include "opt_lockdebug.h"
1.1 pk 43:
44: #include <sys/param.h>
45: #include <sys/systm.h>
46: #include <sys/proc.h>
47: #include <sys/errno.h>
48: #include <sys/kernel.h>
49: #include <sys/malloc.h>
50: #include <sys/lock.h>
51: #include <sys/pool.h>
1.20 thorpej 52: #include <sys/syslog.h>
1.1 pk 53:
1.3 pk 54: #include <vm/vm.h>
55: #include <vm/vm_kern.h>
56:
57: #include <uvm/uvm.h>
58:
1.1 pk 59: /*
60: * Pool resource management utility.
1.3 pk 61: *
62: * Memory is allocated in pages which are split into pieces according
63: * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
64: * in the pool structure and the individual pool items are on a linked list
65: * headed by `ph_itemlist' in each page header. The memory for building
66: * the page list is either taken from the allocated pages themselves (for
67: * small pool items) or taken from an internal pool of page headers (`phpool').
1.1 pk 68: */
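/*
 * Typical usage, sketched with a hypothetical `foo' structure and
 * pool (the names, malloc type and sizes below are illustrative
 * assumptions):
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    0, NULL, NULL, M_DEVBUF);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);
 *
 * Passing NULL allocator and release functions selects the default
 * page allocator below, and a pagesz of 0 defaults to PAGE_SIZE.
 */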
69:
1.3 pk 70: /* List of all pools */
1.5 thorpej 71: TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
1.3 pk 72:
73: /* Private pool for page header structures */
74: static struct pool phpool;
75:
76: /* # of seconds to retain page after last use */
77: int pool_inactive_time = 10;
78:
79: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 80: static struct pool *drainpp;
81:
82: /* This spin lock protects both pool_head and drainpp. */
83: struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
1.3 pk 84:
85: struct pool_item_header {
86: /* Page headers */
87: TAILQ_ENTRY(pool_item_header)
88: ph_pagelist; /* pool page list */
89: TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
90: LIST_ENTRY(pool_item_header)
91: ph_hashlist; /* Off-page page headers */
92: int ph_nmissing; /* # of chunks in use */
93: caddr_t ph_page; /* this page's address */
94: struct timeval ph_time; /* last referenced */
95: };
96:
1.1 pk 97: struct pool_item {
1.3 pk 98: #ifdef DIAGNOSTIC
99: int pi_magic;
1.33 chs 100: #endif
1.25 thorpej 101: #define PI_MAGIC 0xdeadbeef
1.3 pk 102: /* Other entries use only this list entry */
103: TAILQ_ENTRY(pool_item) pi_list;
104: };
105:
106:
1.25 thorpej 107: #define PR_HASH_INDEX(pp,addr) \
1.3 pk 108: (((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))
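/*
 * Example: with 4096-byte pool pages (pr_pageshift == 12) and a
 * PR_HASHTABSIZE of, say, 8 buckets, a page at address 0x12345000
 * hashes to (0x12345000 >> 12) & 7 == 0x12345 & 7 == 5.
 */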
109:
110:
111:
112: static struct pool_item_header
113: *pr_find_pagehead __P((struct pool *, caddr_t));
114: static void pr_rmpage __P((struct pool *, struct pool_item_header *));
1.20 thorpej 115: static int pool_catchup __P((struct pool *));
1.21 thorpej 116: static void pool_prime_page __P((struct pool *, caddr_t));
1.3 pk 117: static void *pool_page_alloc __P((unsigned long, int, int));
118: static void pool_page_free __P((void *, unsigned long, int));
119:
1.25 thorpej 120: static void pool_print1 __P((struct pool *, const char *,
121: void (*)(const char *, ...)));
1.3 pk 122:
123: /*
124: * Pool log entry. An array of these is allocated in pool_create().
125: */
126: struct pool_log {
127: const char *pl_file;
128: long pl_line;
129: int pl_action;
1.25 thorpej 130: #define PRLOG_GET 1
131: #define PRLOG_PUT 2
1.3 pk 132: void *pl_addr;
1.1 pk 133: };
134:
1.3 pk 135: /* Number of entries in pool log buffers */
1.17 thorpej 136: #ifndef POOL_LOGSIZE
137: #define POOL_LOGSIZE 10
138: #endif
139:
140: int pool_logsize = POOL_LOGSIZE;
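/*
 * The default may be overridden from a kernel configuration file
 * (which is what the opt_poollog.h include above provides), e.g.:
 *
 *	options POOL_LOGSIZE=64
 *
 * The log itself is only allocated for pools with PR_LOGGING set.
 */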
1.1 pk 141:
1.25 thorpej 142: #ifdef DIAGNOSTIC
1.3 pk 143: static void pr_log __P((struct pool *, void *, int, const char *, long));
1.25 thorpej 144: static void pr_printlog __P((struct pool *, struct pool_item *,
145: void (*)(const char *, ...)));
146: static void pr_enter __P((struct pool *, const char *, long));
147: static void pr_leave __P((struct pool *));
148: static void pr_enter_check __P((struct pool *,
149: void (*)(const char *, ...)));
1.3 pk 150:
151: static __inline__ void
152: pr_log(pp, v, action, file, line)
153: struct pool *pp;
154: void *v;
155: int action;
156: const char *file;
157: long line;
158: {
159: int n = pp->pr_curlogentry;
160: struct pool_log *pl;
161:
1.20 thorpej 162: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 163: return;
164:
165: /*
166: * Fill in the current entry. Wrap around and overwrite
167: * the oldest entry if necessary.
168: */
169: pl = &pp->pr_log[n];
170: pl->pl_file = file;
171: pl->pl_line = line;
172: pl->pl_action = action;
173: pl->pl_addr = v;
174: if (++n >= pp->pr_logsize)
175: n = 0;
176: pp->pr_curlogentry = n;
177: }
178:
179: static void
1.25 thorpej 180: pr_printlog(pp, pi, pr)
1.3 pk 181: struct pool *pp;
1.25 thorpej 182: struct pool_item *pi;
183: void (*pr) __P((const char *, ...));
1.3 pk 184: {
185: int i = pp->pr_logsize;
186: int n = pp->pr_curlogentry;
187:
1.20 thorpej 188: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 189: return;
190:
191: /*
192: * Print all entries in this pool's log.
193: */
194: while (i-- > 0) {
195: struct pool_log *pl = &pp->pr_log[n];
196: if (pl->pl_action != 0) {
1.25 thorpej 197: if (pi == NULL || pi == pl->pl_addr) {
198: (*pr)("\tlog entry %d:\n", i);
199: (*pr)("\t\taction = %s, addr = %p\n",
200: pl->pl_action == PRLOG_GET ? "get" : "put",
201: pl->pl_addr);
202: (*pr)("\t\tfile: %s at line %lu\n",
203: pl->pl_file, pl->pl_line);
204: }
1.3 pk 205: }
206: if (++n >= pp->pr_logsize)
207: n = 0;
208: }
209: }
1.25 thorpej 210:
211: static __inline__ void
212: pr_enter(pp, file, line)
213: struct pool *pp;
214: const char *file;
215: long line;
216: {
217:
1.34 ! thorpej 218: if (__predict_false(pp->pr_entered_file != NULL)) {
1.25 thorpej 219: printf("pool %s: reentrancy at file %s line %ld\n",
220: pp->pr_wchan, file, line);
221: printf(" previous entry at file %s line %ld\n",
222: pp->pr_entered_file, pp->pr_entered_line);
223: panic("pr_enter");
224: }
225:
226: pp->pr_entered_file = file;
227: pp->pr_entered_line = line;
228: }
229:
230: static __inline__ void
231: pr_leave(pp)
232: struct pool *pp;
233: {
234:
1.34 ! thorpej 235: if (__predict_false(pp->pr_entered_file == NULL)) {
1.25 thorpej 236: printf("pool %s not entered?\n", pp->pr_wchan);
237: panic("pr_leave");
238: }
239:
240: pp->pr_entered_file = NULL;
241: pp->pr_entered_line = 0;
242: }
243:
244: static __inline__ void
245: pr_enter_check(pp, pr)
246: struct pool *pp;
247: void (*pr) __P((const char *, ...));
248: {
249:
250: if (pp->pr_entered_file != NULL)
251: (*pr)("\n\tcurrently entered from file %s line %ld\n",
252: pp->pr_entered_file, pp->pr_entered_line);
253: }
1.3 pk 254: #else
1.25 thorpej 255: #define pr_log(pp, v, action, file, line)
256: #define pr_printlog(pp, pi, pr)
257: #define pr_enter(pp, file, line)
258: #define pr_leave(pp)
259: #define pr_enter_check(pp, pr)
260: #endif /* DIAGNOSTIC */
1.3 pk 261:
262: /*
263: * Return the pool page header based on page address.
264: */
265: static __inline__ struct pool_item_header *
266: pr_find_pagehead(pp, page)
267: struct pool *pp;
268: caddr_t page;
269: {
270: struct pool_item_header *ph;
271:
1.20 thorpej 272: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.3 pk 273: return ((struct pool_item_header *)(page + pp->pr_phoffset));
274:
275: for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
276: ph != NULL;
277: ph = LIST_NEXT(ph, ph_hashlist)) {
278: if (ph->ph_page == page)
279: return (ph);
280: }
281: return (NULL);
282: }
283:
284: /*
285: * Remove a page from the pool.
286: */
287: static __inline__ void
288: pr_rmpage(pp, ph)
289: struct pool *pp;
290: struct pool_item_header *ph;
291: {
292:
293: /*
1.7 thorpej 294: * If the page was idle, decrement the idle page count.
1.3 pk 295: */
1.6 thorpej 296: if (ph->ph_nmissing == 0) {
297: #ifdef DIAGNOSTIC
298: if (pp->pr_nidle == 0)
299: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 300: if (pp->pr_nitems < pp->pr_itemsperpage)
301: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 302: #endif
303: pp->pr_nidle--;
304: }
1.7 thorpej 305:
1.20 thorpej 306: pp->pr_nitems -= pp->pr_itemsperpage;
307:
1.7 thorpej 308: /*
309: * Unlink a page from the pool and release it.
310: */
311: TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
312: (*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
313: pp->pr_npages--;
314: pp->pr_npagefree++;
1.6 thorpej 315:
1.22 chs 316: if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
1.27 pk 317: int s;
1.22 chs 318: LIST_REMOVE(ph, ph_hashlist);
1.27 pk 319: s = splhigh();
1.22 chs 320: pool_put(&phpool, ph);
1.27 pk 321: splx(s);
1.22 chs 322: }
323:
1.3 pk 324: if (pp->pr_curpage == ph) {
325: /*
326: * Find a new non-empty page header, if any.
327: * Start search from the page head, to increase the
328: * chance for "high water" pages to be freed.
329: */
330: for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
331: ph = TAILQ_NEXT(ph, ph_pagelist))
332: if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
333: break;
334:
335: pp->pr_curpage = ph;
1.21 thorpej 336: }
1.3 pk 337: }
338:
339: /*
340: * Allocate and initialize a pool.
341: */
1.1 pk 342: struct pool *
1.3 pk 343: pool_create(size, align, ioff, nitems, wchan, pagesz, alloc, release, mtype)
1.1 pk 344: size_t size;
1.3 pk 345: u_int align;
346: u_int ioff;
1.1 pk 347: int nitems;
1.21 thorpej 348: const char *wchan;
1.3 pk 349: size_t pagesz;
350: void *(*alloc) __P((unsigned long, int, int));
351: void (*release) __P((void *, unsigned long, int));
1.1 pk 352: int mtype;
353: {
354: struct pool *pp;
1.3 pk 355: int flags;
1.1 pk 356:
1.3 pk 357: pp = (struct pool *)malloc(sizeof(*pp), M_POOL, M_NOWAIT);
358: if (pp == NULL)
1.1 pk 359: return (NULL);
1.3 pk 360:
361: flags = PR_FREEHEADER;
362: pool_init(pp, size, align, ioff, flags, wchan, pagesz,
363: alloc, release, mtype);
364:
365: if (nitems != 0) {
366: if (pool_prime(pp, nitems, NULL) != 0) {
367: pool_destroy(pp);
368: return (NULL);
369: }
1.1 pk 370: }
371:
1.3 pk 372: return (pp);
373: }
374:
375: /*
376: * Initialize the given pool resource structure.
377: *
378: * We export this routine to allow other kernel parts to declare
379: * static pools that must be initialized before malloc() is available.
380: */
381: void
382: pool_init(pp, size, align, ioff, flags, wchan, pagesz, alloc, release, mtype)
383: struct pool *pp;
384: size_t size;
385: u_int align;
386: u_int ioff;
387: int flags;
1.21 thorpej 388: const char *wchan;
1.3 pk 389: size_t pagesz;
390: void *(*alloc) __P((unsigned long, int, int));
391: void (*release) __P((void *, unsigned long, int));
392: int mtype;
393: {
1.16 briggs 394: int off, slack, i;
1.3 pk 395:
1.25 thorpej 396: #ifdef POOL_DIAGNOSTIC
397: /*
398: * Always log if POOL_DIAGNOSTIC is defined.
399: */
400: if (pool_logsize != 0)
401: flags |= PR_LOGGING;
402: #endif
403:
1.3 pk 404: /*
405: * Check arguments and construct default values.
406: */
407: if (!powerof2(pagesz) || pagesz > PAGE_SIZE)
408: panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);
409:
1.4 thorpej 410: if (alloc == NULL && release == NULL) {
1.3 pk 411: alloc = pool_page_alloc;
412: release = pool_page_free;
1.4 thorpej 413: pagesz = PAGE_SIZE; /* Rounds to PAGE_SIZE anyhow. */
414: } else if ((alloc != NULL && release != NULL) == 0) {
415: /* If you specifiy one, must specify both. */
416: panic("pool_init: must specify alloc and release together");
417: }
418:
1.3 pk 419: if (pagesz == 0)
420: pagesz = PAGE_SIZE;
421:
422: if (align == 0)
423: align = ALIGN(1);
1.14 thorpej 424:
425: if (size < sizeof(struct pool_item))
426: size = sizeof(struct pool_item);
1.3 pk 427:
428: /*
429: * Initialize the pool structure.
430: */
431: TAILQ_INIT(&pp->pr_pagelist);
432: pp->pr_curpage = NULL;
433: pp->pr_npages = 0;
434: pp->pr_minitems = 0;
435: pp->pr_minpages = 0;
436: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 437: pp->pr_roflags = flags;
438: pp->pr_flags = 0;
1.3 pk 439: pp->pr_size = ALIGN(size);
440: pp->pr_align = align;
441: pp->pr_wchan = wchan;
442: pp->pr_mtype = mtype;
443: pp->pr_alloc = alloc;
444: pp->pr_free = release;
445: pp->pr_pagesz = pagesz;
446: pp->pr_pagemask = ~(pagesz - 1);
447: pp->pr_pageshift = ffs(pagesz) - 1;
1.20 thorpej 448: pp->pr_nitems = 0;
449: pp->pr_nout = 0;
450: pp->pr_hardlimit = UINT_MAX;
451: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 452: pp->pr_hardlimit_ratecap.tv_sec = 0;
453: pp->pr_hardlimit_ratecap.tv_usec = 0;
454: pp->pr_hardlimit_warning_last.tv_sec = 0;
455: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.3 pk 456:
457: /*
458: * Decide whether to put the page header off page to avoid
459: * wasting too large a part of the page. Off-page page headers
460: * go on a hash table, so we can match a returned item
461: * with its header based on the page address.
462: * We use 1/16 of the page size as the threshold (XXX: tune)
463: */
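	/*
	 * For example, with 4096-byte pages the threshold is 256
	 * bytes: pools whose items are smaller than that keep the
	 * header inside the page itself; larger items get an
	 * off-page header from `phpool' instead.
	 */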
464: if (pp->pr_size < pagesz/16) {
465: /* Use the end of the page for the page header */
1.20 thorpej 466: pp->pr_roflags |= PR_PHINPAGE;
1.3 pk 467: pp->pr_phoffset = off =
468: pagesz - ALIGN(sizeof(struct pool_item_header));
1.2 pk 469: } else {
1.3 pk 470: /* The page header will be taken from our page header pool */
471: pp->pr_phoffset = 0;
472: off = pagesz;
1.16 briggs 473: for (i = 0; i < PR_HASHTABSIZE; i++) {
474: LIST_INIT(&pp->pr_hashtab[i]);
475: }
1.2 pk 476: }
1.1 pk 477:
1.3 pk 478: /*
479: * Alignment is to take place at `ioff' within the item. This means
480: * we must reserve up to `align - 1' bytes on the page to allow
481: * appropriate positioning of each item.
482: *
483: * Silently enforce `0 <= ioff < align'.
484: */
485: pp->pr_itemoffset = ioff = ioff % align;
486: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
487:
488: /*
489: * Use the slack between the chunks and the page header
490: * for "cache coloring".
491: */
492: slack = off - pp->pr_itemsperpage * pp->pr_size;
493: pp->pr_maxcolor = (slack / align) * align;
494: pp->pr_curcolor = 0;
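	/*
	 * Worked example (hypothetical numbers): with pr_size == 64,
	 * align == ALIGN(1) and ioff == 0, suppose the in-page header
	 * leaves off == 4064.  Then pr_itemsperpage == 4064 / 64 == 63,
	 * slack == 4064 - 63 * 64 == 32, and successive pages place
	 * their first item at color offsets 0, align, 2 * align, ...
	 * up to pr_maxcolor before wrapping back to 0.
	 */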
495:
496: pp->pr_nget = 0;
497: pp->pr_nfail = 0;
498: pp->pr_nput = 0;
499: pp->pr_npagealloc = 0;
500: pp->pr_npagefree = 0;
1.1 pk 501: pp->pr_hiwat = 0;
1.8 thorpej 502: pp->pr_nidle = 0;
1.3 pk 503:
1.25 thorpej 504: if (flags & PR_LOGGING) {
505: if (kmem_map == NULL ||
506: (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
507: M_TEMP, M_NOWAIT)) == NULL)
1.20 thorpej 508: pp->pr_roflags &= ~PR_LOGGING;
1.3 pk 509: pp->pr_curlogentry = 0;
510: pp->pr_logsize = pool_logsize;
511: }
1.25 thorpej 512:
513: pp->pr_entered_file = NULL;
514: pp->pr_entered_line = 0;
1.3 pk 515:
1.21 thorpej 516: simple_lock_init(&pp->pr_slock);
1.1 pk 517:
1.3 pk 518: /*
519: * Initialize private page header pool if we haven't done so yet.
1.23 thorpej 520: * XXX LOCKING.
1.3 pk 521: */
522: if (phpool.pr_size == 0) {
523: pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
524: 0, "phpool", 0, 0, 0, 0);
1.1 pk 525: }
526:
1.23 thorpej 527: /* Insert into the list of all pools. */
528: simple_lock(&pool_head_slock);
529: TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
530: simple_unlock(&pool_head_slock);
1.1 pk 531: }
532:
533: /*
 534: * De-commission a pool resource.
535: */
536: void
537: pool_destroy(pp)
538: struct pool *pp;
539: {
1.3 pk 540: struct pool_item_header *ph;
541:
542: #ifdef DIAGNOSTIC
1.20 thorpej 543: if (pp->pr_nout != 0) {
1.25 thorpej 544: pr_printlog(pp, NULL, printf);
1.20 thorpej 545: panic("pool_destroy: pool busy: still out: %u\n",
546: pp->pr_nout);
1.3 pk 547: }
548: #endif
1.1 pk 549:
1.3 pk 550: /* Remove all pages */
1.20 thorpej 551: if ((pp->pr_roflags & PR_STATIC) == 0)
1.3 pk 552: while ((ph = pp->pr_pagelist.tqh_first) != NULL)
553: pr_rmpage(pp, ph);
554:
555: /* Remove from global pool list */
1.23 thorpej 556: simple_lock(&pool_head_slock);
1.3 pk 557: TAILQ_REMOVE(&pool_head, pp, pr_poollist);
1.23 thorpej 558: /* XXX Only clear this if we were drainpp? */
1.3 pk 559: drainpp = NULL;
1.23 thorpej 560: simple_unlock(&pool_head_slock);
1.3 pk 561:
1.20 thorpej 562: if ((pp->pr_roflags & PR_LOGGING) != 0)
1.3 pk 563: free(pp->pr_log, M_TEMP);
1.2 pk 564:
1.20 thorpej 565: if (pp->pr_roflags & PR_FREEHEADER)
1.3 pk 566: free(pp, M_POOL);
1.1 pk 567: }
568:
569:
570: /*
1.3 pk 571: * Grab an item from the pool; must be called at appropriate spl level
1.1 pk 572: */
1.3 pk 573: void *
574: _pool_get(pp, flags, file, line)
575: struct pool *pp;
576: int flags;
577: const char *file;
578: long line;
1.1 pk 579: {
580: void *v;
581: struct pool_item *pi;
1.3 pk 582: struct pool_item_header *ph;
1.1 pk 583:
1.2 pk 584: #ifdef DIAGNOSTIC
1.34 ! thorpej 585: if (__predict_false((pp->pr_roflags & PR_STATIC) &&
! 586: (flags & PR_MALLOCOK))) {
1.25 thorpej 587: pr_printlog(pp, NULL, printf);
1.2 pk 588: panic("pool_get: static");
1.3 pk 589: }
1.2 pk 590: #endif
591:
1.34 ! thorpej 592: if (__predict_false(curproc == NULL && (flags & PR_WAITOK) != 0))
1.3 pk 593: panic("pool_get: must have NOWAIT");
1.1 pk 594:
1.21 thorpej 595: simple_lock(&pp->pr_slock);
1.25 thorpej 596: pr_enter(pp, file, line);
1.20 thorpej 597:
598: startover:
599: /*
600: * Check to see if we've reached the hard limit. If we have,
601: * and we can wait, then wait until an item has been returned to
602: * the pool.
603: */
604: #ifdef DIAGNOSTIC
1.34 ! thorpej 605: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.25 thorpej 606: pr_leave(pp);
1.21 thorpej 607: simple_unlock(&pp->pr_slock);
1.20 thorpej 608: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
609: }
610: #endif
1.34 ! thorpej 611: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.29 sommerfe 612: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 613: /*
614: * XXX: A warning isn't logged in this case. Should
615: * it be?
616: */
617: pp->pr_flags |= PR_WANTED;
1.25 thorpej 618: pr_leave(pp);
1.21 thorpej 619: simple_unlock(&pp->pr_slock);
1.20 thorpej 620: tsleep((caddr_t)pp, PSWP, pp->pr_wchan, 0);
1.21 thorpej 621: simple_lock(&pp->pr_slock);
1.25 thorpej 622: pr_enter(pp, file, line);
1.20 thorpej 623: goto startover;
624: }
1.31 thorpej 625:
626: /*
627: * Log a message that the hard limit has been hit.
628: */
629: if (pp->pr_hardlimit_warning != NULL &&
630: ratecheck(&pp->pr_hardlimit_warning_last,
631: &pp->pr_hardlimit_ratecap))
632: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 633:
634: if (flags & PR_URGENT)
635: panic("pool_get: urgent");
636:
637: pp->pr_nfail++;
638:
1.25 thorpej 639: pr_leave(pp);
1.21 thorpej 640: simple_unlock(&pp->pr_slock);
1.20 thorpej 641: return (NULL);
642: }
643:
1.3 pk 644: /*
645: * The convention we use is that if `curpage' is not NULL, then
646: * it points at a non-empty bucket. In particular, `curpage'
647: * never points at a page header which has PR_PHINPAGE set and
648: * has no items in its bucket.
649: */
1.20 thorpej 650: if ((ph = pp->pr_curpage) == NULL) {
1.15 pk 651: void *v;
652:
1.20 thorpej 653: #ifdef DIAGNOSTIC
654: if (pp->pr_nitems != 0) {
1.21 thorpej 655: simple_unlock(&pp->pr_slock);
1.20 thorpej 656: printf("pool_get: %s: curpage NULL, nitems %u\n",
657: pp->pr_wchan, pp->pr_nitems);
658: panic("pool_get: nitems inconsistent\n");
659: }
660: #endif
661:
1.21 thorpej 662: /*
663: * Call the back-end page allocator for more memory.
664: * Release the pool lock, as the back-end page allocator
665: * may block.
666: */
1.25 thorpej 667: pr_leave(pp);
1.21 thorpej 668: simple_unlock(&pp->pr_slock);
669: v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);
670: simple_lock(&pp->pr_slock);
1.25 thorpej 671: pr_enter(pp, file, line);
1.15 pk 672:
1.21 thorpej 673: if (v == NULL) {
674: /*
675: * We were unable to allocate a page, but
676: * we released the lock during allocation,
677: * so perhaps items were freed back to the
678: * pool. Check for this case.
679: */
680: if (pp->pr_curpage != NULL)
681: goto startover;
1.15 pk 682:
1.3 pk 683: if (flags & PR_URGENT)
684: panic("pool_get: urgent");
1.21 thorpej 685:
1.3 pk 686: if ((flags & PR_WAITOK) == 0) {
687: pp->pr_nfail++;
1.25 thorpej 688: pr_leave(pp);
1.21 thorpej 689: simple_unlock(&pp->pr_slock);
1.1 pk 690: return (NULL);
1.3 pk 691: }
692:
1.15 pk 693: /*
694: * Wait for items to be returned to this pool.
1.21 thorpej 695: *
1.15 pk 696: * XXX: we actually want to wait just until
697: * the page allocator has memory again. Depending
698: * on this pool's usage, we might get stuck here
699: * for a long time.
1.20 thorpej 700: *
701: * XXX: maybe we should wake up once a second and
702: * try again?
1.15 pk 703: */
1.1 pk 704: pp->pr_flags |= PR_WANTED;
1.25 thorpej 705: pr_leave(pp);
1.21 thorpej 706: simple_unlock(&pp->pr_slock);
1.1 pk 707: tsleep((caddr_t)pp, PSWP, pp->pr_wchan, 0);
1.21 thorpej 708: simple_lock(&pp->pr_slock);
1.25 thorpej 709: pr_enter(pp, file, line);
1.20 thorpej 710: goto startover;
1.1 pk 711: }
1.3 pk 712:
1.15 pk 713: /* We have more memory; add it to the pool */
714: pp->pr_npagealloc++;
715: pool_prime_page(pp, v);
716:
1.20 thorpej 717: /* Start the allocation process over. */
718: goto startover;
1.3 pk 719: }
720:
1.34 ! thorpej 721: if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
1.25 thorpej 722: pr_leave(pp);
1.21 thorpej 723: simple_unlock(&pp->pr_slock);
1.3 pk 724: panic("pool_get: %s: page empty", pp->pr_wchan);
1.21 thorpej 725: }
1.20 thorpej 726: #ifdef DIAGNOSTIC
1.34 ! thorpej 727: if (__predict_false(pp->pr_nitems == 0)) {
1.25 thorpej 728: pr_leave(pp);
1.21 thorpej 729: simple_unlock(&pp->pr_slock);
1.20 thorpej 730: printf("pool_get: %s: items on itemlist, nitems %u\n",
731: pp->pr_wchan, pp->pr_nitems);
732: panic("pool_get: nitems inconsistent\n");
733: }
734: #endif
1.3 pk 735: pr_log(pp, v, PRLOG_GET, file, line);
736:
737: #ifdef DIAGNOSTIC
1.34 ! thorpej 738: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
1.25 thorpej 739: pr_printlog(pp, pi, printf);
1.3 pk 740: panic("pool_get(%s): free list modified: magic=%x; page %p;"
741: " item addr %p\n",
742: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
743: }
744: #endif
745:
746: /*
747: * Remove from item list.
748: */
749: TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
1.20 thorpej 750: pp->pr_nitems--;
751: pp->pr_nout++;
1.6 thorpej 752: if (ph->ph_nmissing == 0) {
753: #ifdef DIAGNOSTIC
1.34 ! thorpej 754: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 755: panic("pool_get: nidle inconsistent");
756: #endif
757: pp->pr_nidle--;
758: }
1.3 pk 759: ph->ph_nmissing++;
760: if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
1.21 thorpej 761: #ifdef DIAGNOSTIC
1.34 ! thorpej 762: if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
1.25 thorpej 763: pr_leave(pp);
1.21 thorpej 764: simple_unlock(&pp->pr_slock);
765: panic("pool_get: %s: nmissing inconsistent",
766: pp->pr_wchan);
767: }
768: #endif
1.3 pk 769: /*
770: * Find a new non-empty page header, if any.
771: * Start search from the page head, to increase
772: * the chance for "high water" pages to be freed.
773: *
1.21 thorpej 774: * Migrate empty pages to the end of the list. This
775: * will speed the update of curpage as pages become
776: * idle. Empty pages intermingled with idle pages
777: * is no big deal. As soon as a page becomes un-empty,
778: * it will move back to the head of the list.
1.3 pk 779: */
780: TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
1.21 thorpej 781: TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
782: for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
783: ph = TAILQ_NEXT(ph, ph_pagelist))
1.3 pk 784: if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
785: break;
786:
787: pp->pr_curpage = ph;
1.1 pk 788: }
1.3 pk 789:
790: pp->pr_nget++;
1.20 thorpej 791:
792: /*
793: * If we have a low water mark and we are now below that low
794: * water mark, add more items to the pool.
795: */
796: if (pp->pr_nitems < pp->pr_minitems && pool_catchup(pp) != 0) {
797: /*
798: * XXX: Should we log a warning? Should we set up a timeout
799: * to try again in a second or so? The latter could break
800: * a caller's assumptions about interrupt protection, etc.
801: */
802: }
803:
1.25 thorpej 804: pr_leave(pp);
1.21 thorpej 805: simple_unlock(&pp->pr_slock);
1.1 pk 806: return (v);
807: }
808:
809: /*
1.3 pk 810: * Return resource to the pool; must be called at appropriate spl level
1.1 pk 811: */
1.3 pk 812: void
813: _pool_put(pp, v, file, line)
814: struct pool *pp;
815: void *v;
816: const char *file;
817: long line;
1.1 pk 818: {
819: struct pool_item *pi = v;
1.3 pk 820: struct pool_item_header *ph;
821: caddr_t page;
1.21 thorpej 822: int s;
1.3 pk 823:
824: page = (caddr_t)((u_long)v & pp->pr_pagemask);
1.1 pk 825:
1.21 thorpej 826: simple_lock(&pp->pr_slock);
1.25 thorpej 827: pr_enter(pp, file, line);
1.30 thorpej 828:
829: #ifdef DIAGNOSTIC
1.34 ! thorpej 830: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 831: printf("pool %s: putting with none out\n",
832: pp->pr_wchan);
833: panic("pool_put");
834: }
835: #endif
1.3 pk 836:
837: pr_log(pp, v, PRLOG_PUT, file, line);
838:
1.34 ! thorpej 839: if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
1.25 thorpej 840: pr_printlog(pp, NULL, printf);
1.3 pk 841: panic("pool_put: %s: page header missing", pp->pr_wchan);
842: }
1.28 thorpej 843:
844: #ifdef LOCKDEBUG
845: /*
846: * Check if we're freeing a locked simple lock.
847: */
848: simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
849: #endif
1.3 pk 850:
851: /*
852: * Return to item list.
853: */
1.2 pk 854: #ifdef DIAGNOSTIC
1.3 pk 855: pi->pi_magic = PI_MAGIC;
856: #endif
1.32 chs 857: #ifdef DEBUG
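	/*
	 * Scribble PI_MAGIC over the entire item, one word at a
	 * time, so that later use-after-free of the memory is more
	 * likely to be noticed.
	 */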
858: {
859: int i, *ip = v;
860:
861: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
862: *ip++ = PI_MAGIC;
863: }
864: }
865: #endif
866:
1.3 pk 867: TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
868: ph->ph_nmissing--;
869: pp->pr_nput++;
1.20 thorpej 870: pp->pr_nitems++;
871: pp->pr_nout--;
1.3 pk 872:
873: /* Cancel "pool empty" condition if it exists */
874: if (pp->pr_curpage == NULL)
875: pp->pr_curpage = ph;
876:
877: if (pp->pr_flags & PR_WANTED) {
878: pp->pr_flags &= ~PR_WANTED;
1.15 pk 879: if (ph->ph_nmissing == 0)
880: pp->pr_nidle++;
1.25 thorpej 881: pr_leave(pp);
1.21 thorpej 882: simple_unlock(&pp->pr_slock);
1.3 pk 883: wakeup((caddr_t)pp);
884: return;
885: }
886:
887: /*
1.21 thorpej 888: * If this page is now complete, do one of two things:
889: *
890: * (1) If we have more pages than the page high water
891: * mark, free the page back to the system.
892: *
893: * (2) Move it to the end of the page list, so that
894: * we minimize our chances of fragmenting the
895: * pool. Idle pages migrate to the end (along with
896: * completely empty pages, so that we find un-empty
897: * pages more quickly when we update curpage) of the
898: * list so they can be more easily swept up by
899: * the pagedaemon when pages are scarce.
1.3 pk 900: */
901: if (ph->ph_nmissing == 0) {
1.6 thorpej 902: pp->pr_nidle++;
1.3 pk 903: if (pp->pr_npages > pp->pr_maxpages) {
904: pr_rmpage(pp, ph);
905: } else {
906: TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
907: TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
908:
1.21 thorpej 909: /*
910: * Update the timestamp on the page. A page must
911: * be idle for some period of time before it can
912: * be reclaimed by the pagedaemon. This minimizes
913: * ping-pong'ing for memory.
914: */
915: s = splclock();
916: ph->ph_time = mono_time;
917: splx(s);
918:
919: /*
920: * Update the current page pointer. Just look for
921: * the first page with any free items.
922: *
923: * XXX: Maybe we want an option to look for the
924: * page with the fewest available items, to minimize
925: * fragmentation?
926: */
1.3 pk 927: for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
928: ph = TAILQ_NEXT(ph, ph_pagelist))
929: if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
930: break;
1.1 pk 931:
1.3 pk 932: pp->pr_curpage = ph;
1.1 pk 933: }
934: }
1.21 thorpej 935: /*
936: * If the page has just become un-empty, move it to the head of
937: * the list, and make it the current page. The next allocation
938: * will get the item from this page, instead of further fragmenting
939: * the pool.
940: */
941: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
942: TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
943: TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
944: pp->pr_curpage = ph;
945: }
946:
1.25 thorpej 947: pr_leave(pp);
1.21 thorpej 948: simple_unlock(&pp->pr_slock);
1.3 pk 949:
1.1 pk 950: }
951:
952: /*
1.3 pk 953: * Add N items to the pool.
1.1 pk 954: */
955: int
1.2 pk 956: pool_prime(pp, n, storage)
1.1 pk 957: struct pool *pp;
958: int n;
1.2 pk 959: caddr_t storage;
1.1 pk 960: {
1.3 pk 961: caddr_t cp;
962: int newnitems, newpages;
1.2 pk 963:
964: #ifdef DIAGNOSTIC
1.34 ! thorpej 965: if (__predict_false(storage && !(pp->pr_roflags & PR_STATIC)))
1.2 pk 966: panic("pool_prime: static");
967: /* !storage && static caught below */
968: #endif
1.1 pk 969:
1.21 thorpej 970: simple_lock(&pp->pr_slock);
971:
1.3 pk 972: newnitems = pp->pr_minitems + n;
973: newpages =
1.18 thorpej 974: roundup(newnitems, pp->pr_itemsperpage) / pp->pr_itemsperpage
1.3 pk 975: - pp->pr_minpages;
976:
977: while (newpages-- > 0) {
1.20 thorpej 978: if (pp->pr_roflags & PR_STATIC) {
1.3 pk 979: cp = storage;
980: storage += pp->pr_pagesz;
981: } else {
1.21 thorpej 982: simple_unlock(&pp->pr_slock);
1.3 pk 983: cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype);
1.21 thorpej 984: simple_lock(&pp->pr_slock);
1.3 pk 985: }
1.2 pk 986:
1.3 pk 987: if (cp == NULL) {
1.21 thorpej 988: simple_unlock(&pp->pr_slock);
1.1 pk 989: return (ENOMEM);
990: }
991:
1.26 thorpej 992: pp->pr_npagealloc++;
1.3 pk 993: pool_prime_page(pp, cp);
994: pp->pr_minpages++;
1.1 pk 995: }
1.3 pk 996:
997: pp->pr_minitems = newnitems;
998:
999: if (pp->pr_minpages >= pp->pr_maxpages)
1000: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1001:
1.21 thorpej 1002: simple_unlock(&pp->pr_slock);
1.1 pk 1003: return (0);
1004: }
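/*
 * For example (illustrative only), a subsystem that must never fail
 * its first 128 allocations could reserve them up front:
 *
 *	if (pool_prime(pp, 128, NULL) != 0)
 *		panic("foo: can't prime pool");
 *
 * NULL storage is passed for any pool that is not PR_STATIC.
 */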
1.3 pk 1005:
1006: /*
1007: * Add a page worth of items to the pool.
1.21 thorpej 1008: *
1009: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1010: */
1.21 thorpej 1011: static void
1.3 pk 1012: pool_prime_page(pp, storage)
1013: struct pool *pp;
1014: caddr_t storage;
1015: {
1016: struct pool_item *pi;
1017: struct pool_item_header *ph;
1018: caddr_t cp = storage;
1019: unsigned int align = pp->pr_align;
1020: unsigned int ioff = pp->pr_itemoffset;
1.27 pk 1021: int s, n;
1.3 pk 1022:
1.20 thorpej 1023: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
1.3 pk 1024: ph = (struct pool_item_header *)(cp + pp->pr_phoffset);
1025: } else {
1.27 pk 1026: s = splhigh();
1.3 pk 1027: ph = pool_get(&phpool, PR_URGENT);
1.27 pk 1028: splx(s);
1.3 pk 1029: LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
1030: ph, ph_hashlist);
1031: }
1032:
1033: /*
1034: * Insert page header.
1035: */
1036: TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
1037: TAILQ_INIT(&ph->ph_itemlist);
1038: ph->ph_page = storage;
1039: ph->ph_nmissing = 0;
1.21 thorpej 1040: memset(&ph->ph_time, 0, sizeof(ph->ph_time));
1.3 pk 1041:
1.6 thorpej 1042: pp->pr_nidle++;
1043:
1.3 pk 1044: /*
1045: * Color this page.
1046: */
1047: cp = (caddr_t)(cp + pp->pr_curcolor);
1048: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1049: pp->pr_curcolor = 0;
1050:
1051: /*
 1052: * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1053: */
1054: if (ioff != 0)
1055: cp = (caddr_t)(cp + (align - ioff));
1056:
1057: /*
1058: * Insert remaining chunks on the bucket list.
1059: */
1060: n = pp->pr_itemsperpage;
1.20 thorpej 1061: pp->pr_nitems += n;
1.3 pk 1062:
1063: while (n--) {
1064: pi = (struct pool_item *)cp;
1065:
1066: /* Insert on page list */
1067: TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
1068: #ifdef DIAGNOSTIC
1069: pi->pi_magic = PI_MAGIC;
1070: #endif
1071: cp = (caddr_t)(cp + pp->pr_size);
1072: }
1073:
1074: /*
1075: * If the pool was depleted, point at the new page.
1076: */
1077: if (pp->pr_curpage == NULL)
1078: pp->pr_curpage = ph;
1079:
1080: if (++pp->pr_npages > pp->pr_hiwat)
1081: pp->pr_hiwat = pp->pr_npages;
1082: }
1083:
1.20 thorpej 1084: /*
1085: * Like pool_prime(), except this is used by pool_get() when nitems
 1086: * drops below the low water mark. This is used to catch up nitems
1087: * with the low water mark.
1088: *
1.21 thorpej 1089: * Note 1, we never wait for memory here, we let the caller decide what to do.
1.20 thorpej 1090: *
1091: * Note 2, this doesn't work with static pools.
1092: *
1093: * Note 3, we must be called with the pool already locked, and we return
1094: * with it locked.
1095: */
1096: static int
1097: pool_catchup(pp)
1098: struct pool *pp;
1099: {
1100: caddr_t cp;
1101: int error = 0;
1102:
1103: if (pp->pr_roflags & PR_STATIC) {
1104: /*
1105: * We dropped below the low water mark, and this is not a
1106: * good thing. Log a warning.
1.21 thorpej 1107: *
1108: * XXX: rate-limit this?
1.20 thorpej 1109: */
1110: printf("WARNING: static pool `%s' dropped below low water "
1111: "mark\n", pp->pr_wchan);
1112: return (0);
1113: }
1114:
1.21 thorpej 1115: while (pp->pr_nitems < pp->pr_minitems) {
1.20 thorpej 1116: /*
1.21 thorpej 1117: * Call the page back-end allocator for more memory.
1118: *
1119: * XXX: We never wait, so should we bother unlocking
1120: * the pool descriptor?
1.20 thorpej 1121: */
1.21 thorpej 1122: simple_unlock(&pp->pr_slock);
1.20 thorpej 1123: cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype);
1.21 thorpej 1124: simple_lock(&pp->pr_slock);
1.34 ! thorpej 1125: if (__predict_false(cp == NULL)) {
1.20 thorpej 1126: error = ENOMEM;
1127: break;
1128: }
1.26 thorpej 1129: pp->pr_npagealloc++;
1.20 thorpej 1130: pool_prime_page(pp, cp);
1131: }
1132:
1133: return (error);
1134: }
1135:
1.3 pk 1136: void
1137: pool_setlowat(pp, n)
1138: pool_handle_t pp;
1139: int n;
1140: {
1.20 thorpej 1141: int error;
1.15 pk 1142:
1.21 thorpej 1143: simple_lock(&pp->pr_slock);
1144:
1.3 pk 1145: pp->pr_minitems = n;
1.15 pk 1146: pp->pr_minpages = (n == 0)
1147: ? 0
1.18 thorpej 1148: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1149:
1150: /* Make sure we're caught up with the newly-set low water mark. */
1.21 thorpej 1151: if ((error = pool_catchup(pp)) != 0) {
1.20 thorpej 1152: /*
1153: * XXX: Should we log a warning? Should we set up a timeout
1154: * to try again in a second or so? The latter could break
1155: * a caller's assumptions about interrupt protection, etc.
1156: */
1157: }
1.21 thorpej 1158:
1159: simple_unlock(&pp->pr_slock);
1.3 pk 1160: }
1161:
1162: void
1163: pool_sethiwat(pp, n)
1164: pool_handle_t pp;
1165: int n;
1166: {
1.15 pk 1167:
1.21 thorpej 1168: simple_lock(&pp->pr_slock);
1169:
1.15 pk 1170: pp->pr_maxpages = (n == 0)
1171: ? 0
1.18 thorpej 1172: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1173:
1174: simple_unlock(&pp->pr_slock);
1.3 pk 1175: }
1176:
1.20 thorpej 1177: void
1178: pool_sethardlimit(pp, n, warnmess, ratecap)
1179: pool_handle_t pp;
1180: int n;
1181: const char *warnmess;
1182: int ratecap;
1183: {
1184:
1.21 thorpej 1185: simple_lock(&pp->pr_slock);
1.20 thorpej 1186:
1187: pp->pr_hardlimit = n;
1188: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1189: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1190: pp->pr_hardlimit_warning_last.tv_sec = 0;
1191: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1192:
1193: /*
1.21 thorpej 1194: * In-line version of pool_sethiwat(), because we don't want to
1195: * release the lock.
1.20 thorpej 1196: */
1197: pp->pr_maxpages = (n == 0)
1198: ? 0
1199: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1200:
1201: simple_unlock(&pp->pr_slock);
1.20 thorpej 1202: }
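/*
 * For example (hypothetical values), to cap a pool at 1024
 * outstanding items, logging the warning at most once per minute,
 * and keep at least 16 items on hand:
 *
 *	pool_sethardlimit(pp, 1024,
 *	    "WARNING: foo pool limit reached", 60);
 *	pool_setlowat(pp, 16);
 */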
1.3 pk 1203:
1204: /*
1205: * Default page allocator.
1206: */
1207: static void *
1208: pool_page_alloc(sz, flags, mtype)
1209: unsigned long sz;
1210: int flags;
1211: int mtype;
1212: {
1.11 thorpej 1213: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
1.3 pk 1214:
1.11 thorpej 1215: return ((void *)uvm_km_alloc_poolpage(waitok));
1.3 pk 1216: }
1217:
1218: static void
1219: pool_page_free(v, sz, mtype)
1220: void *v;
1221: unsigned long sz;
1222: int mtype;
1223: {
1224:
1.10 eeh 1225: uvm_km_free_poolpage((vaddr_t)v);
1.3 pk 1226: }
1.12 thorpej 1227:
1228: /*
1229: * Alternate pool page allocator for pools that know they will
1230: * never be accessed in interrupt context.
1231: */
1232: void *
1233: pool_page_alloc_nointr(sz, flags, mtype)
1234: unsigned long sz;
1235: int flags;
1236: int mtype;
1237: {
1238: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
1239:
1240: return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
1241: waitok));
1242: }
1243:
1244: void
1245: pool_page_free_nointr(v, sz, mtype)
1246: void *v;
1247: unsigned long sz;
1248: int mtype;
1249: {
1250:
1251: uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
1252: }
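/*
 * A pool whose items are never allocated or freed from interrupt
 * context can pass these as its back-end, e.g. (hypothetical pool):
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    0, pool_page_alloc_nointr, pool_page_free_nointr,
 *	    M_DEVBUF);
 */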
1253:
1.3 pk 1254:
1255: /*
1256: * Release all complete pages that have not been used recently.
1257: */
1258: void
1.25 thorpej 1259: _pool_reclaim(pp, file, line)
1.3 pk 1260: pool_handle_t pp;
1.25 thorpej 1261: const char *file;
1262: long line;
1.3 pk 1263: {
1264: struct pool_item_header *ph, *phnext;
1.21 thorpej 1265: struct timeval curtime;
1266: int s;
1.3 pk 1267:
1.20 thorpej 1268: if (pp->pr_roflags & PR_STATIC)
1.3 pk 1269: return;
1270:
1.21 thorpej 1271: if (simple_lock_try(&pp->pr_slock) == 0)
1.3 pk 1272: return;
1.25 thorpej 1273: pr_enter(pp, file, line);
1.3 pk 1274:
1.21 thorpej 1275: s = splclock();
1276: curtime = mono_time;
1277: splx(s);
1278:
1.3 pk 1279: for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
1280: phnext = TAILQ_NEXT(ph, ph_pagelist);
1281:
1282: /* Check our minimum page claim */
1283: if (pp->pr_npages <= pp->pr_minpages)
1284: break;
1285:
1286: if (ph->ph_nmissing == 0) {
1287: struct timeval diff;
1288: timersub(&curtime, &ph->ph_time, &diff);
1289: if (diff.tv_sec < pool_inactive_time)
1290: continue;
1.21 thorpej 1291:
1292: /*
1293: * If freeing this page would put us below
1294: * the low water mark, stop now.
1295: */
1296: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1297: pp->pr_minitems)
1298: break;
1299:
1.3 pk 1300: pr_rmpage(pp, ph);
1301: }
1302: }
1303:
1.25 thorpej 1304: pr_leave(pp);
1.21 thorpej 1305: simple_unlock(&pp->pr_slock);
1.3 pk 1306: }
1307:
1308:
1309: /*
1310: * Drain pools, one at a time.
1.21 thorpej 1311: *
1312: * Note, we must never be called from an interrupt context.
1.3 pk 1313: */
1314: void
1315: pool_drain(arg)
1316: void *arg;
1317: {
1318: struct pool *pp;
1.23 thorpej 1319: int s;
1.3 pk 1320:
1.23 thorpej 1321: s = splimp();
1322: simple_lock(&pool_head_slock);
1323:
1324: if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL)
1325: goto out;
1.3 pk 1326:
1327: pp = drainpp;
1328: drainpp = TAILQ_NEXT(pp, pr_poollist);
1329:
1330: pool_reclaim(pp);
1.23 thorpej 1331:
1332: out:
1333: simple_unlock(&pool_head_slock);
1.3 pk 1334: splx(s);
1335: }
1336:
1337:
1338: /*
1339: * Diagnostic helpers.
1340: */
1341: void
1.25 thorpej 1342: pool_print(pp, modif)
1.3 pk 1343: struct pool *pp;
1.25 thorpej 1344: const char *modif;
1.21 thorpej 1345: {
1346: int s;
1347:
1348: s = splimp();
1.25 thorpej 1349: if (simple_lock_try(&pp->pr_slock) == 0) {
1350: printf("pool %s is locked; try again later\n",
1351: pp->pr_wchan);
1352: splx(s);
1353: return;
1354: }
1355: pool_print1(pp, modif, printf);
1.21 thorpej 1356: simple_unlock(&pp->pr_slock);
1357: splx(s);
1358: }
1359:
1.25 thorpej 1360: void
1361: pool_printit(pp, modif, pr)
1362: struct pool *pp;
1363: const char *modif;
1364: void (*pr) __P((const char *, ...));
1365: {
1366: int didlock = 0;
1367:
1368: if (pp == NULL) {
1369: (*pr)("Must specify a pool to print.\n");
1370: return;
1371: }
1372:
1373: /*
1374: * Called from DDB; interrupts should be blocked, and all
1375: * other processors should be paused. We can skip locking
1376: * the pool in this case.
1377: *
1378: * We do a simple_lock_try() just to print the lock
1379: * status, however.
1380: */
1381:
1382: if (simple_lock_try(&pp->pr_slock) == 0)
1383: (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1384: else
1385: didlock = 1;
1386:
1387: pool_print1(pp, modif, pr);
1388:
1389: if (didlock)
1390: simple_unlock(&pp->pr_slock);
1391: }
1392:
1.21 thorpej 1393: static void
1.25 thorpej 1394: pool_print1(pp, modif, pr)
1.21 thorpej 1395: struct pool *pp;
1.25 thorpej 1396: const char *modif;
1397: void (*pr) __P((const char *, ...));
1.3 pk 1398: {
1.25 thorpej 1399: struct pool_item_header *ph;
1400: #ifdef DIAGNOSTIC
1401: struct pool_item *pi;
1402: #endif
1403: int print_log = 0, print_pagelist = 0;
1404: char c;
1405:
1406: while ((c = *modif++) != '\0') {
1407: if (c == 'l')
1408: print_log = 1;
1409: if (c == 'p')
1410: print_pagelist = 1;
 1411:
1412: }
1413:
1414: (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1415: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1416: pp->pr_roflags);
1417: (*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype);
1418: (*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free);
1419: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1420: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1421: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1422: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1423:
1424: (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1425: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1426: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1427: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1428:
1429: if (print_pagelist == 0)
1430: goto skip_pagelist;
1431:
1432: if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
1433: (*pr)("\n\tpage list:\n");
1434: for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
1435: (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1436: ph->ph_page, ph->ph_nmissing,
1437: (u_long)ph->ph_time.tv_sec,
1438: (u_long)ph->ph_time.tv_usec);
1439: #ifdef DIAGNOSTIC
1440: for (pi = TAILQ_FIRST(&ph->ph_itemlist); pi != NULL;
1441: pi = TAILQ_NEXT(pi, pi_list)) {
1442: if (pi->pi_magic != PI_MAGIC) {
1443: (*pr)("\t\t\titem %p, magic 0x%x\n",
1444: pi, pi->pi_magic);
1445: }
1446: }
1447: #endif
1448: }
1449: if (pp->pr_curpage == NULL)
1450: (*pr)("\tno current page\n");
1451: else
1452: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1453:
1454: skip_pagelist:
1455:
1456: if (print_log == 0)
1457: goto skip_log;
1458:
1459: (*pr)("\n");
1460: if ((pp->pr_roflags & PR_LOGGING) == 0)
1461: (*pr)("\tno log\n");
1462: else
1463: pr_printlog(pp, NULL, pr);
1.3 pk 1464:
1.25 thorpej 1465: skip_log:
1.3 pk 1466:
1.25 thorpej 1467: pr_enter_check(pp, pr);
1.3 pk 1468: }
1469:
1470: int
1471: pool_chk(pp, label)
1472: struct pool *pp;
1473: char *label;
1474: {
1475: struct pool_item_header *ph;
1476: int r = 0;
1477:
1.21 thorpej 1478: simple_lock(&pp->pr_slock);
1.3 pk 1479:
1480: for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
1481: ph = TAILQ_NEXT(ph, ph_pagelist)) {
1482:
1483: struct pool_item *pi;
1484: int n;
1485: caddr_t page;
1486:
1487: page = (caddr_t)((u_long)ph & pp->pr_pagemask);
1.20 thorpej 1488: if (page != ph->ph_page &&
1489: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1.3 pk 1490: if (label != NULL)
1491: printf("%s: ", label);
1.16 briggs 1492: printf("pool(%p:%s): page inconsistency: page %p;"
1493: " at page head addr %p (p %p)\n", pp,
1.3 pk 1494: pp->pr_wchan, ph->ph_page,
1495: ph, page);
1496: r++;
1497: goto out;
1498: }
1499:
1500: for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1501: pi != NULL;
1502: pi = TAILQ_NEXT(pi,pi_list), n++) {
1503:
1504: #ifdef DIAGNOSTIC
1505: if (pi->pi_magic != PI_MAGIC) {
1506: if (label != NULL)
1507: printf("%s: ", label);
1508: printf("pool(%s): free list modified: magic=%x;"
1509: " page %p; item ordinal %d;"
1510: " addr %p (p %p)\n",
1511: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1512: n, pi, page);
1513: panic("pool");
1514: }
1515: #endif
1516: page = (caddr_t)((u_long)pi & pp->pr_pagemask);
1517: if (page == ph->ph_page)
1518: continue;
1519:
1520: if (label != NULL)
1521: printf("%s: ", label);
1.16 briggs 1522: printf("pool(%p:%s): page inconsistency: page %p;"
1523: " item ordinal %d; addr %p (p %p)\n", pp,
1.3 pk 1524: pp->pr_wchan, ph->ph_page,
1525: n, pi, page);
1526: r++;
1527: goto out;
1528: }
1529: }
1530: out:
1.21 thorpej 1531: simple_unlock(&pp->pr_slock);
1.3 pk 1532: return (r);
1533: }