Annotation of src/sys/kern/subr_pool.c, Revision 1.21.2.2.2.2
1.21.2.2.2.2! chs 1: /* $NetBSD: subr_pool.c,v 1.21.2.2.2.1 1999/06/21 01:24:03 thorpej Exp $ */
1.1 pk 2:
3: /*-
1.20 thorpej 4: * Copyright (c) 1997, 1999 The NetBSD Foundation, Inc.
1.1 pk 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 8: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9: * Simulation Facility, NASA Ames Research Center.
1.1 pk 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
1.13 christos 21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
1.1 pk 23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
39:
1.21.2.2.2.1 thorpej 40: #include "opt_pool.h"
41: #include "opt_poollog.h"
42:
1.1 pk 43: #include <sys/param.h>
44: #include <sys/systm.h>
45: #include <sys/proc.h>
46: #include <sys/errno.h>
47: #include <sys/kernel.h>
48: #include <sys/malloc.h>
49: #include <sys/lock.h>
50: #include <sys/pool.h>
1.20 thorpej 51: #include <sys/syslog.h>
1.1 pk 52:
1.3 pk 53: #include <vm/vm.h>
54: #include <vm/vm_kern.h>
55:
56: #include <uvm/uvm.h>
57:
1.1 pk 58: /*
59: * Pool resource management utility.
1.3 pk 60: *
61: * Memory is allocated in pages which are split into pieces according
62: * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
63: * in the pool structure and the individual pool items are on a linked list
64: * headed by `ph_itemlist' in each page header. The memory for building
65: * the page list is either taken from the allocated pages themselves (for
66: * small pool items) or taken from an internal pool of page headers (`phpool').
1.1 pk 67: */
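/*
 * Illustrative sketch (not part of this revision): typical use of the
 * pool API implemented below.  `struct foo', `foopool' and the choice
 * of M_DEVBUF are assumptions for the example; the pool_create(),
 * pool_get() and pool_put() signatures match this file.
 */
#if 0 /* example only */
struct foo { int f_state; };
static struct pool *foopool;

void
foo_example()
{
	struct foo *f;

	/* NULL alloc/release selects the default page allocator. */
	foopool = pool_create(sizeof(struct foo), 0, 0, 0, "foopl",
	    0, NULL, NULL, M_DEVBUF);
	if (foopool == NULL)
		panic("foo_example: can't create pool");

	f = pool_get(foopool, PR_WAITOK);	/* may sleep for memory */
	f->f_state = 1;
	pool_put(foopool, f);			/* hand the item back */

	pool_destroy(foopool);
}
#endif /* example only */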
68:
1.3 pk 69: /* List of all pools */
1.5 thorpej 70: TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
1.3 pk 71:
72: /* Private pool for page header structures */
73: static struct pool phpool;
74:
75: /* # of seconds to retain page after last use */
76: int pool_inactive_time = 10;
77:
78: /* Next candidate for drainage (see pool_drain()) */
1.21.2.2 thorpej 79: static struct pool *drainpp;
80:
81: /* This spin lock protects both pool_head and drainpp. */
82: struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
1.3 pk 83:
84: struct pool_item_header {
85: /* Page headers */
86: TAILQ_ENTRY(pool_item_header)
87: ph_pagelist; /* pool page list */
88: TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
89: LIST_ENTRY(pool_item_header)
90: ph_hashlist; /* Off-page page headers */
91: int ph_nmissing; /* # of chunks in use */
92: caddr_t ph_page; /* this page's address */
93: struct timeval ph_time; /* last referenced */
94: };
95:
1.1 pk 96: struct pool_item {
1.3 pk 97: #ifdef DIAGNOSTIC
98: int pi_magic;
1.21.2.2.2.1 thorpej 99: #define PI_MAGIC 0xdeadbeef
1.3 pk 100: #endif
101: /* Other entries use only this list entry */
102: TAILQ_ENTRY(pool_item) pi_list;
103: };
104:
105:
1.21.2.2.2.1 thorpej 106: #define PR_HASH_INDEX(pp,addr) \
1.3 pk 107: (((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))
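/*
 * Worked example (illustrative): with pagesz = 4096, pr_pageshift is
 * ffs(4096) - 1 = 12 and pr_pagemask is ~(4096 - 1), so an item at
 * 0xc0a47f30 lives on the page at 0xc0a47000 and its off-page header
 * hangs off bucket (0xc0a47000 >> 12) & (PR_HASHTABSIZE - 1).
 */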
108:
109:
110:
111: static struct pool_item_header
112: *pr_find_pagehead __P((struct pool *, caddr_t));
113: static void pr_rmpage __P((struct pool *, struct pool_item_header *));
1.20 thorpej 114: static int pool_catchup __P((struct pool *));
1.21 thorpej 115: static void pool_prime_page __P((struct pool *, caddr_t));
1.3 pk 116: static void *pool_page_alloc __P((unsigned long, int, int));
117: static void pool_page_free __P((void *, unsigned long, int));
118:
1.21.2.2.2.1 thorpej 119: static void pool_print1 __P((struct pool *, const char *,
120: void (*)(const char *, ...)));
1.3 pk 121:
122: /*
123: * Pool log entry. An array of these is allocated in pool_create().
124: */
125: struct pool_log {
126: const char *pl_file;
127: long pl_line;
128: int pl_action;
1.21.2.2.2.1 thorpej 129: #define PRLOG_GET 1
130: #define PRLOG_PUT 2
1.3 pk 131: void *pl_addr;
1.1 pk 132: };
133:
1.3 pk 134: /* Number of entries in pool log buffers */
1.17 thorpej 135: #ifndef POOL_LOGSIZE
136: #define POOL_LOGSIZE 10
137: #endif
138:
139: int pool_logsize = POOL_LOGSIZE;
1.1 pk 140:
1.21.2.2.2.1 thorpej 141: #ifdef DIAGNOSTIC
1.3 pk 142: static void pr_log __P((struct pool *, void *, int, const char *, long));
1.21.2.2.2.1 thorpej 143: static void pr_printlog __P((struct pool *, struct pool_item *,
144: void (*)(const char *, ...)));
145: static void pr_enter __P((struct pool *, const char *, long));
146: static void pr_leave __P((struct pool *));
147: static void pr_enter_check __P((struct pool *,
148: void (*)(const char *, ...)));
1.3 pk 149:
150: static __inline__ void
151: pr_log(pp, v, action, file, line)
152: struct pool *pp;
153: void *v;
154: int action;
155: const char *file;
156: long line;
157: {
158: int n = pp->pr_curlogentry;
159: struct pool_log *pl;
160:
1.20 thorpej 161: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 162: return;
163:
164: /*
165: * Fill in the current entry. Wrap around and overwrite
166: * the oldest entry if necessary.
167: */
168: pl = &pp->pr_log[n];
169: pl->pl_file = file;
170: pl->pl_line = line;
171: pl->pl_action = action;
172: pl->pl_addr = v;
173: if (++n >= pp->pr_logsize)
174: n = 0;
175: pp->pr_curlogentry = n;
176: }
177:
178: static void
1.21.2.2.2.1 thorpej 179: pr_printlog(pp, pi, pr)
1.3 pk 180: struct pool *pp;
1.21.2.2.2.1 thorpej 181: struct pool_item *pi;
182: void (*pr) __P((const char *, ...));
1.3 pk 183: {
184: int i = pp->pr_logsize;
185: int n = pp->pr_curlogentry;
186:
1.20 thorpej 187: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 188: return;
189:
190: /*
191: * Print all entries in this pool's log.
192: */
193: while (i-- > 0) {
194: struct pool_log *pl = &pp->pr_log[n];
195: if (pl->pl_action != 0) {
1.21.2.2.2.1 thorpej 196: if (pi == NULL || pi == pl->pl_addr) {
197: (*pr)("\tlog entry %d:\n", i);
198: (*pr)("\t\taction = %s, addr = %p\n",
199: pl->pl_action == PRLOG_GET ? "get" : "put",
200: pl->pl_addr);
201: (*pr)("\t\tfile: %s at line %lu\n",
202: pl->pl_file, pl->pl_line);
203: }
1.3 pk 204: }
205: if (++n >= pp->pr_logsize)
206: n = 0;
207: }
208: }
209:
1.21.2.2.2.1 thorpej 210: static __inline__ void
211: pr_enter(pp, file, line)
212: struct pool *pp;
213: const char *file;
214: long line;
215: {
216:
217: if (pp->pr_entered_file != NULL) {
218: printf("pool %s: reentrancy at file %s line %ld\n",
219: pp->pr_wchan, file, line);
220: printf(" previous entry at file %s line %ld\n",
221: pp->pr_entered_file, pp->pr_entered_line);
222: panic("pr_enter");
223: }
224:
225: pp->pr_entered_file = file;
226: pp->pr_entered_line = line;
227: }
228:
229: static __inline__ void
230: pr_leave(pp)
231: struct pool *pp;
232: {
233:
234: if (pp->pr_entered_file == NULL) {
235: printf("pool %s not entered?\n", pp->pr_wchan);
236: panic("pr_leave");
237: }
238:
239: pp->pr_entered_file = NULL;
240: pp->pr_entered_line = 0;
241: }
242:
243: static __inline__ void
244: pr_enter_check(pp, pr)
245: struct pool *pp;
246: void (*pr) __P((const char *, ...));
247: {
248:
249: if (pp->pr_entered_file != NULL)
250: (*pr)("\n\tcurrently entered from file %s line %ld\n",
251: pp->pr_entered_file, pp->pr_entered_line);
252: }
253: #else
254: #define pr_log(pp, v, action, file, line)
255: #define pr_printlog(pp, pi, pr)
256: #define pr_enter(pp, file, line)
257: #define pr_leave(pp)
258: #define pr_enter_check(pp, pr)
259: #endif /* DIAGNOSTIC */
1.3 pk 260:
261: /*
262: * Return the pool page header based on page address.
263: */
264: static __inline__ struct pool_item_header *
265: pr_find_pagehead(pp, page)
266: struct pool *pp;
267: caddr_t page;
268: {
269: struct pool_item_header *ph;
270:
1.20 thorpej 271: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.3 pk 272: return ((struct pool_item_header *)(page + pp->pr_phoffset));
273:
274: for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
275: ph != NULL;
276: ph = LIST_NEXT(ph, ph_hashlist)) {
277: if (ph->ph_page == page)
278: return (ph);
279: }
280: return (NULL);
281: }
282:
283: /*
284: * Remove a page from the pool.
285: */
286: static __inline__ void
287: pr_rmpage(pp, ph)
288: struct pool *pp;
289: struct pool_item_header *ph;
290: {
291:
292: /*
1.7 thorpej 293: * If the page was idle, decrement the idle page count.
1.3 pk 294: */
1.6 thorpej 295: if (ph->ph_nmissing == 0) {
296: #ifdef DIAGNOSTIC
297: if (pp->pr_nidle == 0)
298: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 299: if (pp->pr_nitems < pp->pr_itemsperpage)
300: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 301: #endif
302: pp->pr_nidle--;
303: }
1.7 thorpej 304:
1.20 thorpej 305: pp->pr_nitems -= pp->pr_itemsperpage;
306:
1.7 thorpej 307: /*
308: * Unlink a page from the pool and release it.
309: */
310: TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
311: (*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
312: pp->pr_npages--;
313: pp->pr_npagefree++;
1.6 thorpej 314:
1.21.2.1 chs 315: if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
1.21.2.2.2.1 thorpej 316: int s;
1.21.2.1 chs 317: LIST_REMOVE(ph, ph_hashlist);
1.21.2.2.2.1 thorpej 318: s = splhigh();
1.21.2.1 chs 319: pool_put(&phpool, ph);
1.21.2.2.2.1 thorpej 320: splx(s);
1.21.2.1 chs 321: }
322:
1.3 pk 323: if (pp->pr_curpage == ph) {
324: /*
325: * Find a new non-empty page header, if any.
326: * Start search from the page head, to increase the
327: * chance for "high water" pages to be freed.
328: */
329: for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
330: ph = TAILQ_NEXT(ph, ph_pagelist))
331: if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
332: break;
333:
334: pp->pr_curpage = ph;
1.21 thorpej 335: }
1.3 pk 336: }
337:
338: /*
339: * Allocate and initialize a pool.
340: */
1.1 pk 341: struct pool *
1.3 pk 342: pool_create(size, align, ioff, nitems, wchan, pagesz, alloc, release, mtype)
1.1 pk 343: size_t size;
1.3 pk 344: u_int align;
345: u_int ioff;
1.1 pk 346: int nitems;
1.21 thorpej 347: const char *wchan;
1.3 pk 348: size_t pagesz;
349: void *(*alloc) __P((unsigned long, int, int));
350: void (*release) __P((void *, unsigned long, int));
1.1 pk 351: int mtype;
352: {
353: struct pool *pp;
1.3 pk 354: int flags;
1.1 pk 355:
1.3 pk 356: pp = (struct pool *)malloc(sizeof(*pp), M_POOL, M_NOWAIT);
357: if (pp == NULL)
1.1 pk 358: return (NULL);
1.3 pk 359:
360: flags = PR_FREEHEADER;
361: pool_init(pp, size, align, ioff, flags, wchan, pagesz,
362: alloc, release, mtype);
363:
364: if (nitems != 0) {
365: if (pool_prime(pp, nitems, NULL) != 0) {
366: pool_destroy(pp);
367: return (NULL);
368: }
1.1 pk 369: }
370:
1.3 pk 371: return (pp);
372: }
373:
374: /*
375: * Initialize the given pool resource structure.
376: *
377: * We export this routine to allow other kernel parts to declare
378: * static pools that must be initialized before malloc() is available.
379: */
380: void
381: pool_init(pp, size, align, ioff, flags, wchan, pagesz, alloc, release, mtype)
382: struct pool *pp;
383: size_t size;
384: u_int align;
385: u_int ioff;
386: int flags;
1.21 thorpej 387: const char *wchan;
1.3 pk 388: size_t pagesz;
389: void *(*alloc) __P((unsigned long, int, int));
390: void (*release) __P((void *, unsigned long, int));
391: int mtype;
392: {
1.16 briggs 393: int off, slack, i;
1.3 pk 394:
1.21.2.2.2.1 thorpej 395: #ifdef POOL_DIAGNOSTIC
396: /*
397: * Always log if POOL_DIAGNOSTIC is defined.
398: */
399: if (pool_logsize != 0)
400: flags |= PR_LOGGING;
401: #endif
402:
1.3 pk 403: /*
404: * Check arguments and construct default values.
405: */
406: if (!powerof2(pagesz) || pagesz > PAGE_SIZE)
407: panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);
408:
1.4 thorpej 409: if (alloc == NULL && release == NULL) {
1.3 pk 410: alloc = pool_page_alloc;
411: release = pool_page_free;
1.4 thorpej 412: pagesz = PAGE_SIZE; /* Rounds to PAGE_SIZE anyhow. */
413: } else if ((alloc != NULL && release != NULL) == 0) {
414: /* If you specify one, you must specify both. */
415: panic("pool_init: must specify alloc and release together");
416: }
417:
1.3 pk 418: if (pagesz == 0)
419: pagesz = PAGE_SIZE;
420:
421: if (align == 0)
422: align = ALIGN(1);
1.14 thorpej 423:
424: if (size < sizeof(struct pool_item))
425: size = sizeof(struct pool_item);
1.3 pk 426:
427: /*
428: * Initialize the pool structure.
429: */
430: TAILQ_INIT(&pp->pr_pagelist);
431: pp->pr_curpage = NULL;
432: pp->pr_npages = 0;
433: pp->pr_minitems = 0;
434: pp->pr_minpages = 0;
435: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 436: pp->pr_roflags = flags;
437: pp->pr_flags = 0;
1.3 pk 438: pp->pr_size = ALIGN(size);
439: pp->pr_align = align;
440: pp->pr_wchan = wchan;
441: pp->pr_mtype = mtype;
442: pp->pr_alloc = alloc;
443: pp->pr_free = release;
444: pp->pr_pagesz = pagesz;
445: pp->pr_pagemask = ~(pagesz - 1);
446: pp->pr_pageshift = ffs(pagesz) - 1;
1.20 thorpej 447: pp->pr_nitems = 0;
448: pp->pr_nout = 0;
449: pp->pr_hardlimit = UINT_MAX;
450: pp->pr_hardlimit_warning = NULL;
451: pp->pr_hardlimit_ratecap = 0;
452: memset(&pp->pr_hardlimit_warning_last, 0,
453: sizeof(pp->pr_hardlimit_warning_last));
1.3 pk 454:
455: /*
456: * Decide whether to put the page header off page to avoid
457: * wasting too large a part of the page. Off-page page headers
458: * go on a hash table, so we can match a returned item
459: * with its header based on the page address.
460: * We use 1/16 of the page size as the threshold (XXX: tune)
461: */
462: if (pp->pr_size < pagesz/16) {
463: /* Use the end of the page for the page header */
1.20 thorpej 464: pp->pr_roflags |= PR_PHINPAGE;
1.3 pk 465: pp->pr_phoffset = off =
466: pagesz - ALIGN(sizeof(struct pool_item_header));
1.2 pk 467: } else {
1.3 pk 468: /* The page header will be taken from our page header pool */
469: pp->pr_phoffset = 0;
470: off = pagesz;
1.16 briggs 471: for (i = 0; i < PR_HASHTABSIZE; i++) {
472: LIST_INIT(&pp->pr_hashtab[i]);
473: }
1.2 pk 474: }
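/*
 * Worked example (illustrative): with pagesz = 4096 the threshold is
 * 4096/16 = 256 bytes.  A 64-byte item keeps its header in-page at
 * offset 4096 - ALIGN(sizeof(struct pool_item_header)); a 512-byte
 * item gets an off-page header from `phpool', found again later via
 * the page-address hash above.
 */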
1.1 pk 475:
1.3 pk 476: /*
477: * Alignment is to take place at `ioff' within the item. This means
478: * we must reserve up to `align - 1' bytes on the page to allow
479: * appropriate positioning of each item.
480: *
481: * Silently enforce `0 <= ioff < align'.
482: */
483: pp->pr_itemoffset = ioff = ioff % align;
484: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
485:
486: /*
487: * Use the slack between the chunks and the page header
488: * for "cache coloring".
489: */
490: slack = off - pp->pr_itemsperpage * pp->pr_size;
491: pp->pr_maxcolor = (slack / align) * align;
492: pp->pr_curcolor = 0;
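/*
 * Worked example (illustrative, assuming ALIGN() rounds to 8 bytes):
 * pagesz = 4096 and size = 100 give pr_size = ALIGN(100) = 104; with
 * an off-page header, off = 4096 and ioff = 0, so pr_itemsperpage =
 * 4096 / 104 = 39, slack = 4096 - 39 * 104 = 40, and pr_maxcolor =
 * (40 / 8) * 8 = 40, i.e. successive pages start their items at
 * offsets 0, 8, ..., 40 to spread them across cache lines.
 */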
493:
494: pp->pr_nget = 0;
495: pp->pr_nfail = 0;
496: pp->pr_nput = 0;
497: pp->pr_npagealloc = 0;
498: pp->pr_npagefree = 0;
1.1 pk 499: pp->pr_hiwat = 0;
1.8 thorpej 500: pp->pr_nidle = 0;
1.3 pk 501:
1.21.2.2.2.1 thorpej 502: if (flags & PR_LOGGING) {
503: if (kmem_map == NULL ||
504: (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
505: M_TEMP, M_NOWAIT)) == NULL)
1.20 thorpej 506: pp->pr_roflags &= ~PR_LOGGING;
1.3 pk 507: pp->pr_curlogentry = 0;
508: pp->pr_logsize = pool_logsize;
509: }
1.21.2.2.2.1 thorpej 510:
511: pp->pr_entered_file = NULL;
512: pp->pr_entered_line = 0;
1.3 pk 513:
1.21 thorpej 514: simple_lock_init(&pp->pr_slock);
1.1 pk 515:
1.3 pk 516: /*
517: * Initialize private page header pool if we haven't done so yet.
1.21.2.2 thorpej 518: * XXX LOCKING.
1.3 pk 519: */
520: if (phpool.pr_size == 0) {
521: pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
522: 0, "phpool", 0, 0, 0, 0);
1.1 pk 523: }
524:
1.21.2.2 thorpej 525: /* Insert into the list of all pools. */
526: simple_lock(&pool_head_slock);
527: TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
528: simple_unlock(&pool_head_slock);
1.1 pk 529: }
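/*
 * Illustrative sketch (not part of this revision): a statically
 * declared pool initialized before malloc() is available, as the
 * comment above pool_init() describes.  `struct bar' and `barpool'
 * are assumptions for the example.
 */
#if 0 /* example only */
struct bar { long b_val; };
static struct pool barpool;

void
bar_bootstrap()
{

	/* No PR_FREEHEADER: the pool descriptor itself is static. */
	pool_init(&barpool, sizeof(struct bar), 0, 0, 0, "barpl",
	    0, NULL, NULL, M_DEVBUF);
	pool_setlowat(&barpool, 16);	/* keep >= 16 items primed */
}
#endif /* example only */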
530:
531: /*
532: * De-commission a pool resource.
533: */
534: void
535: pool_destroy(pp)
536: struct pool *pp;
537: {
1.3 pk 538: struct pool_item_header *ph;
539:
540: #ifdef DIAGNOSTIC
1.20 thorpej 541: if (pp->pr_nout != 0) {
1.21.2.2.2.1 thorpej 542: pr_printlog(pp, NULL, printf);
1.20 thorpej 543: panic("pool_destroy: pool busy: still out: %u\n",
544: pp->pr_nout);
1.3 pk 545: }
546: #endif
1.1 pk 547:
1.3 pk 548: /* Remove all pages */
1.20 thorpej 549: if ((pp->pr_roflags & PR_STATIC) == 0)
1.3 pk 550: while ((ph = pp->pr_pagelist.tqh_first) != NULL)
551: pr_rmpage(pp, ph);
552:
553: /* Remove from global pool list */
1.21.2.2 thorpej 554: simple_lock(&pool_head_slock);
1.3 pk 555: TAILQ_REMOVE(&pool_head, pp, pr_poollist);
1.21.2.2 thorpej 556: /* XXX Only clear this if we were drainpp? */
1.3 pk 557: drainpp = NULL;
1.21.2.2 thorpej 558: simple_unlock(&pool_head_slock);
1.3 pk 559:
1.20 thorpej 560: if ((pp->pr_roflags & PR_LOGGING) != 0)
1.3 pk 561: free(pp->pr_log, M_TEMP);
1.2 pk 562:
1.20 thorpej 563: if (pp->pr_roflags & PR_FREEHEADER)
1.3 pk 564: free(pp, M_POOL);
1.1 pk 565: }
566:
567:
568: /*
1.3 pk 569: * Grab an item from the pool; must be called at appropriate spl level
1.1 pk 570: */
1.3 pk 571: void *
572: _pool_get(pp, flags, file, line)
573: struct pool *pp;
574: int flags;
575: const char *file;
576: long line;
1.1 pk 577: {
578: void *v;
579: struct pool_item *pi;
1.3 pk 580: struct pool_item_header *ph;
1.1 pk 581:
1.2 pk 582: #ifdef DIAGNOSTIC
1.20 thorpej 583: if ((pp->pr_roflags & PR_STATIC) && (flags & PR_MALLOCOK)) {
1.21.2.2.2.1 thorpej 584: pr_printlog(pp, NULL, printf);
1.2 pk 585: panic("pool_get: static");
1.3 pk 586: }
1.2 pk 587: #endif
588:
1.3 pk 589: if (curproc == NULL && (flags & PR_WAITOK) != 0)
590: panic("pool_get: must have NOWAIT");
1.1 pk 591:
1.21 thorpej 592: simple_lock(&pp->pr_slock);
1.21.2.2.2.1 thorpej 593: pr_enter(pp, file, line);
1.20 thorpej 594:
595: startover:
596: /*
597: * Check to see if we've reached the hard limit. If we have,
598: * and we can wait, then wait until an item has been returned to
599: * the pool.
600: */
601: #ifdef DIAGNOSTIC
602: if (pp->pr_nout > pp->pr_hardlimit) {
1.21.2.2.2.1 thorpej 603: pr_leave(pp);
1.21 thorpej 604: simple_unlock(&pp->pr_slock);
1.20 thorpej 605: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
606: }
607: #endif
608: if (pp->pr_nout == pp->pr_hardlimit) {
609: if (flags & PR_WAITOK) {
610: /*
611: * XXX: A warning isn't logged in this case. Should
612: * it be?
613: */
614: pp->pr_flags |= PR_WANTED;
1.21.2.2.2.1 thorpej 615: pr_leave(pp);
1.21 thorpej 616: simple_unlock(&pp->pr_slock);
1.20 thorpej 617: tsleep((caddr_t)pp, PSWP, pp->pr_wchan, 0);
1.21 thorpej 618: simple_lock(&pp->pr_slock);
1.21.2.2.2.1 thorpej 619: pr_enter(pp, file, line);
1.20 thorpej 620: goto startover;
621: }
622: if (pp->pr_hardlimit_warning != NULL) {
623: /*
624: * Log a message that the hard limit has been hit.
625: */
626: struct timeval curtime, logdiff;
627: int s = splclock();
628: curtime = mono_time;
629: splx(s);
630: timersub(&curtime, &pp->pr_hardlimit_warning_last,
631: &logdiff);
632: if (logdiff.tv_sec >= pp->pr_hardlimit_ratecap) {
633: pp->pr_hardlimit_warning_last = curtime;
634: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
635: }
636: }
1.21 thorpej 637:
638: if (flags & PR_URGENT)
639: panic("pool_get: urgent");
640:
641: pp->pr_nfail++;
642:
1.21.2.2.2.1 thorpej 643: pr_leave(pp);
1.21 thorpej 644: simple_unlock(&pp->pr_slock);
1.20 thorpej 645: return (NULL);
646: }
647:
1.3 pk 648: /*
649: * The convention we use is that if `curpage' is not NULL, then
650: * it points at a non-empty bucket. In particular, `curpage'
651: * never points at a page header which has PR_PHINPAGE set and
652: * has no items in its bucket.
653: */
1.20 thorpej 654: if ((ph = pp->pr_curpage) == NULL) {
1.15 pk 655: void *v;
656:
1.20 thorpej 657: #ifdef DIAGNOSTIC
658: if (pp->pr_nitems != 0) {
1.21 thorpej 659: simple_unlock(&pp->pr_slock);
1.20 thorpej 660: printf("pool_get: %s: curpage NULL, nitems %u\n",
661: pp->pr_wchan, pp->pr_nitems);
662: panic("pool_get: nitems inconsistent\n");
663: }
664: #endif
665:
1.21 thorpej 666: /*
667: * Call the back-end page allocator for more memory.
668: * Release the pool lock, as the back-end page allocator
669: * may block.
670: */
1.21.2.2.2.1 thorpej 671: pr_leave(pp);
1.21 thorpej 672: simple_unlock(&pp->pr_slock);
673: v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);
674: simple_lock(&pp->pr_slock);
1.21.2.2.2.1 thorpej 675: pr_enter(pp, file, line);
1.15 pk 676:
1.21 thorpej 677: if (v == NULL) {
678: /*
679: * We were unable to allocate a page, but
680: * we released the lock during allocation,
681: * so perhaps items were freed back to the
682: * pool. Check for this case.
683: */
684: if (pp->pr_curpage != NULL)
685: goto startover;
1.15 pk 686:
1.3 pk 687: if (flags & PR_URGENT)
688: panic("pool_get: urgent");
1.21 thorpej 689:
1.3 pk 690: if ((flags & PR_WAITOK) == 0) {
691: pp->pr_nfail++;
1.21.2.2.2.1 thorpej 692: pr_leave(pp);
1.21 thorpej 693: simple_unlock(&pp->pr_slock);
1.1 pk 694: return (NULL);
1.3 pk 695: }
696:
1.15 pk 697: /*
698: * Wait for items to be returned to this pool.
1.21 thorpej 699: *
1.15 pk 700: * XXX: we actually want to wait just until
701: * the page allocator has memory again. Depending
702: * on this pool's usage, we might get stuck here
703: * for a long time.
1.20 thorpej 704: *
705: * XXX: maybe we should wake up once a second and
706: * try again?
1.15 pk 707: */
1.1 pk 708: pp->pr_flags |= PR_WANTED;
1.21.2.2.2.1 thorpej 709: pr_leave(pp);
1.21 thorpej 710: simple_unlock(&pp->pr_slock);
1.1 pk 711: tsleep((caddr_t)pp, PSWP, pp->pr_wchan, 0);
1.21 thorpej 712: simple_lock(&pp->pr_slock);
1.21.2.2.2.1 thorpej 713: pr_enter(pp, file, line);
1.20 thorpej 714: goto startover;
1.1 pk 715: }
1.3 pk 716:
1.15 pk 717: /* We have more memory; add it to the pool */
718: pp->pr_npagealloc++;
719: pool_prime_page(pp, v);
720:
1.20 thorpej 721: /* Start the allocation process over. */
722: goto startover;
1.3 pk 723: }
724:
1.21 thorpej 725: if ((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL) {
1.21.2.2.2.1 thorpej 726: pr_leave(pp);
1.21 thorpej 727: simple_unlock(&pp->pr_slock);
1.3 pk 728: panic("pool_get: %s: page empty", pp->pr_wchan);
1.21 thorpej 729: }
1.20 thorpej 730: #ifdef DIAGNOSTIC
731: if (pp->pr_nitems == 0) {
1.21.2.2.2.1 thorpej 732: pr_leave(pp);
1.21 thorpej 733: simple_unlock(&pp->pr_slock);
1.20 thorpej 734: printf("pool_get: %s: items on itemlist, nitems %u\n",
735: pp->pr_wchan, pp->pr_nitems);
736: panic("pool_get: nitems inconsistent\n");
737: }
738: #endif
1.3 pk 739: pr_log(pp, v, PRLOG_GET, file, line);
740:
741: #ifdef DIAGNOSTIC
742: if (pi->pi_magic != PI_MAGIC) {
1.21.2.2.2.1 thorpej 743: pr_printlog(pp, pi, printf);
1.3 pk 744: panic("pool_get(%s): free list modified: magic=%x; page %p;"
745: " item addr %p\n",
746: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
747: }
748: #endif
749:
750: /*
751: * Remove from item list.
752: */
753: TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
1.20 thorpej 754: pp->pr_nitems--;
755: pp->pr_nout++;
1.6 thorpej 756: if (ph->ph_nmissing == 0) {
757: #ifdef DIAGNOSTIC
758: if (pp->pr_nidle == 0)
759: panic("pool_get: nidle inconsistent");
760: #endif
761: pp->pr_nidle--;
762: }
1.3 pk 763: ph->ph_nmissing++;
764: if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
1.21 thorpej 765: #ifdef DIAGNOSTIC
766: if (ph->ph_nmissing != pp->pr_itemsperpage) {
1.21.2.2.2.1 thorpej 767: pr_leave(pp);
1.21 thorpej 768: simple_unlock(&pp->pr_slock);
769: panic("pool_get: %s: nmissing inconsistent",
770: pp->pr_wchan);
771: }
772: #endif
1.3 pk 773: /*
774: * Find a new non-empty page header, if any.
775: * Start search from the page head, to increase
776: * the chance for "high water" pages to be freed.
777: *
1.21 thorpej 778: * Migrate empty pages to the end of the list. This
779: * will speed the update of curpage as pages become
780: * idle. Empty pages intermingled with idle pages
781: * are no big deal. As soon as a page becomes un-empty,
782: * it will move back to the head of the list.
1.3 pk 783: */
784: TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
1.21 thorpej 785: TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
786: for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
787: ph = TAILQ_NEXT(ph, ph_pagelist))
1.3 pk 788: if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
789: break;
790:
791: pp->pr_curpage = ph;
1.1 pk 792: }
1.3 pk 793:
794: pp->pr_nget++;
1.20 thorpej 795:
796: /*
797: * If we have a low water mark and we are now below that low
798: * water mark, add more items to the pool.
799: */
800: if (pp->pr_nitems < pp->pr_minitems && pool_catchup(pp) != 0) {
801: /*
802: * XXX: Should we log a warning? Should we set up a timeout
803: * to try again in a second or so? The latter could break
804: * a caller's assumptions about interrupt protection, etc.
805: */
806: }
807:
1.21.2.2.2.1 thorpej 808: pr_leave(pp);
1.21 thorpej 809: simple_unlock(&pp->pr_slock);
1.1 pk 810: return (v);
811: }
812:
813: /*
1.3 pk 814: * Return resource to the pool; must be called at appropriate spl level
1.1 pk 815: */
1.3 pk 816: void
817: _pool_put(pp, v, file, line)
818: struct pool *pp;
819: void *v;
820: const char *file;
821: long line;
1.1 pk 822: {
823: struct pool_item *pi = v;
1.3 pk 824: struct pool_item_header *ph;
825: caddr_t page;
1.21 thorpej 826: int s;
1.3 pk 827:
828: page = (caddr_t)((u_long)v & pp->pr_pagemask);
1.1 pk 829:
1.21 thorpej 830: simple_lock(&pp->pr_slock);
1.21.2.2.2.1 thorpej 831: pr_enter(pp, file, line);
1.3 pk 832:
833: pr_log(pp, v, PRLOG_PUT, file, line);
834:
835: if ((ph = pr_find_pagehead(pp, page)) == NULL) {
1.21.2.2.2.1 thorpej 836: pr_printlog(pp, NULL, printf);
1.3 pk 837: panic("pool_put: %s: page header missing", pp->pr_wchan);
838: }
839:
840: /*
841: * Return to item list.
842: */
1.2 pk 843: #ifdef DIAGNOSTIC
1.3 pk 844: pi->pi_magic = PI_MAGIC;
1.21.2.2.2.2! chs 845: #endif
! 846: #ifdef DEBUG
! 847: {
! 848: int i, *ip = v;
! 849: /* Scribble over the freed item to catch modify-after-free. */
! 850: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
! 851: *ip++ = PI_MAGIC;
! 852: }
! 853: }
1.3 pk 854: #endif
855: TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
856: ph->ph_nmissing--;
857: pp->pr_nput++;
1.20 thorpej 858: pp->pr_nitems++;
859: pp->pr_nout--;
1.3 pk 860:
861: /* Cancel "pool empty" condition if it exists */
862: if (pp->pr_curpage == NULL)
863: pp->pr_curpage = ph;
864:
865: if (pp->pr_flags & PR_WANTED) {
866: pp->pr_flags &= ~PR_WANTED;
1.15 pk 867: if (ph->ph_nmissing == 0)
868: pp->pr_nidle++;
1.21.2.2.2.1 thorpej 869: pr_leave(pp);
1.21 thorpej 870: simple_unlock(&pp->pr_slock);
1.3 pk 871: wakeup((caddr_t)pp);
872: return;
873: }
874:
875: /*
1.21 thorpej 876: * If this page is now complete, do one of two things:
877: *
878: * (1) If we have more pages than the page high water
879: * mark, free the page back to the system.
880: *
881: * (2) Move it to the end of the page list, so that
882: * we minimize our chances of fragmenting the
883: * pool. Idle pages migrate to the end of the list
884: * (along with completely empty pages, so that we
885: * find un-empty pages more quickly when we update
886: * curpage) where they can be more easily swept up by
887: * the pagedaemon when pages are scarce.
1.3 pk 888: */
889: if (ph->ph_nmissing == 0) {
1.6 thorpej 890: pp->pr_nidle++;
1.3 pk 891: if (pp->pr_npages > pp->pr_maxpages) {
892: pr_rmpage(pp, ph);
893: } else {
894: TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
895: TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
896:
1.21 thorpej 897: /*
898: * Update the timestamp on the page. A page must
899: * be idle for some period of time before it can
900: * be reclaimed by the pagedaemon. This minimizes
901: * ping-pong'ing for memory.
902: */
903: s = splclock();
904: ph->ph_time = mono_time;
905: splx(s);
906:
907: /*
908: * Update the current page pointer. Just look for
909: * the first page with any free items.
910: *
911: * XXX: Maybe we want an option to look for the
912: * page with the fewest available items, to minimize
913: * fragmentation?
914: */
1.3 pk 915: for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
916: ph = TAILQ_NEXT(ph, ph_pagelist))
917: if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
918: break;
1.1 pk 919:
1.3 pk 920: pp->pr_curpage = ph;
1.1 pk 921: }
922: }
1.21 thorpej 923: /*
924: * If the page has just become un-empty, move it to the head of
925: * the list, and make it the current page. The next allocation
926: * will get the item from this page, instead of further fragmenting
927: * the pool.
928: */
929: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
930: TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
931: TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
932: pp->pr_curpage = ph;
933: }
934:
1.21.2.2.2.1 thorpej 935: pr_leave(pp);
1.21 thorpej 936: simple_unlock(&pp->pr_slock);
1.3 pk 937:
1.1 pk 938: }
939:
940: /*
1.3 pk 941: * Add N items to the pool.
1.1 pk 942: */
943: int
1.2 pk 944: pool_prime(pp, n, storage)
1.1 pk 945: struct pool *pp;
946: int n;
1.2 pk 947: caddr_t storage;
1.1 pk 948: {
1.3 pk 949: caddr_t cp;
950: int newnitems, newpages;
1.2 pk 951:
952: #ifdef DIAGNOSTIC
1.20 thorpej 953: if (storage && !(pp->pr_roflags & PR_STATIC))
1.2 pk 954: panic("pool_prime: static");
955: /* !storage && static caught below */
956: #endif
1.1 pk 957:
1.21 thorpej 958: simple_lock(&pp->pr_slock);
959:
1.3 pk 960: newnitems = pp->pr_minitems + n;
961: newpages =
1.18 thorpej 962: roundup(newnitems, pp->pr_itemsperpage) / pp->pr_itemsperpage
1.3 pk 963: - pp->pr_minpages;
964:
965: while (newpages-- > 0) {
1.20 thorpej 966: if (pp->pr_roflags & PR_STATIC) {
1.3 pk 967: cp = storage;
968: storage += pp->pr_pagesz;
969: } else {
1.21 thorpej 970: simple_unlock(&pp->pr_slock);
1.3 pk 971: cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype);
1.21 thorpej 972: simple_lock(&pp->pr_slock);
1.3 pk 973: }
1.2 pk 974:
1.3 pk 975: if (cp == NULL) {
1.21 thorpej 976: simple_unlock(&pp->pr_slock);
1.1 pk 977: return (ENOMEM);
978: }
979:
1.21.2.2.2.1 thorpej 980: pp->pr_npagealloc++;
1.3 pk 981: pool_prime_page(pp, cp);
982: pp->pr_minpages++;
1.1 pk 983: }
1.3 pk 984:
985: pp->pr_minitems = newnitems;
986:
987: if (pp->pr_minpages >= pp->pr_maxpages)
988: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
989:
1.21 thorpej 990: simple_unlock(&pp->pr_slock);
1.1 pk 991: return (0);
992: }
1.3 pk 993:
994: /*
995: * Add a page worth of items to the pool.
1.21 thorpej 996: *
997: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 998: */
1.21 thorpej 999: static void
1.3 pk 1000: pool_prime_page(pp, storage)
1001: struct pool *pp;
1002: caddr_t storage;
1003: {
1004: struct pool_item *pi;
1005: struct pool_item_header *ph;
1006: caddr_t cp = storage;
1007: unsigned int align = pp->pr_align;
1008: unsigned int ioff = pp->pr_itemoffset;
1.21.2.2.2.1 thorpej 1009: int s, n;
1.3 pk 1010:
1.20 thorpej 1011: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
1.3 pk 1012: ph = (struct pool_item_header *)(cp + pp->pr_phoffset);
1013: } else {
1.21.2.2.2.1 thorpej 1014: s = splhigh();
1.3 pk 1015: ph = pool_get(&phpool, PR_URGENT);
1.21.2.2.2.1 thorpej 1016: splx(s);
1.3 pk 1017: LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
1018: ph, ph_hashlist);
1019: }
1020:
1021: /*
1022: * Insert page header.
1023: */
1024: TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
1025: TAILQ_INIT(&ph->ph_itemlist);
1026: ph->ph_page = storage;
1027: ph->ph_nmissing = 0;
1.21 thorpej 1028: memset(&ph->ph_time, 0, sizeof(ph->ph_time));
1.3 pk 1029:
1.6 thorpej 1030: pp->pr_nidle++;
1031:
1.3 pk 1032: /*
1033: * Color this page.
1034: */
1035: cp = (caddr_t)(cp + pp->pr_curcolor);
1036: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1037: pp->pr_curcolor = 0;
1038:
1039: /*
1040: * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1041: */
1042: if (ioff != 0)
1043: cp = (caddr_t)(cp + (align - ioff));
1044:
1045: /*
1046: * Insert remaining chunks on the bucket list.
1047: */
1048: n = pp->pr_itemsperpage;
1.20 thorpej 1049: pp->pr_nitems += n;
1.3 pk 1050:
1051: while (n--) {
1052: pi = (struct pool_item *)cp;
1053:
1054: /* Insert on page list */
1055: TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
1056: #ifdef DIAGNOSTIC
1057: pi->pi_magic = PI_MAGIC;
1058: #endif
1059: cp = (caddr_t)(cp + pp->pr_size);
1060: }
1061:
1062: /*
1063: * If the pool was depleted, point at the new page.
1064: */
1065: if (pp->pr_curpage == NULL)
1066: pp->pr_curpage = ph;
1067:
1068: if (++pp->pr_npages > pp->pr_hiwat)
1069: pp->pr_hiwat = pp->pr_npages;
1070: }
1071:
1.20 thorpej 1072: /*
1073: * Like pool_prime(), except this is used by pool_get() when nitems
1074: * drops below the low water mark. This is used to catch up nitems
1075: * with the low water mark.
1076: *
1.21 thorpej 1077: * Note 1, we never wait for memory here; we let the caller decide what to do.
1.20 thorpej 1078: *
1079: * Note 2, this doesn't work with static pools.
1080: *
1081: * Note 3, we must be called with the pool already locked, and we return
1082: * with it locked.
1083: */
1084: static int
1085: pool_catchup(pp)
1086: struct pool *pp;
1087: {
1088: caddr_t cp;
1089: int error = 0;
1090:
1091: if (pp->pr_roflags & PR_STATIC) {
1092: /*
1093: * We dropped below the low water mark, and this is not a
1094: * good thing. Log a warning.
1.21 thorpej 1095: *
1096: * XXX: rate-limit this?
1.20 thorpej 1097: */
1098: printf("WARNING: static pool `%s' dropped below low water "
1099: "mark\n", pp->pr_wchan);
1100: return (0);
1101: }
1102:
1.21 thorpej 1103: while (pp->pr_nitems < pp->pr_minitems) {
1.20 thorpej 1104: /*
1.21 thorpej 1105: * Call the page back-end allocator for more memory.
1106: *
1107: * XXX: We never wait, so should we bother unlocking
1108: * the pool descriptor?
1.20 thorpej 1109: */
1.21 thorpej 1110: simple_unlock(&pp->pr_slock);
1.20 thorpej 1111: cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype);
1.21 thorpej 1112: simple_lock(&pp->pr_slock);
1.20 thorpej 1113: if (cp == NULL) {
1114: error = ENOMEM;
1115: break;
1116: }
1.21.2.2.2.1 thorpej 1117: pp->pr_npagealloc++;
1.20 thorpej 1118: pool_prime_page(pp, cp);
1119: }
1120:
1121: return (error);
1122: }
1123:
1.3 pk 1124: void
1125: pool_setlowat(pp, n)
1126: pool_handle_t pp;
1127: int n;
1128: {
1.20 thorpej 1129: int error;
1.15 pk 1130:
1.21 thorpej 1131: simple_lock(&pp->pr_slock);
1132:
1.3 pk 1133: pp->pr_minitems = n;
1.15 pk 1134: pp->pr_minpages = (n == 0)
1135: ? 0
1.18 thorpej 1136: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1137:
1138: /* Make sure we're caught up with the newly-set low water mark. */
1.21 thorpej 1139: if ((error = pool_catchup(pp)) != 0) {
1.20 thorpej 1140: /*
1141: * XXX: Should we log a warning? Should we set up a timeout
1142: * to try again in a second or so? The latter could break
1143: * a caller's assumptions about interrupt protection, etc.
1144: */
1145: }
1.21 thorpej 1146:
1147: simple_unlock(&pp->pr_slock);
1.3 pk 1148: }
1149:
1150: void
1151: pool_sethiwat(pp, n)
1152: pool_handle_t pp;
1153: int n;
1154: {
1.15 pk 1155:
1.21 thorpej 1156: simple_lock(&pp->pr_slock);
1157:
1.15 pk 1158: pp->pr_maxpages = (n == 0)
1159: ? 0
1.18 thorpej 1160: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1161:
1162: simple_unlock(&pp->pr_slock);
1.3 pk 1163: }
1164:
1.20 thorpej 1165: void
1166: pool_sethardlimit(pp, n, warnmess, ratecap)
1167: pool_handle_t pp;
1168: int n;
1169: const char *warnmess;
1170: int ratecap;
1171: {
1172:
1.21 thorpej 1173: simple_lock(&pp->pr_slock);
1.20 thorpej 1174:
1175: pp->pr_hardlimit = n;
1176: pp->pr_hardlimit_warning = warnmess;
1177: pp->pr_hardlimit_ratecap = ratecap;
1178: memset(&pp->pr_hardlimit_warning_last, 0,
1179: sizeof(pp->pr_hardlimit_warning_last));
1180:
1181: /*
1.21 thorpej 1182: * In-line version of pool_sethiwat(), because we don't want to
1183: * release the lock.
1.20 thorpej 1184: */
1185: pp->pr_maxpages = (n == 0)
1186: ? 0
1187: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1188:
1189: simple_unlock(&pp->pr_slock);
1.20 thorpej 1190: }
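/*
 * Illustrative sketch (not part of this revision): tuning the water
 * marks and hard limit on an existing pool.  The numbers and warning
 * text are assumptions for the example.
 */
#if 0 /* example only */
void
pool_tune_example(pp)
	struct pool *pp;
{

	pool_setlowat(pp, 32);		/* pre-allocate down to 32 items */
	pool_sethiwat(pp, 512);		/* free idle pages beyond 512 items */
	pool_sethardlimit(pp, 1024,
	    "WARNING: example pool limit reached", 60 /* seconds */);
}
#endif /* example only */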
1.3 pk 1191:
1192: /*
1193: * Default page allocator.
1194: */
1195: static void *
1196: pool_page_alloc(sz, flags, mtype)
1197: unsigned long sz;
1198: int flags;
1199: int mtype;
1200: {
1.11 thorpej 1201: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
1.3 pk 1202:
1.11 thorpej 1203: return ((void *)uvm_km_alloc_poolpage(waitok));
1.3 pk 1204: }
1205:
1206: static void
1207: pool_page_free(v, sz, mtype)
1208: void *v;
1209: unsigned long sz;
1210: int mtype;
1211: {
1212:
1.10 eeh 1213: uvm_km_free_poolpage((vaddr_t)v);
1.3 pk 1214: }
1.12 thorpej 1215:
1216: /*
1217: * Alternate pool page allocator for pools that know they will
1218: * never be accessed in interrupt context.
1219: */
1220: void *
1221: pool_page_alloc_nointr(sz, flags, mtype)
1222: unsigned long sz;
1223: int flags;
1224: int mtype;
1225: {
1226: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
1227:
1228: return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
1229: waitok));
1230: }
1231:
1232: void
1233: pool_page_free_nointr(v, sz, mtype)
1234: void *v;
1235: unsigned long sz;
1236: int mtype;
1237: {
1238:
1239: uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
1240: }
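/*
 * Illustrative sketch (not part of this revision): a pool that is
 * never touched from interrupt context can pass the `_nointr'
 * allocators above to pool_create().  `struct baz' and `bazpool' are
 * assumptions for the example.
 */
#if 0 /* example only */
struct baz { char b_buf[128]; };
static struct pool *bazpool;

void
baz_init()
{

	bazpool = pool_create(sizeof(struct baz), 0, 0, 0, "bazpl",
	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_TEMP);
}
#endif /* example only */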
1241:
1.3 pk 1242:
1243: /*
1244: * Release all complete pages that have not been used recently.
1245: */
1246: void
1.21.2.2.2.1 thorpej 1247: _pool_reclaim(pp, file, line)
1.3 pk 1248: pool_handle_t pp;
1.21.2.2.2.1 thorpej 1249: const char *file;
1250: long line;
1.3 pk 1251: {
1252: struct pool_item_header *ph, *phnext;
1.21 thorpej 1253: struct timeval curtime;
1254: int s;
1.3 pk 1255:
1.20 thorpej 1256: if (pp->pr_roflags & PR_STATIC)
1.3 pk 1257: return;
1258:
1.21 thorpej 1259: if (simple_lock_try(&pp->pr_slock) == 0)
1.3 pk 1260: return;
1.21.2.2.2.1 thorpej 1261: pr_enter(pp, file, line);
1.3 pk 1262:
1.21 thorpej 1263: s = splclock();
1264: curtime = mono_time;
1265: splx(s);
1266:
1.3 pk 1267: for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
1268: phnext = TAILQ_NEXT(ph, ph_pagelist);
1269:
1270: /* Check our minimum page claim */
1271: if (pp->pr_npages <= pp->pr_minpages)
1272: break;
1273:
1274: if (ph->ph_nmissing == 0) {
1275: struct timeval diff;
1276: timersub(&curtime, &ph->ph_time, &diff);
1277: if (diff.tv_sec < pool_inactive_time)
1278: continue;
1.21 thorpej 1279:
1280: /*
1281: * If freeing this page would put us below
1282: * the low water mark, stop now.
1283: */
1284: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1285: pp->pr_minitems)
1286: break;
1287:
1.3 pk 1288: pr_rmpage(pp, ph);
1289: }
1290: }
1291:
1.21.2.2.2.1 thorpej 1292: pr_leave(pp);
1.21 thorpej 1293: simple_unlock(&pp->pr_slock);
1.3 pk 1294: }
1295:
1296:
1297: /*
1298: * Drain pools, one at a time.
1.21 thorpej 1299: *
1300: * Note, we must never be called from an interrupt context.
1.3 pk 1301: */
1302: void
1303: pool_drain(arg)
1304: void *arg;
1305: {
1306: struct pool *pp;
1.21.2.2 thorpej 1307: int s;
1.3 pk 1308:
1.21.2.2 thorpej 1309: s = splimp();
1310: simple_lock(&pool_head_slock);
1311:
1312: if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL)
1313: goto out;
1.3 pk 1314:
1315: pp = drainpp;
1316: drainpp = TAILQ_NEXT(pp, pr_poollist);
1317:
1318: pool_reclaim(pp);
1.21.2.2 thorpej 1319:
1320: out:
1321: simple_unlock(&pool_head_slock);
1.3 pk 1322: splx(s);
1323: }
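/*
 * Illustrative note: a low-memory handler such as the pagedaemon is
 * the intended caller here, e.g. pool_drain(NULL) once per reclaim
 * pass; each pass advances `drainpp', so pools are reclaimed from in
 * round-robin fashion rather than all at once.
 */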
1324:
1325:
1326: /*
1327: * Diagnostic helpers.
1328: */
1329: void
1.21.2.2.2.1 thorpej 1330: pool_print(pp, modif)
1.3 pk 1331: struct pool *pp;
1.21.2.2.2.1 thorpej 1332: const char *modif;
1.21 thorpej 1333: {
1334: int s;
1335:
1336: s = splimp();
1.21.2.2.2.1 thorpej 1337: if (simple_lock_try(&pp->pr_slock) == 0) {
1338: printf("pool %s is locked; try again later\n",
1339: pp->pr_wchan);
1340: splx(s);
1341: return;
1342: }
1343: pool_print1(pp, modif, printf);
1.21 thorpej 1344: simple_unlock(&pp->pr_slock);
1345: splx(s);
1346: }
1347:
1.21.2.2.2.1 thorpej 1348: void
1349: pool_printit(pp, modif, pr)
1350: struct pool *pp;
1351: const char *modif;
1352: void (*pr) __P((const char *, ...));
1353: {
1354: int didlock = 0;
1355:
1356: if (pp == NULL) {
1357: (*pr)("Must specify a pool to print.\n");
1358: return;
1359: }
1360:
1361: /*
1362: * Called from DDB; interrupts should be blocked, and all
1363: * other processors should be paused. We can skip locking
1364: * the pool in this case.
1365: *
1366: * We do a simple_lock_try() just to print the lock
1367: * status, however.
1368: */
1369:
1370: if (simple_lock_try(&pp->pr_slock) == 0)
1371: (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1372: else
1373: didlock = 1;
1374:
1375: pool_print1(pp, modif, pr);
1376:
1377: if (didlock)
1378: simple_unlock(&pp->pr_slock);
1379: }
1380:
1.21 thorpej 1381: static void
1.21.2.2.2.1 thorpej 1382: pool_print1(pp, modif, pr)
1.21 thorpej 1383: struct pool *pp;
1.21.2.2.2.1 thorpej 1384: const char *modif;
1385: void (*pr) __P((const char *, ...));
1.3 pk 1386: {
1.21.2.2.2.1 thorpej 1387: struct pool_item_header *ph;
1388: #ifdef DIAGNOSTIC
1389: struct pool_item *pi;
1390: #endif
1391: int print_log = 0, print_pagelist = 0;
1392: char c;
1393:
1394: while ((c = *modif++) != '\0') {
1395: if (c == 'l')
1396: print_log = 1;
1397: if (c == 'p')
1398: print_pagelist = 1;
1400: }
1401:
1402: (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1403: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1404: pp->pr_roflags);
1405: (*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype);
1406: (*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free);
1407: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1408: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1409: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1410: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1411:
1412: (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1413: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1414: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1415: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1416:
1417: if (print_pagelist == 0)
1418: goto skip_pagelist;
1419:
1420: if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
1421: (*pr)("\n\tpage list:\n");
1422: for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
1423: (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1424: ph->ph_page, ph->ph_nmissing,
1425: (u_long)ph->ph_time.tv_sec,
1426: (u_long)ph->ph_time.tv_usec);
1427: #ifdef DIAGNOSTIC
1428: for (pi = TAILQ_FIRST(&ph->ph_itemlist); pi != NULL;
1429: pi = TAILQ_NEXT(pi, pi_list)) {
1430: if (pi->pi_magic != PI_MAGIC) {
1431: (*pr)("\t\t\titem %p, magic 0x%x\n",
1432: pi, pi->pi_magic);
1433: }
1434: }
1435: #endif
1436: }
1437: if (pp->pr_curpage == NULL)
1438: (*pr)("\tno current page\n");
1439: else
1440: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1441:
1442: skip_pagelist:
1443:
1444: if (print_log == 0)
1445: goto skip_log;
1446:
1447: (*pr)("\n");
1448: if ((pp->pr_roflags & PR_LOGGING) == 0)
1449: (*pr)("\tno log\n");
1450: else
1451: pr_printlog(pp, NULL, pr);
1.3 pk 1452:
1.21.2.2.2.1 thorpej 1453: skip_log:
1.3 pk 1454:
1.21.2.2.2.1 thorpej 1455: pr_enter_check(pp, pr);
1.3 pk 1456: }
1457:
1458: int
1459: pool_chk(pp, label)
1460: struct pool *pp;
1461: char *label;
1462: {
1463: struct pool_item_header *ph;
1464: int r = 0;
1465:
1.21 thorpej 1466: simple_lock(&pp->pr_slock);
1.3 pk 1467:
1468: for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
1469: ph = TAILQ_NEXT(ph, ph_pagelist)) {
1470:
1471: struct pool_item *pi;
1472: int n;
1473: caddr_t page;
1474:
1475: page = (caddr_t)((u_long)ph & pp->pr_pagemask);
1.20 thorpej 1476: if (page != ph->ph_page &&
1477: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1.3 pk 1478: if (label != NULL)
1479: printf("%s: ", label);
1.16 briggs 1480: printf("pool(%p:%s): page inconsistency: page %p;"
1481: " at page head addr %p (p %p)\n", pp,
1.3 pk 1482: pp->pr_wchan, ph->ph_page,
1483: ph, page);
1484: r++;
1485: goto out;
1486: }
1487:
1488: for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1489: pi != NULL;
1490: pi = TAILQ_NEXT(pi,pi_list), n++) {
1491:
1492: #ifdef DIAGNOSTIC
1493: if (pi->pi_magic != PI_MAGIC) {
1494: if (label != NULL)
1495: printf("%s: ", label);
1496: printf("pool(%s): free list modified: magic=%x;"
1497: " page %p; item ordinal %d;"
1498: " addr %p (p %p)\n",
1499: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1500: n, pi, page);
1501: panic("pool");
1502: }
1503: #endif
1504: page = (caddr_t)((u_long)pi & pp->pr_pagemask);
1505: if (page == ph->ph_page)
1506: continue;
1507:
1508: if (label != NULL)
1509: printf("%s: ", label);
1.16 briggs 1510: printf("pool(%p:%s): page inconsistency: page %p;"
1511: " item ordinal %d; addr %p (p %p)\n", pp,
1.3 pk 1512: pp->pr_wchan, ph->ph_page,
1513: n, pi, page);
1514: r++;
1515: goto out;
1516: }
1517: }
1518: out:
1.21 thorpej 1519: simple_unlock(&pp->pr_slock);
1.3 pk 1520: return (r);
1521: }