--- version 1.198.2.3, 2014/08/20 00:04:29
+++ version 1.198.2.4, 2017/12/03 11:38:45

 /* $NetBSD$ */

 /*-
- * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014
+ * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015
  *     The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
- * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
+ * Simulation Facility, NASA Ames Research Center; by Andrew Doran, and by
+ * Maxime Villard.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions

 #include <sys/cdefs.h>
 __KERNEL_RCSID(0, "$NetBSD$");

+#ifdef _KERNEL_OPT
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
+#endif

 #include <sys/param.h>
 #include <sys/systm.h>

@@ -82 +85 @@ static struct pool phpool[PHPOOL_MAX];
 static struct pool psppool;
 #endif

+#ifdef POOL_REDZONE
+# define POOL_REDZONE_SIZE 2
+static void pool_redzone_init(struct pool *, size_t);
+static void pool_redzone_fill(struct pool *, void *);
+static void pool_redzone_check(struct pool *, void *);
+#else
+# define pool_redzone_init(pp, sz)	/* NOTHING */
+# define pool_redzone_fill(pp, ptr)	/* NOTHING */
+# define pool_redzone_check(pp, ptr)	/* NOTHING */
+#endif
+
 static void *pool_page_alloc_meta(struct pool *, int);
 static void pool_page_free_meta(struct pool *, void *);

@@ -92 +106 @@ struct pool_allocator pool_allocator_met
 	.pa_pagesz = 0
 };

+#define POOL_ALLOCATOR_BIG_BASE 13
+extern struct pool_allocator pool_allocator_big[];
+static int pool_bigidx(size_t);
+
 /* # of seconds to retain page after last use */
 int pool_inactive_time = 10;

@@ -368 +386 @@ pr_rmpage(struct pool *pp, struct pool_i
 	 * If the page was idle, decrement the idle page count.
 	 */
 	if (ph->ph_nmissing == 0) {
-#ifdef DIAGNOSTIC
-		if (pp->pr_nidle == 0)
-			panic("pr_rmpage: nidle inconsistent");
-		if (pp->pr_nitems < pp->pr_itemsperpage)
-			panic("pr_rmpage: nitems inconsistent");
-#endif
+		KASSERT(pp->pr_nidle != 0);
+		KASSERTMSG((pp->pr_nitems >= pp->pr_itemsperpage),
+		    "nitems=%u < itemsperpage=%u",
+		    pp->pr_nitems, pp->pr_itemsperpage);
 		pp->pr_nidle--;
 	}

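Most of this revision is a mechanical conversion: hand-rolled #ifdef DIAGNOSTIC blocks that printf() and panic() are replaced by KASSERT()/KASSERTMSG(), which compile away entirely in kernels built without options DIAGNOSTIC and panic with a formatted message otherwise. A minimal user-space imitation of the KASSERTMSG pattern (the real macro lives in <sys/systm.h> and panics via the kernel assertion machinery; the stub below is for illustration only):

#include <stdio.h>
#include <stdlib.h>

#define DIAGNOSTIC

#ifdef DIAGNOSTIC
#define KASSERTMSG(e, fmt, ...)						\
	do {								\
		if (!(e)) {						\
			fprintf(stderr, "panic: " fmt "\n", __VA_ARGS__); \
			abort();					\
		}							\
	} while (0)
#else
#define KASSERTMSG(e, fmt, ...)	/* nothing in a non-DIAGNOSTIC build */
#endif

int
main(void)
{
	unsigned nitems = 2, itemsperpage = 4;

	/* Fails here: prints the formatted message, then aborts. */
	KASSERTMSG(nitems >= itemsperpage,
	    "nitems=%u < itemsperpage=%u", nitems, itemsperpage);
	return 0;
}
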
@@ -459 +475 @@ pool_init(struct pool *pp, size_t size,
     const char *wchan, struct pool_allocator *palloc, int ipl)
 {
 	struct pool *pp1;
-	size_t trysize, phsize;
+	size_t trysize, phsize, prsize;
 	int off, slack;

 #ifdef DEBUG

@@ -471 +487 @@ pool_init(struct pool *pp, size_t size,
 	 */
 	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
 		if (pp == pp1)
-			panic("pool_init: pool %s already initialised",
+			panic("%s: [%s] already initialised", __func__,
 			    wchan);
 	}
 	if (__predict_true(!cold))

@@ -506 +522 @@ pool_init(struct pool *pp, size_t size,
 	if (align == 0)
 		align = ALIGN(1);

-	if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
-		size = sizeof(struct pool_item);
-
-	size = roundup(size, align);
-#ifdef DIAGNOSTIC
-	if (size > palloc->pa_pagesz)
-		panic("pool_init: pool item size (%zu) too large", size);
-#endif
+	prsize = size;
+	if ((flags & PR_NOTOUCH) == 0 && prsize < sizeof(struct pool_item))
+		prsize = sizeof(struct pool_item);
+
+	prsize = roundup(prsize, align);
+	KASSERTMSG((prsize <= palloc->pa_pagesz),
+	    "%s: [%s] pool item size (%zu) larger than page size (%u)",
+	    __func__, wchan, prsize, palloc->pa_pagesz);

 	/*
 	 * Initialize the pool structure.

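The new prsize variable stops pool_init() from clobbering its size argument: the rounded-up copy feeds the pool's internal bookkeeping, while the caller's original request is handed to pool_redzone_init() further down, which can use the padding between the two as free space for a red zone. A small worked example of the roundup() arithmetic (macro as defined in <sys/param.h>):

#include <stddef.h>
#include <stdio.h>

/* Same definition as roundup() in <sys/param.h>. */
#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

int
main(void)
{
	size_t size = 20, align = 8;
	size_t prsize = roundup(size, align);

	/* Prints 24: the 4 bytes of padding can hold part of a red zone. */
	printf("prsize = %zu\n", prsize);
	return 0;
}
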
@@ -529 +545 @@ pool_init(struct pool *pp, size_t size,
 	pp->pr_maxpages = UINT_MAX;
 	pp->pr_roflags = flags;
 	pp->pr_flags = 0;
-	pp->pr_size = size;
+	pp->pr_size = prsize;
 	pp->pr_align = align;
 	pp->pr_wchan = wchan;
 	pp->pr_alloc = palloc;

@@ -544 +560 @@ pool_init(struct pool *pp, size_t size,
 	pp->pr_drain_hook = NULL;
 	pp->pr_drain_hook_arg = NULL;
 	pp->pr_freecheck = NULL;
+	pool_redzone_init(pp, size);

 	/*
 	 * Decide whether to put the page header off page to avoid

@@ -595 +612 @@ pool_init(struct pool *pp, size_t size,
 			 * if you see this panic, consider to tweak
 			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
 			 */
-			panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
+			panic("%s: [%s] too large itemsperpage(%d) for "
+			    "PR_NOTOUCH", __func__,
 			    pp->pr_wchan, pp->pr_itemsperpage);
 		}
 		pp->pr_phpool = &phpool[idx];

@@ -682 +700 @@ pool_destroy(struct pool *pp)
 	mutex_enter(&pp->pr_lock);

 	KASSERT(pp->pr_cache == NULL);
-#ifdef DIAGNOSTIC
-	if (pp->pr_nout != 0) {
-		panic("pool_destroy: pool busy: still out: %u",
-		    pp->pr_nout);
-	}
-#endif
+	KASSERTMSG((pp->pr_nout == 0),
+	    "%s: pool busy: still out: %u", __func__, pp->pr_nout);

 	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
 	KASSERT(LIST_EMPTY(&pp->pr_partpages));

@@ -710 +722 @@ pool_set_drain_hook(struct pool *pp, voi
 {

 	/* XXX no locking -- must be used just after pool_init() */
-#ifdef DIAGNOSTIC
-	if (pp->pr_drain_hook != NULL)
-		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
-#endif
+	KASSERTMSG((pp->pr_drain_hook == NULL),
+	    "%s: [%s] already set", __func__, pp->pr_wchan);
 	pp->pr_drain_hook = fn;
 	pp->pr_drain_hook_arg = arg;
 }

@@ -724 +734 @@ pool_alloc_item_header(struct pool *pp,
 	struct pool_item_header *ph;

 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
-		ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
+		ph = (void *)((char *)storage + pp->pr_phoffset);
 	else
 		ph = pool_get(pp->pr_phpool, flags);

@@ -741 +751 @@ pool_get(struct pool *pp, int flags)
 	struct pool_item_header *ph;
 	void *v;

-#ifdef DIAGNOSTIC
-	if (pp->pr_itemsperpage == 0)
-		panic("pool_get: pool '%s': pr_itemsperpage is zero, "
-		    "pool not initialized?", pp->pr_wchan);
-	if ((cpu_intr_p() || cpu_softintr_p()) && pp->pr_ipl == IPL_NONE &&
-	    !cold && panicstr == NULL)
-		panic("pool '%s' is IPL_NONE, but called from "
-		    "interrupt context\n", pp->pr_wchan);
-#endif
+	KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK));
+	KASSERTMSG((pp->pr_itemsperpage != 0),
+	    "%s: [%s] pr_itemsperpage is zero, "
+	    "pool not initialized?", __func__, pp->pr_wchan);
+	KASSERTMSG((!(cpu_intr_p() || cpu_softintr_p())
+		|| pp->pr_ipl != IPL_NONE || cold || panicstr != NULL),
+	    "%s: [%s] is IPL_NONE, but called from interrupt context",
+	    __func__, pp->pr_wchan);

 	if (flags & PR_WAITOK) {
 		ASSERT_SLEEPABLE();
 	}

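The first new assertion is a logical exclusive-or: each ! normalizes its flag test to 0 or 1, so the != fires unless exactly one of PR_NOWAIT and PR_WAITOK was passed. A quick truth-table check (the flag values below are made-up stand-ins, not the ones from <sys/pool.h>):

#include <stdio.h>

/* Hypothetical flag values for illustration only. */
#define PR_NOWAIT	0x01
#define PR_WAITOK	0x02

int
main(void)
{
	int cases[] = { 0, PR_NOWAIT, PR_WAITOK, PR_NOWAIT | PR_WAITOK };

	for (int i = 0; i < 4; i++) {
		int flags = cases[i];
		/* Prints 1 only when exactly one of the two flags is set. */
		printf("flags=%#x -> %d\n", flags,
		    !(flags & PR_NOWAIT) != !(flags & PR_WAITOK));
	}
	return 0;
}
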
@@ -761 +770 @@ pool_get(struct pool *pp, int flags)
 	 * and we can wait, then wait until an item has been returned to
 	 * the pool.
 	 */
-#ifdef DIAGNOSTIC
-	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
-		mutex_exit(&pp->pr_lock);
-		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
-	}
-#endif
+	KASSERTMSG((pp->pr_nout <= pp->pr_hardlimit),
+	    "%s: %s: crossed hard limit", __func__, pp->pr_wchan);
 	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
 		if (pp->pr_drain_hook != NULL) {
 			/*

@@ -787 +792 @@ pool_get(struct pool *pp, int flags)
 		 * it be?
 		 */
 		pp->pr_flags |= PR_WANTED;
-		cv_wait(&pp->pr_cv, &pp->pr_lock);
+		do {
+			cv_wait(&pp->pr_cv, &pp->pr_lock);
+		} while (pp->pr_flags & PR_WANTED);
 		goto startover;
 	}

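cv_wait() is now wrapped in a loop because pr_cv is shared: the new pool_grow() below also broadcasts on it, so a waiter can be woken while PR_WANTED is still set and must re-check the condition before proceeding. The same wait-in-a-loop idiom in user space, with pthreads standing in for the kernel mutex/condvar API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static bool wanted = true;		/* analogous to PR_WANTED */

static void *
releaser(void *arg)
{
	pthread_mutex_lock(&lock);
	wanted = false;			/* analogous to clearing PR_WANTED */
	pthread_cond_broadcast(&cv);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, releaser, NULL);
	pthread_mutex_lock(&lock);
	/*
	 * Re-check after every wakeup: with a shared condition variable,
	 * a broadcast does not guarantee that *this* condition now holds.
	 */
	while (wanted)
		pthread_cond_wait(&cv, &lock);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	printf("PR_WANTED cleared, proceeding\n");
	return 0;
}
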
@@ -802 +809 @@ pool_get(struct pool *pp, int flags)
 		pp->pr_nfail++;

 		mutex_exit(&pp->pr_lock);
+		KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0);
 		return (NULL);
 	}

@@ -814 +822 @@ pool_get(struct pool *pp, int flags)
 	if ((ph = pp->pr_curpage) == NULL) {
 		int error;

-#ifdef DIAGNOSTIC
-		if (pp->pr_nitems != 0) {
-			mutex_exit(&pp->pr_lock);
-			printf("pool_get: %s: curpage NULL, nitems %u\n",
-			    pp->pr_wchan, pp->pr_nitems);
-			panic("pool_get: nitems inconsistent");
-		}
-#endif
+		KASSERTMSG((pp->pr_nitems == 0),
+		    "%s: [%s] curpage NULL, inconsistent nitems %u",
+		    __func__, pp->pr_wchan, pp->pr_nitems);

 		/*
 		 * Call the back-end page allocator for more memory.

@@ -831 +834 @@ pool_get(struct pool *pp, int flags)
 		error = pool_grow(pp, flags);
 		if (error != 0) {
 			/*
+			 * pool_grow aborts when another thread
+			 * is allocating a new page. Retry if it
+			 * waited for it.
+			 */
+			if (error == ERESTART)
+				goto startover;
+
+			/*
 			 * We were unable to allocate a page or item
 			 * header, but we released the lock during
 			 * allocation, so perhaps items were freed

@@ -841 +852 @@ pool_get(struct pool *pp, int flags)

 			pp->pr_nfail++;
 			mutex_exit(&pp->pr_lock);
+			KASSERT((flags & (PR_WAITOK|PR_NOWAIT)) == PR_NOWAIT);
 			return (NULL);
 		}

@@ -848 +860 @@ pool_get(struct pool *pp, int flags)
 		goto startover;
 	}
 	if (pp->pr_roflags & PR_NOTOUCH) {
-#ifdef DIAGNOSTIC
-		if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
-			mutex_exit(&pp->pr_lock);
-			panic("pool_get: %s: page empty", pp->pr_wchan);
-		}
-#endif
+		KASSERTMSG((ph->ph_nmissing < pp->pr_itemsperpage),
+		    "%s: %s: page empty", __func__, pp->pr_wchan);
 		v = pr_item_notouch_get(pp, ph);
 	} else {
 		v = pi = LIST_FIRST(&ph->ph_itemlist);
 		if (__predict_false(v == NULL)) {
 			mutex_exit(&pp->pr_lock);
-			panic("pool_get: %s: page empty", pp->pr_wchan);
+			panic("%s: [%s] page empty", __func__, pp->pr_wchan);
 		}
-#ifdef DIAGNOSTIC
-		if (__predict_false(pp->pr_nitems == 0)) {
-			mutex_exit(&pp->pr_lock);
-			printf("pool_get: %s: items on itemlist, nitems %u\n",
-			    pp->pr_wchan, pp->pr_nitems);
-			panic("pool_get: nitems inconsistent");
-		}
-#endif
-
-#ifdef DIAGNOSTIC
-		if (__predict_false(pi->pi_magic != PI_MAGIC)) {
-			panic("pool_get(%s): free list modified: "
-			    "magic=%x; page %p; item addr %p\n",
-			    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
-		}
-#endif
+		KASSERTMSG((pp->pr_nitems > 0),
+		    "%s: [%s] nitems %u inconsistent on itemlist",
+		    __func__, pp->pr_wchan, pp->pr_nitems);
+		KASSERTMSG((pi->pi_magic == PI_MAGIC),
+		    "%s: [%s] free list modified: "
+		    "magic=%x; page %p; item addr %p", __func__,
+		    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);

 		/*
 		 * Remove from item list.

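On the non-PR_NOTOUCH path, every item parked on the free list carries PI_MAGIC in its header, so a write through a stale pointer is caught the next time the item is handed out; the conversion above only changes how the failure is reported. A toy illustration of the trick (the magic value here is illustrative, not the one this file defines):

#include <assert.h>
#include <stdio.h>

#define PI_MAGIC 0xdeadbeefU	/* illustrative value */

struct item {
	unsigned int pi_magic;	/* only meaningful while the item is free */
};

int
main(void)
{
	struct item it = { .pi_magic = PI_MAGIC };

	it.pi_magic = 0x1234;	/* simulated write through a stale pointer */

	/* What the new KASSERTMSG catches at the next pool_get(). */
	assert(it.pi_magic == PI_MAGIC && "free list modified");
	printf("not reached\n");
	return 0;
}
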
@@ -886 +885 @@ pool_get(struct pool *pp, int flags)
 	pp->pr_nitems--;
 	pp->pr_nout++;
 	if (ph->ph_nmissing == 0) {
-#ifdef DIAGNOSTIC
-		if (__predict_false(pp->pr_nidle == 0))
-			panic("pool_get: nidle inconsistent");
-#endif
+		KASSERT(pp->pr_nidle > 0);
 		pp->pr_nidle--;

 		/*

@@ -901 +897 @@ pool_get(struct pool *pp, int flags)
 	}
 	ph->ph_nmissing++;
 	if (ph->ph_nmissing == pp->pr_itemsperpage) {
-#ifdef DIAGNOSTIC
-		if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
-		    !LIST_EMPTY(&ph->ph_itemlist))) {
-			mutex_exit(&pp->pr_lock);
-			panic("pool_get: %s: nmissing inconsistent",
-			    pp->pr_wchan);
-		}
-#endif
+		KASSERTMSG(((pp->pr_roflags & PR_NOTOUCH) ||
+			LIST_EMPTY(&ph->ph_itemlist)),
+		    "%s: [%s] nmissing (%u) inconsistent", __func__,
+		    pp->pr_wchan, ph->ph_nmissing);
 		/*
 		 * This page is now full.  Move it to the full list
 		 * and select a new current page.

@@ -935 +927 @@ pool_get(struct pool *pp, int flags)
 	mutex_exit(&pp->pr_lock);
 	KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
 	FREECHECK_OUT(&pp->pr_freecheck, v);
+	pool_redzone_fill(pp, v);
 	return (v);
 }

@@ -948 +941 @@ pool_do_put(struct pool *pp, void *v, st
 	struct pool_item_header *ph;

 	KASSERT(mutex_owned(&pp->pr_lock));
+	pool_redzone_check(pp, v);
 	FREECHECK_IN(&pp->pr_freecheck, v);
 	LOCKDEBUG_MEM_CHECK(v, pp->pr_size);

-#ifdef DIAGNOSTIC
-	if (__predict_false(pp->pr_nout == 0)) {
-		printf("pool %s: putting with none out\n",
-		    pp->pr_wchan);
-		panic("pool_put");
-	}
-#endif
+	KASSERTMSG((pp->pr_nout > 0),
+	    "%s: [%s] putting with none out", __func__, pp->pr_wchan);

 	if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
-		panic("pool_put: %s: page header missing", pp->pr_wchan);
+		panic("%s: [%s] page header missing", __func__, pp->pr_wchan);
 	}

 	/*

@@ -1073 +1062 @@ pool_put(struct pool *pp, void *v)
 static int
 pool_grow(struct pool *pp, int flags)
 {
-	struct pool_item_header *ph = NULL;
-	char *cp;
+	/*
+	 * If there's a pool_grow in progress, wait for it to complete
+	 * and try again from the top.
+	 */
+	if (pp->pr_flags & PR_GROWING) {
+		if (flags & PR_WAITOK) {
+			do {
+				cv_wait(&pp->pr_cv, &pp->pr_lock);
+			} while (pp->pr_flags & PR_GROWING);
+			return ERESTART;
+		} else {
+			return EWOULDBLOCK;
+		}
+	}
+	pp->pr_flags |= PR_GROWING;

 	mutex_exit(&pp->pr_lock);
-	cp = pool_allocator_alloc(pp, flags);
-	if (__predict_true(cp != NULL)) {
-		ph = pool_alloc_item_header(pp, cp, flags);
-	}
-	if (__predict_false(cp == NULL || ph == NULL)) {
-		if (cp != NULL) {
-			pool_allocator_free(pp, cp);
-		}
-		mutex_enter(&pp->pr_lock);
-		return ENOMEM;
-	}
+	char *cp = pool_allocator_alloc(pp, flags);
+	if (__predict_false(cp == NULL))
+		goto out;
+
+	struct pool_item_header *ph = pool_alloc_item_header(pp, cp, flags);
+	if (__predict_false(ph == NULL)) {
+		pool_allocator_free(pp, cp);
+		goto out;
+	}

 	mutex_enter(&pp->pr_lock);
 	pool_prime_page(pp, cp, ph);
 	pp->pr_npagealloc++;
+	KASSERT(pp->pr_flags & PR_GROWING);
+	pp->pr_flags &= ~PR_GROWING;
+	/*
+	 * If anyone was waiting for pool_grow, notify them that we
+	 * may have just done it.
+	 */
+	cv_broadcast(&pp->pr_cv);
 	return 0;
+
+out:
+	KASSERT(pp->pr_flags & PR_GROWING);
+	pp->pr_flags &= ~PR_GROWING;
+	mutex_enter(&pp->pr_lock);
+	return ENOMEM;
 }

 /*

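The rewritten pool_grow() serializes page allocation behind a new PR_GROWING flag: the first caller sets it, drops pr_lock around the backing allocation, and broadcasts on pr_cv when finished; concurrent callers either sleep and return ERESTART, meaning someone else grew the pool and the state should be re-evaluated, or fail fast with EWOULDBLOCK when they may not sleep. Callers therefore need a retry discipline, which the pool_prime() and pool_catchup() hunks below adopt. A condensed sketch of that caller-side contract (pool_grow_stub() is a hypothetical stand-in; ERESTART is kernel-internal on NetBSD, hence the fallback define):

#include <errno.h>
#include <stdio.h>

#ifndef ERESTART
#define ERESTART	-3	/* kernel-internal value on NetBSD */
#endif

/* Hypothetical stand-in for pool_grow(); returns 0/ERESTART/ENOMEM/... */
static int
pool_grow_stub(int flags)
{
	static int calls;

	(void)flags;
	return (calls++ == 0) ? ERESTART : 0;	/* first call loses the race */
}

static int
grow_until_done(int flags)
{
	int error;

	for (;;) {
		error = pool_grow_stub(flags);
		if (error == 0)
			return 0;	/* a page was added; re-examine pool */
		if (error == ERESTART)
			continue;	/* another thread grew it; recheck */
		return error;		/* ENOMEM or EWOULDBLOCK: give up */
	}
}

int
main(void)
{
	printf("grow_until_done -> %d\n", grow_until_done(0));
	return 0;
}
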
@@ -1108 +1120 @@ pool_prime(struct pool *pp, int n)

 	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

-	while (newpages-- > 0) {
+	while (newpages > 0) {
 		error = pool_grow(pp, PR_NOWAIT);
 		if (error) {
+			if (error == ERESTART)
+				continue;
 			break;
 		}
 		pp->pr_minpages++;
+		newpages--;
 	}

 	if (pp->pr_minpages >= pp->pr_maxpages)

@@ -1138 +1153 @@ pool_prime_page(struct pool *pp, void *s
 	int n;

 	KASSERT(mutex_owned(&pp->pr_lock));
-#ifdef DIAGNOSTIC
-	if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
-	    ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
-		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
-#endif
+	KASSERTMSG(((pp->pr_roflags & PR_NOALIGN) ||
+		(((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) == 0)),
+	    "%s: [%s] unaligned page: %p", __func__, pp->pr_wchan, cp);

 	/*
 	 * Insert page header.

@@ -1226 +1238 @@ pool_catchup(struct pool *pp)
 	while (POOL_NEEDS_CATCHUP(pp)) {
 		error = pool_grow(pp, PR_NOWAIT);
 		if (error) {
+			if (error == ERESTART)
+				continue;
 			break;
 		}
 	}

@@ -1439 +1453 @@ pool_drain(struct pool **ppp)
 }

 /*
+ * Calculate the total number of pages consumed by pools.
+ */
+int
+pool_totalpages(void)
+{
+	struct pool *pp;
+	int total = 0;
+
+	mutex_enter(&pool_head_lock);
+	TAILQ_FOREACH(pp, &pool_head, pr_poollist)
+	/*
+	 * XXXMRG
+		if ((pp->pr_roflags & PR_RECURSIVE) == 0)
+	 */
+		total += pp->pr_npages;
+	mutex_exit(&pool_head_lock);
+
+	return total;
+}
+
+/*
  * Diagnostic helpers.
  */

@@ -1469 +1504 @@ pool_print_pagelist(struct pool *pp, str
     void (*pr)(const char *, ...))
 {
 	struct pool_item_header *ph;
-#ifdef DIAGNOSTIC
-	struct pool_item *pi;
-#endif
+	struct pool_item *pi __diagused;

 	LIST_FOREACH(ph, pl, ph_pagelist) {
 		(*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",

@@ -1733 +1766 @@ pool_cache_bootstrap(pool_cache_t pc, si
 	struct pool *pp;

 	pp = &pc->pc_pool;
-	if (palloc == NULL && ipl == IPL_NONE)
-		palloc = &pool_allocator_nointr;
+	if (palloc == NULL && ipl == IPL_NONE) {
+		if (size > PAGE_SIZE) {
+			int bigidx = pool_bigidx(size);
+
+			palloc = &pool_allocator_big[bigidx];
+		} else
+			palloc = &pool_allocator_nointr;
+	}
 	pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
 	mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);

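Until this change, an IPL_NONE cache with no explicit allocator always got pool_allocator_nointr, whose page size is PAGE_SIZE, so objects larger than a page had no suitable backing allocator; such sizes now fall through to one of the power-of-two pool_allocator_big[] entries. The table and pool_bigidx() themselves are added in a later hunk; see the index sketch after that hunk.
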
@@ -2167 +2206 @@ pool_cache_get_slow(pool_cache_cpu_t *cc

 	object = pool_get(&pc->pc_pool, flags);
 	*objectp = object;
-	if (__predict_false(object == NULL))
+	if (__predict_false(object == NULL)) {
+		KASSERT((flags & (PR_WAITOK|PR_NOWAIT)) == PR_NOWAIT);
 		return false;
+	}

 	if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
 		pool_put(&pc->pc_pool, object);

@@ -2188 +2229 @@ pool_cache_get_slow(pool_cache_cpu_t *cc
 	}

 	FREECHECK_OUT(&pc->pc_freecheck, object);
+	pool_redzone_fill(&pc->pc_pool, object);
 	return false;
 }

@@ -2205 +2247 @@ pool_cache_get_paddr(pool_cache_t pc, in
 	void *object;
 	int s;

+	KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK));
 	KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) ||
 	    (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL),
-	    "pool '%s' is IPL_NONE, but called from interrupt context\n",
-	    pc->pc_pool.pr_wchan);
+	    "%s: [%s] is IPL_NONE, but called from interrupt context",
+	    __func__, pc->pc_pool.pr_wchan);

 	if (flags & PR_WAITOK) {
 		ASSERT_SLEEPABLE();

@@ -2233 +2276 @@ pool_cache_get_paddr(pool_cache_t pc, in
 			cc->cc_hits++;
 			splx(s);
 			FREECHECK_OUT(&pc->pc_freecheck, object);
+			pool_redzone_fill(&pc->pc_pool, object);
 			return object;
 		}

@@ -2257 +2301 @@ pool_cache_get_paddr(pool_cache_t pc, in
 			break;
 	}

+	/*
+	 * We would like to KASSERT(object || (flags & PR_NOWAIT)), but
+	 * pool_cache_get can fail even in the PR_WAITOK case, if the
+	 * constructor fails.
+	 */
 	return object;
 }

@@ -2376 +2425 @@ pool_cache_put_paddr(pool_cache_t pc, vo
 	int s;

 	KASSERT(object != NULL);
+	pool_redzone_check(&pc->pc_pool, object);
 	FREECHECK_IN(&pc->pc_freecheck, object);

 	/* Lock out interrupts and disable preemption. */

@@ -2528 +2578 @@ struct pool_allocator pool_allocator_noi
 };
 #endif /* POOL_SUBPAGE */

+struct pool_allocator pool_allocator_big[] = {
+	{
+		.pa_alloc = pool_page_alloc,
+		.pa_free = pool_page_free,
+		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 0),
+	},
+	{
+		.pa_alloc = pool_page_alloc,
+		.pa_free = pool_page_free,
+		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 1),
+	},
+	{
+		.pa_alloc = pool_page_alloc,
+		.pa_free = pool_page_free,
+		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 2),
+	},
+	{
+		.pa_alloc = pool_page_alloc,
+		.pa_free = pool_page_free,
+		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 3),
+	},
+	{
+		.pa_alloc = pool_page_alloc,
+		.pa_free = pool_page_free,
+		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 4),
+	},
+	{
+		.pa_alloc = pool_page_alloc,
+		.pa_free = pool_page_free,
+		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 5),
+	},
+	{
+		.pa_alloc = pool_page_alloc,
+		.pa_free = pool_page_free,
+		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 6),
+	},
+	{
+		.pa_alloc = pool_page_alloc,
+		.pa_free = pool_page_free,
+		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 7),
+	}
+};
+
+static int
+pool_bigidx(size_t size)
+{
+	int i;
+
+	for (i = 0; i < __arraycount(pool_allocator_big); i++) {
+		if (1 << (i + POOL_ALLOCATOR_BIG_BASE) >= size)
+			return i;
+	}
+	panic("pool item size %zu too large, use a custom allocator", size);
+}
+
 static void *
 pool_allocator_alloc(struct pool *pp, int flags)
 {

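pool_bigidx() picks the smallest table entry whose power-of-two page size covers the item, so with POOL_ALLOCATOR_BIG_BASE at 13 the eight entries span 8 KiB (2^13) through 1 MiB (2^20). The same selection logic as a standalone check:

#include <stddef.h>
#include <stdio.h>

#define POOL_ALLOCATOR_BIG_BASE	13	/* as in the diff: 2^13 = 8192 */
#define NBIG			8	/* eight entries: 2^13 .. 2^20 */

/* Same selection logic as pool_bigidx(), with -1 instead of panic(). */
static int
bigidx(size_t size)
{
	for (int i = 0; i < NBIG; i++)
		if ((size_t)1 << (i + POOL_ALLOCATOR_BIG_BASE) >= size)
			return i;
	return -1;	/* the kernel version panics here */
}

int
main(void)
{
	printf("bigidx(8192)  = %d\n", bigidx(8192));	/* 0: fits 2^13 */
	printf("bigidx(9000)  = %d\n", bigidx(9000));	/* 1: needs 2^14 */
	printf("bigidx(65536) = %d\n", bigidx(65536));	/* 3: fits 2^16 */
	return 0;
}
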
@@ -2597 +2702 @@ pool_page_free_meta(struct pool *pp, voi
 	vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
 }

+#ifdef POOL_REDZONE
+#if defined(_LP64)
+# define PRIME 0x9e37fffffffc0000UL
+#else /* defined(_LP64) */
+# define PRIME 0x9e3779b1
+#endif /* defined(_LP64) */
+#define STATIC_BYTE	0xFE
+CTASSERT(POOL_REDZONE_SIZE > 1);
+
+static inline uint8_t
+pool_pattern_generate(const void *p)
+{
+	return (uint8_t)(((uintptr_t)p) * PRIME
+	   >> ((sizeof(uintptr_t) - sizeof(uint8_t))) * CHAR_BIT);
+}
+
+static void
+pool_redzone_init(struct pool *pp, size_t requested_size)
+{
+	size_t nsz;
+
+	if (pp->pr_roflags & PR_NOTOUCH) {
+		pp->pr_reqsize = 0;
+		pp->pr_redzone = false;
+		return;
+	}
+
+	/*
+	 * We may have extended the requested size earlier; check if
+	 * there's naturally space in the padding for a red zone.
+	 */
+	if (pp->pr_size - requested_size >= POOL_REDZONE_SIZE) {
+		pp->pr_reqsize = requested_size;
+		pp->pr_redzone = true;
+		return;
+	}
+
+	/*
+	 * No space in the natural padding; check if we can extend a
+	 * bit the size of the pool.
+	 */
+	nsz = roundup(pp->pr_size + POOL_REDZONE_SIZE, pp->pr_align);
+	if (nsz <= pp->pr_alloc->pa_pagesz) {
+		/* Ok, we can */
+		pp->pr_size = nsz;
+		pp->pr_reqsize = requested_size;
+		pp->pr_redzone = true;
+	} else {
+		/* No space for a red zone... snif :'( */
+		pp->pr_reqsize = 0;
+		pp->pr_redzone = false;
+		printf("pool redzone disabled for '%s'\n", pp->pr_wchan);
+	}
+}
+
+static void
+pool_redzone_fill(struct pool *pp, void *p)
+{
+	uint8_t *cp, pat;
+	const uint8_t *ep;
+
+	if (!pp->pr_redzone)
+		return;
+
+	cp = (uint8_t *)p + pp->pr_reqsize;
+	ep = cp + POOL_REDZONE_SIZE;
+
+	/*
+	 * We really don't want the first byte of the red zone to be '\0';
+	 * an off-by-one in a string may not be properly detected.
+	 */
+	pat = pool_pattern_generate(cp);
+	*cp = (pat == '\0') ? STATIC_BYTE: pat;
+	cp++;
+
+	while (cp < ep) {
+		*cp = pool_pattern_generate(cp);
+		cp++;
+	}
+}
+
+static void
+pool_redzone_check(struct pool *pp, void *p)
+{
+	uint8_t *cp, pat, expected;
+	const uint8_t *ep;
+
+	if (!pp->pr_redzone)
+		return;
+
+	cp = (uint8_t *)p + pp->pr_reqsize;
+	ep = cp + POOL_REDZONE_SIZE;
+
+	pat = pool_pattern_generate(cp);
+	expected = (pat == '\0') ? STATIC_BYTE: pat;
+	if (expected != *cp) {
+		panic("%s: %p: 0x%02x != 0x%02x\n",
+		   __func__, cp, *cp, expected);
+	}
+	cp++;
+
+	while (cp < ep) {
+		expected = pool_pattern_generate(cp);
+		if (*cp != expected) {
+			panic("%s: %p: 0x%02x != 0x%02x\n",
+			   __func__, cp, *cp, expected);
+		}
+		cp++;
+	}
+}
+
+#endif /* POOL_REDZONE */
+
 #ifdef POOL_SUBPAGE
 /* Sub-page allocator, for machines with large hardware pages. */
 void *

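pool_pattern_generate() hashes each guard byte's own address with a multiplicative constant and keeps the top byte, so every red-zone byte has a cheap, position-dependent expected value that needs no side storage; pool_redzone_check() simply recomputes it on free, and the first byte is forced away from '\0' so that a string off-by-one cannot accidentally match. A user-space fill-and-verify sketch reusing the constants from the diff (LP64 variant):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Constants taken from the diff (LP64 variant). */
#define PRIME		0x9e37fffffffc0000UL
#define STATIC_BYTE	0xFE
#define REDZONE_SIZE	2

static uint8_t
pattern(const void *p)
{
	/* Top byte of (address * PRIME): cheap and position-dependent. */
	return (uint8_t)(((uintptr_t)p) * PRIME >>
	    (sizeof(uintptr_t) - sizeof(uint8_t)) * CHAR_BIT);
}

int
main(void)
{
	uint8_t buf[16 + REDZONE_SIZE];
	uint8_t *cp = buf + 16;		/* red zone begins after the object */
	uint8_t pat, expected;

	/* Fill, as pool_redzone_fill() does: first byte must not be '\0'. */
	pat = pattern(&cp[0]);
	cp[0] = (pat == '\0') ? STATIC_BYTE : pat;
	cp[1] = pattern(&cp[1]);

	/* Verify, as pool_redzone_check() does on pool_put(). */
	pat = pattern(&cp[0]);
	expected = (pat == '\0') ? STATIC_BYTE : pat;
	printf("redzone %s\n",
	    (cp[0] == expected && cp[1] == pattern(&cp[1])) ?
	    "intact" : "corrupted");
	return 0;
}
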
@@ -2814 +3033 @@ pool_sysctl(SYSCTLFN_ARGS)
 			cc = pc->pc_cpus[i];
 			if (cc == NULL)
 				continue;
-			data.pr_cache_nmiss_pcpu = cc->cc_misses;
-			data.pr_cache_nhit_pcpu = cc->cc_hits;
+			data.pr_cache_nmiss_pcpu += cc->cc_misses;
+			data.pr_cache_nhit_pcpu += cc->cc_hits;
 		}
 	} else {
 		data.pr_cache_meta_size = 0;
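
The final hunk fixes a plain statistics bug: the loop visits every CPU, and with plain assignment each iteration overwrote the previous CPU's counters, so the sysctl reported only the last CPU's cache hits and misses; with += the per-CPU counters accumulate across all CPUs as intended.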