version 1.69, 2002/03/08 21:43:54 to version 1.190.2.3, 2012/10/30 17:22:34
(the newer revision's text follows; "Line NNN" markers show where unchanged code is skipped)

/*	$NetBSD$	*/

/*-
 * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD$");

#include "opt_ddb.h"
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitops.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/vmem.h>
#include <sys/pool.h>
#include <sys/syslog.h>
#include <sys/debug.h>
#include <sys/lockdebug.h>
#include <sys/xcall.h>
#include <sys/cpu.h>
#include <sys/atomic.h>

#include <uvm/uvm_extern.h>

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according to
 * the pool item size. Each page is kept on one of three lists in the
 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
 * for empty, full and partially-full pages respectively. The individual
 * pool items are on a linked list headed by `ph_itemlist' in each page
 * header. The memory for building the page list is either taken from
 * the allocated pages themselves (for small pool items) or taken from
 * an internal pool of page headers (`phpool').
 */
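
/*
 * Illustrative sketch, not part of the original file: a minimal client
 * of the pool(9) interface described above.  The structure, pool name
 * and IPL choice below are hypothetical.
 */
#if 0	/* example only */
#include <sys/pool.h>

struct frobnitz {
	int	f_refcnt;
	char	f_name[16];
};

static struct pool frob_pool;

void
frob_subsystem_init(void)
{
	/* size, align, ioff, flags, wchan, allocator (NULL = default), ipl */
	pool_init(&frob_pool, sizeof(struct frobnitz), 0, 0, 0,
	    "frobpl", NULL, IPL_NONE);
}

struct frobnitz *
frob_alloc(void)
{
	/* PR_WAITOK may sleep for memory; PR_NOWAIT returns NULL instead. */
	return pool_get(&frob_pool, PR_WAITOK);
}

void
frob_free(struct frobnitz *f)
{
	pool_put(&frob_pool, f);
}
#endif	/* example only */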

/* List of all pools */
static TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
#define	PHPOOL_MAX	8
static struct pool phpool[PHPOOL_MAX];
#define	PHPOOL_FREELIST_NELEM(idx) \
	(((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))

#ifdef POOL_SUBPAGE
/* Pool of subpages for use by normal pools. */
static struct pool psppool;
#endif

static void *pool_page_alloc_meta(struct pool *, int);
static void pool_page_free_meta(struct pool *, void *);

/* allocator for pool metadata */
struct pool_allocator pool_allocator_meta = {
	.pa_alloc = pool_page_alloc_meta,
	.pa_free = pool_page_free_meta,
	.pa_pagesz = 0
};

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool *drainpp;

/* This lock protects both pool_head and drainpp. */
static kmutex_t pool_head_lock;
static kcondvar_t pool_busy;

/* This lock protects initialization of a potentially shared pool allocator */
static kmutex_t pool_allocator_lock;

typedef uint32_t pool_item_bitmap_t;
#define	BITMAP_SIZE	(CHAR_BIT * sizeof(pool_item_bitmap_t))
#define	BITMAP_MASK	(BITMAP_SIZE - 1)

struct pool_item_header {
	/* Page headers */
	LIST_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	SPLAY_ENTRY(pool_item_header)
				ph_node;	/* Off-page page headers */
	void *			ph_page;	/* this page's address */
	uint32_t		ph_time;	/* last referenced */
	uint16_t		ph_nmissing;	/* # of chunks in use */
	uint16_t		ph_off;		/* start offset in page */
	union {
		/* !PR_NOTOUCH */
		struct {
			LIST_HEAD(, pool_item)
				phu_itemlist;	/* chunk list for this page */
		} phu_normal;
		/* PR_NOTOUCH */
		struct {
			pool_item_bitmap_t phu_bitmap[1];
		} phu_notouch;
	} ph_u;
};
#define	ph_itemlist	ph_u.phu_normal.phu_itemlist
#define	ph_bitmap	ph_u.phu_notouch.phu_bitmap

struct pool_item {
#ifdef DIAGNOSTIC
	u_int pi_magic;
#endif
#define	PI_MAGIC 0xdeaddeadU
	/* Other entries use only this list entry */
	LIST_ENTRY(pool_item)	pi_list;
};

#define	POOL_NEEDS_CATCHUP(pp)						\
	((pp)->pr_nitems < (pp)->pr_minitems)

Line 153 struct pool_item {

 * needless object construction/destruction; it is deferred until absolutely
 * necessary.
 *
 * Caches are grouped into cache groups.  Each cache group references up
 * to PCG_NUMOBJECTS constructed objects.  When a cache allocates an
 * object from the pool, it calls the object's constructor and places it
 * into a cache group.  When a cache group frees an object back to the
 * pool, it first calls the object's destructor.  This allows the object
 * to persist in constructed form while freed to the cache.
 *
 * The pool references each cache, so that when a pool is drained by the
 * pagedaemon, it can drain each individual cache as well.  Each time a
 * cache is drained, the most idle cache group is freed to the pool in
 * its entirety.
 *
 * Pool caches are laid on top of pools.  By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */
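
/*
 * Illustrative sketch, not part of the original file: a cached pool
 * using the constructor/destructor scheme described above.  The widget
 * structure and all names are hypothetical.
 */
#if 0	/* example only */
struct widget {
	kmutex_t	w_lock;
	int		w_state;
};

static pool_cache_t widget_cache;

static int
widget_ctor(void *arg, void *obj, int flags)
{
	struct widget *w = obj;

	/* Runs only when the cache takes a fresh item from the pool. */
	mutex_init(&w->w_lock, MUTEX_DEFAULT, IPL_NONE);
	w->w_state = 0;
	return 0;
}

static void
widget_dtor(void *arg, void *obj)
{
	struct widget *w = obj;

	/* Runs only when a cached object is finally freed to the pool. */
	mutex_destroy(&w->w_lock);
}

void
widget_subsystem_init(void)
{
	widget_cache = pool_cache_init(sizeof(struct widget),
	    coherency_unit, 0, 0, "widgetpl", NULL, IPL_NONE,
	    widget_ctor, widget_dtor, NULL);
}
#endif	/* example only */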

/* The cache group pool. */
static struct pool pcg_normal_pool;
static struct pool pcg_large_pool;

static struct pool cache_pool;
static struct pool cache_cpu_pool;

pool_cache_t pnbuf_cache;	/* pathname buffer cache */

/* List of all caches. */
TAILQ_HEAD(,pool_cache) pool_cache_head =
    TAILQ_HEAD_INITIALIZER(pool_cache_head);

int pool_cache_disable;		/* global disable for caching */

static const pcg_t pcg_dummy;	/* zero sized: always empty, yet always full */

static bool	pool_cache_put_slow(pool_cache_cpu_t *, int,
				    void *);
static bool	pool_cache_get_slow(pool_cache_cpu_t *, int,
				    void **, paddr_t *, int);
static void	pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
static void	pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
static void	pool_cache_invalidate_cpu(pool_cache_t, u_int);
static void	pool_cache_transfer(pool_cache_t);

static int	pool_catchup(struct pool *);
static void	pool_prime_page(struct pool *, void *,
		    struct pool_item_header *);
static void	pool_update_curpage(struct pool *);

static int	pool_grow(struct pool *, int);
static void	*pool_allocator_alloc(struct pool *, int);
static void	pool_allocator_free(struct pool *, void *);

static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
	void (*)(const char *, ...));
static void pool_print1(struct pool *, const char *,
	void (*)(const char *, ...));

static int pool_chk_page(struct pool *, const char *,
	struct pool_item_header *);

static inline unsigned int
pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
    const void *v)
{
	const char *cp = v;
	unsigned int idx;

	KASSERT(pp->pr_roflags & PR_NOTOUCH);
	idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
	KASSERT(idx < pp->pr_itemsperpage);
	return idx;
}

static inline void
pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
    void *obj)
{
	unsigned int idx = pr_item_notouch_index(pp, ph, obj);
	pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
	pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);

	KASSERT((*bitmap & mask) == 0);
	*bitmap |= mask;
}

static inline void *
pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
{
	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
	unsigned int idx;
	int i;

	for (i = 0; ; i++) {
		int bit;

		KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
		bit = ffs32(bitmap[i]);
		if (bit) {
			pool_item_bitmap_t mask;

			bit--;
			idx = (i * BITMAP_SIZE) + bit;
			mask = 1 << bit;
			KASSERT((bitmap[i] & mask) != 0);
			bitmap[i] &= ~mask;
			break;
		}
	}
	KASSERT(idx < pp->pr_itemsperpage);
	return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
}
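
/*
 * Worked example of the PR_NOTOUCH bitmap arithmetic above, with
 * hypothetical numbers: for a pool with pr_itemsperpage = 100, item
 * index 37 lives in bitmap word 37 / BITMAP_SIZE = 1, at bit
 * 37 & BITMAP_MASK = 5.  pr_item_notouch_get() scans the words for the
 * first set (i.e. free) bit with ffs32(), clears it, and turns the
 * (word, bit) pair back into an item address as
 * ph_page + ph_off + idx * pr_size.
 */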

static inline void
pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
{
	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
	const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
	int i;

	for (i = 0; i < n; i++) {
		bitmap[i] = (pool_item_bitmap_t)-1;
	}
}

static inline int
phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
{

	/*
	 * we consider pool_item_header with smaller ph_page bigger.
	 * (this unnatural ordering is for the benefit of pr_find_pagehead.)
	 */

	if (a->ph_page < b->ph_page)
		return (1);
	else if (a->ph_page > b->ph_page)
		return (-1);
	else
		return (0);
}

SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);

static inline struct pool_item_header *
pr_find_pagehead_noalign(struct pool *pp, void *v)
{
	struct pool_item_header *ph, tmp;

	tmp.ph_page = (void *)(uintptr_t)v;
	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
	if (ph == NULL) {
		ph = SPLAY_ROOT(&pp->pr_phtree);
		if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
			ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
		}
		KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
	}

	return ph;
}
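
/*
 * Worked example, with hypothetical addresses: because phtree_compare()
 * sorts smaller ph_page values later, a lookup for an item at 0x10468
 * that fails SPLAY_FIND() (the item is not itself a page start) ends on
 * the header with the largest ph_page below 0x10468, e.g. 0x10000,
 * which is exactly the page containing the item.  This is why the
 * "unnatural ordering" above lets a single SPLAY_NEXT() step finish the
 * search.
 */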

/*
 * Return the pool page header based on item address.
 */
static inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, void *v)
{
	struct pool_item_header *ph, tmp;

	if ((pp->pr_roflags & PR_NOALIGN) != 0) {
		ph = pr_find_pagehead_noalign(pp, v);
	} else {
		void *page =
		    (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);

		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
			ph = (struct pool_item_header *)
			    ((char *)page + pp->pr_phoffset);
		} else {
			tmp.ph_page = page;
			ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
		}
	}

	KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
	    ((char *)ph->ph_page <= (char *)v &&
	    (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
	return ph;
}

static void
pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
{
	struct pool_item_header *ph;

	while ((ph = LIST_FIRST(pq)) != NULL) {
		LIST_REMOVE(ph, ph_pagelist);
		pool_allocator_free(pp, ph->ph_page);
		if ((pp->pr_roflags & PR_PHINPAGE) == 0)
			pool_put(pp->pr_phpool, ph);
	}
}

/*
 * Remove a page from the pool.
 */
static inline void
pr_rmpage(struct pool *pp, struct pool_item_header *ph,
     struct pool_pagelist *pq)
{

	KASSERT(mutex_owned(&pp->pr_lock));

	/*
	 * If the page was idle, decrement the idle page count.

Line 379 pr_rmpage(struct pool *pp, struct pool_i

	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink the page from the pool and queue it for release.
	 */
	LIST_REMOVE(ph, ph_pagelist);
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
	LIST_INSERT_HEAD(pq, ph, ph_pagelist);

	pp->pr_npages--;
	pp->pr_npagefree++;

	pool_update_curpage(pp);
}

/*
 * Initialize all the pools listed in the "pools" link set.
 */
void
pool_subsystem_init(void)
{
	size_t size;
	int idx;

	mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&pool_busy, "poolbusy");

	/*
	 * Initialize private page header pool and cache magazine pool if we
	 * haven't done so yet.
	 */
	for (idx = 0; idx < PHPOOL_MAX; idx++) {
		static char phpool_names[PHPOOL_MAX][6+1+6+1];
		int nelem;
		size_t sz;

		nelem = PHPOOL_FREELIST_NELEM(idx);
		snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
		    "phpool-%d", nelem);
		sz = sizeof(struct pool_item_header);
		if (nelem) {
			sz = offsetof(struct pool_item_header,
			    ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
		}
		pool_init(&phpool[idx], sz, 0, 0, 0,
		    phpool_names[idx], &pool_allocator_meta, IPL_VM);
	}
#ifdef POOL_SUBPAGE
	pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
	    PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
#endif

	size = sizeof(pcg_t) +
	    (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
	pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
	    "pcgnormal", &pool_allocator_meta, IPL_VM);

	size = sizeof(pcg_t) +
	    (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
	pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
	    "pcglarge", &pool_allocator_meta, IPL_VM);

	pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
	    0, 0, "pcache", &pool_allocator_meta, IPL_NONE);

	pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
	    0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE);
}
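
/*
 * Worked example of the sizing loop above: PHPOOL_FREELIST_NELEM(0) is
 * 0, so "phpool-0" holds plain headers for linked-list pools.  For
 * idx = 1 the freelist covers 32 * 2 = 64 items ("phpool-64"), and the
 * header is cut off at ph_bitmap[howmany(64, 32)], i.e. two bitmap
 * words; each further index doubles this, up to "phpool-4096" with a
 * 128-word bitmap at idx = 7.
 */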

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before kmem(9) is available.
 */
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, struct pool_allocator *palloc, int ipl)
{
	struct pool *pp1;
	size_t trysize, phsize;
	int off, slack;

#ifdef DEBUG
	if (__predict_true(!cold))
		mutex_enter(&pool_head_lock);
	/*
	 * Check that the pool hasn't already been initialised and
	 * added to the list of all pools.
	 */
	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
		if (pp == pp1)
			panic("pool_init: pool %s already initialised",
			    wchan);
	}
	if (__predict_true(!cold))
		mutex_exit(&pool_head_lock);
#endif

	if (palloc == NULL)
		palloc = &pool_allocator_kmem;
#ifdef POOL_SUBPAGE
	if (size > palloc->pa_pagesz) {
		if (palloc == &pool_allocator_kmem)
			palloc = &pool_allocator_kmem_fullpage;
		else if (palloc == &pool_allocator_nointr)
			palloc = &pool_allocator_nointr_fullpage;
	}
#endif /* POOL_SUBPAGE */
	if (!cold)
		mutex_enter(&pool_allocator_lock);
	if (palloc->pa_refcnt++ == 0) {
		if (palloc->pa_pagesz == 0)
			palloc->pa_pagesz = PAGE_SIZE;

		TAILQ_INIT(&palloc->pa_list);

		mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
	}
	if (!cold)
		mutex_exit(&pool_allocator_lock);

	if (align == 0)
		align = ALIGN(1);

	if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
		size = sizeof(struct pool_item);

	size = roundup(size, align);
#ifdef DIAGNOSTIC
	if (size > palloc->pa_pagesz)
		panic("pool_init: pool item size (%zu) too large", size);
#endif

	/*
	 * Initialize the pool structure.
	 */
	LIST_INIT(&pp->pr_emptypages);
	LIST_INIT(&pp->pr_fullpages);
	LIST_INIT(&pp->pr_partpages);
	pp->pr_cache = NULL;
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;

Line 542 pool_init(struct pool *pp, size_t size,

	pp->pr_hardlimit_warning_last.tv_usec = 0;
	pp->pr_drain_hook = NULL;
	pp->pr_drain_hook_arg = NULL;
	pp->pr_freecheck = NULL;

	/*
	 * Decide whether to put the page header off page, to avoid
	 * wasting too large a part of the page on a too-big item.
	 * Off-page page headers go on a splay tree, so we can match
	 * a returned item with its header based on the page address.
	 * We use 1/16 of the page size and about 8 times the item
	 * size as the threshold (XXX: tune)
	 *
	 * However, we'll put the header into the page if we can put
	 * it without wasting any items.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
	pp->pr_itemoffset = ioff %= align;
	/* See the comment below about reserved bytes. */
	trysize = palloc->pa_pagesz - ((align - ioff) % align);
	phsize = ALIGN(sizeof(struct pool_item_header));
	if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
	    (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
		/* Use the end of the page for the page header */
		pp->pr_roflags |= PR_PHINPAGE;
		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = palloc->pa_pagesz;
		SPLAY_INIT(&pp->pr_phtree);
	}
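
	/*
	 * Worked example of the test above, with hypothetical sizes:
	 * for pa_pagesz = 4096 and phsize = 48, the threshold is
	 * MIN(4096 / 16, 48 << 3) = MIN(256, 384) = 256, so items
	 * under 256 bytes keep their header in the page.  A 1040-byte
	 * item also qualifies through the second clause, since
	 * 4096 / 1040 == (4096 - 48) / 1040 == 3: the in-page header
	 * costs no items there.
	 */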

	/*
	 * Alignment is to take place at `ioff' within the item. This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 */
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
	KASSERT(pp->pr_itemsperpage != 0);
	if ((pp->pr_roflags & PR_NOTOUCH)) {
		int idx;

		for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
		    idx++) {
			/* nothing */
		}
		if (idx >= PHPOOL_MAX) {
			/*
			 * if you see this panic, consider to tweak
			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
			 */
			panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
			    pp->pr_wchan, pp->pr_itemsperpage);
		}
		pp->pr_phpool = &phpool[idx];
	} else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
		pp->pr_phpool = &phpool[0];
	}
#if defined(DIAGNOSTIC)
	else {
		pp->pr_phpool = NULL;
	}
#endif

	/*
	 * Use the slack between the chunks and the page header

Line 621 pool_init(struct pool *pp, size_t size,

	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;
	pp->pr_refcnt = 0;

	mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
	cv_init(&pp->pr_cv, wchan);
	pp->pr_ipl = ipl;

	/* Insert into the list of all pools. */
	if (!cold)
		mutex_enter(&pool_head_lock);
	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
		if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
			break;
	}
	if (pp1 == NULL)
		TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	else
		TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
	if (!cold)
		mutex_exit(&pool_head_lock);

	/* Insert this into the list of pools using this allocator. */
	if (!cold)
		mutex_enter(&palloc->pa_lock);
	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
	if (!cold)
		mutex_exit(&palloc->pa_lock);
}

/*

Line 655 pool_init(struct pool *pp, size_t size,

void
pool_destroy(struct pool *pp)
{
	struct pool_pagelist pq;
	struct pool_item_header *ph;

	/* Remove from global pool list */
	mutex_enter(&pool_head_lock);
	while (pp->pr_refcnt != 0)
		cv_wait(&pool_busy, &pool_head_lock);
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
	if (drainpp == pp)
		drainpp = NULL;
	mutex_exit(&pool_head_lock);

	/* Remove this pool from its allocator's list of pools. */
	mutex_enter(&pp->pr_alloc->pa_lock);
	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
	mutex_exit(&pp->pr_alloc->pa_lock);

	mutex_enter(&pool_allocator_lock);
	if (--pp->pr_alloc->pa_refcnt == 0)
		mutex_destroy(&pp->pr_alloc->pa_lock);
	mutex_exit(&pool_allocator_lock);

	mutex_enter(&pp->pr_lock);

	KASSERT(pp->pr_cache == NULL);

#ifdef DIAGNOSTIC
	if (pp->pr_nout != 0) {
		panic("pool_destroy: pool busy: still out: %u",
		    pp->pr_nout);
	}
#endif

	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
	KASSERT(LIST_EMPTY(&pp->pr_partpages));

	/* Remove all pages */
	LIST_INIT(&pq);
	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		pr_rmpage(pp, ph, &pq);

	mutex_exit(&pp->pr_lock);

	pr_pagelist_free(pp, &pq);
	cv_destroy(&pp->pr_cv);
	mutex_destroy(&pp->pr_lock);
}

void

Line 716 pool_set_drain_hook(struct pool *pp, voi

	pp->pr_drain_hook_arg = arg;
}

static struct pool_item_header *
pool_alloc_item_header(struct pool *pp, void *storage, int flags)
{
	struct pool_item_header *ph;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		ph = (struct pool_item_header *)
		    ((char *)storage + pp->pr_phoffset);
	else
		ph = pool_get(pp->pr_phpool, flags);

	return (ph);
}

/*
 * Grab an item from the pool.
 */
void *
pool_get(struct pool *pp, int flags)
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	void *v;

#ifdef DIAGNOSTIC
	if (pp->pr_itemsperpage == 0)
		panic("pool_get: pool '%s': pr_itemsperpage is zero, "
		    "pool not initialized?", pp->pr_wchan);
	if ((cpu_intr_p() || cpu_softintr_p()) && pp->pr_ipl == IPL_NONE &&
	    !cold && panicstr == NULL)
		panic("pool '%s' is IPL_NONE, but called from "
		    "interrupt context\n", pp->pr_wchan);
#endif
	if (flags & PR_WAITOK) {
		ASSERT_SLEEPABLE();
	}

	mutex_enter(&pp->pr_lock);
 startover:
	/*
	 * Check to see if we've reached the hard limit.  If we have,

Line 761 pool_get(struct pool *pp, int flags)

	 */
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
		mutex_exit(&pp->pr_lock);
		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
	}
#endif

Line 772 pool_get(struct pool *pp, int flags)

		 * back to the pool, unlock, call the hook, re-lock,
		 * and check the hardlimit condition again.
		 */
		mutex_exit(&pp->pr_lock);
		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
		mutex_enter(&pp->pr_lock);
		if (pp->pr_nout < pp->pr_hardlimit)
			goto startover;
	}

Line 785 pool_get(struct pool *pp, int flags)

		 * it be?
		 */
		pp->pr_flags |= PR_WANTED;
		cv_wait(&pp->pr_cv, &pp->pr_lock);
		goto startover;
	}

Line 799 pool_get(struct pool *pp, int flags)

		pp->pr_nfail++;

		mutex_exit(&pp->pr_lock);
		return (NULL);
	}

Line 810 pool_get(struct pool *pp, int flags)

	 * has no items in its bucket.
	 */
	if ((ph = pp->pr_curpage) == NULL) {
		int error;

#ifdef DIAGNOSTIC
		if (pp->pr_nitems != 0) {
			mutex_exit(&pp->pr_lock);
			printf("pool_get: %s: curpage NULL, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_get: nitems inconsistent");
		}
#endif

Line 826 pool_get(struct pool *pp, int flags)

		 * Release the pool lock, as the back-end page allocator
		 * may block.
		 */
		error = pool_grow(pp, flags);
		if (error != 0) {
			/*
			 * We were unable to allocate a page or item
			 * header, but we released the lock during

Line 837 pool_get(struct pool *pp, int flags)

			if (pp->pr_curpage != NULL)
				goto startover;

			pp->pr_nfail++;
			mutex_exit(&pp->pr_lock);
			return (NULL);
		}

		/* Start the allocation process over. */
		goto startover;
	}
	if (pp->pr_roflags & PR_NOTOUCH) {
#ifdef DIAGNOSTIC
		if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
			mutex_exit(&pp->pr_lock);
			panic("pool_get: %s: page empty", pp->pr_wchan);
		}
#endif
		v = pr_item_notouch_get(pp, ph);
	} else {
		v = pi = LIST_FIRST(&ph->ph_itemlist);
		if (__predict_false(v == NULL)) {
			mutex_exit(&pp->pr_lock);
			panic("pool_get: %s: page empty", pp->pr_wchan);
		}
#ifdef DIAGNOSTIC
		if (__predict_false(pp->pr_nitems == 0)) {
			mutex_exit(&pp->pr_lock);
			printf("pool_get: %s: items on itemlist, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_get: nitems inconsistent");
		}
#endif

#ifdef DIAGNOSTIC
		if (__predict_false(pi->pi_magic != PI_MAGIC)) {
			panic("pool_get(%s): free list modified: "
			    "magic=%x; page %p; item addr %p\n",
			    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
		}
#endif

		/*
		 * Remove from item list.
		 */
		LIST_REMOVE(pi, pi_list);
	}
	pp->pr_nitems--;
	pp->pr_nout++;
	if (ph->ph_nmissing == 0) {

Line 889 pool_get(struct pool *pp, int flags)

			panic("pool_get: nidle inconsistent");
#endif
		pp->pr_nidle--;

		/*
		 * This page was previously empty.  Move it to the list of
		 * partially-full pages.  This page is already curpage.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
	}
	ph->ph_nmissing++;
	if (ph->ph_nmissing == pp->pr_itemsperpage) {
#ifdef DIAGNOSTIC
		if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
		    !LIST_EMPTY(&ph->ph_itemlist))) {
			mutex_exit(&pp->pr_lock);
			panic("pool_get: %s: nmissing inconsistent",
			    pp->pr_wchan);
		}
#endif
		/*
		 * This page is now full.  Move it to the full list
		 * and select a new current page.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
		pool_update_curpage(pp);
	}

	pp->pr_nget++;

Line 930 pool_get(struct pool *pp, int flags)

	 */
	}

	mutex_exit(&pp->pr_lock);
	KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
	FREECHECK_OUT(&pp->pr_freecheck, v);
	return (v);
}
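
/*
 * Illustrative sketch, not part of the original file: arming the limits
 * and hooks that pool_get() consults above.  The names, numbers and the
 * mb_drain() callback are hypothetical.
 */
#if 0	/* example only */
static void mb_drain(void *arg, int flags);

void
mbpool_limits_init(struct pool *pp)
{
	/* Keep at least 16 items on hand; pool_catchup() refills to this. */
	pool_setlowat(pp, 16);
	/* Sleep or fail past 1024 outstanding items, warn once a minute. */
	pool_sethardlimit(pp, 1024, "mbpool limit reached", 60);
	/* Called before sleeping/failing, to coax memory back to the pool. */
	pool_set_drain_hook(pp, mb_drain, NULL);
}
#endif	/* example only */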

Line 940 pool_get(struct pool *pp, int flags)

 * Internal version of pool_put().  Pool is already locked/entered.
 */
static void
pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
{
	struct pool_item *pi = v;
	struct pool_item_header *ph;

	KASSERT(mutex_owned(&pp->pr_lock));
	FREECHECK_IN(&pp->pr_freecheck, v);
	LOCKDEBUG_MEM_CHECK(v, pp->pr_size);

#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout == 0)) {

Line 957 pool_do_put(struct pool *pp, void *v)

	}
#endif

	if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
		panic("pool_put: %s: page header missing", pp->pr_wchan);
	}

	/*
	 * Return to item list.
	 */
	if (pp->pr_roflags & PR_NOTOUCH) {
		pr_item_notouch_put(pp, ph, v);
	} else {
#ifdef DIAGNOSTIC
		pi->pi_magic = PI_MAGIC;
#endif
#ifdef DEBUG
		{
			int i, *ip = v;

			for (i = 0; i < pp->pr_size / sizeof(int); i++) {
				*ip++ = PI_MAGIC;
			}
		}
#endif

		LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
	}
	KDASSERT(ph->ph_nmissing != 0);
	ph->ph_nmissing--;
	pp->pr_nput++;
	pp->pr_nitems++;

Line 994 pool_do_put(struct pool *pp, void *v)

	if (pp->pr_flags & PR_WANTED) {
		pp->pr_flags &= ~PR_WANTED;
		cv_broadcast(&pp->pr_cv);
	}

	/*
	 * If this page is now empty, do one of two things:
	 *
	 *	(1) If we have more pages than the page high water mark,
	 *	    free the page back to the system.  ONLY CONSIDER
	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
	 *	    CLAIM.
	 *
	 *	(2) Otherwise, move the page to the empty page list.
	 *
	 * Either way, select a new current page (so we use a partially-full
	 * page if one is available).
	 */
	if (ph->ph_nmissing == 0) {
		pp->pr_nidle++;
		if (pp->pr_npages > pp->pr_minpages &&
		    pp->pr_npages > pp->pr_maxpages) {
			pr_rmpage(pp, ph, pq);
		} else {
			LIST_REMOVE(ph, ph_pagelist);
			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);

			/*
			 * Update the timestamp on the page.  A page must
			 * be idle for some period of time before it can
			 * be reclaimed by the pagedaemon.  This minimizes
			 * ping-pong'ing for memory.
			 *
			 * note for 64-bit time_t: truncating to 32-bit is not
			 * a problem for our usage.
			 */
			ph->ph_time = time_uptime;
		}
		pool_update_curpage(pp);
	}

	/*
	 * If the page was previously completely full, move it to the
	 * partially-full list and make it the current page.  The next
	 * allocation will get the item from this page, instead of
	 * further fragmenting the pool.
	 */
	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
		pp->pr_curpage = ph;
	}
}

void
pool_put(struct pool *pp, void *v)
{
	struct pool_pagelist pq;

	LIST_INIT(&pq);

	mutex_enter(&pp->pr_lock);
	pool_do_put(pp, v, &pq);
	mutex_exit(&pp->pr_lock);

	pr_pagelist_free(pp, &pq);
}

/*
 * pool_grow: grow a pool by a page.
 *
 * => called with pool locked.
 * => unlock and relock the pool.
 * => return with pool locked.
 */

static int
pool_grow(struct pool *pp, int flags)
{
	struct pool_item_header *ph = NULL;
	char *cp;

	mutex_exit(&pp->pr_lock);
	cp = pool_allocator_alloc(pp, flags);
	if (__predict_true(cp != NULL)) {
		ph = pool_alloc_item_header(pp, cp, flags);
	}
	if (__predict_false(cp == NULL || ph == NULL)) {
		if (cp != NULL) {
			pool_allocator_free(pp, cp);
		}
		mutex_enter(&pp->pr_lock);
		return ENOMEM;
	}

	mutex_enter(&pp->pr_lock);
	pool_prime_page(pp, cp, ph);
	pp->pr_npagealloc++;
	return 0;
}

/*
 * Add N items to the pool.
 */
int
pool_prime(struct pool *pp, int n)
{
	int newpages;
	int error = 0;

	mutex_enter(&pp->pr_lock);

	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	while (newpages-- > 0) {
		error = pool_grow(pp, PR_NOWAIT);
		if (error) {
			break;
		}
		pp->pr_minpages++;
	}

	if (pp->pr_minpages >= pp->pr_maxpages)
		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */

	mutex_exit(&pp->pr_lock);
	return error;
}
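
/*
 * Illustrative sketch, not part of the original file: pre-filling a
 * pool at attach time so early allocations cannot fail.  The pool and
 * count are hypothetical.
 */
#if 0	/* example only */
void
frob_prime_early(void)
{
	/*
	 * pool_prime() rounds 100 items up to whole pages and raises
	 * pr_minpages, so the primed pages stay resident.
	 */
	if (pool_prime(&frob_pool, 100) != 0)
		printf("frob: could not pre-allocate items\n");
}
#endif	/* example only */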
|
|
/*
 * Add a page worth of items to the pool.
 *
 * Note, we must be called with the pool descriptor LOCKED.
 */
static void
pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
{
	struct pool_item *pi;
	void *cp = storage;
	const unsigned int align = pp->pr_align;
	const unsigned int ioff = pp->pr_itemoffset;
	int n;

	KASSERT(mutex_owned(&pp->pr_lock));

#ifdef DIAGNOSTIC
	if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
	    ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
#endif

	/*
	 * Insert page header.
	 */
	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
	LIST_INIT(&ph->ph_itemlist);
	ph->ph_page = storage;
	ph->ph_nmissing = 0;
	ph->ph_time = time_uptime;
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);

	pp->pr_nidle++;

	/*
	 * Color this page.
	 */
	ph->ph_off = pp->pr_curcolor;
	cp = (char *)cp + ph->ph_off;
	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
		pp->pr_curcolor = 0;

	/*
	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
	 */
	if (ioff != 0)
		cp = (char *)cp + align - ioff;

	KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);

	/*
	 * Insert remaining chunks on the bucket list.
	 */
	n = pp->pr_itemsperpage;
	pp->pr_nitems += n;

	if (pp->pr_roflags & PR_NOTOUCH) {
		pr_item_notouch_init(pp, ph);
	} else {
		while (n--) {
			pi = (struct pool_item *)cp;

			KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);

			/* Insert on page list */
			LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
#ifdef DIAGNOSTIC
			pi->pi_magic = PI_MAGIC;
#endif
			cp = (char *)cp + pp->pr_size;

			KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
		}
	}

	/*
	 * If the pool was depleted, point at the new page.
	 */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (++pp->pr_npages > pp->pr_hiwat)
		pp->pr_hiwat = pp->pr_npages;
}

/*
 * Used by pool_get() when nitems drops below the low water mark.  This
 * is used to catch up pr_nitems with the low water mark.
 *
 * Note 1, we never wait for memory here, we let the caller decide what to do.
 *
 * Note 2, we must be called with the pool already locked, and we return
 * with it locked.
 */
static int
pool_catchup(struct pool *pp)
{
	int error = 0;

	while (POOL_NEEDS_CATCHUP(pp)) {
		error = pool_grow(pp, PR_NOWAIT);
		if (error) {
			break;
		}
	}
	return error;
}

static void
pool_update_curpage(struct pool *pp)
{

	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
	if (pp->pr_curpage == NULL) {
		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
	}
	KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
	    (pp->pr_curpage != NULL && pp->pr_nitems > 0));
}
|
|
void
pool_setlowat(struct pool *pp, int n)
{

	mutex_enter(&pp->pr_lock);

	pp->pr_minitems = n;
	pp->pr_minpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	/* Make sure we're caught up with the newly-set low water mark. */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	mutex_exit(&pp->pr_lock);
}

void
pool_sethiwat(struct pool *pp, int n)
{

	mutex_enter(&pp->pr_lock);

	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	mutex_exit(&pp->pr_lock);
}

void
pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
{

	mutex_enter(&pp->pr_lock);

	pp->pr_hardlimit = n;
	pp->pr_hardlimit_warning = warnmess;
	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;

	/*
	 * In-line version of pool_sethiwat(), because we don't want to
	 * release the lock.
	 */
	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	mutex_exit(&pp->pr_lock);
}
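/*
 * Example (illustrative sketch; "ex_pool" is hypothetical): the water
 * marks and the hard limit are expressed in items and rounded up to
 * whole pages internally; the ratecap is in seconds between warnings:
 *
 *	pool_setlowat(&ex_pool, 16);
 *	pool_sethiwat(&ex_pool, 128);
 *	pool_sethardlimit(&ex_pool, 1024,
 *	    "WARNING: ex_pool limit reached", 60);
 */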
|
|
/*
 * Release all complete pages that have not been used recently.
 *
 * Must not be called from interrupt context.
 */
int
pool_reclaim(struct pool *pp)
{
	struct pool_item_header *ph, *phnext;
	struct pool_pagelist pq;
	uint32_t curtime;
	bool klock;
	int rv;

	KASSERT(!cpu_intr_p() && !cpu_softintr_p());

	if (pp->pr_drain_hook != NULL) {
		/*
		 * The drain hook must be called with the pool unlocked.
		 */
		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
	}

	/*
	 * XXXSMP Because we do not want to cause non-MPSAFE code
	 * to block.
	 */
	if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
	    pp->pr_ipl == IPL_SOFTSERIAL) {
		KERNEL_LOCK(1, NULL);
		klock = true;
	} else
		klock = false;

	/* Reclaim items from the pool's cache (if any). */
	if (pp->pr_cache != NULL)
		pool_cache_invalidate(pp->pr_cache);

	if (mutex_tryenter(&pp->pr_lock) == 0) {
		if (klock) {
			KERNEL_UNLOCK_ONE(NULL);
		}
		return (0);
	}

	LIST_INIT(&pq);

	curtime = time_uptime;

	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
		phnext = LIST_NEXT(ph, ph_pagelist);

		/* Check our minimum page claim */
		if (pp->pr_npages <= pp->pr_minpages)
			break;

		KASSERT(ph->ph_nmissing == 0);
		if (curtime - ph->ph_time < pool_inactive_time)
			continue;

		/*
		 * If freeing this page would put us below
		 * the low water mark, stop now.
		 */
		if ((pp->pr_nitems - pp->pr_itemsperpage) <
		    pp->pr_minitems)
			break;

		pr_rmpage(pp, ph, &pq);
	}

	mutex_exit(&pp->pr_lock);

	if (LIST_EMPTY(&pq))
		rv = 0;
	else {
		pr_pagelist_free(pp, &pq);
		rv = 1;
	}

	if (klock) {
		KERNEL_UNLOCK_ONE(NULL);
	}

	return (rv);
}
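/*
 * Usage note (illustrative): the return value says whether any pages
 * were actually released, so a memory-pressure loop can stop as soon
 * as one pass makes progress:
 *
 *	if (pool_reclaim(&ex_pool) != 0)
 *		return;
 */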
|
|
/*
 * Drain pools, one at a time.  The drained pool is returned within ppp.
 *
 * Note, must never be called from interrupt context.
 */
bool
pool_drain(struct pool **ppp)
{
	bool reclaimed;
	struct pool *pp;

	KASSERT(!TAILQ_EMPTY(&pool_head));

	pp = NULL;

	/* Find next pool to drain, and add a reference. */
	mutex_enter(&pool_head_lock);
	do {
		if (drainpp == NULL) {
			drainpp = TAILQ_FIRST(&pool_head);
		}
		if (drainpp) {
			pp = drainpp;
			drainpp = TAILQ_NEXT(pp, pr_poollist);
		}
		/*
		 * Skip completely idle pools.  We depend on at least
		 * one pool in the system being active.
		 */
	} while (pp == NULL || pp->pr_npages == 0);
	pp->pr_refcnt++;
	mutex_exit(&pool_head_lock);

	/* Drain the cache (if any) and pool. */
	reclaimed = pool_reclaim(pp);

	/* Finally, unlock the pool. */
	mutex_enter(&pool_head_lock);
	pp->pr_refcnt--;
	cv_broadcast(&pool_busy);
	mutex_exit(&pool_head_lock);

	if (ppp != NULL)
		*ppp = pp;

	return reclaimed;
}
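/*
 * Example (illustrative sketch of a caller such as the page daemon):
 * drain one pool per invocation and report the victim:
 *
 *	struct pool *pp = NULL;
 *
 *	if (pool_drain(&pp))
 *		printf("reclaimed idle pages from pool \"%s\"\n",
 *		    pp->pr_wchan);
 */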
|
|
/*
 * Diagnostic helpers.
 */

void
pool_printall(const char *modif, void (*pr)(const char *, ...))
{
	struct pool *pp;

	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
		pool_printit(pp, modif, pr);
	}
}

void
pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
{

	if (pp == NULL) {
		(*pr)("Must specify a pool to print.\n");
		return;
	}

	pool_print1(pp, modif, pr);
}

static void
pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
    void (*pr)(const char *, ...))
{
	struct pool_item_header *ph;
#ifdef DIAGNOSTIC
	struct pool_item *pi;
#endif

	LIST_FOREACH(ph, pl, ph_pagelist) {
		(*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
		    ph->ph_page, ph->ph_nmissing, ph->ph_time);
#ifdef DIAGNOSTIC
		if (!(pp->pr_roflags & PR_NOTOUCH)) {
			LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
				if (pi->pi_magic != PI_MAGIC) {
					(*pr)("\t\t\titem %p, magic 0x%x\n",
					    pi, pi->pi_magic);
				}
			}
		}
#endif
	}
}

static void
pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
{
	struct pool_item_header *ph;
	pool_cache_t pc;
	pcg_t *pcg;
	pool_cache_cpu_t *cc;
	uint64_t cpuhit, cpumiss;
	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
	char c;

	while ((c = *modif++) != '\0') {
		if (c == 'l')
			print_log = 1;
		if (c == 'p')
			print_pagelist = 1;
		if (c == 'c')
			print_cache = 1;
	}

	if ((pc = pp->pr_cache) != NULL) {
		(*pr)("POOL CACHE");
	} else {
		(*pr)("POOL");
	}

	(*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
	    pp->pr_roflags);
	(*pr)("\talloc %p\n", pp->pr_alloc);
	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);

	(*pr)("\tnget %lu, nfail %lu, nput %lu\n",
	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);

	if (print_pagelist == 0)
		goto skip_pagelist;

	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		(*pr)("\n\tempty page list:\n");
	pool_print_pagelist(pp, &pp->pr_emptypages, pr);
	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
		(*pr)("\n\tfull page list:\n");
	pool_print_pagelist(pp, &pp->pr_fullpages, pr);
	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
		(*pr)("\n\tpartial-page list:\n");
	pool_print_pagelist(pp, &pp->pr_partpages, pr);

	if (pp->pr_curpage == NULL)
		(*pr)("\tno current page\n");
	else
		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);

 skip_pagelist:
	if (print_log == 0)
		goto skip_log;

	(*pr)("\n");

 skip_log:

#define PR_GROUPLIST(pcg)						\
	(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);		\
	for (i = 0; i < pcg->pcg_size; i++) {				\
		if (pcg->pcg_objects[i].pcgo_pa !=			\
		    POOL_PADDR_INVALID) {				\
			(*pr)("\t\t\t%p, 0x%llx\n",			\
			    pcg->pcg_objects[i].pcgo_va,		\
			    (unsigned long long)			\
			    pcg->pcg_objects[i].pcgo_pa);		\
		} else {						\
			(*pr)("\t\t\t%p\n",				\
			    pcg->pcg_objects[i].pcgo_va);		\
		}							\
	}

	if (pc != NULL) {
		cpuhit = 0;
		cpumiss = 0;
		for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
			if ((cc = pc->pc_cpus[i]) == NULL)
				continue;
			cpuhit += cc->cc_hits;
			cpumiss += cc->cc_misses;
		}
		(*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
		(*pr)("\tcache layer hits %llu misses %llu\n",
		    pc->pc_hits, pc->pc_misses);
		(*pr)("\tcache layer entry uncontended %llu contended %llu\n",
		    pc->pc_hits + pc->pc_misses - pc->pc_contended,
		    pc->pc_contended);
		(*pr)("\tcache layer empty groups %u full groups %u\n",
		    pc->pc_nempty, pc->pc_nfull);
		if (print_cache) {
			(*pr)("\tfull cache groups:\n");
			for (pcg = pc->pc_fullgroups; pcg != NULL;
			    pcg = pcg->pcg_next) {
				PR_GROUPLIST(pcg);
			}
			(*pr)("\tempty cache groups:\n");
			for (pcg = pc->pc_emptygroups; pcg != NULL;
			    pcg = pcg->pcg_next) {
				PR_GROUPLIST(pcg);
			}
		}
	}
#undef PR_GROUPLIST
}
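/*
 * Usage note (illustrative): pool_printit() is normally reached from the
 * DDB "show pool" command; the modifier string enables the optional
 * sections of pool_print1():
 *
 *	pool_printit(&ex_pool, "p", printf);	with page lists
 *	pool_printit(&ex_pool, "c", printf);	with cache groups
 */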
|
|
static int
pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
{
	struct pool_item *pi;
	void *page;
	int n;

	if ((pp->pr_roflags & PR_NOALIGN) == 0) {
		page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
		if (page != ph->ph_page &&
		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%p:%s): page inconsistency: page %p;"
			    " at page head addr %p (p %p)\n", pp,
			    pp->pr_wchan, ph->ph_page,
			    ph, page);
			return 1;
		}
	}

	if ((pp->pr_roflags & PR_NOTOUCH) != 0)
		return 0;

	for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
	     pi != NULL;
	     pi = LIST_NEXT(pi,pi_list), n++) {

#ifdef DIAGNOSTIC
		if (pi->pi_magic != PI_MAGIC) {
			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%s): free list modified: magic=%x;"
			    " page %p; item ordinal %d; addr %p\n",
			    pp->pr_wchan, pi->pi_magic, ph->ph_page,
			    n, pi);
			panic("pool");
		}
#endif
		if ((pp->pr_roflags & PR_NOALIGN) != 0) {
			continue;
		}
		page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
		if (page == ph->ph_page)
			continue;

		if (label != NULL)
			printf("%s: ", label);
		printf("pool(%p:%s): page inconsistency: page %p;"
		    " item ordinal %d; addr %p (p %p)\n", pp,
		    pp->pr_wchan, ph->ph_page,
		    n, pi, page);
		return 1;
	}
	return 0;
}

int
pool_chk(struct pool *pp, const char *label)
{
	struct pool_item_header *ph;
	int r = 0;

	mutex_enter(&pp->pr_lock);
	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
		r = pool_chk_page(pp, label, ph);
		if (r) {
			goto out;
		}
	}
	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
		r = pool_chk_page(pp, label, ph);
		if (r) {
			goto out;
		}
	}
	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
		r = pool_chk_page(pp, label, ph);
		if (r) {
			goto out;
		}
	}

out:
	mutex_exit(&pp->pr_lock);
	return (r);
}
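/*
 * Usage note (illustrative; the label is arbitrary caller-supplied text):
 * pool_chk() returns nonzero on the first inconsistency found, so debug
 * code can turn a corrupt free list into an immediate panic:
 *
 *	if (pool_chk(&ex_pool, "ex_pool after detach") != 0)
 *		panic("ex_pool corrupt");
 */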
|
|
|
|
/*
 * pool_cache_init:
 *
 *	Initialize a pool cache.
 */
pool_cache_t
pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
    const char *wchan, struct pool_allocator *palloc, int ipl,
    int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
{
	pool_cache_t pc;

	pc = pool_get(&cache_pool, PR_WAITOK);
	if (pc == NULL)
		return NULL;

	pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
	   palloc, ipl, ctor, dtor, arg);

	return pc;
}

/*
 * pool_cache_bootstrap:
 *
 *	Kernel-private version of pool_cache_init().  The caller
 *	provides initial storage.
 */
void
pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
    u_int align_offset, u_int flags, const char *wchan,
    struct pool_allocator *palloc, int ipl,
    int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
    void *arg)
{
	CPU_INFO_ITERATOR cii;
	pool_cache_t pc1;
	struct cpu_info *ci;
	struct pool *pp;

	pp = &pc->pc_pool;
	if (palloc == NULL && ipl == IPL_NONE)
		palloc = &pool_allocator_nointr;
	pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
	mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);

	if (ctor == NULL) {
		ctor = (int (*)(void *, void *, int))nullop;
	}
	if (dtor == NULL) {
		dtor = (void (*)(void *, void *))nullop;
	}

	pc->pc_emptygroups = NULL;
	pc->pc_fullgroups = NULL;
	pc->pc_partgroups = NULL;
	pc->pc_ctor = ctor;
	pc->pc_dtor = dtor;
	pc->pc_arg  = arg;
	pc->pc_hits  = 0;
	pc->pc_misses = 0;
	pc->pc_nempty = 0;
	pc->pc_npart = 0;
	pc->pc_nfull = 0;
	pc->pc_contended = 0;
	pc->pc_refcnt = 0;
	pc->pc_freecheck = NULL;

	if ((flags & PR_LARGECACHE) != 0) {
		pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
		pc->pc_pcgpool = &pcg_large_pool;
	} else {
		pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
		pc->pc_pcgpool = &pcg_normal_pool;
	}

	/* Allocate per-CPU caches. */
	memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
	pc->pc_ncpu = 0;
	if (ncpu < 2) {
		/* XXX For sparc: boot CPU is not attached yet. */
		pool_cache_cpu_init1(curcpu(), pc);
	} else {
		for (CPU_INFO_FOREACH(cii, ci)) {
			pool_cache_cpu_init1(ci, pc);
		}
	}

	/* Add to list of all pools. */
	if (__predict_true(!cold))
		mutex_enter(&pool_head_lock);
	TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
		if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
			break;
	}
	if (pc1 == NULL)
		TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
	else
		TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
	if (__predict_true(!cold))
		mutex_exit(&pool_head_lock);

	membar_sync();
	pp->pr_cache = pc;
}
|
/*
 * pool_cache_destroy:
 *
 *	Destroy a pool cache.
 */
void
pool_cache_destroy(pool_cache_t pc)
{

	pool_cache_bootstrap_destroy(pc);
	pool_put(&cache_pool, pc);
}

/*
 * pool_cache_bootstrap_destroy:
 *
 *	Destroy a pool cache.
 */
void
pool_cache_bootstrap_destroy(pool_cache_t pc)
{
	struct pool *pp = &pc->pc_pool;
	u_int i;

	/* Remove it from the global list. */
	mutex_enter(&pool_head_lock);
	while (pc->pc_refcnt != 0)
		cv_wait(&pool_busy, &pool_head_lock);
	TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
	mutex_exit(&pool_head_lock);

	/* First, invalidate the entire cache. */
	pool_cache_invalidate(pc);

	/* Disassociate it from the pool. */
	mutex_enter(&pp->pr_lock);
	pp->pr_cache = NULL;
	mutex_exit(&pp->pr_lock);

	/* Destroy per-CPU data */
	for (i = 0; i < __arraycount(pc->pc_cpus); i++)
		pool_cache_invalidate_cpu(pc, i);

	/* Finally, destroy it. */
	mutex_destroy(&pc->pc_lock);
	pool_destroy(pp);
}

/*
 * pool_cache_cpu_init1:
 *
 *	Called for each pool_cache whenever a new CPU is attached.
 */
static void
pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
{
	pool_cache_cpu_t *cc;
	int index;

	index = ci->ci_index;

	KASSERT(index < __arraycount(pc->pc_cpus));

	if ((cc = pc->pc_cpus[index]) != NULL) {
		KASSERT(cc->cc_cpuindex == index);
		return;
	}

	/*
	 * The first CPU is 'free'.  This needs to be the case for
	 * bootstrap - we may not be able to allocate yet.
	 */
	if (pc->pc_ncpu == 0) {
		cc = &pc->pc_cpu0;
		pc->pc_ncpu = 1;
	} else {
		mutex_enter(&pc->pc_lock);
		pc->pc_ncpu++;
		mutex_exit(&pc->pc_lock);
		cc = pool_get(&cache_cpu_pool, PR_WAITOK);
	}

	cc->cc_ipl = pc->pc_pool.pr_ipl;
	cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
	cc->cc_cache = pc;
	cc->cc_cpuindex = index;
	cc->cc_hits = 0;
	cc->cc_misses = 0;
	cc->cc_current = __UNCONST(&pcg_dummy);
	cc->cc_previous = __UNCONST(&pcg_dummy);

	pc->pc_cpus[index] = cc;
}

/*
 * pool_cache_cpu_init:
 *
 *	Called whenever a new CPU is attached.
 */
void
pool_cache_cpu_init(struct cpu_info *ci)
{
	pool_cache_t pc;

	mutex_enter(&pool_head_lock);
	TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
		pc->pc_refcnt++;
		mutex_exit(&pool_head_lock);

		pool_cache_cpu_init1(ci, pc);

		mutex_enter(&pool_head_lock);
		pc->pc_refcnt--;
		cv_broadcast(&pool_busy);
	}
	mutex_exit(&pool_head_lock);
}

/*
 * pool_cache_reclaim:
 *
 *	Reclaim memory from a pool cache.
 */
bool
pool_cache_reclaim(pool_cache_t pc)
{

	return pool_reclaim(&pc->pc_pool);
}

static void
pool_cache_destruct_object1(pool_cache_t pc, void *object)
{

	(*pc->pc_dtor)(pc->pc_arg, object);
	pool_put(&pc->pc_pool, object);
}

/*
 * pool_cache_destruct_object:
 *
 *	Force destruction of an object and its release back into
 *	the pool.
 */
void
pool_cache_destruct_object(pool_cache_t pc, void *object)
{

	FREECHECK_IN(&pc->pc_freecheck, object);

	pool_cache_destruct_object1(pc, object);
}

/*
 * pool_cache_invalidate_groups:
 *
 *	Invalidate a chain of groups and destruct all objects.
 */
static void
pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
{
	void *object;
	pcg_t *next;
	int i;

	for (; pcg != NULL; pcg = next) {
		next = pcg->pcg_next;

		for (i = 0; i < pcg->pcg_avail; i++) {
			object = pcg->pcg_objects[i].pcgo_va;
			pool_cache_destruct_object1(pc, object);
		}

		if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
			pool_put(&pcg_large_pool, pcg);
		} else {
			KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
			pool_put(&pcg_normal_pool, pcg);
		}
	}
}

/*
 * pool_cache_invalidate:
 *
 *	Invalidate a pool cache (destruct and release all of the
 *	cached objects).  Does not reclaim objects from the pool.
 *
 *	Note: For pool caches that provide constructed objects, there
 *	is an assumption that another level of synchronization is occurring
 *	between the input to the constructor and the cache invalidation.
 *
 *	Invalidation is a costly process and should not be called from
 *	interrupt context.
 */
void
pool_cache_invalidate(pool_cache_t pc)
{
	uint64_t where;
	pcg_t *full, *empty, *part;

	KASSERT(!cpu_intr_p() && !cpu_softintr_p());

	if (ncpu < 2 || !mp_online) {
		/*
		 * We might be called early enough in the boot process
		 * for the CPU data structures to not be fully initialized.
		 * In this case, transfer the content of the local CPU's
		 * cache back into global cache as only this CPU is currently
		 * running.
		 */
		pool_cache_transfer(pc);
	} else {
		/*
		 * Signal all CPUs that they must transfer their local
		 * cache back to the global pool then wait for the xcall to
		 * complete.
		 */
		where = xc_broadcast(0, (xcfunc_t)pool_cache_transfer,
		    pc, NULL);
		xc_wait(where);
	}

	/* Empty pool caches, then invalidate objects */
	mutex_enter(&pc->pc_lock);
	full = pc->pc_fullgroups;
	empty = pc->pc_emptygroups;
	part = pc->pc_partgroups;
	pc->pc_fullgroups = NULL;
	pc->pc_emptygroups = NULL;
	pc->pc_partgroups = NULL;
	pc->pc_nfull = 0;
	pc->pc_nempty = 0;
	pc->pc_npart = 0;
	mutex_exit(&pc->pc_lock);

	pool_cache_invalidate_groups(pc, full);
	pool_cache_invalidate_groups(pc, empty);
	pool_cache_invalidate_groups(pc, part);
}

/*
 * pool_cache_invalidate_cpu:
 *
 *	Invalidate all CPU-bound cached objects in pool cache, the CPU being
 *	identified by its associated index.
 *	It is caller's responsibility to ensure that no operation is
 *	taking place on this pool cache while doing this invalidation.
 *	WARNING: as no inter-CPU locking is enforced, trying to invalidate
 *	pool cached objects from a CPU different from the one currently running
 *	may result in an undefined behaviour.
 */
static void
pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
{
	pool_cache_cpu_t *cc;
	pcg_t *pcg;

	if ((cc = pc->pc_cpus[index]) == NULL)
		return;

	if ((pcg = cc->cc_current) != &pcg_dummy) {
		pcg->pcg_next = NULL;
		pool_cache_invalidate_groups(pc, pcg);
	}
	if ((pcg = cc->cc_previous) != &pcg_dummy) {
		pcg->pcg_next = NULL;
		pool_cache_invalidate_groups(pc, pcg);
	}
	if (cc != &pc->pc_cpu0)
		pool_put(&cache_cpu_pool, cc);
}

void
pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
{

	pool_set_drain_hook(&pc->pc_pool, fn, arg);
}

void
pool_cache_setlowat(pool_cache_t pc, int n)
{

	pool_setlowat(&pc->pc_pool, n);
}

void
pool_cache_sethiwat(pool_cache_t pc, int n)
{

	pool_sethiwat(&pc->pc_pool, n);
}

void
pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
{

	pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
}

static bool __noinline
pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
		    paddr_t *pap, int flags)
{
	pcg_t *pcg, *cur;
	uint64_t ncsw;
	pool_cache_t pc;
	void *object;

	KASSERT(cc->cc_current->pcg_avail == 0);
	KASSERT(cc->cc_previous->pcg_avail == 0);

	pc = cc->cc_cache;
	cc->cc_misses++;

	/*
	 * Nothing was available locally.  Try and grab a group
	 * from the cache.
	 */
	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
		ncsw = curlwp->l_ncsw;
		mutex_enter(&pc->pc_lock);
		pc->pc_contended++;

		/*
		 * If we context switched while locking, then
		 * our view of the per-CPU data is invalid:
		 * retry.
		 */
		if (curlwp->l_ncsw != ncsw) {
			mutex_exit(&pc->pc_lock);
			return true;
		}
	}

	if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) {
		/*
		 * If there's a full group, release our empty
		 * group back to the cache.  Install the full
		 * group as cc_current and return.
		 */
		if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) {
			KASSERT(cur->pcg_avail == 0);
			cur->pcg_next = pc->pc_emptygroups;
			pc->pc_emptygroups = cur;
			pc->pc_nempty++;
		}
		KASSERT(pcg->pcg_avail == pcg->pcg_size);
		cc->cc_current = pcg;
		pc->pc_fullgroups = pcg->pcg_next;
		pc->pc_hits++;
		pc->pc_nfull--;
		mutex_exit(&pc->pc_lock);
		return true;
	}

	/*
	 * Nothing available locally or in cache.  Take the slow
	 * path: fetch a new object from the pool and construct
	 * it.
	 */
	pc->pc_misses++;
	mutex_exit(&pc->pc_lock);
	splx(s);

	object = pool_get(&pc->pc_pool, flags);
	*objectp = object;
	if (__predict_false(object == NULL))
		return false;

	if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
		pool_put(&pc->pc_pool, object);
		*objectp = NULL;
		return false;
	}

	KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
	    (pc->pc_pool.pr_align - 1)) == 0);

	if (pap != NULL) {
#ifdef POOL_VTOPHYS
		*pap = POOL_VTOPHYS(object);
#else
		*pap = POOL_PADDR_INVALID;
#endif
	}

	FREECHECK_OUT(&pc->pc_freecheck, object);
	return false;
}

/*
 * pool_cache_get{,_paddr}:
 *
 *	Get an object from a pool cache (optionally returning
 *	the physical address of the object).
 */
void *
pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
{
	pool_cache_cpu_t *cc;
	pcg_t *pcg;
	void *object;
	int s;

	KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) ||
	    (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL),
	    "pool '%s' is IPL_NONE, but called from interrupt context\n",
	    pc->pc_pool.pr_wchan);

	if (flags & PR_WAITOK) {
		ASSERT_SLEEPABLE();
	}

	/* Lock out interrupts and disable preemption. */
	s = splvm();
	while (/* CONSTCOND */ true) {
		/* Try and allocate an object from the current group. */
		cc = pc->pc_cpus[curcpu()->ci_index];
		KASSERT(cc->cc_cache == pc);
		pcg = cc->cc_current;
		if (__predict_true(pcg->pcg_avail > 0)) {
			object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
			if (__predict_false(pap != NULL))
				*pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
#if defined(DIAGNOSTIC)
			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
			KASSERT(pcg->pcg_avail < pcg->pcg_size);
			KASSERT(object != NULL);
#endif
			cc->cc_hits++;
			splx(s);
			FREECHECK_OUT(&pc->pc_freecheck, object);
			return object;
		}

		/*
		 * That failed.  If the previous group isn't empty, swap
		 * it with the current group and allocate from there.
		 */
		pcg = cc->cc_previous;
		if (__predict_true(pcg->pcg_avail > 0)) {
			cc->cc_previous = cc->cc_current;
			cc->cc_current = pcg;
			continue;
		}

		/*
		 * Can't allocate from either group: try the slow path.
		 * If get_slow() allocated an object for us, or if
		 * no more objects are available, it will return false.
		 * Otherwise, we need to retry.
		 */
		if (!pool_cache_get_slow(cc, s, &object, pap, flags))
			break;
	}

	return object;
}

static bool __noinline
pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
{
	pcg_t *pcg, *cur;
	uint64_t ncsw;
	pool_cache_t pc;

	KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
	KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);

	pc = cc->cc_cache;
	pcg = NULL;
	cc->cc_misses++;

	/*
	 * If there are no empty groups in the cache then allocate one
	 * while still unlocked.
	 */
	if (__predict_false(pc->pc_emptygroups == NULL)) {
		if (__predict_true(!pool_cache_disable)) {
			pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
		}
		if (__predict_true(pcg != NULL)) {
			pcg->pcg_avail = 0;
			pcg->pcg_size = pc->pc_pcgsize;
		}
	}

	/* Lock the cache. */
	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
		ncsw = curlwp->l_ncsw;
		mutex_enter(&pc->pc_lock);
		pc->pc_contended++;

		/*
		 * If we context switched while locking, then our view of
		 * the per-CPU data is invalid: retry.
		 */
		if (__predict_false(curlwp->l_ncsw != ncsw)) {
			mutex_exit(&pc->pc_lock);
			if (pcg != NULL) {
				pool_put(pc->pc_pcgpool, pcg);
			}
			return true;
		}
	}

	/* If there are no empty groups in the cache then allocate one. */
	if (pcg == NULL && pc->pc_emptygroups != NULL) {
		pcg = pc->pc_emptygroups;
		pc->pc_emptygroups = pcg->pcg_next;
		pc->pc_nempty--;
	}

	/*
	 * If there's an empty group, release our full group back
	 * to the cache.  Install the empty group to the local CPU
	 * and return.
	 */
	if (pcg != NULL) {
		KASSERT(pcg->pcg_avail == 0);
		if (__predict_false(cc->cc_previous == &pcg_dummy)) {
			cc->cc_previous = pcg;
		} else {
			cur = cc->cc_current;
			if (__predict_true(cur != &pcg_dummy)) {
				KASSERT(cur->pcg_avail == cur->pcg_size);
				cur->pcg_next = pc->pc_fullgroups;
				pc->pc_fullgroups = cur;
				pc->pc_nfull++;
			}
			cc->cc_current = pcg;
		}
		pc->pc_hits++;
		mutex_exit(&pc->pc_lock);
		return true;
	}

	/*
	 * Nothing available locally or in cache, and we didn't
	 * allocate an empty group.  Take the slow path and destroy
	 * the object here and now.
	 */
	pc->pc_misses++;
	mutex_exit(&pc->pc_lock);
	splx(s);
	pool_cache_destruct_object(pc, object);

	return false;
}

/*
 * pool_cache_put{,_paddr}:
 *
 *	Put an object back to the pool cache (optionally caching the
 *	physical address of the object).
 */
void
pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
{
	pool_cache_cpu_t *cc;
	pcg_t *pcg;
	int s;

	KASSERT(object != NULL);
	FREECHECK_IN(&pc->pc_freecheck, object);

	/* Lock out interrupts and disable preemption. */
	s = splvm();
	while (/* CONSTCOND */ true) {
		/* If the current group isn't full, release it there. */
		cc = pc->pc_cpus[curcpu()->ci_index];
		KASSERT(cc->cc_cache == pc);
		pcg = cc->cc_current;
		if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
			pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
			pcg->pcg_avail++;
			cc->cc_hits++;
			splx(s);
			return;
		}

		/*
		 * That failed.  If the previous group isn't full, swap
		 * it with the current group and try again.
		 */
		pcg = cc->cc_previous;
		if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
			cc->cc_previous = cc->cc_current;
			cc->cc_current = pcg;
			continue;
		}

		/*
		 * Can't free to either group: try the slow path.
		 * If put_slow() releases the object for us, it
		 * will return false.  Otherwise we need to retry.
		 */
		if (!pool_cache_put_slow(cc, s, object))
			break;
	}
}
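/*
 * Example (illustrative): most callers use the pool_cache_get() and
 * pool_cache_put() wrappers, which are the _paddr variants with the
 * physical-address argument elided:
 *
 *	struct ex_item *ei;
 *
 *	ei = pool_cache_get(ex_cache, PR_WAITOK);
 *	pool_cache_put(ex_cache, ei);
 */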
|
|
/*
 * pool_cache_transfer:
 *
 *	Transfer objects from the per-CPU cache to the global cache.
 *	Run within a cross-call thread.
 */
static void
pool_cache_transfer(pool_cache_t pc)
{
	pool_cache_cpu_t *cc;
	pcg_t *prev, *cur, **list;
	int s;

	s = splvm();
	mutex_enter(&pc->pc_lock);
	cc = pc->pc_cpus[curcpu()->ci_index];
	cur = cc->cc_current;
	cc->cc_current = __UNCONST(&pcg_dummy);
	prev = cc->cc_previous;
	cc->cc_previous = __UNCONST(&pcg_dummy);
	if (cur != &pcg_dummy) {
		if (cur->pcg_avail == cur->pcg_size) {
			list = &pc->pc_fullgroups;
			pc->pc_nfull++;
		} else if (cur->pcg_avail == 0) {
			list = &pc->pc_emptygroups;
			pc->pc_nempty++;
		} else {
			list = &pc->pc_partgroups;
			pc->pc_npart++;
		}
		cur->pcg_next = *list;
		*list = cur;
	}
	if (prev != &pcg_dummy) {
		if (prev->pcg_avail == prev->pcg_size) {
			list = &pc->pc_fullgroups;
			pc->pc_nfull++;
		} else if (prev->pcg_avail == 0) {
			list = &pc->pc_emptygroups;
			pc->pc_nempty++;
		} else {
			list = &pc->pc_partgroups;
			pc->pc_npart++;
		}
		prev->pcg_next = *list;
		*list = prev;
	}
	mutex_exit(&pc->pc_lock);
	splx(s);
}

/*
 * Pool backend allocators.
 */
void	*pool_page_alloc(struct pool *, int);
void	pool_page_free(struct pool *, void *);

#ifdef POOL_SUBPAGE
struct pool_allocator pool_allocator_kmem_fullpage = {
	.pa_alloc = pool_page_alloc,
	.pa_free = pool_page_free,
	.pa_pagesz = 0
};
#else
struct pool_allocator pool_allocator_kmem = {
	.pa_alloc = pool_page_alloc,
	.pa_free = pool_page_free,
	.pa_pagesz = 0
};
#endif

#ifdef POOL_SUBPAGE
struct pool_allocator pool_allocator_nointr_fullpage = {
	.pa_alloc = pool_page_alloc,
	.pa_free = pool_page_free,
	.pa_pagesz = 0
};
#else
struct pool_allocator pool_allocator_nointr = {
	.pa_alloc = pool_page_alloc,
	.pa_free = pool_page_free,
	.pa_pagesz = 0
};
#endif

#ifdef POOL_SUBPAGE
void	*pool_subpage_alloc(struct pool *, int);
void	pool_subpage_free(struct pool *, void *);

struct pool_allocator pool_allocator_kmem = {
	.pa_alloc = pool_subpage_alloc,
	.pa_free = pool_subpage_free,
	.pa_pagesz = POOL_SUBPAGE
};

struct pool_allocator pool_allocator_nointr = {
	.pa_alloc = pool_subpage_alloc,
	.pa_free = pool_subpage_free,
	.pa_pagesz = POOL_SUBPAGE
};
#endif /* POOL_SUBPAGE */
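/*
 * Example (illustrative sketch; ex_alloc/ex_free are hypothetical): a
 * client with unusual backing storage can supply its own allocator and
 * pass it to pool_init() or pool_cache_init():
 *
 *	static struct pool_allocator ex_allocator = {
 *		.pa_alloc = ex_alloc,
 *		.pa_free = ex_free,
 *		.pa_pagesz = 0,
 *	};
 *
 * A pa_pagesz of 0 selects the default page size.
 */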
|
|
static void *
pool_allocator_alloc(struct pool *pp, int flags)
{
	struct pool_allocator *pa = pp->pr_alloc;
	void *res;

	res = (*pa->pa_alloc)(pp, flags);
	if (res == NULL && (flags & PR_WAITOK) == 0) {
		/*
		 * We only run the drain hook here if PR_NOWAIT.
		 * In other cases, the hook will be run in
		 * pool_reclaim().
		 */
		if (pp->pr_drain_hook != NULL) {
			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
			res = (*pa->pa_alloc)(pp, flags);
		}
	}
	return res;
}

static void
pool_allocator_free(struct pool *pp, void *v)
{
	struct pool_allocator *pa = pp->pr_alloc;

	(*pa->pa_free)(pp, v);
}

void *
pool_page_alloc(struct pool *pp, int flags)
{
	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
	vmem_addr_t va;
	int ret;

	ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz,
	    vflags | VM_INSTANTFIT, &va);

	return ret ? NULL : (void *)va;
}

void
pool_page_free(struct pool *pp, void *v)
{

	uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
}

static void *
pool_page_alloc_meta(struct pool *pp, int flags)
{
	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
	vmem_addr_t va;
	int ret;

	ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
	    vflags | VM_INSTANTFIT, &va);

	return ret ? NULL : (void *)va;
}

static void
pool_page_free_meta(struct pool *pp, void *v)
{

	vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
}
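/*
 * Usage note (illustrative): PR_WAITOK maps to VM_SLEEP and PR_NOWAIT to
 * VM_NOSLEEP, so only PR_WAITOK callers may block in the arena:
 *
 *	void *page = pool_page_alloc(&ex_pool, PR_NOWAIT);
 *
 *	if (page != NULL)
 *		pool_page_free(&ex_pool, page);
 */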
|
|
#ifdef POOL_SUBPAGE
void *
pool_subpage_alloc(struct pool *pp, int flags)
{
	return pool_get(&psppool, flags);
}

void
pool_subpage_free(struct pool *pp, void *v)
{
	pool_put(&psppool, v);
}

#endif /* POOL_SUBPAGE */

#if defined(DDB)
static bool
pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
{

	return (uintptr_t)ph->ph_page <= addr &&
	    addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
}

static bool
pool_in_item(struct pool *pp, void *item, uintptr_t addr)
{

	return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
}

static bool
pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
{
	int i;

	if (pcg == NULL) {
		return false;
	}
	for (i = 0; i < pcg->pcg_avail; i++) {
		if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
			return true;
		}
	}
	return false;
}

static bool
pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
{

	if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
		unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
		pool_item_bitmap_t *bitmap =
		    ph->ph_bitmap + (idx / BITMAP_SIZE);
		pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);

		return (*bitmap & mask) == 0;
	} else {
		struct pool_item *pi;

		LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
			if (pool_in_item(pp, pi, addr)) {
				return false;
			}
		}
		return true;
	}
}

void
pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
{
	struct pool *pp;

	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
		struct pool_item_header *ph;
		uintptr_t item;
		bool allocated = true;
		bool incache = false;
		bool incpucache = false;
		char cpucachestr[32];

		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
			LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
				if (pool_in_page(pp, ph, addr)) {
					goto found;
				}
			}
			LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
				if (pool_in_page(pp, ph, addr)) {
					allocated =
					    pool_allocated(pp, ph, addr);
					goto found;
				}
			}
			LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
				if (pool_in_page(pp, ph, addr)) {
					allocated = false;
					goto found;
				}
			}
			continue;
		} else {
			ph = pr_find_pagehead_noalign(pp, (void *)addr);
			if (ph == NULL || !pool_in_page(pp, ph, addr)) {
				continue;
			}
			allocated = pool_allocated(pp, ph, addr);
		}
found:
		if (allocated && pp->pr_cache) {
			pool_cache_t pc = pp->pr_cache;
			struct pool_cache_group *pcg;
			int i;

			for (pcg = pc->pc_fullgroups; pcg != NULL;
			    pcg = pcg->pcg_next) {
				if (pool_in_cg(pp, pcg, addr)) {
					incache = true;
					goto print;
				}
			}
			for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
				pool_cache_cpu_t *cc;

				if ((cc = pc->pc_cpus[i]) == NULL) {
					continue;
				}
				if (pool_in_cg(pp, cc->cc_current, addr) ||
				    pool_in_cg(pp, cc->cc_previous, addr)) {
					struct cpu_info *ci =
					    cpu_lookup(i);

					incpucache = true;
					snprintf(cpucachestr,
					    sizeof(cpucachestr),
					    "cached by CPU %u",
					    ci->ci_index);
					goto print;
				}
			}
		}
print:
		item = (uintptr_t)ph->ph_page + ph->ph_off;
		item = item + rounddown(addr - item, pp->pr_size);
		(*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
		    (void *)addr, item, (size_t)(addr - item),
		    pp->pr_wchan,
		    incpucache ? cpucachestr :
		    incache ? "cached" : allocated ? "allocated" : "free");
	}
}
#endif /* defined(DDB) */