version 1.16, 1998/12/16 04:28:23 | version 1.48, 2000/12/11 05:22:56

/*	$NetBSD$	*/

/*-
 * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * ...
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_pool.h"
#include "opt_poollog.h"
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/pool.h>
#include <sys/syslog.h>

#include <uvm/uvm.h>

/*
 * Pool resource management utility.
 *
 * ...
 *
 * headed by `ph_itemlist' in each page header.  The memory for building
 * the page list is either taken from the allocated pages themselves (for
 * small pool items) or taken from an internal pool of page headers (`phpool').
 */

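/*
 * Worked example (illustrative figures, not from this revision): with
 * 4 KB pages and 128-byte items, 128 < 4096/16 holds, so the page header
 * lives at the end of the page (PR_PHINPAGE) and each page yields
 * (4096 - ALIGN(sizeof(struct pool_item_header))) / 128 = 31 items for
 * any header size up to 128 bytes.
 */
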
/* List of all pools */
Line 74 static struct pool phpool;

int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool	*drainpp;

/* This spin lock protects both pool_head and drainpp. */
struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;

struct pool_item_header {
	/* Page headers */
Line 94 struct pool_item_header {

struct pool_item {
#ifdef DIAGNOSTIC
	int pi_magic;
#endif
#define	PI_MAGIC 0xdeadbeef
	/* Other entries use only this list entry */
	TAILQ_ENTRY(pool_item)	pi_list;
};

#define	PR_HASH_INDEX(pp,addr) \
	(((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))

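/*
 * E.g. with 4 KB pages, pr_pageshift is ffs(4096) - 1 == 12, so an item
 * address is reduced to its page number and masked into one of
 * PR_HASHTABSIZE buckets; the mask form assumes PR_HASHTABSIZE is a
 * power of two.
 */
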
/*
 * Pool cache management.
 *
 * Pool caches provide a way for constructed objects to be cached by the
 * pool subsystem.  This can lead to performance improvements by avoiding
 * needless object construction/destruction; it is deferred until absolutely
 * necessary.
 *
 * Caches are grouped into cache groups.  Each cache group references
 * up to 16 constructed objects.  When a cache allocates an object
 * from the pool, it calls the object's constructor and places it into
 * a cache group.  When a cache group frees an object back to the pool,
 * it first calls the object's destructor.  This allows the object to
 * persist in constructed form while freed to the cache.
 *
 * Multiple caches may exist for each pool.  This allows a single
 * object type to have multiple constructed forms.  The pool references
 * each cache, so that when a pool is drained by the pagedaemon, it can
 * drain each individual cache as well.  Each time a cache is drained,
 * the most idle cache group is freed to the pool in its entirety.
 *
 * Pool caches are laid on top of pools.  By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */

/* The cache group pool. */
static struct pool pcgpool;

/* The pool cache group. */
#define	PCG_NOBJECTS	16
struct pool_cache_group {
	TAILQ_ENTRY(pool_cache_group)
		pcg_list;	/* link in the pool cache's group list */
	u_int	pcg_avail;	/* # available objects */
	/* pointers to the objects */
	void	*pcg_objects[PCG_NOBJECTS];
};

static void	pool_cache_reclaim(struct pool_cache *);

static int	pool_catchup(struct pool *);
static void	pool_prime_page(struct pool *, caddr_t);
static void	*pool_page_alloc(unsigned long, int, int);
static void	pool_page_free(void *, unsigned long, int);

static void pool_print1(struct pool *, const char *,
	void (*)(const char *, ...));

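/*
 * Illustrative sketch only (not part of this revision): a client of the
 * cache API layers a cache over an existing pool.  The "struct foo" type
 * and its foo_ctor()/foo_dtor() functions below are hypothetical.
 */
#if 0
static struct pool foo_pool;
static struct pool_cache foo_cache;

void
foo_example(void)
{
	struct foo *f;

	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
	    0, NULL, NULL, M_DEVBUF);
	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);

	/* The object comes back constructed if the cache can satisfy us. */
	f = pool_cache_get(&foo_cache, PR_WAITOK);
	/* ... use f ... */
	pool_cache_put(&foo_cache, f);
}
#endif
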
/*
 * Pool log entry. An array of these is allocated in pool_create().
 */
struct pool_log {
	const char	*pl_file;
	long		pl_line;
	int		pl_action;
#define	PRLOG_GET	1
#define	PRLOG_PUT	2
	void		*pl_addr;
};

/* Number of entries in pool log buffers */
#ifndef POOL_LOGSIZE
#define	POOL_LOGSIZE	10
#endif

int pool_logsize = POOL_LOGSIZE;

#ifdef DIAGNOSTIC
static __inline void
pr_log(struct pool *pp, void *v, int action, const char *file, long line)
{
	int n = pp->pr_curlogentry;
	struct pool_log *pl;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/* ... */
Line 196 pr_log(pp, v, action, file, line)
}

static void
pr_printlog(struct pool *pp, struct pool_item *pi,
    void (*pr)(const char *, ...))
{
	int i = pp->pr_logsize;
	int n = pp->pr_curlogentry;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Print all entries in this pool's log.
	 */
	while (i-- > 0) {
		struct pool_log *pl = &pp->pr_log[n];
		if (pl->pl_action != 0) {
			if (pi == NULL || pi == pl->pl_addr) {
				(*pr)("\tlog entry %d:\n", i);
				(*pr)("\t\taction = %s, addr = %p\n",
				    pl->pl_action == PRLOG_GET ? "get" : "put",
				    pl->pl_addr);
				(*pr)("\t\tfile: %s at line %lu\n",
				    pl->pl_file, pl->pl_line);
			}
		}
		if (++n >= pp->pr_logsize)
			n = 0;
	}
}

static __inline void
pr_enter(struct pool *pp, const char *file, long line)
{

	if (__predict_false(pp->pr_entered_file != NULL)) {
		printf("pool %s: reentrancy at file %s line %ld\n",
		    pp->pr_wchan, file, line);
		printf("        previous entry at file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
		panic("pr_enter");
	}

	pp->pr_entered_file = file;
	pp->pr_entered_line = line;
}

static __inline void
pr_leave(struct pool *pp)
{

	if (__predict_false(pp->pr_entered_file == NULL)) {
		printf("pool %s not entered?\n", pp->pr_wchan);
		panic("pr_leave");
	}

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;
}

static __inline void
pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
{

	if (pp->pr_entered_file != NULL)
		(*pr)("\n\tcurrently entered from file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
}
#else
#define	pr_log(pp, v, action, file, line)
#define	pr_printlog(pp, pi, pr)
#define	pr_enter(pp, file, line)
#define	pr_leave(pp)
#define	pr_enter_check(pp, pr)
#endif /* DIAGNOSTIC */

/*
 * Return the pool page header based on page address.
 */
static __inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, caddr_t page)
{
	struct pool_item_header *ph;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		return ((struct pool_item_header *)(page + pp->pr_phoffset));

	for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
Line 293 pr_find_pagehead(pp, page)

/*
 * Remove a page from the pool.
 */
static __inline void
pr_rmpage(struct pool *pp, struct pool_item_header *ph)
{

	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pr_rmpage: nidle inconsistent");
		if (pp->pr_nitems < pp->pr_itemsperpage)
			panic("pr_rmpage: nitems inconsistent");
#endif
		pp->pr_nidle--;
	}

	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink a page from the pool and release it.
	 */
	TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
	(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
	pp->pr_npages--;
	pp->pr_npagefree++;

	if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
		int s;
		LIST_REMOVE(ph, ph_hashlist);
		s = splhigh();
		pool_put(&phpool, ph);
		splx(s);
	}

	if (pp->pr_curpage == ph) {
Line 347 pr_rmpage(pp, ph)

/*
 * Allocate and initialize a pool.
 */
struct pool *
pool_create(size_t size, u_int align, u_int ioff, int nitems,
    const char *wchan, size_t pagesz,
    void *(*alloc)(unsigned long, int, int),
    void (*release)(void *, unsigned long, int),
    int mtype)
{
	struct pool *pp;
	int flags;

	pp = (struct pool *)malloc(sizeof(*pp), M_POOL, M_NOWAIT);
	if (pp == NULL)
		return (NULL);

	flags = PR_FREEHEADER;
	pool_init(pp, size, align, ioff, flags, wchan, pagesz,
	    alloc, release, mtype);

Line 381 pool_create(size, align, ioff, nitems, w

/*
 * ...
 * static pools that must be initialized before malloc() is available.
 */
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, size_t pagesz,
    void *(*alloc)(unsigned long, int, int),
    void (*release)(void *, unsigned long, int),
    int mtype)
{
	int off, slack, i;

#ifdef POOL_DIAGNOSTIC
	/*
	 * Always log if POOL_DIAGNOSTIC is defined.
	 */
	if (pool_logsize != 0)
		flags |= PR_LOGGING;
#endif

	/*
	 * Check arguments and construct default values.
	 */
	if (!powerof2(pagesz))
		panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);

	if (alloc == NULL && release == NULL) {
Line 421 pool_init(pp, size, align, ioff, flags,
	if (size < sizeof(struct pool_item))
		size = sizeof(struct pool_item);

	size = ALIGN(size);
	if (size > pagesz)
		panic("pool_init: pool item size (%lu) too large",
		    (u_long)size);

	/*
	 * Initialize the pool structure.
	 */
	TAILQ_INIT(&pp->pr_pagelist);
	TAILQ_INIT(&pp->pr_cachelist);
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_roflags = flags;
	pp->pr_flags = 0;
	pp->pr_size = size;
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_mtype = mtype;
Line 447 pool_init(pp, size, align, ioff, flags,
	pp->pr_pagesz = pagesz;
	pp->pr_pagemask = ~(pagesz - 1);
	pp->pr_pageshift = ffs(pagesz) - 1;
	pp->pr_nitems = 0;
	pp->pr_nout = 0;
	pp->pr_hardlimit = UINT_MAX;
	pp->pr_hardlimit_warning = NULL;
	pp->pr_hardlimit_ratecap.tv_sec = 0;
	pp->pr_hardlimit_ratecap.tv_usec = 0;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;

	/*
	 * Decide whether to put the page header off page to avoid
Line 465 pool_init(pp, size, align, ioff, flags,
	 */
	if (pp->pr_size < pagesz/16) {
		/* Use the end of the page for the page header */
		pp->pr_roflags |= PR_PHINPAGE;
		pp->pr_phoffset = off =
		    pagesz - ALIGN(sizeof(struct pool_item_header));
	} else {
Line 486 pool_init(pp, size, align, ioff, flags,
	 */
	pp->pr_itemoffset = ioff = ioff % align;
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
	KASSERT(pp->pr_itemsperpage != 0);

	/*
	 * Use the slack between the chunks and the page header
Line 504 pool_init(pp, size, align, ioff, flags,
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;

	if ((flags & PR_LOGGING) != 0) {
		if (kmem_map == NULL ||
		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
		     M_TEMP, M_NOWAIT)) == NULL)
			pp->pr_roflags &= ~PR_LOGGING;
		pp->pr_curlogentry = 0;
		pp->pr_logsize = pool_logsize;
	}

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;

	simple_lock_init(&pp->pr_slock);

	/*
	 * Initialize private page header pool and cache magazine pool if we
	 * haven't done so yet.
	 * XXX LOCKING.
	 */
	if (phpool.pr_size == 0) {
		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
		    0, "phpool", 0, 0, 0, 0);
		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
		    0, "pcgpool", 0, 0, 0, 0);
	}

	/* Insert into the list of all pools. */
	simple_lock(&pool_head_slock);
	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	simple_unlock(&pool_head_slock);
}

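/*
 * Illustrative sketch only (hypothetical "struct bar" and bar_pool):
 * a typical client initializes a pool once and then allocates and
 * frees items at the appropriate spl level.
 */
#if 0
static struct pool bar_pool;

void
bar_setup(void)
{

	pool_init(&bar_pool, sizeof(struct bar), 0, 0, 0, "barpl",
	    0, NULL, NULL, M_DEVBUF);
}

struct bar *
bar_alloc(void)
{

	/* PR_NOWAIT: fail with NULL rather than sleep. */
	return (pool_get(&bar_pool, PR_NOWAIT));
}

void
bar_free(struct bar *b)
{

	pool_put(&bar_pool, b);
}
#endif
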
/*
 * De-commission a pool resource.
 */
void
pool_destroy(struct pool *pp)
{
	struct pool_item_header *ph;
	struct pool_cache *pc;

	/* Destroy all caches for this pool. */
	while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
		pool_cache_destroy(pc);

#ifdef DIAGNOSTIC
	if (pp->pr_nout != 0) {
		pr_printlog(pp, NULL, printf);
		panic("pool_destroy: pool busy: still out: %u\n",
		    pp->pr_nout);
	}
#endif

	/* Remove all pages */
	if ((pp->pr_roflags & PR_STATIC) == 0)
		while ((ph = pp->pr_pagelist.tqh_first) != NULL)
			pr_rmpage(pp, ph);

	/* Remove from global pool list */
	simple_lock(&pool_head_slock);
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
	/* XXX Only clear this if we were drainpp? */
	drainpp = NULL;
	simple_unlock(&pool_head_slock);

#ifdef POOL_DIAGNOSTIC
	if ((pp->pr_roflags & PR_LOGGING) != 0)
		free(pp->pr_log, M_TEMP);
#endif

	if (pp->pr_roflags & PR_FREEHEADER)
		free(pp, M_POOL);
}

Line 580 pool_destroy(pp)

/*
 * Grab an item from the pool; must be called at appropriate spl level
 */
void *
_pool_get(struct pool *pp, int flags, const char *file, long line)
{
	void *v;
	struct pool_item *pi;
	struct pool_item_header *ph;

#ifdef DIAGNOSTIC
	if (__predict_false((pp->pr_roflags & PR_STATIC) &&
	    (flags & PR_MALLOCOK))) {
		pr_printlog(pp, NULL, printf);
		panic("pool_get: static");
	}
#endif

	if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
	    (flags & PR_WAITOK) != 0))
		panic("pool_get: must have NOWAIT");

	simple_lock(&pp->pr_slock);
	pr_enter(pp, file, line);

 startover:
	/*
	 * Check to see if we've reached the hard limit.  If we have,
	 * and we can wait, then wait until an item has been returned to
	 * the pool.
	 */
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
	}
#endif
	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
			/*
			 * XXX: A warning isn't logged in this case.  Should
			 * it be?
			 */
			pp->pr_flags |= PR_WANTED;
			pr_leave(pp);
			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/*
		 * Log a message that the hard limit has been hit.
		 */
		if (pp->pr_hardlimit_warning != NULL &&
		    ratecheck(&pp->pr_hardlimit_warning_last,
			      &pp->pr_hardlimit_ratecap))
			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);

		if (flags & PR_URGENT)
			panic("pool_get: urgent");

		pp->pr_nfail++;

		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		return (NULL);
	}

	/*
	 * The convention we use is that if `curpage' is not NULL, then
	 * it points at a non-empty bucket. In particular, `curpage'
	 * never points at a page header which has PR_PHINPAGE set and
	 * has no items in its bucket.
	 */
	if ((ph = pp->pr_curpage) == NULL) {
		void *v;

#ifdef DIAGNOSTIC
		if (pp->pr_nitems != 0) {
			simple_unlock(&pp->pr_slock);
			printf("pool_get: %s: curpage NULL, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_get: nitems inconsistent\n");
		}
#endif

		/*
		 * Call the back-end page allocator for more memory.
		 * Release the pool lock, as the back-end page allocator
		 * may block.
		 */
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);
		simple_lock(&pp->pr_slock);
		pr_enter(pp, file, line);

		if (v == NULL) {
			/*
			 * We were unable to allocate a page, but
			 * we released the lock during allocation,
			 * so perhaps items were freed back to the
			 * pool.  Check for this case.
			 */
			if (pp->pr_curpage != NULL)
				goto startover;

			if (flags & PR_URGENT)
				panic("pool_get: urgent");

			if ((flags & PR_WAITOK) == 0) {
				pp->pr_nfail++;
				pr_leave(pp);
				simple_unlock(&pp->pr_slock);
				return (NULL);
			}

			/*
			 * Wait for items to be returned to this pool.
			 *
			 * XXX: we actually want to wait just until
			 * the page allocator has memory again. Depending
			 * on this pool's usage, we might get stuck here
			 * for a long time.
			 *
			 * XXX: maybe we should wake up once a second and
			 * try again?
			 */
			pp->pr_flags |= PR_WANTED;
			pr_leave(pp);
			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/* We have more memory; add it to the pool */
		pp->pr_npagealloc++;
		pool_prime_page(pp, v);

		/* Start the allocation process over. */
		goto startover;
	}

	if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		panic("pool_get: %s: page empty", pp->pr_wchan);
	}
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nitems == 0)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		printf("pool_get: %s: items on itemlist, nitems %u\n",
		    pp->pr_wchan, pp->pr_nitems);
		panic("pool_get: nitems inconsistent\n");
	}
#endif
	pr_log(pp, v, PRLOG_GET, file, line);

#ifdef DIAGNOSTIC
	if (__predict_false(pi->pi_magic != PI_MAGIC)) {
		pr_printlog(pp, pi, printf);
		panic("pool_get(%s): free list modified: magic=%x; page %p;"
		       " item addr %p\n",
			pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
	}
#endif

	/*
	 * Remove from item list.
	 */
	TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
	pp->pr_nitems--;
	pp->pr_nout++;
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (__predict_false(pp->pr_nidle == 0))
			panic("pool_get: nidle inconsistent");
#endif
		pp->pr_nidle--;
	}
	ph->ph_nmissing++;
	if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
#ifdef DIAGNOSTIC
		if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			panic("pool_get: %s: nmissing inconsistent",
			    pp->pr_wchan);
		}
#endif
		/*
		 * Find a new non-empty page header, if any.
		 * Start search from the page head, to increase
		 * the chance for "high water" pages to be freed.
		 *
		 * Migrate empty pages to the end of the list.  This
		 * will speed the update of curpage as pages become
		 * idle.  Empty pages intermingled with idle pages
		 * is no big deal.  As soon as a page becomes un-empty,
		 * it will move back to the head of the list.
		 */
		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
		TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
		for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
		     ph = TAILQ_NEXT(ph, ph_pagelist))
			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
				break;

		pp->pr_curpage = ph;
	}

	pp->pr_nget++;

	/*
	 * If we have a low water mark and we are now below that low
	 * water mark, add more items to the pool.
	 */
	if (pp->pr_nitems < pp->pr_minitems && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
	return (v);
}

/*
 * Internal version of pool_put().  Pool is already locked/entered.
 */
static void
pool_do_put(struct pool *pp, void *v, const char *file, long line)
{
	struct pool_item *pi = v;
	struct pool_item_header *ph;
	caddr_t page;
	int s;

	page = (caddr_t)((u_long)v & pp->pr_pagemask);

#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout == 0)) {
		printf("pool %s: putting with none out\n",
		    pp->pr_wchan);
		panic("pool_put");
	}
#endif

	pr_log(pp, v, PRLOG_PUT, file, line);

	if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
		pr_printlog(pp, NULL, printf);
		panic("pool_put: %s: page header missing", pp->pr_wchan);
	}

#ifdef LOCKDEBUG
	/*
	 * Check if we're freeing a locked simple lock.
	 */
	simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
#endif

	/*
	 * Return to item list.
	 */
#ifdef DIAGNOSTIC
	pi->pi_magic = PI_MAGIC;
#endif
#ifdef DEBUG
	{
		int i, *ip = v;

		for (i = 0; i < pp->pr_size / sizeof(int); i++) {
			*ip++ = PI_MAGIC;
		}
	}
#endif

	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
	ph->ph_nmissing--;
	pp->pr_nput++;
	pp->pr_nitems++;
	pp->pr_nout--;

	/* Cancel "pool empty" condition if it exists */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (pp->pr_flags & PR_WANTED) {
		pp->pr_flags &= ~PR_WANTED;
		if (ph->ph_nmissing == 0)
			pp->pr_nidle++;
		wakeup((caddr_t)pp);
		return;
	}

	/*
	 * If this page is now complete, do one of two things:
	 *
	 * (1) If we have more pages than the page high water
	 *     mark, free the page back to the system.
	 *
	 * (2) Move it to the end of the page list, so that
	 *     we minimize our chances of fragmenting the
	 *     pool.  Idle pages migrate to the end (along with
	 *     completely empty pages, so that we find un-empty
	 *     pages more quickly when we update curpage) of the
	 *     list so they can be more easily swept up by
	 *     the pagedaemon when pages are scarce.
	 */
	if (ph->ph_nmissing == 0) {
		pp->pr_nidle++;
		if (pp->pr_npages > pp->pr_maxpages) {
			pr_rmpage(pp, ph);
		} else {
			TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
			TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);

			/*
			 * Update the timestamp on the page.  A page must
			 * be idle for some period of time before it can
			 * be reclaimed by the pagedaemon.  This minimizes
			 * ping-pong'ing for memory.
			 */
			s = splclock();
			ph->ph_time = mono_time;
			splx(s);

			/*
			 * Update the current page pointer.  Just look for
			 * the first page with any free items.
			 *
			 * XXX: Maybe we want an option to look for the
			 * page with the fewest available items, to minimize
			 * fragmentation?
			 */
			for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
			     ph = TAILQ_NEXT(ph, ph_pagelist))
				if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
					break;

			pp->pr_curpage = ph;
		}
	}
	/*
	 * If the page has just become un-empty, move it to the head of
	 * the list, and make it the current page.  The next allocation
	 * will get the item from this page, instead of further fragmenting
	 * the pool.
	 */
	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
		TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
		pp->pr_curpage = ph;
	}
}

/*
 * Return resource to the pool; must be called at appropriate spl level
 */
void
_pool_put(struct pool *pp, void *v, const char *file, long line)
{

	simple_lock(&pp->pr_slock);
	pr_enter(pp, file, line);

	pool_do_put(pp, v, file, line);

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
}

/*
 * Add N items to the pool.
 */
int
pool_prime(struct pool *pp, int n, caddr_t storage)
{
	caddr_t cp;
	int newnitems, newpages;

#ifdef DIAGNOSTIC
	if (__predict_false(storage && !(pp->pr_roflags & PR_STATIC)))
		panic("pool_prime: static");
	/* !storage && static caught below */
#endif

	simple_lock(&pp->pr_slock);

	newnitems = pp->pr_minitems + n;
	newpages =
	    roundup(newnitems, pp->pr_itemsperpage) / pp->pr_itemsperpage
	    - pp->pr_minpages;

	while (newpages-- > 0) {
		if (pp->pr_roflags & PR_STATIC) {
			cp = storage;
			storage += pp->pr_pagesz;
		} else {
			simple_unlock(&pp->pr_slock);
			cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype);
			simple_lock(&pp->pr_slock);
		}

		if (cp == NULL) {
			simple_unlock(&pp->pr_slock);
			return (ENOMEM);
		}

		pp->pr_npagealloc++;
		pool_prime_page(pp, cp);
		pp->pr_minpages++;
	}
Line 1002 pool_prime(pp, n, storage)
	if (pp->pr_minpages >= pp->pr_maxpages)
		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */

	simple_unlock(&pp->pr_slock);
	return (0);
}

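/*
 * Worked example (illustrative figures): priming 100 items into a pool
 * that fits 31 items per page allocates roundup(100, 31) / 31 = 4 pages,
 * i.e. 124 items, and raises pr_minpages accordingly.
 */
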
/*
 * Add a page worth of items to the pool.
 *
 * Note, we must be called with the pool descriptor LOCKED.
 */
static void
pool_prime_page(struct pool *pp, caddr_t storage)
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	caddr_t cp = storage;
	unsigned int align = pp->pr_align;
	unsigned int ioff = pp->pr_itemoffset;
	int s, n;

	if (((u_long)cp & (pp->pr_pagesz - 1)) != 0)
		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);

	if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
		ph = (struct pool_item_header *)(cp + pp->pr_phoffset);
	} else {
		s = splhigh();
		ph = pool_get(&phpool, PR_URGENT);
		splx(s);
		LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
		    ph, ph_hashlist);
	}
Line 1041 pool_prime_page(pp, storage)
	TAILQ_INIT(&ph->ph_itemlist);
	ph->ph_page = storage;
	ph->ph_nmissing = 0;
	memset(&ph->ph_time, 0, sizeof(ph->ph_time));

	pp->pr_nidle++;
Line 1062 pool_prime_page(pp, storage)
	 * Insert remaining chunks on the bucket list.
	 */
	n = pp->pr_itemsperpage;
	pp->pr_nitems += n;

	while (n--) {
		pi = (struct pool_item *)cp;
Line 1083 pool_prime_page(pp, storage)

	if (++pp->pr_npages > pp->pr_hiwat)
		pp->pr_hiwat = pp->pr_npages;
}

/*
 * Like pool_prime(), except this is used by pool_get() when nitems
 * drops below the low water mark.  This is used to catch up nitems
 * with the low water mark.
 *
 * Note 1, we never wait for memory here, we let the caller decide what to do.
 *
 * Note 2, this doesn't work with static pools.
 *
 * Note 3, we must be called with the pool already locked, and we return
 * with it locked.
 */
static int
pool_catchup(struct pool *pp)
{
	caddr_t cp;
	int error = 0;

	if (pp->pr_roflags & PR_STATIC) {
		/*
		 * We dropped below the low water mark, and this is not a
		 * good thing.  Log a warning.
		 *
		 * XXX: rate-limit this?
		 */
		printf("WARNING: static pool `%s' dropped below low water "
		    "mark\n", pp->pr_wchan);
		return (0);
	}

	while (pp->pr_nitems < pp->pr_minitems) {
		/*
		 * Call the page back-end allocator for more memory.
		 *
		 * XXX: We never wait, so should we bother unlocking
		 * the pool descriptor?
		 */
		simple_unlock(&pp->pr_slock);
		cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype);
		simple_lock(&pp->pr_slock);
		if (__predict_false(cp == NULL)) {
			error = ENOMEM;
			break;
		}
		pp->pr_npagealloc++;
		pool_prime_page(pp, cp);
	}

	return (error);
}

void
pool_setlowat(struct pool *pp, int n)
{
	int error;

	simple_lock(&pp->pr_slock);

	pp->pr_minitems = n;
	pp->pr_minpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	/* Make sure we're caught up with the newly-set low water mark. */
	if ((pp->pr_nitems < pp->pr_minitems) &&
	    (error = pool_catchup(pp)) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	simple_unlock(&pp->pr_slock);
}

void
pool_sethiwat(struct pool *pp, int n)
{

	simple_lock(&pp->pr_slock);

	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	simple_unlock(&pp->pr_slock);
}

void
pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
{

	simple_lock(&pp->pr_slock);

	pp->pr_hardlimit = n;
	pp->pr_hardlimit_warning = warnmess;
	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;

	/*
	 * In-line version of pool_sethiwat(), because we don't want to
	 * release the lock.
	 */
	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	simple_unlock(&pp->pr_slock);
}

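/*
 * Illustrative sketch only: tuning a hypothetical "mb_pool" so that at
 * least 16 items stay on hand, idle pages above 64 items' worth are
 * released, and allocations beyond 1024 outstanding items fail with a
 * rate-limited warning (at most one every 30 seconds):
 *
 *	pool_setlowat(&mb_pool, 16);
 *	pool_sethiwat(&mb_pool, 64);
 *	pool_sethardlimit(&mb_pool, 1024,
 *	    "WARNING: mb_pool hard limit reached", 30);
 */
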
/*
 * Default page allocator.
 */
static void *
pool_page_alloc(unsigned long sz, int flags, int mtype)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *)uvm_km_alloc_poolpage(waitok));
}

static void
pool_page_free(void *v, unsigned long sz, int mtype)
{

	uvm_km_free_poolpage((vaddr_t)v);
}

/*
Line 1220 pool_page_free(v, sz, mtype)
 * never be accessed in interrupt context.
 */
void *
pool_page_alloc_nointr(unsigned long sz, int flags, int mtype)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
	    waitok));
}

void
pool_page_free_nointr(void *v, unsigned long sz, int mtype)
{

	uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
}

Line 1240 pool_page_free_nointr(v, sz, mtype)

/*
 * Release all complete pages that have not been used recently.
 */
void
_pool_reclaim(struct pool *pp, const char *file, long line)
{
	struct pool_item_header *ph, *phnext;
	struct pool_cache *pc;
	struct timeval curtime;
	int s;

	if (pp->pr_roflags & PR_STATIC)
		return;

	if (simple_lock_try(&pp->pr_slock) == 0)
		return;
	pr_enter(pp, file, line);

	/*
	 * Reclaim items from the pool's caches.
	 */
	for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL;
	     pc = TAILQ_NEXT(pc, pc_poollist))
		pool_cache_reclaim(pc);

	s = splclock();
	curtime = mono_time;
	splx(s);

	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
		phnext = TAILQ_NEXT(ph, ph_pagelist);
Line 1277 pool_reclaim (pp)
			timersub(&curtime, &ph->ph_time, &diff);
			if (diff.tv_sec < pool_inactive_time)
				continue;

			/*
			 * If freeing this page would put us below
			 * the low water mark, stop now.
			 */
			if ((pp->pr_nitems - pp->pr_itemsperpage) <
			    pp->pr_minitems)
				break;

			pr_rmpage(pp, ph);
		}
	}

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
}

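/*
 * Illustrative note: with the defaults above, a fully idle page becomes
 * eligible for release only after pool_inactive_time (10) seconds, and
 * only while freeing it would keep pr_nitems at or above the low water
 * mark.
 */
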
/*
 * Drain pools, one at a time.
 *
 * Note, we must never be called from an interrupt context.
 */
void
pool_drain(void *arg)
{
	struct pool *pp;
	int s;

	s = splimp();
	simple_lock(&pool_head_slock);

	if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL)
		goto out;

	pp = drainpp;
	drainpp = TAILQ_NEXT(pp, pr_poollist);

	pool_reclaim(pp);

 out:
	simple_unlock(&pool_head_slock);
	splx(s);
}

/*
 * Diagnostic helpers.
 */
void
pool_print(struct pool *pp, const char *modif)
{
	int s;

	s = splimp();
	if (simple_lock_try(&pp->pr_slock) == 0) {
		printf("pool %s is locked; try again later\n",
		    pp->pr_wchan);
		splx(s);
		return;
	}
	pool_print1(pp, modif, printf);
	simple_unlock(&pp->pr_slock);
	splx(s);
}

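/*
 * Illustrative note: each character of `modif' selects one optional
 * report section in pool_print1() below ('l' log, 'p' page list,
 * 'c' caches); e.g. pool_print(pp, "p") also dumps the page list.
 */
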
void
pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
{
	int didlock = 0;

	if (pp == NULL) {
		(*pr)("Must specify a pool to print.\n");
		return;
	}

	/*
	 * Called from DDB; interrupts should be blocked, and all
	 * other processors should be paused.  We can skip locking
	 * the pool in this case.
	 *
	 * We do a simple_lock_try() just to print the lock
	 * status, however.
	 */

	if (simple_lock_try(&pp->pr_slock) == 0)
		(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
	else
		didlock = 1;

	pool_print1(pp, modif, pr);

	if (didlock)
		simple_unlock(&pp->pr_slock);
}

static void
pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
{
	struct pool_item_header *ph;
	struct pool_cache *pc;
	struct pool_cache_group *pcg;
#ifdef DIAGNOSTIC
	struct pool_item *pi;
#endif
	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
	char c;

	while ((c = *modif++) != '\0') {
		if (c == 'l')
			print_log = 1;
		if (c == 'p')
			print_pagelist = 1;
		if (c == 'c')
			print_cache = 1;
		modif++;
	}

	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
	    pp->pr_roflags);
	(*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype);
	(*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free);
	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);

	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);

	if (print_pagelist == 0)
		goto skip_pagelist;

	if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
		(*pr)("\n\tpage list:\n");
	for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
		    ph->ph_page, ph->ph_nmissing,
		    (u_long)ph->ph_time.tv_sec,
		    (u_long)ph->ph_time.tv_usec);
#ifdef DIAGNOSTIC
		for (pi = TAILQ_FIRST(&ph->ph_itemlist); pi != NULL;
		     pi = TAILQ_NEXT(pi, pi_list)) {
			if (pi->pi_magic != PI_MAGIC) {
				(*pr)("\t\t\titem %p, magic 0x%x\n",
				    pi, pi->pi_magic);
			}
		}
#endif
	}
	if (pp->pr_curpage == NULL)
		(*pr)("\tno current page\n");
	else
		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);

 skip_pagelist:

	if (print_log == 0)
		goto skip_log;

	(*pr)("\n");
	if ((pp->pr_roflags & PR_LOGGING) == 0)
		(*pr)("\tno log\n");
	else
		pr_printlog(pp, NULL, pr);

 skip_log:

	if (print_cache == 0)
		goto skip_cache;

	for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL;
	     pc = TAILQ_NEXT(pc, pc_poollist)) {
		(*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
		    pc->pc_allocfrom, pc->pc_freeto);
		(*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
		for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
		     pcg = TAILQ_NEXT(pcg, pcg_list)) {
			(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
			for (i = 0; i < PCG_NOBJECTS; i++)
				(*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
		}
	}

 skip_cache:

	pr_enter_check(pp, pr);
}

int
pool_chk(struct pool *pp, const char *label)
{
	struct pool_item_header *ph;
	int r = 0;

	simple_lock(&pp->pr_slock);

	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
	     ph = TAILQ_NEXT(ph, ph_pagelist)) {
Line 1486 pool_chk(pp, label)
		caddr_t page;

		page = (caddr_t)((u_long)ph & pp->pr_pagemask);
		if (page != ph->ph_page &&
		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%p:%s): page inconsistency: page %p;"
Line 1529 pool_chk(pp, label)
		}
	}
out:
	simple_unlock(&pp->pr_slock);
	return (r);
}

/*
 * pool_cache_init:
 *
 *	Initialize a pool cache.
 *
 *	NOTE: If the pool must be protected from interrupts, we expect
 *	to be called at the appropriate interrupt priority level.
 */
void
pool_cache_init(struct pool_cache *pc, struct pool *pp,
    int (*ctor)(void *, void *, int),
    void (*dtor)(void *, void *),
    void *arg)
{

	TAILQ_INIT(&pc->pc_grouplist);
	simple_lock_init(&pc->pc_slock);

	pc->pc_allocfrom = NULL;
	pc->pc_freeto = NULL;
	pc->pc_pool = pp;

	pc->pc_ctor = ctor;
	pc->pc_dtor = dtor;
	pc->pc_arg = arg;

	pc->pc_hits = 0;
	pc->pc_misses = 0;

	pc->pc_ngroups = 0;

	pc->pc_nitems = 0;

	simple_lock(&pp->pr_slock);
	TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
	simple_unlock(&pp->pr_slock);
}

/*
 * pool_cache_destroy:
 *
 *	Destroy a pool cache.
 */
void
pool_cache_destroy(struct pool_cache *pc)
{
	struct pool *pp = pc->pc_pool;

	/* First, invalidate the entire cache. */
	pool_cache_invalidate(pc);

	/* ...and remove it from the pool's cache list. */
	simple_lock(&pp->pr_slock);
	TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
	simple_unlock(&pp->pr_slock);
}

static __inline void *
pcg_get(struct pool_cache_group *pcg)
{
	void *object;
	u_int idx;

	KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
	KASSERT(pcg->pcg_avail != 0);
	idx = --pcg->pcg_avail;

	KASSERT(pcg->pcg_objects[idx] != NULL);
	object = pcg->pcg_objects[idx];
	pcg->pcg_objects[idx] = NULL;

	return (object);
}

static __inline void
pcg_put(struct pool_cache_group *pcg, void *object)
{
	u_int idx;

	KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
	idx = pcg->pcg_avail++;

	KASSERT(pcg->pcg_objects[idx] == NULL);
	pcg->pcg_objects[idx] = object;
}

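/*
 * Note: pcg_get()/pcg_put() treat pcg_objects[] as a small LIFO stack,
 * so a group returns the most recently freed (and thus most likely
 * still cache-warm) object first.
 */
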
/*
 * pool_cache_get:
 *
 *	Get an object from a pool cache.
 */
void *
pool_cache_get(struct pool_cache *pc, int flags)
{
	struct pool_cache_group *pcg;
	void *object;

	simple_lock(&pc->pc_slock);

	if ((pcg = pc->pc_allocfrom) == NULL) {
		for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
		     pcg = TAILQ_NEXT(pcg, pcg_list)) {
			if (pcg->pcg_avail != 0) {
				pc->pc_allocfrom = pcg;
				goto have_group;
			}
		}

		/*
		 * No groups with any available objects.  Allocate
		 * a new object, construct it, and return it to
		 * the caller.  We will allocate a group, if necessary,
		 * when the object is freed back to the cache.
		 */
		pc->pc_misses++;
		simple_unlock(&pc->pc_slock);
		object = pool_get(pc->pc_pool, flags);
		if (object != NULL && pc->pc_ctor != NULL) {
			if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
				pool_put(pc->pc_pool, object);
				return (NULL);
			}
		}
		return (object);
	}

 have_group:
	pc->pc_hits++;
	pc->pc_nitems--;
	object = pcg_get(pcg);

	if (pcg->pcg_avail == 0)
		pc->pc_allocfrom = NULL;

	simple_unlock(&pc->pc_slock);

	return (object);
}

/*
 * pool_cache_put:
 *
 *	Put an object back to the pool cache.
 */
void
pool_cache_put(struct pool_cache *pc, void *object)
{
	struct pool_cache_group *pcg;

	simple_lock(&pc->pc_slock);

	if ((pcg = pc->pc_freeto) == NULL) {
		for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
		     pcg = TAILQ_NEXT(pcg, pcg_list)) {
			if (pcg->pcg_avail != PCG_NOBJECTS) {
				pc->pc_freeto = pcg;
				goto have_group;
			}
		}

		/*
		 * No empty groups to free the object to.  Attempt to
		 * allocate one.
		 */
		simple_unlock(&pc->pc_slock);
		pcg = pool_get(&pcgpool, PR_NOWAIT);
		if (pcg != NULL) {
			memset(pcg, 0, sizeof(*pcg));
			simple_lock(&pc->pc_slock);
			pc->pc_ngroups++;
			TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
			if (pc->pc_freeto == NULL)
				pc->pc_freeto = pcg;
			goto have_group;
		}

		/*
		 * Unable to allocate a cache group; destruct the object
		 * and free it back to the pool.
		 */
		if (pc->pc_dtor != NULL)
			(*pc->pc_dtor)(pc->pc_arg, object);
		pool_put(pc->pc_pool, object);
		return;
	}

 have_group:
	pc->pc_nitems++;
	pcg_put(pcg, object);

	if (pcg->pcg_avail == PCG_NOBJECTS)
		pc->pc_freeto = NULL;

	simple_unlock(&pc->pc_slock);
}

/*
 * pool_cache_do_invalidate:
 *
 *	This internal function implements pool_cache_invalidate() and
 *	pool_cache_reclaim().
 */
static void
pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
    void (*putit)(struct pool *, void *, const char *, long))
{
	struct pool_cache_group *pcg, *npcg;
	void *object;

	for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
	     pcg = npcg) {
		npcg = TAILQ_NEXT(pcg, pcg_list);
		while (pcg->pcg_avail != 0) {
			pc->pc_nitems--;
			object = pcg_get(pcg);
			if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
				pc->pc_allocfrom = NULL;
			if (pc->pc_dtor != NULL)
				(*pc->pc_dtor)(pc->pc_arg, object);
			(*putit)(pc->pc_pool, object, __FILE__, __LINE__);
		}
		if (free_groups) {
			pc->pc_ngroups--;
			TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
			if (pc->pc_freeto == pcg)
				pc->pc_freeto = NULL;
			pool_put(&pcgpool, pcg);
		}
	}
}

/*
 * pool_cache_invalidate:
 *
 *	Invalidate a pool cache (destruct and release all of the
 *	cached objects).
 */
void
pool_cache_invalidate(struct pool_cache *pc)
{

	simple_lock(&pc->pc_slock);
	pool_cache_do_invalidate(pc, 0, _pool_put);
	simple_unlock(&pc->pc_slock);
}

/*
 * pool_cache_reclaim:
 *
 *	Reclaim a pool cache for pool_reclaim().
 */
static void
pool_cache_reclaim(struct pool_cache *pc)
{

	simple_lock(&pc->pc_slock);
	pool_cache_do_invalidate(pc, 1, pool_do_put);
	simple_unlock(&pc->pc_slock);
}