version 1.134, 2010/02/08 22:55:36 |
version 1.134.2.1, 2010/05/30 05:17:58 |
Line 100 int max_datalen; |
|
Line 100 int max_datalen; |
|
|
|
static int mb_ctor(void *, void *, int); |
static int mb_ctor(void *, void *, int); |
|
|
static void *mclpool_alloc(struct pool *, int); |
|
static void mclpool_release(struct pool *, void *); |
|
|
|
static void sysctl_kern_mbuf_setup(void); |
static void sysctl_kern_mbuf_setup(void); |
|
|
static struct sysctllog *mbuf_sysctllog; |
static struct sysctllog *mbuf_sysctllog; |
|
|
/*
 * Custom backing-page allocator for the mbuf cluster pool (mcl_cache):
 * routes page-level alloc/free through kmem_map via the mclpool_alloc
 * and mclpool_release helpers defined below.
 */
static struct pool_allocator mclpool_allocator = {

	.pa_alloc = mclpool_alloc,

	.pa_free = mclpool_release,

};
|
|
|
static struct mbuf *m_copym0(struct mbuf *, int, int, int, int); |
static struct mbuf *m_copym0(struct mbuf *, int, int, int, int); |
static struct mbuf *m_split0(struct mbuf *, int, int, int); |
static struct mbuf *m_split0(struct mbuf *, int, int, int); |
static int m_copyback0(struct mbuf **, int, int, const void *, int, int); |
static int m_copyback0(struct mbuf **, int, int, const void *, int, int); |
|
|
static int |
static int |
nmbclusters_limit(void) |
nmbclusters_limit(void) |
{ |
{ |
#if defined(PMAP_MAP_POOLPAGE) || defined(_RUMPKERNEL) |
#if defined(PMAP_MAP_POOLPAGE) |
/* direct mapping, doesn't use space in kmem_map */ |
/* direct mapping, doesn't use space in kmem_map */ |
vsize_t max_size = physmem / 4; |
vsize_t max_size = physmem / 4; |
#else |
#else |
|
|
|
|
sysctl_kern_mbuf_setup(); |
sysctl_kern_mbuf_setup(); |
|
|
mclpool_allocator.pa_backingmap = kmem_map; |
|
|
|
mb_cache = pool_cache_init(msize, 0, 0, 0, "mbpl", |
mb_cache = pool_cache_init(msize, 0, 0, 0, "mbpl", |
NULL, IPL_VM, mb_ctor, NULL, NULL); |
NULL, IPL_VM, mb_ctor, NULL, NULL); |
KASSERT(mb_cache != NULL); |
KASSERT(mb_cache != NULL); |
|
|
mcl_cache = pool_cache_init(mclbytes, 0, 0, 0, "mclpl", |
mcl_cache = pool_cache_init(mclbytes, 0, 0, 0, "mclpl", NULL, |
&mclpool_allocator, IPL_VM, NULL, NULL, NULL); |
IPL_VM, NULL, NULL, NULL); |
KASSERT(mcl_cache != NULL); |
KASSERT(mcl_cache != NULL); |
|
|
pool_cache_set_drain_hook(mb_cache, m_reclaim, NULL); |
pool_cache_set_drain_hook(mb_cache, m_reclaim, NULL); |
Line 459 sysctl_kern_mbuf_setup(void) |
|
Line 449 sysctl_kern_mbuf_setup(void) |
|
#endif /* MBUFTRACE */ |
#endif /* MBUFTRACE */ |
} |
} |
|
|
static void * |
|
mclpool_alloc(struct pool *pp, int flags) |
|
{ |
|
bool waitok = (flags & PR_WAITOK) ? true : false; |
|
|
|
return ((void *)uvm_km_alloc_poolpage(kmem_map, waitok)); |
|
} |
|
|
|
/*
 * mclpool_release: page free hook for the mbuf cluster pool.
 * Returns a backing page previously obtained by mclpool_alloc
 * to kmem_map.  pp is unused; signature fixed by struct
 * pool_allocator.
 */
static void

mclpool_release(struct pool *pp, void *v)

{


	uvm_km_free_poolpage(kmem_map, (vaddr_t)v);

}
|
|
|
/*ARGSUSED*/ |
|
static int |
static int |
mb_ctor(void *arg, void *object, int flags) |
mb_ctor(void *arg, void *object, int flags) |
{ |
{ |