Annotation of src/sys/kern/subr_vmem.c, Revision 1.44
1.44 ! cegger 1: /* $NetBSD: subr_vmem.c,v 1.43 2008/12/07 00:51:15 cegger Exp $ */
1.1 yamt 2:
3: /*-
4: * Copyright (c)2006 YAMAMOTO Takashi,
5: * All rights reserved.
6: *
7: * Redistribution and use in source and binary forms, with or without
8: * modification, are permitted provided that the following conditions
9: * are met:
10: * 1. Redistributions of source code must retain the above copyright
11: * notice, this list of conditions and the following disclaimer.
12: * 2. Redistributions in binary form must reproduce the above copyright
13: * notice, this list of conditions and the following disclaimer in the
14: * documentation and/or other materials provided with the distribution.
15: *
16: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19: * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26: * SUCH DAMAGE.
27: */
28:
29: /*
30: * reference:
31: * - Magazines and Vmem: Extending the Slab Allocator
32: * to Many CPUs and Arbitrary Resources
33: * http://www.usenix.org/event/usenix01/bonwick.html
1.18 yamt 34: *
35: * todo:
36: * - decide how to import segments for vmem_xalloc.
37: * - don't rely on malloc(9).
1.1 yamt 38: */
39:
40: #include <sys/cdefs.h>
1.44 ! cegger 41: __KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.43 2008/12/07 00:51:15 cegger Exp $");
1.1 yamt 42:
43: #define VMEM_DEBUG
1.5 yamt 44: #if defined(_KERNEL)
1.37 yamt 45: #include "opt_ddb.h"
1.5 yamt 46: #define QCACHE
47: #endif /* defined(_KERNEL) */
1.1 yamt 48:
49: #include <sys/param.h>
50: #include <sys/hash.h>
51: #include <sys/queue.h>
52:
53: #if defined(_KERNEL)
54: #include <sys/systm.h>
1.30 yamt 55: #include <sys/kernel.h> /* hz */
56: #include <sys/callout.h>
1.1 yamt 57: #include <sys/malloc.h>
58: #include <sys/once.h>
59: #include <sys/pool.h>
60: #include <sys/vmem.h>
1.30 yamt 61: #include <sys/workqueue.h>
1.1 yamt 62: #else /* defined(_KERNEL) */
63: #include "../sys/vmem.h"
64: #endif /* defined(_KERNEL) */
65:
66: #if defined(_KERNEL)
1.31 ad 67: #define LOCK_DECL(name) kmutex_t name
1.1 yamt 68: #else /* defined(_KERNEL) */
69: #include <errno.h>
70: #include <assert.h>
71: #include <stdlib.h>
72:
73: #define KASSERT(a) assert(a)
1.31 ad 74: #define LOCK_DECL(name) /* nothing */
75: #define mutex_init(a, b, c) /* nothing */
76: #define mutex_destroy(a) /* nothing */
77: #define mutex_enter(a) /* nothing */
78: #define mutex_exit(a) /* nothing */
79: #define mutex_owned(a) /* nothing */
1.42 yamt 80: #define ASSERT_SLEEPABLE() /* nothing */
1.31 ad 81: #define IPL_VM 0
1.1 yamt 82: #endif /* defined(_KERNEL) */
83:
84: struct vmem;
85: struct vmem_btag;
86:
87: #if defined(VMEM_DEBUG)
88: void vmem_dump(const vmem_t *);
89: #endif /* defined(VMEM_DEBUG) */
90:
1.4 yamt 91: #define VMEM_MAXORDER (sizeof(vmem_size_t) * CHAR_BIT)
1.30 yamt 92:
93: #define VMEM_HASHSIZE_MIN 1 /* XXX */
94: #define VMEM_HASHSIZE_MAX 8192 /* XXX */
95: #define VMEM_HASHSIZE_INIT VMEM_HASHSIZE_MIN
1.1 yamt 96:
97: #define VM_FITMASK (VM_BESTFIT | VM_INSTANTFIT)
98:
99: CIRCLEQ_HEAD(vmem_seglist, vmem_btag);
100: LIST_HEAD(vmem_freelist, vmem_btag);
101: LIST_HEAD(vmem_hashlist, vmem_btag);
102:
1.5 yamt 103: #if defined(QCACHE)
104: #define VMEM_QCACHE_IDX_MAX 32
105:
106: #define QC_NAME_MAX 16
107:
108: struct qcache {
1.35 ad 109: pool_cache_t qc_cache;
1.5 yamt 110: vmem_t *qc_vmem;
111: char qc_name[QC_NAME_MAX];
112: };
113: typedef struct qcache qcache_t;
1.35      ad      114: #define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)((pool)->pr_qcache))
1.5 yamt 115: #endif /* defined(QCACHE) */
116:
1.1 yamt 117: /* vmem arena */
118: struct vmem {
1.31 ad 119: LOCK_DECL(vm_lock);
1.1 yamt 120: vmem_addr_t (*vm_allocfn)(vmem_t *, vmem_size_t, vmem_size_t *,
121: vm_flag_t);
122: void (*vm_freefn)(vmem_t *, vmem_addr_t, vmem_size_t);
123: vmem_t *vm_source;
124: struct vmem_seglist vm_seglist;
125: struct vmem_freelist vm_freelist[VMEM_MAXORDER];
126: size_t vm_hashsize;
127: size_t vm_nbusytag;
128: struct vmem_hashlist *vm_hashlist;
129: size_t vm_quantum_mask;
130: int vm_quantum_shift;
131: const char *vm_name;
1.30 yamt 132: LIST_ENTRY(vmem) vm_alllist;
1.5 yamt 133:
134: #if defined(QCACHE)
135: /* quantum cache */
136: size_t vm_qcache_max;
137: struct pool_allocator vm_qcache_allocator;
1.22 yamt 138: qcache_t vm_qcache_store[VMEM_QCACHE_IDX_MAX];
139: qcache_t *vm_qcache[VMEM_QCACHE_IDX_MAX];
1.5 yamt 140: #endif /* defined(QCACHE) */
1.1 yamt 141: };
142:
1.31 ad 143: #define VMEM_LOCK(vm) mutex_enter(&vm->vm_lock)
144: #define VMEM_TRYLOCK(vm) mutex_tryenter(&vm->vm_lock)
145: #define VMEM_UNLOCK(vm) mutex_exit(&vm->vm_lock)
1.36 ad 146: #define VMEM_LOCK_INIT(vm, ipl) mutex_init(&vm->vm_lock, MUTEX_DEFAULT, ipl)
1.31 ad 147: #define VMEM_LOCK_DESTROY(vm) mutex_destroy(&vm->vm_lock)
148: #define VMEM_ASSERT_LOCKED(vm) KASSERT(mutex_owned(&vm->vm_lock))
1.1 yamt 149:
150: /* boundary tag */
151: struct vmem_btag {
152: CIRCLEQ_ENTRY(vmem_btag) bt_seglist;
153: union {
154: LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
155: LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
156: } bt_u;
157: #define bt_hashlist bt_u.u_hashlist
158: #define bt_freelist bt_u.u_freelist
159: vmem_addr_t bt_start;
160: vmem_size_t bt_size;
161: int bt_type;
162: };
163:
164: #define BT_TYPE_SPAN 1
165: #define BT_TYPE_SPAN_STATIC 2
166: #define BT_TYPE_FREE 3
167: #define BT_TYPE_BUSY 4
168: #define BT_ISSPAN_P(bt) ((bt)->bt_type <= BT_TYPE_SPAN_STATIC)
169:
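/* BT_END is exclusive: a tag covers [bt_start, bt_start + bt_size). */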
170: #define BT_END(bt) ((bt)->bt_start + (bt)->bt_size)
171:
172: typedef struct vmem_btag bt_t;
173:
174: /* ---- misc */
175:
1.19 yamt 176: #define VMEM_ALIGNUP(addr, align) \
177: (-(-(addr) & -(align)))
178: #define VMEM_CROSS_P(addr1, addr2, boundary) \
179: ((((addr1) ^ (addr2)) & -(boundary)) != 0)
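
/*
 * VMEM_ALIGNUP rounds addr up to a multiple of align (a power of 2)
 * using the two's-complement identity -(-x & -a) == roundup(x, a) for
 * unsigned x: e.g. VMEM_ALIGNUP(0x1234, 0x1000) == 0x2000 and
 * VMEM_ALIGNUP(0x1000, 0x1000) == 0x1000.  VMEM_CROSS_P is true iff
 * addr1 and addr2 fall in different boundary-aligned blocks:
 * VMEM_CROSS_P(0x0fff, 0x1000, 0x1000) is true, while
 * VMEM_CROSS_P(0x1000, 0x1fff, 0x1000) is false.  A boundary of 0
 * yields an all-zero mask, so "no boundary restriction" is spelled 0.
 */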
180:
1.4 yamt 181: #define ORDER2SIZE(order) ((vmem_size_t)1 << (order))
182:
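/*
 * calc_order: return the largest n such that ORDER2SIZE(n) <= size,
 * ie. floor(log2(size)): calc_order(1) == 0, calc_order(7) == 2,
 * calc_order(8) == 3.  The second disjunct of the KASSERT in the
 * function appears intended to tolerate ORDER2SIZE(i + 1) overflowing
 * for the topmost order.
 */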
1.1 yamt 183: static int
184: calc_order(vmem_size_t size)
185: {
1.4 yamt 186: vmem_size_t target;
1.1 yamt 187: int i;
188:
189: KASSERT(size != 0);
190:
191: i = 0;
1.4 yamt 192: target = size >> 1;
193: while (ORDER2SIZE(i) <= target) {
1.1 yamt 194: i++;
195: }
196:
1.4 yamt 197: KASSERT(ORDER2SIZE(i) <= size);
198: KASSERT(size < ORDER2SIZE(i + 1) || ORDER2SIZE(i + 1) < ORDER2SIZE(i));
1.1 yamt 199:
200: return i;
201: }
202:
203: #if defined(_KERNEL)
204: static MALLOC_DEFINE(M_VMEM, "vmem", "vmem");
205: #endif /* defined(_KERNEL) */
206:
207: static void *
208: xmalloc(size_t sz, vm_flag_t flags)
209: {
210:
211: #if defined(_KERNEL)
212: return malloc(sz, M_VMEM,
213: M_CANFAIL | ((flags & VM_SLEEP) ? M_WAITOK : M_NOWAIT));
214: #else /* defined(_KERNEL) */
215: return malloc(sz);
216: #endif /* defined(_KERNEL) */
217: }
218:
219: static void
220: xfree(void *p)
221: {
222:
223: #if defined(_KERNEL)
                   224: 	free(p, M_VMEM);
                   225: #else /* defined(_KERNEL) */
                   226: 	free(p);
227: #endif /* defined(_KERNEL) */
228: }
229:
230: /* ---- boundary tag */
231:
232: #if defined(_KERNEL)
1.35 ad 233: static struct pool_cache bt_cache;
1.1 yamt 234: #endif /* defined(_KERNEL) */
235:
236: static bt_t *
1.17 yamt 237: bt_alloc(vmem_t *vm, vm_flag_t flags)
1.1 yamt 238: {
239: bt_t *bt;
240:
241: #if defined(_KERNEL)
1.35 ad 242: bt = pool_cache_get(&bt_cache,
1.1 yamt 243: (flags & VM_SLEEP) != 0 ? PR_WAITOK : PR_NOWAIT);
244: #else /* defined(_KERNEL) */
245: bt = malloc(sizeof *bt);
246: #endif /* defined(_KERNEL) */
247:
248: return bt;
249: }
250:
251: static void
1.17 yamt 252: bt_free(vmem_t *vm, bt_t *bt)
1.1 yamt 253: {
254:
255: #if defined(_KERNEL)
1.35 ad 256: pool_cache_put(&bt_cache, bt);
1.1 yamt 257: #else /* defined(_KERNEL) */
258: free(bt);
259: #endif /* defined(_KERNEL) */
260: }
261:
262: /*
263: * freelist[0] ... [1, 1]
264: * freelist[1] ... [2, 3]
265: * freelist[2] ... [4, 7]
266: * freelist[3] ... [8, 15]
267: * :
268: * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
269: * :
270: */
271:
272: static struct vmem_freelist *
273: bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
274: {
275: const vmem_size_t qsize = size >> vm->vm_quantum_shift;
276: int idx;
277:
278: KASSERT((size & vm->vm_quantum_mask) == 0);
279: KASSERT(size != 0);
280:
281: idx = calc_order(qsize);
282: KASSERT(idx >= 0);
283: KASSERT(idx < VMEM_MAXORDER);
284:
285: return &vm->vm_freelist[idx];
286: }
287:
288: static struct vmem_freelist *
289: bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat)
290: {
291: const vmem_size_t qsize = size >> vm->vm_quantum_shift;
292: int idx;
293:
294: KASSERT((size & vm->vm_quantum_mask) == 0);
295: KASSERT(size != 0);
296:
297: idx = calc_order(qsize);
1.4 yamt 298: if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) {
1.1 yamt 299: idx++;
300: /* check too large request? */
301: }
302: KASSERT(idx >= 0);
303: KASSERT(idx < VMEM_MAXORDER);
304:
305: return &vm->vm_freelist[idx];
306: }
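
/*
 * Note the asymmetry between the two lookups above.  For a free, a tag
 * of qsize quanta goes on freelist[calc_order(qsize)].  For an
 * allocation, VM_INSTANTFIT rounds the order up when qsize is not a
 * power of 2, so every tag on the chosen list is large enough and the
 * head of the list can be taken in constant time, at the cost of
 * sometimes overlooking a big-enough tag on the list below;
 * VM_BESTFIT starts one list lower and searches.  E.g. for qsize == 3,
 * with freelist[1] holding [2, 3] and freelist[2] holding [4, 7]:
 * bt_freehead_tofree -> freelist[1], VM_INSTANTFIT -> freelist[2],
 * VM_BESTFIT -> freelist[1].
 */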
307:
308: /* ---- boundary tag hash */
309:
310: static struct vmem_hashlist *
311: bt_hashhead(vmem_t *vm, vmem_addr_t addr)
312: {
313: struct vmem_hashlist *list;
314: unsigned int hash;
315:
316: hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT);
317: list = &vm->vm_hashlist[hash % vm->vm_hashsize];
318:
319: return list;
320: }
321:
322: static bt_t *
323: bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
324: {
325: struct vmem_hashlist *list;
326: bt_t *bt;
327:
328: list = bt_hashhead(vm, addr);
329: LIST_FOREACH(bt, list, bt_hashlist) {
330: if (bt->bt_start == addr) {
331: break;
332: }
333: }
334:
335: return bt;
336: }
337:
338: static void
339: bt_rembusy(vmem_t *vm, bt_t *bt)
340: {
341:
342: KASSERT(vm->vm_nbusytag > 0);
343: vm->vm_nbusytag--;
344: LIST_REMOVE(bt, bt_hashlist);
345: }
346:
347: static void
348: bt_insbusy(vmem_t *vm, bt_t *bt)
349: {
350: struct vmem_hashlist *list;
351:
352: KASSERT(bt->bt_type == BT_TYPE_BUSY);
353:
354: list = bt_hashhead(vm, bt->bt_start);
355: LIST_INSERT_HEAD(list, bt, bt_hashlist);
356: vm->vm_nbusytag++;
357: }
358:
359: /* ---- boundary tag list */
360:
361: static void
362: bt_remseg(vmem_t *vm, bt_t *bt)
363: {
364:
365: CIRCLEQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
366: }
367:
368: static void
369: bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
370: {
371:
372: CIRCLEQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
373: }
374:
375: static void
376: bt_insseg_tail(vmem_t *vm, bt_t *bt)
377: {
378:
379: CIRCLEQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
380: }
381:
382: static void
1.17 yamt 383: bt_remfree(vmem_t *vm, bt_t *bt)
1.1 yamt 384: {
385:
386: KASSERT(bt->bt_type == BT_TYPE_FREE);
387:
388: LIST_REMOVE(bt, bt_freelist);
389: }
390:
391: static void
392: bt_insfree(vmem_t *vm, bt_t *bt)
393: {
394: struct vmem_freelist *list;
395:
396: list = bt_freehead_tofree(vm, bt->bt_size);
397: LIST_INSERT_HEAD(list, bt, bt_freelist);
398: }
399:
400: /* ---- vmem internal functions */
401:
1.30 yamt 402: #if defined(_KERNEL)
403: static kmutex_t vmem_list_lock;
404: static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
405: #endif /* defined(_KERNEL) */
406:
1.5 yamt 407: #if defined(QCACHE)
408: static inline vm_flag_t
409: prf_to_vmf(int prflags)
410: {
411: vm_flag_t vmflags;
412:
413: KASSERT((prflags & ~(PR_LIMITFAIL | PR_WAITOK | PR_NOWAIT)) == 0);
414: if ((prflags & PR_WAITOK) != 0) {
415: vmflags = VM_SLEEP;
416: } else {
417: vmflags = VM_NOSLEEP;
418: }
419: return vmflags;
420: }
421:
422: static inline int
423: vmf_to_prf(vm_flag_t vmflags)
424: {
425: int prflags;
426:
1.7 yamt 427: if ((vmflags & VM_SLEEP) != 0) {
1.5 yamt 428: prflags = PR_WAITOK;
1.7 yamt 429: } else {
1.5 yamt 430: prflags = PR_NOWAIT;
431: }
432: return prflags;
433: }
434:
435: static size_t
436: qc_poolpage_size(size_t qcache_max)
437: {
438: int i;
439:
440: for (i = 0; ORDER2SIZE(i) <= qcache_max * 3; i++) {
441: /* nothing */
442: }
443: return ORDER2SIZE(i);
444: }
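
/*
 * qc_poolpage_size returns the smallest power of 2 strictly greater
 * than qcache_max * 3 (e.g. 16384 for qcache_max == 4096), presumably
 * so that a backing pool page holds at least a few objects even at the
 * largest cached size.
 */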
445:
446: static void *
447: qc_poolpage_alloc(struct pool *pool, int prflags)
448: {
449: qcache_t *qc = QC_POOL_TO_QCACHE(pool);
450: vmem_t *vm = qc->qc_vmem;
451:
452: return (void *)vmem_alloc(vm, pool->pr_alloc->pa_pagesz,
453: prf_to_vmf(prflags) | VM_INSTANTFIT);
454: }
455:
456: static void
457: qc_poolpage_free(struct pool *pool, void *addr)
458: {
459: qcache_t *qc = QC_POOL_TO_QCACHE(pool);
460: vmem_t *vm = qc->qc_vmem;
461:
462: vmem_free(vm, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
463: }
464:
465: static void
1.31 ad 466: qc_init(vmem_t *vm, size_t qcache_max, int ipl)
1.5 yamt 467: {
1.22 yamt 468: qcache_t *prevqc;
1.5 yamt 469: struct pool_allocator *pa;
470: int qcache_idx_max;
471: int i;
472:
473: KASSERT((qcache_max & vm->vm_quantum_mask) == 0);
474: if (qcache_max > (VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift)) {
475: qcache_max = VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift;
476: }
477: vm->vm_qcache_max = qcache_max;
478: pa = &vm->vm_qcache_allocator;
479: memset(pa, 0, sizeof(*pa));
480: pa->pa_alloc = qc_poolpage_alloc;
481: pa->pa_free = qc_poolpage_free;
482: pa->pa_pagesz = qc_poolpage_size(qcache_max);
483:
484: qcache_idx_max = qcache_max >> vm->vm_quantum_shift;
1.22 yamt 485: prevqc = NULL;
486: for (i = qcache_idx_max; i > 0; i--) {
487: qcache_t *qc = &vm->vm_qcache_store[i - 1];
1.5 yamt 488: size_t size = i << vm->vm_quantum_shift;
489:
490: qc->qc_vmem = vm;
1.8 martin 491: snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
1.5 yamt 492: vm->vm_name, size);
1.35 ad 493: qc->qc_cache = pool_cache_init(size,
494: ORDER2SIZE(vm->vm_quantum_shift), 0,
495: PR_NOALIGN | PR_NOTOUCH /* XXX */,
496: qc->qc_name, pa, ipl, NULL, NULL, NULL);
497: KASSERT(qc->qc_cache != NULL); /* XXX */
1.22 yamt 498: if (prevqc != NULL &&
1.35 ad 499: qc->qc_cache->pc_pool.pr_itemsperpage ==
500: prevqc->qc_cache->pc_pool.pr_itemsperpage) {
501: pool_cache_destroy(qc->qc_cache);
1.22 yamt 502: vm->vm_qcache[i - 1] = prevqc;
1.27 ad 503: continue;
1.22 yamt 504: }
1.35 ad 505: qc->qc_cache->pc_pool.pr_qcache = qc;
1.22 yamt 506: vm->vm_qcache[i - 1] = qc;
507: prevqc = qc;
1.5 yamt 508: }
509: }
1.6 yamt 510:
1.23 yamt 511: static void
512: qc_destroy(vmem_t *vm)
513: {
514: const qcache_t *prevqc;
515: int i;
516: int qcache_idx_max;
517:
518: qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
519: prevqc = NULL;
1.24 yamt 520: for (i = 0; i < qcache_idx_max; i++) {
521: qcache_t *qc = vm->vm_qcache[i];
1.23 yamt 522:
523: if (prevqc == qc) {
524: continue;
525: }
1.35 ad 526: pool_cache_destroy(qc->qc_cache);
1.23 yamt 527: prevqc = qc;
528: }
529: }
530:
1.25 thorpej 531: static bool
1.6 yamt 532: qc_reap(vmem_t *vm)
533: {
1.22 yamt 534: const qcache_t *prevqc;
1.6 yamt 535: int i;
536: int qcache_idx_max;
1.26 thorpej 537: bool didsomething = false;
1.6 yamt 538:
539: qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
1.22 yamt 540: prevqc = NULL;
1.24 yamt 541: for (i = 0; i < qcache_idx_max; i++) {
542: qcache_t *qc = vm->vm_qcache[i];
1.6 yamt 543:
1.22 yamt 544: if (prevqc == qc) {
545: continue;
546: }
1.35 ad 547: if (pool_cache_reclaim(qc->qc_cache) != 0) {
1.26 thorpej 548: didsomething = true;
1.6 yamt 549: }
1.22 yamt 550: prevqc = qc;
1.6 yamt 551: }
552:
553: return didsomething;
554: }
1.5 yamt 555: #endif /* defined(QCACHE) */
556:
1.1 yamt 557: #if defined(_KERNEL)
558: static int
559: vmem_init(void)
560: {
561:
1.30 yamt 562: mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_NONE);
1.35 ad 563: pool_cache_bootstrap(&bt_cache, sizeof(bt_t), 0, 0, 0, "vmembt",
564: NULL, IPL_VM, NULL, NULL, NULL);
1.1 yamt 565: return 0;
566: }
567: #endif /* defined(_KERNEL) */
568:
569: static vmem_addr_t
570: vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags,
571: int spanbttype)
572: {
573: bt_t *btspan;
574: bt_t *btfree;
575:
576: KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
577: KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
578:
579: btspan = bt_alloc(vm, flags);
580: if (btspan == NULL) {
581: return VMEM_ADDR_NULL;
582: }
583: btfree = bt_alloc(vm, flags);
584: if (btfree == NULL) {
585: bt_free(vm, btspan);
586: return VMEM_ADDR_NULL;
587: }
588:
589: btspan->bt_type = spanbttype;
590: btspan->bt_start = addr;
591: btspan->bt_size = size;
592:
593: btfree->bt_type = BT_TYPE_FREE;
594: btfree->bt_start = addr;
595: btfree->bt_size = size;
596:
597: VMEM_LOCK(vm);
598: bt_insseg_tail(vm, btspan);
599: bt_insseg(vm, btfree, btspan);
600: bt_insfree(vm, btfree);
601: VMEM_UNLOCK(vm);
602:
603: return addr;
604: }
605:
1.30 yamt 606: static void
607: vmem_destroy1(vmem_t *vm)
608: {
609:
610: #if defined(QCACHE)
611: qc_destroy(vm);
612: #endif /* defined(QCACHE) */
613: if (vm->vm_hashlist != NULL) {
614: int i;
615:
616: for (i = 0; i < vm->vm_hashsize; i++) {
617: bt_t *bt;
618:
619: while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) {
620: KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC);
621: bt_free(vm, bt);
622: }
623: }
624: xfree(vm->vm_hashlist);
625: }
1.31 ad 626: VMEM_LOCK_DESTROY(vm);
1.30 yamt 627: xfree(vm);
628: }
629:
1.1 yamt 630: static int
631: vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
632: {
633: vmem_addr_t addr;
634:
635: if (vm->vm_allocfn == NULL) {
636: return EINVAL;
637: }
638:
639: addr = (*vm->vm_allocfn)(vm->vm_source, size, &size, flags);
640: if (addr == VMEM_ADDR_NULL) {
641: return ENOMEM;
642: }
643:
644: if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) == VMEM_ADDR_NULL) {
645: (*vm->vm_freefn)(vm->vm_source, addr, size);
646: return ENOMEM;
647: }
648:
649: return 0;
650: }
651:
652: static int
653: vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags)
654: {
655: bt_t *bt;
656: int i;
657: struct vmem_hashlist *newhashlist;
658: struct vmem_hashlist *oldhashlist;
659: size_t oldhashsize;
660:
661: KASSERT(newhashsize > 0);
662:
663: newhashlist =
                   664: 	    xmalloc(sizeof(struct vmem_hashlist) * newhashsize, flags);
665: if (newhashlist == NULL) {
666: return ENOMEM;
667: }
668: for (i = 0; i < newhashsize; i++) {
669: LIST_INIT(&newhashlist[i]);
670: }
671:
1.30 yamt 672: if (!VMEM_TRYLOCK(vm)) {
673: xfree(newhashlist);
674: return EBUSY;
675: }
1.1 yamt 676: oldhashlist = vm->vm_hashlist;
677: oldhashsize = vm->vm_hashsize;
678: vm->vm_hashlist = newhashlist;
679: vm->vm_hashsize = newhashsize;
680: if (oldhashlist == NULL) {
681: VMEM_UNLOCK(vm);
682: return 0;
683: }
684: for (i = 0; i < oldhashsize; i++) {
685: while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
686: bt_rembusy(vm, bt); /* XXX */
687: bt_insbusy(vm, bt);
688: }
689: }
690: VMEM_UNLOCK(vm);
691:
692: xfree(oldhashlist);
693:
694: return 0;
695: }
696:
1.10 yamt 697: /*
698: * vmem_fit: check if a bt can satisfy the given restrictions.
699: */
700:
701: static vmem_addr_t
702: vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align, vmem_size_t phase,
703: vmem_size_t nocross, vmem_addr_t minaddr, vmem_addr_t maxaddr)
704: {
705: vmem_addr_t start;
706: vmem_addr_t end;
707:
708: KASSERT(bt->bt_size >= size);
709:
710: /*
711: * XXX assumption: vmem_addr_t and vmem_size_t are
                   712: 	 * unsigned integers of the same size.
713: */
714:
715: start = bt->bt_start;
716: if (start < minaddr) {
717: start = minaddr;
718: }
719: end = BT_END(bt);
720: if (end > maxaddr - 1) {
721: end = maxaddr - 1;
722: }
723: if (start >= end) {
724: return VMEM_ADDR_NULL;
725: }
1.19 yamt 726:
727: start = VMEM_ALIGNUP(start - phase, align) + phase;
1.10 yamt 728: if (start < bt->bt_start) {
729: start += align;
730: }
1.19 yamt 731: if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
1.10 yamt 732: KASSERT(align < nocross);
1.19 yamt 733: start = VMEM_ALIGNUP(start - phase, nocross) + phase;
1.10 yamt 734: }
735: if (start < end && end - start >= size) {
736: KASSERT((start & (align - 1)) == phase);
1.19 yamt 737: KASSERT(!VMEM_CROSS_P(start, start + size - 1, nocross));
1.10 yamt 738: KASSERT(minaddr <= start);
739: KASSERT(maxaddr == 0 || start + size <= maxaddr);
740: KASSERT(bt->bt_start <= start);
741: KASSERT(start + size <= BT_END(bt));
742: return start;
743: }
744: return VMEM_ADDR_NULL;
745: }
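
/*
 * A worked example of the arithmetic above: given a free tag covering
 * [0x1050, 0x2050) and size = 0x100, align = 0x200, phase = 0x80 with
 * no other restrictions, start becomes
 * VMEM_ALIGNUP(0x1050 - 0x80, 0x200) + 0x80 == 0x1000 + 0x80 == 0x1080,
 * which satisfies (start & (align - 1)) == phase and leaves
 * end - start == 0xfd0 >= 0x100, so 0x1080 is returned.  Note that
 * maxaddr == 0 means "no limit": maxaddr - 1 wraps around to the
 * largest vmem_addr_t.
 */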
746:
1.44    ! cegger  747: #if !defined(DEBUG)
        !         748: #define vmem_check_sanity(vm)	true
        !         749: #else
        !         750: 
        !         751: static bool
        !         752: vmem_check_sanity(vmem_t *vm)
        !         753: {
        !         754: 	const bt_t *bt, *bt2;
        !         755: 
        !         756: 	KASSERT(vm != NULL);
        !         757: 
        !         758: 	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
        !         759: 		if (bt->bt_start >= BT_END(bt)) {
        !         760: 			printf("%s: bogus VMEM '%s' span 0x%"PRIx64
        !         761: 			    " - 0x%"PRIx64" %s\n",
        !         762: 			    __func__, vm->vm_name,
        !         763: 			    (uint64_t)bt->bt_start, (uint64_t)BT_END(bt),
        !         764: 			    (bt->bt_type == BT_TYPE_BUSY) ?
        !         765: 			    "allocated" : "free");
        !         766: 			return false;
        !         767: 		}
        !         768: 	}
        !         769: 
        !         770: 	/*
        !         771: 	 * Check that tags of the same kind never overlap.  A span tag
        !         772: 	 * covers exactly the same range as the BUSY/FREE tags carved
        !         773: 	 * out of it, so spans are only compared against other spans
        !         774: 	 * and segments against other segments.
        !         775: 	 */
        !         776: 	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
        !         777: 		CIRCLEQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
        !         778: 			if (bt == bt2) {
        !         779: 				continue;
        !         780: 			}
        !         781: 			if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
        !         782: 				continue;
        !         783: 			}
        !         784: 			if (bt->bt_start < BT_END(bt2) &&
        !         785: 			    bt2->bt_start < BT_END(bt)) {
        !         786: 				printf("%s: overlapping VMEM '%s' spans"
        !         787: 				    " 0x%"PRIx64" - 0x%"PRIx64" and"
        !         788: 				    " 0x%"PRIx64" - 0x%"PRIx64"\n",
        !         789: 				    __func__, vm->vm_name,
        !         790: 				    (uint64_t)bt->bt_start,
        !         791: 				    (uint64_t)BT_END(bt),
        !         792: 				    (uint64_t)bt2->bt_start,
        !         793: 				    (uint64_t)BT_END(bt2));
        !         794: 				return false;
        !         795: 			}
        !         796: 		}
        !         797: 	}
        !         798: 
        !         799: 	return true;
        !         800: }
        !         801: #endif /* DEBUG */
! 824:
1.1 yamt 825: /* ---- vmem API */
826:
827: /*
828: * vmem_create: create an arena.
829: *
830: * => must not be called from interrupt context.
831: */
832:
833: vmem_t *
834: vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
835: vmem_size_t quantum,
836: vmem_addr_t (*allocfn)(vmem_t *, vmem_size_t, vmem_size_t *, vm_flag_t),
837: void (*freefn)(vmem_t *, vmem_addr_t, vmem_size_t),
1.31 ad 838: vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags,
839: int ipl)
1.1 yamt 840: {
841: vmem_t *vm;
842: int i;
843: #if defined(_KERNEL)
844: static ONCE_DECL(control);
845: #endif /* defined(_KERNEL) */
846:
847: KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
848: KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
849:
850: #if defined(_KERNEL)
851: if (RUN_ONCE(&control, vmem_init)) {
852: return NULL;
853: }
854: #endif /* defined(_KERNEL) */
855: vm = xmalloc(sizeof(*vm), flags);
856: if (vm == NULL) {
857: return NULL;
858: }
859:
1.31 ad 860: VMEM_LOCK_INIT(vm, ipl);
1.1 yamt 861: vm->vm_name = name;
862: vm->vm_quantum_mask = quantum - 1;
863: vm->vm_quantum_shift = calc_order(quantum);
1.4 yamt 864: KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
1.1 yamt 865: vm->vm_allocfn = allocfn;
866: vm->vm_freefn = freefn;
867: vm->vm_source = source;
868: vm->vm_nbusytag = 0;
1.5 yamt 869: #if defined(QCACHE)
1.31 ad 870: qc_init(vm, qcache_max, ipl);
1.5 yamt 871: #endif /* defined(QCACHE) */
1.1 yamt 872:
873: CIRCLEQ_INIT(&vm->vm_seglist);
874: for (i = 0; i < VMEM_MAXORDER; i++) {
875: LIST_INIT(&vm->vm_freelist[i]);
876: }
877: vm->vm_hashlist = NULL;
878: if (vmem_rehash(vm, VMEM_HASHSIZE_INIT, flags)) {
1.30 yamt 879: vmem_destroy1(vm);
1.1 yamt 880: return NULL;
881: }
882:
883: if (size != 0) {
884: if (vmem_add(vm, base, size, flags) == 0) {
1.30 yamt 885: vmem_destroy1(vm);
1.1 yamt 886: return NULL;
887: }
888: }
889:
1.30 yamt 890: #if defined(_KERNEL)
891: mutex_enter(&vmem_list_lock);
892: LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
893: mutex_exit(&vmem_list_lock);
894: #endif /* defined(_KERNEL) */
895:
1.1 yamt 896: return vm;
897: }
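
/*
 * A minimal usage sketch (illustrative only: the arena name, range,
 * sizes and ipl are made up, and error handling is elided):
 *
 *	vmem_t *arena;
 *	vmem_addr_t va;
 *
 *	arena = vmem_create("example", 0x1000, 0x10000, 1,
 *	    NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
 *	va = vmem_alloc(arena, 64, VM_SLEEP | VM_INSTANTFIT);
 *	if (va != VMEM_ADDR_NULL)
 *		vmem_free(arena, va, 64);
 *	vmem_destroy(arena);
 */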
898:
899: void
900: vmem_destroy(vmem_t *vm)
901: {
902:
1.30 yamt 903: #if defined(_KERNEL)
904: mutex_enter(&vmem_list_lock);
905: LIST_REMOVE(vm, vm_alllist);
906: mutex_exit(&vmem_list_lock);
907: #endif /* defined(_KERNEL) */
1.1 yamt 908:
1.30 yamt 909: vmem_destroy1(vm);
1.1 yamt 910: }
911:
912: vmem_size_t
913: vmem_roundup_size(vmem_t *vm, vmem_size_t size)
914: {
915:
916: return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
917: }
918:
919: /*
920: * vmem_alloc:
921: *
922: * => caller must ensure appropriate spl,
923: * if the arena can be accessed from interrupt context.
924: */
925:
926: vmem_addr_t
1.38 yamt 927: vmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
1.1 yamt 928: {
1.12 yamt 929: const vm_flag_t strat __unused = flags & VM_FITMASK;
1.1 yamt 930:
931: KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
932: KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
933:
934: KASSERT(size > 0);
935: KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
1.3 yamt 936: if ((flags & VM_SLEEP) != 0) {
1.42 yamt 937: ASSERT_SLEEPABLE();
1.3 yamt 938: }
1.1 yamt 939:
1.5 yamt 940: #if defined(QCACHE)
941: if (size <= vm->vm_qcache_max) {
1.38 yamt 942: int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
1.22 yamt 943: qcache_t *qc = vm->vm_qcache[qidx - 1];
1.5 yamt 944:
1.35 ad 945: return (vmem_addr_t)pool_cache_get(qc->qc_cache,
1.5 yamt 946: vmf_to_prf(flags));
947: }
948: #endif /* defined(QCACHE) */
949:
1.38 yamt 950: return vmem_xalloc(vm, size, 0, 0, 0, 0, 0, flags);
1.10 yamt 951: }
952:
953: vmem_addr_t
954: vmem_xalloc(vmem_t *vm, vmem_size_t size0, vmem_size_t align, vmem_size_t phase,
955: vmem_size_t nocross, vmem_addr_t minaddr, vmem_addr_t maxaddr,
956: vm_flag_t flags)
957: {
958: struct vmem_freelist *list;
959: struct vmem_freelist *first;
960: struct vmem_freelist *end;
961: bt_t *bt;
962: bt_t *btnew;
963: bt_t *btnew2;
964: const vmem_size_t size = vmem_roundup_size(vm, size0);
965: vm_flag_t strat = flags & VM_FITMASK;
966: vmem_addr_t start;
967:
968: KASSERT(size0 > 0);
969: KASSERT(size > 0);
970: KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
971: if ((flags & VM_SLEEP) != 0) {
1.42 yamt 972: ASSERT_SLEEPABLE();
1.10 yamt 973: }
974: KASSERT((align & vm->vm_quantum_mask) == 0);
975: KASSERT((align & (align - 1)) == 0);
976: KASSERT((phase & vm->vm_quantum_mask) == 0);
977: KASSERT((nocross & vm->vm_quantum_mask) == 0);
978: KASSERT((nocross & (nocross - 1)) == 0);
979: KASSERT((align == 0 && phase == 0) || phase < align);
980: KASSERT(nocross == 0 || nocross >= size);
981: KASSERT(maxaddr == 0 || minaddr < maxaddr);
1.19 yamt 982: KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross));
1.44 ! cegger 983: KASSERT(vmem_check_sanity(vm));
1.10 yamt 984:
985: if (align == 0) {
986: align = vm->vm_quantum_mask + 1;
987: }
1.1 yamt 988: btnew = bt_alloc(vm, flags);
989: if (btnew == NULL) {
990: return VMEM_ADDR_NULL;
991: }
1.10 yamt 992: btnew2 = bt_alloc(vm, flags); /* XXX not necessary if no restrictions */
993: if (btnew2 == NULL) {
994: bt_free(vm, btnew);
995: return VMEM_ADDR_NULL;
996: }
1.1 yamt 997:
998: retry_strat:
999: first = bt_freehead_toalloc(vm, size, strat);
1000: end = &vm->vm_freelist[VMEM_MAXORDER];
1001: retry:
1002: bt = NULL;
1003: VMEM_LOCK(vm);
1.2 yamt 1004: if (strat == VM_INSTANTFIT) {
1005: for (list = first; list < end; list++) {
1006: bt = LIST_FIRST(list);
1007: if (bt != NULL) {
1.10 yamt 1008: start = vmem_fit(bt, size, align, phase,
1009: nocross, minaddr, maxaddr);
1010: if (start != VMEM_ADDR_NULL) {
1011: goto gotit;
1012: }
1.2 yamt 1013: }
1014: }
1015: } else { /* VM_BESTFIT */
1016: for (list = first; list < end; list++) {
1017: LIST_FOREACH(bt, list, bt_freelist) {
1018: if (bt->bt_size >= size) {
1.10 yamt 1019: start = vmem_fit(bt, size, align, phase,
1020: nocross, minaddr, maxaddr);
1021: if (start != VMEM_ADDR_NULL) {
1022: goto gotit;
1023: }
1.2 yamt 1024: }
1.1 yamt 1025: }
1026: }
1027: }
1.2 yamt 1028: VMEM_UNLOCK(vm);
1.1 yamt 1029: #if 1
1.2 yamt 1030: if (strat == VM_INSTANTFIT) {
1031: strat = VM_BESTFIT;
1032: goto retry_strat;
1033: }
1.1 yamt 1034: #endif
1.10 yamt 1035: if (align != vm->vm_quantum_mask + 1 || phase != 0 ||
1036: nocross != 0 || minaddr != 0 || maxaddr != 0) {
1037:
1038: /*
1039: * XXX should try to import a region large enough to
1040: * satisfy restrictions?
1041: */
1042:
1.20 yamt 1043: goto fail;
1.10 yamt 1044: }
1.2 yamt 1045: if (vmem_import(vm, size, flags) == 0) {
1046: goto retry;
1.1 yamt 1047: }
1.2 yamt 1048: /* XXX */
1.20 yamt 1049: fail:
1050: bt_free(vm, btnew);
1051: bt_free(vm, btnew2);
1.2 yamt 1052: return VMEM_ADDR_NULL;
1053:
1054: gotit:
1.1 yamt 1055: KASSERT(bt->bt_type == BT_TYPE_FREE);
1056: KASSERT(bt->bt_size >= size);
1057: bt_remfree(vm, bt);
1.44 ! cegger 1058: KASSERT(vmem_check_sanity(vm));
1.10 yamt 1059: if (bt->bt_start != start) {
1060: btnew2->bt_type = BT_TYPE_FREE;
1061: btnew2->bt_start = bt->bt_start;
1062: btnew2->bt_size = start - bt->bt_start;
1063: bt->bt_start = start;
1064: bt->bt_size -= btnew2->bt_size;
1065: bt_insfree(vm, btnew2);
1066: bt_insseg(vm, btnew2, CIRCLEQ_PREV(bt, bt_seglist));
1067: btnew2 = NULL;
1.44 ! cegger 1068: KASSERT(vmem_check_sanity(vm));
1.10 yamt 1069: }
1070: KASSERT(bt->bt_start == start);
1.1 yamt 1071: if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
1072: /* split */
1073: btnew->bt_type = BT_TYPE_BUSY;
1074: btnew->bt_start = bt->bt_start;
1075: btnew->bt_size = size;
1076: bt->bt_start = bt->bt_start + size;
1077: bt->bt_size -= size;
1078: bt_insfree(vm, bt);
1079: bt_insseg(vm, btnew, CIRCLEQ_PREV(bt, bt_seglist));
1080: bt_insbusy(vm, btnew);
1081: VMEM_UNLOCK(vm);
1.44 ! cegger 1082: KASSERT(vmem_check_sanity(vm));
1.1 yamt 1083: } else {
1084: bt->bt_type = BT_TYPE_BUSY;
1085: bt_insbusy(vm, bt);
1086: VMEM_UNLOCK(vm);
1087: bt_free(vm, btnew);
1088: btnew = bt;
1.44 ! cegger 1089: KASSERT(vmem_check_sanity(vm));
1.1 yamt 1090: }
1.10 yamt 1091: if (btnew2 != NULL) {
1092: bt_free(vm, btnew2);
1093: }
1.1 yamt 1094: KASSERT(btnew->bt_size >= size);
1095: btnew->bt_type = BT_TYPE_BUSY;
1096:
1.44 ! cegger 1097: KASSERT(vmem_check_sanity(vm));
1.1 yamt 1098: return btnew->bt_start;
1099: }
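
/*
 * For example (values illustrative only), a caller wanting a
 * 0x1000-byte chunk aligned to 64KB, below 16MB and not crossing a
 * 16MB boundary, in the style of ISA DMA restrictions, could use:
 *
 *	va = vmem_xalloc(arena, 0x1000, 0x10000, 0, 0x1000000,
 *	    0, 0x1000000, VM_NOSLEEP | VM_BESTFIT);
 */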
1100:
1101: /*
1102: * vmem_free:
1103: *
1104: * => caller must ensure appropriate spl,
1105: * if the arena can be accessed from interrupt context.
1106: */
1107:
1108: void
1109: vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
1110: {
1111:
1112: KASSERT(addr != VMEM_ADDR_NULL);
1113: KASSERT(size > 0);
1114:
1.5 yamt 1115: #if defined(QCACHE)
1116: if (size <= vm->vm_qcache_max) {
1117: int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
1.22 yamt 1118: qcache_t *qc = vm->vm_qcache[qidx - 1];
1.5 yamt 1119:
1.35      ad     1120: 		pool_cache_put(qc->qc_cache, (void *)addr);
                 1120: 		return;
1.5 yamt 1121: }
1122: #endif /* defined(QCACHE) */
1123:
1.10 yamt 1124: vmem_xfree(vm, addr, size);
1125: }
1126:
1127: void
1.17 yamt 1128: vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
1.10 yamt 1129: {
1130: bt_t *bt;
1131: bt_t *t;
1132:
1133: KASSERT(addr != VMEM_ADDR_NULL);
1134: KASSERT(size > 0);
1135:
1.1 yamt 1136: VMEM_LOCK(vm);
1137:
1138: bt = bt_lookupbusy(vm, addr);
1139: KASSERT(bt != NULL);
1140: KASSERT(bt->bt_start == addr);
1141: KASSERT(bt->bt_size == vmem_roundup_size(vm, size) ||
1142: bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
1143: KASSERT(bt->bt_type == BT_TYPE_BUSY);
1144: bt_rembusy(vm, bt);
1145: bt->bt_type = BT_TYPE_FREE;
1146:
1147: /* coalesce */
1148: t = CIRCLEQ_NEXT(bt, bt_seglist);
                 1149: 	if (t != (const void *)&vm->vm_seglist && t->bt_type == BT_TYPE_FREE) {
1150: KASSERT(BT_END(bt) == t->bt_start);
1151: bt_remfree(vm, t);
1152: bt_remseg(vm, t);
1153: bt->bt_size += t->bt_size;
1154: bt_free(vm, t);
1155: }
1156: t = CIRCLEQ_PREV(bt, bt_seglist);
                 1157: 	if (t != (const void *)&vm->vm_seglist && t->bt_type == BT_TYPE_FREE) {
1158: KASSERT(BT_END(t) == bt->bt_start);
1159: bt_remfree(vm, t);
1160: bt_remseg(vm, t);
1161: bt->bt_size += t->bt_size;
1162: bt->bt_start = t->bt_start;
1163: bt_free(vm, t);
1164: }
1165:
1166: t = CIRCLEQ_PREV(bt, bt_seglist);
1167: KASSERT(t != NULL);
1168: KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
1169: if (vm->vm_freefn != NULL && t->bt_type == BT_TYPE_SPAN &&
1170: t->bt_size == bt->bt_size) {
1171: vmem_addr_t spanaddr;
1172: vmem_size_t spansize;
1173:
1174: KASSERT(t->bt_start == bt->bt_start);
1175: spanaddr = bt->bt_start;
1176: spansize = bt->bt_size;
1177: bt_remseg(vm, bt);
1178: bt_free(vm, bt);
1179: bt_remseg(vm, t);
1180: bt_free(vm, t);
1181: VMEM_UNLOCK(vm);
1182: (*vm->vm_freefn)(vm->vm_source, spanaddr, spansize);
1183: } else {
1184: bt_insfree(vm, bt);
1185: VMEM_UNLOCK(vm);
1186: }
1187: }
1188:
1189: /*
1190: * vmem_add:
1191: *
1192: * => caller must ensure appropriate spl,
1193: * if the arena can be accessed from interrupt context.
1194: */
1195:
1196: vmem_addr_t
1197: vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags)
1198: {
1199:
1200: return vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC);
1201: }
1202:
1.6 yamt 1203: /*
1204: * vmem_reap: reap unused resources.
1205: *
1.26 thorpej 1206: * => return true if we successfully reaped something.
1.6 yamt 1207: */
1208:
1.25 thorpej 1209: bool
1.6 yamt 1210: vmem_reap(vmem_t *vm)
1211: {
1.26 thorpej 1212: bool didsomething = false;
1.6 yamt 1213:
1214: #if defined(QCACHE)
1215: didsomething = qc_reap(vm);
1216: #endif /* defined(QCACHE) */
1217: return didsomething;
1218: }
1219:
1.30 yamt 1220: /* ---- rehash */
1221:
1222: #if defined(_KERNEL)
1223: static struct callout vmem_rehash_ch;
1224: static int vmem_rehash_interval;
1225: static struct workqueue *vmem_rehash_wq;
1226: static struct work vmem_rehash_wk;
1227:
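/*
 * vmem_rehash_all: resize each arena's busy-tag hash so that it stays
 * within a factor of 2 of one busy tag per bucket (clamped to
 * [VMEM_HASHSIZE_MIN, VMEM_HASHSIZE_MAX]), using trylocks and
 * VM_NOSLEEP so the periodic work can never stall an arena.
 */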
1228: static void
1229: vmem_rehash_all(struct work *wk, void *dummy)
1230: {
1231: vmem_t *vm;
1232:
1233: KASSERT(wk == &vmem_rehash_wk);
1234: mutex_enter(&vmem_list_lock);
1235: LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1236: size_t desired;
1237: size_t current;
1238:
1239: if (!VMEM_TRYLOCK(vm)) {
1240: continue;
1241: }
1242: desired = vm->vm_nbusytag;
1243: current = vm->vm_hashsize;
1244: VMEM_UNLOCK(vm);
1245:
1246: if (desired > VMEM_HASHSIZE_MAX) {
1247: desired = VMEM_HASHSIZE_MAX;
1248: } else if (desired < VMEM_HASHSIZE_MIN) {
1249: desired = VMEM_HASHSIZE_MIN;
1250: }
1251: if (desired > current * 2 || desired * 2 < current) {
1252: vmem_rehash(vm, desired, VM_NOSLEEP);
1253: }
1254: }
1255: mutex_exit(&vmem_list_lock);
1256:
1257: callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
1258: }
1259:
1260: static void
1261: vmem_rehash_all_kick(void *dummy)
1262: {
1263:
1.32 rmind 1264: workqueue_enqueue(vmem_rehash_wq, &vmem_rehash_wk, NULL);
1.30 yamt 1265: }
1266:
1267: void
1268: vmem_rehash_start(void)
1269: {
1270: int error;
1271:
1272: error = workqueue_create(&vmem_rehash_wq, "vmem_rehash",
1.41 ad 1273: vmem_rehash_all, NULL, PRI_VM, IPL_SOFTCLOCK, WQ_MPSAFE);
1.30 yamt 1274: if (error) {
1275: panic("%s: workqueue_create %d\n", __func__, error);
1276: }
1.41 ad 1277: callout_init(&vmem_rehash_ch, CALLOUT_MPSAFE);
1.30 yamt 1278: callout_setfunc(&vmem_rehash_ch, vmem_rehash_all_kick, NULL);
1279:
1280: vmem_rehash_interval = hz * 10;
1281: callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
1282: }
1283: #endif /* defined(_KERNEL) */
1284:
1.1 yamt 1285: /* ---- debug */
1286:
1.37 yamt 1287: #if defined(DDB)
1288: static bt_t *
1289: vmem_whatis_lookup(vmem_t *vm, uintptr_t addr)
1290: {
1.39 yamt 1291: bt_t *bt;
1.37 yamt 1292:
1.39 yamt 1293: CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1294: if (BT_ISSPAN_P(bt)) {
1295: continue;
1296: }
1297: if (bt->bt_start <= addr && addr < BT_END(bt)) {
1298: return bt;
1.37 yamt 1299: }
1300: }
1301:
1302: return NULL;
1303: }
1304:
1305: void
1306: vmem_whatis(uintptr_t addr, void (*pr)(const char *, ...))
1307: {
1308: vmem_t *vm;
1309:
1310: LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1311: bt_t *bt;
1312:
1313: bt = vmem_whatis_lookup(vm, addr);
1314: if (bt == NULL) {
1315: continue;
1316: }
1.39 yamt 1317: (*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
1.37 yamt 1318: (void *)addr, (void *)bt->bt_start,
1.39 yamt 1319: (size_t)(addr - bt->bt_start), vm->vm_name,
1320: (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
1.37 yamt 1321: }
1322: }
1.43 cegger 1323:
1324: static void
1325: vmem_showall(void (*pr)(const char *, ...))
1326: {
1327: vmem_t *vm;
1328:
1329: LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1330: (*pr)("VMEM '%s' at %p\n", vm->vm_name, vm);
1331: if (vm->vm_source)
1332: (*pr)(" VMEM backend '%s' at %p\n",
1333: vm->vm_source->vm_name, vm->vm_source);
1334: }
1335: }
1336:
1337: static void
1338: vmem_show(uintptr_t addr, void (*pr)(const char *, ...))
1339: {
1340: vmem_t *vm;
1341: bt_t *bt = NULL;
1342:
1343: LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1344: if ((uintptr_t)vm == addr)
1345: goto found;
1346:
1347: bt = vmem_whatis_lookup(vm, addr);
1348: if (bt != NULL)
1349: goto found;
1350: }
1351:
1352: if (bt == NULL)
1353: return;
1354: found:
1355:
1356: (*pr)("VMEM '%s' spans\n", vm->vm_name);
1357: CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1358: (*pr)(" 0x%"PRIx64" - 0x%"PRIx64" %s %s\n",
                 1359: 		    (uint64_t)bt->bt_start, (uint64_t)BT_END(bt),
1360: (bt->bt_type == BT_TYPE_SPAN_STATIC) ? "static" : "",
1361: (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
1362: }
1363: }
1364:
1365: void
1366: vmem_print(uintptr_t addr, const char *modif, void (*pr)(const char *, ...))
1367: {
1368: if (modif[0] == 'a') {
1369: vmem_showall(pr);
1370: return;
1371: }
1372:
1373: vmem_show(addr, pr);
1374: }
1.37 yamt 1375: #endif /* defined(DDB) */
1376:
1.1 yamt 1377: #if defined(VMEM_DEBUG)
1378:
1379: #if !defined(_KERNEL)
1380: #include <stdio.h>
1381: #endif /* !defined(_KERNEL) */
1382:
1383: void bt_dump(const bt_t *);
1384:
1385: void
1386: bt_dump(const bt_t *bt)
1387: {
1388:
1389: printf("\t%p: %" PRIu64 ", %" PRIu64 ", %d\n",
1390: bt, (uint64_t)bt->bt_start, (uint64_t)bt->bt_size,
1391: bt->bt_type);
1392: }
1393:
1394: void
1395: vmem_dump(const vmem_t *vm)
1396: {
1397: const bt_t *bt;
1398: int i;
1399:
1400: printf("vmem %p '%s'\n", vm, vm->vm_name);
1401: CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1402: bt_dump(bt);
1403: }
1404:
1405: for (i = 0; i < VMEM_MAXORDER; i++) {
1406: const struct vmem_freelist *fl = &vm->vm_freelist[i];
1407:
1408: if (LIST_EMPTY(fl)) {
1409: continue;
1410: }
1411:
1412: printf("freelist[%d]\n", i);
1413: LIST_FOREACH(bt, fl, bt_freelist) {
1414: bt_dump(bt);
1417: }
1418: }
1419: }
1420:
1421: #if !defined(_KERNEL)
1422:
1423: int
1424: main()
1425: {
1426: vmem_t *vm;
1427: vmem_addr_t p;
1428: struct reg {
1429: vmem_addr_t p;
1430: vmem_size_t sz;
1.25 thorpej 1431: bool x;
1.1 yamt 1432: } *reg = NULL;
1433: int nreg = 0;
1434: int nalloc = 0;
1435: int nfree = 0;
1436: vmem_size_t total = 0;
1437: #if 1
1438: vm_flag_t strat = VM_INSTANTFIT;
1439: #else
1440: vm_flag_t strat = VM_BESTFIT;
1441: #endif
1442:
1443: vm = vmem_create("test", VMEM_ADDR_NULL, 0, 1,
1.30      yamt   1444: 	    NULL, NULL, NULL, 0, VM_SLEEP, IPL_VM);
1.1 yamt 1445: if (vm == NULL) {
1446: printf("vmem_create\n");
1447: exit(EXIT_FAILURE);
1448: }
1449: vmem_dump(vm);
1450:
1451: p = vmem_add(vm, 100, 200, VM_SLEEP);
1452: p = vmem_add(vm, 2000, 1, VM_SLEEP);
1453: p = vmem_add(vm, 40000, 0x10000000>>12, VM_SLEEP);
1454: p = vmem_add(vm, 10000, 10000, VM_SLEEP);
1455: p = vmem_add(vm, 500, 1000, VM_SLEEP);
1456: vmem_dump(vm);
1457: for (;;) {
1458: struct reg *r;
1.10 yamt 1459: int t = rand() % 100;
1.1 yamt 1460:
1.10 yamt 1461: if (t > 45) {
1462: /* alloc */
1.1 yamt 1463: vmem_size_t sz = rand() % 500 + 1;
1.25 thorpej 1464: bool x;
1.10 yamt 1465: vmem_size_t align, phase, nocross;
1466: vmem_addr_t minaddr, maxaddr;
1467:
1468: if (t > 70) {
1.26 thorpej 1469: x = true;
1.10 yamt 1470: /* XXX */
1471: align = 1 << (rand() % 15);
1472: phase = rand() % 65536;
1473: nocross = 1 << (rand() % 15);
1474: if (align <= phase) {
1475: phase = 0;
1476: }
1.19 yamt 1477: if (VMEM_CROSS_P(phase, phase + sz - 1,
1478: nocross)) {
1.10 yamt 1479: nocross = 0;
1480: }
1481: minaddr = rand() % 50000;
1482: maxaddr = rand() % 70000;
1483: if (minaddr > maxaddr) {
1484: minaddr = 0;
1485: maxaddr = 0;
1486: }
1487: printf("=== xalloc %" PRIu64
1488: " align=%" PRIu64 ", phase=%" PRIu64
1489: ", nocross=%" PRIu64 ", min=%" PRIu64
1490: ", max=%" PRIu64 "\n",
1491: (uint64_t)sz,
1492: (uint64_t)align,
1493: (uint64_t)phase,
1494: (uint64_t)nocross,
1495: (uint64_t)minaddr,
1496: (uint64_t)maxaddr);
1497: p = vmem_xalloc(vm, sz, align, phase, nocross,
1498: minaddr, maxaddr, strat|VM_SLEEP);
1499: } else {
1.26 thorpej 1500: x = false;
1.10 yamt 1501: printf("=== alloc %" PRIu64 "\n", (uint64_t)sz);
1502: p = vmem_alloc(vm, sz, strat|VM_SLEEP);
1503: }
1.1 yamt 1504: printf("-> %" PRIu64 "\n", (uint64_t)p);
1505: vmem_dump(vm);
1506: if (p == VMEM_ADDR_NULL) {
1.10 yamt 1507: if (x) {
1508: continue;
1509: }
1.1 yamt 1510: break;
1511: }
1512: nreg++;
1513: reg = realloc(reg, sizeof(*reg) * nreg);
                 1514: 			r = &reg[nreg - 1];
1515: r->p = p;
1516: r->sz = sz;
1.10 yamt 1517: r->x = x;
1.1 yamt 1518: total += sz;
1519: nalloc++;
1520: } else if (nreg != 0) {
1.10 yamt 1521: /* free */
1.1       yamt   1522: 			r = &reg[rand() % nreg];
1523: printf("=== free %" PRIu64 ", %" PRIu64 "\n",
1524: (uint64_t)r->p, (uint64_t)r->sz);
1.10 yamt 1525: if (r->x) {
1526: vmem_xfree(vm, r->p, r->sz);
1527: } else {
1528: vmem_free(vm, r->p, r->sz);
1529: }
1.1 yamt 1530: total -= r->sz;
1531: vmem_dump(vm);
1532: *r = reg[nreg - 1];
1533: nreg--;
1534: nfree++;
1535: }
1536: printf("total=%" PRIu64 "\n", (uint64_t)total);
1537: }
1538: fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n",
1539: (uint64_t)total, nalloc, nfree);
1540: exit(EXIT_SUCCESS);
1541: }
1542: #endif /* !defined(_KERNEL) */
1543: #endif /* defined(VMEM_DEBUG) */