Annotation of src/sys/kern/subr_vmem.c, Revision 1.36.4.3
1.36.4.1 bouyer 1: /* $NetBSD$ */
1.1 yamt 2:
3: /*-
4: * Copyright (c)2006 YAMAMOTO Takashi,
5: * All rights reserved.
6: *
7: * Redistribution and use in source and binary forms, with or without
8: * modification, are permitted provided that the following conditions
9: * are met:
10: * 1. Redistributions of source code must retain the above copyright
11: * notice, this list of conditions and the following disclaimer.
12: * 2. Redistributions in binary form must reproduce the above copyright
13: * notice, this list of conditions and the following disclaimer in the
14: * documentation and/or other materials provided with the distribution.
15: *
16: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19: * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26: * SUCH DAMAGE.
27: */
28:
29: /*
30: * reference:
31: * - Magazines and Vmem: Extending the Slab Allocator
32: * to Many CPUs and Arbitrary Resources
33: * http://www.usenix.org/event/usenix01/bonwick.html
1.18 yamt 34: *
35: * todo:
36: * - decide how to import segments for vmem_xalloc.
37: * - don't rely on malloc(9).
1.1 yamt 38: */
39:
40: #include <sys/cdefs.h>
1.36.4.1 bouyer 41: __KERNEL_RCSID(0, "$NetBSD$");
1.1 yamt 42:
43: #define VMEM_DEBUG
1.5 yamt 44: #if defined(_KERNEL)
1.36.4.1 bouyer 45: #include "opt_ddb.h"
1.5 yamt 46: #define QCACHE
47: #endif /* defined(_KERNEL) */
1.1 yamt 48:
49: #include <sys/param.h>
50: #include <sys/hash.h>
51: #include <sys/queue.h>
52:
53: #if defined(_KERNEL)
54: #include <sys/systm.h>
1.30 yamt 55: #include <sys/kernel.h> /* hz */
56: #include <sys/callout.h>
1.1 yamt 57: #include <sys/malloc.h>
58: #include <sys/once.h>
59: #include <sys/pool.h>
1.3 yamt 60: #include <sys/proc.h>
1.1 yamt 61: #include <sys/vmem.h>
1.30 yamt 62: #include <sys/workqueue.h>
1.1 yamt 63: #else /* defined(_KERNEL) */
64: #include "../sys/vmem.h"
65: #endif /* defined(_KERNEL) */
66:
67: #if defined(_KERNEL)
1.31 ad 68: #define LOCK_DECL(name) kmutex_t name
1.1 yamt 69: #else /* defined(_KERNEL) */
70: #include <errno.h>
71: #include <assert.h>
72: #include <stdlib.h>
73:
74: #define KASSERT(a) assert(a)
1.31 ad 75: #define LOCK_DECL(name) /* nothing */
76: #define mutex_init(a, b, c) /* nothing */
77: #define mutex_destroy(a) /* nothing */
78: #define mutex_enter(a) /* nothing */
79: #define mutex_exit(a) /* nothing */
80: #define mutex_owned(a) /* nothing */
1.3 yamt 81: #define ASSERT_SLEEPABLE(lk, msg) /* nothing */
1.31 ad 82: #define IPL_VM 0
1.1 yamt 83: #endif /* defined(_KERNEL) */
84:
85: struct vmem;
86: struct vmem_btag;
87:
88: #if defined(VMEM_DEBUG)
89: void vmem_dump(const vmem_t *);
90: #endif /* defined(VMEM_DEBUG) */
91:
1.4 yamt 92: #define VMEM_MAXORDER (sizeof(vmem_size_t) * CHAR_BIT)
1.30 yamt 93:
94: #define VMEM_HASHSIZE_MIN 1 /* XXX */
95: #define VMEM_HASHSIZE_MAX 8192 /* XXX */
96: #define VMEM_HASHSIZE_INIT VMEM_HASHSIZE_MIN
1.1 yamt 97:
98: #define VM_FITMASK (VM_BESTFIT | VM_INSTANTFIT)
99:
100: CIRCLEQ_HEAD(vmem_seglist, vmem_btag);
101: LIST_HEAD(vmem_freelist, vmem_btag);
102: LIST_HEAD(vmem_hashlist, vmem_btag);
103:
1.5 yamt 104: #if defined(QCACHE)
105: #define VMEM_QCACHE_IDX_MAX 32
106:
107: #define QC_NAME_MAX 16
108:
109: struct qcache {
1.35 ad 110: pool_cache_t qc_cache;
1.5 yamt 111: vmem_t *qc_vmem;
112: char qc_name[QC_NAME_MAX];
113: };
114: typedef struct qcache qcache_t;
1.35 ad 115: #define QC_POOL_TO_QCACHE(pool) ((qcache_t *)((pool)->pr_qcache))
1.5 yamt 116: #endif /* defined(QCACHE) */
117:
1.1 yamt 118: /* vmem arena */
119: struct vmem {
1.31 ad 120: LOCK_DECL(vm_lock);
1.1 yamt 121: vmem_addr_t (*vm_allocfn)(vmem_t *, vmem_size_t, vmem_size_t *,
122: vm_flag_t);
123: void (*vm_freefn)(vmem_t *, vmem_addr_t, vmem_size_t);
124: vmem_t *vm_source;
125: struct vmem_seglist vm_seglist;
126: struct vmem_freelist vm_freelist[VMEM_MAXORDER];
127: size_t vm_hashsize;
128: size_t vm_nbusytag;
129: struct vmem_hashlist *vm_hashlist;
130: size_t vm_quantum_mask;
131: int vm_quantum_shift;
132: const char *vm_name;
1.30 yamt 133: LIST_ENTRY(vmem) vm_alllist;
1.5 yamt 134:
135: #if defined(QCACHE)
136: /* quantum cache */
137: size_t vm_qcache_max;
138: struct pool_allocator vm_qcache_allocator;
1.22 yamt 139: qcache_t vm_qcache_store[VMEM_QCACHE_IDX_MAX];
140: qcache_t *vm_qcache[VMEM_QCACHE_IDX_MAX];
1.5 yamt 141: #endif /* defined(QCACHE) */
1.1 yamt 142: };
143:
1.31 ad 144: #define VMEM_LOCK(vm) mutex_enter(&(vm)->vm_lock)
 145: #define VMEM_TRYLOCK(vm) mutex_tryenter(&(vm)->vm_lock)
 146: #define VMEM_UNLOCK(vm) mutex_exit(&(vm)->vm_lock)
1.36 ad 147: #define VMEM_LOCK_INIT(vm, ipl) mutex_init(&(vm)->vm_lock, MUTEX_DEFAULT, ipl)
1.31 ad 148: #define VMEM_LOCK_DESTROY(vm) mutex_destroy(&(vm)->vm_lock)
 149: #define VMEM_ASSERT_LOCKED(vm) KASSERT(mutex_owned(&(vm)->vm_lock))
1.1 yamt 150:
151: /* boundary tag */
152: struct vmem_btag {
153: CIRCLEQ_ENTRY(vmem_btag) bt_seglist;
154: union {
155: LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
156: LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
157: } bt_u;
158: #define bt_hashlist bt_u.u_hashlist
159: #define bt_freelist bt_u.u_freelist
160: vmem_addr_t bt_start;
161: vmem_size_t bt_size;
162: int bt_type;
163: };
164:
165: #define BT_TYPE_SPAN 1
166: #define BT_TYPE_SPAN_STATIC 2
167: #define BT_TYPE_FREE 3
168: #define BT_TYPE_BUSY 4
169: #define BT_ISSPAN_P(bt) ((bt)->bt_type <= BT_TYPE_SPAN_STATIC)
170:
171: #define BT_END(bt) ((bt)->bt_start + (bt)->bt_size)
172:
173: typedef struct vmem_btag bt_t;
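
/*
 * Illustrative layout (sketch, not from the original source): tags on
 * vm_seglist describe spans and their contents in address order.  For
 * example, after importing a span at [0x1000, 0x4000) and allocating
 * 0x800 bytes from it, the seglist would read:
 *
 *	SPAN [0x1000, 0x4000) -> BUSY [0x1000, 0x1800) -> FREE [0x1800, 0x4000)
 *
 * BUSY tags additionally sit on a hash list keyed by bt_start; FREE tags
 * additionally sit on a size-indexed freelist.
 */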
174:
175: /* ---- misc */
176:
1.19 yamt 177: #define VMEM_ALIGNUP(addr, align) \
178: (-(-(addr) & -(align)))
179: #define VMEM_CROSS_P(addr1, addr2, boundary) \
180: ((((addr1) ^ (addr2)) & -(boundary)) != 0)
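
/*
 * Example (illustrative, assuming two's complement unsigned arithmetic):
 * VMEM_ALIGNUP(0x1234, 0x1000) == 0x2000, and
 * VMEM_CROSS_P(0x0fff, 0x1000, 0x1000) is true because the two addresses
 * fall into different 0x1000-aligned blocks.  Both macros assume the
 * "align" and "boundary" arguments are powers of 2.
 */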
181:
1.4 yamt 182: #define ORDER2SIZE(order) ((vmem_size_t)1 << (order))
183:
1.1 yamt 184: static int
185: calc_order(vmem_size_t size)
186: {
1.4 yamt 187: vmem_size_t target;
1.1 yamt 188: int i;
189:
190: KASSERT(size != 0);
191:
192: i = 0;
1.4 yamt 193: target = size >> 1;
194: while (ORDER2SIZE(i) <= target) {
1.1 yamt 195: i++;
196: }
197:
1.4 yamt 198: KASSERT(ORDER2SIZE(i) <= size);
199: KASSERT(size < ORDER2SIZE(i + 1) || ORDER2SIZE(i + 1) < ORDER2SIZE(i));
1.1 yamt 200:
201: return i;
202: }
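
/*
 * Sketch of the invariant: calc_order(size) == floor(log2(size)), e.g.
 * calc_order(1) == 0, calc_order(7) == 2, calc_order(8) == 3.  The second
 * KASSERT above is phrased so that it also holds when ORDER2SIZE(i + 1)
 * wraps around to a smaller value.
 */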
203:
204: #if defined(_KERNEL)
205: static MALLOC_DEFINE(M_VMEM, "vmem", "vmem");
206: #endif /* defined(_KERNEL) */
207:
208: static void *
209: xmalloc(size_t sz, vm_flag_t flags)
210: {
211:
212: #if defined(_KERNEL)
213: return malloc(sz, M_VMEM,
214: M_CANFAIL | ((flags & VM_SLEEP) ? M_WAITOK : M_NOWAIT));
215: #else /* defined(_KERNEL) */
216: return malloc(sz);
217: #endif /* defined(_KERNEL) */
218: }
219:
220: static void
221: xfree(void *p)
222: {
223:
224: #if defined(_KERNEL)
225: return free(p, M_VMEM);
226: #else /* defined(_KERNEL) */
227: return free(p);
228: #endif /* defined(_KERNEL) */
229: }
230:
231: /* ---- boundary tag */
232:
233: #if defined(_KERNEL)
1.35 ad 234: static struct pool_cache bt_cache;
1.1 yamt 235: #endif /* defined(_KERNEL) */
236:
237: static bt_t *
1.17 yamt 238: bt_alloc(vmem_t *vm, vm_flag_t flags)
1.1 yamt 239: {
240: bt_t *bt;
241:
242: #if defined(_KERNEL)
1.35 ad 243: bt = pool_cache_get(&bt_cache,
1.1 yamt 244: (flags & VM_SLEEP) != 0 ? PR_WAITOK : PR_NOWAIT);
245: #else /* defined(_KERNEL) */
246: bt = malloc(sizeof *bt);
247: #endif /* defined(_KERNEL) */
248:
249: return bt;
250: }
251:
252: static void
1.17 yamt 253: bt_free(vmem_t *vm, bt_t *bt)
1.1 yamt 254: {
255:
256: #if defined(_KERNEL)
1.35 ad 257: pool_cache_put(&bt_cache, bt);
1.1 yamt 258: #else /* defined(_KERNEL) */
259: free(bt);
260: #endif /* defined(_KERNEL) */
261: }
262:
263: /*
264: * freelist[0] ... [1, 1]
265: * freelist[1] ... [2, 3]
266: * freelist[2] ... [4, 7]
267: * freelist[3] ... [8, 15]
268: * :
269: * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
270: * :
271: */
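
/*
 * Example (illustrative): a free segment of 9 quanta sits on freelist[3]
 * (range [8, 15]).  An instant-fit allocation of 9 quanta starts searching
 * at freelist[4], where every segment is guaranteed to be large enough,
 * while best-fit starts at freelist[3] and scans for a fitting segment.
 */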
272:
273: static struct vmem_freelist *
274: bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
275: {
276: const vmem_size_t qsize = size >> vm->vm_quantum_shift;
277: int idx;
278:
279: KASSERT((size & vm->vm_quantum_mask) == 0);
280: KASSERT(size != 0);
281:
282: idx = calc_order(qsize);
283: KASSERT(idx >= 0);
284: KASSERT(idx < VMEM_MAXORDER);
285:
286: return &vm->vm_freelist[idx];
287: }
288:
289: static struct vmem_freelist *
290: bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat)
291: {
292: const vmem_size_t qsize = size >> vm->vm_quantum_shift;
293: int idx;
294:
295: KASSERT((size & vm->vm_quantum_mask) == 0);
296: KASSERT(size != 0);
297:
298: idx = calc_order(qsize);
1.4 yamt 299: if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) {
1.1 yamt 300: idx++;
301: /* check too large request? */
302: }
303: KASSERT(idx >= 0);
304: KASSERT(idx < VMEM_MAXORDER);
305:
306: return &vm->vm_freelist[idx];
307: }
308:
309: /* ---- boundary tag hash */
310:
311: static struct vmem_hashlist *
312: bt_hashhead(vmem_t *vm, vmem_addr_t addr)
313: {
314: struct vmem_hashlist *list;
315: unsigned int hash;
316:
317: hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT);
318: list = &vm->vm_hashlist[hash % vm->vm_hashsize];
319:
320: return list;
321: }
322:
323: static bt_t *
324: bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
325: {
326: struct vmem_hashlist *list;
327: bt_t *bt;
328:
329: list = bt_hashhead(vm, addr);
330: LIST_FOREACH(bt, list, bt_hashlist) {
331: if (bt->bt_start == addr) {
332: break;
333: }
334: }
335:
336: return bt;
337: }
338:
339: static void
340: bt_rembusy(vmem_t *vm, bt_t *bt)
341: {
342:
343: KASSERT(vm->vm_nbusytag > 0);
344: vm->vm_nbusytag--;
345: LIST_REMOVE(bt, bt_hashlist);
346: }
347:
348: static void
349: bt_insbusy(vmem_t *vm, bt_t *bt)
350: {
351: struct vmem_hashlist *list;
352:
353: KASSERT(bt->bt_type == BT_TYPE_BUSY);
354:
355: list = bt_hashhead(vm, bt->bt_start);
356: LIST_INSERT_HEAD(list, bt, bt_hashlist);
357: vm->vm_nbusytag++;
358: }
359:
360: /* ---- boundary tag list */
361:
362: static void
363: bt_remseg(vmem_t *vm, bt_t *bt)
364: {
365:
366: CIRCLEQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
367: }
368:
369: static void
370: bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
371: {
372:
373: CIRCLEQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
374: }
375:
376: static void
377: bt_insseg_tail(vmem_t *vm, bt_t *bt)
378: {
379:
380: CIRCLEQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
381: }
382:
383: static void
1.17 yamt 384: bt_remfree(vmem_t *vm, bt_t *bt)
1.1 yamt 385: {
386:
387: KASSERT(bt->bt_type == BT_TYPE_FREE);
388:
389: LIST_REMOVE(bt, bt_freelist);
390: }
391:
392: static void
393: bt_insfree(vmem_t *vm, bt_t *bt)
394: {
395: struct vmem_freelist *list;
396:
397: list = bt_freehead_tofree(vm, bt->bt_size);
398: LIST_INSERT_HEAD(list, bt, bt_freelist);
399: }
400:
401: /* ---- vmem internal functions */
402:
1.30 yamt 403: #if defined(_KERNEL)
404: static kmutex_t vmem_list_lock;
405: static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
406: #endif /* defined(_KERNEL) */
407:
1.5 yamt 408: #if defined(QCACHE)
409: static inline vm_flag_t
410: prf_to_vmf(int prflags)
411: {
412: vm_flag_t vmflags;
413:
414: KASSERT((prflags & ~(PR_LIMITFAIL | PR_WAITOK | PR_NOWAIT)) == 0);
415: if ((prflags & PR_WAITOK) != 0) {
416: vmflags = VM_SLEEP;
417: } else {
418: vmflags = VM_NOSLEEP;
419: }
420: return vmflags;
421: }
422:
423: static inline int
424: vmf_to_prf(vm_flag_t vmflags)
425: {
426: int prflags;
427:
1.7 yamt 428: if ((vmflags & VM_SLEEP) != 0) {
1.5 yamt 429: prflags = PR_WAITOK;
1.7 yamt 430: } else {
1.5 yamt 431: prflags = PR_NOWAIT;
432: }
433: return prflags;
434: }
435:
436: static size_t
437: qc_poolpage_size(size_t qcache_max)
438: {
439: int i;
440:
441: for (i = 0; ORDER2SIZE(i) <= qcache_max * 3; i++) {
442: /* nothing */
443: }
444: return ORDER2SIZE(i);
445: }
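
/*
 * Illustrative: this picks the smallest power of 2 strictly larger than
 * 3 * qcache_max, so that each pool page holds at least a few of the
 * largest cached items; e.g. qc_poolpage_size(4096) == 16384.
 */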
446:
447: static void *
448: qc_poolpage_alloc(struct pool *pool, int prflags)
449: {
450: qcache_t *qc = QC_POOL_TO_QCACHE(pool);
451: vmem_t *vm = qc->qc_vmem;
452:
453: return (void *)vmem_alloc(vm, pool->pr_alloc->pa_pagesz,
454: prf_to_vmf(prflags) | VM_INSTANTFIT);
455: }
456:
457: static void
458: qc_poolpage_free(struct pool *pool, void *addr)
459: {
460: qcache_t *qc = QC_POOL_TO_QCACHE(pool);
461: vmem_t *vm = qc->qc_vmem;
462:
463: vmem_free(vm, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
464: }
465:
466: static void
1.31 ad 467: qc_init(vmem_t *vm, size_t qcache_max, int ipl)
1.5 yamt 468: {
1.22 yamt 469: qcache_t *prevqc;
1.5 yamt 470: struct pool_allocator *pa;
471: int qcache_idx_max;
472: int i;
473:
474: KASSERT((qcache_max & vm->vm_quantum_mask) == 0);
475: if (qcache_max > (VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift)) {
476: qcache_max = VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift;
477: }
478: vm->vm_qcache_max = qcache_max;
479: pa = &vm->vm_qcache_allocator;
480: memset(pa, 0, sizeof(*pa));
481: pa->pa_alloc = qc_poolpage_alloc;
482: pa->pa_free = qc_poolpage_free;
483: pa->pa_pagesz = qc_poolpage_size(qcache_max);
484:
485: qcache_idx_max = qcache_max >> vm->vm_quantum_shift;
1.22 yamt 486: prevqc = NULL;
487: for (i = qcache_idx_max; i > 0; i--) {
488: qcache_t *qc = &vm->vm_qcache_store[i - 1];
1.5 yamt 489: size_t size = i << vm->vm_quantum_shift;
490:
491: qc->qc_vmem = vm;
1.8 martin 492: snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
1.5 yamt 493: vm->vm_name, size);
1.35 ad 494: qc->qc_cache = pool_cache_init(size,
495: ORDER2SIZE(vm->vm_quantum_shift), 0,
496: PR_NOALIGN | PR_NOTOUCH /* XXX */,
497: qc->qc_name, pa, ipl, NULL, NULL, NULL);
498: KASSERT(qc->qc_cache != NULL); /* XXX */
1.22 yamt 499: if (prevqc != NULL &&
1.35 ad 500: qc->qc_cache->pc_pool.pr_itemsperpage ==
501: prevqc->qc_cache->pc_pool.pr_itemsperpage) {
502: pool_cache_destroy(qc->qc_cache);
1.22 yamt 503: vm->vm_qcache[i - 1] = prevqc;
1.27 ad 504: continue;
1.22 yamt 505: }
1.35 ad 506: qc->qc_cache->pc_pool.pr_qcache = qc;
1.22 yamt 507: vm->vm_qcache[i - 1] = qc;
508: prevqc = qc;
1.5 yamt 509: }
510: }
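
/*
 * Illustrative: with quantum 4096 and qcache_max 32768, the loop above
 * creates caches for sizes 32k, 28k, ..., 4k, largest first.  When a
 * smaller size would end up with the same pr_itemsperpage as the next
 * larger one, the larger size's cache is shared for the smaller size
 * instead of keeping a redundant pool.
 */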
1.6 yamt 511:
1.23 yamt 512: static void
513: qc_destroy(vmem_t *vm)
514: {
515: const qcache_t *prevqc;
516: int i;
517: int qcache_idx_max;
518:
519: qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
520: prevqc = NULL;
1.24 yamt 521: for (i = 0; i < qcache_idx_max; i++) {
522: qcache_t *qc = vm->vm_qcache[i];
1.23 yamt 523:
524: if (prevqc == qc) {
525: continue;
526: }
1.35 ad 527: pool_cache_destroy(qc->qc_cache);
1.23 yamt 528: prevqc = qc;
529: }
530: }
531:
1.25 thorpej 532: static bool
1.6 yamt 533: qc_reap(vmem_t *vm)
534: {
1.22 yamt 535: const qcache_t *prevqc;
1.6 yamt 536: int i;
537: int qcache_idx_max;
1.26 thorpej 538: bool didsomething = false;
1.6 yamt 539:
540: qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
1.22 yamt 541: prevqc = NULL;
1.24 yamt 542: for (i = 0; i < qcache_idx_max; i++) {
543: qcache_t *qc = vm->vm_qcache[i];
1.6 yamt 544:
1.22 yamt 545: if (prevqc == qc) {
546: continue;
547: }
1.35 ad 548: if (pool_cache_reclaim(qc->qc_cache) != 0) {
1.26 thorpej 549: didsomething = true;
1.6 yamt 550: }
1.22 yamt 551: prevqc = qc;
1.6 yamt 552: }
553:
554: return didsomething;
555: }
1.5 yamt 556: #endif /* defined(QCACHE) */
557:
1.1 yamt 558: #if defined(_KERNEL)
559: static int
560: vmem_init(void)
561: {
562:
1.30 yamt 563: mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_NONE);
1.35 ad 564: pool_cache_bootstrap(&bt_cache, sizeof(bt_t), 0, 0, 0, "vmembt",
565: NULL, IPL_VM, NULL, NULL, NULL);
1.1 yamt 566: return 0;
567: }
568: #endif /* defined(_KERNEL) */
569:
570: static vmem_addr_t
571: vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags,
572: int spanbttype)
573: {
574: bt_t *btspan;
575: bt_t *btfree;
576:
577: KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
578: KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
579:
580: btspan = bt_alloc(vm, flags);
581: if (btspan == NULL) {
582: return VMEM_ADDR_NULL;
583: }
584: btfree = bt_alloc(vm, flags);
585: if (btfree == NULL) {
586: bt_free(vm, btspan);
587: return VMEM_ADDR_NULL;
588: }
589:
590: btspan->bt_type = spanbttype;
591: btspan->bt_start = addr;
592: btspan->bt_size = size;
593:
594: btfree->bt_type = BT_TYPE_FREE;
595: btfree->bt_start = addr;
596: btfree->bt_size = size;
597:
598: VMEM_LOCK(vm);
599: bt_insseg_tail(vm, btspan);
600: bt_insseg(vm, btfree, btspan);
601: bt_insfree(vm, btfree);
602: VMEM_UNLOCK(vm);
603:
604: return addr;
605: }
606:
1.30 yamt 607: static void
608: vmem_destroy1(vmem_t *vm)
609: {
610:
611: #if defined(QCACHE)
612: qc_destroy(vm);
613: #endif /* defined(QCACHE) */
614: if (vm->vm_hashlist != NULL) {
615: int i;
616:
617: for (i = 0; i < vm->vm_hashsize; i++) {
618: bt_t *bt;
619:
620: while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) {
621: KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC);
622: bt_free(vm, bt);
623: }
624: }
625: xfree(vm->vm_hashlist);
626: }
1.31 ad 627: VMEM_LOCK_DESTROY(vm);
1.30 yamt 628: xfree(vm);
629: }
630:
1.1 yamt 631: static int
632: vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
633: {
634: vmem_addr_t addr;
635:
636: if (vm->vm_allocfn == NULL) {
637: return EINVAL;
638: }
639:
640: addr = (*vm->vm_allocfn)(vm->vm_source, size, &size, flags);
641: if (addr == VMEM_ADDR_NULL) {
642: return ENOMEM;
643: }
644:
645: if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) == VMEM_ADDR_NULL) {
646: (*vm->vm_freefn)(vm->vm_source, addr, size);
647: return ENOMEM;
648: }
649:
650: return 0;
651: }
652:
653: static int
654: vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags)
655: {
656: bt_t *bt;
657: int i;
658: struct vmem_hashlist *newhashlist;
659: struct vmem_hashlist *oldhashlist;
660: size_t oldhashsize;
661:
662: KASSERT(newhashsize > 0);
663:
664: newhashlist =
 665: xmalloc(sizeof(struct vmem_hashlist) * newhashsize, flags);
666: if (newhashlist == NULL) {
667: return ENOMEM;
668: }
669: for (i = 0; i < newhashsize; i++) {
670: LIST_INIT(&newhashlist[i]);
671: }
672:
1.30 yamt 673: if (!VMEM_TRYLOCK(vm)) {
674: xfree(newhashlist);
675: return EBUSY;
676: }
1.1 yamt 677: oldhashlist = vm->vm_hashlist;
678: oldhashsize = vm->vm_hashsize;
679: vm->vm_hashlist = newhashlist;
680: vm->vm_hashsize = newhashsize;
681: if (oldhashlist == NULL) {
682: VMEM_UNLOCK(vm);
683: return 0;
684: }
685: for (i = 0; i < oldhashsize; i++) {
686: while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
687: bt_rembusy(vm, bt); /* XXX */
688: bt_insbusy(vm, bt);
689: }
690: }
691: VMEM_UNLOCK(vm);
692:
693: xfree(oldhashlist);
694:
695: return 0;
696: }
697:
1.10 yamt 698: /*
699: * vmem_fit: check if a bt can satisfy the given restrictions.
700: */
701:
702: static vmem_addr_t
703: vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align, vmem_size_t phase,
704: vmem_size_t nocross, vmem_addr_t minaddr, vmem_addr_t maxaddr)
705: {
706: vmem_addr_t start;
707: vmem_addr_t end;
708:
709: KASSERT(bt->bt_size >= size);
710:
711: /*
712: * XXX assumption: vmem_addr_t and vmem_size_t are
 713: * unsigned integers of the same size.
714: */
715:
716: start = bt->bt_start;
717: if (start < minaddr) {
718: start = minaddr;
719: }
720: end = BT_END(bt);
721: if (end > maxaddr - 1) {
722: end = maxaddr - 1;
723: }
724: if (start >= end) {
725: return VMEM_ADDR_NULL;
726: }
1.19 yamt 727:
728: start = VMEM_ALIGNUP(start - phase, align) + phase;
1.10 yamt 729: if (start < bt->bt_start) {
730: start += align;
731: }
1.19 yamt 732: if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
1.10 yamt 733: KASSERT(align < nocross);
1.19 yamt 734: start = VMEM_ALIGNUP(start - phase, nocross) + phase;
1.10 yamt 735: }
736: if (start < end && end - start >= size) {
737: KASSERT((start & (align - 1)) == phase);
1.19 yamt 738: KASSERT(!VMEM_CROSS_P(start, start + size - 1, nocross));
1.10 yamt 739: KASSERT(minaddr <= start);
740: KASSERT(maxaddr == 0 || start + size <= maxaddr);
741: KASSERT(bt->bt_start <= start);
742: KASSERT(start + size <= BT_END(bt));
743: return start;
744: }
745: return VMEM_ADDR_NULL;
746: }
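
/*
 * Worked example (illustrative): for a free tag covering [0x1000, 0x2000),
 * vmem_fit(bt, 0x200, 0x400, 0x100, 0, 0, 0) returns 0x1100: the first
 * address >= bt_start that is 0x100 past a 0x400 boundary and still leaves
 * room for 0x200 bytes before BT_END(bt).
 */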
747:
1.1 yamt 748: /* ---- vmem API */
749:
750: /*
751: * vmem_create: create an arena.
752: *
753: * => must not be called from interrupt context.
754: */
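
/*
 * Usage sketch (illustrative; my_import/my_release are hypothetical
 * caller-supplied functions matching the vm_allocfn/vm_freefn
 * signatures above):
 *
 *	vm = vmem_create("example", 0, 0, PAGE_SIZE,
 *	    my_import, my_release, source_vm, qcache_max,
 *	    VM_SLEEP, IPL_NONE);
 *	addr = vmem_alloc(vm, size, VM_SLEEP | VM_INSTANTFIT);
 *	...
 *	vmem_free(vm, addr, size);
 */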
755:
756: vmem_t *
757: vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
758: vmem_size_t quantum,
759: vmem_addr_t (*allocfn)(vmem_t *, vmem_size_t, vmem_size_t *, vm_flag_t),
760: void (*freefn)(vmem_t *, vmem_addr_t, vmem_size_t),
1.31 ad 761: vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags,
762: int ipl)
1.1 yamt 763: {
764: vmem_t *vm;
765: int i;
766: #if defined(_KERNEL)
767: static ONCE_DECL(control);
768: #endif /* defined(_KERNEL) */
769:
770: KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
771: KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
772:
773: #if defined(_KERNEL)
774: if (RUN_ONCE(&control, vmem_init)) {
775: return NULL;
776: }
777: #endif /* defined(_KERNEL) */
778: vm = xmalloc(sizeof(*vm), flags);
779: if (vm == NULL) {
780: return NULL;
781: }
782:
1.31 ad 783: VMEM_LOCK_INIT(vm, ipl);
1.1 yamt 784: vm->vm_name = name;
785: vm->vm_quantum_mask = quantum - 1;
786: vm->vm_quantum_shift = calc_order(quantum);
1.4 yamt 787: KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
1.1 yamt 788: vm->vm_allocfn = allocfn;
789: vm->vm_freefn = freefn;
790: vm->vm_source = source;
791: vm->vm_nbusytag = 0;
1.5 yamt 792: #if defined(QCACHE)
1.31 ad 793: qc_init(vm, qcache_max, ipl);
1.5 yamt 794: #endif /* defined(QCACHE) */
1.1 yamt 795:
796: CIRCLEQ_INIT(&vm->vm_seglist);
797: for (i = 0; i < VMEM_MAXORDER; i++) {
798: LIST_INIT(&vm->vm_freelist[i]);
799: }
800: vm->vm_hashlist = NULL;
801: if (vmem_rehash(vm, VMEM_HASHSIZE_INIT, flags)) {
1.30 yamt 802: vmem_destroy1(vm);
1.1 yamt 803: return NULL;
804: }
805:
806: if (size != 0) {
807: if (vmem_add(vm, base, size, flags) == 0) {
1.30 yamt 808: vmem_destroy1(vm);
1.1 yamt 809: return NULL;
810: }
811: }
812:
1.30 yamt 813: #if defined(_KERNEL)
814: mutex_enter(&vmem_list_lock);
815: LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
816: mutex_exit(&vmem_list_lock);
817: #endif /* defined(_KERNEL) */
818:
1.1 yamt 819: return vm;
820: }
821:
822: void
823: vmem_destroy(vmem_t *vm)
824: {
825:
1.30 yamt 826: #if defined(_KERNEL)
827: mutex_enter(&vmem_list_lock);
828: LIST_REMOVE(vm, vm_alllist);
829: mutex_exit(&vmem_list_lock);
830: #endif /* defined(_KERNEL) */
1.1 yamt 831:
1.30 yamt 832: vmem_destroy1(vm);
1.1 yamt 833: }
834:
835: vmem_size_t
836: vmem_roundup_size(vmem_t *vm, vmem_size_t size)
837: {
838:
839: return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
840: }
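
/*
 * E.g. with a quantum of 0x1000, vmem_roundup_size(vm, 0x1001) == 0x2000.
 */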
841:
842: /*
843: * vmem_alloc:
844: *
845: * => caller must ensure appropriate spl,
846: * if the arena can be accessed from interrupt context.
847: */
848:
849: vmem_addr_t
1.36.4.2 bouyer 850: vmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
1.1 yamt 851: {
1.12 yamt 852: const vm_flag_t strat __unused = flags & VM_FITMASK;
1.1 yamt 853:
854: KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
855: KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
856:
857: KASSERT(size > 0);
858: KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
1.3 yamt 859: if ((flags & VM_SLEEP) != 0) {
1.16 yamt 860: ASSERT_SLEEPABLE(NULL, __func__);
1.3 yamt 861: }
1.1 yamt 862:
1.5 yamt 863: #if defined(QCACHE)
864: if (size <= vm->vm_qcache_max) {
1.36.4.2 bouyer 865: int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
1.22 yamt 866: qcache_t *qc = vm->vm_qcache[qidx - 1];
1.5 yamt 867:
1.35 ad 868: return (vmem_addr_t)pool_cache_get(qc->qc_cache,
1.5 yamt 869: vmf_to_prf(flags));
870: }
871: #endif /* defined(QCACHE) */
872:
1.36.4.2 bouyer 873: return vmem_xalloc(vm, size, 0, 0, 0, 0, 0, flags);
1.10 yamt 874: }
875:
876: vmem_addr_t
877: vmem_xalloc(vmem_t *vm, vmem_size_t size0, vmem_size_t align, vmem_size_t phase,
878: vmem_size_t nocross, vmem_addr_t minaddr, vmem_addr_t maxaddr,
879: vm_flag_t flags)
880: {
881: struct vmem_freelist *list;
882: struct vmem_freelist *first;
883: struct vmem_freelist *end;
884: bt_t *bt;
885: bt_t *btnew;
886: bt_t *btnew2;
887: const vmem_size_t size = vmem_roundup_size(vm, size0);
888: vm_flag_t strat = flags & VM_FITMASK;
889: vmem_addr_t start;
890:
891: KASSERT(size0 > 0);
892: KASSERT(size > 0);
893: KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
894: if ((flags & VM_SLEEP) != 0) {
1.16 yamt 895: ASSERT_SLEEPABLE(NULL, __func__);
1.10 yamt 896: }
897: KASSERT((align & vm->vm_quantum_mask) == 0);
898: KASSERT((align & (align - 1)) == 0);
899: KASSERT((phase & vm->vm_quantum_mask) == 0);
900: KASSERT((nocross & vm->vm_quantum_mask) == 0);
901: KASSERT((nocross & (nocross - 1)) == 0);
902: KASSERT((align == 0 && phase == 0) || phase < align);
903: KASSERT(nocross == 0 || nocross >= size);
904: KASSERT(maxaddr == 0 || minaddr < maxaddr);
1.19 yamt 905: KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross));
1.10 yamt 906:
907: if (align == 0) {
908: align = vm->vm_quantum_mask + 1;
909: }
1.1 yamt 910: btnew = bt_alloc(vm, flags);
911: if (btnew == NULL) {
912: return VMEM_ADDR_NULL;
913: }
1.10 yamt 914: btnew2 = bt_alloc(vm, flags); /* XXX not necessary if no restrictions */
915: if (btnew2 == NULL) {
916: bt_free(vm, btnew);
917: return VMEM_ADDR_NULL;
918: }
1.1 yamt 919:
920: retry_strat:
921: first = bt_freehead_toalloc(vm, size, strat);
922: end = &vm->vm_freelist[VMEM_MAXORDER];
923: retry:
924: bt = NULL;
925: VMEM_LOCK(vm);
1.2 yamt 926: if (strat == VM_INSTANTFIT) {
927: for (list = first; list < end; list++) {
928: bt = LIST_FIRST(list);
929: if (bt != NULL) {
1.10 yamt 930: start = vmem_fit(bt, size, align, phase,
931: nocross, minaddr, maxaddr);
932: if (start != VMEM_ADDR_NULL) {
933: goto gotit;
934: }
1.2 yamt 935: }
936: }
937: } else { /* VM_BESTFIT */
938: for (list = first; list < end; list++) {
939: LIST_FOREACH(bt, list, bt_freelist) {
940: if (bt->bt_size >= size) {
1.10 yamt 941: start = vmem_fit(bt, size, align, phase,
942: nocross, minaddr, maxaddr);
943: if (start != VMEM_ADDR_NULL) {
944: goto gotit;
945: }
1.2 yamt 946: }
1.1 yamt 947: }
948: }
949: }
1.2 yamt 950: VMEM_UNLOCK(vm);
1.1 yamt 951: #if 1
1.2 yamt 952: if (strat == VM_INSTANTFIT) {
953: strat = VM_BESTFIT;
954: goto retry_strat;
955: }
1.1 yamt 956: #endif
1.10 yamt 957: if (align != vm->vm_quantum_mask + 1 || phase != 0 ||
958: nocross != 0 || minaddr != 0 || maxaddr != 0) {
959:
960: /*
961: * XXX should try to import a region large enough to
962: * satisfy restrictions?
963: */
964:
1.20 yamt 965: goto fail;
1.10 yamt 966: }
1.2 yamt 967: if (vmem_import(vm, size, flags) == 0) {
968: goto retry;
1.1 yamt 969: }
1.2 yamt 970: /* XXX */
1.20 yamt 971: fail:
972: bt_free(vm, btnew);
973: bt_free(vm, btnew2);
1.2 yamt 974: return VMEM_ADDR_NULL;
975:
976: gotit:
1.1 yamt 977: KASSERT(bt->bt_type == BT_TYPE_FREE);
978: KASSERT(bt->bt_size >= size);
979: bt_remfree(vm, bt);
1.10 yamt 980: if (bt->bt_start != start) {
981: btnew2->bt_type = BT_TYPE_FREE;
982: btnew2->bt_start = bt->bt_start;
983: btnew2->bt_size = start - bt->bt_start;
984: bt->bt_start = start;
985: bt->bt_size -= btnew2->bt_size;
986: bt_insfree(vm, btnew2);
987: bt_insseg(vm, btnew2, CIRCLEQ_PREV(bt, bt_seglist));
988: btnew2 = NULL;
989: }
990: KASSERT(bt->bt_start == start);
1.1 yamt 991: if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
992: /* split */
993: btnew->bt_type = BT_TYPE_BUSY;
994: btnew->bt_start = bt->bt_start;
995: btnew->bt_size = size;
996: bt->bt_start = bt->bt_start + size;
997: bt->bt_size -= size;
998: bt_insfree(vm, bt);
999: bt_insseg(vm, btnew, CIRCLEQ_PREV(bt, bt_seglist));
1000: bt_insbusy(vm, btnew);
1001: VMEM_UNLOCK(vm);
1002: } else {
1003: bt->bt_type = BT_TYPE_BUSY;
1004: bt_insbusy(vm, bt);
1005: VMEM_UNLOCK(vm);
1006: bt_free(vm, btnew);
1007: btnew = bt;
1008: }
1.10 yamt 1009: if (btnew2 != NULL) {
1010: bt_free(vm, btnew2);
1011: }
1.1 yamt 1012: KASSERT(btnew->bt_size >= size);
1013: btnew->bt_type = BT_TYPE_BUSY;
1014:
1015: return btnew->bt_start;
1016: }
1017:
1018: /*
1019: * vmem_free:
1020: *
1021: * => caller must ensure appropriate spl,
1022: * if the arena can be accessed from interrupt context.
1023: */
1024:
1025: void
1026: vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
1027: {
1028:
1029: KASSERT(addr != VMEM_ADDR_NULL);
1030: KASSERT(size > 0);
1031:
1.5 yamt 1032: #if defined(QCACHE)
1033: if (size <= vm->vm_qcache_max) {
1034: int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
1.22 yamt 1035: qcache_t *qc = vm->vm_qcache[qidx - 1];
1.5 yamt 1036:
1.35 ad 1037: return pool_cache_put(qc->qc_cache, (void *)addr);
1.5 yamt 1038: }
1039: #endif /* defined(QCACHE) */
1040:
1.10 yamt 1041: vmem_xfree(vm, addr, size);
1042: }
1043:
1044: void
1.17 yamt 1045: vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
1.10 yamt 1046: {
1047: bt_t *bt;
1048: bt_t *t;
1049:
1050: KASSERT(addr != VMEM_ADDR_NULL);
1051: KASSERT(size > 0);
1052:
1.1 yamt 1053: VMEM_LOCK(vm);
1054:
1055: bt = bt_lookupbusy(vm, addr);
1056: KASSERT(bt != NULL);
1057: KASSERT(bt->bt_start == addr);
1058: KASSERT(bt->bt_size == vmem_roundup_size(vm, size) ||
1059: bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
1060: KASSERT(bt->bt_type == BT_TYPE_BUSY);
1061: bt_rembusy(vm, bt);
1062: bt->bt_type = BT_TYPE_FREE;
1063:
1064: /* coalesce */
1065: t = CIRCLEQ_NEXT(bt, bt_seglist);
1066: if (t != NULL && t->bt_type == BT_TYPE_FREE) {
1067: KASSERT(BT_END(bt) == t->bt_start);
1068: bt_remfree(vm, t);
1069: bt_remseg(vm, t);
1070: bt->bt_size += t->bt_size;
1071: bt_free(vm, t);
1072: }
1073: t = CIRCLEQ_PREV(bt, bt_seglist);
1074: if (t != NULL && t->bt_type == BT_TYPE_FREE) {
1075: KASSERT(BT_END(t) == bt->bt_start);
1076: bt_remfree(vm, t);
1077: bt_remseg(vm, t);
1078: bt->bt_size += t->bt_size;
1079: bt->bt_start = t->bt_start;
1080: bt_free(vm, t);
1081: }
1082:
1083: t = CIRCLEQ_PREV(bt, bt_seglist);
1084: KASSERT(t != NULL);
1085: KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
1086: if (vm->vm_freefn != NULL && t->bt_type == BT_TYPE_SPAN &&
1087: t->bt_size == bt->bt_size) {
1088: vmem_addr_t spanaddr;
1089: vmem_size_t spansize;
1090:
1091: KASSERT(t->bt_start == bt->bt_start);
1092: spanaddr = bt->bt_start;
1093: spansize = bt->bt_size;
1094: bt_remseg(vm, bt);
1095: bt_free(vm, bt);
1096: bt_remseg(vm, t);
1097: bt_free(vm, t);
1098: VMEM_UNLOCK(vm);
1099: (*vm->vm_freefn)(vm->vm_source, spanaddr, spansize);
1100: } else {
1101: bt_insfree(vm, bt);
1102: VMEM_UNLOCK(vm);
1103: }
1104: }
1105:
1106: /*
1107: * vmem_add:
1108: *
1109: * => caller must ensure appropriate spl,
1110: * if the arena can be accessed from interrupt context.
1111: */
1112:
1113: vmem_addr_t
1114: vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags)
1115: {
1116:
1117: return vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC);
1118: }
1119:
1.6 yamt 1120: /*
1121: * vmem_reap: reap unused resources.
1122: *
1.26 thorpej 1123: * => return true if we successfully reaped something.
1.6 yamt 1124: */
1125:
1.25 thorpej 1126: bool
1.6 yamt 1127: vmem_reap(vmem_t *vm)
1128: {
1.26 thorpej 1129: bool didsomething = false;
1.6 yamt 1130:
1131: #if defined(QCACHE)
1132: didsomething = qc_reap(vm);
1133: #endif /* defined(QCACHE) */
1134: return didsomething;
1135: }
1136:
1.30 yamt 1137: /* ---- rehash */
1138:
1139: #if defined(_KERNEL)
1140: static struct callout vmem_rehash_ch;
1141: static int vmem_rehash_interval;
1142: static struct workqueue *vmem_rehash_wq;
1143: static struct work vmem_rehash_wk;
1144:
1145: static void
1146: vmem_rehash_all(struct work *wk, void *dummy)
1147: {
1148: vmem_t *vm;
1149:
1150: KASSERT(wk == &vmem_rehash_wk);
1151: mutex_enter(&vmem_list_lock);
1152: LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1153: size_t desired;
1154: size_t current;
1155:
1156: if (!VMEM_TRYLOCK(vm)) {
1157: continue;
1158: }
1159: desired = vm->vm_nbusytag;
1160: current = vm->vm_hashsize;
1161: VMEM_UNLOCK(vm);
1162:
1163: if (desired > VMEM_HASHSIZE_MAX) {
1164: desired = VMEM_HASHSIZE_MAX;
1165: } else if (desired < VMEM_HASHSIZE_MIN) {
1166: desired = VMEM_HASHSIZE_MIN;
1167: }
1168: if (desired > current * 2 || desired * 2 < current) {
1169: vmem_rehash(vm, desired, VM_NOSLEEP);
1170: }
1171: }
1172: mutex_exit(&vmem_list_lock);
1173:
1174: callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
1175: }
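
/*
 * Illustrative: an arena with 100 busy tags but only 8 hash buckets
 * (desired > current * 2) gets rehashed to 100 buckets, keeping the
 * average bucket chain length near 1.
 */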
1176:
1177: static void
1178: vmem_rehash_all_kick(void *dummy)
1179: {
1180:
1.32 rmind 1181: workqueue_enqueue(vmem_rehash_wq, &vmem_rehash_wk, NULL);
1.30 yamt 1182: }
1183:
1184: void
1185: vmem_rehash_start(void)
1186: {
1187: int error;
1188:
1189: error = workqueue_create(&vmem_rehash_wq, "vmem_rehash",
1.34 ad 1190: vmem_rehash_all, NULL, PRI_VM, IPL_SOFTCLOCK, 0);
1.30 yamt 1191: if (error) {
1192: panic("%s: workqueue_create %d\n", __func__, error);
1193: }
1.31 ad 1194: callout_init(&vmem_rehash_ch, 0);
1.30 yamt 1195: callout_setfunc(&vmem_rehash_ch, vmem_rehash_all_kick, NULL);
1196:
1197: vmem_rehash_interval = hz * 10;
1198: callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
1199: }
1200: #endif /* defined(_KERNEL) */
1201:
1.1 yamt 1202: /* ---- debug */
1203:
1.36.4.1 bouyer 1204: #if defined(DDB)
1205: static bt_t *
1206: vmem_whatis_lookup(vmem_t *vm, uintptr_t addr)
1207: {
1.36.4.2 bouyer 1208: bt_t *bt;
1.36.4.1 bouyer 1209:
1.36.4.2 bouyer 1210: CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1211: if (BT_ISSPAN_P(bt)) {
1212: continue;
1213: }
1214: if (bt->bt_start <= addr && addr < BT_END(bt)) {
1215: return bt;
1.36.4.1 bouyer 1216: }
1217: }
1218:
1219: return NULL;
1220: }
1221:
1222: void
1223: vmem_whatis(uintptr_t addr, void (*pr)(const char *, ...))
1224: {
1225: vmem_t *vm;
1226:
1227: LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1228: bt_t *bt;
1229:
1230: bt = vmem_whatis_lookup(vm, addr);
1231: if (bt == NULL) {
1232: continue;
1233: }
1.36.4.2 bouyer 1234: (*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
1.36.4.1 bouyer 1235: (void *)addr, (void *)bt->bt_start,
1.36.4.2 bouyer 1236: (size_t)(addr - bt->bt_start), vm->vm_name,
1237: (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
1.36.4.1 bouyer 1238: }
1239: }
1240: #endif /* defined(DDB) */
1241:
1.1 yamt 1242: #if defined(VMEM_DEBUG)
1243:
1244: #if !defined(_KERNEL)
1245: #include <stdio.h>
1246: #endif /* !defined(_KERNEL) */
1247:
1248: void bt_dump(const bt_t *);
1249:
1250: void
1251: bt_dump(const bt_t *bt)
1252: {
1253:
1254: printf("\t%p: %" PRIu64 ", %" PRIu64 ", %d\n",
1255: bt, (uint64_t)bt->bt_start, (uint64_t)bt->bt_size,
1256: bt->bt_type);
1257: }
1258:
1259: void
1260: vmem_dump(const vmem_t *vm)
1261: {
1262: const bt_t *bt;
1263: int i;
1264:
1265: printf("vmem %p '%s'\n", vm, vm->vm_name);
1266: CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1267: bt_dump(bt);
1268: }
1269:
1270: for (i = 0; i < VMEM_MAXORDER; i++) {
1271: const struct vmem_freelist *fl = &vm->vm_freelist[i];
1272:
1273: if (LIST_EMPTY(fl)) {
1274: continue;
1275: }
1276:
1277: printf("freelist[%d]\n", i);
1278: LIST_FOREACH(bt, fl, bt_freelist) {
1279: bt_dump(bt);
1282: }
1283: }
1284: }
1285:
1286: #if !defined(_KERNEL)
1287:
1288: int
1289: main()
1290: {
1291: vmem_t *vm;
1292: vmem_addr_t p;
1293: struct reg {
1294: vmem_addr_t p;
1295: vmem_size_t sz;
1.25 thorpej 1296: bool x;
1.1 yamt 1297: } *reg = NULL;
1298: int nreg = 0;
1299: int nalloc = 0;
1300: int nfree = 0;
1301: vmem_size_t total = 0;
1302: #if 1
1303: vm_flag_t strat = VM_INSTANTFIT;
1304: #else
1305: vm_flag_t strat = VM_BESTFIT;
1306: #endif
1307:
1308: vm = vmem_create("test", VMEM_ADDR_NULL, 0, 1,
1.30 yamt 1309: NULL, NULL, NULL, 0, VM_SLEEP, IPL_VM);
1.1 yamt 1310: if (vm == NULL) {
1311: printf("vmem_create\n");
1312: exit(EXIT_FAILURE);
1313: }
1314: vmem_dump(vm);
1315:
1316: p = vmem_add(vm, 100, 200, VM_SLEEP);
1317: p = vmem_add(vm, 2000, 1, VM_SLEEP);
1318: p = vmem_add(vm, 40000, 0x10000000>>12, VM_SLEEP);
1319: p = vmem_add(vm, 10000, 10000, VM_SLEEP);
1320: p = vmem_add(vm, 500, 1000, VM_SLEEP);
1321: vmem_dump(vm);
1322: for (;;) {
1323: struct reg *r;
1.10 yamt 1324: int t = rand() % 100;
1.1 yamt 1325:
1.10 yamt 1326: if (t > 45) {
1327: /* alloc */
1.1 yamt 1328: vmem_size_t sz = rand() % 500 + 1;
1.25 thorpej 1329: bool x;
1.10 yamt 1330: vmem_size_t align, phase, nocross;
1331: vmem_addr_t minaddr, maxaddr;
1332:
1333: if (t > 70) {
1.26 thorpej 1334: x = true;
1.10 yamt 1335: /* XXX */
1336: align = 1 << (rand() % 15);
1337: phase = rand() % 65536;
1338: nocross = 1 << (rand() % 15);
1339: if (align <= phase) {
1340: phase = 0;
1341: }
1.19 yamt 1342: if (VMEM_CROSS_P(phase, phase + sz - 1,
1343: nocross)) {
1.10 yamt 1344: nocross = 0;
1345: }
1346: minaddr = rand() % 50000;
1347: maxaddr = rand() % 70000;
1348: if (minaddr > maxaddr) {
1349: minaddr = 0;
1350: maxaddr = 0;
1351: }
1352: printf("=== xalloc %" PRIu64
1353: " align=%" PRIu64 ", phase=%" PRIu64
1354: ", nocross=%" PRIu64 ", min=%" PRIu64
1355: ", max=%" PRIu64 "\n",
1356: (uint64_t)sz,
1357: (uint64_t)align,
1358: (uint64_t)phase,
1359: (uint64_t)nocross,
1360: (uint64_t)minaddr,
1361: (uint64_t)maxaddr);
1362: p = vmem_xalloc(vm, sz, align, phase, nocross,
1363: minaddr, maxaddr, strat|VM_SLEEP);
1364: } else {
1.26 thorpej 1365: x = false;
1.10 yamt 1366: printf("=== alloc %" PRIu64 "\n", (uint64_t)sz);
1367: p = vmem_alloc(vm, sz, strat|VM_SLEEP);
1368: }
1.1 yamt 1369: printf("-> %" PRIu64 "\n", (uint64_t)p);
1370: vmem_dump(vm);
1371: if (p == VMEM_ADDR_NULL) {
1.10 yamt 1372: if (x) {
1373: continue;
1374: }
1.1 yamt 1375: break;
1376: }
1377: nreg++;
1378: reg = realloc(reg, sizeof(*reg) * nreg);
 1379: r = &reg[nreg - 1];
1380: r->p = p;
1381: r->sz = sz;
1.10 yamt 1382: r->x = x;
1.1 yamt 1383: total += sz;
1384: nalloc++;
1385: } else if (nreg != 0) {
1.10 yamt 1386: /* free */
1.1 yamt 1387: r = ®[rand() % nreg];
1388: printf("=== free %" PRIu64 ", %" PRIu64 "\n",
1389: (uint64_t)r->p, (uint64_t)r->sz);
1.10 yamt 1390: if (r->x) {
1391: vmem_xfree(vm, r->p, r->sz);
1392: } else {
1393: vmem_free(vm, r->p, r->sz);
1394: }
1.1 yamt 1395: total -= r->sz;
1396: vmem_dump(vm);
1397: *r = reg[nreg - 1];
1398: nreg--;
1399: nfree++;
1400: }
1401: printf("total=%" PRIu64 "\n", (uint64_t)total);
1402: }
1403: fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n",
1404: (uint64_t)total, nalloc, nfree);
1405: exit(EXIT_SUCCESS);
1406: }
1407: #endif /* !defined(_KERNEL) */
1408: #endif /* defined(VMEM_DEBUG) */