Annotation of src/sys/uvm/uvm_aobj.c, Revision 1.50.2.1
1.50.2.1! thorpej 1: /* $NetBSD: uvm_aobj.c,v 1.50 2002/03/08 20:48:47 thorpej Exp $ */
1.6 mrg 2:
1.7 chs 3: /*
4: * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
5: * Washington University.
6: * All rights reserved.
7: *
8: * Redistribution and use in source and binary forms, with or without
9: * modification, are permitted provided that the following conditions
10: * are met:
11: * 1. Redistributions of source code must retain the above copyright
12: * notice, this list of conditions and the following disclaimer.
13: * 2. Redistributions in binary form must reproduce the above copyright
14: * notice, this list of conditions and the following disclaimer in the
15: * documentation and/or other materials provided with the distribution.
16: * 3. All advertising materials mentioning features or use of this software
17: * must display the following acknowledgement:
18: * This product includes software developed by Charles D. Cranor and
19: * Washington University.
20: * 4. The name of the author may not be used to endorse or promote products
21: * derived from this software without specific prior written permission.
22: *
23: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33: *
1.4 mrg 34: * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
35: */
1.7 chs 36: /*
37: * uvm_aobj.c: anonymous memory uvm_object pager
38: *
39: * author: Chuck Silvers <chuq@chuq.com>
40: * started: Jan-1998
41: *
42: * - design mostly from Chuck Cranor
43: */
1.49 lukem 44:
45: #include <sys/cdefs.h>
1.50.2.1! thorpej 46: __KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.50 2002/03/08 20:48:47 thorpej Exp $");
1.7 chs 47:
48: #include "opt_uvmhist.h"
1.1 mrg 49:
50: #include <sys/param.h>
51: #include <sys/systm.h>
52: #include <sys/proc.h>
53: #include <sys/malloc.h>
1.37 chs 54: #include <sys/kernel.h>
1.12 thorpej 55: #include <sys/pool.h>
1.1 mrg 57:
58: #include <uvm/uvm.h>
59:
60: /*
61: * an aobj manages anonymous-memory backed uvm_objects. in addition
62: * to keeping the list of resident pages, it also keeps a list of
63: * allocated swap blocks. depending on the size of the aobj this list
64: * of allocated swap blocks is either stored in an array (small objects)
65: * or in a hash table (large objects).
66: */
67:
68: /*
69: * local structures
70: */
71:
72: /*
73: * for hash tables, we break the address space of the aobj into blocks
74: * of UAO_SWHASH_CLUSTER_SIZE pages. we require the cluster size to
75: * be a power of two.
76: */
77:
78: #define UAO_SWHASH_CLUSTER_SHIFT 4
79: #define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)
80:
81: /* get the "tag" for this page index */
82: #define UAO_SWHASH_ELT_TAG(PAGEIDX) \
83: ((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)
84:
85: /* given an ELT and a page index, find the swap slot */
86: #define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
87: ((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])
88:
89: /* given an ELT, return its pageidx base */
90: #define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
91: ((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)
92:
93: /*
94: * the swhash hash function
95: */
1.46 chs 96:
1.1 mrg 97: #define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
98: (&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
99: & (AOBJ)->u_swhashmask)])
100:
101: /*
 102: * the swhash threshold determines if we will use an array or a
103: * hash table to store the list of allocated swap blocks.
104: */
105:
106: #define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
107: #define UAO_USES_SWHASH(AOBJ) \
108: ((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD) /* use hash? */
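
/*
 * Worked example (editor's sketch, not part of the original source):
 * with UAO_SWHASH_CLUSTER_SHIFT == 4 the macros above split a page
 * index into a cluster tag, a slot offset within the cluster, and a
 * hash bucket.  for page index 37 and a hypothetical u_swhashmask of
 * 0xf:
 *
 *	tag    = UAO_SWHASH_ELT_TAG(37)             = 37 >> 4 = 2
 *	offset = 37 & (UAO_SWHASH_CLUSTER_SIZE - 1) = 37 & 15 = 5
 *	bucket = UAO_SWHASH_HASH(aobj, 37)          = &u_swhash[2 & 0xf]
 *
 * so page 37's swap slot is elt->slots[5] of the element tagged 2 on
 * the bucket list u_swhash[2].  the hash is only used at all for
 * objects larger than UAO_SWHASH_THRESHOLD (64 pages here); smaller
 * objects index u_swslots[pageidx] directly.
 */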
109:
110: /*
1.3 chs 111: * the number of buckets in a swhash, with an upper bound
1.1 mrg 112: */
1.46 chs 113:
1.1 mrg 114: #define UAO_SWHASH_MAXBUCKETS 256
115: #define UAO_SWHASH_BUCKETS(AOBJ) \
1.46 chs 116: (MIN((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
1.1 mrg 117: UAO_SWHASH_MAXBUCKETS))
118:
119:
120: /*
121: * uao_swhash_elt: when a hash table is being used, this structure defines
122: * the format of an entry in the bucket list.
123: */
124:
125: struct uao_swhash_elt {
1.5 mrg 126: LIST_ENTRY(uao_swhash_elt) list; /* the hash list */
1.28 kleink 127: voff_t tag; /* our 'tag' */
1.5 mrg 128: int count; /* our number of active slots */
129: int slots[UAO_SWHASH_CLUSTER_SIZE]; /* the slots */
1.1 mrg 130: };
131:
132: /*
133: * uao_swhash: the swap hash table structure
134: */
135:
136: LIST_HEAD(uao_swhash, uao_swhash_elt);
137:
1.12 thorpej 138: /*
139: * uao_swhash_elt_pool: pool of uao_swhash_elt structures
140: */
141:
142: struct pool uao_swhash_elt_pool;
1.1 mrg 143:
144: /*
145: * uvm_aobj: the actual anon-backed uvm_object
146: *
 147: * => the uvm_object is at the top of the structure, which allows
1.46 chs 148: * (struct uvm_aobj *) == (struct uvm_object *)
1.1 mrg 149: * => only one of u_swslots and u_swhash is used in any given aobj
150: */
151:
152: struct uvm_aobj {
1.5 mrg 153: struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
1.11 drochner 154: int u_pages; /* number of pages in entire object */
1.5 mrg 155: int u_flags; /* the flags (see uvm_aobj.h) */
156: int *u_swslots; /* array of offset->swapslot mappings */
157: /*
158: * hashtable of offset->swapslot mappings
159: * (u_swhash is an array of bucket heads)
160: */
161: struct uao_swhash *u_swhash;
162: u_long u_swhashmask; /* mask for hashtable */
163: LIST_ENTRY(uvm_aobj) u_list; /* global list of aobjs */
1.1 mrg 164: };
165:
166: /*
1.12 thorpej 167: * uvm_aobj_pool: pool of uvm_aobj structures
168: */
169:
170: struct pool uvm_aobj_pool;
171:
172: /*
1.1 mrg 173: * local functions
174: */
175:
1.46 chs 176: static struct uao_swhash_elt *uao_find_swhash_elt
177: __P((struct uvm_aobj *, int, boolean_t));
178:
179: static void uao_free __P((struct uvm_aobj *));
180: static int uao_get __P((struct uvm_object *, voff_t, struct vm_page **,
181: int *, int, vm_prot_t, int, int));
182: static boolean_t uao_put __P((struct uvm_object *, voff_t, voff_t, int));
183: static boolean_t uao_pagein __P((struct uvm_aobj *, int, int));
184: static boolean_t uao_pagein_page __P((struct uvm_aobj *, int));
1.1 mrg 185:
186: /*
187: * aobj_pager
1.41 chs 188: *
1.1 mrg 189: * note that some functions (e.g. put) are handled elsewhere
190: */
191:
192: struct uvm_pagerops aobj_pager = {
1.27 chs 193: NULL, /* init */
1.5 mrg 194: uao_reference, /* reference */
195: uao_detach, /* detach */
196: NULL, /* fault */
197: uao_get, /* get */
1.46 chs 198: uao_put, /* flush */
1.1 mrg 199: };
200:
201: /*
202: * uao_list: global list of active aobjs, locked by uao_list_lock
203: */
204:
205: static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
1.42 chs 206: static struct simplelock uao_list_lock;
1.1 mrg 207:
208: /*
209: * functions
210: */
211:
212: /*
213: * hash table/array related functions
214: */
215:
216: /*
217: * uao_find_swhash_elt: find (or create) a hash table entry for a page
218: * offset.
219: *
220: * => the object should be locked by the caller
221: */
222:
1.5 mrg 223: static struct uao_swhash_elt *
224: uao_find_swhash_elt(aobj, pageidx, create)
225: struct uvm_aobj *aobj;
226: int pageidx;
227: boolean_t create;
228: {
229: struct uao_swhash *swhash;
230: struct uao_swhash_elt *elt;
1.28 kleink 231: voff_t page_tag;
1.1 mrg 232:
1.45 chs 233: swhash = UAO_SWHASH_HASH(aobj, pageidx);
234: page_tag = UAO_SWHASH_ELT_TAG(pageidx);
1.1 mrg 235:
1.5 mrg 236: /*
237: * now search the bucket for the requested tag
238: */
1.45 chs 239:
1.37 chs 240: LIST_FOREACH(elt, swhash, list) {
1.45 chs 241: if (elt->tag == page_tag) {
242: return elt;
243: }
1.5 mrg 244: }
1.45 chs 245: if (!create) {
1.5 mrg 246: return NULL;
1.45 chs 247: }
1.5 mrg 248:
249: /*
1.12 thorpej 250: * allocate a new entry for the bucket and init/insert it in
1.5 mrg 251: */
1.45 chs 252:
253: elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT);
254: if (elt == NULL) {
255: return NULL;
256: }
1.5 mrg 257: LIST_INSERT_HEAD(swhash, elt, list);
258: elt->tag = page_tag;
259: elt->count = 0;
1.9 perry 260: memset(elt->slots, 0, sizeof(elt->slots));
1.45 chs 261: return elt;
1.1 mrg 262: }
263:
264: /*
265: * uao_find_swslot: find the swap slot number for an aobj/pageidx
266: *
1.41 chs 267: * => object must be locked by caller
1.1 mrg 268: */
1.46 chs 269:
270: int
271: uao_find_swslot(uobj, pageidx)
272: struct uvm_object *uobj;
1.11 drochner 273: int pageidx;
1.1 mrg 274: {
1.46 chs 275: struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
276: struct uao_swhash_elt *elt;
1.1 mrg 277:
1.5 mrg 278: /*
279: * if noswap flag is set, then we never return a slot
280: */
1.1 mrg 281:
1.5 mrg 282: if (aobj->u_flags & UAO_FLAG_NOSWAP)
283: return(0);
1.1 mrg 284:
1.5 mrg 285: /*
286: * if hashing, look in hash table.
287: */
1.1 mrg 288:
1.5 mrg 289: if (UAO_USES_SWHASH(aobj)) {
1.46 chs 290: elt = uao_find_swhash_elt(aobj, pageidx, FALSE);
1.5 mrg 291: if (elt)
292: return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
293: else
1.31 thorpej 294: return(0);
1.5 mrg 295: }
1.1 mrg 296:
1.41 chs 297: /*
1.5 mrg 298: * otherwise, look in the array
299: */
1.46 chs 300:
1.5 mrg 301: return(aobj->u_swslots[pageidx]);
1.1 mrg 302: }
303:
304: /*
305: * uao_set_swslot: set the swap slot for a page in an aobj.
306: *
307: * => setting a slot to zero frees the slot
308: * => object must be locked by caller
1.45 chs 309: * => we return the old slot number, or -1 if we failed to allocate
310: * memory to record the new slot number
1.1 mrg 311: */
1.46 chs 312:
1.5 mrg 313: int
314: uao_set_swslot(uobj, pageidx, slot)
315: struct uvm_object *uobj;
316: int pageidx, slot;
317: {
318: struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
1.45 chs 319: struct uao_swhash_elt *elt;
1.5 mrg 320: int oldslot;
321: UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
322: UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
323: aobj, pageidx, slot, 0);
1.1 mrg 324:
1.5 mrg 325: /*
1.46 chs 326: * if noswap flag is set, then we can't set a non-zero slot.
1.5 mrg 327: */
1.1 mrg 328:
1.5 mrg 329: if (aobj->u_flags & UAO_FLAG_NOSWAP) {
330: if (slot == 0)
1.46 chs 331: return(0);
1.1 mrg 332:
1.5 mrg 333: printf("uao_set_swslot: uobj = %p\n", uobj);
1.46 chs 334: panic("uao_set_swslot: NOSWAP object");
1.5 mrg 335: }
1.1 mrg 336:
1.5 mrg 337: /*
338: * are we using a hash table? if so, add it in the hash.
339: */
1.1 mrg 340:
1.5 mrg 341: if (UAO_USES_SWHASH(aobj)) {
1.39 chs 342:
1.12 thorpej 343: /*
344: * Avoid allocating an entry just to free it again if
 345: * the page had no swap slot in the first place, and
346: * we are freeing.
347: */
1.39 chs 348:
1.46 chs 349: elt = uao_find_swhash_elt(aobj, pageidx, slot != 0);
1.12 thorpej 350: if (elt == NULL) {
1.45 chs 351: return slot ? -1 : 0;
1.12 thorpej 352: }
1.5 mrg 353:
354: oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
355: UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;
356:
357: /*
358: * now adjust the elt's reference counter and free it if we've
359: * dropped it to zero.
360: */
361:
362: if (slot) {
363: if (oldslot == 0)
364: elt->count++;
1.45 chs 365: } else {
366: if (oldslot)
1.5 mrg 367: elt->count--;
368:
369: if (elt->count == 0) {
370: LIST_REMOVE(elt, list);
1.12 thorpej 371: pool_put(&uao_swhash_elt_pool, elt);
1.5 mrg 372: }
373: }
1.41 chs 374: } else {
1.5 mrg 375: /* we are using an array */
376: oldslot = aobj->u_swslots[pageidx];
377: aobj->u_swslots[pageidx] = slot;
378: }
379: return (oldslot);
1.1 mrg 380: }
381:
382: /*
383: * end of hash/array functions
384: */
385:
386: /*
387: * uao_free: free all resources held by an aobj, and then free the aobj
388: *
389: * => the aobj should be dead
390: */
1.46 chs 391:
1.1 mrg 392: static void
393: uao_free(aobj)
1.5 mrg 394: struct uvm_aobj *aobj;
1.1 mrg 395: {
1.46 chs 396: int swpgonlydelta = 0;
1.1 mrg 397:
1.27 chs 398: simple_unlock(&aobj->u_obj.vmobjlock);
1.5 mrg 399: if (UAO_USES_SWHASH(aobj)) {
400: int i, hashbuckets = aobj->u_swhashmask + 1;
1.1 mrg 401:
1.5 mrg 402: /*
403: * free the swslots from each hash bucket,
404: * then the hash bucket, and finally the hash table itself.
405: */
1.46 chs 406:
1.5 mrg 407: for (i = 0; i < hashbuckets; i++) {
408: struct uao_swhash_elt *elt, *next;
409:
1.27 chs 410: for (elt = LIST_FIRST(&aobj->u_swhash[i]);
411: elt != NULL;
412: elt = next) {
1.5 mrg 413: int j;
414:
1.27 chs 415: for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
1.5 mrg 416: int slot = elt->slots[j];
417:
1.37 chs 418: if (slot == 0) {
419: continue;
420: }
421: uvm_swap_free(slot, 1);
1.46 chs 422: swpgonlydelta++;
1.5 mrg 423: }
424:
1.27 chs 425: next = LIST_NEXT(elt, list);
1.12 thorpej 426: pool_put(&uao_swhash_elt_pool, elt);
1.5 mrg 427: }
428: }
1.34 thorpej 429: free(aobj->u_swhash, M_UVMAOBJ);
1.5 mrg 430: } else {
431: int i;
432:
433: /*
434: * free the array
435: */
436:
1.27 chs 437: for (i = 0; i < aobj->u_pages; i++) {
1.5 mrg 438: int slot = aobj->u_swslots[i];
439:
1.18 chs 440: if (slot) {
1.5 mrg 441: uvm_swap_free(slot, 1);
1.46 chs 442: swpgonlydelta++;
1.18 chs 443: }
1.5 mrg 444: }
1.34 thorpej 445: free(aobj->u_swslots, M_UVMAOBJ);
1.1 mrg 446: }
447:
1.5 mrg 448: /*
449: * finally free the aobj itself
450: */
1.46 chs 451:
1.12 thorpej 452: pool_put(&uvm_aobj_pool, aobj);
1.46 chs 453:
454: /*
455: * adjust the counter of pages only in swap for all
456: * the swap slots we've freed.
457: */
458:
1.48 chs 459: if (swpgonlydelta > 0) {
1.50.2.1! thorpej 460: mutex_enter(&uvm.swap_data_mutex);
1.48 chs 461: KASSERT(uvmexp.swpgonly >= swpgonlydelta);
462: uvmexp.swpgonly -= swpgonlydelta;
1.50.2.1! thorpej 463: mutex_exit(&uvm.swap_data_mutex);
1.48 chs 464: }
1.1 mrg 465: }
466:
467: /*
468: * pager functions
469: */
470:
471: /*
472: * uao_create: create an aobj of the given size and return its uvm_object.
473: *
474: * => for normal use, flags are always zero
475: * => for the kernel object, the flags are:
476: * UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
477: * UAO_FLAG_KERNSWAP - enable swapping of kernel object (" ")
478: */
1.46 chs 479:
1.5 mrg 480: struct uvm_object *
481: uao_create(size, flags)
1.10 eeh 482: vsize_t size;
1.5 mrg 483: int flags;
484: {
1.46 chs 485: static struct uvm_aobj kernel_object_store;
486: static int kobj_alloced = 0;
1.15 chs 487: int pages = round_page(size) >> PAGE_SHIFT;
1.5 mrg 488: struct uvm_aobj *aobj;
1.1 mrg 489:
1.5 mrg 490: /*
1.27 chs 491: * malloc a new aobj unless we are asked for the kernel object
492: */
1.5 mrg 493:
1.46 chs 494: if (flags & UAO_FLAG_KERNOBJ) {
495: KASSERT(!kobj_alloced);
1.5 mrg 496: aobj = &kernel_object_store;
497: aobj->u_pages = pages;
1.46 chs 498: aobj->u_flags = UAO_FLAG_NOSWAP;
1.5 mrg 499: aobj->u_obj.uo_refs = UVM_OBJ_KERN;
500: kobj_alloced = UAO_FLAG_KERNOBJ;
501: } else if (flags & UAO_FLAG_KERNSWAP) {
1.46 chs 502: KASSERT(kobj_alloced == UAO_FLAG_KERNOBJ);
1.5 mrg 503: aobj = &kernel_object_store;
504: kobj_alloced = UAO_FLAG_KERNSWAP;
1.46 chs 505: } else {
1.12 thorpej 506: aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
1.5 mrg 507: aobj->u_pages = pages;
1.46 chs 508: aobj->u_flags = 0;
509: aobj->u_obj.uo_refs = 1;
1.5 mrg 510: }
1.1 mrg 511:
1.5 mrg 512: /*
513: * allocate hash/array if necessary
514: *
 515: * note: in the KERNSWAP case there is no need to worry about locking
 516: * since we are still booting and should be the only thread around.
517: */
1.46 chs 518:
1.5 mrg 519: if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
520: int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
521: M_NOWAIT : M_WAITOK;
522:
523: /* allocate hash table or array depending on object size */
1.27 chs 524: if (UAO_USES_SWHASH(aobj)) {
1.5 mrg 525: aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
1.35 ad 526: HASH_LIST, M_UVMAOBJ, mflags, &aobj->u_swhashmask);
1.5 mrg 527: if (aobj->u_swhash == NULL)
528: panic("uao_create: hashinit swhash failed");
529: } else {
1.34 thorpej 530: aobj->u_swslots = malloc(pages * sizeof(int),
1.5 mrg 531: M_UVMAOBJ, mflags);
532: if (aobj->u_swslots == NULL)
533: panic("uao_create: malloc swslots failed");
1.9 perry 534: memset(aobj->u_swslots, 0, pages * sizeof(int));
1.5 mrg 535: }
536:
537: if (flags) {
538: aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
539: return(&aobj->u_obj);
540: }
541: }
542:
543: /*
544: * init aobj fields
545: */
1.46 chs 546:
1.5 mrg 547: simple_lock_init(&aobj->u_obj.vmobjlock);
548: aobj->u_obj.pgops = &aobj_pager;
549: TAILQ_INIT(&aobj->u_obj.memq);
550: aobj->u_obj.uo_npages = 0;
1.1 mrg 551:
1.5 mrg 552: /*
553: * now that aobj is ready, add it to the global list
554: */
1.46 chs 555:
1.5 mrg 556: simple_lock(&uao_list_lock);
557: LIST_INSERT_HEAD(&uao_list, aobj, u_list);
558: simple_unlock(&uao_list_lock);
559: return(&aobj->u_obj);
1.1 mrg 560: }
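
/*
 * Usage sketch (editor's note; the callers and sizes named here are
 * assumptions based on the rest of UVM, not taken from this file).
 * an ordinary consumer just does:
 *
 *	uobj = uao_create(size, 0);
 *	...
 *	uao_detach(uobj);		(drop the initial reference)
 *
 * the kernel object is built in two phases: early in boot, before any
 * swap exists, uvm_km_init() is believed to call
 *
 *	uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
 *	    UAO_FLAG_KERNOBJ);
 *
 * which leaves the object marked UAO_FLAG_NOSWAP, and once swap is
 * configured the swap code makes a second call with UAO_FLAG_KERNSWAP,
 * which allocates the swap bookkeeping and clears NOSWAP.
 */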
561:
562:
563:
564: /*
565: * uao_init: set up aobj pager subsystem
566: *
567: * => called at boot time from uvm_pager_init()
568: */
1.46 chs 569:
1.27 chs 570: void
1.46 chs 571: uao_init(void)
1.5 mrg 572: {
1.12 thorpej 573: static int uao_initialized;
574:
575: if (uao_initialized)
576: return;
577: uao_initialized = TRUE;
1.5 mrg 578: LIST_INIT(&uao_list);
579: simple_lock_init(&uao_list_lock);
1.12 thorpej 580:
1.14 thorpej 581: /*
 582: * NOTE: Pages for this pool must not come from a pageable
583: * kernel map!
584: */
1.46 chs 585:
1.12 thorpej 586: pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
1.50 thorpej 587: 0, 0, 0, "uaoeltpl", NULL);
1.12 thorpej 588: pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0,
1.50 thorpej 589: "aobjpl", &pool_allocator_nointr);
1.1 mrg 590: }
591:
592: /*
593: * uao_reference: add a ref to an aobj
594: *
1.27 chs 595: * => aobj must be unlocked
596: * => just lock it and call the locked version
1.1 mrg 597: */
1.46 chs 598:
1.5 mrg 599: void
600: uao_reference(uobj)
601: struct uvm_object *uobj;
1.1 mrg 602: {
1.27 chs 603: simple_lock(&uobj->vmobjlock);
604: uao_reference_locked(uobj);
605: simple_unlock(&uobj->vmobjlock);
606: }
607:
608: /*
609: * uao_reference_locked: add a ref to an aobj that is already locked
610: *
611: * => aobj must be locked
612: * this needs to be separate from the normal routine
613: * since sometimes we need to add a reference to an aobj when
614: * it's already locked.
615: */
1.46 chs 616:
1.27 chs 617: void
618: uao_reference_locked(uobj)
619: struct uvm_object *uobj;
620: {
1.5 mrg 621: UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);
1.1 mrg 622:
1.5 mrg 623: /*
624: * kernel_object already has plenty of references, leave it alone.
625: */
1.1 mrg 626:
1.20 thorpej 627: if (UVM_OBJ_IS_KERN_OBJECT(uobj))
1.5 mrg 628: return;
1.1 mrg 629:
1.46 chs 630: uobj->uo_refs++;
1.41 chs 631: UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
1.27 chs 632: uobj, uobj->uo_refs,0,0);
1.1 mrg 633: }
634:
635: /*
636: * uao_detach: drop a reference to an aobj
637: *
1.27 chs 638: * => aobj must be unlocked
639: * => just lock it and call the locked version
1.1 mrg 640: */
1.46 chs 641:
1.5 mrg 642: void
643: uao_detach(uobj)
644: struct uvm_object *uobj;
645: {
1.27 chs 646: simple_lock(&uobj->vmobjlock);
647: uao_detach_locked(uobj);
648: }
649:
650: /*
651: * uao_detach_locked: drop a reference to an aobj
652: *
653: * => aobj must be locked, and is unlocked (or freed) upon return.
654: * this needs to be separate from the normal routine
655: * since sometimes we need to detach from an aobj when
656: * it's already locked.
657: */
1.46 chs 658:
1.27 chs 659: void
660: uao_detach_locked(uobj)
661: struct uvm_object *uobj;
662: {
1.5 mrg 663: struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
1.46 chs 664: struct vm_page *pg;
1.5 mrg 665: UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);
1.1 mrg 666:
1.5 mrg 667: /*
668: * detaching from kernel_object is a noop.
669: */
1.46 chs 670:
1.27 chs 671: if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
672: simple_unlock(&uobj->vmobjlock);
1.5 mrg 673: return;
1.27 chs 674: }
1.5 mrg 675:
676: UVMHIST_LOG(maphist," (uobj=0x%x) ref=%d", uobj,uobj->uo_refs,0,0);
1.46 chs 677: uobj->uo_refs--;
678: if (uobj->uo_refs) {
1.5 mrg 679: simple_unlock(&uobj->vmobjlock);
680: UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
681: return;
682: }
683:
684: /*
685: * remove the aobj from the global list.
686: */
1.46 chs 687:
1.5 mrg 688: simple_lock(&uao_list_lock);
689: LIST_REMOVE(aobj, u_list);
690: simple_unlock(&uao_list_lock);
691:
692: /*
1.46 chs 693: * free all the pages left in the aobj. for each page,
694: * when the page is no longer busy (and thus after any disk i/o that
695: * it's involved in is complete), release any swap resources and
696: * free the page itself.
1.5 mrg 697: */
1.46 chs 698:
699: uvm_lock_pageq();
700: while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL) {
701: pmap_page_protect(pg, VM_PROT_NONE);
1.5 mrg 702: if (pg->flags & PG_BUSY) {
1.46 chs 703: pg->flags |= PG_WANTED;
704: uvm_unlock_pageq();
705: UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, FALSE,
706: "uao_det", 0);
707: simple_lock(&uobj->vmobjlock);
708: uvm_lock_pageq();
1.5 mrg 709: continue;
710: }
1.18 chs 711: uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
1.5 mrg 712: uvm_pagefree(pg);
713: }
1.46 chs 714: uvm_unlock_pageq();
1.1 mrg 715:
1.5 mrg 716: /*
1.46 chs 717: * finally, free the aobj itself.
1.5 mrg 718: */
1.1 mrg 719:
1.5 mrg 720: uao_free(aobj);
721: }
1.1 mrg 722:
723: /*
1.46 chs 724: * uao_put: flush pages out of a uvm object
1.22 thorpej 725: *
726: * => object should be locked by caller. we may _unlock_ the object
727: * if (and only if) we need to clean a page (PGO_CLEANIT).
728: * XXXJRT Currently, however, we don't. In the case of cleaning
729: * XXXJRT a page, we simply just deactivate it. Should probably
730: * XXXJRT handle this better, in the future (although "flushing"
731: * XXXJRT anonymous memory isn't terribly important).
732: * => if PGO_CLEANIT is not set, then we will neither unlock the object
 733: * nor block.
 734: * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
735: * for flushing.
736: * => NOTE: we rely on the fact that the object's memq is a TAILQ and
737: * that new pages are inserted on the tail end of the list. thus,
738: * we can make a complete pass through the object in one go by starting
739: * at the head and working towards the tail (new pages are put in
740: * front of us).
741: * => NOTE: we are allowed to lock the page queues, so the caller
742: * must not be holding the lock on them [e.g. pagedaemon had
743: * better not call us with the queues locked]
744: * => we return TRUE unless we encountered some sort of I/O error
745: * XXXJRT currently never happens, as we never directly initiate
746: * XXXJRT I/O
747: *
748: * note on page traversal:
749: * we can traverse the pages in an object either by going down the
750: * linked list in "uobj->memq", or we can go over the address range
751: * by page doing hash table lookups for each address. depending
752: * on how many pages are in the object it may be cheaper to do one
753: * or the other. we set "by_list" to true if we are using memq.
754: * if the cost of a hash lookup was equal to the cost of the list
755: * traversal we could compare the number of pages in the start->stop
756: * range to the total number of pages in the object. however, it
757: * seems that a hash table lookup is more expensive than the linked
758: * list traversal, so we multiply the number of pages in the
759: * start->stop range by a penalty which we define below.
1.1 mrg 760: */
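
/*
 * Worked example of the by_list decision (editor's sketch, assuming
 * UVM_PAGE_HASH_PENALTY is 4 as defined in uvm_page.h): flushing a
 * 16-page range of an object with 200 resident pages compares
 * 200 <= 16 * 4, which is false, so we do per-offset hash lookups;
 * the same range of an object with only 40 resident pages gives
 * 40 <= 64, so we walk memq instead.
 */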
1.22 thorpej 761:
1.46 chs 762: int
763: uao_put(uobj, start, stop, flags)
1.5 mrg 764: struct uvm_object *uobj;
1.28 kleink 765: voff_t start, stop;
1.5 mrg 766: int flags;
767: {
1.46 chs 768: struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
769: struct vm_page *pg, *nextpg;
770: boolean_t by_list;
1.28 kleink 771: voff_t curoff;
1.46 chs 772: UVMHIST_FUNC("uao_put"); UVMHIST_CALLED(maphist);
1.22 thorpej 773:
1.46 chs 774: curoff = 0;
1.22 thorpej 775: if (flags & PGO_ALLPAGES) {
776: start = 0;
777: stop = aobj->u_pages << PAGE_SHIFT;
778: by_list = TRUE; /* always go by the list */
779: } else {
780: start = trunc_page(start);
781: stop = round_page(stop);
782: if (stop > (aobj->u_pages << PAGE_SHIFT)) {
 783: printf("uao_put: strange, got an out of range "
784: "flush (fixed)\n");
785: stop = aobj->u_pages << PAGE_SHIFT;
786: }
787: by_list = (uobj->uo_npages <=
1.46 chs 788: ((stop - start) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);
1.22 thorpej 789: }
790: UVMHIST_LOG(maphist,
791: " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
792: start, stop, by_list, flags);
1.1 mrg 793:
1.5 mrg 794: /*
1.22 thorpej 795: * Don't need to do any work here if we're not freeing
796: * or deactivating pages.
797: */
1.46 chs 798:
1.22 thorpej 799: if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
1.46 chs 800: simple_unlock(&uobj->vmobjlock);
801: return 0;
1.22 thorpej 802: }
803:
1.5 mrg 804: /*
1.46 chs 805: * now do it. note: we must update nextpg in the body of the loop or we
806: * will get stuck. we need to use nextpg because we may free "pg"
1.22 thorpej 807: * before doing the next loop.
1.21 thorpej 808: */
1.22 thorpej 809:
810: if (by_list) {
1.46 chs 811: pg = TAILQ_FIRST(&uobj->memq);
1.22 thorpej 812: } else {
813: curoff = start;
1.46 chs 814: pg = uvm_pagelookup(uobj, curoff);
1.22 thorpej 815: }
816:
1.46 chs 817: nextpg = NULL;
818: uvm_lock_pageq();
1.22 thorpej 819:
820: /* locked: both page queues and uobj */
1.46 chs 821: for ( ; (by_list && pg != NULL) ||
822: (!by_list && curoff < stop) ; pg = nextpg) {
1.22 thorpej 823: if (by_list) {
1.46 chs 824: nextpg = TAILQ_NEXT(pg, listq);
825: if (pg->offset < start || pg->offset >= stop)
1.22 thorpej 826: continue;
827: } else {
828: curoff += PAGE_SIZE;
829: if (curoff < stop)
1.46 chs 830: nextpg = uvm_pagelookup(uobj, curoff);
831: if (pg == NULL)
1.22 thorpej 832: continue;
833: }
1.46 chs 834: switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
1.41 chs 835:
1.22 thorpej 836: /*
837: * XXX In these first 3 cases, we always just
838: * XXX deactivate the page. We may want to
839: * XXX handle the different cases more specifically
840: * XXX in the future.
841: */
1.46 chs 842:
1.22 thorpej 843: case PGO_CLEANIT|PGO_FREE:
844: case PGO_CLEANIT|PGO_DEACTIVATE:
845: case PGO_DEACTIVATE:
1.25 thorpej 846: deactivate_it:
1.22 thorpej 847: /* skip the page if it's loaned or wired */
1.46 chs 848: if (pg->loan_count != 0 || pg->wire_count != 0)
1.22 thorpej 849: continue;
850:
851: /* ...and deactivate the page. */
1.46 chs 852: pmap_clear_reference(pg);
853: uvm_pagedeactivate(pg);
1.22 thorpej 854: continue;
855:
856: case PGO_FREE:
1.46 chs 857:
1.25 thorpej 858: /*
859: * If there are multiple references to
860: * the object, just deactivate the page.
861: */
1.46 chs 862:
1.25 thorpej 863: if (uobj->uo_refs > 1)
864: goto deactivate_it;
865:
1.22 thorpej 866: /* XXX skip the page if it's loaned or wired */
1.46 chs 867: if (pg->loan_count != 0 || pg->wire_count != 0)
1.22 thorpej 868: continue;
869:
870: /*
1.46 chs 871: * wait if the page is busy, then free the swap slot
872: * and the page.
1.22 thorpej 873: */
1.46 chs 874:
875: pmap_page_protect(pg, VM_PROT_NONE);
876: while (pg->flags & PG_BUSY) {
877: pg->flags |= PG_WANTED;
878: uvm_unlock_pageq();
879: UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
880: "uao_put", 0);
881: simple_lock(&uobj->vmobjlock);
882: uvm_lock_pageq();
1.22 thorpej 883: }
1.46 chs 884: uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
885: uvm_pagefree(pg);
1.22 thorpej 886: continue;
887: }
888: }
889: uvm_unlock_pageq();
1.46 chs 890: simple_unlock(&uobj->vmobjlock);
891: return 0;
1.1 mrg 892: }
893:
894: /*
895: * uao_get: fetch me a page
896: *
897: * we have three cases:
898: * 1: page is resident -> just return the page.
899: * 2: page is zero-fill -> allocate a new page and zero it.
900: * 3: page is swapped out -> fetch the page from swap.
901: *
902: * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
903: * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
1.40 chs 904: * then we will need to return EBUSY.
1.1 mrg 905: *
906: * => prefer map unlocked (not required)
907: * => object must be locked! we will _unlock_ it before starting any I/O.
908: * => flags: PGO_ALLPAGES: get all of the pages
909: * PGO_LOCKED: fault data structures are locked
910: * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
911: * => NOTE: caller must check for released pages!!
912: */
1.46 chs 913:
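
/*
 * Calling-convention sketch (editor's illustration; real callers reach
 * this routine through the pgo_get hook and are not shown in this
 * file).  with the object locked, a fault-style caller first tries the
 * cheap locked lookup and, on EBUSY, retries without PGO_LOCKED so
 * that we may sleep and do I/O:
 *
 *	npages = 1;
 *	error = uao_get(uobj, off, &pg, &npages, 0,
 *	    VM_PROT_READ, 0, PGO_LOCKED);
 *	if (error == EBUSY) {
 *		npages = 1;
 *		error = uao_get(uobj, off, &pg, &npages, 0,
 *		    VM_PROT_READ, 0, 0);	(may unlock, sleep, do I/O)
 *	}
 *
 * uao_pagein_page() below drives the second (I/O-capable) form
 * directly.
 */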
1.5 mrg 914: static int
915: uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
916: struct uvm_object *uobj;
1.28 kleink 917: voff_t offset;
1.5 mrg 918: struct vm_page **pps;
919: int *npagesp;
920: int centeridx, advice, flags;
921: vm_prot_t access_type;
922: {
923: struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
1.28 kleink 924: voff_t current_offset;
1.43 chs 925: struct vm_page *ptmp;
1.46 chs 926: int lcv, gotpages, maxpages, swslot, error, pageidx;
1.5 mrg 927: boolean_t done;
928: UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);
929:
1.27 chs 930: UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
931: aobj, offset, flags,0);
1.37 chs 932:
1.5 mrg 933: /*
934: * get number of pages
935: */
1.46 chs 936:
1.5 mrg 937: maxpages = *npagesp;
938:
939: /*
 940: * step 1: handle the case where fault data structures are locked.
941: */
1.1 mrg 942:
1.5 mrg 943: if (flags & PGO_LOCKED) {
1.46 chs 944:
1.5 mrg 945: /*
946: * step 1a: get pages that are already resident. only do
947: * this if the data structures are locked (i.e. the first
948: * time through).
949: */
950:
951: done = TRUE; /* be optimistic */
952: gotpages = 0; /* # of pages we got so far */
953: for (lcv = 0, current_offset = offset ; lcv < maxpages ;
954: lcv++, current_offset += PAGE_SIZE) {
955: /* do we care about this page? if not, skip it */
956: if (pps[lcv] == PGO_DONTCARE)
957: continue;
958: ptmp = uvm_pagelookup(uobj, current_offset);
959:
960: /*
1.30 thorpej 961: * if page is new, attempt to allocate the page,
962: * zero-fill'd.
1.5 mrg 963: */
1.46 chs 964:
965: if (ptmp == NULL && uao_find_swslot(&aobj->u_obj,
1.15 chs 966: current_offset >> PAGE_SHIFT) == 0) {
1.5 mrg 967: ptmp = uvm_pagealloc(uobj, current_offset,
1.30 thorpej 968: NULL, UVM_PGA_ZERO);
1.5 mrg 969: if (ptmp) {
970: /* new page */
1.47 chs 971: ptmp->flags &= ~(PG_FAKE);
1.5 mrg 972: ptmp->pqflags |= PQ_AOBJ;
1.47 chs 973: goto gotpage;
1.5 mrg 974: }
975: }
976:
977: /*
1.46 chs 978: * to be useful must get a non-busy page
1.5 mrg 979: */
1.46 chs 980:
981: if (ptmp == NULL || (ptmp->flags & PG_BUSY) != 0) {
1.5 mrg 982: if (lcv == centeridx ||
983: (flags & PGO_ALLPAGES) != 0)
984: /* need to do a wait or I/O! */
1.41 chs 985: done = FALSE;
1.5 mrg 986: continue;
987: }
988:
989: /*
990: * useful page: busy/lock it and plug it in our
991: * result array
992: */
1.46 chs 993:
1.5 mrg 994: /* caller must un-busy this page */
1.41 chs 995: ptmp->flags |= PG_BUSY;
1.5 mrg 996: UVM_PAGE_OWN(ptmp, "uao_get1");
1.47 chs 997: gotpage:
1.5 mrg 998: pps[lcv] = ptmp;
999: gotpages++;
1.46 chs 1000: }
1.5 mrg 1001:
1002: /*
1003: * step 1b: now we've either done everything needed or we
 1004: * need to unlock and do some waiting or I/O.
1005: */
1006:
1007: UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);
1008: *npagesp = gotpages;
1009: if (done)
1.46 chs 1010: return 0;
1.5 mrg 1011: else
1.46 chs 1012: return EBUSY;
1.1 mrg 1013: }
1014:
1.5 mrg 1015: /*
1016: * step 2: get non-resident or busy pages.
1017: * object is locked. data structures are unlocked.
1018: */
1019:
1020: for (lcv = 0, current_offset = offset ; lcv < maxpages ;
1021: lcv++, current_offset += PAGE_SIZE) {
1.27 chs 1022:
1.5 mrg 1023: /*
1024: * - skip over pages we've already gotten or don't want
1025: * - skip over pages we don't _have_ to get
1026: */
1.27 chs 1027:
1.5 mrg 1028: if (pps[lcv] != NULL ||
1029: (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
1030: continue;
1031:
1.27 chs 1032: pageidx = current_offset >> PAGE_SHIFT;
1033:
1.5 mrg 1034: /*
1035: * we have yet to locate the current page (pps[lcv]). we
1036: * first look for a page that is already at the current offset.
1037: * if we find a page, we check to see if it is busy or
1038: * released. if that is the case, then we sleep on the page
1039: * until it is no longer busy or released and repeat the lookup.
1040: * if the page we found is neither busy nor released, then we
1041: * busy it (so we own it) and plug it into pps[lcv]. this
1042: * 'break's the following while loop and indicates we are
1043: * ready to move on to the next page in the "lcv" loop above.
1044: *
1045: * if we exit the while loop with pps[lcv] still set to NULL,
1046: * then it means that we allocated a new busy/fake/clean page
1047: * ptmp in the object and we need to do I/O to fill in the data.
1048: */
1049:
1050: /* top of "pps" while loop */
1051: while (pps[lcv] == NULL) {
1052: /* look for a resident page */
1053: ptmp = uvm_pagelookup(uobj, current_offset);
1054:
1055: /* not resident? allocate one now (if we can) */
1056: if (ptmp == NULL) {
1057:
1058: ptmp = uvm_pagealloc(uobj, current_offset,
1.19 chs 1059: NULL, 0);
1.5 mrg 1060:
1061: /* out of RAM? */
1062: if (ptmp == NULL) {
1063: simple_unlock(&uobj->vmobjlock);
1064: UVMHIST_LOG(pdhist,
1065: "sleeping, ptmp == NULL\n",0,0,0,0);
1066: uvm_wait("uao_getpage");
1067: simple_lock(&uobj->vmobjlock);
1.41 chs 1068: continue;
1.5 mrg 1069: }
1070:
1071: /*
1072: * safe with PQ's unlocked: because we just
1073: * alloc'd the page
1074: */
1.46 chs 1075:
1.5 mrg 1076: ptmp->pqflags |= PQ_AOBJ;
1077:
1.41 chs 1078: /*
1.5 mrg 1079: * got new page ready for I/O. break pps while
1080: * loop. pps[lcv] is still NULL.
1081: */
1.46 chs 1082:
1.5 mrg 1083: break;
1084: }
1085:
1086: /* page is there, see if we need to wait on it */
1.46 chs 1087: if ((ptmp->flags & PG_BUSY) != 0) {
1.5 mrg 1088: ptmp->flags |= PG_WANTED;
1089: UVMHIST_LOG(pdhist,
1090: "sleeping, ptmp->flags 0x%x\n",
1091: ptmp->flags,0,0,0);
1.23 thorpej 1092: UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
1093: FALSE, "uao_get", 0);
1.5 mrg 1094: simple_lock(&uobj->vmobjlock);
1.46 chs 1095: continue;
1.5 mrg 1096: }
1.41 chs 1097:
1098: /*
1.5 mrg 1099: * if we get here then the page has become resident and
1100: * unbusy between steps 1 and 2. we busy it now (so we
1101: * own it) and set pps[lcv] (so that we exit the while
1102: * loop).
1103: */
1.46 chs 1104:
1.5 mrg 1105: /* we own it, caller must un-busy */
1106: ptmp->flags |= PG_BUSY;
1107: UVM_PAGE_OWN(ptmp, "uao_get2");
1108: pps[lcv] = ptmp;
1109: }
1110:
1111: /*
1112: * if we own the valid page at the correct offset, pps[lcv] will
1113: * point to it. nothing more to do except go to the next page.
1114: */
1.46 chs 1115:
1.5 mrg 1116: if (pps[lcv])
1117: continue; /* next lcv */
1118:
1119: /*
1.41 chs 1120: * we have a "fake/busy/clean" page that we just allocated.
1.5 mrg 1121: * do the needed "i/o", either reading from swap or zeroing.
1122: */
1.46 chs 1123:
1124: swslot = uao_find_swslot(&aobj->u_obj, pageidx);
1.5 mrg 1125:
1126: /*
1127: * just zero the page if there's nothing in swap.
1128: */
1.46 chs 1129:
1130: if (swslot == 0) {
1131:
1.5 mrg 1132: /*
1133: * page hasn't existed before, just zero it.
1134: */
1.46 chs 1135:
1.5 mrg 1136: uvm_pagezero(ptmp);
1.27 chs 1137: } else {
1.5 mrg 1138: UVMHIST_LOG(pdhist, "pagein from swslot %d",
1139: swslot, 0,0,0);
1140:
1141: /*
1142: * page in the swapped-out page.
1143: * unlock object for i/o, relock when done.
1144: */
1.46 chs 1145:
1.5 mrg 1146: simple_unlock(&uobj->vmobjlock);
1.46 chs 1147: error = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
1.5 mrg 1148: simple_lock(&uobj->vmobjlock);
1149:
1150: /*
1151: * I/O done. check for errors.
1152: */
1.46 chs 1153:
1154: if (error != 0) {
1.5 mrg 1155: UVMHIST_LOG(pdhist, "<- done (error=%d)",
1.46 chs 1156: error,0,0,0);
1.5 mrg 1157: if (ptmp->flags & PG_WANTED)
1.24 thorpej 1158: wakeup(ptmp);
1.27 chs 1159:
1160: /*
1161: * remove the swap slot from the aobj
1162: * and mark the aobj as having no real slot.
1163: * don't free the swap slot, thus preventing
1164: * it from being used again.
1165: */
1.46 chs 1166:
1.27 chs 1167: swslot = uao_set_swslot(&aobj->u_obj, pageidx,
1168: SWSLOT_BAD);
1.45 chs 1169: if (swslot != -1) {
1170: uvm_swap_markbad(swslot, 1);
1171: }
1.27 chs 1172:
1.5 mrg 1173: uvm_lock_pageq();
1174: uvm_pagefree(ptmp);
1175: uvm_unlock_pageq();
1176: simple_unlock(&uobj->vmobjlock);
1.46 chs 1177: return error;
1.5 mrg 1178: }
1179: }
1180:
1.41 chs 1181: /*
1.5 mrg 1182: * we got the page! clear the fake flag (indicates valid
1183: * data now in page) and plug into our result array. note
1.41 chs 1184: * that page is still busy.
1.5 mrg 1185: *
1186: * it is the callers job to:
1187: * => check if the page is released
1188: * => unbusy the page
1189: * => activate the page
1190: */
1191:
1.46 chs 1192: ptmp->flags &= ~PG_FAKE;
1.5 mrg 1193: pps[lcv] = ptmp;
1.46 chs 1194: }
1.1 mrg 1195:
1196: /*
1.5 mrg 1197: * finally, unlock object and return.
1198: */
1.1 mrg 1199:
1200: simple_unlock(&uobj->vmobjlock);
1.5 mrg 1201: UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
1.46 chs 1202: return 0;
1.1 mrg 1203: }
1204:
1205: /*
1.18 chs 1206: * uao_dropswap: release any swap resources from this aobj page.
1.41 chs 1207: *
1.18 chs 1208: * => aobj must be locked or have a reference count of 0.
1209: */
1210:
1211: void
1212: uao_dropswap(uobj, pageidx)
1213: struct uvm_object *uobj;
1214: int pageidx;
1215: {
1216: int slot;
1217:
1218: slot = uao_set_swslot(uobj, pageidx, 0);
1219: if (slot) {
1220: uvm_swap_free(slot, 1);
1221: }
1.27 chs 1222: }
1223:
1224: /*
1225: * page in every page in every aobj that is paged-out to a range of swslots.
1.41 chs 1226: *
1.27 chs 1227: * => nothing should be locked.
1228: * => returns TRUE if pagein was aborted due to lack of memory.
1229: */
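
/*
 * Usage sketch (editor's note; the caller lives in the swap code and
 * is an assumption here, not shown in this file).  when a swap device
 * is being disabled, the swap code is expected to do something like:
 *
 *	if (uao_swap_off(startslot, endslot))
 *		return ENOMEM;		(pagein ran out of memory, abort)
 *
 * for the device's slot range, together with an equivalent pass over
 * anon-owned pages, so that no aobj page is left on the dying device.
 */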
1.46 chs 1230:
1.27 chs 1231: boolean_t
1232: uao_swap_off(startslot, endslot)
1233: int startslot, endslot;
1234: {
1235: struct uvm_aobj *aobj, *nextaobj;
1.46 chs 1236: boolean_t rv;
1.27 chs 1237:
1238: /*
1239: * walk the list of all aobjs.
1240: */
1241:
1242: restart:
1243: simple_lock(&uao_list_lock);
1244: for (aobj = LIST_FIRST(&uao_list);
1245: aobj != NULL;
1246: aobj = nextaobj) {
1247:
1248: /*
1.46 chs 1249: * try to get the object lock, start all over if we fail.
1.27 chs 1250: * most of the time we'll get the aobj lock,
1251: * so this should be a rare case.
1252: */
1.46 chs 1253:
1.27 chs 1254: if (!simple_lock_try(&aobj->u_obj.vmobjlock)) {
1255: simple_unlock(&uao_list_lock);
1256: goto restart;
1257: }
1258:
1259: /*
1260: * add a ref to the aobj so it doesn't disappear
1261: * while we're working.
1262: */
1.46 chs 1263:
1.27 chs 1264: uao_reference_locked(&aobj->u_obj);
1265:
1266: /*
1267: * now it's safe to unlock the uao list.
1268: */
1.46 chs 1269:
1.27 chs 1270: simple_unlock(&uao_list_lock);
1271:
1272: /*
1273: * page in any pages in the swslot range.
1274: * if there's an error, abort and return the error.
1275: */
1.46 chs 1276:
1.27 chs 1277: rv = uao_pagein(aobj, startslot, endslot);
1278: if (rv) {
1279: uao_detach_locked(&aobj->u_obj);
1280: return rv;
1281: }
1282:
1283: /*
1284: * we're done with this aobj.
1285: * relock the list and drop our ref on the aobj.
1286: */
1.46 chs 1287:
1.27 chs 1288: simple_lock(&uao_list_lock);
1289: nextaobj = LIST_NEXT(aobj, u_list);
1290: uao_detach_locked(&aobj->u_obj);
1291: }
1292:
1293: /*
1294: * done with traversal, unlock the list
1295: */
1296: simple_unlock(&uao_list_lock);
1297: return FALSE;
1298: }
1299:
1300:
1301: /*
1302: * page in any pages from aobj in the given range.
1303: *
1304: * => aobj must be locked and is returned locked.
1305: * => returns TRUE if pagein was aborted due to lack of memory.
1306: */
1307: static boolean_t
1308: uao_pagein(aobj, startslot, endslot)
1309: struct uvm_aobj *aobj;
1310: int startslot, endslot;
1311: {
1312: boolean_t rv;
1313:
1314: if (UAO_USES_SWHASH(aobj)) {
1315: struct uao_swhash_elt *elt;
1316: int bucket;
1317:
1318: restart:
1319: for (bucket = aobj->u_swhashmask; bucket >= 0; bucket--) {
1320: for (elt = LIST_FIRST(&aobj->u_swhash[bucket]);
1321: elt != NULL;
1322: elt = LIST_NEXT(elt, list)) {
1323: int i;
1324:
1325: for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
1326: int slot = elt->slots[i];
1327:
1328: /*
1329: * if the slot isn't in range, skip it.
1330: */
1.46 chs 1331:
1.41 chs 1332: if (slot < startslot ||
1.27 chs 1333: slot >= endslot) {
1334: continue;
1335: }
1336:
1337: /*
1338: * process the page,
 1339: * then start over on this object
1340: * since the swhash elt
1341: * may have been freed.
1342: */
1.46 chs 1343:
1.27 chs 1344: rv = uao_pagein_page(aobj,
1345: UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
1346: if (rv) {
1347: return rv;
1348: }
1349: goto restart;
1350: }
1351: }
1352: }
1353: } else {
1354: int i;
1355:
1356: for (i = 0; i < aobj->u_pages; i++) {
1357: int slot = aobj->u_swslots[i];
1358:
1359: /*
1360: * if the slot isn't in range, skip it
1361: */
1.46 chs 1362:
1.27 chs 1363: if (slot < startslot || slot >= endslot) {
1364: continue;
1365: }
1366:
1367: /*
1368: * process the page.
1369: */
1.46 chs 1370:
1.27 chs 1371: rv = uao_pagein_page(aobj, i);
1372: if (rv) {
1373: return rv;
1374: }
1375: }
1376: }
1377:
1378: return FALSE;
1379: }
1380:
1381: /*
1382: * page in a page from an aobj. used for swap_off.
1383: * returns TRUE if pagein was aborted due to lack of memory.
1384: *
1385: * => aobj must be locked and is returned locked.
1386: */
1.46 chs 1387:
1.27 chs 1388: static boolean_t
1389: uao_pagein_page(aobj, pageidx)
1390: struct uvm_aobj *aobj;
1391: int pageidx;
1392: {
1393: struct vm_page *pg;
1394: int rv, slot, npages;
1395:
1396: pg = NULL;
1397: npages = 1;
1398: /* locked: aobj */
1399: rv = uao_get(&aobj->u_obj, pageidx << PAGE_SHIFT,
1400: &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, 0);
1401: /* unlocked: aobj */
1402:
1403: /*
1404: * relock and finish up.
1405: */
1.46 chs 1406:
1.27 chs 1407: simple_lock(&aobj->u_obj.vmobjlock);
1408: switch (rv) {
1.40 chs 1409: case 0:
1.27 chs 1410: break;
1411:
1.40 chs 1412: case EIO:
1413: case ERESTART:
1.46 chs 1414:
1.27 chs 1415: /*
1416: * nothing more to do on errors.
1.40 chs 1417: * ERESTART can only mean that the anon was freed,
1.27 chs 1418: * so again there's nothing to do.
1419: */
1.46 chs 1420:
1.27 chs 1421: return FALSE;
1422: }
1423:
1424: /*
1425: * ok, we've got the page now.
1426: * mark it as dirty, clear its swslot and un-busy it.
1427: */
1.46 chs 1428:
1.27 chs 1429: slot = uao_set_swslot(&aobj->u_obj, pageidx, 0);
1430: uvm_swap_free(slot, 1);
1431: pg->flags &= ~(PG_BUSY|PG_CLEAN|PG_FAKE);
1432: UVM_PAGE_OWN(pg, NULL);
1433:
1434: /*
1.46 chs 1435: * deactivate the page (to make sure it's on a page queue).
1.27 chs 1436: */
1.46 chs 1437:
1.27 chs 1438: uvm_lock_pageq();
1439: uvm_pagedeactivate(pg);
1440: uvm_unlock_pageq();
1441: return FALSE;
1.1 mrg 1442: }