
Annotation of src/sys/uvm/uvm_page.h, Revision 1.102

1.102   ! ad          1: /*     $NetBSD: uvm_page.h,v 1.101 2020/03/16 08:03:58 rin Exp $       */
1.1       mrg         2:
1.26      chs         3: /*
1.1       mrg         4:  * Copyright (c) 1997 Charles D. Cranor and Washington University.
1.26      chs         5:  * Copyright (c) 1991, 1993, The Regents of the University of California.
1.1       mrg         6:  *
                      7:  * All rights reserved.
                      8:  *
                      9:  * This code is derived from software contributed to Berkeley by
                     10:  * The Mach Operating System project at Carnegie-Mellon University.
                     11:  *
                     12:  * Redistribution and use in source and binary forms, with or without
                     13:  * modification, are permitted provided that the following conditions
                     14:  * are met:
                     15:  * 1. Redistributions of source code must retain the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer.
                     17:  * 2. Redistributions in binary form must reproduce the above copyright
                     18:  *    notice, this list of conditions and the following disclaimer in the
                     19:  *    documentation and/or other materials provided with the distribution.
1.71      chuck      20:  * 3. Neither the name of the University nor the names of its contributors
1.1       mrg        21:  *    may be used to endorse or promote products derived from this software
                     22:  *    without specific prior written permission.
                     23:  *
                     24:  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
                     25:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
                     26:  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
                     27:  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
                     28:  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
                     29:  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
                     30:  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
                     31:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
                     32:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
                     33:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
                     34:  * SUCH DAMAGE.
                     35:  *
                     36:  *     @(#)vm_page.h   7.3 (Berkeley) 4/21/91
1.3       mrg        37:  * from: Id: uvm_page.h,v 1.1.2.6 1998/02/04 02:31:42 chuck Exp
1.1       mrg        38:  *
                     39:  *
                     40:  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
                     41:  * All rights reserved.
1.26      chs        42:  *
1.1       mrg        43:  * Permission to use, copy, modify and distribute this software and
                     44:  * its documentation is hereby granted, provided that both the copyright
                     45:  * notice and this permission notice appear in all copies of the
                     46:  * software, derivative works or modified versions, and any portions
                     47:  * thereof, and that both notices appear in supporting documentation.
1.26      chs        48:  *
                     49:  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
                     50:  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
1.1       mrg        51:  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
1.26      chs        52:  *
1.1       mrg        53:  * Carnegie Mellon requests users of this software to return to
                     54:  *
                     55:  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
                     56:  *  School of Computer Science
                     57:  *  Carnegie Mellon University
                     58:  *  Pittsburgh PA 15213-3890
                     59:  *
                     60:  * any improvements or extensions that they make and grant Carnegie the
                     61:  * rights to redistribute these changes.
                     62:  */
                     63:
1.4       perry      64: #ifndef _UVM_UVM_PAGE_H_
                     65: #define _UVM_UVM_PAGE_H_
                     66:
1.99      riastrad   67: #ifdef _KERNEL_OPT
                     68: #include "opt_uvm_page_trkown.h"
                     69: #endif
                     70:
1.101     rin        71: #include <sys/rwlock.h>
                     72:
1.74      rmind      73: #include <uvm/uvm_extern.h>
                     74: #include <uvm/uvm_pglist.h>
1.1       mrg        75:
1.16      mrg        76: /*
1.74      rmind      77:  * Management of resident (logical) pages.
1.16      mrg        78:  *
1.74      rmind      79:  * Each resident page has a vm_page structure, indexed by page number.
                     80:  * There are several lists in the structure:
1.16      mrg        81:  *
1.74      rmind      82:  * - A red-black tree rooted with the containing object is used to
                     83:  *   quickly perform object+offset lookups.
                      84:  * - A list of all pages for a given object, for quick deactivation
                      85:  *   at deallocation time.
                     86:  * - An ordered list of pages due for pageout.
                     87:  *
                     88:  * In addition, the structure contains the object and offset to which
                     89:  * this page belongs (for pageout) and sundry status bits.
                     90:  *
                     91:  * Note that the page structure has no lock of its own.  The page is
                     92:  * generally protected by its owner's lock (UVM object or amap/anon).
                     93:  * It should be noted that UVM has to serialize pmap(9) operations on
                     94:  * the managed pages, e.g. for pmap_enter() calls.  Hence, the lock
                     95:  * order is as follows:
                     96:  *
                     97:  *     [vmpage-owner-lock] ->
                     98:  *             any pmap locks (e.g. PV hash lock)
                     99:  *
                    100:  * Since the kernel is always self-consistent, no serialization is
                    101:  * required for unmanaged mappings, e.g. for pmap_kenter_pa() calls.
                    102:  *
                    103:  * Field markings and the corresponding locks:
                    104:  *
1.77      riastrad  105:  * f:  free page queue lock, uvm_fpageqlock
                    106:  * o:  page owner (uvm_object::vmobjlock, vm_amap::am_lock, vm_anon::an_lock)
1.85      ad        107:  * i:  vm_page::interlock
                    108:  *        => flags set and cleared only with o&i held can
                    109:  *           safely be tested for with only o held.
                    110:  * o,i:        o|i for read, o&i for write (depends on context - if could be loaned)
                    111:  *       => see uvm_loan.c
1.77      riastrad  112:  * w:  wired page queue or uvm_pglistalloc:
1.85      ad        113:  *       => wired page queue: o&i to change, stable from wire to unwire
1.77      riastrad  114:  *             XXX What about concurrent or nested wire?
                    115:  *       => uvm_pglistalloc: owned by caller
1.74      rmind     116:  * ?:  locked by pmap or assumed page owner's lock
1.85      ad        117:  * p:  locked by pagedaemon policy module (pdpolicy)
                    118:  * c:  cpu private
                    119:  * s:  stable, does not change
1.16      mrg       120:  *
1.91      ad        121:  * UVM and pmap(9) may use uvm_page_owner_locked_p() to assert whether the
1.74      rmind     122:  * page owner's lock is acquired.
1.77      riastrad  123:  *
1.80      riastrad  124:  * A page can have one of four identities:
1.77      riastrad  125:  *
                    126:  * o free
                    127:  *   => pageq.list is entry on global free page queue
                    128:  *   => uanon is unused (or (void *)0xdeadbeef for DEBUG)
                    129:  *   => uobject is unused (or (void *)0xdeadbeef for DEBUG)
1.85      ad        130:  *   => PG_FREE is set in flags
1.77      riastrad  131:  * o owned by a uvm_object
                    132:  *   => pageq.queue is entry on wired page queue, if any
1.78      riastrad  133:  *   => uanon is NULL or the vm_anon to which it has been O->A loaned
1.77      riastrad  134:  *   => uobject is owner
                    135:  * o owned by a vm_anon
                    136:  *   => pageq is unused (XXX correct?)
                    137:  *   => uanon is owner
                    138:  *   => uobject is NULL
1.85      ad        139:  *   => PG_ANON is set in flags
1.77      riastrad  140:  * o allocated by uvm_pglistalloc
                    141:  *   => pageq.queue is entry on resulting pglist, owned by caller
                    142:  *   => uanon is unused
                    143:  *   => uobject is unused
                    144:  *
                    145:  * The following transitions are allowed:
                    146:  *
                    147:  * - uvm_pagealloc: free -> owned by a uvm_object/vm_anon
                    148:  * - uvm_pagefree: owned by a uvm_object/vm_anon -> free
                    149:  * - uvm_pglistalloc: free -> allocated by uvm_pglistalloc
                    150:  * - uvm_pglistfree: allocated by uvm_pglistalloc -> free
1.92      ad        151:  *
                    152:  * On the ordering of fields:
                    153:  *
                    154:  * The fields most heavily used by the page allocator and uvmpdpol are
                    155:  * clustered together at the start of the structure, so that while under
                    156:  * global lock it's more likely that only one cache line for each page need
                    157:  * be touched.
1.16      mrg       158:  */
                    159:
                    160: struct vm_page {
1.54      ad        161:        union {
1.77      riastrad  162:                TAILQ_ENTRY(vm_page) queue;     /* w: wired page queue
                    163:                                                 * or uvm_pglistalloc output */
                    164:                LIST_ENTRY(vm_page) list;       /* f: global free page queue */
                    165:        } pageq;
1.90      ad        166:        TAILQ_ENTRY(vm_page)    pdqueue;        /* p: pagedaemon queue */
1.92      ad        167:        kmutex_t                interlock;      /* s: lock on identity */
                    168:        uint32_t                pqflags;        /* i: pagedaemon flags */
1.96      ad        169:        uint32_t                flags;          /* o: object flags */
1.92      ad        170:        paddr_t                 phys_addr;      /* o: physical address of pg */
1.85      ad        171:        uint32_t                loan_count;     /* o,i: num. active loans */
                    172:        uint32_t                wire_count;     /* o,i: wired down map refs */
1.92      ad        173:        struct vm_anon          *uanon;         /* o,i: anon */
                    174:        struct uvm_object       *uobject;       /* o,i: object */
                    175:        voff_t                  offset;         /* o: offset into object */
1.21      thorpej   176:
1.22      thorpej   177: #ifdef __HAVE_VM_PAGE_MD
1.74      rmind     178:        struct vm_page_md       mdpage;         /* ?: pmap-specific data */
1.22      thorpej   179: #endif
1.21      thorpej   180:
1.16      mrg       181: #if defined(UVM_PAGE_TRKOWN)
1.18      chs       182:        /* debugging fields to track page ownership */
                    183:        pid_t                   owner;          /* proc that set PG_BUSY */
1.48      perseant  184:        lwpid_t                 lowner;         /* lwp that set PG_BUSY */
1.40      chs       185:        const char              *owner_tag;     /* why it was set busy */
1.16      mrg       186: #endif
                    187: };
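
/*
 * Editor's note -- an illustrative sketch, not part of uvm_page.h.  It shows
 * how the field markings above are meant to be read: wire_count is marked
 * "o,i", so it is changed (here via uvm_pagewire()) with both the owner's
 * lock and pg->interlock held, taking the owner's lock first per the
 * documented lock order.  The helper name is hypothetical, the page is
 * assumed to be owned by a uvm_object, and whether uvm_pagewire() itself
 * asserts these locks is not shown here.
 */
static void
example_wire_object_page(struct vm_page *pg)
{
        struct uvm_object *uobj = pg->uobject;

        rw_enter(uobj->vmobjlock, RW_WRITER);   /* "o": owner's lock */
        uvm_pagelock(pg);                       /* "i": pg->interlock */
        uvm_pagewire(pg);                       /* "o,i" field: o&i to change */
        uvm_pageunlock(pg);
        rw_exit(uobj->vmobjlock);
}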
                    188:
                    189: /*
1.97      ad        190:  * Overview of UVM page flags, stored in pg->flags.
1.75      rmind     191:  *
                    192:  * Locking notes:
                    193:  *
1.77      riastrad  194:  * PG_, struct vm_page::flags  => locked by owner
1.85      ad        195:  * PG_AOBJ                     => additionally locked by vm_page::interlock
                    196:  * PG_ANON                     => additionally locked by vm_page::interlock
                    197:  * PG_FREE                     => additionally locked by uvm_fpageqlock
                    198:  *                                for uvm_pglistalloc()
1.75      rmind     199:  *
                    200:  * Flag descriptions:
                    201:  *
1.96      ad        202:  * PG_CLEAN:
                    203:  *     Page is known clean.
                     204:  *     The contents of the page are consistent with its backing store.
                    205:  *
                    206:  * PG_DIRTY:
                    207:  *     Page is known dirty.
                    208:  *     To avoid losing data, the contents of the page should be written
                    209:  *     back to the backing store before freeing the page.
                    210:  *
1.75      rmind     211:  * PG_BUSY:
                    212:  *     Page is long-term locked, usually because I/O (a transfer between
                    213:  *     the page and its backing store) is in progress.  An LWP attempting
1.100     ad        214:  *     to access the page shall set PQ_WANTED and wait.  PG_BUSY may only
                    215:  *     be set with a write lock held on the object.
1.75      rmind     216:  *
1.96      ad        217:  * PG_PAGEOUT:
                    218:  *     Indicates that the page is being paged-out in preparation for
                    219:  *     being freed.
                    220:  *
1.75      rmind     221:  * PG_RELEASED:
                    222:  *     Indicates that the page, which is currently PG_BUSY, should be freed
                    223:  *     after the long-term lock is released.  It is the responsibility of
                    224:  *     the owning LWP (i.e. the one which set PG_BUSY) to do so.
                    225:  *
                    226:  * PG_FAKE:
                    227:  *     Page has been allocated, but not yet initialised.  The flag is used
                    228:  *     to avoid overwriting valid data, e.g. to prevent a read from the
                    229:  *     backing store when the in-core data is newer.
                    230:  *
                    231:  * PG_RDONLY:
                    232:  *     Indicates that the page must be mapped read-only.
                    233:  *
                    234:  * PG_ZERO:
                    235:  *     Indicates that the page has been pre-zeroed.  This flag is only
                    236:  *     set when the page is not in the queues and is cleared when the
                    237:  *     page is placed on the free list.
                    238:  *
                    239:  * PG_MARKER:
1.96      ad        240:  *     Dummy marker page, generally used for list traversal.
                    241:  */
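
/*
 * Editor's note -- an illustrative sketch, not part of uvm_page.h, of the
 * owner's side of the PG_BUSY protocol described above: the flag is set only
 * while holding a write lock on the object, and when the owner is done it
 * wakes any PQ_WANTED waiters (see below) and clears PG_BUSY before dropping
 * the lock.  The helper name is hypothetical and the exact ordering used by
 * the real code may differ.
 */
static void
example_busy_then_unbusy(struct uvm_object *uobj, struct vm_page *pg)
{
        rw_enter(uobj->vmobjlock, RW_WRITER);
        pg->flags |= PG_BUSY;                   /* long-term "lock" on the page */
        rw_exit(uobj->vmobjlock);

        /* ... perform I/O or other long-running work on the page ... */

        rw_enter(uobj->vmobjlock, RW_WRITER);
        uvm_pagelock(pg);                       /* pg->interlock protects pqflags */
        uvm_pagewakeup(pg);                     /* wake PQ_WANTED waiters, if any */
        uvm_pageunlock(pg);
        pg->flags &= ~PG_BUSY;
        rw_exit(uobj->vmobjlock);
}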
                    242:
                    243: /*
                    244:  * if you want to renumber PG_CLEAN and PG_DIRTY, check __CTASSERTs in
                    245:  * uvm_page_status.c first.
1.75      rmind     246:  */
                    247:
1.96      ad        248: #define        PG_CLEAN        0x00000001      /* page is known clean */
                    249: #define        PG_DIRTY        0x00000002      /* page is known dirty */
                    250: #define        PG_BUSY         0x00000004      /* page is locked */
                    251: #define        PG_PAGEOUT      0x00000010      /* page to be freed for pagedaemon */
                    252: #define        PG_RELEASED     0x00000020      /* page to be freed when unbusied */
                    253: #define        PG_FAKE         0x00000040      /* page is not yet initialized */
                    254: #define        PG_RDONLY       0x00000080      /* page must be mapped read-only */
                    255: #define        PG_ZERO         0x00000100      /* page is pre-zero'd */
                    256: #define        PG_TABLED       0x00000200      /* page is tabled in object */
                    257: #define        PG_AOBJ         0x00000400      /* page is part of an anonymous
1.85      ad        258:                                           uvm_object */
1.96      ad        259: #define        PG_ANON         0x00000800      /* page is part of an anon, rather
1.85      ad        260:                                           than an uvm_object */
1.96      ad        261: #define        PG_FILE         0x00001000      /* file backed (non-anonymous) */
                    262: #define        PG_READAHEAD    0x00002000      /* read-ahead but not "hit" yet */
                    263: #define        PG_FREE         0x00004000      /* page is on free list */
                    264: #define        PG_MARKER       0x00008000      /* dummy marker page */
                    265: #define        PG_PAGER1       0x00010000      /* pager-specific flag */
                    266:
                    267: #define        PG_STAT         (PG_ANON|PG_AOBJ|PG_FILE)
                    268: #define        PG_SWAPBACKED   (PG_ANON|PG_AOBJ)
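
/*
 * Editor's note -- an illustrative, hypothetical helper, not part of
 * uvm_page.h, showing what the composite masks are for: PG_STAT picks out a
 * page's identity bits in one test, and PG_SWAPBACKED covers the two
 * anonymous-memory cases that page out to swap.
 */
static inline const char *
example_page_identity(const struct vm_page *pg)
{
        switch (pg->flags & PG_STAT) {
        case PG_ANON:
                return "anon";          /* owned by a vm_anon */
        case PG_AOBJ:
                return "aobj";          /* anonymous uvm_object, e.g. tmpfs */
        case PG_FILE:
                return "file";          /* file-backed uvm_object */
        default:
                return "none";          /* e.g. free or unmanaged */
        }
}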
1.16      mrg       269:
1.46      yamt      270: #define        UVM_PGFLAGBITS \
1.100     ad        271:        "\20\1CLEAN\2DIRTY\3BUSY" \
1.96      ad        272:        "\5PAGEOUT\6RELEASED\7FAKE\10RDONLY" \
                    273:        "\11ZERO\12TABLED\13AOBJ\14ANON" \
                    274:        "\15FILE\16READAHEAD\17FREE\20MARKER" \
                    275:        "\21PAGER1"
1.16      mrg       276:
                    277: /*
1.97      ad        278:  * Flags stored in pg->pqflags, which is protected by pg->interlock.
1.93      ad        279:  *
1.100     ad        280:  * PQ_PRIVATE:
                    281:  *     ... is for uvmpdpol to do whatever it wants with.
                    282:  *
                    283:  * PQ_INTENT_SET:
                    284:  *     Indicates that the intent set on the page has not yet been realized.
                    285:  *
                    286:  * PQ_INTENT_QUEUED:
                    287:  *     Indicates that the page is, or will soon be, on a per-CPU queue for
                    288:  *     the intent to be realized.
                    289:  *
                    290:  * PQ_WANTED:
                    291:  *     Indicates that the page, which is currently PG_BUSY, is wanted by
                    292:  *     some other LWP.  The page owner (i.e. the LWP which set PG_BUSY) is
                    293:  *     responsible for clearing both flags and waking any waiters once it
                    294:  *     has released the long-term lock (PG_BUSY).
1.93      ad        295:  */
                    296:
                    297: #define        PQ_INTENT_A             0x00000000      /* intend activation */
                    298: #define        PQ_INTENT_I             0x00000001      /* intend deactivation */
                    299: #define        PQ_INTENT_E             0x00000002      /* intend enqueue */
                    300: #define        PQ_INTENT_D             0x00000003      /* intend dequeue */
                    301: #define        PQ_INTENT_MASK          0x00000003      /* mask of intended state */
                    302: #define        PQ_INTENT_SET           0x00000004      /* not realized yet */
                    303: #define        PQ_INTENT_QUEUED        0x00000008      /* queued for processing */
1.97      ad        304: #define        PQ_PRIVATE              0x00000ff0      /* private for pdpolicy */
1.100     ad        305: #define        PQ_WANTED               0x00001000      /* someone is waiting for page */
1.97      ad        306:
                    307: #define        UVM_PQFLAGBITS \
                    308:        "\20\1INTENT_0\2INTENT_1\3INTENT_SET\4INTENT_QUEUED" \
                    309:        "\5PRIVATE1\6PRIVATE2\7PRIVATE3\10PRIVATE4" \
1.100     ad        310:        "\11PRIVATE5\12PRIVATE6\13PRIVATE7\14PRIVATE8" \
                    311:        "\15WANTED"
1.93      ad        312:
                    313: /*
1.16      mrg       314:  * physical memory layout structure
                    315:  *
                    316:  * MD vmparam.h must #define:
                     317:  *   VM_PHYSSEG_MAX = max number of physical memory segments we support
                     318:  *                (if this is "1" then we revert to a "contig" case)
                     319:  *   VM_PHYSSEG_STRAT: memory sort/search options (for VM_PHYSSEG_MAX > 1)
                    320:  *     - VM_PSTRAT_RANDOM:   linear search (random order)
                    321:  *     - VM_PSTRAT_BSEARCH:  binary search (sorted by address)
                    322:  *     - VM_PSTRAT_BIGFIRST: linear search (sorted by largest segment first)
                    323:  *      - others?
1.17      mrg       324:  *   XXXCDC: eventually we should purge all left-over global variables...
1.16      mrg       325:  */
                    326: #define VM_PSTRAT_RANDOM       1
                    327: #define VM_PSTRAT_BSEARCH      2
                    328: #define VM_PSTRAT_BIGFIRST     3
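
/*
 * Editor's note -- a hypothetical example, not part of uvm_page.h, of the
 * machine-dependent settings described above; an architecture's
 * <machine/vmparam.h> supplies the real values, which vary by port.
 */
#define VM_PHYSSEG_MAX          16                      /* e.g. up to 16 segments */
#define VM_PHYSSEG_STRAT        VM_PSTRAT_BSEARCH       /* e.g. binary search */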
                    329:
1.13      thorpej   330: #ifdef _KERNEL
                    331:
1.1       mrg       332: /*
1.15      thorpej   333:  * globals
                    334:  */
                    335:
1.47      thorpej   336: extern bool vm_page_zero_enable;
1.15      thorpej   337:
                    338: /*
1.8       chuck     339:  * prototypes: the following prototypes define the interface to pages
1.1       mrg       340:  */
                    341:
1.37      junyoung  342: void uvm_page_init(vaddr_t *, vaddr_t *);
1.1       mrg       343: #if defined(UVM_PAGE_TRKOWN)
1.40      chs       344: void uvm_page_own(struct vm_page *, const char *);
1.1       mrg       345: #endif
1.8       chuck     346: #if !defined(PMAP_STEAL_MEMORY)
1.47      thorpej   347: bool uvm_page_physget(paddr_t *);
1.8       chuck     348: #endif
1.37      junyoung  349: void uvm_page_recolor(int);
1.89      ad        350: void uvm_page_rebucket(void);
1.37      junyoung  351: void uvm_pageidlezero(void);
                    352:
1.43      yamt      353: void uvm_pageactivate(struct vm_page *);
1.37      junyoung  354: vaddr_t uvm_pageboot_alloc(vsize_t);
1.43      yamt      355: void uvm_pagecopy(struct vm_page *, struct vm_page *);
                    356: void uvm_pagedeactivate(struct vm_page *);
                    357: void uvm_pagedequeue(struct vm_page *);
1.46      yamt      358: void uvm_pageenqueue(struct vm_page *);
1.37      junyoung  359: void uvm_pagefree(struct vm_page *);
1.93      ad        360: void uvm_pagelock(struct vm_page *);
                    361: void uvm_pagelock2(struct vm_page *, struct vm_page *);
                    362: void uvm_pageunlock(struct vm_page *);
                    363: void uvm_pageunlock2(struct vm_page *, struct vm_page *);
1.37      junyoung  364: void uvm_page_unbusy(struct vm_page **, int);
1.43      yamt      365: struct vm_page *uvm_pagelookup(struct uvm_object *, voff_t);
                    366: void uvm_pageunwire(struct vm_page *);
                    367: void uvm_pagewire(struct vm_page *);
                    368: void uvm_pagezero(struct vm_page *);
1.57      thorpej   369: bool uvm_pageismanaged(paddr_t);
1.98      ad        370: bool uvm_page_owner_locked_p(struct vm_page *, bool);
1.89      ad        371: void uvm_pgfl_lock(void);
                    372: void uvm_pgfl_unlock(void);
1.96      ad        373: unsigned int uvm_pagegetdirty(struct vm_page *);
                    374: void uvm_pagemarkdirty(struct vm_page *, unsigned int);
                    375: bool uvm_pagecheckdirty(struct vm_page *, bool);
                    376: bool uvm_pagereadonly_p(struct vm_page *);
                    377: bool uvm_page_locked_p(struct vm_page *);
1.102   ! ad        378: void uvm_pagewakeup(struct vm_page *);
1.100     ad        379: void uvm_pagewait(struct vm_page *, krwlock_t *, const char *);
1.9       thorpej   380:
1.43      yamt      381: int uvm_page_lookup_freelist(struct vm_page *);
1.16      mrg       382:
1.65      uebayasi  383: struct vm_page *uvm_phys_to_vm_page(paddr_t);
                    384: paddr_t uvm_vm_page_to_phys(const struct vm_page *);
1.16      mrg       385:
1.83      jdolecek  386: #if defined(PMAP_DIRECT)
1.84      jdolecek  387: extern bool ubc_direct;
1.83      jdolecek  388: int uvm_direct_process(struct vm_page **, u_int, voff_t, vsize_t,
                    389:            int (*)(void *, size_t, void *), void *);
                    390: #endif
                    391:
1.16      mrg       392: /*
1.96      ad        393:  * page dirtiness status for uvm_pagegetdirty and uvm_pagemarkdirty
                    394:  *
                    395:  * UNKNOWN means that we need to consult pmap to know if the page is
                    396:  * dirty or not.
                    397:  * basically, UVM_PAGE_STATUS_CLEAN implies that the page has no writable
                    398:  * mapping.
                    399:  *
                    400:  * if you want to renumber these, check __CTASSERTs in
                    401:  * uvm_page_status.c first.
                    402:  */
                    403:
                    404: #define        UVM_PAGE_STATUS_UNKNOWN 0
                    405: #define        UVM_PAGE_STATUS_CLEAN   1
                    406: #define        UVM_PAGE_STATUS_DIRTY   2
                    407: #define        UVM_PAGE_NUM_STATUS     3
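
/*
 * Editor's note -- an illustrative sketch, not part of uvm_page.h, of how a
 * writeback path might interpret these values via uvm_pagegetdirty(),
 * declared above.  The helper name is hypothetical and the caller is assumed
 * to hold the page owner's lock.
 */
static bool
example_needs_writeback(struct vm_page *pg)
{
        switch (uvm_pagegetdirty(pg)) {
        case UVM_PAGE_STATUS_DIRTY:
                return true;            /* known dirty: flush before freeing */
        case UVM_PAGE_STATUS_CLEAN:
                return false;           /* no writable mapping, known clean */
        case UVM_PAGE_STATUS_UNKNOWN:
        default:
                /* must consult pmap, e.g. via uvm_pagecheckdirty() */
                return true;            /* conservative answer for this sketch */
        }
}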
                    408:
                    409: /*
1.16      mrg       410:  * macros
                    411:  */
1.31      chs       412:
1.65      uebayasi  413: #define VM_PAGE_TO_PHYS(entry) uvm_vm_page_to_phys(entry)
1.20      thorpej   414:
1.69      uebayasi  415: #ifdef __HAVE_VM_PAGE_MD
                    416: #define        VM_PAGE_TO_MD(pg)       (&(pg)->mdpage)
                    417: #endif
                    418:
1.20      thorpej   419: /*
1.88      ad        420:  * Compute the page color for a given page.
1.20      thorpej   421:  */
1.88      ad        422: #define        VM_PGCOLOR(pg) \
1.24      thorpej   423:        (atop(VM_PAGE_TO_PHYS((pg))) & uvmexp.colormask)
1.65      uebayasi  424: #define        PHYS_TO_VM_PAGE(pa)     uvm_phys_to_vm_page(pa)
1.89      ad        425:
                    426: /*
                    427:  * VM_PAGE_IS_FREE() can't tell if the page is on global free list, or a
                    428:  * per-CPU cache.  If you need to be certain, pause caching.
                    429:  */
1.85      ad        430: #define VM_PAGE_IS_FREE(entry)  ((entry)->flags & PG_FREE)
1.35      yamt      431:
1.88      ad        432: /*
                     433:  * Use the lower 10 bits of pg->phys_addr to cache some locators for
                    434:  * the page.  This implies that the smallest possible page size is 1kB, and
                    435:  * that nobody should use pg->phys_addr directly (use VM_PAGE_TO_PHYS()).
                    436:  *
                    437:  * - 5 bits for the freelist index, because uvm_page_lookup_freelist()
                    438:  *   traverses an rbtree and therefore features prominently in traces
                     439:  *   captured during performance testing.  It would probably be more useful to
                    440:  *   cache physseg index here because freelist can be inferred from physseg,
                    441:  *   but it requires changes to allocation for UVM_HOTPLUG, so for now we'll
                    442:  *   go with freelist.
                    443:  *
                    444:  * - 5 bits for "bucket", a way for us to categorise pages further as
                    445:  *   needed (e.g. NUMA node).
                    446:  *
                    447:  * None of this is set in stone; it can be adjusted as needed.
                    448:  */
1.94      ad        449:
                    450: #define        UVM_PHYSADDR_FREELIST   __BITS(0,4)
                    451: #define        UVM_PHYSADDR_BUCKET     __BITS(5,9)
                    452:
1.88      ad        453: static inline unsigned
                    454: uvm_page_get_freelist(struct vm_page *pg)
                    455: {
1.94      ad        456:        unsigned fl = __SHIFTOUT(pg->phys_addr, UVM_PHYSADDR_FREELIST);
1.88      ad        457:        KASSERT(fl == (unsigned)uvm_page_lookup_freelist(pg));
                    458:        return fl;
                    459: }
                    460:
                    461: static inline unsigned
                    462: uvm_page_get_bucket(struct vm_page *pg)
                    463: {
1.94      ad        464:        return __SHIFTOUT(pg->phys_addr, UVM_PHYSADDR_BUCKET);
1.88      ad        465: }
                    466:
                    467: static inline void
                    468: uvm_page_set_freelist(struct vm_page *pg, unsigned fl)
                    469: {
                    470:        KASSERT(fl < 32);
1.94      ad        471:        pg->phys_addr &= ~UVM_PHYSADDR_FREELIST;
                    472:        pg->phys_addr |= __SHIFTIN(fl, UVM_PHYSADDR_FREELIST);
1.88      ad        473: }
                    474:
                    475: static inline void
                    476: uvm_page_set_bucket(struct vm_page *pg, unsigned b)
                    477: {
                    478:        KASSERT(b < 32);
1.94      ad        479:        pg->phys_addr &= ~UVM_PHYSADDR_BUCKET;
                    480:        pg->phys_addr |= __SHIFTIN(b, UVM_PHYSADDR_BUCKET);
1.88      ad        481: }
                    482:
1.35      yamt      483: #ifdef DEBUG
                    484: void uvm_pagezerocheck(struct vm_page *);
                    485: #endif /* DEBUG */
1.13      thorpej   486:
                    487: #endif /* _KERNEL */
1.1       mrg       488:
1.4       perry     489: #endif /* _UVM_UVM_PAGE_H_ */
