
src/sys/kern/subr_psref.c, Revision 1.10

/*	$NetBSD: subr_psref.c,v 1.9 2017/12/14 05:45:55 ozaki-r Exp $	*/

/*-
 * Copyright (c) 2016 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Passive references
 *
 *	Passive references are references to objects that guarantee the
 *	object will not be destroyed until the reference is released.
 *
 *	Passive references require no interprocessor synchronization to
 *	acquire or release.  However, destroying the target of passive
 *	references requires expensive interprocessor synchronization --
 *	xcalls to determine on which CPUs the object is still in use.
 *
 *	Passive references may be held only on a single CPU and by a
 *	single LWP.  They require the caller to allocate a little stack
 *	space, a struct psref object.  Sleeping while a passive
 *	reference is held is allowed, provided that the owner's LWP is
 *	bound to a CPU -- e.g., the owner is a softint or a bound
 *	kthread.  However, sleeping should be kept to a short duration,
 *	e.g. sleeping on an adaptive lock.
 *
 *	Passive references serve as an intermediate stage between
 *	reference counting and passive serialization (pserialize(9)):
 *
 *	- If you need references to transfer from CPU to CPU or LWP to
 *	  LWP, or if you need long-term references, you must use
 *	  reference counting, e.g. with atomic operations or locks,
 *	  which incurs interprocessor synchronization for every use --
 *	  cheaper than an xcall, but not scalable.
 *
 *	- If all users *guarantee* that they will not sleep, then it is
 *	  not necessary to use passive references: you may as well just
 *	  use the even cheaper pserialize(9), because you have
 *	  satisfied the requirements of a pserialize read section.
 */
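
/*
 * Illustrative sketch of the basic acquire/release shape, using only
 * the API in this file (the "frob" type, its f_psref member, and
 * frob_psref_class are hypothetical names, not part of this file):
 *
 *	struct psref psref;
 *
 *	psref_acquire(&psref, &f->f_psref, frob_psref_class);
 *	... use f; a brief sleep is OK if this LWP is CPU-bound ...
 *	psref_release(&psref, &f->f_psref, frob_psref_class);
 */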

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_psref.c,v 1.9 2017/12/14 05:45:55 ozaki-r Exp $");

#include <sys/types.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/psref.h>
#include <sys/queue.h>
#include <sys/xcall.h>

SLIST_HEAD(psref_head, psref);

static bool	_psref_held(const struct psref_target *, struct psref_class *,
		    bool);

/*
 * struct psref_class
 *
 *	Private global state for a class of passive reference targets.
 *	Opaque to callers.
 */
struct psref_class {
	kmutex_t		prc_lock;
	kcondvar_t		prc_cv;
	struct percpu		*prc_percpu; /* struct psref_cpu */
	ipl_cookie_t		prc_iplcookie;
};

/*
 * struct psref_cpu
 *
 *	Private per-CPU state for a class of passive reference targets.
 *	Not exposed by the API.
 */
struct psref_cpu {
	struct psref_head	pcpu_head;
};

/*
 * psref_class_create(name, ipl)
 *
 *	Create a new passive reference class, with the given wchan name
 *	and ipl.
 */
struct psref_class *
psref_class_create(const char *name, int ipl)
{
	struct psref_class *class;

	ASSERT_SLEEPABLE();

	class = kmem_alloc(sizeof(*class), KM_SLEEP);
	class->prc_percpu = percpu_alloc(sizeof(struct psref_cpu));
	mutex_init(&class->prc_lock, MUTEX_DEFAULT, ipl);
	cv_init(&class->prc_cv, name);
	class->prc_iplcookie = makeiplcookie(ipl);

	return class;
}
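
/*
 * For example, a subsystem would typically create one class at
 * initialization time (the wchan name "frobref", IPL_SOFTNET, and
 * frob_psref_class are illustrative choices, not requirements):
 *
 *	frob_psref_class = psref_class_create("frobref", IPL_SOFTNET);
 */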

#ifdef DIAGNOSTIC
static void
psref_cpu_drained_p(void *p, void *cookie, struct cpu_info *ci __unused)
{
	const struct psref_cpu *pcpu = p;
	bool *retp = cookie;

	if (!SLIST_EMPTY(&pcpu->pcpu_head))
		*retp = false;
}

static bool
psref_class_drained_p(const struct psref_class *prc)
{
	bool ret = true;

	percpu_foreach(prc->prc_percpu, &psref_cpu_drained_p, &ret);

	return ret;
}
#endif /* DIAGNOSTIC */

/*
 * psref_class_destroy(class)
 *
 *	Destroy a passive reference class and free memory associated
 *	with it.  All targets in this class must have been drained and
 *	destroyed already.
 */
void
psref_class_destroy(struct psref_class *class)
{

	KASSERT(psref_class_drained_p(class));

	cv_destroy(&class->prc_cv);
	mutex_destroy(&class->prc_lock);
	percpu_free(class->prc_percpu, sizeof(struct psref_cpu));
	kmem_free(class, sizeof(*class));
}

/*
 * psref_target_init(target, class)
 *
 *	Initialize a passive reference target in the specified class.
 *	The caller is responsible for issuing a membar_producer after
 *	psref_target_init and before exposing a pointer to the target
 *	to other CPUs.
 */
void
psref_target_init(struct psref_target *target,
    struct psref_class *class)
{

	target->prt_class = class;
	target->prt_draining = false;
}
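
/*
 * Publication sketch, per the membar_producer requirement above.  The
 * frob object, frob_lock, frob_list, and the pslist(9) usage are
 * assumed for illustration only:
 *
 *	f = kmem_zalloc(sizeof(*f), KM_SLEEP);
 *	psref_target_init(&f->f_psref, frob_psref_class);
 *	membar_producer();
 *	mutex_enter(&frob_lock);
 *	PSLIST_WRITER_INSERT_HEAD(&frob_list, f, f_entry);
 *	mutex_exit(&frob_lock);
 */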

#ifdef DEBUG
static bool
psref_exist(struct psref_cpu *pcpu, struct psref *psref)
{
	struct psref *_psref;

	SLIST_FOREACH(_psref, &pcpu->pcpu_head, psref_entry) {
		if (_psref == psref)
			return true;
	}
	return false;
}

static void
psref_check_duplication(struct psref_cpu *pcpu, struct psref *psref,
    const struct psref_target *target)
{
	bool found;

	found = psref_exist(pcpu, psref);
	if (found) {
		panic("The psref is already in the list (acquiring twice?): "
		    "psref=%p target=%p", psref, target);
	}
}

static void
psref_check_existence(struct psref_cpu *pcpu, struct psref *psref,
    const struct psref_target *target)
{
	bool found;

	found = psref_exist(pcpu, psref);
	if (!found) {
		panic("The psref isn't in the list (releasing unused psref?): "
		    "psref=%p target=%p", psref, target);
	}
}
#endif /* DEBUG */

/*
 * psref_acquire(psref, target, class)
 *
 *	Acquire a passive reference to the specified target, which must
 *	be in the specified class.
 *
 *	The caller must guarantee that the target will not be destroyed
 *	before psref_acquire returns.
 *
 *	The caller must additionally guarantee that it will not switch
 *	CPUs before releasing the passive reference, either by
 *	disabling kpreemption and avoiding sleeps, or by being in a
 *	softint or in an LWP bound to a CPU.
 */
void
psref_acquire(struct psref *psref, const struct psref_target *target,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);
	KASSERTMSG(!target->prt_draining, "psref target already destroyed: %p",
	    target);

	/* Block interrupts and acquire the current CPU's reference list.  */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

#ifdef DEBUG
	/* Sanity check: this psref must not already be on the list.  */
	psref_check_duplication(pcpu, psref, target);
#endif

	/* Record our reference.  */
	SLIST_INSERT_HEAD(&pcpu->pcpu_head, psref, psref_entry);
	psref->psref_target = target;
	psref->psref_lwp = curlwp;
	psref->psref_cpu = curcpu();

	/* Release the CPU list and restore interrupts.  */
	percpu_putref(class->prc_percpu);
	splx(s);
}
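
/*
 * A typical lookup pattern combines pserialize(9) for the search with
 * psref_acquire for the reference, binding the LWP to its CPU for the
 * duration.  A sketch, in which frob_lookup, the field names, and
 * frob_psref_class are hypothetical:
 *
 *	struct psref psref;
 *	struct frob *f;
 *	int bound, s;
 *
 *	bound = curlwp_bind();
 *	s = pserialize_read_enter();
 *	f = frob_lookup(key);
 *	if (f != NULL)
 *		psref_acquire(&psref, &f->f_psref, frob_psref_class);
 *	pserialize_read_exit(s);
 *	if (f != NULL) {
 *		... use f ...
 *		psref_release(&psref, &f->f_psref, frob_psref_class);
 *	}
 *	curlwp_bindx(bound);
 */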

/*
 * psref_release(psref, target, class)
 *
 *	Release a passive reference to the specified target, which must
 *	be in the specified class.
 *
 *	The caller must not have switched CPUs or LWPs since acquiring
 *	the passive reference.
 */
void
psref_release(struct psref *psref, const struct psref_target *target,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Make sure the psref looks sensible.  */
	KASSERTMSG((psref->psref_target == target),
	    "passive reference target mismatch: %p (ref) != %p (expected)",
	    psref->psref_target, target);
	KASSERTMSG((psref->psref_lwp == curlwp),
	    "passive reference transferred from lwp %p to lwp %p",
	    psref->psref_lwp, curlwp);
	KASSERTMSG((psref->psref_cpu == curcpu()),
	    "passive reference transferred from CPU %u to CPU %u",
	    cpu_index(psref->psref_cpu), cpu_index(curcpu()));

	/*
	 * Block interrupts and remove the psref from the current CPU's
	 * list.  The caller guarantees that we are bound to a CPU (as
	 * does blocking interrupts), so the list cannot change out
	 * from under us.
	 */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);
#ifdef DEBUG
	/* Sanity check: this psref must have been acquired earlier.  */
	psref_check_existence(pcpu, psref, target);
#endif
	SLIST_REMOVE(&pcpu->pcpu_head, psref, psref, psref_entry);
	percpu_putref(class->prc_percpu);
	splx(s);

	/* If someone is waiting for users to drain, notify 'em.  */
	if (__predict_false(target->prt_draining))
		cv_broadcast(&class->prc_cv);
}

/*
 * psref_copy(pto, pfrom, class)
 *
 *	Copy a passive reference from pfrom, which must be in the
 *	specified class, to pto.  Both pfrom and pto must later be
 *	released with psref_release.
 *
 *	The caller must not have switched CPUs or LWPs since acquiring
 *	pfrom, and must not switch CPUs or LWPs before releasing both
 *	pfrom and pto.
 */
void
psref_copy(struct psref *pto, const struct psref *pfrom,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((pto != pfrom),
	    "can't copy passive reference to itself: %p",
	    pto);

	/* Make sure the pfrom reference looks sensible.  */
	KASSERTMSG((pfrom->psref_lwp == curlwp),
	    "passive reference transferred from lwp %p to lwp %p",
	    pfrom->psref_lwp, curlwp);
	KASSERTMSG((pfrom->psref_cpu == curcpu()),
	    "passive reference transferred from CPU %u to CPU %u",
	    cpu_index(pfrom->psref_cpu), cpu_index(curcpu()));
	KASSERTMSG((pfrom->psref_target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    pfrom->psref_target->prt_class, class);

	/* Block interrupts and acquire the current CPU's reference list.  */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

	/* Record the new reference.  */
	SLIST_INSERT_HEAD(&pcpu->pcpu_head, pto, psref_entry);
	pto->psref_target = pfrom->psref_target;
	pto->psref_lwp = curlwp;
	pto->psref_cpu = curcpu();

	/* Release the CPU list and restore interrupts.  */
	percpu_putref(class->prc_percpu);
	splx(s);
}

/*
 * struct psreffed
 *
 *	Global state for draining a psref target.
 */
struct psreffed {
	struct psref_class	*class;
	struct psref_target	*target;
	bool			ret;
};

static void
psreffed_p_xc(void *cookie0, void *cookie1 __unused)
{
	struct psreffed *P = cookie0;

	/*
	 * If we hold a psref to the target, then answer true.
	 *
	 * This is the only dynamic decision that may be made with
	 * psref_held.
	 *
	 * No need to lock anything here: every write transitions from
	 * false to true, so there can be no conflicting writes.  No
	 * need for a memory barrier here because P->ret is read only
	 * after xc_wait, which has already issued any necessary memory
	 * barriers.
	 */
	if (_psref_held(P->target, P->class, true))
		P->ret = true;
}

static bool
psreffed_p(struct psref_target *target, struct psref_class *class)
{
	struct psreffed P = {
		.class = class,
		.target = target,
		.ret = false,
	};

	if (__predict_true(mp_online)) {
		/*
		 * Ask all CPUs to say whether they hold a psref to the
		 * target.
		 */
		xc_wait(xc_broadcast(0, &psreffed_p_xc, &P, NULL));
	} else
		psreffed_p_xc(&P, NULL);

	return P.ret;
}

/*
 * psref_target_destroy(target, class)
 *
 *	Destroy a passive reference target.  Waits for all existing
 *	references to drain.  Caller must guarantee no new references
 *	will be acquired once it calls psref_target_destroy, e.g. by
 *	removing the target from a global list first.  May sleep.
 */
void
psref_target_destroy(struct psref_target *target, struct psref_class *class)
{

	ASSERT_SLEEPABLE();

	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Request psref_release to notify us when done.  */
	KASSERTMSG(!target->prt_draining, "psref target already destroyed: %p",
	    target);
	target->prt_draining = true;

	/* Wait until there are no more references on any CPU.  */
	while (psreffed_p(target, class)) {
		/*
		 * This enter/wait/exit business looks wrong, but it is
		 * both necessary, because psreffed_p performs a
		 * low-priority xcall and hence cannot run while a
		 * mutex is locked, and OK, because the wait is timed
		 * -- explicit wakeups are only an optimization.
		 */
		mutex_enter(&class->prc_lock);
		(void)cv_timedwait(&class->prc_cv, &class->prc_lock, 1);
		mutex_exit(&class->prc_lock);
	}

	/* No more references.  Cause subsequent psref_acquire to kassert.  */
	target->prt_class = NULL;
}
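
/*
 * Teardown sketch: unpublish the target first, so no new references
 * can be acquired, then wait for readers to drain before freeing.
 * The frob_lock, frob_list, frob_psz (a pserialize(9) handle), and
 * pslist(9) usage are assumed for illustration:
 *
 *	mutex_enter(&frob_lock);
 *	PSLIST_WRITER_REMOVE(f, f_entry);
 *	pserialize_perform(frob_psz);
 *	mutex_exit(&frob_lock);
 *	psref_target_destroy(&f->f_psref, frob_psref_class);
 *	kmem_free(f, sizeof(*f));
 */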

static bool
_psref_held(const struct psref_target *target, struct psref_class *class,
    bool lwp_mismatch_ok)
{
	const struct psref_cpu *pcpu;
	const struct psref *psref;
	int s;
	bool held = false;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Block interrupts and acquire the current CPU's reference list.  */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

	/* Search through all the references on this CPU.  */
	SLIST_FOREACH(psref, &pcpu->pcpu_head, psref_entry) {
		/* Sanity-check the reference's CPU.  */
		KASSERTMSG((psref->psref_cpu == curcpu()),
		    "passive reference transferred from CPU %u to CPU %u",
		    cpu_index(psref->psref_cpu), cpu_index(curcpu()));

		/* If it doesn't match, skip it and move on.  */
		if (psref->psref_target != target)
			continue;

		/*
		 * Sanity-check the reference's LWP if we are asserting
		 * via psref_held that this LWP holds it, but not if we
		 * are testing in psref_target_destroy whether any LWP
		 * still holds it.
		 */
		KASSERTMSG((lwp_mismatch_ok || psref->psref_lwp == curlwp),
		    "passive reference transferred from lwp %p to lwp %p",
		    psref->psref_lwp, curlwp);

		/* Stop here and report that we found it.  */
		held = true;
		break;
	}

	/* Release the CPU list and restore interrupts.  */
	percpu_putref(class->prc_percpu);
	splx(s);

	return held;
}

/*
 * psref_held(target, class)
 *
 *	True if the current CPU holds a passive reference to target,
 *	false otherwise.  May be used only inside assertions.
 */
bool
psref_held(const struct psref_target *target, struct psref_class *class)
{

	return _psref_held(target, class, false);
}
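
/*
 * Assertion sketch: a routine that requires its caller to hold a
 * reference can check for one, e.g. (f and frob_psref_class assumed
 * as above):
 *
 *	KASSERT(psref_held(&f->f_psref, frob_psref_class));
 */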
