Annotation of src/sys/kern/subr_pcu.c, Revision 1.19.12.1

1.19.12.1! bouyer      1: /*     $NetBSD: subr_pcu.c,v 1.20 2017/03/16 16:13:21 chs Exp $        */
1.1       rmind       2:
                      3: /*-
1.18      rmind       4:  * Copyright (c) 2011, 2014 The NetBSD Foundation, Inc.
1.1       rmind       5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
                      8:  * by Mindaugas Rasiukevicius.
                      9:  *
                     10:  * Redistribution and use in source and binary forms, with or without
                     11:  * modification, are permitted provided that the following conditions
                     12:  * are met:
                     13:  * 1. Redistributions of source code must retain the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer.
                     15:  * 2. Redistributions in binary form must reproduce the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer in the
                     17:  *    documentation and/or other materials provided with the distribution.
                     18:  *
                     19:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     20:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     21:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     22:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     23:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     24:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     25:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     26:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     27:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     28:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     29:  * POSSIBILITY OF SUCH DAMAGE.
                     30:  */
                     31:
                     32: /*
                      33:  * Per CPU Unit (PCU) is an interface to manage synchronization of any
                      34:  * per-CPU context (unit) tied to an LWP context.  Typical use: FPU state.
                     35:  *
                     36:  * Concurrency notes:
                     37:  *
                     38:  *     PCU state may be loaded only by the current LWP, that is, curlwp.
                      39:  *     Therefore, only the LWP itself can set a CPU for lwp_t::l_pcu_cpu[id].
                     40:  *
1.18      rmind      41:  *     There are some important rules about operation calls.  The request
                      42:  *     for a PCU release can come from a) the owner LWP (regardless of
                      43:  *     whether the PCU state is on the current CPU or a remote CPU), or
                      44:  *     b) any other LWP running on that CPU (in which case the owner LWP
                      45:  *     is on a remote CPU or sleeping).
                     46:  *
                     47:  *     In any case, the PCU state can *only* be changed from the current
                      48:  *     CPU.  If said PCU state is on a remote CPU, a cross-call will be
                      49:  *     sent by the owner LWP.  Therefore, struct cpu_info::ci_pcu_curlwp[id]
                     50:  *     may only be changed by the current CPU and lwp_t::l_pcu_cpu[id] may
                     51:  *     only be cleared by the CPU which has the PCU state loaded.
1.1       rmind      52:  */
                     53:
                     54: #include <sys/cdefs.h>
1.19.12.1! bouyer     55: __KERNEL_RCSID(0, "$NetBSD: subr_pcu.c,v 1.20 2017/03/16 16:13:21 chs Exp $");
1.1       rmind      56:
                     57: #include <sys/param.h>
                     58: #include <sys/cpu.h>
                     59: #include <sys/lwp.h>
                     60: #include <sys/pcu.h>
1.19      rmind      61: #include <sys/ipi.h>
1.1       rmind      62:
1.3       matt       63: #if PCU_UNIT_COUNT > 0
                     64:
1.13      matt       65: static inline void pcu_do_op(const pcu_ops_t *, lwp_t * const, const int);
                     66: static void pcu_lwp_op(const pcu_ops_t *, lwp_t *, const int);
                     67:
1.18      rmind      68: /*
                     69:  * Internal PCU commands for the pcu_do_op() function.
                     70:  */
                     71: #define        PCU_CMD_SAVE            0x01    /* save PCU state to the LWP */
                     72: #define        PCU_CMD_RELEASE         0x02    /* release PCU state on the CPU */
1.13      matt       73:
1.18      rmind      74: /*
1.19      rmind      75:  * Message structure passed to another CPU via ipi(9).
1.18      rmind      76:  */
                     77: typedef struct {
                     78:        const pcu_ops_t *pcu;
                     79:        lwp_t *         owner;
                     80:        const int       flags;
1.19      rmind      81: } pcu_ipi_msg_t;
                     82:
                     83: /*
                     84:  * PCU IPIs run at IPL_HIGH (aka IPL_PCU in this code).
                     85:  */
                     86: #define        splpcu          splhigh
1.1       rmind      87:
1.18      rmind      88: /* PCU operations structure provided by the MD code. */
                     89: extern const pcu_ops_t * const pcu_ops_md_defs[];
1.4       rmind      90:
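/*
 * Example (a minimal sketch, not taken from any particular port): the MD
 * code provides one pcu_ops_t per unit and lists them in pcu_ops_md_defs[].
 * Only the pcu_id and pcu_state_{save,load,release} members are implied by
 * this file; PCU_FPU and the fpu_state_*() callbacks are hypothetical names.
 *
 *	static const pcu_ops_t fpu_ops = {
 *		.pcu_id = PCU_FPU,
 *		.pcu_state_save = fpu_state_save,
 *		.pcu_state_load = fpu_state_load,
 *		.pcu_state_release = fpu_state_release,
 *	};
 *
 *	const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = {
 *		[PCU_FPU] = &fpu_ops,
 *	};
 */
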
1.11      yamt       91: /*
                     92:  * pcu_switchpoint: release PCU state if the LWP is being run on another CPU.
1.19      rmind      93:  * This routine is called on each context switch by mi_switch().
1.11      yamt       94:  */
1.1       rmind      95: void
1.4       rmind      96: pcu_switchpoint(lwp_t *l)
1.1       rmind      97: {
1.18      rmind      98:        const uint32_t pcu_valid = l->l_pcu_valid;
1.19      rmind      99:        int s;
1.1       rmind     100:
1.12      matt      101:        KASSERTMSG(l == curlwp, "l %p != curlwp %p", l, curlwp);
1.4       rmind     102:
1.18      rmind     103:        if (__predict_true(pcu_valid == 0)) {
1.4       rmind     104:                /* PCUs are not in use. */
                    105:                return;
                    106:        }
1.19      rmind     107:        s = splpcu();
1.13      matt      108:        for (u_int id = 0; id < PCU_UNIT_COUNT; id++) {
1.18      rmind     109:                if ((pcu_valid & (1U << id)) == 0) {
1.4       rmind     110:                        continue;
                    111:                }
1.5       matt      112:                struct cpu_info * const pcu_ci = l->l_pcu_cpu[id];
1.4       rmind     113:                if (pcu_ci == NULL || pcu_ci == l->l_cpu) {
                    114:                        continue;
                    115:                }
                    116:                const pcu_ops_t * const pcu = pcu_ops_md_defs[id];
1.18      rmind     117:                pcu->pcu_state_release(l);
1.4       rmind     118:        }
1.19      rmind     119:        splx(s);
1.1       rmind     120: }
                    121:
1.11      yamt      122: /*
                    123:  * pcu_discard_all: discard PCU state of the given LWP.
                    124:  *
                    125:  * Used by exec and LWP exit.
                    126:  */
1.7       matt      127: void
                    128: pcu_discard_all(lwp_t *l)
                    129: {
1.18      rmind     130:        const uint32_t pcu_valid = l->l_pcu_valid;
1.7       matt      131:
1.18      rmind     132:        KASSERT(l == curlwp || ((l->l_flag & LW_SYSTEM) && pcu_valid == 0));
1.7       matt      133:
1.18      rmind     134:        if (__predict_true(pcu_valid == 0)) {
1.7       matt      135:                /* PCUs are not in use. */
                    136:                return;
                    137:        }
                    138:        for (u_int id = 0; id < PCU_UNIT_COUNT; id++) {
1.18      rmind     139:                if ((pcu_valid & (1U << id)) == 0) {
1.7       matt      140:                        continue;
                    141:                }
                    142:                if (__predict_true(l->l_pcu_cpu[id] == NULL)) {
                    143:                        continue;
                    144:                }
                    145:                const pcu_ops_t * const pcu = pcu_ops_md_defs[id];
1.18      rmind     146:                pcu_lwp_op(pcu, l, PCU_CMD_RELEASE);
1.7       matt      147:        }
1.18      rmind     148:        l->l_pcu_valid = 0;
1.7       matt      149: }
                    150:
1.11      yamt      151: /*
                     152:  * pcu_save_all: save PCU state of the given LWP so that e.g. coredump can
                    153:  * examine it.
                    154:  */
1.7       matt      155: void
                    156: pcu_save_all(lwp_t *l)
                    157: {
1.18      rmind     158:        const uint32_t pcu_valid = l->l_pcu_valid;
                    159:        int flags = PCU_CMD_SAVE;
                    160:
                    161:        /* If LW_WCORE, we are also releasing the state. */
                    162:        if (__predict_false(l->l_flag & LW_WCORE)) {
                    163:                flags |= PCU_CMD_RELEASE;
                    164:        }
1.7       matt      165:
1.9       matt      166:        /*
                    167:         * Normally we save for the current LWP, but sometimes we get called
                    168:         * with a different LWP (forking a system LWP or doing a coredump of
                    169:         * a process with multiple threads) and we need to deal with that.
                    170:         */
1.18      rmind     171:        KASSERT(l == curlwp || (((l->l_flag & LW_SYSTEM) ||
                    172:            (curlwp->l_proc == l->l_proc && l->l_stat == LSSUSPENDED)) &&
                    173:            pcu_valid == 0));
1.7       matt      174:
1.18      rmind     175:        if (__predict_true(pcu_valid == 0)) {
1.7       matt      176:                /* PCUs are not in use. */
                    177:                return;
                    178:        }
                    179:        for (u_int id = 0; id < PCU_UNIT_COUNT; id++) {
1.18      rmind     180:                if ((pcu_valid & (1U << id)) == 0) {
1.7       matt      181:                        continue;
                    182:                }
                    183:                if (__predict_true(l->l_pcu_cpu[id] == NULL)) {
                    184:                        continue;
                    185:                }
                    186:                const pcu_ops_t * const pcu = pcu_ops_md_defs[id];
1.9       matt      187:                pcu_lwp_op(pcu, l, flags);
1.7       matt      188:        }
                    189: }
                    190:
1.1       rmind     191: /*
1.4       rmind     192:  * pcu_do_op: save/release PCU state on the current CPU.
1.1       rmind     193:  *
1.19      rmind     194:  * => Must be called at IPL_PCU or from the IPI handler.
1.1       rmind     195:  */
1.4       rmind     196: static inline void
                    197: pcu_do_op(const pcu_ops_t *pcu, lwp_t * const l, const int flags)
                    198: {
                    199:        struct cpu_info * const ci = curcpu();
                    200:        const u_int id = pcu->pcu_id;
1.18      rmind     201:
                    202:        KASSERT(l->l_pcu_cpu[id] == ci);
                    203:
                    204:        if (flags & PCU_CMD_SAVE) {
                    205:                pcu->pcu_state_save(l);
                    206:        }
                    207:        if (flags & PCU_CMD_RELEASE) {
                    208:                pcu->pcu_state_release(l);
1.4       rmind     209:                ci->ci_pcu_curlwp[id] = NULL;
                    210:                l->l_pcu_cpu[id] = NULL;
                    211:        }
                    212: }
                    213:
                    214: /*
1.19      rmind     215:  * pcu_cpu_ipi: helper routine to call pcu_do_op() via ipi(9).
1.4       rmind     216:  */
1.1       rmind     217: static void
1.19      rmind     218: pcu_cpu_ipi(void *arg)
1.1       rmind     219: {
1.19      rmind     220:        const pcu_ipi_msg_t *pcu_msg = arg;
1.18      rmind     221:        const pcu_ops_t *pcu = pcu_msg->pcu;
1.1       rmind     222:        const u_int id = pcu->pcu_id;
1.18      rmind     223:        lwp_t *l = pcu_msg->owner;
1.4       rmind     224:
1.18      rmind     225:        KASSERT(pcu_msg->owner != NULL);
1.1       rmind     226:
1.18      rmind     227:        if (curcpu()->ci_pcu_curlwp[id] != l) {
                    228:                /*
                    229:                 * Different ownership: another LWP raced with us and
                     230:                 * performed the save and release.  There is nothing to do.
                    231:                 */
                    232:                KASSERT(l->l_pcu_cpu[id] == NULL);
1.1       rmind     233:                return;
                    234:        }
1.18      rmind     235:        pcu_do_op(pcu, l, pcu_msg->flags);
1.1       rmind     236: }
                    237:
                    238: /*
                     239:  * pcu_lwp_op: perform PCU state save, release, or both operations on the LWP.
                    240:  */
                    241: static void
1.13      matt      242: pcu_lwp_op(const pcu_ops_t *pcu, lwp_t *l, const int flags)
1.1       rmind     243: {
                    244:        const u_int id = pcu->pcu_id;
                    245:        struct cpu_info *ci;
                    246:        int s;
                    247:
                    248:        /*
                     249:         * The caller should have re-checked if there is any state to manage.
                     250:         * Block the interrupts and inspect again, since a cross-call sent
                     251:         * by a remote CPU could have changed the state.
                    252:         */
1.19      rmind     253:        s = splpcu();
1.1       rmind     254:        ci = l->l_pcu_cpu[id];
                    255:        if (ci == curcpu()) {
                    256:                /*
                    257:                 * State is on the current CPU - just perform the operations.
                    258:                 */
1.6       matt      259:                KASSERTMSG(ci->ci_pcu_curlwp[id] == l,
1.10      jym       260:                    "%s: cpu%u: pcu_curlwp[%u] (%p) != l (%p)",
                    261:                     __func__, cpu_index(ci), id, ci->ci_pcu_curlwp[id], l);
1.4       rmind     262:                pcu_do_op(pcu, l, flags);
1.1       rmind     263:                splx(s);
                    264:                return;
                    265:        }
                    266:        if (__predict_false(ci == NULL)) {
                    267:                /* Cross-call has won the race - no state to manage. */
1.19      rmind     268:                splx(s);
1.1       rmind     269:                return;
                    270:        }
                    271:
                    272:        /*
1.18      rmind     273:         * The state is on the remote CPU: perform the operation(s) there.
1.1       rmind     274:         */
1.19      rmind     275:        pcu_ipi_msg_t pcu_msg = { .pcu = pcu, .owner = l, .flags = flags };
                    276:        ipi_msg_t ipi_msg = { .func = pcu_cpu_ipi, .arg = &pcu_msg };
                    277:        ipi_unicast(&ipi_msg, ci);
                    278:        splx(s);
                    279:
                    280:        /* Wait for completion. */
                    281:        ipi_wait(&ipi_msg);
1.1       rmind     282:
1.18      rmind     283:        KASSERT((flags & PCU_CMD_RELEASE) == 0 || l->l_pcu_cpu[id] == NULL);
1.1       rmind     284: }
                    285:
                    286: /*
                    287:  * pcu_load: load/initialize the PCU state of current LWP on current CPU.
                    288:  */
                    289: void
                    290: pcu_load(const pcu_ops_t *pcu)
                    291: {
1.18      rmind     292:        lwp_t *oncpu_lwp, * const l = curlwp;
1.1       rmind     293:        const u_int id = pcu->pcu_id;
                    294:        struct cpu_info *ci, *curci;
                    295:        int s;
                    296:
                    297:        KASSERT(!cpu_intr_p() && !cpu_softintr_p());
                    298:
1.19      rmind     299:        s = splpcu();
1.1       rmind     300:        curci = curcpu();
                    301:        ci = l->l_pcu_cpu[id];
                    302:
                    303:        /* Does this CPU already have our PCU state loaded? */
                    304:        if (ci == curci) {
1.19      rmind     305:                /*
                    306:                 * Fault reoccurred while the PCU state is loaded and
                    307:                 * therefore PCU should be reā€enabled.  This happens
                    308:                 * if LWP is context switched to another CPU and then
                    309:                 * switched back to the original CPU while the state
                    310:                 * on that CPU has not been changed by other LWPs.
                    311:                 *
                    312:                 * It may also happen due to instruction "bouncing" on
                    313:                 * some architectures.
                    314:                 */
1.1       rmind     315:                KASSERT(curci->ci_pcu_curlwp[id] == l);
1.19.12.1! bouyer    316:                KASSERT(pcu_valid_p(pcu, l));
1.18      rmind     317:                pcu->pcu_state_load(l, PCU_VALID | PCU_REENABLE);
1.1       rmind     318:                splx(s);
                    319:                return;
                    320:        }
                    321:
                     322:        /* If the PCU state of this LWP is on a remote CPU - save it there. */
                    323:        if (ci) {
1.19      rmind     324:                pcu_ipi_msg_t pcu_msg = { .pcu = pcu, .owner = l,
                    325:                    .flags = PCU_CMD_SAVE | PCU_CMD_RELEASE };
                    326:                ipi_msg_t ipi_msg = { .func = pcu_cpu_ipi, .arg = &pcu_msg };
                    327:                ipi_unicast(&ipi_msg, ci);
1.1       rmind     328:                splx(s);
1.18      rmind     329:
1.19      rmind     330:                /*
                    331:                 * Wait for completion, re-enter IPL_PCU and re-fetch
                    332:                 * the current CPU.
                    333:                 */
                    334:                ipi_wait(&ipi_msg);
                    335:                s = splpcu();
1.1       rmind     336:                curci = curcpu();
                    337:        }
                    338:        KASSERT(l->l_pcu_cpu[id] == NULL);
                    339:
                    340:        /* Save the PCU state on the current CPU, if there is any. */
1.18      rmind     341:        if ((oncpu_lwp = curci->ci_pcu_curlwp[id]) != NULL) {
                    342:                pcu_do_op(pcu, oncpu_lwp, PCU_CMD_SAVE | PCU_CMD_RELEASE);
                    343:                KASSERT(curci->ci_pcu_curlwp[id] == NULL);
                    344:        }
1.1       rmind     345:
                    346:        /*
                    347:         * Finally, load the state for this LWP on this CPU.  Indicate to
1.18      rmind     348:         * the load function whether PCU state was valid before this call.
1.1       rmind     349:         */
1.18      rmind     350:        const bool valid = ((1U << id) & l->l_pcu_valid) != 0;
                    351:        pcu->pcu_state_load(l, valid ? PCU_VALID : 0);
                    352:        curci->ci_pcu_curlwp[id] = l;
                    353:        l->l_pcu_cpu[id] = curci;
                    354:        l->l_pcu_valid |= (1U << id);
1.1       rmind     355:        splx(s);
                    356: }
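
/*
 * Usage sketch (hypothetical MD code, not part of this file): the typical
 * caller of pcu_load() is an "FPU disabled" trap handler running in the
 * context of curlwp; it loads the state and returns, so that the faulting
 * instruction is restarted with the unit enabled.  fpu_ops and fpu_trap()
 * are illustrative names.
 *
 *	void
 *	fpu_trap(struct trapframe *tf)
 *	{
 *		KASSERT(!cpu_intr_p() && !cpu_softintr_p());
 *		pcu_load(&fpu_ops);
 *	}
 */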
                    357:
                    358: /*
1.19.12.1! bouyer    359:  * pcu_discard: discard the PCU state of the given LWP.  If the "valid"
1.18      rmind     360:  * parameter is true, then keep considering the PCU state as valid.
1.1       rmind     361:  */
                    362: void
1.19.12.1! bouyer    363: pcu_discard(const pcu_ops_t *pcu, lwp_t *l, bool valid)
1.1       rmind     364: {
                    365:        const u_int id = pcu->pcu_id;
                    366:
                    367:        KASSERT(!cpu_intr_p() && !cpu_softintr_p());
                    368:
1.18      rmind     369:        if (__predict_false(valid)) {
                    370:                l->l_pcu_valid |= (1U << id);
                    371:        } else {
                    372:                l->l_pcu_valid &= ~(1U << id);
                    373:        }
1.1       rmind     374:        if (__predict_true(l->l_pcu_cpu[id] == NULL)) {
                    375:                return;
                    376:        }
1.18      rmind     377:        pcu_lwp_op(pcu, l, PCU_CMD_RELEASE);
1.1       rmind     378: }
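
/*
 * Usage sketch (hypothetical MD code): a cpu_setmcontext()-style routine
 * may copy new FPU register content into the LWP's save area and then call
 * pcu_discard() with valid = true, so that any stale copy loaded on a CPU
 * is released while the freshly written save area remains valid.  fpu_ops
 * and fpu_save_area() are illustrative names.
 *
 *	memcpy(fpu_save_area(l), &mcp->__fpregs, sizeof(mcp->__fpregs));
 *	pcu_discard(&fpu_ops, l, true);
 */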
                    379:
                    380: /*
                     381:  * pcu_save: save the PCU state to the given LWP.
                    382:  */
                    383: void
1.19.12.1! bouyer    384: pcu_save(const pcu_ops_t *pcu, lwp_t *l)
1.1       rmind     385: {
                    386:        const u_int id = pcu->pcu_id;
                    387:
                    388:        KASSERT(!cpu_intr_p() && !cpu_softintr_p());
                    389:
                    390:        if (__predict_true(l->l_pcu_cpu[id] == NULL)) {
                    391:                return;
                    392:        }
1.18      rmind     393:        pcu_lwp_op(pcu, l, PCU_CMD_SAVE | PCU_CMD_RELEASE);
1.1       rmind     394: }
                    395:
                    396: /*
1.18      rmind     397:  * pcu_save_all_on_cpu: save all PCU states on the current CPU.
1.15      drochner  398:  */
                    399: void
                    400: pcu_save_all_on_cpu(void)
                    401: {
1.18      rmind     402:        int s;
1.15      drochner  403:
1.19      rmind     404:        s = splpcu();
1.15      drochner  405:        for (u_int id = 0; id < PCU_UNIT_COUNT; id++) {
1.18      rmind     406:                const pcu_ops_t * const pcu = pcu_ops_md_defs[id];
                    407:                lwp_t *l;
                    408:
                    409:                if ((l = curcpu()->ci_pcu_curlwp[id]) != NULL) {
                    410:                        pcu_do_op(pcu, l, PCU_CMD_SAVE | PCU_CMD_RELEASE);
                    411:                }
1.15      drochner  412:        }
1.18      rmind     413:        splx(s);
1.15      drochner  414: }
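
/*
 * Usage sketch (hypothetical MD code): pcu_save_all_on_cpu() is useful on
 * paths where the register state of the current CPU is about to be lost,
 * e.g. just before suspending the machine, so that any PCU state loaded
 * here is first written back to the owning LWPs.  md_suspend_enter() is an
 * illustrative name.
 *
 *	void
 *	md_suspend_enter(void)
 *	{
 *		pcu_save_all_on_cpu();
 *		... (MD code that puts the CPU to sleep) ...
 *	}
 */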
                    415:
                    416: /*
1.18      rmind     417:  * pcu_valid_p: return true if the PCU state is considered valid.  Generally,
                     418:  * it becomes "valid" when pcu_load() is called.
1.1       rmind     419:  */
                    420: bool
1.19.12.1! bouyer    421: pcu_valid_p(const pcu_ops_t *pcu, const lwp_t *l)
1.1       rmind     422: {
                    423:        const u_int id = pcu->pcu_id;
                    424:
1.18      rmind     425:        return (l->l_pcu_valid & (1U << id)) != 0;
1.1       rmind     426: }
1.3       matt      427:
                    428: #endif /* PCU_UNIT_COUNT > 0 */
