
Annotation of src/sys/kern/subr_kcpuset.c, Revision 1.7.2.1

1.7.2.1 ! tls         1: /*     $NetBSD: subr_kcpuset.c,v 1.8 2012/09/16 22:09:33 rmind Exp $   */
1.1       rmind       2:
                      3: /*-
                      4:  * Copyright (c) 2011 The NetBSD Foundation, Inc.
                      5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
                      8:  * by Mindaugas Rasiukevicius.
                      9:  *
                     10:  * Redistribution and use in source and binary forms, with or without
                     11:  * modification, are permitted provided that the following conditions
                     12:  * are met:
                     13:  * 1. Redistributions of source code must retain the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer.
                     15:  * 2. Redistributions in binary form must reproduce the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer in the
                     17:  *    documentation and/or other materials provided with the distribution.
                     18:  *
                     19:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     20:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     21:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     22:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     23:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     24:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     25:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     26:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     27:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     28:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     29:  * POSSIBILITY OF SUCH DAMAGE.
                     30:  */
                     31:
                     32: /*
                     33:  * Kernel CPU set implementation.
                     34:  *
                      35:  * The interface can be used by kernel subsystems as a unified dynamic CPU
                      36:  * bitset implementation handling many CPUs.  The facility also supports early
                      37:  * use by MD code during boot: such bitsets are fixed up as boot proceeds.
                     38:  *
                     39:  * TODO:
                     40:  * - Handle "reverse" bitset on fixup/grow.
                     41:  */
                     42:
                     43: #include <sys/cdefs.h>
1.7.2.1 ! tls        44: __KERNEL_RCSID(0, "$NetBSD: subr_kcpuset.c,v 1.8 2012/09/16 22:09:33 rmind Exp $");
1.1       rmind      45:
                     46: #include <sys/param.h>
                     47: #include <sys/types.h>
                     48:
                     49: #include <sys/atomic.h>
                     50: #include <sys/sched.h>
                     51: #include <sys/kcpuset.h>
                     52: #include <sys/pool.h>
                     53:
                     54: /* Number of CPUs to support. */
                     55: #define        KC_MAXCPUS              roundup2(MAXCPUS, 32)
                     56:
                     57: /*
                     58:  * Structure of dynamic CPU set in the kernel.
                     59:  */
                     60: struct kcpuset {
                     61:        uint32_t                bits[0];
                     62: };
                     63:
                     64: typedef struct kcpuset_impl {
                     65:        /* Reference count. */
                     66:        u_int                   kc_refcnt;
                     67:        /* Next to free, if non-NULL (used when multiple references). */
                     68:        struct kcpuset *        kc_next;
                     69:        /* Actual variable-sized field of bits. */
                     70:        struct kcpuset          kc_field;
                     71: } kcpuset_impl_t;
                     72:
                     73: #define        KC_BITS_OFF             (offsetof(struct kcpuset_impl, kc_field))
                     74: #define        KC_GETSTRUCT(b)         ((kcpuset_impl_t *)((char *)(b) - KC_BITS_OFF))
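
/*
 * Illustrative note (not part of the original file): callers only ever see a
 * pointer to kc_field; KC_GETSTRUCT() subtracts the offset of kc_field to
 * recover the enclosing kcpuset_impl_t, i.e. a "container of" conversion.
 * A minimal sketch of the round trip, assuming the routines defined later in
 * this file:
 */
#if 0
	kcpuset_t *kcp;
	kcpuset_impl_t *kc;

	kcpuset_create(&kcp, true);		/* returns &kc->kc_field */
	kc = KC_GETSTRUCT(kcp);			/* recovers the container */
	KASSERT(&kc->kc_field == kcp);		/* the round trip is an identity */
	kcpuset_destroy(kcp);
#endif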
                     75:
                      76: /* Shift and mask for indexing the 32-bit words of a bitset. */
                     77: #define        KC_SHIFT                5
                     78: #define        KC_MASK                 31
                     79:
                     80: /* An array of noted early kcpuset creations and data. */
                     81: #define        KC_SAVE_NITEMS          8
                     82:
                     83: /* Structures for early boot mechanism (must be statically initialised). */
                     84: static kcpuset_t **            kc_noted_early[KC_SAVE_NITEMS];
                     85: static uint32_t                        kc_bits_early[KC_SAVE_NITEMS];
                     86: static int                     kc_last_idx = 0;
                     87: static bool                    kc_initialised = false;
                     88:
                     89: #define        KC_BITSIZE_EARLY        sizeof(kc_bits_early[0])
1.4       rmind      90: #define        KC_NFIELDS_EARLY        1
1.1       rmind      91:
                     92: /*
                      93:  * The size (in bytes) of the whole bitset and the number of its fields.
                      94:  * Both must be statically initialised for the early boot case.
                     95:  */
                     96: static size_t                  kc_bitsize __read_mostly = KC_BITSIZE_EARLY;
                     97: static size_t                  kc_nfields __read_mostly = KC_NFIELDS_EARLY;
                     98:
                     99: static pool_cache_t            kc_cache __read_mostly;
                    100:
1.3       rmind     101: static kcpuset_t *             kcpuset_create_raw(bool);
1.1       rmind     102:
                    103: /*
                    104:  * kcpuset_sysinit: initialize the subsystem, transfer early boot cases
                    105:  * to dynamically allocated sets.
                    106:  */
                    107: void
                    108: kcpuset_sysinit(void)
                    109: {
                    110:        kcpuset_t *kc_dynamic[KC_SAVE_NITEMS], *kcp;
                    111:        int i, s;
                    112:
                     113:        /* Set the kcpuset_t sizes. */
                    114:        kc_nfields = (KC_MAXCPUS >> KC_SHIFT);
                    115:        kc_bitsize = sizeof(uint32_t) * kc_nfields;
1.4       rmind     116:        KASSERT(kc_nfields != 0 && kc_bitsize != 0);
1.1       rmind     117:
                    118:        kc_cache = pool_cache_init(sizeof(kcpuset_impl_t) + kc_bitsize,
                    119:            coherency_unit, 0, 0, "kcpuset", NULL, IPL_NONE, NULL, NULL, NULL);
                    120:
                    121:        /* First, pre-allocate kcpuset entries. */
                    122:        for (i = 0; i < kc_last_idx; i++) {
1.3       rmind     123:                kcp = kcpuset_create_raw(true);
1.1       rmind     124:                kc_dynamic[i] = kcp;
                    125:        }
                    126:
                    127:        /*
                    128:         * Prepare to convert all early noted kcpuset uses to dynamic sets.
                     129:         * All processors, except the one we are currently running on (the
                     130:         * primary), must not have been spun up yet.  Since MD facilities can
                     131:         * use kcpuset, raise the IPL to high.
                    132:         */
                    133:        KASSERT(mp_online == false);
                    134:
                    135:        s = splhigh();
                    136:        for (i = 0; i < kc_last_idx; i++) {
                    137:                /*
                    138:                 * Transfer the bits from early static storage to the kcpuset.
                    139:                 */
                    140:                KASSERT(kc_bitsize >= KC_BITSIZE_EARLY);
                    141:                memcpy(kc_dynamic[i], &kc_bits_early[i], KC_BITSIZE_EARLY);
                    142:
                    143:                /*
                    144:                 * Store the new pointer, pointing to the allocated kcpuset.
                     145:                 * Note: we are not in an interrupt context and only one CPU
                     146:                 * is running, thus the store is safe (i.e. there is no need
                     147:                 * for the pointer variable to be volatile).
                    148:                 */
                    149:                *kc_noted_early[i] = kc_dynamic[i];
                    150:        }
                    151:        kc_initialised = true;
                    152:        kc_last_idx = 0;
                    153:        splx(s);
                    154: }
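
/*
 * Illustrative sketch (not part of the original file): how MD code might use
 * the early boot path.  Before kcpuset_sysinit() runs, kcpuset_create() hands
 * back a pointer into kc_bits_early[] and records the caller's pointer
 * variable; kcpuset_sysinit() later repoints that variable at a dynamically
 * allocated set with the early bits copied over.  The md_booted_cpus name and
 * the function below are hypothetical.
 */
#if 0
static kcpuset_t *md_booted_cpus;

static void
md_note_booted_cpu(cpuid_t ci_index)
{
	if (md_booted_cpus == NULL) {
		/* May be called before kcpuset_sysinit(). */
		kcpuset_create(&md_booted_cpus, true);
	}
	kcpuset_set(md_booted_cpus, ci_index);
}
#endif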
                    155:
                    156: /*
                    157:  * kcpuset_early_ptr: note an early boot use by saving the pointer and
                    158:  * returning a pointer to a static, temporary bit field.
                    159:  */
                    160: static kcpuset_t *
                    161: kcpuset_early_ptr(kcpuset_t **kcptr)
                    162: {
                    163:        kcpuset_t *kcp;
                    164:        int s;
                    165:
                    166:        s = splhigh();
                    167:        if (kc_last_idx < KC_SAVE_NITEMS) {
                    168:                /*
                     169:                 * Save the pointer and return a pointer to the static early
                     170:                 * field, which needs to be zeroed out.
                    171:                 */
1.5       rmind     172:                kc_noted_early[kc_last_idx] = kcptr;
1.1       rmind     173:                kcp = (kcpuset_t *)&kc_bits_early[kc_last_idx];
1.5       rmind     174:                kc_last_idx++;
1.1       rmind     175:                memset(kcp, 0, KC_BITSIZE_EARLY);
                    176:                KASSERT(kc_bitsize == KC_BITSIZE_EARLY);
                    177:        } else {
                    178:                panic("kcpuset(9): all early-use entries exhausted; "
                    179:                    "increase KC_SAVE_NITEMS\n");
                    180:        }
                    181:        splx(s);
                    182:
                    183:        return kcp;
                    184: }
                    185:
                    186: /*
                    187:  * Routines to create or destroy the CPU set.
                    188:  * Early boot case is handled.
                    189:  */
                    190:
                    191: static kcpuset_t *
1.3       rmind     192: kcpuset_create_raw(bool zero)
1.1       rmind     193: {
                    194:        kcpuset_impl_t *kc;
                    195:
                    196:        kc = pool_cache_get(kc_cache, PR_WAITOK);
                    197:        kc->kc_refcnt = 1;
                    198:        kc->kc_next = NULL;
                    199:
1.3       rmind     200:        if (zero) {
                    201:                memset(&kc->kc_field, 0, kc_bitsize);
                    202:        }
                    203:
1.1       rmind     204:        /* Note: return pointer to the actual field of bits. */
                    205:        KASSERT((uint8_t *)kc + KC_BITS_OFF == (uint8_t *)&kc->kc_field);
                    206:        return &kc->kc_field;
                    207: }
                    208:
                    209: void
1.3       rmind     210: kcpuset_create(kcpuset_t **retkcp, bool zero)
1.1       rmind     211: {
                    212:        if (__predict_false(!kc_initialised)) {
                    213:                /* Early boot use - special case. */
                    214:                *retkcp = kcpuset_early_ptr(retkcp);
                    215:                return;
                    216:        }
1.3       rmind     217:        *retkcp = kcpuset_create_raw(zero);
1.1       rmind     218: }
                    219:
                    220: void
                    221: kcpuset_destroy(kcpuset_t *kcp)
                    222: {
1.2       rmind     223:        kcpuset_impl_t *kc;
1.1       rmind     224:
                    225:        KASSERT(kc_initialised);
                    226:        KASSERT(kcp != NULL);
                    227:
                    228:        do {
1.2       rmind     229:                kc = KC_GETSTRUCT(kcp);
                    230:                kcp = kc->kc_next;
1.1       rmind     231:                pool_cache_put(kc_cache, kc);
1.2       rmind     232:        } while (kcp);
1.1       rmind     233: }
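
/*
 * Illustrative sketch (not part of the original file): the basic lifecycle of
 * a dynamic set.  Assumes <sys/cpu.h> for cpu_index() and curcpu(); the
 * function name is hypothetical.
 */
#if 0
static void
example_lifecycle(void)
{
	kcpuset_t *kcp;

	kcpuset_create(&kcp, true);		/* allocate a zeroed set */
	kcpuset_set(kcp, cpu_index(curcpu()));	/* add the current CPU */
	if (kcpuset_isset(kcp, 0)) {
		/* CPU #0 is a member of the set. */
	}
	kcpuset_destroy(kcp);
}
#endif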
                    234:
                    235: /*
1.4       rmind     236:  * Routines to reference/unreference the CPU set.
1.1       rmind     237:  * Note: early boot case is not supported by these routines.
                    238:  */
                    239:
                    240: void
                    241: kcpuset_use(kcpuset_t *kcp)
                    242: {
                    243:        kcpuset_impl_t *kc = KC_GETSTRUCT(kcp);
                    244:
                    245:        KASSERT(kc_initialised);
                    246:        atomic_inc_uint(&kc->kc_refcnt);
                    247: }
                    248:
                    249: void
                    250: kcpuset_unuse(kcpuset_t *kcp, kcpuset_t **lst)
                    251: {
                    252:        kcpuset_impl_t *kc = KC_GETSTRUCT(kcp);
                    253:
                    254:        KASSERT(kc_initialised);
                    255:        KASSERT(kc->kc_refcnt > 0);
                    256:
                    257:        if (atomic_dec_uint_nv(&kc->kc_refcnt) != 0) {
                    258:                return;
                    259:        }
                    260:        KASSERT(kc->kc_next == NULL);
                    261:        if (lst == NULL) {
                    262:                kcpuset_destroy(kcp);
                    263:                return;
                    264:        }
                    265:        kc->kc_next = *lst;
                    266:        *lst = kcp;
                    267: }
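
/*
 * Illustrative sketch (not part of the original file): dropping a reference
 * with deferred destruction.  If the last reference is released in a context
 * where freeing is undesirable (e.g. while holding a spin lock), the set is
 * chained onto the caller-supplied list via kc_next, and the whole chain can
 * later be freed with a single kcpuset_destroy() call.
 */
#if 0
	kcpuset_t *garbage = NULL;

	kcpuset_unuse(kcp, &garbage);	/* chained onto 'garbage' if last ref */
	/* ... later, once it is safe to free ... */
	if (garbage != NULL) {
		kcpuset_destroy(garbage);	/* frees the whole kc_next chain */
	}
#endif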
                    268:
                    269: /*
                    270:  * Routines to transfer the CPU set from / to userspace.
                    271:  * Note: early boot case is not supported by these routines.
                    272:  */
                    273:
                    274: int
                    275: kcpuset_copyin(const cpuset_t *ucp, kcpuset_t *kcp, size_t len)
                    276: {
1.5       rmind     277:        kcpuset_impl_t *kc __unused = KC_GETSTRUCT(kcp);
1.1       rmind     278:
                    279:        KASSERT(kc_initialised);
                    280:        KASSERT(kc->kc_refcnt > 0);
                    281:        KASSERT(kc->kc_next == NULL);
                    282:
1.5       rmind     283:        if (len > kc_bitsize) { /* XXX */
1.1       rmind     284:                return EINVAL;
                    285:        }
1.5       rmind     286:        return copyin(ucp, kcp, len);
1.1       rmind     287: }
                    288:
                    289: int
                    290: kcpuset_copyout(kcpuset_t *kcp, cpuset_t *ucp, size_t len)
                    291: {
1.5       rmind     292:        kcpuset_impl_t *kc __unused = KC_GETSTRUCT(kcp);
1.1       rmind     293:
                    294:        KASSERT(kc_initialised);
                    295:        KASSERT(kc->kc_refcnt > 0);
                    296:        KASSERT(kc->kc_next == NULL);
                    297:
1.5       rmind     298:        if (len > kc_bitsize) { /* XXX */
1.1       rmind     299:                return EINVAL;
                    300:        }
1.5       rmind     301:        return copyout(kcp, ucp, len);
1.1       rmind     302: }
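
/*
 * Illustrative sketch (not part of the original file): a syscall-style
 * handler copying an affinity mask in from userspace.  The function name and
 * its arguments are hypothetical.
 */
#if 0
static int
example_copyin_mask(const cpuset_t *ucp, size_t len)
{
	kcpuset_t *kcp;
	int error;

	kcpuset_create(&kcp, true);
	/* Fails with EINVAL if 'len' exceeds the kernel bitset size. */
	error = kcpuset_copyin(ucp, kcp, len);
	if (error == 0) {
		/* ... use the set ... */
	}
	kcpuset_destroy(kcp);
	return error;
}
#endif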
                    303:
1.6       rmind     304: void
1.7.2.1 ! tls       305: kcpuset_export_u32(const kcpuset_t *kcp, uint32_t *bitfield, size_t len)
1.6       rmind     306: {
                    307:        size_t rlen = MIN(kc_bitsize, len);
                    308:
                    309:        KASSERT(kcp != NULL);
                    310:        memcpy(bitfield, kcp->bits, rlen);
                    311: }
                    312:
1.1       rmind     313: /*
1.4       rmind     314:  * Routines to change the bit field: zero, fill, copy, set, unset, etc.
1.1       rmind     315:  */
1.4       rmind     316:
1.1       rmind     317: void
                    318: kcpuset_zero(kcpuset_t *kcp)
                    319: {
                    320:
                    321:        KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_refcnt > 0);
                    322:        KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
                    323:        memset(kcp, 0, kc_bitsize);
                    324: }
                    325:
                    326: void
                    327: kcpuset_fill(kcpuset_t *kcp)
                    328: {
                    329:
                    330:        KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_refcnt > 0);
                    331:        KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
                    332:        memset(kcp, ~0, kc_bitsize);
                    333: }
                    334:
                    335: void
1.4       rmind     336: kcpuset_copy(kcpuset_t *dkcp, kcpuset_t *skcp)
                    337: {
                    338:
                    339:        KASSERT(!kc_initialised || KC_GETSTRUCT(dkcp)->kc_refcnt > 0);
                    340:        KASSERT(!kc_initialised || KC_GETSTRUCT(dkcp)->kc_next == NULL);
                    341:        memcpy(dkcp, skcp, kc_bitsize);
                    342: }
                    343:
                    344: void
1.1       rmind     345: kcpuset_set(kcpuset_t *kcp, cpuid_t i)
                    346: {
                    347:        const size_t j = i >> KC_SHIFT;
                    348:
                    349:        KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
                    350:        KASSERT(j < kc_nfields);
                    351:
                    352:        kcp->bits[j] |= 1 << (i & KC_MASK);
                    353: }
                    354:
                    355: void
                    356: kcpuset_clear(kcpuset_t *kcp, cpuid_t i)
                    357: {
                    358:        const size_t j = i >> KC_SHIFT;
                    359:
                    360:        KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
                    361:        KASSERT(j < kc_nfields);
                    362:
                    363:        kcp->bits[j] &= ~(1 << (i & KC_MASK));
                    364: }
                    365:
1.4       rmind     366: bool
1.1       rmind     367: kcpuset_isset(kcpuset_t *kcp, cpuid_t i)
                    368: {
                    369:        const size_t j = i >> KC_SHIFT;
                    370:
                    371:        KASSERT(kcp != NULL);
                    372:        KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_refcnt > 0);
                    373:        KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
                    374:        KASSERT(j < kc_nfields);
                    375:
                    376:        return ((1 << (i & KC_MASK)) & kcp->bits[j]) != 0;
                    377: }
                    378:
                    379: bool
1.4       rmind     380: kcpuset_isotherset(kcpuset_t *kcp, cpuid_t i)
                    381: {
                    382:        const size_t j2 = i >> KC_SHIFT;
                    383:        const uint32_t mask = ~(1 << (i & KC_MASK));
                    384:
                    385:        for (size_t j = 0; j < kc_nfields; j++) {
                    386:                const uint32_t bits = kcp->bits[j];
                    387:                if (bits && (j != j2 || (bits & mask) != 0)) {
                    388:                        return true;
                    389:                }
                    390:        }
                    391:        return false;
                    392: }
                    393:
                    394: bool
1.1       rmind     395: kcpuset_iszero(kcpuset_t *kcp)
                    396: {
                    397:
                    398:        for (size_t j = 0; j < kc_nfields; j++) {
                    399:                if (kcp->bits[j] != 0) {
                    400:                        return false;
                    401:                }
                    402:        }
                    403:        return true;
                    404: }
                    405:
                    406: bool
                    407: kcpuset_match(const kcpuset_t *kcp1, const kcpuset_t *kcp2)
                    408: {
                    409:
                    410:        return memcmp(kcp1, kcp2, kc_bitsize) == 0;
                    411: }
1.3       rmind     412:
                    413: void
                    414: kcpuset_merge(kcpuset_t *kcp1, kcpuset_t *kcp2)
                    415: {
                    416:
                    417:        for (size_t j = 0; j < kc_nfields; j++) {
                    418:                kcp1->bits[j] |= kcp2->bits[j];
                    419:        }
                    420: }
                    421:
1.5       rmind     422: void
                    423: kcpuset_intersect(kcpuset_t *kcp1, kcpuset_t *kcp2)
                    424: {
                    425:
                    426:        for (size_t j = 0; j < kc_nfields; j++) {
                    427:                kcp1->bits[j] &= kcp2->bits[j];
                    428:        }
                    429: }
                    430:
1.4       rmind     431: int
                    432: kcpuset_countset(kcpuset_t *kcp)
                    433: {
                    434:        int count = 0;
                    435:
                    436:        for (size_t j = 0; j < kc_nfields; j++) {
                    437:                count += popcount32(kcp->bits[j]);
                    438:        }
                    439:        return count;
                    440: }
                    441:
1.3       rmind     442: /*
                    443:  * Routines to set/clear the flags atomically.
                    444:  */
                    445:
                    446: void
                    447: kcpuset_atomic_set(kcpuset_t *kcp, cpuid_t i)
                    448: {
                    449:        const size_t j = i >> KC_SHIFT;
                    450:
                    451:        KASSERT(j < kc_nfields);
                    452:        atomic_or_32(&kcp->bits[j], 1 << (i & KC_MASK));
                    453: }
                    454:
                    455: void
                    456: kcpuset_atomic_clear(kcpuset_t *kcp, cpuid_t i)
                    457: {
                    458:        const size_t j = i >> KC_SHIFT;
                    459:
                    460:        KASSERT(j < kc_nfields);
                    461:        atomic_and_32(&kcp->bits[j], ~(1 << (i & KC_MASK)));
                    462: }
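
/*
 * Illustrative sketch (not part of the original file): unlike kcpuset_set()
 * and kcpuset_clear(), the atomic variants may be used when several CPUs
 * update the same set concurrently, e.g. each CPU marking itself in a shared
 * set.  The 'pending_cpus' name is hypothetical; assumes <sys/cpu.h>.
 */
#if 0
	extern kcpuset_t *pending_cpus;

	kcpuset_atomic_set(pending_cpus, cpu_index(curcpu()));
	/* ... perform the per-CPU work ... */
	kcpuset_atomic_clear(pending_cpus, cpu_index(curcpu()));
#endif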
