[BACK]Return to tsc.c CVS log [TXT][DIR] Up to [cvs.NetBSD.org] / src / sys / arch / x86 / x86

Annotation of src/sys/arch/x86/x86/tsc.c, Revision 1.46

1.46    ! ad          1: /*     $NetBSD: tsc.c,v 1.45 2020/05/19 21:43:36 ad Exp $      */
1.2       kardel      2:
                      3: /*-
1.42      ad          4:  * Copyright (c) 2008, 2020 The NetBSD Foundation, Inc.
1.2       kardel      5:  * All rights reserved.
                      6:  *
                      7:  * Redistribution and use in source and binary forms, with or without
                      8:  * modification, are permitted provided that the following conditions
                      9:  * are met:
                     10:  * 1. Redistributions of source code must retain the above copyright
                     11:  *    notice, this list of conditions and the following disclaimer.
                     12:  * 2. Redistributions in binary form must reproduce the above copyright
                     13:  *    notice, this list of conditions and the following disclaimer in the
                     14:  *    documentation and/or other materials provided with the distribution.
                     15:  *
                     16:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     17:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     18:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     19:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     20:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     21:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     22:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     23:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     24:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     25:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     26:  * POSSIBILITY OF SUCH DAMAGE.
                     27:  */
                     28:
                     29: #include <sys/cdefs.h>
1.46    ! ad         30: __KERNEL_RCSID(0, "$NetBSD: tsc.c,v 1.45 2020/05/19 21:43:36 ad Exp $");
1.2       kardel     31:
                     32: #include <sys/param.h>
                     33: #include <sys/systm.h>
                     34: #include <sys/time.h>
                     35: #include <sys/timetc.h>
1.16      ad         36: #include <sys/lwp.h>
                     37: #include <sys/atomic.h>
1.2       kardel     38: #include <sys/kernel.h>
1.16      ad         39: #include <sys/cpu.h>
1.37      maxv       40: #include <sys/xcall.h>
1.16      ad         41:
1.2       kardel     42: #include <machine/cpu_counter.h>
1.16      ad         43: #include <machine/cpuvar.h>
                     44: #include <machine/cpufunc.h>
1.2       kardel     45: #include <machine/specialreg.h>
1.16      ad         46: #include <machine/cputypes.h>
1.2       kardel     47:
1.16      ad         48: #include "tsc.h"
1.2       kardel     49:
/* Number of BP<->AP handshake rounds; the minimum-noise round wins. */
#define	TSC_SYNC_ROUNDS		1000
#define	ABS(a)			((a) >= 0 ? (a) : -(a))

u_int	tsc_get_timecount(struct timecounter *);

static void	tsc_delay(unsigned int);

uint64_t	tsc_freq; /* exported for sysctl */
static int64_t	tsc_drift_max = 1000;	/* max cycles */
static int64_t	tsc_drift_observed;	/* worst |drift| seen during AP startup */

int tsc_user_enabled = 1;	/* 1: userland may execute RDTSC (CR4_TSD clear) */

/* Rendezvous state for the BP/AP sync handshake (one AP at a time). */
static volatile int64_t	tsc_sync_val;
static volatile struct cpu_info	*tsc_sync_cpu;
                     65:
/*
 * The TSC timecounter.  Quality starts at 3000; tsc_tc_init() lowers it
 * to -100 if the counter is not invariant or excessive drift was observed.
 */
static struct timecounter tsc_timecounter = {
	.tc_get_timecount = tsc_get_timecount,
	.tc_counter_mask = ~0U,		/* full 32-bit counter width */
	.tc_name = "TSC",
	.tc_quality = 3000,
};
                     72:
1.35      msaitoh    73: bool
                     74: tsc_is_invariant(void)
1.2       kardel     75: {
1.16      ad         76:        struct cpu_info *ci;
                     77:        uint32_t descs[4];
1.34      msaitoh    78:        uint32_t family;
                     79:        bool invariant;
1.2       kardel     80:
1.35      msaitoh    81:        if (!cpu_hascounter())
                     82:                return false;
1.16      ad         83:
                     84:        ci = curcpu();
1.34      msaitoh    85:        invariant = false;
1.2       kardel     86:
1.16      ad         87:        if (cpu_vendor == CPUVENDOR_INTEL) {
                     88:                /*
                     89:                 * From Intel(tm) 64 and IA-32 Architectures Software
                     90:                 * Developer's Manual Volume 3A: System Programming Guide,
1.34      msaitoh    91:                 * Part 1, 17.13 TIME_STAMP COUNTER, these are the processors
                     92:                 * where the TSC is known invariant:
1.16      ad         93:                 *
                     94:                 * Pentium 4, Intel Xeon (family 0f, models 03 and higher)
                     95:                 * Core Solo and Core Duo processors (family 06, model 0e)
                     96:                 * Xeon 5100 series and Core 2 Duo (family 06, model 0f)
1.34      msaitoh    97:                 * Core 2 and Xeon (family 06, model 17)
                     98:                 * Atom (family 06, model 1c)
1.16      ad         99:                 *
1.18      ad        100:                 * We'll also assume that it's safe on the Pentium, and
                    101:                 * that it's safe on P-II and P-III Xeons due to the
                    102:                 * typical configuration of those systems.
1.34      msaitoh   103:                 *
1.16      ad        104:                 */
1.33      msaitoh   105:                switch (CPUID_TO_BASEFAMILY(ci->ci_signature)) {
1.16      ad        106:                case 0x05:
1.34      msaitoh   107:                        invariant = true;
1.16      ad        108:                        break;
                    109:                case 0x06:
1.34      msaitoh   110:                        invariant = CPUID_TO_MODEL(ci->ci_signature) == 0x0e ||
1.33      msaitoh   111:                            CPUID_TO_MODEL(ci->ci_signature) == 0x0f ||
1.34      msaitoh   112:                            CPUID_TO_MODEL(ci->ci_signature) == 0x17 ||
                    113:                            CPUID_TO_MODEL(ci->ci_signature) == 0x1c;
1.16      ad        114:                        break;
                    115:                case 0x0f:
1.34      msaitoh   116:                        invariant = CPUID_TO_MODEL(ci->ci_signature) >= 0x03;
1.16      ad        117:                        break;
                    118:                }
                    119:        } else if (cpu_vendor == CPUVENDOR_AMD) {
                    120:                /*
                    121:                 * TSC and Power Management Events on AMD Processors
                    122:                 * Nov 2, 2005 Rich Brunner, AMD Fellow
                    123:                 * http://lkml.org/lkml/2005/11/4/173
                    124:                 *
1.34      msaitoh   125:                 * See Appendix E.4.7 CPUID Fn8000_0007_EDX Advanced Power
1.36      msaitoh   126:                 * Management Features, AMD64 Architecture Programmer's
                    127:                 * Manual Volume 3: General-Purpose and System Instructions.
                    128:                 * The check is done below.
1.16      ad        129:                 */
1.34      msaitoh   130:        }
                    131:
                    132:        /*
                    133:         * The best way to check whether the TSC counter is invariant or not
                    134:         * is to check CPUID 80000007.
                    135:         */
                    136:        family = CPUID_TO_BASEFAMILY(ci->ci_signature);
                    137:        if (((cpu_vendor == CPUVENDOR_INTEL) || (cpu_vendor == CPUVENDOR_AMD))
                    138:            && ((family == 0x06) || (family == 0x0f))) {
                    139:                x86_cpuid(0x80000000, descs);
                    140:                if (descs[0] >= 0x80000007) {
1.16      ad        141:                        x86_cpuid(0x80000007, descs);
1.40      msaitoh   142:                        invariant = (descs[3] & CPUID_APM_ITSC) != 0;
1.16      ad        143:                }
                    144:        }
1.2       kardel    145:
1.35      msaitoh   146:        return invariant;
                    147: }
                    148:
/*
 * Initialize timecounter(9) and DELAY() function of TSC.
 *
 * This function is called after all secondary processors were brought up
 * and drift has been measured, and after any other potential delay funcs
 * have been installed (e.g. lapic_delay()).
 */
void
tsc_tc_init(void)
{
	struct cpu_info *ci;
	bool invariant;

	/* No TSC at all: nothing to register. */
	if (!cpu_hascounter())
		return;

	ci = curcpu();
	tsc_freq = ci->ci_data.cpu_cc_freq;
	invariant = tsc_is_invariant();
	if (!invariant) {
		/* Rate may vary with power state; give it negative quality. */
		aprint_debug("TSC not known invariant on this CPU\n");
		tsc_timecounter.tc_quality = -100;
	} else if (tsc_drift_observed > tsc_drift_max) {
		/* CPUs disagree too much for a trustworthy global counter. */
		aprint_error("ERROR: %lld cycle TSC drift observed\n",
		    (long long)tsc_drift_observed);
		tsc_timecounter.tc_quality = -100;
		invariant = false;
	} else if (vm_guest == VM_GUEST_NO) {
		/* Invariant, low drift, bare metal: also use TSC for DELAY(). */
		delay_func = tsc_delay;
	}

	/* Register the timecounter only if a frequency was calibrated. */
	if (tsc_freq != 0) {
		tsc_timecounter.tc_frequency = tsc_freq;
		tc_init(&tsc_timecounter);
	}
}
                    185:
1.16      ad        186: /*
                    187:  * Record drift (in clock cycles).  Called during AP startup.
                    188:  */
1.2       kardel    189: void
1.16      ad        190: tsc_sync_drift(int64_t drift)
1.2       kardel    191: {
1.16      ad        192:
                    193:        if (drift < 0)
                    194:                drift = -drift;
                    195:        if (drift > tsc_drift_observed)
                    196:                tsc_drift_observed = drift;
1.2       kardel    197: }
                    198:
/*
 * Called during startup of APs, by the boot processor.  Interrupts
 * are disabled on entry.
 *
 * One handshake round: signal the AP via CPUF_SYNCTSC, read our own
 * TSC the moment the AP acknowledges, then collect the AP's reading
 * from tsc_sync_val.  Returns both raw counter values to the caller.
 */
static void
tsc_read_bp(struct cpu_info *ci, uint64_t *bptscp, uint64_t *aptscp)
{
	uint64_t bptsc;

	/* Claim the rendezvous slot; only one AP may sync at a time. */
	if (atomic_swap_ptr(&tsc_sync_cpu, ci) != NULL) {
		panic("tsc_sync_bp: 1");
	}

	/* Flag it and read our TSC. */
	atomic_or_uint(&ci->ci_flags, CPUF_SYNCTSC);

	/* Wait for remote to complete, and read ours again. */
	while ((ci->ci_flags & CPUF_SYNCTSC) != 0) {
		__insn_barrier();
	}
	bptsc = rdtsc();

	/* Wait for the results to come in. */
	while (tsc_sync_cpu == ci) {
		x86_pause();
	}
	if (tsc_sync_cpu != NULL) {
		panic("tsc_sync_bp: 2");
	}

	*bptscp = bptsc;
	*aptscp = tsc_sync_val;
}
                    232:
                    233: void
                    234: tsc_sync_bp(struct cpu_info *ci)
                    235: {
1.44      ad        236:        int64_t bptsc, aptsc, val, diff;
1.25      drochner  237:
1.44      ad        238:        if (!cpu_hascounter())
                    239:                return;
                    240:
                    241:        val = INT64_MAX;
                    242:        for (int i = 0; i < TSC_SYNC_ROUNDS; i++) {
1.42      ad        243:                tsc_read_bp(ci, &bptsc, &aptsc);
1.44      ad        244:                diff = bptsc - aptsc;
                    245:                if (ABS(diff) < ABS(val)) {
                    246:                        val = diff;
                    247:                }
1.42      ad        248:        }
1.25      drochner  249:
1.44      ad        250:        ci->ci_data.cpu_cc_skew = val;
1.2       kardel    251: }
                    252:
/*
 * Called during startup of AP, by the AP itself.  Interrupts are
 * disabled on entry.
 *
 * AP half of one handshake round: wait for the BP's go-ahead, read
 * the local TSC as close as possible to the BP's own read, and post
 * the value back through tsc_sync_val.
 */
static void
tsc_post_ap(struct cpu_info *ci)
{
	uint64_t tsc;

	/* Wait for go-ahead from primary. */
	while ((ci->ci_flags & CPUF_SYNCTSC) == 0) {
		__insn_barrier();
	}

	/* Instruct primary to read its counter. */
	atomic_and_uint(&ci->ci_flags, ~CPUF_SYNCTSC);
	tsc = rdtsc();

	/* Post result.  Ensure the whole value goes out atomically. */
	(void)atomic_swap_64(&tsc_sync_val, tsc);

	/* Release the slot; the BP spins until tsc_sync_cpu changes. */
	if (atomic_swap_ptr(&tsc_sync_cpu, NULL) != ci) {
		panic("tsc_sync_ap");
	}
}
                    278:
1.25      drochner  279: void
                    280: tsc_sync_ap(struct cpu_info *ci)
                    281: {
                    282:
1.44      ad        283:        if (!cpu_hascounter())
                    284:                return;
                    285:
                    286:        for (int i = 0; i < TSC_SYNC_ROUNDS; i++) {
1.42      ad        287:                tsc_post_ap(ci);
                    288:        }
1.25      drochner  289: }
                    290:
1.37      maxv      291: static void
                    292: tsc_apply_cpu(void *arg1, void *arg2)
                    293: {
1.38      joerg     294:        bool enable = arg1 != NULL;
1.37      maxv      295:        if (enable) {
                    296:                lcr4(rcr4() & ~CR4_TSD);
                    297:        } else {
                    298:                lcr4(rcr4() | CR4_TSD);
                    299:        }
                    300: }
                    301:
                    302: void
                    303: tsc_user_enable(void)
                    304: {
                    305:        uint64_t xc;
                    306:
                    307:        xc = xc_broadcast(0, tsc_apply_cpu, (void *)true, NULL);
                    308:        xc_wait(xc);
                    309: }
                    310:
                    311: void
                    312: tsc_user_disable(void)
                    313: {
                    314:        uint64_t xc;
                    315:
                    316:        xc = xc_broadcast(0, tsc_apply_cpu, (void *)false, NULL);
                    317:        xc_wait(xc);
                    318: }
                    319:
/*
 * Return the calibrated cycle counter frequency (Hz) recorded for
 * the given CPU.
 */
uint64_t
cpu_frequency(struct cpu_info *ci)
{

	return ci->ci_data.cpu_cc_freq;
}
1.2       kardel    326:
/*
 * Report whether this CPU advertises a time stamp counter (RDTSC).
 * Returns the masked CPUID feature bit: non-zero if present.  Callers
 * treat the result as a boolean; do not rely on the exact value.
 */
int
cpu_hascounter(void)
{

	return cpu_feature[0] & CPUID_TSC;
}
1.45      ad        333:
                    334: static void
                    335: tsc_delay(unsigned int us)
                    336: {
                    337:        uint64_t start, delta;
                    338:
                    339:        start = cpu_counter();
                    340:        delta = (uint64_t)us * cpu_frequency(&cpu_info_primary) / 1000000;
                    341:
                    342:        while ((cpu_counter() - start) < delta) {
                    343:                x86_pause();
                    344:        }
                    345: }

CVSweb <webmaster@jp.NetBSD.org>