1/* $NetBSD: tsc.c,v 1.36 2013/12/18 03:20:19 msaitoh Exp $ */
2
3/*-
4 * Copyright (c) 2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <sys/cdefs.h>
30__KERNEL_RCSID(0, "$NetBSD: tsc.c,v 1.36 2013/12/18 03:20:19 msaitoh Exp $");
31
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/time.h>
35#include <sys/timetc.h>
36#include <sys/lwp.h>
37#include <sys/atomic.h>
38#include <sys/kernel.h>
39#include <sys/cpu.h>
40
41#include <machine/cpu_counter.h>
42#include <machine/cpuvar.h>
43#include <machine/cpufunc.h>
44#include <machine/specialreg.h>
45#include <machine/cputypes.h>
46
47#include "tsc.h"
48
/* Timecounter read routine (defined later in this file / elsewhere). */
u_int	tsc_get_timecount(struct timecounter *);

uint64_t	tsc_freq; /* exported for sysctl */
static int64_t	tsc_drift_max = 250;	/* max cycles of inter-CPU TSC drift tolerated */
static int64_t	tsc_drift_observed;	/* worst drift reported via tsc_sync_drift() */
static bool	tsc_good;		/* true if TSC readable via MSR_TSC (see tsc_tc_init) */

/* Rendezvous state for BP<->AP TSC synchronization (tsc_read_bp/tsc_post_ap). */
static volatile int64_t	tsc_sync_val;	/* AP's averaged TSC reading */
static volatile struct cpu_info	*tsc_sync_cpu;	/* CPU currently being synced, or NULL */

static struct timecounter tsc_timecounter = {
	.tc_get_timecount = tsc_get_timecount,
	.tc_counter_mask = ~0U,			/* full 32-bit counter */
	.tc_name = "TSC",
	.tc_quality = 3000,			/* preferred; lowered if not invariant */
};
65
66bool
67tsc_is_invariant(void)
68{
69 struct cpu_info *ci;
70 uint32_t descs[4];
71 uint32_t family;
72 bool invariant;
73
74 if (!cpu_hascounter())
75 return false;
76
77 ci = curcpu();
78 invariant = false;
79
80 if (cpu_vendor == CPUVENDOR_INTEL) {
81 /*
82 * From Intel(tm) 64 and IA-32 Architectures Software
83 * Developer's Manual Volume 3A: System Programming Guide,
84 * Part 1, 17.13 TIME_STAMP COUNTER, these are the processors
85 * where the TSC is known invariant:
86 *
87 * Pentium 4, Intel Xeon (family 0f, models 03 and higher)
88 * Core Solo and Core Duo processors (family 06, model 0e)
89 * Xeon 5100 series and Core 2 Duo (family 06, model 0f)
90 * Core 2 and Xeon (family 06, model 17)
91 * Atom (family 06, model 1c)
92 *
93 * We'll also assume that it's safe on the Pentium, and
94 * that it's safe on P-II and P-III Xeons due to the
95 * typical configuration of those systems.
96 *
97 */
98 switch (CPUID_TO_BASEFAMILY(ci->ci_signature)) {
99 case 0x05:
100 invariant = true;
101 break;
102 case 0x06:
103 invariant = CPUID_TO_MODEL(ci->ci_signature) == 0x0e ||
104 CPUID_TO_MODEL(ci->ci_signature) == 0x0f ||
105 CPUID_TO_MODEL(ci->ci_signature) == 0x17 ||
106 CPUID_TO_MODEL(ci->ci_signature) == 0x1c;
107 break;
108 case 0x0f:
109 invariant = CPUID_TO_MODEL(ci->ci_signature) >= 0x03;
110 break;
111 }
112 } else if (cpu_vendor == CPUVENDOR_AMD) {
113 /*
114 * TSC and Power Management Events on AMD Processors
115 * Nov 2, 2005 Rich Brunner, AMD Fellow
116 * http://lkml.org/lkml/2005/11/4/173
117 *
118 * See Appendix E.4.7 CPUID Fn8000_0007_EDX Advanced Power
119 * Management Features, AMD64 Architecture Programmer's
120 * Manual Volume 3: General-Purpose and System Instructions.
121 * The check is done below.
122 */
123 }
124
125 /*
126 * The best way to check whether the TSC counter is invariant or not
127 * is to check CPUID 80000007.
128 */
129 family = CPUID_TO_BASEFAMILY(ci->ci_signature);
130 if (((cpu_vendor == CPUVENDOR_INTEL) || (cpu_vendor == CPUVENDOR_AMD))
131 && ((family == 0x06) || (family == 0x0f))) {
132 x86_cpuid(0x80000000, descs);
133 if (descs[0] >= 0x80000007) {
134 x86_cpuid(0x80000007, descs);
135 invariant = (descs[3] & CPUID_APM_TSC) != 0;
136 }
137 }
138
139 return invariant;
140}
141
/*
 * Register the TSC as a system timecounter, downgrading its quality if it
 * is not known to be invariant or has been observed drifting between CPUs.
 * Called on the boot processor once cpu_cc_freq has been calibrated.
 */
void
tsc_tc_init(void)
{
	struct cpu_info *ci;
	bool invariant;

	if (!cpu_hascounter())
		return;

	ci = curcpu();
	tsc_freq = ci->ci_data.cpu_cc_freq;
	/*
	 * "Good" means the TSC can be read via MSR_TSC.  The MSR is read
	 * twice on purpose: each rdmsr() is a fresh read, so this accepts
	 * the TSC if either read is nonzero (a single read could be zero
	 * transiently right around counter start).  NOT a redundant
	 * condition, despite appearances.
	 */
	tsc_good = (cpu_feature[0] & CPUID_MSR) != 0 &&
	    (rdmsr(MSR_TSC) != 0 || rdmsr(MSR_TSC) != 0);

	invariant = tsc_is_invariant();
	if (!invariant) {
		aprint_debug("TSC not known invariant on this CPU\n");
		tsc_timecounter.tc_quality = -100;
	} else if (tsc_drift_observed > tsc_drift_max) {
		/* Cross-CPU drift exceeded tolerance: demote the counter. */
		aprint_error("ERROR: %lld cycle TSC drift observed\n",
		    (long long)tsc_drift_observed);
		tsc_timecounter.tc_quality = -100;
		invariant = false;
	}

	/* Register only if calibration produced a usable frequency. */
	if (tsc_freq != 0) {
		tsc_timecounter.tc_frequency = tsc_freq;
		tc_init(&tsc_timecounter);
	}
}
172
173/*
174 * Record drift (in clock cycles). Called during AP startup.
175 */
176void
177tsc_sync_drift(int64_t drift)
178{
179
180 if (drift < 0)
181 drift = -drift;
182 if (drift > tsc_drift_observed)
183 tsc_drift_observed = drift;
184}
185
/*
 * Called during startup of APs, by the boot processor. Interrupts
 * are disabled on entry.
 *
 * One leg of a lock-free handshake with tsc_post_ap(): both sides take
 * two TSC readings, each shifted right by one, so the sum is the average
 * of the two reads — bracketing the moment the AP clears CPUF_SYNCTSC.
 * NOTE(review): statement order here IS the protocol; do not reorder.
 */
static void
tsc_read_bp(struct cpu_info *ci, uint64_t *bptscp, uint64_t *aptscp)
{
	uint64_t bptsc;

	/* Claim the single sync slot; only one AP syncs at a time. */
	if (atomic_swap_ptr(&tsc_sync_cpu, ci) != NULL) {
		panic("tsc_sync_bp: 1");
	}

	/* Flag it and read our TSC. */
	atomic_or_uint(&ci->ci_flags, CPUF_SYNCTSC);
	bptsc = cpu_counter_serializing() >> 1;

	/* Wait for remote to complete, and read ours again. */
	while ((ci->ci_flags & CPUF_SYNCTSC) != 0) {
		__insn_barrier();
	}
	bptsc += (cpu_counter_serializing() >> 1);

	/* Wait for the results to come in. */
	while (tsc_sync_cpu == ci) {
		x86_pause();
	}
	/* The AP must have swapped tsc_sync_cpu back to NULL. */
	if (tsc_sync_cpu != NULL) {
		panic("tsc_sync_bp: 2");
	}

	*bptscp = bptsc;
	*aptscp = tsc_sync_val;
}
220
221void
222tsc_sync_bp(struct cpu_info *ci)
223{
224 uint64_t bptsc, aptsc;
225
226 tsc_read_bp(ci, &bptsc, &aptsc); /* discarded - cache effects */
227 tsc_read_bp(ci, &bptsc, &aptsc);
228
229 /* Compute final value to adjust for skew. */
230 ci->ci_data.cpu_cc_skew = bptsc - aptsc;
231}
232
/*
 * Called during startup of AP, by the AP itself. Interrupts are
 * disabled on entry.
 *
 * Counterpart of tsc_read_bp(): averages two TSC reads around the
 * CPUF_SYNCTSC clear, posts the result, and releases the sync slot.
 * NOTE(review): statement order here IS the protocol; do not reorder.
 */
static void
tsc_post_ap(struct cpu_info *ci)
{
	uint64_t tsc;

	/* Wait for go-ahead from primary. */
	while ((ci->ci_flags & CPUF_SYNCTSC) == 0) {
		__insn_barrier();
	}
	tsc = (cpu_counter_serializing() >> 1);

	/* Instruct primary to read its counter. */
	atomic_and_uint(&ci->ci_flags, ~CPUF_SYNCTSC);
	tsc += (cpu_counter_serializing() >> 1);

	/* Post result. Ensure the whole value goes out atomically. */
	(void)atomic_swap_64(&tsc_sync_val, tsc);

	/* Release the slot; the BP must have installed us in it. */
	if (atomic_swap_ptr(&tsc_sync_cpu, NULL) != ci) {
		panic("tsc_sync_ap");
	}
}
259
/*
 * AP-side entry point for TSC synchronization: run the handshake twice,
 * mirroring the two passes made by tsc_sync_bp() on the boot processor.
 */
void
tsc_sync_ap(struct cpu_info *ci)
{
	int pass;

	for (pass = 0; pass < 2; pass++)
		tsc_post_ap(ci);
}
267
268uint64_t
269cpu_frequency(struct cpu_info *ci)
270{
271
272 return ci->ci_data.cpu_cc_freq;
273}
274
275int
276cpu_hascounter(void)
277{
278
279 return cpu_feature[0] & CPUID_TSC;
280}
281
282uint64_t
283cpu_counter_serializing(void)
284{
285 if (tsc_good)
286 return rdmsr(MSR_TSC);
287 else
288 return cpu_counter();
289}
290