/* $NetBSD: subr_cpufreq.c,v 1.9 2014/02/12 20:20:15 martin Exp $ */

/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jukka Ruohonen.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_cpufreq.c,v 1.9 2014/02/12 20:20:15 martin Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/cpufreq.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/time.h>
#include <sys/xcall.h>

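/*
 * Machine-independent interface to CPU frequency scaling. A single
 * machine-dependent backend registers its available frequency states
 * and its get- and set-callbacks, which are then invoked via
 * cross-calls (xcall) so that they always run on the target CPU.
 */
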
static int	 cpufreq_latency(void);
static uint32_t	 cpufreq_get_max(void);
static uint32_t	 cpufreq_get_min(void);
static uint32_t	 cpufreq_get_raw(struct cpu_info *);
static void	 cpufreq_get_state_raw(uint32_t, struct cpufreq_state *);
static void	 cpufreq_set_raw(struct cpu_info *, uint32_t);
static void	 cpufreq_set_all_raw(uint32_t);

static kmutex_t		cpufreq_lock __cacheline_aligned;
static struct cpufreq	*cf_backend __read_mostly = NULL;

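/*
 * Initialize the framework: create the lock and allocate the
 * (initially zeroed) backend container.
 */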
void
cpufreq_init(void)
{

	mutex_init(&cpufreq_lock, MUTEX_DEFAULT, IPL_NONE);
	cf_backend = kmem_zalloc(sizeof(*cf_backend), KM_SLEEP);
}

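/*
 * Register a frequency-scaling backend. Only one backend can be
 * active at a time; its state table is sanitized and copied, after
 * which all CPUs are set to the maximum frequency. A backend would
 * typically register itself along these lines (an illustrative
 * sketch only; the "example" names are hypothetical):
 *
 *	static struct cpufreq example_cf = {
 *		.cf_name	 = "examplefreq",
 *		.cf_state[0]	 = { .cfs_freq = 2400, .cfs_power = 35000 },
 *		.cf_state[1]	 = { .cfs_freq = 1600, .cfs_power = 12000 },
 *		.cf_state_count	 = 2,
 *		.cf_get_freq	 = example_get_freq,
 *		.cf_set_freq	 = example_set_freq,
 *		.cf_cookie	 = sc,
 *	};
 *
 *	error = cpufreq_register(&example_cf);
 *
 * Note that the states must be listed in descending order of
 * frequency (in MHz).
 */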
int
cpufreq_register(struct cpufreq *cf)
{
	uint32_t c, i, j, k, m;
	int rv;

	if (cold != 0)
		return EBUSY;

	KASSERT(cf != NULL);
	KASSERT(cf_backend != NULL);
	KASSERT(cf->cf_get_freq != NULL);
	KASSERT(cf->cf_set_freq != NULL);
	KASSERT(cf->cf_state_count > 0);
	KASSERT(cf->cf_state_count < CPUFREQ_STATE_MAX);

	mutex_enter(&cpufreq_lock);

	if (cf_backend->cf_init != false) {
		mutex_exit(&cpufreq_lock);
		return EALREADY;
	}

	cf_backend->cf_init = true;
	cf_backend->cf_mp = cf->cf_mp;
	cf_backend->cf_cookie = cf->cf_cookie;
	cf_backend->cf_get_freq = cf->cf_get_freq;
	cf_backend->cf_set_freq = cf->cf_set_freq;

	(void)strlcpy(cf_backend->cf_name, cf->cf_name,
	    sizeof(cf_backend->cf_name));

	/*
	 * Sanity check the values and verify descending order;
	 * entries that fail the checks are silently skipped.
	 */
	for (c = i = 0; i < cf->cf_state_count; i++) {

		CTASSERT(CPUFREQ_STATE_ENABLED != 0);
		CTASSERT(CPUFREQ_STATE_DISABLED != 0);

		if (cf->cf_state[i].cfs_freq == 0)
			continue;

		/*
		 * Values are in MHz: reject implausible frequencies,
		 * but permit the special state markers.
		 */
		if (cf->cf_state[i].cfs_freq > 9999 &&
		    cf->cf_state[i].cfs_freq != CPUFREQ_STATE_ENABLED &&
		    cf->cf_state[i].cfs_freq != CPUFREQ_STATE_DISABLED)
			continue;

		for (j = k = 0; j < i; j++) {

			if (cf->cf_state[i].cfs_freq >=
			    cf->cf_state[j].cfs_freq) {
				k = 1;
				break;
			}
		}

		if (k != 0)
			continue;

		cf_backend->cf_state[c].cfs_index = c;
		cf_backend->cf_state[c].cfs_freq = cf->cf_state[i].cfs_freq;
		cf_backend->cf_state[c].cfs_power = cf->cf_state[i].cfs_power;

		c++;
	}

	cf_backend->cf_state_count = c;

	if (cf_backend->cf_state_count == 0) {
		mutex_exit(&cpufreq_lock);
		cpufreq_deregister();
		return EINVAL;
	}

	rv = cpufreq_latency();

	if (rv != 0) {
		mutex_exit(&cpufreq_lock);
		cpufreq_deregister();
		return rv;
	}

	m = cpufreq_get_max();
	cpufreq_set_all_raw(m);
	mutex_exit(&cpufreq_lock);

	return 0;
}

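/*
 * Deregister the current backend, resetting the
 * framework to its initial (empty) state.
 */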
void
cpufreq_deregister(void)
{

	mutex_enter(&cpufreq_lock);
	memset(cf_backend, 0, sizeof(*cf_backend));
	mutex_exit(&cpufreq_lock);
}

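/*
 * Estimate the transition latency of each state by timing a series
 * of transitions into it. Note that the average is taken over all
 * n rounds, so samples rejected as too slow contribute zero.
 */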
static int
cpufreq_latency(void)
{
	struct cpufreq *cf = cf_backend;
	struct timespec nta, ntb;
	const uint32_t n = 10;
	uint32_t i, j, l, m;
	uint64_t s;

	l = cpufreq_get_min();
	m = cpufreq_get_max();

	/*
	 * For each state, sample the average transition
	 * latency required to set the state for all CPUs.
	 */
	for (i = 0; i < cf->cf_state_count; i++) {

		for (s = 0, j = 0; j < n; j++) {

			/*
			 * Attempt to exclude possible
			 * caching done by the backend.
			 */
			if (i == 0)
				cpufreq_set_all_raw(l);
			else
				cpufreq_set_all_raw(m);

			nanotime(&nta);
			cpufreq_set_all_raw(cf->cf_state[i].cfs_freq);
			nanotime(&ntb);
			timespecsub(&ntb, &nta, &ntb);

			if (ntb.tv_sec != 0 ||
			    ntb.tv_nsec > CPUFREQ_LATENCY_MAX)
				continue;

			if (s >= UINT64_MAX - CPUFREQ_LATENCY_MAX)
				break;

			/* Accumulate in microseconds to keep the sum small. */
			s += ntb.tv_nsec / 1000;
		}

		/*
		 * Consider the backend unsuitable if every sampled
		 * transition exceeded the latency limit.
		 */
		if (s == 0)
			return EMSGSIZE;

		cf->cf_state[i].cfs_latency = s / n;
	}

	return 0;
}

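/*
 * Prepare for suspend: save the current frequency of the
 * given CPU and drop it to the minimum.
 */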
void
cpufreq_suspend(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t l, s;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	l = cpufreq_get_min();
	s = cpufreq_get_raw(ci);

	cpufreq_set_raw(ci, l);
	cf->cf_state_saved = s;

	mutex_exit(&cpufreq_lock);
}

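/*
 * Resume from suspend: restore the frequency that
 * was saved by cpufreq_suspend().
 */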
void
cpufreq_resume(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || cf->cf_state_saved == 0) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, cf->cf_state_saved);
	mutex_exit(&cpufreq_lock);
}

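/*
 * Return the current frequency of the given CPU,
 * or zero if no backend has been registered.
 */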
uint32_t
cpufreq_get(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t freq;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true) {
		mutex_exit(&cpufreq_lock);
		return 0;
	}

	freq = cpufreq_get_raw(ci);
	mutex_exit(&cpufreq_lock);

	return freq;
}

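/*
 * Return the maximum frequency; the state table is in
 * descending order, so this is the first entry.
 */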
static uint32_t
cpufreq_get_max(void)
{
	struct cpufreq *cf = cf_backend;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	return cf->cf_state[0].cfs_freq;
}

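/*
 * Return the minimum frequency, i.e. the last
 * entry in the descending state table.
 */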
static uint32_t
cpufreq_get_min(void)
{
	struct cpufreq *cf = cf_backend;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	return cf->cf_state[cf->cf_state_count - 1].cfs_freq;
}

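/*
 * Query the frequency by cross-calling the backend
 * on the CPU in question.
 */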
static uint32_t
cpufreq_get_raw(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t freq = 0;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_unicast(0, (*cf->cf_get_freq), cf->cf_cookie, &freq, ci);
	xc_wait(xc);

	return freq;
}

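/*
 * Copy the backend and its state table to the caller-supplied
 * buffer. Returns ENODEV if no backend is registered.
 */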
int
cpufreq_get_backend(struct cpufreq *dst)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || dst == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	memcpy(dst, cf, sizeof(*cf));
	mutex_exit(&cpufreq_lock);

	return 0;
}

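/*
 * Look up the state closest to the given frequency and
 * copy it to the caller-supplied buffer.
 */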
int
cpufreq_get_state(uint32_t freq, struct cpufreq_state *cfs)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || cfs == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	cpufreq_get_state_raw(freq, cfs);
	mutex_exit(&cpufreq_lock);

	return 0;
}

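/*
 * Copy the state with the given table index to the
 * caller-supplied buffer.
 */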
int
cpufreq_get_state_index(uint32_t index, struct cpufreq_state *cfs)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || cfs == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	if (index >= cf->cf_state_count) {
		mutex_exit(&cpufreq_lock);
		return EINVAL;
	}

	memcpy(cfs, &cf->cf_state[index], sizeof(*cfs));
	mutex_exit(&cpufreq_lock);

	return 0;
}

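/*
 * Binary search for the given frequency in the descending state
 * table. If there is no exact match, the state examined last is
 * returned as an approximation.
 */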
static void
cpufreq_get_state_raw(uint32_t freq, struct cpufreq_state *cfs)
{
	struct cpufreq *cf = cf_backend;
	uint32_t f, hi, i = 0, lo = 0;

	KASSERT(mutex_owned(&cpufreq_lock) != 0);
	KASSERT(cf->cf_init != false && cfs != NULL);

	hi = cf->cf_state_count;

	while (lo < hi) {

		i = (lo + hi) >> 1;
		f = cf->cf_state[i].cfs_freq;

		if (freq == f)
			break;
		else if (freq > f)
			hi = i;	/* Higher frequencies at lower indices. */
		else
			lo = i + 1;
	}

	memcpy(cfs, &cf->cf_state[i], sizeof(*cfs));
}

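/*
 * Set the frequency of the given CPU; a no-op if
 * no backend is registered.
 */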
void
cpufreq_set(struct cpu_info *ci, uint32_t freq)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (__predict_false(cf->cf_init != true)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, freq);
	mutex_exit(&cpufreq_lock);
}

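/*
 * Set the frequency by cross-calling the backend
 * on the CPU in question.
 */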
static void
cpufreq_set_raw(struct cpu_info *ci, uint32_t freq)
{
	struct cpufreq *cf = cf_backend;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_unicast(0, (*cf->cf_set_freq), cf->cf_cookie, &freq, ci);
	xc_wait(xc);
}

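/*
 * Set the frequency of all CPUs; a no-op if no
 * backend is registered.
 */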
void
cpufreq_set_all(uint32_t freq)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (__predict_false(cf->cf_init != true)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_all_raw(freq);
	mutex_exit(&cpufreq_lock);
}

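/*
 * Set the frequency of all CPUs by broadcasting a
 * cross-call to the backend.
 */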
static void
cpufreq_set_all_raw(uint32_t freq)
{
	struct cpufreq *cf = cf_backend;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_broadcast(0, (*cf->cf_set_freq), cf->cf_cookie, &freq);
	xc_wait(xc);
}

#ifdef notyet
static void	 cpufreq_set_step(struct cpu_info *, int32_t);

/*
 * Step the frequency of the given CPU one state higher.
 */
void
cpufreq_set_higher(struct cpu_info *ci)
{
	cpufreq_set_step(ci, -1);
}

/*
 * Step the frequency of the given CPU one state lower.
 */
void
cpufreq_set_lower(struct cpu_info *ci)
{
	cpufreq_set_step(ci, 1);
}

/*
 * Move the state of the given CPU by the given step. As the table
 * is in descending order, a negative step raises the frequency and
 * a positive step lowers it.
 */
static void
cpufreq_set_step(struct cpu_info *ci, int32_t step)
{
	struct cpufreq *cf = cf_backend;
	struct cpufreq_state cfs;
	uint32_t freq;
	int32_t index;

	mutex_enter(&cpufreq_lock);

	if (__predict_false(cf->cf_init != true)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	freq = cpufreq_get_raw(ci);

	if (__predict_false(freq == 0)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_get_state_raw(freq, &cfs);
	index = cfs.cfs_index + step;

	if (index < 0 || index >= (int32_t)cf->cf_state_count) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, cf->cf_state[index].cfs_freq);
	mutex_exit(&cpufreq_lock);
}
#endif	/* notyet */