/*	$NetBSD: kern_cpu.c,v 1.71 2015/08/29 12:24:00 maxv Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2010, 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.71 2015/08/29 12:24:00 maxv Exp $");

#include "opt_cpu_ucode.h"
#include "opt_compat_netbsd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/idle.h>
#include <sys/sched.h>
#include <sys/intr.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/cpuio.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/pool.h>
#include <sys/kmem.h>
#include <sys/select.h>
#include <sys/namei.h>
#include <sys/callout.h>
#include <sys/pcu.h>

#include <uvm/uvm_extern.h>

#include "ioconf.h"

/*
 * If the port has stated that cpu_data is the first thing in cpu_info,
 * verify that the claim is true.  This will prevent them from getting out
 * of sync.
 */
#ifdef __HAVE_CPU_DATA_FIRST
CTASSERT(offsetof(struct cpu_info, ci_data) == 0);
#else
CTASSERT(offsetof(struct cpu_info, ci_data) != 0);
#endif

static void	cpu_xc_online(struct cpu_info *);
static void	cpu_xc_offline(struct cpu_info *);

dev_type_ioctl(cpuctl_ioctl);

const struct cdevsw cpuctl_cdevsw = {
	.d_open = nullopen,
	.d_close = nullclose,
	.d_read = nullread,
	.d_write = nullwrite,
	.d_ioctl = cpuctl_ioctl,
	.d_stop = nullstop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

kmutex_t	cpu_lock	__cacheline_aligned;
int		ncpu		__read_mostly;
int		ncpuonline	__read_mostly;
bool		mp_online	__read_mostly;

/* An array of CPUs.  There are ncpu entries. */
struct cpu_info **cpu_infos	__read_mostly;
/* Note: set in mi_cpu_attach() and idle_loop(). */
kcpuset_t *	kcpuset_attached	__read_mostly = NULL;
kcpuset_t *	kcpuset_running		__read_mostly = NULL;

static char cpu_model[128];
/*
 * mi_cpu_init: early initialisation of MI CPU-related structures.
 *
 * Note: may not block; the memory allocator is not yet available.
 */
void
mi_cpu_init(void)
{

	mutex_init(&cpu_lock, MUTEX_DEFAULT, IPL_NONE);

	kcpuset_create(&kcpuset_attached, true);
	kcpuset_create(&kcpuset_running, true);
	kcpuset_set(kcpuset_running, 0);
}

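/*
 * mi_cpu_attach: perform MI attachment of a CPU.  Register the CPU in
 * the global structures, create its idle LWP, and initialise the
 * per-CPU state of the MI subsystems.  Increments ncpu and ncpuonline.
 */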
int
mi_cpu_attach(struct cpu_info *ci)
{
	int error;

	KASSERT(maxcpus > 0);

	ci->ci_index = ncpu;
	kcpuset_set(kcpuset_attached, cpu_index(ci));

	/*
	 * Create a convenience cpuset of just ourselves.
	 */
	kcpuset_create(&ci->ci_data.cpu_kcpuset, true);
	kcpuset_set(ci->ci_data.cpu_kcpuset, cpu_index(ci));

	TAILQ_INIT(&ci->ci_data.cpu_ld_locks);
	__cpu_simple_lock_init(&ci->ci_data.cpu_ld_lock);

	/* This is useful for, e.g., per-CPU evcnt. */
	snprintf(ci->ci_data.cpu_name, sizeof(ci->ci_data.cpu_name), "cpu%d",
	    cpu_index(ci));

	if (__predict_false(cpu_infos == NULL)) {
		size_t ci_bufsize = (maxcpus + 1) * sizeof(struct cpu_info *);
		cpu_infos = kmem_zalloc(ci_bufsize, KM_SLEEP);
	}
	cpu_infos[cpu_index(ci)] = ci;

	sched_cpuattach(ci);

	error = create_idle_lwp(ci);
	if (error != 0) {
		/* XXX revert sched_cpuattach */
		return error;
	}

	if (ci == curcpu())
		ci->ci_data.cpu_onproc = curlwp;
	else
		ci->ci_data.cpu_onproc = ci->ci_data.cpu_idlelwp;

	percpu_init_cpu(ci);
	softint_init(ci);
	callout_init_cpu(ci);
	xc_init_cpu(ci);
	pool_cache_cpu_init(ci);
	selsysinit(ci);
	cache_cpu_init(ci);
	TAILQ_INIT(&ci->ci_data.cpu_biodone);
	ncpu++;
	ncpuonline++;

	return 0;
}

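/*
 * cpuctlattach: pseudo-device attach routine for cpuctl(4).  All real
 * initialisation has already been done by mi_cpu_attach().
 */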
void
cpuctlattach(int dummy __unused)
{

	KASSERT(cpu_infos != NULL);
}

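/*
 * cpuctl_ioctl: ioctl handler for cpuctl(4).  All operations are
 * serialised by cpu_lock.  A userland consumer would drive the
 * set-state operation roughly as follows (a sketch; error handling
 * omitted):
 *
 *	cpustate_t cs = { .cs_id = 1, .cs_online = false, .cs_intr = true };
 *	int fd = open("/dev/cpuctl", O_RDWR);
 *	ioctl(fd, IOC_CPU_SETSTATE, &cs);
 */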
int
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	CPU_INFO_ITERATOR cii;
	cpustate_t *cs;
	struct cpu_info *ci;
	int error, i;
	u_int id;

	error = 0;

	mutex_enter(&cpu_lock);
	switch (cmd) {
	case IOC_CPU_SETSTATE:
		cs = data;
		error = kauth_authorize_system(l->l_cred,
		    KAUTH_SYSTEM_CPU, KAUTH_REQ_SYSTEM_CPU_SETSTATE, cs, NULL,
		    NULL);
		if (error != 0)
			break;
		if (cs->cs_id >= maxcpus ||
		    (ci = cpu_lookup(cs->cs_id)) == NULL) {
			error = ESRCH;
			break;
		}
		cpu_setintr(ci, cs->cs_intr);
		error = cpu_setstate(ci, cs->cs_online);
		break;

	case IOC_CPU_GETSTATE:
		cs = data;
		id = cs->cs_id;
		memset(cs, 0, sizeof(*cs));
		cs->cs_id = id;
		if (cs->cs_id >= maxcpus ||
		    (ci = cpu_lookup(id)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
			cs->cs_online = false;
		else
			cs->cs_online = true;
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
			cs->cs_intr = false;
		else
			cs->cs_intr = true;
		cs->cs_lastmod = (int32_t)ci->ci_schedstate.spc_lastmod;
		cs->cs_lastmodhi = (int32_t)
		    (ci->ci_schedstate.spc_lastmod >> 32);
		cs->cs_intrcnt = cpu_intr_count(ci) + 1;
		cs->cs_hwid = ci->ci_cpuid;
		break;

	case IOC_CPU_MAPID:
		i = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (i++ == *(int *)data)
				break;
		}
		if (ci == NULL)
			error = ESRCH;
		else
			*(int *)data = cpu_index(ci);
		break;

	case IOC_CPU_GETCOUNT:
		*(int *)data = ncpu;
		break;

#ifdef CPU_UCODE
	case IOC_CPU_UCODE_GET_VERSION:
		error = cpu_ucode_get_version((struct cpu_ucode_version *)data);
		break;

#ifdef COMPAT_60
	case OIOC_CPU_UCODE_GET_VERSION:
		error = compat6_cpu_ucode_get_version((struct compat6_cpu_ucode *)data);
		break;
#endif

	case IOC_CPU_UCODE_APPLY:
		error = kauth_authorize_machdep(l->l_cred,
		    KAUTH_MACHDEP_CPU_UCODE_APPLY,
		    NULL, NULL, NULL, NULL);
		if (error != 0)
			break;
		error = cpu_ucode_apply((const struct cpu_ucode *)data);
		break;

#ifdef COMPAT_60
	case OIOC_CPU_UCODE_APPLY:
		error = kauth_authorize_machdep(l->l_cred,
		    KAUTH_MACHDEP_CPU_UCODE_APPLY,
		    NULL, NULL, NULL, NULL);
		if (error != 0)
			break;
		error = compat6_cpu_ucode_apply((const struct compat6_cpu_ucode *)data);
		break;
#endif
#endif

	default:
		error = ENOTTY;
		break;
	}
	mutex_exit(&cpu_lock);

	return error;
}

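/*
 * cpu_lookup: return the cpu_info for the CPU with the given index, or
 * NULL if no such CPU is attached.  Before cpu_infos is allocated, only
 * the boot CPU (index 0) can be looked up.
 */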
struct cpu_info *
cpu_lookup(u_int idx)
{
	struct cpu_info *ci;

	KASSERT(idx < maxcpus);

	if (__predict_false(cpu_infos == NULL)) {
		KASSERT(idx == 0);
		return curcpu();
	}

	ci = cpu_infos[idx];
	KASSERT(ci == NULL || cpu_index(ci) == idx);

	return ci;
}

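/*
 * cpu_xc_offline: take this CPU off-line.  Runs on the target CPU in
 * xcall thread context: the CPU is marked off-line and every non-bound,
 * non-interrupt LWP is migrated away, honouring affinity masks where
 * set.  On failure, SPCF_OFFLINE is cleared again and the caller,
 * cpu_setstate(), reports EBUSY.
 */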
static void
cpu_xc_offline(struct cpu_info *ci)
{
	struct schedstate_percpu *spc, *mspc = NULL;
	struct cpu_info *target_ci;
	struct lwp *l;
	CPU_INFO_ITERATOR cii;
	int s;

	/*
	 * The thread that made the cross call (in a separate context)
	 * holds cpu_lock on our behalf.
	 */
	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_OFFLINE;
	splx(s);

	/* Take the first available CPU for the migration. */
	for (CPU_INFO_FOREACH(cii, target_ci)) {
		mspc = &target_ci->ci_schedstate;
		if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
			break;
	}
	KASSERT(target_ci != NULL);

	/*
	 * Migrate all non-bound threads to the other CPU.  Note that this
	 * runs from the xcall thread, so no handling of LSONPROC is needed.
	 */
	mutex_enter(proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		struct cpu_info *mci;

		lwp_lock(l);
		if (l->l_cpu != ci || (l->l_pflag & (LP_BOUND | LP_INTR))) {
			lwp_unlock(l);
			continue;
		}
		/* Regular case - no affinity. */
		if (l->l_affinity == NULL) {
			lwp_migrate(l, target_ci);
			continue;
		}
		/* Affinity is set, find an online CPU in the set. */
		for (CPU_INFO_FOREACH(cii, mci)) {
			mspc = &mci->ci_schedstate;
			if ((mspc->spc_flags & SPCF_OFFLINE) == 0 &&
			    kcpuset_isset(l->l_affinity, cpu_index(mci)))
				break;
		}
		if (mci == NULL) {
			lwp_unlock(l);
			mutex_exit(proc_lock);
			goto fail;
		}
		lwp_migrate(l, mci);
	}
	mutex_exit(proc_lock);

#if PCU_UNIT_COUNT > 0
	pcu_save_all_on_cpu();
#endif

#ifdef __HAVE_MD_CPU_OFFLINE
	cpu_offline_md();
#endif
	return;
fail:
	/* Just unset the SPCF_OFFLINE flag; the caller will check. */
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

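/*
 * cpu_xc_online: bring this CPU back on-line by clearing SPCF_OFFLINE.
 * Runs on the target CPU in xcall thread context.
 */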
static void
cpu_xc_online(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

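/*
 * cpu_setstate: set a CPU on-line or off-line.  Must be called with
 * cpu_lock held.  Returns EBUSY if the CPU is the last on-line CPU in
 * its processor set, or if the off-line cross call failed to migrate
 * all LWPs away.
 */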
int
cpu_setstate(struct cpu_info *ci, bool online)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nonline;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (online) {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_online;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) != 0)
			return 0;
		nonline = 0;
		/*
		 * Ensure that at least one CPU within the processor set
		 * stays online.  Revisit this later.
		 */
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
				continue;
			if (ci2->ci_schedstate.spc_psid != spc->spc_psid)
				continue;
			nonline++;
		}
		if (nonline == 1)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_offline;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (online) {
		KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
		ncpuonline++;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
			/* If it was not set off-line, then it is busy. */
			return EBUSY;
		}
		ncpuonline--;
	}

	spc->spc_lastmod = time_second;
	return 0;
}

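/*
 * cpu_setmodel: set the model string returned by cpu_getmodel(),
 * printf-style.  Typically called once from MD attach code, e.g. (a
 * sketch; the arguments are hypothetical):
 *
 *	cpu_setmodel("%s %s", vendor_string, brand_string);
 */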
int
cpu_setmodel(const char *fmt, ...)
{
	int len;
	va_list ap;

	va_start(ap, fmt);
	len = vsnprintf(cpu_model, sizeof(cpu_model), fmt, ap);
	va_end(ap);
	return len;
}

const char *
cpu_getmodel(void)
{
	return cpu_model;
}

#ifdef __HAVE_INTR_CONTROL
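/*
 * cpu_xc_intr, cpu_xc_nointr: cross-call handlers that clear or set
 * the SPCF_NOINTR flag on the target CPU.
 */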
static void
cpu_xc_intr(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_NOINTR;
	splx(s);
}

static void
cpu_xc_nointr(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_NOINTR;
	splx(s);
}

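/*
 * cpu_setintr: enable or disable device interrupt handling on a CPU.
 * Must be called with cpu_lock held.  Returns EBUSY rather than shield
 * the last CPU in the system that is still handling interrupts.
 */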
int
cpu_setintr(struct cpu_info *ci, bool intr)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nintr;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (intr) {
		if ((spc->spc_flags & SPCF_NOINTR) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_intr;
	} else {
		if ((spc->spc_flags & SPCF_NOINTR) != 0)
			return 0;
		/*
		 * Ensure that at least one CPU within the system
		 * is handling device interrupts.
		 */
		nintr = 0;
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
				continue;
			if (ci2 == ci)
				continue;
			nintr++;
		}
		if (nintr == 0)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_nointr;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (intr) {
		KASSERT((spc->spc_flags & SPCF_NOINTR) == 0);
	} else if ((spc->spc_flags & SPCF_NOINTR) == 0) {
		/* If SPCF_NOINTR was not set, then the CPU is busy. */
		return EBUSY;
	}

	/* Direct interrupts away from the CPU and record the change. */
	cpu_intr_redistribute();
	spc->spc_lastmod = time_second;
	return 0;
}
#else	/* __HAVE_INTR_CONTROL */
int
cpu_setintr(struct cpu_info *ci, bool intr)
{

	return EOPNOTSUPP;
}

u_int
cpu_intr_count(struct cpu_info *ci)
{

	return 0;		/* 0 == "don't know" */
}
#endif	/* __HAVE_INTR_CONTROL */

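/*
 * cpu_softintr_p: return true if the calling LWP is a soft interrupt
 * LWP.
 */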
bool
cpu_softintr_p(void)
{

	return (curlwp->l_pflag & LP_INTR) != 0;
}

#ifdef CPU_UCODE
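/*
 * cpu_ucode_load: load the microcode image named fwname via the
 * firmware(9) interface into sc, freeing any previously loaded blob
 * first.
 */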
int
cpu_ucode_load(struct cpu_ucode_softc *sc, const char *fwname)
{
	firmware_handle_t fwh;
	int error;

	if (sc->sc_blob != NULL) {
		firmware_free(sc->sc_blob, sc->sc_blobsize);
		sc->sc_blob = NULL;
		sc->sc_blobsize = 0;
	}

	error = cpu_ucode_md_open(&fwh, sc->loader_version, fwname);
	if (error != 0) {
		aprint_error("ucode: firmware_open failed: %i\n", error);
		goto err0;
	}

	sc->sc_blobsize = firmware_get_size(fwh);
	sc->sc_blob = firmware_malloc(sc->sc_blobsize);
	if (sc->sc_blob == NULL) {
		error = ENOMEM;
		firmware_close(fwh);
		goto err0;
	}

	error = firmware_read(fwh, 0, sc->sc_blob, sc->sc_blobsize);
	firmware_close(fwh);
	if (error != 0)
		goto err1;

	return 0;

err1:
	firmware_free(sc->sc_blob, sc->sc_blobsize);
	sc->sc_blob = NULL;
	sc->sc_blobsize = 0;
err0:
	return error;
}
#endif