1 | /* $NetBSD: ipi.c,v 1.26 2014/07/20 15:48:54 uebayasi Exp $ */ |
2 | |
3 | /*- |
4 | * Copyright (c) 2000, 2008, 2009 The NetBSD Foundation, Inc. |
5 | * All rights reserved. |
6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by RedBack Networks Inc. |
9 | * |
10 | * Author: Bill Sommerfeld |
11 | * |
12 | * Redistribution and use in source and binary forms, with or without |
13 | * modification, are permitted provided that the following conditions |
14 | * are met: |
15 | * 1. Redistributions of source code must retain the above copyright |
16 | * notice, this list of conditions and the following disclaimer. |
17 | * 2. Redistributions in binary form must reproduce the above copyright |
18 | * notice, this list of conditions and the following disclaimer in the |
19 | * documentation and/or other materials provided with the distribution. |
20 | * |
21 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
22 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
23 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
24 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
25 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
26 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
27 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
28 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
29 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
30 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
31 | * POSSIBILITY OF SUCH DAMAGE. |
32 | */ |
33 | |
34 | #include <sys/cdefs.h> |
35 | __KERNEL_RCSID(0, "$NetBSD: ipi.c,v 1.26 2014/07/20 15:48:54 uebayasi Exp $" ); |
36 | |
37 | #include "opt_mtrr.h" |
38 | |
39 | #include <sys/param.h> |
40 | #include <sys/device.h> |
41 | #include <sys/systm.h> |
42 | #include <sys/atomic.h> |
43 | #include <sys/intr.h> |
44 | #include <sys/ipi.h> |
45 | #include <sys/cpu.h> |
46 | #include <sys/xcall.h> |
47 | |
48 | #ifdef MULTIPROCESSOR |
49 | |
50 | #include <machine/cpufunc.h> |
51 | #include <machine/cpuvar.h> |
52 | #include <machine/i82093var.h> |
53 | #include <machine/i82489reg.h> |
54 | #include <machine/i82489var.h> |
55 | #include <machine/mtrr.h> |
56 | #include <machine/gdt.h> |
57 | |
58 | #include "acpica.h" |
59 | |
60 | #include <x86/fpu.h> |
61 | |
62 | static void x86_ipi_halt(struct cpu_info *); |
63 | static void x86_ipi_kpreempt(struct cpu_info *); |
64 | static void x86_ipi_xcall(struct cpu_info *); |
65 | static void x86_ipi_generic(struct cpu_info *); |
66 | |
67 | #ifdef MTRR |
68 | static void x86_ipi_reload_mtrr(struct cpu_info *); |
69 | #else |
70 | #define x86_ipi_reload_mtrr NULL |
71 | #endif |
72 | |
73 | #if NACPICA > 0 |
74 | void acpi_cpu_sleep(struct cpu_info *); |
75 | #else |
76 | #define acpi_cpu_sleep NULL |
77 | #endif |
78 | |
79 | static void x86_ipi_synch_fpu(struct cpu_info *); |
80 | |
/*
 * IPI dispatch table, indexed by the bit number of the corresponding
 * X86_IPI_* request bit (consumed by x86_ipi_handler() via ffs()).
 * A NULL slot means the request is handled elsewhere, or the kernel
 * option providing the handler (MTRR, ACPICA) is not configured.
 * The slot order must match the X86_IPI_* bit assignments exactly.
 */
void (* const ipifunc[X86_NIPI])(struct cpu_info *) =
{
	x86_ipi_halt,		/* X86_IPI_HALT */
	NULL,			/* X86_IPI_MICROSET */
	x86_ipi_generic,	/* X86_IPI_GENERIC */
	x86_ipi_synch_fpu,	/* X86_IPI_SYNCH_FPU */
	x86_ipi_reload_mtrr,	/* X86_IPI_MTRR */
	gdt_reload_cpu,		/* X86_IPI_GDT */
	x86_ipi_xcall,		/* X86_IPI_XCALL */
	acpi_cpu_sleep,		/* X86_IPI_ACPI_CPU_SLEEP */
	x86_ipi_kpreempt	/* X86_IPI_KPREEMPT */
};
93 | |
94 | /* |
95 | * x86 IPI interface. |
96 | */ |
97 | |
98 | int |
99 | x86_send_ipi(struct cpu_info *ci, int ipimask) |
100 | { |
101 | int ret; |
102 | |
103 | atomic_or_32(&ci->ci_ipis, ipimask); |
104 | |
105 | /* Don't send IPI to CPU which isn't (yet) running. */ |
106 | if (!(ci->ci_flags & CPUF_RUNNING)) |
107 | return ENOENT; |
108 | |
109 | ret = x86_ipi(LAPIC_IPI_VECTOR, ci->ci_cpuid, LAPIC_DLMODE_FIXED); |
110 | if (ret != 0) { |
111 | printf("ipi of %x from %s to %s failed\n" , |
112 | ipimask, |
113 | device_xname(curcpu()->ci_dev), |
114 | device_xname(ci->ci_dev)); |
115 | } |
116 | |
117 | return ret; |
118 | } |
119 | |
120 | void |
121 | x86_broadcast_ipi(int ipimask) |
122 | { |
123 | struct cpu_info *ci, *self = curcpu(); |
124 | int count = 0; |
125 | CPU_INFO_ITERATOR cii; |
126 | |
127 | for (CPU_INFO_FOREACH(cii, ci)) { |
128 | if (ci == self) |
129 | continue; |
130 | if ((ci->ci_flags & CPUF_RUNNING) == 0) |
131 | continue; |
132 | atomic_or_32(&ci->ci_ipis, ipimask); |
133 | count++; |
134 | } |
135 | if (!count) |
136 | return; |
137 | |
138 | x86_ipi(LAPIC_IPI_VECTOR, LAPIC_DEST_ALLEXCL, LAPIC_DLMODE_FIXED); |
139 | } |
140 | |
141 | void |
142 | x86_ipi_handler(void) |
143 | { |
144 | struct cpu_info *ci = curcpu(); |
145 | uint32_t pending; |
146 | int bit; |
147 | |
148 | pending = atomic_swap_32(&ci->ci_ipis, 0); |
149 | |
150 | KDASSERT((pending >> X86_NIPI) == 0); |
151 | while ((bit = ffs(pending)) != 0) { |
152 | bit--; |
153 | pending &= ~(1 << bit); |
154 | ci->ci_ipi_events[bit].ev_count++; |
155 | (*ipifunc[bit])(ci); |
156 | } |
157 | } |
158 | |
159 | /* |
160 | * Common x86 IPI handlers. |
161 | */ |
162 | |
163 | static void |
164 | x86_ipi_halt(struct cpu_info *ci) |
165 | { |
166 | |
167 | x86_disable_intr(); |
168 | atomic_and_32(&ci->ci_flags, ~CPUF_RUNNING); |
169 | |
170 | for (;;) { |
171 | x86_hlt(); |
172 | } |
173 | } |
174 | |
/*
 * Save this CPU's FPU state on behalf of a remote requester
 * (X86_IPI_SYNCH_FPU).  The argument "true" requests saving.
 */
static void
x86_ipi_synch_fpu(struct cpu_info *ci)
{

	fpusave_cpu(true);
}
181 | |
#ifdef MTRR
/*
 * Reload this CPU's MTRRs after another CPU changed the global MTRR
 * state (X86_IPI_MTRR).  No-op if no MTRR backend is attached.
 */
static void
x86_ipi_reload_mtrr(struct cpu_info *ci)
{

	if (mtrr_funcs != NULL) {
		/*
		 * mtrr_reload_cpu() is a macro in mtrr.h which picks
		 * the appropriate function to use.
		 */
		mtrr_reload_cpu(ci);
	}
}
#endif
196 | |
/*
 * Request a kernel preemption on this CPU (X86_IPI_KPREEMPT) by
 * triggering the preemption soft interrupt.
 */
static void
x86_ipi_kpreempt(struct cpu_info *ci)
{

	softint_trigger(1 << SIR_PREEMPT);
}
203 | |
204 | /* |
205 | * MD support for xcall(9) interface. |
206 | */ |
207 | |
/* Run the MI cross-call handler on this CPU (X86_IPI_XCALL). */
static void
x86_ipi_xcall(struct cpu_info *ci)
{
	xc_ipi_handler();
}
213 | |
/* Run the MI ipi(9) handler on this CPU (X86_IPI_GENERIC). */
static void
x86_ipi_generic(struct cpu_info *ci)
{
	ipi_cpu_handler();
}
219 | |
220 | void |
221 | xc_send_ipi(struct cpu_info *ci) |
222 | { |
223 | |
224 | KASSERT(kpreempt_disabled()); |
225 | KASSERT(curcpu() != ci); |
226 | |
227 | if (ci) { |
228 | /* Unicast: remote CPU. */ |
229 | x86_send_ipi(ci, X86_IPI_XCALL); |
230 | } else { |
231 | /* Broadcast: all, but local CPU (caller will handle it). */ |
232 | x86_broadcast_ipi(X86_IPI_XCALL); |
233 | } |
234 | } |
235 | |
236 | void |
237 | cpu_ipi(struct cpu_info *ci) |
238 | { |
239 | KASSERT(kpreempt_disabled()); |
240 | KASSERT(curcpu() != ci); |
241 | |
242 | if (ci) { |
243 | /* Unicast: remote CPU. */ |
244 | x86_send_ipi(ci, X86_IPI_GENERIC); |
245 | } else { |
246 | /* Broadcast: all, but local CPU (caller will handle it). */ |
247 | x86_broadcast_ipi(X86_IPI_GENERIC); |
248 | } |
249 | } |
250 | |
251 | #else |
252 | |
/* Uniprocessor stub: no other CPUs exist, always succeeds trivially. */
int
x86_send_ipi(struct cpu_info *ci, int ipimask)
{

	return 0;
}
259 | |
/* Uniprocessor stub: nothing to broadcast to. */
void
x86_broadcast_ipi(int ipimask)
{

}
265 | |
/* Uniprocessor stub for ipi(9): nothing to notify. */
void
cpu_ipi(struct cpu_info *ci)
{
}
270 | |
271 | #endif |
272 | |