/*	$NetBSD: kern_lock.c,v 1.157 2015/04/11 15:24:25 skrll Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.157 2015/04/11 15:24:25 skrll Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lockdebug.h>
#include <sys/cpu.h>
#include <sys/syslog.h>
#include <sys/atomic.h>
#include <sys/lwp.h>

#include <machine/lock.h>

#include <dev/lockstat.h>

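/* The address our caller will return to, recorded by LOCKDEBUG/lockstat. */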
#define	RETURN_ADDRESS	(uintptr_t)__builtin_return_address(0)

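/* True if LOCKDEBUG registered kernel_lock at initialization time. */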
bool	kernel_lock_dodebug;

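/* The kernel big lock, padded out to a full cache line to avoid false
 * sharing with neighbouring data. */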
__cpu_simple_lock_t kernel_lock[CACHE_LINE_SIZE / sizeof(__cpu_simple_lock_t)]
    __cacheline_aligned;

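/*
 * Panic if the current context may not sleep: interrupt or soft
 * interrupt context, or an idle LWP once the CPU is up and running.
 */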
void
assert_sleepable(void)
{
	const char *reason;
	uint64_t pctr;
	bool idle;

	if (panicstr != NULL) {
		return;
	}

	LOCKDEBUG_BARRIER(kernel_lock, 1);

	/*
	 * Avoid disabling/re-enabling preemption here since this
	 * routine may be called in delicate situations.
	 */
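	/*
	 * lwp_pctr() counts this LWP's context switches: if it changed
	 * across the sample, we may have moved to another CPU and the
	 * idle check could be stale, so resample.
	 */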
	do {
		pctr = lwp_pctr();
		idle = CURCPU_IDLE_P();
	} while (pctr != lwp_pctr());

	reason = NULL;
	if (idle && !cold &&
	    kcpuset_isset(kcpuset_running, cpu_index(curcpu()))) {
		reason = "idle";
	}
	if (cpu_intr_p()) {
		reason = "interrupt";
	}
	if (cpu_softintr_p()) {
		reason = "softint";
	}

	if (reason) {
		panic("%s: %s caller=%p", __func__, reason,
		    (void *)RETURN_ADDRESS);
	}
}

/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

#define	_KERNEL_LOCK_ABORT(msg)						\
    LOCKDEBUG_ABORT(kernel_lock, &_kernel_lock_ops, __func__, msg)

#ifdef LOCKDEBUG
#define	_KERNEL_LOCK_ASSERT(cond)					\
do {									\
	if (!(cond))							\
		_KERNEL_LOCK_ABORT("assertion failed: " #cond);		\
} while (/* CONSTCOND */ 0)
#else
#define	_KERNEL_LOCK_ASSERT(cond)	/* nothing */
#endif

void	_kernel_lock_dump(volatile void *);

lockops_t _kernel_lock_ops = {
	"Kernel lock",
	LOCKOPS_SPIN,
	_kernel_lock_dump
};

/*
 * Initialize the kernel lock.
 */
void
kernel_lock_init(void)
{

	__cpu_simple_lock_init(kernel_lock);
	kernel_lock_dodebug = LOCKDEBUG_ALLOC(kernel_lock, &_kernel_lock_ops,
	    RETURN_ADDRESS);
}
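/* The padding on kernel_lock above relies on this holding. */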
CTASSERT(CACHE_LINE_SIZE >= sizeof(__cpu_simple_lock_t));

/*
 * Print debugging information about the kernel lock.
 */
void
_kernel_lock_dump(volatile void *junk)
{
	struct cpu_info *ci = curcpu();

	(void)junk;

	printf_nolog("curcpu holds : %18d wanted by: %#018lx\n",
	    ci->ci_biglock_count, (long)ci->ci_biglock_wanted);
}

/*
 * Acquire 'nlocks' holds on the kernel lock.
 */
void
_kernel_lock(int nlocks)
{
	struct cpu_info *ci;
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_FLAG(lsflag);
	struct lwp *owant;
	u_int spins;
	int s;
	struct lwp *l = curlwp;

	_KERNEL_LOCK_ASSERT(nlocks > 0);

	s = splvm();
	ci = curcpu();
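	/* Already held on this CPU: just add to the hold counts. */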
	if (ci->ci_biglock_count != 0) {
		_KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(kernel_lock));
		ci->ci_biglock_count += nlocks;
		l->l_blcnt += nlocks;
		splx(s);
		return;
	}

	_KERNEL_LOCK_ASSERT(l->l_blcnt == 0);
	LOCKDEBUG_WANTLOCK(kernel_lock_dodebug, kernel_lock, RETURN_ADDRESS,
	    0);

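	/* Fast path: try to take the lock without contention. */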
	if (__cpu_simple_lock_try(kernel_lock)) {
		ci->ci_biglock_count = nlocks;
		l->l_blcnt = nlocks;
		LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, NULL,
		    RETURN_ADDRESS, 0);
		splx(s);
		return;
	}

	/*
	 * To remove the ordering constraint between adaptive mutexes
	 * and kernel_lock we must make it appear as if this thread is
	 * blocking.  For non-interlocked mutex release, a store fence
	 * is required to ensure that the result of any mutex_exit()
	 * by the current LWP becomes visible on the bus before the set
	 * of ci->ci_biglock_wanted becomes visible.
	 */
	membar_producer();
	owant = ci->ci_biglock_wanted;
	ci->ci_biglock_wanted = l;

	/*
	 * Spin until we acquire the lock.  Once we have it, record the
	 * time spent with lockstat.
	 */
	LOCKSTAT_ENTER(lsflag);
	LOCKSTAT_START_TIMER(lsflag, spintime);

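	/*
	 * Drop back to the saved IPL while spinning so that interrupts
	 * are not held off, and raise it again before each attempt to
	 * take the lock.
	 */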
	spins = 0;
	do {
		splx(s);
		while (__SIMPLELOCK_LOCKED_P(kernel_lock)) {
			if (SPINLOCK_SPINOUT(spins)) {
				extern int start_init_exec;
				if (!start_init_exec)
					_KERNEL_LOCK_ABORT("spinout");
			}
			SPINLOCK_BACKOFF_HOOK;
			SPINLOCK_SPIN_HOOK;
		}
		s = splvm();
	} while (!__cpu_simple_lock_try(kernel_lock));

	ci->ci_biglock_count = nlocks;
	l->l_blcnt = nlocks;
	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, NULL,
	    RETURN_ADDRESS, 0);
	if (owant == NULL) {
		LOCKSTAT_EVENT_RA(lsflag, kernel_lock,
		    LB_KERNEL_LOCK | LB_SPIN, 1, spintime, RETURN_ADDRESS);
	}
	LOCKSTAT_EXIT(lsflag);
	splx(s);

	/*
	 * Now that we have kernel_lock, reset ci_biglock_wanted.  This
	 * store must be unbuffered (immediately visible on the bus) in
	 * order for non-interlocked mutex release to work correctly.
	 * It must be visible before a mutex_exit() can execute on this
	 * processor.
	 *
	 * Note: only where CAS is available in hardware will this be
	 * an unbuffered write, but non-interlocked release cannot be
	 * done on CPUs without CAS in hardware.
	 */
	(void)atomic_swap_ptr(&ci->ci_biglock_wanted, owant);

	/*
	 * Issue a memory barrier as we have acquired a lock.  This also
	 * prevents stores from a following mutex_exit() being reordered
	 * to occur before our store to ci_biglock_wanted above.
	 */
	membar_enter();
}

/*
 * Release 'nlocks' holds on the kernel lock.  If 'nlocks' is zero,
 * release all holds; if -1, release exactly one hold, asserting that
 * the calling LWP holds exactly one.
 */
void
_kernel_unlock(int nlocks, int *countp)
{
	struct cpu_info *ci;
	u_int olocks;
	int s;
	struct lwp *l = curlwp;

	_KERNEL_LOCK_ASSERT(nlocks < 2);

	olocks = l->l_blcnt;

	if (olocks == 0) {
		_KERNEL_LOCK_ASSERT(nlocks <= 0);
		if (countp != NULL)
			*countp = 0;
		return;
	}

	_KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(kernel_lock));

	if (nlocks == 0)
		nlocks = olocks;
	else if (nlocks == -1) {
		nlocks = 1;
		_KERNEL_LOCK_ASSERT(olocks == 1);
	}
	s = splvm();
	ci = curcpu();
	_KERNEL_LOCK_ASSERT(ci->ci_biglock_count >= l->l_blcnt);
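	/* Dropping the last hold on this CPU: release the spinlock itself. */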
	if (ci->ci_biglock_count == nlocks) {
		LOCKDEBUG_UNLOCKED(kernel_lock_dodebug, kernel_lock,
		    RETURN_ADDRESS, 0);
		ci->ci_biglock_count = 0;
		__cpu_simple_unlock(kernel_lock);
		l->l_blcnt -= nlocks;
		splx(s);
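		/* The big lock is gone; take any deferred preemption now. */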
		if (l->l_dopreempt)
			kpreempt(0);
	} else {
		ci->ci_biglock_count -= nlocks;
		l->l_blcnt -= nlocks;
		splx(s);
	}

	if (countp != NULL)
		*countp = olocks;
}

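/*
 * Report whether the kernel lock is currently held.
 */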
bool
_kernel_locked_p(void)
{
	return __SIMPLELOCK_LOCKED_P(kernel_lock);
}