1 | /* $NetBSD: subr_lockdebug.c,v 1.54 2015/09/29 01:44:57 ozaki-r Exp $ */ |
2 | |
3 | /*- |
4 | * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc. |
5 | * All rights reserved. |
6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Andrew Doran. |
9 | * |
10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions |
12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. |
18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ |
31 | |
32 | /* |
33 | * Basic lock debugging code shared among lock primitives. |
34 | */ |
35 | |
36 | #include <sys/cdefs.h> |
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.54 2015/09/29 01:44:57 ozaki-r Exp $");
38 | |
39 | #ifdef _KERNEL_OPT |
40 | #include "opt_ddb.h" |
41 | #endif |
42 | |
43 | #include <sys/param.h> |
44 | #include <sys/proc.h> |
45 | #include <sys/systm.h> |
46 | #include <sys/kernel.h> |
47 | #include <sys/kmem.h> |
48 | #include <sys/lockdebug.h> |
49 | #include <sys/sleepq.h> |
50 | #include <sys/cpu.h> |
51 | #include <sys/atomic.h> |
52 | #include <sys/lock.h> |
53 | #include <sys/rbtree.h> |
54 | |
55 | #include <machine/lock.h> |
56 | |
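/*
 * ld_panic is bumped (atomically) once a lockdebug error has been
 * reported; all further lockdebug checks are then skipped, since the
 * recorded state can no longer be trusted.
 */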
57 | unsigned int ld_panic; |
58 | |
59 | #ifdef LOCKDEBUG |
60 | |
61 | #define LD_BATCH_SHIFT 9 |
62 | #define LD_BATCH (1 << LD_BATCH_SHIFT) |
63 | #define LD_BATCH_MASK (LD_BATCH - 1) |
64 | #define LD_MAX_LOCKS 1048576 |
65 | #define LD_SLOP 16 |
66 | |
67 | #define LD_LOCKED 0x01 |
68 | #define LD_SLEEPER 0x02 |
69 | |
70 | #define LD_WRITE_LOCK 0x80000000 |
71 | |
72 | typedef struct lockdebug { |
73 | struct rb_node ld_rb_node; |
74 | __cpu_simple_lock_t ld_spinlock; |
75 | _TAILQ_ENTRY(struct lockdebug, volatile) ld_chain; |
76 | _TAILQ_ENTRY(struct lockdebug, volatile) ld_achain; |
77 | volatile void *ld_lock; |
78 | lockops_t *ld_lockops; |
79 | struct lwp *ld_lwp; |
80 | uintptr_t ld_locked; |
81 | uintptr_t ld_unlocked; |
82 | uintptr_t ld_initaddr; |
83 | uint16_t ld_shares; |
84 | uint16_t ld_cpu; |
85 | uint8_t ld_flags; |
86 | uint8_t ld_shwant; /* advisory */ |
87 | uint8_t ld_exwant; /* advisory */ |
88 | uint8_t ld_unused; |
89 | } volatile lockdebug_t; |
90 | |
91 | typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t; |
92 | |
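/*
 * Global lockdebug state: ld_mod_lk serializes modifications, ld_free
 * holds unused structures, ld_all links every structure for the DDB
 * "show lock" command, and ld_prime is the statically allocated pool
 * used before kmem_alloc() is available.
 */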
93 | __cpu_simple_lock_t ld_mod_lk; |
94 | lockdebuglist_t ld_free = TAILQ_HEAD_INITIALIZER(ld_free); |
95 | lockdebuglist_t ld_all = TAILQ_HEAD_INITIALIZER(ld_all); |
96 | int ld_nfree; |
97 | int ld_freeptr; |
98 | int ld_recurse; |
99 | bool ld_nomore; |
100 | lockdebug_t ld_prime[LD_BATCH]; |
101 | |
102 | static void lockdebug_abort1(lockdebug_t *, int, const char *, |
103 | const char *, bool); |
104 | static int lockdebug_more(int); |
105 | static void lockdebug_init(void); |
106 | static void lockdebug_dump(lockdebug_t *, void (*)(const char *, ...) |
107 | __printflike(1, 2)); |
108 | |
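/*
 * rb-tree comparison callbacks: lockdebug structures are keyed by the
 * address of the lock they describe.
 */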
109 | static signed int |
110 | ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2) |
111 | { |
112 | const lockdebug_t *ld1 = n1; |
113 | const lockdebug_t *ld2 = n2; |
114 | const uintptr_t a = (uintptr_t)ld1->ld_lock; |
115 | const uintptr_t b = (uintptr_t)ld2->ld_lock; |
116 | |
117 | if (a < b) |
118 | return -1; |
119 | if (a > b) |
120 | return 1; |
121 | return 0; |
122 | } |
123 | |
124 | static signed int |
125 | ld_rbto_compare_key(void *ctx, const void *n, const void *key) |
126 | { |
127 | const lockdebug_t *ld = n; |
128 | const uintptr_t a = (uintptr_t)ld->ld_lock; |
129 | const uintptr_t b = (uintptr_t)key; |
130 | |
131 | if (a < b) |
132 | return -1; |
133 | if (a > b) |
134 | return 1; |
135 | return 0; |
136 | } |
137 | |
138 | static rb_tree_t ld_rb_tree; |
139 | |
140 | static const rb_tree_ops_t ld_rb_tree_ops = { |
141 | .rbto_compare_nodes = ld_rbto_compare_nodes, |
142 | .rbto_compare_key = ld_rbto_compare_key, |
143 | .rbto_node_offset = offsetof(lockdebug_t, ld_rb_node), |
144 | .rbto_context = NULL |
145 | }; |
146 | |
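/*
 * lockdebug_lookup1:
 *
 * Look up the lockdebug structure for a lock address and return it with
 * its spinlock held, or NULL if the lock is not tracked.  Holding the
 * per-CPU lock here excludes concurrent tree modification by
 * lockdebug_lock_cpus().
 */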
147 | static inline lockdebug_t * |
148 | lockdebug_lookup1(volatile void *lock) |
149 | { |
150 | lockdebug_t *ld; |
151 | struct cpu_info *ci; |
152 | |
153 | ci = curcpu(); |
154 | __cpu_simple_lock(&ci->ci_data.cpu_ld_lock); |
155 | ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock)); |
156 | __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock); |
157 | if (ld == NULL) { |
158 | return NULL; |
159 | } |
160 | __cpu_simple_lock(&ld->ld_spinlock); |
161 | |
162 | return ld; |
163 | } |
164 | |
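/*
 * lockdebug_lock_cpus:
 *
 * Take the lockdebug lock on every CPU, preventing any concurrent
 * lookups while the rb-tree is modified.
 */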
165 | static void |
166 | lockdebug_lock_cpus(void) |
167 | { |
168 | CPU_INFO_ITERATOR cii; |
169 | struct cpu_info *ci; |
170 | |
171 | for (CPU_INFO_FOREACH(cii, ci)) { |
172 | __cpu_simple_lock(&ci->ci_data.cpu_ld_lock); |
173 | } |
174 | } |
175 | |
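/*
 * lockdebug_unlock_cpus:
 *
 * Release the per-CPU locks taken by lockdebug_lock_cpus().
 */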
176 | static void |
177 | lockdebug_unlock_cpus(void) |
178 | { |
179 | CPU_INFO_ITERATOR cii; |
180 | struct cpu_info *ci; |
181 | |
182 | for (CPU_INFO_FOREACH(cii, ci)) { |
183 | __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock); |
184 | } |
185 | } |
186 | |
187 | /* |
188 | * lockdebug_lookup: |
189 | * |
190 | * Find a lockdebug structure by a pointer to a lock and return it locked. |
191 | */ |
192 | static inline lockdebug_t * |
193 | lockdebug_lookup(volatile void *lock, uintptr_t where) |
194 | { |
195 | lockdebug_t *ld; |
196 | |
197 | ld = lockdebug_lookup1(lock); |
198 | if (ld == NULL) { |
199 | panic("lockdebug_lookup: uninitialized lock " |
		    "(lock=%p, from=%08" PRIxPTR ")", lock, where);
201 | } |
202 | return ld; |
203 | } |
204 | |
205 | /* |
206 | * lockdebug_init: |
207 | * |
208 | * Initialize the lockdebug system. Allocate an initial pool of |
209 | * lockdebug structures before the VM system is up and running. |
210 | */ |
211 | static void |
212 | lockdebug_init(void) |
213 | { |
214 | lockdebug_t *ld; |
215 | int i; |
216 | |
217 | TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks); |
218 | TAILQ_INIT(&curlwp->l_ld_locks); |
219 | __cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock); |
220 | __cpu_simple_lock_init(&ld_mod_lk); |
221 | |
222 | rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops); |
223 | |
224 | ld = ld_prime; |
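	/* Entry zero of the static pool is deliberately left unused. */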
225 | for (i = 1, ld++; i < LD_BATCH; i++, ld++) { |
226 | __cpu_simple_lock_init(&ld->ld_spinlock); |
227 | TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain); |
228 | TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain); |
229 | } |
230 | ld_freeptr = 1; |
231 | ld_nfree = LD_BATCH - 1; |
232 | } |
233 | |
234 | /* |
235 | * lockdebug_alloc: |
236 | * |
237 | * A lock is being initialized, so allocate an associated debug |
238 | * structure. |
239 | */ |
240 | bool |
241 | lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr) |
242 | { |
243 | struct cpu_info *ci; |
244 | lockdebug_t *ld; |
245 | int s; |
246 | |
247 | if (lo == NULL || panicstr != NULL || ld_panic) |
248 | return false; |
249 | if (ld_freeptr == 0) |
250 | lockdebug_init(); |
251 | |
252 | s = splhigh(); |
253 | __cpu_simple_lock(&ld_mod_lk); |
254 | if ((ld = lockdebug_lookup1(lock)) != NULL) { |
255 | __cpu_simple_unlock(&ld_mod_lk); |
		lockdebug_abort1(ld, s, __func__, "already initialized", true);
257 | return false; |
258 | } |
259 | |
260 | /* |
261 | * Pinch a new debug structure. We may recurse because we call |
262 | * kmem_alloc(), which may need to initialize new locks somewhere |
263 | * down the path. If not recursing, we try to maintain at least |
264 | * LD_SLOP structures free, which should hopefully be enough to |
265 | * satisfy kmem_alloc(). If we can't provide a structure, not to |
266 | * worry: we'll just mark the lock as not having an ID. |
267 | */ |
268 | ci = curcpu(); |
269 | ci->ci_lkdebug_recurse++; |
270 | if (TAILQ_EMPTY(&ld_free)) { |
271 | if (ci->ci_lkdebug_recurse > 1 || ld_nomore) { |
272 | ci->ci_lkdebug_recurse--; |
273 | __cpu_simple_unlock(&ld_mod_lk); |
274 | splx(s); |
275 | return false; |
276 | } |
277 | s = lockdebug_more(s); |
278 | } else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) { |
279 | s = lockdebug_more(s); |
280 | } |
281 | if ((ld = TAILQ_FIRST(&ld_free)) == NULL) { |
282 | __cpu_simple_unlock(&ld_mod_lk); |
283 | splx(s); |
284 | return false; |
285 | } |
286 | TAILQ_REMOVE(&ld_free, ld, ld_chain); |
287 | ld_nfree--; |
288 | ci->ci_lkdebug_recurse--; |
289 | |
290 | if (ld->ld_lock != NULL) { |
		panic("lockdebug_alloc: corrupt table ld %p", ld);
292 | } |
293 | |
294 | /* Initialise the structure. */ |
295 | ld->ld_lock = lock; |
296 | ld->ld_lockops = lo; |
297 | ld->ld_locked = 0; |
298 | ld->ld_unlocked = 0; |
299 | ld->ld_lwp = NULL; |
300 | ld->ld_initaddr = initaddr; |
301 | ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0); |
302 | lockdebug_lock_cpus(); |
303 | (void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld)); |
304 | lockdebug_unlock_cpus(); |
305 | __cpu_simple_unlock(&ld_mod_lk); |
306 | |
307 | splx(s); |
308 | return true; |
309 | } |
310 | |
311 | /* |
312 | * lockdebug_free: |
313 | * |
314 | * A lock is being destroyed, so release debugging resources. |
315 | */ |
316 | void |
317 | lockdebug_free(volatile void *lock) |
318 | { |
319 | lockdebug_t *ld; |
320 | int s; |
321 | |
322 | if (panicstr != NULL || ld_panic) |
323 | return; |
324 | |
325 | s = splhigh(); |
326 | __cpu_simple_lock(&ld_mod_lk); |
327 | ld = lockdebug_lookup(lock, (uintptr_t) __builtin_return_address(0)); |
328 | if (ld == NULL) { |
329 | __cpu_simple_unlock(&ld_mod_lk); |
		panic("lockdebug_free: destroying uninitialized object %p"
		    "(ld_lock=%p)", lock, ld->ld_lock);
332 | return; |
333 | } |
334 | if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) { |
335 | __cpu_simple_unlock(&ld_mod_lk); |
		lockdebug_abort1(ld, s, __func__, "is locked or in use", true);
337 | return; |
338 | } |
339 | lockdebug_lock_cpus(); |
340 | rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld)); |
341 | lockdebug_unlock_cpus(); |
342 | ld->ld_lock = NULL; |
343 | TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain); |
344 | ld_nfree++; |
345 | __cpu_simple_unlock(&ld->ld_spinlock); |
346 | __cpu_simple_unlock(&ld_mod_lk); |
347 | splx(s); |
348 | } |
349 | |
350 | /* |
351 | * lockdebug_more: |
352 | * |
353 | * Allocate a batch of debug structures and add to the free list. |
354 | * Must be called with ld_mod_lk held. |
355 | */ |
356 | static int |
357 | lockdebug_more(int s) |
358 | { |
359 | lockdebug_t *ld; |
360 | void *block; |
361 | int i, base, m; |
362 | |
363 | /* |
364 | * Can't call kmem_alloc() if in interrupt context. XXX We could |
365 | * deadlock, because we don't know which locks the caller holds. |
366 | */ |
367 | if (cpu_intr_p() || (curlwp->l_pflag & LP_INTR) != 0) { |
368 | return s; |
369 | } |
370 | |
371 | while (ld_nfree < LD_SLOP) { |
372 | __cpu_simple_unlock(&ld_mod_lk); |
373 | splx(s); |
374 | block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP); |
375 | s = splhigh(); |
376 | __cpu_simple_lock(&ld_mod_lk); |
377 | |
378 | if (block == NULL) |
379 | return s; |
380 | |
381 | if (ld_nfree > LD_SLOP) { |
382 | /* Somebody beat us to it. */ |
383 | __cpu_simple_unlock(&ld_mod_lk); |
384 | splx(s); |
385 | kmem_free(block, LD_BATCH * sizeof(lockdebug_t)); |
386 | s = splhigh(); |
387 | __cpu_simple_lock(&ld_mod_lk); |
388 | continue; |
389 | } |
390 | |
391 | base = ld_freeptr; |
392 | ld_nfree += LD_BATCH; |
393 | ld = block; |
394 | base <<= LD_BATCH_SHIFT; |
395 | m = min(LD_MAX_LOCKS, base + LD_BATCH); |
396 | |
397 | if (m == LD_MAX_LOCKS) |
398 | ld_nomore = true; |
399 | |
400 | for (i = base; i < m; i++, ld++) { |
401 | __cpu_simple_lock_init(&ld->ld_spinlock); |
402 | TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain); |
403 | TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain); |
404 | } |
405 | |
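		/*
		 * Order the stores that initialized the new entries ahead
		 * of any later stores.
		 */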
406 | membar_producer(); |
407 | } |
408 | |
409 | return s; |
410 | } |
411 | |
412 | /* |
413 | * lockdebug_wantlock: |
414 | * |
415 | * Process the preamble to a lock acquire. |
416 | */ |
417 | void |
418 | lockdebug_wantlock(volatile void *lock, uintptr_t where, int shared) |
419 | { |
420 | struct lwp *l = curlwp; |
421 | lockdebug_t *ld; |
422 | bool recurse; |
423 | int s; |
424 | |
425 | (void)shared; |
426 | recurse = false; |
427 | |
428 | if (panicstr != NULL || ld_panic) |
429 | return; |
430 | |
431 | s = splhigh(); |
432 | if ((ld = lockdebug_lookup(lock, where)) == NULL) { |
433 | splx(s); |
434 | return; |
435 | } |
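	/*
	 * Detect recursion: a sleep lock already held by this LWP, or a
	 * spin lock already held by this CPU.
	 */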
436 | if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) { |
437 | if ((ld->ld_flags & LD_SLEEPER) != 0) { |
438 | if (ld->ld_lwp == l) |
439 | recurse = true; |
440 | } else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu())) |
441 | recurse = true; |
442 | } |
443 | if (cpu_intr_p()) { |
444 | if ((ld->ld_flags & LD_SLEEPER) != 0) { |
445 | lockdebug_abort1(ld, s, __func__, |
			    "acquiring sleep lock from interrupt context",
447 | true); |
448 | return; |
449 | } |
450 | } |
451 | if (shared) |
452 | ld->ld_shwant++; |
453 | else |
454 | ld->ld_exwant++; |
455 | if (recurse) { |
		lockdebug_abort1(ld, s, __func__, "locking against myself",
457 | true); |
458 | return; |
459 | } |
460 | __cpu_simple_unlock(&ld->ld_spinlock); |
461 | splx(s); |
462 | } |
463 | |
464 | /* |
465 | * lockdebug_locked: |
466 | * |
467 | * Process a lock acquire operation. |
468 | */ |
469 | void |
470 | lockdebug_locked(volatile void *lock, void *cvlock, uintptr_t where, |
471 | int shared) |
472 | { |
473 | struct lwp *l = curlwp; |
474 | lockdebug_t *ld; |
475 | int s; |
476 | |
477 | if (panicstr != NULL || ld_panic) |
478 | return; |
479 | |
480 | s = splhigh(); |
481 | if ((ld = lockdebug_lookup(lock, where)) == NULL) { |
482 | splx(s); |
483 | return; |
484 | } |
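	/*
	 * For condition variables, record the interlock on the first wait
	 * and check that every subsequent waiter passes the same interlock
	 * (lbolt is exempt).
	 */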
485 | if (cvlock) { |
486 | KASSERT(ld->ld_lockops->lo_type == LOCKOPS_CV); |
487 | if (lock == (void *)&lbolt) { |
488 | /* nothing */ |
489 | } else if (ld->ld_shares++ == 0) { |
490 | ld->ld_locked = (uintptr_t)cvlock; |
491 | } else if (cvlock != (void *)ld->ld_locked) { |
492 | lockdebug_abort1(ld, s, __func__, "multiple locks used" |
			    " with condition variable", true);
494 | return; |
495 | } |
496 | } else if (shared) { |
497 | l->l_shlocks++; |
498 | ld->ld_locked = where; |
499 | ld->ld_shares++; |
500 | ld->ld_shwant--; |
501 | } else { |
502 | if ((ld->ld_flags & LD_LOCKED) != 0) { |
			lockdebug_abort1(ld, s, __func__, "already locked",
504 | true); |
505 | return; |
506 | } |
507 | ld->ld_flags |= LD_LOCKED; |
508 | ld->ld_locked = where; |
509 | ld->ld_exwant--; |
510 | if ((ld->ld_flags & LD_SLEEPER) != 0) { |
511 | TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain); |
512 | } else { |
513 | TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks, |
514 | ld, ld_chain); |
515 | } |
516 | } |
517 | ld->ld_cpu = (uint16_t)cpu_index(curcpu()); |
518 | ld->ld_lwp = l; |
519 | __cpu_simple_unlock(&ld->ld_spinlock); |
520 | splx(s); |
521 | } |
522 | |
523 | /* |
524 | * lockdebug_unlocked: |
525 | * |
526 | * Process a lock release operation. |
527 | */ |
528 | void |
529 | lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared) |
530 | { |
531 | struct lwp *l = curlwp; |
532 | lockdebug_t *ld; |
533 | int s; |
534 | |
535 | if (panicstr != NULL || ld_panic) |
536 | return; |
537 | |
538 | s = splhigh(); |
539 | if ((ld = lockdebug_lookup(lock, where)) == NULL) { |
540 | splx(s); |
541 | return; |
542 | } |
543 | if (ld->ld_lockops->lo_type == LOCKOPS_CV) { |
544 | if (lock == (void *)&lbolt) { |
545 | /* nothing */ |
546 | } else { |
547 | ld->ld_shares--; |
548 | } |
549 | } else if (shared) { |
550 | if (l->l_shlocks == 0) { |
551 | lockdebug_abort1(ld, s, __func__, |
			    "no shared locks held by LWP", true);
553 | return; |
554 | } |
555 | if (ld->ld_shares == 0) { |
556 | lockdebug_abort1(ld, s, __func__, |
			    "no shared holds on this lock", true);
558 | return; |
559 | } |
560 | l->l_shlocks--; |
561 | ld->ld_shares--; |
562 | if (ld->ld_lwp == l) { |
563 | ld->ld_unlocked = where; |
564 | ld->ld_lwp = NULL; |
565 | } |
566 | if (ld->ld_cpu == (uint16_t)cpu_index(curcpu())) |
567 | ld->ld_cpu = (uint16_t)-1; |
568 | } else { |
569 | if ((ld->ld_flags & LD_LOCKED) == 0) { |
			lockdebug_abort1(ld, s, __func__, "not locked", true);
571 | return; |
572 | } |
573 | |
574 | if ((ld->ld_flags & LD_SLEEPER) != 0) { |
575 | if (ld->ld_lwp != curlwp) { |
576 | lockdebug_abort1(ld, s, __func__, |
				    "not held by current LWP", true);
578 | return; |
579 | } |
580 | TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain); |
581 | } else { |
582 | if (ld->ld_cpu != (uint16_t)cpu_index(curcpu())) { |
583 | lockdebug_abort1(ld, s, __func__, |
				    "not held by current CPU", true);
585 | return; |
586 | } |
587 | TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld, |
588 | ld_chain); |
589 | } |
590 | ld->ld_flags &= ~LD_LOCKED; |
591 | ld->ld_unlocked = where; |
592 | ld->ld_lwp = NULL; |
593 | } |
594 | __cpu_simple_unlock(&ld->ld_spinlock); |
595 | splx(s); |
596 | } |
597 | |
598 | /* |
599 | * lockdebug_wakeup: |
600 | * |
601 | * Process a wakeup on a condition variable. |
602 | */ |
603 | void |
604 | lockdebug_wakeup(volatile void *lock, uintptr_t where) |
605 | { |
606 | lockdebug_t *ld; |
607 | int s; |
608 | |
609 | if (panicstr != NULL || ld_panic || lock == (void *)&lbolt) |
610 | return; |
611 | |
612 | s = splhigh(); |
613 | /* Find the CV... */ |
614 | if ((ld = lockdebug_lookup(lock, where)) == NULL) { |
615 | splx(s); |
616 | return; |
617 | } |
618 | /* |
619 | * If it has any waiters, ensure that they are using the |
620 | * same interlock. |
621 | */ |
622 | if (ld->ld_shares != 0 && !mutex_owned((kmutex_t *)ld->ld_locked)) { |
623 | lockdebug_abort1(ld, s, __func__, "interlocking mutex not " |
		    "held during wakeup", true);
625 | return; |
626 | } |
627 | __cpu_simple_unlock(&ld->ld_spinlock); |
628 | splx(s); |
629 | } |
630 | |
631 | /* |
632 | * lockdebug_barrier: |
633 | * |
 * Panic if we hold any spin lock other than the one specified, and,
 * unless sleep locks are explicitly permitted, if we hold any sleep
 * or shared locks.
636 | */ |
637 | void |
638 | lockdebug_barrier(volatile void *spinlock, int slplocks) |
639 | { |
640 | struct lwp *l = curlwp; |
641 | lockdebug_t *ld; |
642 | int s; |
643 | |
644 | if (panicstr != NULL || ld_panic) |
645 | return; |
646 | |
647 | s = splhigh(); |
648 | if ((l->l_pflag & LP_INTR) == 0) { |
649 | TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) { |
650 | if (ld->ld_lock == spinlock) { |
651 | continue; |
652 | } |
653 | __cpu_simple_lock(&ld->ld_spinlock); |
654 | lockdebug_abort1(ld, s, __func__, |
			    "spin lock held", true);
656 | return; |
657 | } |
658 | } |
659 | if (slplocks) { |
660 | splx(s); |
661 | return; |
662 | } |
663 | if ((ld = TAILQ_FIRST(&l->l_ld_locks)) != NULL) { |
664 | __cpu_simple_lock(&ld->ld_spinlock); |
		lockdebug_abort1(ld, s, __func__, "sleep lock held", true);
666 | return; |
667 | } |
668 | splx(s); |
669 | if (l->l_shlocks != 0) { |
670 | TAILQ_FOREACH(ld, &ld_all, ld_achain) { |
671 | if (ld->ld_lockops->lo_type == LOCKOPS_CV) |
672 | continue; |
673 | if (ld->ld_lwp == l) |
674 | lockdebug_dump(ld, printf); |
675 | } |
		panic("%s: holding %d shared locks", __func__, l->l_shlocks);
677 | } |
678 | } |
679 | |
680 | /* |
681 | * lockdebug_mem_check: |
682 | * |
683 | * Check for in-use locks within a memory region that is |
684 | * being freed. |
685 | */ |
686 | void |
687 | lockdebug_mem_check(const char *func, void *base, size_t sz) |
688 | { |
689 | lockdebug_t *ld; |
690 | struct cpu_info *ci; |
691 | int s; |
692 | |
693 | if (panicstr != NULL || ld_panic) |
694 | return; |
695 | |
696 | s = splhigh(); |
697 | ci = curcpu(); |
698 | __cpu_simple_lock(&ci->ci_data.cpu_ld_lock); |
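	/*
	 * Find the first tracked lock at or above 'base' and see whether
	 * it falls inside the region being freed.
	 */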
699 | ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base); |
700 | if (ld != NULL) { |
701 | const uintptr_t lock = (uintptr_t)ld->ld_lock; |
702 | |
703 | if ((uintptr_t)base > lock) |
			panic("%s: corrupt tree ld=%p, base=%p, sz=%zu",
705 | __func__, ld, base, sz); |
706 | if (lock >= (uintptr_t)base + sz) |
707 | ld = NULL; |
708 | } |
709 | __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock); |
710 | if (ld != NULL) { |
711 | __cpu_simple_lock(&ld->ld_spinlock); |
712 | lockdebug_abort1(ld, s, func, |
		    "allocation contains active lock", !cold);
714 | return; |
715 | } |
716 | splx(s); |
717 | } |
718 | |
719 | /* |
720 | * lockdebug_dump: |
721 | * |
722 | * Dump information about a lock on panic, or for DDB. |
723 | */ |
724 | static void |
725 | lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...) |
726 | __printflike(1, 2)) |
727 | { |
728 | int sleeper = (ld->ld_flags & LD_SLEEPER); |
729 | |
730 | (*pr)( |
731 | "lock address : %#018lx type : %18s\n" |
	    "initialized : %#018lx",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
734 | (long)ld->ld_initaddr); |
735 | |
736 | if (ld->ld_lockops->lo_type == LOCKOPS_CV) { |
		(*pr)(" interlock: %#018lx\n", (long)ld->ld_locked);
738 | } else { |
739 | (*pr)("\n" |
740 | "shared holds : %18u exclusive: %18u\n" |
741 | "shares wanted: %18u exclusive: %18u\n" |
742 | "current cpu : %18u last held: %18u\n" |
743 | "current lwp : %#018lx last held: %#018lx\n" |
		    "last locked%c : %#018lx unlocked%c: %#018lx\n",
745 | (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0), |
746 | (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant, |
747 | (unsigned)cpu_index(curcpu()), (unsigned)ld->ld_cpu, |
748 | (long)curlwp, (long)ld->ld_lwp, |
749 | ((ld->ld_flags & LD_LOCKED) ? '*' : ' '), |
750 | (long)ld->ld_locked, |
751 | ((ld->ld_flags & LD_LOCKED) ? ' ' : '*'), |
752 | (long)ld->ld_unlocked); |
753 | } |
754 | |
755 | if (ld->ld_lockops->lo_dump != NULL) |
756 | (*ld->ld_lockops->lo_dump)(ld->ld_lock); |
757 | |
758 | if (sleeper) { |
		(*pr)("\n");
760 | turnstile_print(ld->ld_lock, pr); |
761 | } |
762 | } |
763 | |
764 | /* |
765 | * lockdebug_abort1: |
766 | * |
767 | * An error has been trapped - dump lock info and panic. |
768 | */ |
769 | static void |
770 | lockdebug_abort1(lockdebug_t *ld, int s, const char *func, |
771 | const char *msg, bool dopanic) |
772 | { |
773 | |
774 | /* |
775 | * Don't make the situation worse if the system is already going |
776 | * down in flames. Once a panic is triggered, lockdebug state |
777 | * becomes stale and cannot be trusted. |
778 | */ |
779 | if (atomic_inc_uint_nv(&ld_panic) != 1) { |
780 | __cpu_simple_unlock(&ld->ld_spinlock); |
781 | splx(s); |
782 | return; |
783 | } |
784 | |
	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
786 | func, msg); |
787 | lockdebug_dump(ld, printf_nolog); |
788 | __cpu_simple_unlock(&ld->ld_spinlock); |
789 | splx(s); |
	printf_nolog("\n");
791 | if (dopanic) |
		panic("LOCKDEBUG: %s error: %s: %s", ld->ld_lockops->lo_name,
793 | func, msg); |
794 | } |
795 | |
796 | #endif /* LOCKDEBUG */ |
797 | |
798 | /* |
799 | * lockdebug_lock_print: |
800 | * |
801 | * Handle the DDB 'show lock' command. |
802 | */ |
803 | #ifdef DDB |
804 | void |
805 | lockdebug_lock_print(void *addr, void (*pr)(const char *, ...)) |
806 | { |
807 | #ifdef LOCKDEBUG |
808 | lockdebug_t *ld; |
809 | |
810 | TAILQ_FOREACH(ld, &ld_all, ld_achain) { |
811 | if (ld->ld_lock == NULL) |
812 | continue; |
813 | if (addr == NULL || ld->ld_lock == addr) { |
814 | lockdebug_dump(ld, pr); |
815 | if (addr != NULL) |
816 | return; |
817 | } |
818 | } |
819 | if (addr != NULL) { |
		(*pr)("Sorry, no record of a lock with address %p found.\n",
821 | addr); |
822 | } |
823 | #else |
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
825 | #endif /* LOCKDEBUG */ |
826 | } |
827 | #endif /* DDB */ |
828 | |
829 | /* |
830 | * lockdebug_abort: |
831 | * |
832 | * An error has been trapped - dump lock info and call panic(). |
833 | */ |
834 | void |
835 | lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func, |
836 | const char *msg) |
837 | { |
838 | #ifdef LOCKDEBUG |
839 | lockdebug_t *ld; |
840 | int s; |
841 | |
842 | s = splhigh(); |
843 | if ((ld = lockdebug_lookup(lock, |
844 | (uintptr_t) __builtin_return_address(0))) != NULL) { |
845 | lockdebug_abort1(ld, s, func, msg, true); |
846 | return; |
847 | } |
848 | splx(s); |
849 | #endif /* LOCKDEBUG */ |
850 | |
851 | /* |
	 * Complain only on the first occurrence.  Otherwise proceed to
853 | * panic where we will `rendezvous' with other CPUs if the machine |
854 | * is going down in flames. |
855 | */ |
856 | if (atomic_inc_uint_nv(&ld_panic) == 1) { |
857 | printf_nolog("%s error: %s: %s\n\n" |
858 | "lock address : %#018lx\n" |
859 | "current cpu : %18d\n" |
		    "current lwp : %#018lx\n",
861 | ops->lo_name, func, msg, (long)lock, |
862 | (int)cpu_index(curcpu()), (long)curlwp); |
863 | (*ops->lo_dump)(lock); |
		printf_nolog("\n");
865 | } |
866 | |
	panic("lock error: %s: %s: %s: lock %p cpu %d lwp %p",
868 | ops->lo_name, func, msg, lock, cpu_index(curcpu()), curlwp); |
869 | } |
870 | |