1 | /* $NetBSD: kern_lwp.c,v 1.185 2016/07/03 14:24:58 christos Exp $ */ |
2 | |
3 | /*- |
4 | * Copyright (c) 2001, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc. |
5 | * All rights reserved. |
6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Nathan J. Williams, and Andrew Doran. |
9 | * |
10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions |
12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. |
18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ |
31 | |
32 | /* |
33 | * Overview |
34 | * |
35 | * Lightweight processes (LWPs) are the basic unit or thread of |
36 | * execution within the kernel. The core state of an LWP is described |
37 | * by "struct lwp", also known as lwp_t. |
38 | * |
 *	Each LWP is contained within a process (described by "struct proc").
 *	Every process contains at least one LWP, but may contain more.  The
 *	process describes attributes shared among all of its LWPs such as a
 *	private address space, global execution state (stopped, active,
 *	zombie, ...), signal disposition and so on.  On a multiprocessor
 *	machine, multiple LWPs may be executing concurrently in the kernel.
45 | * |
46 | * Execution states |
47 | * |
48 | * At any given time, an LWP has overall state that is described by |
49 | * lwp::l_stat. The states are broken into two sets below. The first |
50 | * set is guaranteed to represent the absolute, current state of the |
51 | * LWP: |
52 | * |
53 | * LSONPROC |
54 | * |
55 | * On processor: the LWP is executing on a CPU, either in the |
56 | * kernel or in user space. |
57 | * |
58 | * LSRUN |
59 | * |
 *		Runnable: the LWP is parked on a run queue, and may soon be
 *		chosen to run by an idle processor, or by a processor that
 *		has been asked to preempt a currently running but lower
 *		priority LWP.
64 | * |
65 | * LSIDL |
66 | * |
67 | * Idle: the LWP has been created but has not yet executed, |
68 | * or it has ceased executing a unit of work and is waiting |
69 | * to be started again. |
70 | * |
 *	LSSUSPENDED
72 | * |
73 | * Suspended: the LWP has had its execution suspended by |
74 | * another LWP in the same process using the _lwp_suspend() |
75 | * system call. User-level LWPs also enter the suspended |
76 | * state when the system is shutting down. |
77 | * |
 *	The second set represents a "statement of intent" on behalf of the
 *	LWP.  The LWP may in fact be executing on a processor, or may be
80 | * sleeping or idle. It is expected to take the necessary action to |
81 | * stop executing or become "running" again within a short timeframe. |
82 | * The LP_RUNNING flag in lwp::l_pflag indicates that an LWP is running. |
83 | * Importantly, it indicates that its state is tied to a CPU. |
84 | * |
85 | * LSZOMB: |
86 | * |
87 | * Dead or dying: the LWP has released most of its resources |
88 | * and is about to switch away into oblivion, or has already |
89 | * switched away. When it switches away, its few remaining |
90 | * resources can be collected. |
91 | * |
92 | * LSSLEEP: |
93 | * |
94 | * Sleeping: the LWP has entered itself onto a sleep queue, and |
95 | * has switched away or will switch away shortly to allow other |
96 | * LWPs to run on the CPU. |
97 | * |
98 | * LSSTOP: |
99 | * |
100 | * Stopped: the LWP has been stopped as a result of a job |
101 | * control signal, or as a result of the ptrace() interface. |
102 | * |
103 | * Stopped LWPs may run briefly within the kernel to handle |
104 | * signals that they receive, but will not return to user space |
105 | * until their process' state is changed away from stopped. |
106 | * |
 *		Individual LWPs within a process cannot be stopped
108 | * selectively: all actions that can stop or continue LWPs |
109 | * occur at the process level. |
110 | * |
111 | * State transitions |
112 | * |
113 | * Note that the LSSTOP state may only be set when returning to |
 *	user space in userret(), or when sleeping interruptibly.  The
115 | * LSSUSPENDED state may only be set in userret(). Before setting |
116 | * those states, we try to ensure that the LWPs will release all |
117 | * locks that they hold, and at a minimum try to ensure that the |
118 | * LWP can be set runnable again by a signal. |
119 | * |
120 | * LWPs may transition states in the following ways: |
121 | * |
 *	RUN -------> ONPROC		ONPROC -----> RUN
 *					            > SLEEP
 *					            > STOPPED
 *					            > SUSPENDED
 *					            > ZOMB
 *					            > IDL (special cases)
 *
 *	STOPPED ---> RUN		SUSPENDED --> RUN
 *	           > SLEEP
 *
 *	SLEEP -----> ONPROC		IDL --------> RUN
 *	           > RUN		            > SUSPENDED
 *	           > STOPPED		            > STOPPED
 *					            > ONPROC (special cases)
136 | * |
 *	Some state transitions are only possible with kernel threads (e.g.
 *	ONPROC -> IDL) and happen under tightly controlled circumstances
 *	free of unwanted side effects.
140 | * |
141 | * Migration |
142 | * |
 *	Migration of threads from one CPU to another may be performed
 *	internally by the scheduler via the sched_takecpu() or
 *	sched_catchlwp() functions.  The universal lwp_migrate() function
 *	should be used for any other cases.  Subsystems in the kernel must
 *	be aware that an LWP's CPU may change while the LWP is not locked.
148 | * |
149 | * Locking |
150 | * |
151 | * The majority of fields in 'struct lwp' are covered by a single, |
152 | * general spin lock pointed to by lwp::l_mutex. The locks covering |
153 | * each field are documented in sys/lwp.h. |
154 | * |
155 | * State transitions must be made with the LWP's general lock held, |
156 | * and may cause the LWP's lock pointer to change. Manipulation of |
157 | * the general lock is not performed directly, but through calls to |
158 | * lwp_lock(), lwp_unlock() and others. It should be noted that the |
159 | * adaptive locks are not allowed to be released while the LWP's lock |
160 | * is being held (unlike for other spin-locks). |
161 | * |
162 | * States and their associated locks: |
163 | * |
164 | * LSONPROC, LSZOMB: |
165 | * |
166 | * Always covered by spc_lwplock, which protects running LWPs. |
167 | * This is a per-CPU lock and matches lwp::l_cpu. |
168 | * |
169 | * LSIDL, LSRUN: |
170 | * |
171 | * Always covered by spc_mutex, which protects the run queues. |
172 | * This is a per-CPU lock and matches lwp::l_cpu. |
173 | * |
174 | * LSSLEEP: |
175 | * |
176 | * Covered by a lock associated with the sleep queue that the |
177 | * LWP resides on. Matches lwp::l_sleepq::sq_mutex. |
178 | * |
179 | * LSSTOP, LSSUSPENDED: |
180 | * |
181 | * If the LWP was previously sleeping (l_wchan != NULL), then |
182 | * l_mutex references the sleep queue lock. If the LWP was |
183 | * runnable or on the CPU when halted, or has been removed from |
184 | * the sleep queue since halted, then the lock is spc_lwplock. |
185 | * |
186 | * The lock order is as follows: |
187 | * |
188 | * spc::spc_lwplock -> |
189 | * sleeptab::st_mutex -> |
190 | * tschain_t::tc_mutex -> |
191 | * spc::spc_mutex |
192 | * |
 *	Each process has a scheduler state lock (proc::p_lock), and a
194 | * number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and |
195 | * so on. When an LWP is to be entered into or removed from one of the |
196 | * following states, p_lock must be held and the process wide counters |
197 | * adjusted: |
198 | * |
199 | * LSIDL, LSZOMB, LSSTOP, LSSUSPENDED |
200 | * |
201 | * (But not always for kernel threads. There are some special cases |
202 | * as mentioned above. See kern_softint.c.) |
203 | * |
204 | * Note that an LWP is considered running or likely to run soon if in |
205 | * one of the following states. This affects the value of p_nrlwps: |
206 | * |
207 | * LSRUN, LSONPROC, LSSLEEP |
208 | * |
209 | * p_lock does not need to be held when transitioning among these |
210 | * three states, hence p_lock is rarely taken for state transitions. |
211 | */ |
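
/*
 * Illustrative sketch (not part of the implementation): given the rules
 * above, the canonical way for another subsystem to examine an LWP's
 * state is through the general lock, which also stabilizes the lock
 * pointer itself:
 *
 *	lwp_lock(l);
 *	if (l->l_stat == LSSLEEP && l->l_wchan != NULL) {
 *		... examine sleep related fields ...
 *	}
 *	lwp_unlock(l);
 *
 * Because a state transition on another CPU may change which mutex
 * covers the LWP, lwp_lock() re-checks l_mutex after acquiring it;
 * lwp_trylock() below performs the same dance explicitly.
 */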
212 | |
213 | #include <sys/cdefs.h> |
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.185 2016/07/03 14:24:58 christos Exp $");
215 | |
216 | #include "opt_ddb.h" |
217 | #include "opt_lockdebug.h" |
218 | #include "opt_dtrace.h" |
219 | |
220 | #define _LWP_API_PRIVATE |
221 | |
222 | #include <sys/param.h> |
223 | #include <sys/systm.h> |
224 | #include <sys/cpu.h> |
225 | #include <sys/pool.h> |
226 | #include <sys/proc.h> |
227 | #include <sys/syscallargs.h> |
228 | #include <sys/syscall_stats.h> |
229 | #include <sys/kauth.h> |
230 | #include <sys/pserialize.h> |
231 | #include <sys/sleepq.h> |
232 | #include <sys/lockdebug.h> |
233 | #include <sys/kmem.h> |
234 | #include <sys/pset.h> |
235 | #include <sys/intr.h> |
236 | #include <sys/lwpctl.h> |
237 | #include <sys/atomic.h> |
238 | #include <sys/filedesc.h> |
239 | #include <sys/dtrace_bsd.h> |
240 | #include <sys/sdt.h> |
241 | #include <sys/xcall.h> |
242 | #include <sys/uidinfo.h> |
243 | #include <sys/sysctl.h> |
244 | |
245 | #include <uvm/uvm_extern.h> |
246 | #include <uvm/uvm_object.h> |
247 | |
248 | static pool_cache_t lwp_cache __read_mostly; |
249 | struct lwplist alllwp __cacheline_aligned; |
250 | |
251 | static void lwp_dtor(void *, void *); |
252 | |
253 | /* DTrace proc provider probes */ |
254 | SDT_PROVIDER_DEFINE(proc); |
255 | |
SDT_PROBE_DEFINE1(proc, kernel, , lwp__create, "struct lwp *");
SDT_PROBE_DEFINE1(proc, kernel, , lwp__start, "struct lwp *");
SDT_PROBE_DEFINE1(proc, kernel, , lwp__exit, "struct lwp *");
259 | |
260 | struct turnstile turnstile0; |
261 | struct lwp lwp0 __aligned(MIN_LWP_ALIGNMENT) = { |
262 | #ifdef LWP0_CPU_INFO |
263 | .l_cpu = LWP0_CPU_INFO, |
264 | #endif |
265 | #ifdef LWP0_MD_INITIALIZER |
266 | .l_md = LWP0_MD_INITIALIZER, |
267 | #endif |
268 | .l_proc = &proc0, |
269 | .l_lid = 1, |
270 | .l_flag = LW_SYSTEM, |
271 | .l_stat = LSONPROC, |
272 | .l_ts = &turnstile0, |
273 | .l_syncobj = &sched_syncobj, |
274 | .l_refcnt = 1, |
275 | .l_priority = PRI_USER + NPRI_USER - 1, |
276 | .l_inheritedprio = -1, |
277 | .l_class = SCHED_OTHER, |
278 | .l_psid = PS_NONE, |
279 | .l_pi_lenders = SLIST_HEAD_INITIALIZER(&lwp0.l_pi_lenders), |
	.l_name = __UNCONST("swapper"),
281 | .l_fd = &filedesc0, |
282 | }; |
283 | |
284 | static int sysctl_kern_maxlwp(SYSCTLFN_PROTO); |
285 | |
286 | /* |
287 | * sysctl helper routine for kern.maxlwp. Ensures that the new |
288 | * values are not too low or too high. |
289 | */ |
290 | static int |
291 | sysctl_kern_maxlwp(SYSCTLFN_ARGS) |
292 | { |
293 | int error, nmaxlwp; |
294 | struct sysctlnode node; |
295 | |
296 | nmaxlwp = maxlwp; |
297 | node = *rnode; |
298 | node.sysctl_data = &nmaxlwp; |
299 | error = sysctl_lookup(SYSCTLFN_CALL(&node)); |
300 | if (error || newp == NULL) |
301 | return error; |
302 | |
303 | if (nmaxlwp < 0 || nmaxlwp >= 65536) |
304 | return EINVAL; |
305 | if (nmaxlwp > cpu_maxlwp()) |
306 | return EINVAL; |
307 | maxlwp = nmaxlwp; |
308 | |
309 | return 0; |
310 | } |
311 | |
312 | static void |
313 | sysctl_kern_lwp_setup(void) |
314 | { |
315 | struct sysctllog *clog = NULL; |
316 | |
317 | sysctl_createv(&clog, 0, NULL, NULL, |
318 | CTLFLAG_PERMANENT|CTLFLAG_READWRITE, |
	    CTLTYPE_INT, "maxlwp",
	    SYSCTL_DESCR("Maximum number of simultaneous threads"),
321 | sysctl_kern_maxlwp, 0, NULL, 0, |
322 | CTL_KERN, CTL_CREATE, CTL_EOL); |
323 | } |
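
/*
 * For example (illustrative, not kernel code), the resulting kern.maxlwp
 * node can be read from userland with sysctlbyname(3):
 *
 *	int val;
 *	size_t len = sizeof(val);
 *
 *	if (sysctlbyname("kern.maxlwp", &val, &len, NULL, 0) == 0)
 *		printf("kern.maxlwp = %d\n", val);
 */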
324 | |
325 | void |
326 | lwpinit(void) |
327 | { |
328 | |
329 | LIST_INIT(&alllwp); |
330 | lwpinit_specificdata(); |
331 | lwp_sys_init(); |
332 | lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0, 0, |
	    "lwppl", NULL, IPL_NONE, NULL, lwp_dtor, NULL);
334 | |
335 | maxlwp = cpu_maxlwp(); |
336 | sysctl_kern_lwp_setup(); |
337 | } |
338 | |
339 | void |
340 | lwp0_init(void) |
341 | { |
342 | struct lwp *l = &lwp0; |
343 | |
344 | KASSERT((void *)uvm_lwp_getuarea(l) != NULL); |
345 | KASSERT(l->l_lid == proc0.p_nlwpid); |
346 | |
347 | LIST_INSERT_HEAD(&alllwp, l, l_list); |
348 | |
349 | callout_init(&l->l_timeout_ch, CALLOUT_MPSAFE); |
350 | callout_setfunc(&l->l_timeout_ch, sleepq_timeout, l); |
	cv_init(&l->l_sigcv, "sigwait");
	cv_init(&l->l_waitcv, "vfork");
353 | |
354 | kauth_cred_hold(proc0.p_cred); |
355 | l->l_cred = proc0.p_cred; |
356 | |
357 | kdtrace_thread_ctor(NULL, l); |
358 | lwp_initspecific(l); |
359 | |
360 | SYSCALL_TIME_LWP_INIT(l); |
361 | } |
362 | |
363 | static void |
364 | lwp_dtor(void *arg, void *obj) |
365 | { |
366 | lwp_t *l = obj; |
367 | uint64_t where; |
368 | (void)l; |
369 | |
370 | /* |
371 | * Provide a barrier to ensure that all mutex_oncpu() and rw_oncpu() |
372 | * calls will exit before memory of LWP is returned to the pool, where |
373 | * KVA of LWP structure might be freed and re-used for other purposes. |
374 | * Kernel preemption is disabled around mutex_oncpu() and rw_oncpu() |
375 | * callers, therefore cross-call to all CPUs will do the job. Also, |
376 | * the value of l->l_cpu must be still valid at this point. |
377 | */ |
378 | KASSERT(l->l_cpu != NULL); |
379 | where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL); |
380 | xc_wait(where); |
381 | } |
382 | |
383 | /* |
 * Set an LWP suspended.
385 | * |
386 | * Must be called with p_lock held, and the LWP locked. Will unlock the |
387 | * LWP before return. |
388 | */ |
389 | int |
390 | lwp_suspend(struct lwp *curl, struct lwp *t) |
391 | { |
392 | int error; |
393 | |
394 | KASSERT(mutex_owned(t->l_proc->p_lock)); |
395 | KASSERT(lwp_locked(t, NULL)); |
396 | |
397 | KASSERT(curl != t || curl->l_stat == LSONPROC); |
398 | |
399 | /* |
400 | * If the current LWP has been told to exit, we must not suspend anyone |
401 | * else or deadlock could occur. We won't return to userspace. |
402 | */ |
403 | if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) { |
404 | lwp_unlock(t); |
405 | return (EDEADLK); |
406 | } |
407 | |
408 | error = 0; |
409 | |
410 | switch (t->l_stat) { |
411 | case LSRUN: |
412 | case LSONPROC: |
413 | t->l_flag |= LW_WSUSPEND; |
414 | lwp_need_userret(t); |
415 | lwp_unlock(t); |
416 | break; |
417 | |
418 | case LSSLEEP: |
419 | t->l_flag |= LW_WSUSPEND; |
420 | |
421 | /* |
422 | * Kick the LWP and try to get it to the kernel boundary |
423 | * so that it will release any locks that it holds. |
424 | * setrunnable() will release the lock. |
425 | */ |
426 | if ((t->l_flag & LW_SINTR) != 0) |
427 | setrunnable(t); |
428 | else |
429 | lwp_unlock(t); |
430 | break; |
431 | |
432 | case LSSUSPENDED: |
433 | lwp_unlock(t); |
434 | break; |
435 | |
436 | case LSSTOP: |
437 | t->l_flag |= LW_WSUSPEND; |
438 | setrunnable(t); |
439 | break; |
440 | |
441 | case LSIDL: |
442 | case LSZOMB: |
443 | error = EINTR; /* It's what Solaris does..... */ |
444 | lwp_unlock(t); |
445 | break; |
446 | } |
447 | |
448 | return (error); |
449 | } |
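
/*
 * A minimal sketch of the expected calling sequence for lwp_suspend(),
 * modelled on the _lwp_suspend() system call; 'lid' stands in for the
 * caller's target LWP ID:
 *
 *	mutex_enter(p->p_lock);
 *	if ((t = lwp_find(p, lid)) == NULL) {
 *		mutex_exit(p->p_lock);
 *		return ESRCH;
 *	}
 *	lwp_lock(t);
 *	error = lwp_suspend(curlwp, t);		(unlocks t)
 *	mutex_exit(p->p_lock);
 */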
450 | |
451 | /* |
452 | * Restart a suspended LWP. |
453 | * |
454 | * Must be called with p_lock held, and the LWP locked. Will unlock the |
455 | * LWP before return. |
456 | */ |
457 | void |
458 | lwp_continue(struct lwp *l) |
459 | { |
460 | |
461 | KASSERT(mutex_owned(l->l_proc->p_lock)); |
462 | KASSERT(lwp_locked(l, NULL)); |
463 | |
464 | /* If rebooting or not suspended, then just bail out. */ |
465 | if ((l->l_flag & LW_WREBOOT) != 0) { |
466 | lwp_unlock(l); |
467 | return; |
468 | } |
469 | |
470 | l->l_flag &= ~LW_WSUSPEND; |
471 | |
472 | if (l->l_stat != LSSUSPENDED) { |
473 | lwp_unlock(l); |
474 | return; |
475 | } |
476 | |
477 | /* setrunnable() will release the lock. */ |
478 | setrunnable(l); |
479 | } |
480 | |
481 | /* |
482 | * Restart a stopped LWP. |
483 | * |
484 | * Must be called with p_lock held, and the LWP NOT locked. Will unlock the |
485 | * LWP before return. |
486 | */ |
487 | void |
488 | lwp_unstop(struct lwp *l) |
489 | { |
490 | struct proc *p = l->l_proc; |
491 | |
492 | KASSERT(mutex_owned(proc_lock)); |
493 | KASSERT(mutex_owned(p->p_lock)); |
494 | |
495 | lwp_lock(l); |
496 | |
497 | /* If not stopped, then just bail out. */ |
498 | if (l->l_stat != LSSTOP) { |
499 | lwp_unlock(l); |
500 | return; |
501 | } |
502 | |
503 | p->p_stat = SACTIVE; |
504 | p->p_sflag &= ~PS_STOPPING; |
505 | |
506 | if (!p->p_waited) |
507 | p->p_pptr->p_nstopchild--; |
508 | |
509 | if (l->l_wchan == NULL) { |
510 | /* setrunnable() will release the lock. */ |
511 | setrunnable(l); |
512 | } else if (p->p_xsig && (l->l_flag & LW_SINTR) != 0) { |
513 | /* setrunnable() so we can receive the signal */ |
514 | setrunnable(l); |
515 | } else { |
516 | l->l_stat = LSSLEEP; |
517 | p->p_nrlwps++; |
518 | lwp_unlock(l); |
519 | } |
520 | } |
521 | |
522 | /* |
523 | * Wait for an LWP within the current process to exit. If 'lid' is |
524 | * non-zero, we are waiting for a specific LWP. |
525 | * |
526 | * Must be called with p->p_lock held. |
527 | */ |
528 | int |
529 | lwp_wait(struct lwp *l, lwpid_t lid, lwpid_t *departed, bool exiting) |
530 | { |
531 | const lwpid_t curlid = l->l_lid; |
532 | proc_t *p = l->l_proc; |
533 | lwp_t *l2; |
534 | int error; |
535 | |
536 | KASSERT(mutex_owned(p->p_lock)); |
537 | |
538 | p->p_nlwpwait++; |
539 | l->l_waitingfor = lid; |
540 | |
541 | for (;;) { |
542 | int nfound; |
543 | |
544 | /* |
545 | * Avoid a race between exit1() and sigexit(): if the |
546 | * process is dumping core, then we need to bail out: call |
547 | * into lwp_userret() where we will be suspended until the |
548 | * deed is done. |
549 | */ |
550 | if ((p->p_sflag & PS_WCORE) != 0) { |
551 | mutex_exit(p->p_lock); |
552 | lwp_userret(l); |
553 | KASSERT(false); |
554 | } |
555 | |
556 | /* |
557 | * First off, drain any detached LWP that is waiting to be |
558 | * reaped. |
559 | */ |
560 | while ((l2 = p->p_zomblwp) != NULL) { |
561 | p->p_zomblwp = NULL; |
562 | lwp_free(l2, false, false);/* releases proc mutex */ |
563 | mutex_enter(p->p_lock); |
564 | } |
565 | |
566 | /* |
567 | * Now look for an LWP to collect. If the whole process is |
568 | * exiting, count detached LWPs as eligible to be collected, |
569 | * but don't drain them here. |
570 | */ |
571 | nfound = 0; |
572 | error = 0; |
573 | LIST_FOREACH(l2, &p->p_lwps, l_sibling) { |
574 | /* |
575 | * If a specific wait and the target is waiting on |
576 | * us, then avoid deadlock. This also traps LWPs |
577 | * that try to wait on themselves. |
578 | * |
579 | * Note that this does not handle more complicated |
580 | * cycles, like: t1 -> t2 -> t3 -> t1. The process |
581 | * can still be killed so it is not a major problem. |
582 | */ |
583 | if (l2->l_lid == lid && l2->l_waitingfor == curlid) { |
584 | error = EDEADLK; |
585 | break; |
586 | } |
587 | if (l2 == l) |
588 | continue; |
589 | if ((l2->l_prflag & LPR_DETACHED) != 0) { |
590 | nfound += exiting; |
591 | continue; |
592 | } |
593 | if (lid != 0) { |
594 | if (l2->l_lid != lid) |
595 | continue; |
596 | /* |
597 | * Mark this LWP as the first waiter, if there |
598 | * is no other. |
599 | */ |
600 | if (l2->l_waiter == 0) |
601 | l2->l_waiter = curlid; |
602 | } else if (l2->l_waiter != 0) { |
603 | /* |
604 | * It already has a waiter - so don't |
605 | * collect it. If the waiter doesn't |
606 | * grab it we'll get another chance |
607 | * later. |
608 | */ |
609 | nfound++; |
610 | continue; |
611 | } |
612 | nfound++; |
613 | |
614 | /* No need to lock the LWP in order to see LSZOMB. */ |
615 | if (l2->l_stat != LSZOMB) |
616 | continue; |
617 | |
618 | /* |
619 | * We're no longer waiting. Reset the "first waiter" |
620 | * pointer on the target, in case it was us. |
621 | */ |
622 | l->l_waitingfor = 0; |
623 | l2->l_waiter = 0; |
624 | p->p_nlwpwait--; |
625 | if (departed) |
626 | *departed = l2->l_lid; |
627 | sched_lwp_collect(l2); |
628 | |
629 | /* lwp_free() releases the proc lock. */ |
630 | lwp_free(l2, false, false); |
631 | mutex_enter(p->p_lock); |
632 | return 0; |
633 | } |
634 | |
635 | if (error != 0) |
636 | break; |
637 | if (nfound == 0) { |
638 | error = ESRCH; |
639 | break; |
640 | } |
641 | |
642 | /* |
643 | * Note: since the lock will be dropped, need to restart on |
644 | * wakeup to run all LWPs again, e.g. there may be new LWPs. |
645 | */ |
646 | if (exiting) { |
647 | KASSERT(p->p_nlwps > 1); |
648 | cv_wait(&p->p_lwpcv, p->p_lock); |
649 | error = EAGAIN; |
650 | break; |
651 | } |
652 | |
653 | /* |
654 | * If all other LWPs are waiting for exits or suspends |
655 | * and the supply of zombies and potential zombies is |
656 | * exhausted, then we are about to deadlock. |
657 | * |
658 | * If the process is exiting (and this LWP is not the one |
659 | * that is coordinating the exit) then bail out now. |
660 | */ |
661 | if ((p->p_sflag & PS_WEXIT) != 0 || |
662 | p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) { |
663 | error = EDEADLK; |
664 | break; |
665 | } |
666 | |
667 | /* |
668 | * Sit around and wait for something to happen. We'll be |
669 | * awoken if any of the conditions examined change: if an |
670 | * LWP exits, is collected, or is detached. |
671 | */ |
672 | if ((error = cv_wait_sig(&p->p_lwpcv, p->p_lock)) != 0) |
673 | break; |
674 | } |
675 | |
676 | /* |
677 | * We didn't find any LWPs to collect, we may have received a |
678 | * signal, or some other condition has caused us to bail out. |
679 | * |
680 | * If waiting on a specific LWP, clear the waiters marker: some |
681 | * other LWP may want it. Then, kick all the remaining waiters |
682 | * so that they can re-check for zombies and for deadlock. |
683 | */ |
684 | if (lid != 0) { |
685 | LIST_FOREACH(l2, &p->p_lwps, l_sibling) { |
686 | if (l2->l_lid == lid) { |
687 | if (l2->l_waiter == curlid) |
688 | l2->l_waiter = 0; |
689 | break; |
690 | } |
691 | } |
692 | } |
693 | p->p_nlwpwait--; |
694 | l->l_waitingfor = 0; |
695 | cv_broadcast(&p->p_lwpcv); |
696 | |
697 | return error; |
698 | } |
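
/*
 * Sketch of a typical lwp_wait() consumer, shaped like the _lwp_wait()
 * system call (see sys_lwp.c for the real thing):
 *
 *	lwpid_t departed;
 *
 *	mutex_enter(p->p_lock);
 *	error = lwp_wait(curlwp, lid, &departed, false);
 *	mutex_exit(p->p_lock);
 */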
699 | |
700 | static lwpid_t |
701 | lwp_find_free_lid(lwpid_t try_lid, lwp_t * new_lwp, proc_t *p) |
702 | { |
703 | #define LID_SCAN (1u << 31) |
704 | lwp_t *scan, *free_before; |
705 | lwpid_t nxt_lid; |
706 | |
707 | /* |
	 * We want the first unused lid greater than or equal to
	 * try_lid (modulo 2^31).  (If nothing else, ld.elf_so doesn't
	 * want an lwpid with the top bit set.)  We must not return 0,
	 * and avoiding 'LID_SCAN - 1' makes the outer test easier.
	 * This would be much easier if the list were sorted in
	 * increasing order; however, it is kept sorted in decreasing
	 * order.  This code is only used once a process has generated
	 * 2^31 lwps.
	 *
	 * Code assumes it can always find an id.
719 | */ |
720 | |
721 | try_lid &= LID_SCAN - 1; |
722 | if (try_lid <= 1) |
723 | try_lid = 2; |
724 | |
725 | free_before = NULL; |
726 | nxt_lid = LID_SCAN - 1; |
727 | LIST_FOREACH(scan, &p->p_lwps, l_sibling) { |
728 | if (scan->l_lid != nxt_lid) { |
729 | /* There are available lid before this entry */ |
730 | free_before = scan; |
731 | if (try_lid > scan->l_lid) |
732 | break; |
733 | } |
734 | if (try_lid == scan->l_lid) { |
735 | /* The ideal lid is busy, take a higher one */ |
736 | if (free_before != NULL) { |
737 | try_lid = free_before->l_lid + 1; |
738 | break; |
739 | } |
740 | /* No higher ones, reuse low numbers */ |
741 | try_lid = 2; |
742 | } |
743 | |
744 | nxt_lid = scan->l_lid - 1; |
745 | if (LIST_NEXT(scan, l_sibling) == NULL) { |
746 | /* The value we have is lower than any existing lwp */ |
747 | LIST_INSERT_AFTER(scan, new_lwp, l_sibling); |
748 | return try_lid; |
749 | } |
750 | } |
751 | |
752 | LIST_INSERT_BEFORE(free_before, new_lwp, l_sibling); |
753 | return try_lid; |
754 | } |
755 | |
756 | /* |
757 | * Create a new LWP within process 'p2', using LWP 'l1' as a template. |
758 | * The new LWP is created in state LSIDL and must be set running, |
759 | * suspended, or stopped by the caller. |
760 | */ |
761 | int |
762 | lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, int flags, |
763 | void *stack, size_t stacksize, void (*func)(void *), void *arg, |
764 | lwp_t **rnewlwpp, int sclass) |
765 | { |
766 | struct lwp *l2, *isfree; |
767 | turnstile_t *ts; |
768 | lwpid_t lid; |
769 | |
770 | KASSERT(l1 == curlwp || l1->l_proc == &proc0); |
771 | |
772 | /* |
773 | * Enforce limits, excluding the first lwp and kthreads. |
774 | */ |
775 | if (p2->p_nlwps != 0 && p2 != &proc0) { |
776 | uid_t uid = kauth_cred_getuid(l1->l_cred); |
777 | int count = chglwpcnt(uid, 1); |
778 | if (__predict_false(count > |
779 | p2->p_rlimit[RLIMIT_NTHR].rlim_cur)) { |
780 | if (kauth_authorize_process(l1->l_cred, |
781 | KAUTH_PROCESS_RLIMIT, p2, |
782 | KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_BYPASS), |
783 | &p2->p_rlimit[RLIMIT_NTHR], KAUTH_ARG(RLIMIT_NTHR)) |
784 | != 0) { |
785 | (void)chglwpcnt(uid, -1); |
786 | return EAGAIN; |
787 | } |
788 | } |
789 | } |
790 | |
791 | /* |
792 | * First off, reap any detached LWP waiting to be collected. |
793 | * We can re-use its LWP structure and turnstile. |
794 | */ |
795 | isfree = NULL; |
796 | if (p2->p_zomblwp != NULL) { |
797 | mutex_enter(p2->p_lock); |
798 | if ((isfree = p2->p_zomblwp) != NULL) { |
799 | p2->p_zomblwp = NULL; |
800 | lwp_free(isfree, true, false);/* releases proc mutex */ |
801 | } else |
802 | mutex_exit(p2->p_lock); |
803 | } |
804 | if (isfree == NULL) { |
805 | l2 = pool_cache_get(lwp_cache, PR_WAITOK); |
806 | memset(l2, 0, sizeof(*l2)); |
807 | l2->l_ts = pool_cache_get(turnstile_cache, PR_WAITOK); |
808 | SLIST_INIT(&l2->l_pi_lenders); |
809 | } else { |
810 | l2 = isfree; |
811 | ts = l2->l_ts; |
812 | KASSERT(l2->l_inheritedprio == -1); |
813 | KASSERT(SLIST_EMPTY(&l2->l_pi_lenders)); |
814 | memset(l2, 0, sizeof(*l2)); |
815 | l2->l_ts = ts; |
816 | } |
817 | |
818 | l2->l_stat = LSIDL; |
819 | l2->l_proc = p2; |
820 | l2->l_refcnt = 1; |
821 | l2->l_class = sclass; |
822 | |
823 | /* |
824 | * If vfork(), we want the LWP to run fast and on the same CPU |
825 | * as its parent, so that it can reuse the VM context and cache |
826 | * footprint on the local CPU. |
827 | */ |
828 | l2->l_kpriority = ((flags & LWP_VFORK) ? true : false); |
829 | l2->l_kpribase = PRI_KERNEL; |
830 | l2->l_priority = l1->l_priority; |
831 | l2->l_inheritedprio = -1; |
832 | l2->l_protectprio = -1; |
833 | l2->l_auxprio = -1; |
834 | l2->l_flag = 0; |
835 | l2->l_pflag = LP_MPSAFE; |
836 | TAILQ_INIT(&l2->l_ld_locks); |
837 | |
838 | /* |
839 | * For vfork, borrow parent's lwpctl context if it exists. |
840 | * This also causes us to return via lwp_userret. |
841 | */ |
842 | if (flags & LWP_VFORK && l1->l_lwpctl) { |
843 | l2->l_lwpctl = l1->l_lwpctl; |
844 | l2->l_flag |= LW_LWPCTL; |
845 | } |
846 | |
847 | /* |
848 | * If not the first LWP in the process, grab a reference to the |
849 | * descriptor table. |
850 | */ |
851 | l2->l_fd = p2->p_fd; |
852 | if (p2->p_nlwps != 0) { |
853 | KASSERT(l1->l_proc == p2); |
854 | fd_hold(l2); |
855 | } else { |
856 | KASSERT(l1->l_proc != p2); |
857 | } |
858 | |
859 | if (p2->p_flag & PK_SYSTEM) { |
860 | /* Mark it as a system LWP. */ |
861 | l2->l_flag |= LW_SYSTEM; |
862 | } |
863 | |
864 | kpreempt_disable(); |
865 | l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex; |
866 | l2->l_cpu = l1->l_cpu; |
867 | kpreempt_enable(); |
868 | |
869 | kdtrace_thread_ctor(NULL, l2); |
870 | lwp_initspecific(l2); |
871 | sched_lwp_fork(l1, l2); |
872 | lwp_update_creds(l2); |
873 | callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE); |
874 | callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2); |
	cv_init(&l2->l_sigcv, "sigwait");
	cv_init(&l2->l_waitcv, "vfork");
877 | l2->l_syncobj = &sched_syncobj; |
878 | |
879 | if (rnewlwpp != NULL) |
880 | *rnewlwpp = l2; |
881 | |
882 | /* |
883 | * PCU state needs to be saved before calling uvm_lwp_fork() so that |
884 | * the MD cpu_lwp_fork() can copy the saved state to the new LWP. |
885 | */ |
886 | pcu_save_all(l1); |
887 | |
888 | uvm_lwp_setuarea(l2, uaddr); |
889 | uvm_lwp_fork(l1, l2, stack, stacksize, func, |
890 | (arg != NULL) ? arg : l2); |
891 | |
892 | if ((flags & LWP_PIDLID) != 0) { |
893 | lid = proc_alloc_pid(p2); |
894 | l2->l_pflag |= LP_PIDLID; |
895 | } else { |
896 | lid = 0; |
897 | } |
898 | |
899 | mutex_enter(p2->p_lock); |
900 | |
901 | if ((flags & LWP_DETACHED) != 0) { |
902 | l2->l_prflag = LPR_DETACHED; |
903 | p2->p_ndlwps++; |
904 | } else |
905 | l2->l_prflag = 0; |
906 | |
907 | l2->l_sigstk = l1->l_sigstk; |
908 | l2->l_sigmask = l1->l_sigmask; |
909 | TAILQ_INIT(&l2->l_sigpend.sp_info); |
910 | sigemptyset(&l2->l_sigpend.sp_set); |
911 | |
912 | if (__predict_true(lid == 0)) { |
913 | /* |
914 | * XXX: l_lid are expected to be unique (for a process) |
915 | * if LWP_PIDLID is sometimes set this won't be true. |
916 | * Once 2^31 threads have been allocated we have to |
917 | * scan to ensure we allocate a unique value. |
918 | */ |
919 | lid = ++p2->p_nlwpid; |
920 | if (__predict_false(lid & LID_SCAN)) { |
921 | lid = lwp_find_free_lid(lid, l2, p2); |
922 | p2->p_nlwpid = lid | LID_SCAN; |
			/* l2 has been inserted into p_lwps in order */
924 | goto skip_insert; |
925 | } |
926 | p2->p_nlwpid = lid; |
927 | } |
928 | LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling); |
929 | skip_insert: |
930 | l2->l_lid = lid; |
931 | p2->p_nlwps++; |
932 | p2->p_nrlwps++; |
933 | |
934 | KASSERT(l2->l_affinity == NULL); |
935 | |
936 | if ((p2->p_flag & PK_SYSTEM) == 0) { |
937 | /* Inherit the affinity mask. */ |
938 | if (l1->l_affinity) { |
939 | /* |
940 | * Note that we hold the state lock while inheriting |
941 | * the affinity to avoid race with sched_setaffinity(). |
942 | */ |
943 | lwp_lock(l1); |
944 | if (l1->l_affinity) { |
945 | kcpuset_use(l1->l_affinity); |
946 | l2->l_affinity = l1->l_affinity; |
947 | } |
948 | lwp_unlock(l1); |
949 | } |
950 | lwp_lock(l2); |
951 | /* Inherit a processor-set */ |
952 | l2->l_psid = l1->l_psid; |
953 | /* Look for a CPU to start */ |
954 | l2->l_cpu = sched_takecpu(l2); |
955 | lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_mutex); |
956 | } |
957 | mutex_exit(p2->p_lock); |
958 | |
959 | SDT_PROBE(proc, kernel, , lwp__create, l2, 0, 0, 0, 0); |
960 | |
961 | mutex_enter(proc_lock); |
962 | LIST_INSERT_HEAD(&alllwp, l2, l_list); |
963 | mutex_exit(proc_lock); |
964 | |
965 | SYSCALL_TIME_LWP_INIT(l2); |
966 | |
967 | if (p2->p_emul->e_lwp_fork) |
968 | (*p2->p_emul->e_lwp_fork)(l1, l2); |
969 | |
970 | return (0); |
971 | } |
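
/*
 * Condensed sketch of a typical caller: since the new LWP is born in
 * LSIDL, the creator must transition it itself, e.g. to make it
 * runnable (compare sys__lwp_create(); 'func' and 'arg' are
 * placeholders):
 *
 *	error = lwp_create(curlwp, p, uaddr, 0, NULL, 0, func, arg,
 *	    &l2, SCHED_OTHER);
 *	if (error == 0) {
 *		mutex_enter(p->p_lock);
 *		lwp_lock(l2);
 *		l2->l_stat = LSRUN;
 *		sched_enqueue(l2, false);
 *		lwp_unlock(l2);
 *		mutex_exit(p->p_lock);
 *	}
 */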
972 | |
973 | /* |
974 | * Called by MD code when a new LWP begins execution. Must be called |
975 | * with the previous LWP locked (so at splsched), or if there is no |
976 | * previous LWP, at splsched. |
977 | */ |
978 | void |
979 | lwp_startup(struct lwp *prev, struct lwp *new_lwp) |
980 | { |
	KASSERTMSG(new_lwp == curlwp, "l %p curlwp %p prevlwp %p",
	    new_lwp, curlwp, prev);
982 | |
983 | SDT_PROBE(proc, kernel, , lwp__start, new_lwp, 0, 0, 0, 0); |
984 | |
985 | KASSERT(kpreempt_disabled()); |
986 | if (prev != NULL) { |
987 | /* |
988 | * Normalize the count of the spin-mutexes, it was |
989 | * increased in mi_switch(). Unmark the state of |
990 | * context switch - it is finished for previous LWP. |
991 | */ |
992 | curcpu()->ci_mtx_count++; |
993 | membar_exit(); |
994 | prev->l_ctxswtch = 0; |
995 | } |
996 | KPREEMPT_DISABLE(new_lwp); |
997 | if (__predict_true(new_lwp->l_proc->p_vmspace)) |
998 | pmap_activate(new_lwp); |
999 | spl0(); |
1000 | |
1001 | /* Note trip through cpu_switchto(). */ |
1002 | pserialize_switchpoint(); |
1003 | |
1004 | LOCKDEBUG_BARRIER(NULL, 0); |
1005 | KPREEMPT_ENABLE(new_lwp); |
1006 | if ((new_lwp->l_pflag & LP_MPSAFE) == 0) { |
1007 | KERNEL_LOCK(1, new_lwp); |
1008 | } |
1009 | } |
1010 | |
1011 | /* |
1012 | * Exit an LWP. |
1013 | */ |
1014 | void |
1015 | lwp_exit(struct lwp *l) |
1016 | { |
1017 | struct proc *p = l->l_proc; |
1018 | struct lwp *l2; |
1019 | bool current; |
1020 | |
1021 | current = (l == curlwp); |
1022 | |
1023 | KASSERT(current || (l->l_stat == LSIDL && l->l_target_cpu == NULL)); |
1024 | KASSERT(p == curproc); |
1025 | |
1026 | SDT_PROBE(proc, kernel, , lwp__exit, l, 0, 0, 0, 0); |
1027 | |
1028 | /* |
1029 | * Verify that we hold no locks other than the kernel lock. |
1030 | */ |
1031 | LOCKDEBUG_BARRIER(&kernel_lock, 0); |
1032 | |
1033 | /* |
1034 | * If we are the last live LWP in a process, we need to exit the |
1035 | * entire process. We do so with an exit status of zero, because |
1036 | * it's a "controlled" exit, and because that's what Solaris does. |
1037 | * |
1038 | * We are not quite a zombie yet, but for accounting purposes we |
1039 | * must increment the count of zombies here. |
1040 | * |
1041 | * Note: the last LWP's specificdata will be deleted here. |
1042 | */ |
1043 | mutex_enter(p->p_lock); |
1044 | if (p->p_nlwps - p->p_nzlwps == 1) { |
1045 | KASSERT(current == true); |
1046 | KASSERT(p != &proc0); |
1047 | /* XXXSMP kernel_lock not held */ |
1048 | exit1(l, 0, 0); |
1049 | /* NOTREACHED */ |
1050 | } |
1051 | p->p_nzlwps++; |
1052 | mutex_exit(p->p_lock); |
1053 | |
1054 | if (p->p_emul->e_lwp_exit) |
1055 | (*p->p_emul->e_lwp_exit)(l); |
1056 | |
1057 | /* Drop filedesc reference. */ |
1058 | fd_free(); |
1059 | |
1060 | /* Delete the specificdata while it's still safe to sleep. */ |
1061 | lwp_finispecific(l); |
1062 | |
1063 | /* |
1064 | * Release our cached credentials. |
1065 | */ |
1066 | kauth_cred_free(l->l_cred); |
1067 | callout_destroy(&l->l_timeout_ch); |
1068 | |
1069 | /* |
1070 | * Remove the LWP from the global list. |
1071 | * Free its LID from the PID namespace if needed. |
1072 | */ |
1073 | mutex_enter(proc_lock); |
1074 | LIST_REMOVE(l, l_list); |
1075 | if ((l->l_pflag & LP_PIDLID) != 0 && l->l_lid != p->p_pid) { |
1076 | proc_free_pid(l->l_lid); |
1077 | } |
1078 | mutex_exit(proc_lock); |
1079 | |
1080 | /* |
1081 | * Get rid of all references to the LWP that others (e.g. procfs) |
1082 | * may have, and mark the LWP as a zombie. If the LWP is detached, |
1083 | * mark it waiting for collection in the proc structure. Note that |
1084 | * before we can do that, we need to free any other dead, deatched |
1085 | * LWP waiting to meet its maker. |
1086 | */ |
1087 | mutex_enter(p->p_lock); |
1088 | lwp_drainrefs(l); |
1089 | |
1090 | if ((l->l_prflag & LPR_DETACHED) != 0) { |
1091 | while ((l2 = p->p_zomblwp) != NULL) { |
1092 | p->p_zomblwp = NULL; |
1093 | lwp_free(l2, false, false);/* releases proc mutex */ |
1094 | mutex_enter(p->p_lock); |
1095 | l->l_refcnt++; |
1096 | lwp_drainrefs(l); |
1097 | } |
1098 | p->p_zomblwp = l; |
1099 | } |
1100 | |
1101 | /* |
1102 | * If we find a pending signal for the process and we have been |
1103 | * asked to check for signals, then we lose: arrange to have |
1104 | * all other LWPs in the process check for signals. |
1105 | */ |
1106 | if ((l->l_flag & LW_PENDSIG) != 0 && |
1107 | firstsig(&p->p_sigpend.sp_set) != 0) { |
1108 | LIST_FOREACH(l2, &p->p_lwps, l_sibling) { |
1109 | lwp_lock(l2); |
1110 | l2->l_flag |= LW_PENDSIG; |
1111 | lwp_unlock(l2); |
1112 | } |
1113 | } |
1114 | |
1115 | /* |
1116 | * Release any PCU resources before becoming a zombie. |
1117 | */ |
1118 | pcu_discard_all(l); |
1119 | |
1120 | lwp_lock(l); |
1121 | l->l_stat = LSZOMB; |
1122 | if (l->l_name != NULL) { |
		strcpy(l->l_name, "(zombie)");
1124 | } |
1125 | lwp_unlock(l); |
1126 | p->p_nrlwps--; |
1127 | cv_broadcast(&p->p_lwpcv); |
1128 | if (l->l_lwpctl != NULL) |
1129 | l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED; |
1130 | mutex_exit(p->p_lock); |
1131 | |
1132 | /* |
1133 | * We can no longer block. At this point, lwp_free() may already |
1134 | * be gunning for us. On a multi-CPU system, we may be off p_lwps. |
1135 | * |
1136 | * Free MD LWP resources. |
1137 | */ |
1138 | cpu_lwp_free(l, 0); |
1139 | |
1140 | if (current) { |
1141 | pmap_deactivate(l); |
1142 | |
1143 | /* |
1144 | * Release the kernel lock, and switch away into |
1145 | * oblivion. |
1146 | */ |
1147 | #ifdef notyet |
1148 | /* XXXSMP hold in lwp_userret() */ |
1149 | KERNEL_UNLOCK_LAST(l); |
1150 | #else |
1151 | KERNEL_UNLOCK_ALL(l, NULL); |
1152 | #endif |
1153 | lwp_exit_switchaway(l); |
1154 | } |
1155 | } |
1156 | |
1157 | /* |
1158 | * Free a dead LWP's remaining resources. |
1159 | * |
1160 | * XXXLWP limits. |
1161 | */ |
1162 | void |
1163 | lwp_free(struct lwp *l, bool recycle, bool last) |
1164 | { |
1165 | struct proc *p = l->l_proc; |
1166 | struct rusage *ru; |
1167 | ksiginfoq_t kq; |
1168 | |
1169 | KASSERT(l != curlwp); |
1170 | KASSERT(last || mutex_owned(p->p_lock)); |
1171 | |
1172 | /* |
	 * We use the process credentials instead of the lwp credentials
	 * here because the lwp credentials may be cached (just after a
	 * setuid call) and we don't want to pay for syncing, since the
	 * lwp is going away anyway.
1177 | */ |
1178 | if (p != &proc0 && p->p_nlwps != 1) |
1179 | (void)chglwpcnt(kauth_cred_getuid(p->p_cred), -1); |
1180 | /* |
1181 | * If this was not the last LWP in the process, then adjust |
1182 | * counters and unlock. |
1183 | */ |
1184 | if (!last) { |
1185 | /* |
1186 | * Add the LWP's run time to the process' base value. |
		 * This needs to coincide with coming off p_lwps.
1188 | */ |
1189 | bintime_add(&p->p_rtime, &l->l_rtime); |
1190 | p->p_pctcpu += l->l_pctcpu; |
1191 | ru = &p->p_stats->p_ru; |
1192 | ruadd(ru, &l->l_ru); |
1193 | ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw); |
1194 | ru->ru_nivcsw += l->l_nivcsw; |
1195 | LIST_REMOVE(l, l_sibling); |
1196 | p->p_nlwps--; |
1197 | p->p_nzlwps--; |
1198 | if ((l->l_prflag & LPR_DETACHED) != 0) |
1199 | p->p_ndlwps--; |
1200 | |
1201 | /* |
1202 | * Have any LWPs sleeping in lwp_wait() recheck for |
1203 | * deadlock. |
1204 | */ |
1205 | cv_broadcast(&p->p_lwpcv); |
1206 | mutex_exit(p->p_lock); |
1207 | } |
1208 | |
1209 | #ifdef MULTIPROCESSOR |
1210 | /* |
1211 | * In the unlikely event that the LWP is still on the CPU, |
1212 | * then spin until it has switched away. We need to release |
1213 | * all locks to avoid deadlock against interrupt handlers on |
1214 | * the target CPU. |
1215 | */ |
1216 | if ((l->l_pflag & LP_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) { |
1217 | int count; |
1218 | (void)count; /* XXXgcc */ |
1219 | KERNEL_UNLOCK_ALL(curlwp, &count); |
1220 | while ((l->l_pflag & LP_RUNNING) != 0 || |
1221 | l->l_cpu->ci_curlwp == l) |
1222 | SPINLOCK_BACKOFF_HOOK; |
1223 | KERNEL_LOCK(count, curlwp); |
1224 | } |
1225 | #endif |
1226 | |
1227 | /* |
1228 | * Destroy the LWP's remaining signal information. |
1229 | */ |
1230 | ksiginfo_queue_init(&kq); |
1231 | sigclear(&l->l_sigpend, NULL, &kq); |
1232 | ksiginfo_queue_drain(&kq); |
1233 | cv_destroy(&l->l_sigcv); |
1234 | cv_destroy(&l->l_waitcv); |
1235 | |
1236 | /* |
1237 | * Free lwpctl structure and affinity. |
1238 | */ |
1239 | if (l->l_lwpctl) { |
1240 | lwp_ctl_free(l); |
1241 | } |
1242 | if (l->l_affinity) { |
1243 | kcpuset_unuse(l->l_affinity, NULL); |
1244 | l->l_affinity = NULL; |
1245 | } |
1246 | |
1247 | /* |
1248 | * Free the LWP's turnstile and the LWP structure itself unless the |
1249 | * caller wants to recycle them. Also, free the scheduler specific |
1250 | * data. |
1251 | * |
1252 | * We can't return turnstile0 to the pool (it didn't come from it), |
1253 | * so if it comes up just drop it quietly and move on. |
1254 | * |
1255 | * We don't recycle the VM resources at this time. |
1256 | */ |
1257 | |
1258 | if (!recycle && l->l_ts != &turnstile0) |
1259 | pool_cache_put(turnstile_cache, l->l_ts); |
1260 | if (l->l_name != NULL) |
1261 | kmem_free(l->l_name, MAXCOMLEN); |
1262 | |
1263 | cpu_lwp_free2(l); |
1264 | uvm_lwp_exit(l); |
1265 | |
1266 | KASSERT(SLIST_EMPTY(&l->l_pi_lenders)); |
1267 | KASSERT(l->l_inheritedprio == -1); |
1268 | KASSERT(l->l_blcnt == 0); |
1269 | kdtrace_thread_dtor(NULL, l); |
1270 | if (!recycle) |
1271 | pool_cache_put(lwp_cache, l); |
1272 | } |
1273 | |
1274 | /* |
 * Migrate the LWP to another CPU.  Unlocks the LWP.
1276 | */ |
1277 | void |
1278 | lwp_migrate(lwp_t *l, struct cpu_info *tci) |
1279 | { |
1280 | struct schedstate_percpu *tspc; |
1281 | int lstat = l->l_stat; |
1282 | |
1283 | KASSERT(lwp_locked(l, NULL)); |
1284 | KASSERT(tci != NULL); |
1285 | |
1286 | /* If LWP is still on the CPU, it must be handled like LSONPROC */ |
1287 | if ((l->l_pflag & LP_RUNNING) != 0) { |
1288 | lstat = LSONPROC; |
1289 | } |
1290 | |
1291 | /* |
1292 | * The destination CPU could be changed while previous migration |
1293 | * was not finished. |
1294 | */ |
1295 | if (l->l_target_cpu != NULL) { |
1296 | l->l_target_cpu = tci; |
1297 | lwp_unlock(l); |
1298 | return; |
1299 | } |
1300 | |
1301 | /* Nothing to do if trying to migrate to the same CPU */ |
1302 | if (l->l_cpu == tci) { |
1303 | lwp_unlock(l); |
1304 | return; |
1305 | } |
1306 | |
1307 | KASSERT(l->l_target_cpu == NULL); |
1308 | tspc = &tci->ci_schedstate; |
1309 | switch (lstat) { |
1310 | case LSRUN: |
1311 | l->l_target_cpu = tci; |
1312 | break; |
1313 | case LSIDL: |
1314 | l->l_cpu = tci; |
1315 | lwp_unlock_to(l, tspc->spc_mutex); |
1316 | return; |
1317 | case LSSLEEP: |
1318 | l->l_cpu = tci; |
1319 | break; |
1320 | case LSSTOP: |
1321 | case LSSUSPENDED: |
1322 | l->l_cpu = tci; |
1323 | if (l->l_wchan == NULL) { |
1324 | lwp_unlock_to(l, tspc->spc_lwplock); |
1325 | return; |
1326 | } |
1327 | break; |
1328 | case LSONPROC: |
1329 | l->l_target_cpu = tci; |
1330 | spc_lock(l->l_cpu); |
1331 | cpu_need_resched(l->l_cpu, RESCHED_KPREEMPT); |
1332 | spc_unlock(l->l_cpu); |
1333 | break; |
1334 | } |
1335 | lwp_unlock(l); |
1336 | } |
1337 | |
1338 | /* |
 * Find the LWP in the process.  Arguments may be zero, in which case
 * the calling process and the first LWP in the list will be used.
 * On success, the process is returned locked.
1342 | */ |
1343 | struct lwp * |
1344 | lwp_find2(pid_t pid, lwpid_t lid) |
1345 | { |
1346 | proc_t *p; |
1347 | lwp_t *l; |
1348 | |
1349 | /* Find the process. */ |
1350 | if (pid != 0) { |
1351 | mutex_enter(proc_lock); |
1352 | p = proc_find(pid); |
1353 | if (p == NULL) { |
1354 | mutex_exit(proc_lock); |
1355 | return NULL; |
1356 | } |
1357 | mutex_enter(p->p_lock); |
1358 | mutex_exit(proc_lock); |
1359 | } else { |
1360 | p = curlwp->l_proc; |
1361 | mutex_enter(p->p_lock); |
1362 | } |
1363 | /* Find the thread. */ |
1364 | if (lid != 0) { |
1365 | l = lwp_find(p, lid); |
1366 | } else { |
1367 | l = LIST_FIRST(&p->p_lwps); |
1368 | } |
1369 | if (l == NULL) { |
1370 | mutex_exit(p->p_lock); |
1371 | } |
1372 | return l; |
1373 | } |
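
/*
 * For example (illustrative only), a caller looking up an arbitrary
 * thread system-wide must drop the lock that lwp_find2() returns held:
 *
 *	if ((l = lwp_find2(pid, lid)) != NULL) {
 *		... use 'l' with l->l_proc->p_lock held ...
 *		mutex_exit(l->l_proc->p_lock);
 *	}
 */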
1374 | |
1375 | /* |
1376 | * Look up a live LWP within the specified process. |
1377 | * |
1378 | * Must be called with p->p_lock held. |
1379 | */ |
1380 | struct lwp * |
1381 | lwp_find(struct proc *p, lwpid_t id) |
1382 | { |
1383 | struct lwp *l; |
1384 | |
1385 | KASSERT(mutex_owned(p->p_lock)); |
1386 | |
1387 | LIST_FOREACH(l, &p->p_lwps, l_sibling) { |
1388 | if (l->l_lid == id) |
1389 | break; |
1390 | } |
1391 | |
1392 | /* |
1393 | * No need to lock - all of these conditions will |
1394 | * be visible with the process level mutex held. |
1395 | */ |
1396 | if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB)) |
1397 | l = NULL; |
1398 | |
1399 | return l; |
1400 | } |
1401 | |
1402 | /* |
1403 | * Update an LWP's cached credentials to mirror the process' master copy. |
1404 | * |
1405 | * This happens early in the syscall path, on user trap, and on LWP |
1406 | * creation. A long-running LWP can also voluntarily choose to update |
1407 | * its credentials by calling this routine. This may be called from |
1408 | * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand. |
1409 | */ |
1410 | void |
1411 | lwp_update_creds(struct lwp *l) |
1412 | { |
1413 | kauth_cred_t oc; |
1414 | struct proc *p; |
1415 | |
1416 | p = l->l_proc; |
1417 | oc = l->l_cred; |
1418 | |
1419 | mutex_enter(p->p_lock); |
1420 | kauth_cred_hold(p->p_cred); |
1421 | l->l_cred = p->p_cred; |
1422 | l->l_prflag &= ~LPR_CRMOD; |
1423 | mutex_exit(p->p_lock); |
1424 | if (oc != NULL) |
1425 | kauth_cred_free(oc); |
1426 | } |
1427 | |
1428 | /* |
1429 | * Verify that an LWP is locked, and optionally verify that the lock matches |
1430 | * one we specify. |
1431 | */ |
1432 | int |
1433 | lwp_locked(struct lwp *l, kmutex_t *mtx) |
1434 | { |
1435 | kmutex_t *cur = l->l_mutex; |
1436 | |
1437 | return mutex_owned(cur) && (mtx == cur || mtx == NULL); |
1438 | } |
1439 | |
1440 | /* |
1441 | * Lend a new mutex to an LWP. The old mutex must be held. |
1442 | */ |
1443 | void |
1444 | lwp_setlock(struct lwp *l, kmutex_t *mtx) |
1445 | { |
1446 | |
1447 | KASSERT(mutex_owned(l->l_mutex)); |
1448 | |
1449 | membar_exit(); |
1450 | l->l_mutex = mtx; |
1451 | } |
1452 | |
1453 | /* |
1454 | * Lend a new mutex to an LWP, and release the old mutex. The old mutex |
1455 | * must be held. |
1456 | */ |
1457 | void |
1458 | lwp_unlock_to(struct lwp *l, kmutex_t *mtx) |
1459 | { |
1460 | kmutex_t *old; |
1461 | |
1462 | KASSERT(lwp_locked(l, NULL)); |
1463 | |
1464 | old = l->l_mutex; |
1465 | membar_exit(); |
1466 | l->l_mutex = mtx; |
1467 | mutex_spin_exit(old); |
1468 | } |
1469 | |
1470 | int |
1471 | lwp_trylock(struct lwp *l) |
1472 | { |
1473 | kmutex_t *old; |
1474 | |
1475 | for (;;) { |
1476 | if (!mutex_tryenter(old = l->l_mutex)) |
1477 | return 0; |
1478 | if (__predict_true(l->l_mutex == old)) |
1479 | return 1; |
1480 | mutex_spin_exit(old); |
1481 | } |
1482 | } |
1483 | |
1484 | void |
1485 | lwp_unsleep(lwp_t *l, bool cleanup) |
1486 | { |
1487 | |
1488 | KASSERT(mutex_owned(l->l_mutex)); |
1489 | (*l->l_syncobj->sobj_unsleep)(l, cleanup); |
1490 | } |
1491 | |
1492 | /* |
 * Handle exceptions for mi_userret().  Called if one of the flags in
 * LW_USERRET is set.
1495 | */ |
1496 | void |
1497 | lwp_userret(struct lwp *l) |
1498 | { |
1499 | struct proc *p; |
1500 | int sig; |
1501 | |
1502 | KASSERT(l == curlwp); |
1503 | KASSERT(l->l_stat == LSONPROC); |
1504 | p = l->l_proc; |
1505 | |
1506 | #ifndef __HAVE_FAST_SOFTINTS |
1507 | /* Run pending soft interrupts. */ |
1508 | if (l->l_cpu->ci_data.cpu_softints != 0) |
1509 | softint_overlay(); |
1510 | #endif |
1511 | |
1512 | /* |
	 * It is safe to do this read unlocked on an MP system.
1514 | */ |
1515 | while ((l->l_flag & LW_USERRET) != 0) { |
1516 | /* |
1517 | * Process pending signals first, unless the process |
1518 | * is dumping core or exiting, where we will instead |
1519 | * enter the LW_WSUSPEND case below. |
1520 | */ |
1521 | if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) == |
1522 | LW_PENDSIG) { |
1523 | mutex_enter(p->p_lock); |
1524 | while ((sig = issignal(l)) != 0) |
1525 | postsig(sig); |
1526 | mutex_exit(p->p_lock); |
1527 | } |
1528 | |
1529 | /* |
1530 | * Core-dump or suspend pending. |
1531 | * |
1532 | * In case of core dump, suspend ourselves, so that the kernel |
1533 | * stack and therefore the userland registers saved in the |
1534 | * trapframe are around for coredump() to write them out. |
1535 | * We also need to save any PCU resources that we have so that |
		 * they are accessible for coredump().  We issue a wakeup on
1537 | * p->p_lwpcv so that sigexit() will write the core file out |
1538 | * once all other LWPs are suspended. |
1539 | */ |
1540 | if ((l->l_flag & LW_WSUSPEND) != 0) { |
1541 | pcu_save_all(l); |
1542 | mutex_enter(p->p_lock); |
1543 | p->p_nrlwps--; |
1544 | cv_broadcast(&p->p_lwpcv); |
1545 | lwp_lock(l); |
1546 | l->l_stat = LSSUSPENDED; |
1547 | lwp_unlock(l); |
1548 | mutex_exit(p->p_lock); |
1549 | lwp_lock(l); |
1550 | mi_switch(l); |
1551 | } |
1552 | |
1553 | /* Process is exiting. */ |
1554 | if ((l->l_flag & LW_WEXIT) != 0) { |
1555 | lwp_exit(l); |
1556 | KASSERT(0); |
1557 | /* NOTREACHED */ |
1558 | } |
1559 | |
1560 | /* update lwpctl processor (for vfork child_return) */ |
1561 | if (l->l_flag & LW_LWPCTL) { |
1562 | lwp_lock(l); |
1563 | KASSERT(kpreempt_disabled()); |
1564 | l->l_lwpctl->lc_curcpu = (int)cpu_index(l->l_cpu); |
1565 | l->l_lwpctl->lc_pctr++; |
1566 | l->l_flag &= ~LW_LWPCTL; |
1567 | lwp_unlock(l); |
1568 | } |
1569 | } |
1570 | } |
1571 | |
1572 | /* |
1573 | * Force an LWP to enter the kernel, to take a trip through lwp_userret(). |
1574 | */ |
1575 | void |
1576 | lwp_need_userret(struct lwp *l) |
1577 | { |
1578 | KASSERT(lwp_locked(l, NULL)); |
1579 | |
1580 | /* |
1581 | * Since the tests in lwp_userret() are done unlocked, make sure |
1582 | * that the condition will be seen before forcing the LWP to enter |
1583 | * kernel mode. |
1584 | */ |
1585 | membar_producer(); |
1586 | cpu_signotify(l); |
1587 | } |
1588 | |
1589 | /* |
1590 | * Add one reference to an LWP. This will prevent the LWP from |
 * exiting, thus keeping the lwp structure and PCB around to inspect.
1592 | */ |
1593 | void |
1594 | lwp_addref(struct lwp *l) |
1595 | { |
1596 | |
1597 | KASSERT(mutex_owned(l->l_proc->p_lock)); |
1598 | KASSERT(l->l_stat != LSZOMB); |
1599 | KASSERT(l->l_refcnt != 0); |
1600 | |
1601 | l->l_refcnt++; |
1602 | } |
1603 | |
1604 | /* |
1605 | * Remove one reference to an LWP. If this is the last reference, |
1606 | * then we must finalize the LWP's death. |
1607 | */ |
1608 | void |
1609 | lwp_delref(struct lwp *l) |
1610 | { |
1611 | struct proc *p = l->l_proc; |
1612 | |
1613 | mutex_enter(p->p_lock); |
1614 | lwp_delref2(l); |
1615 | mutex_exit(p->p_lock); |
1616 | } |
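
/*
 * Sketch of the reference pattern used by consumers such as procfs:
 * holding a reference lets the caller drop p_lock and still inspect
 * the LWP and its PCB safely:
 *
 *	mutex_enter(p->p_lock);
 *	if ((l = lwp_find(p, lid)) != NULL)
 *		lwp_addref(l);
 *	mutex_exit(p->p_lock);
 *	if (l != NULL) {
 *		... inspect the LWP without p_lock held ...
 *		lwp_delref(l);
 *	}
 */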
1617 | |
1618 | /* |
1619 | * Remove one reference to an LWP. If this is the last reference, |
1620 | * then we must finalize the LWP's death. The proc mutex is held |
1621 | * on entry. |
1622 | */ |
1623 | void |
1624 | lwp_delref2(struct lwp *l) |
1625 | { |
1626 | struct proc *p = l->l_proc; |
1627 | |
1628 | KASSERT(mutex_owned(p->p_lock)); |
1629 | KASSERT(l->l_stat != LSZOMB); |
1630 | KASSERT(l->l_refcnt > 0); |
1631 | if (--l->l_refcnt == 0) |
1632 | cv_broadcast(&p->p_lwpcv); |
1633 | } |
1634 | |
1635 | /* |
1636 | * Drain all references to the current LWP. |
1637 | */ |
1638 | void |
1639 | lwp_drainrefs(struct lwp *l) |
1640 | { |
1641 | struct proc *p = l->l_proc; |
1642 | |
1643 | KASSERT(mutex_owned(p->p_lock)); |
1644 | KASSERT(l->l_refcnt != 0); |
1645 | |
1646 | l->l_refcnt--; |
1647 | while (l->l_refcnt != 0) |
1648 | cv_wait(&p->p_lwpcv, p->p_lock); |
1649 | } |
1650 | |
1651 | /* |
1652 | * Return true if the specified LWP is 'alive'. Only p->p_lock need |
1653 | * be held. |
1654 | */ |
1655 | bool |
1656 | lwp_alive(lwp_t *l) |
1657 | { |
1658 | |
1659 | KASSERT(mutex_owned(l->l_proc->p_lock)); |
1660 | |
1661 | switch (l->l_stat) { |
1662 | case LSSLEEP: |
1663 | case LSRUN: |
1664 | case LSONPROC: |
1665 | case LSSTOP: |
1666 | case LSSUSPENDED: |
1667 | return true; |
1668 | default: |
1669 | return false; |
1670 | } |
1671 | } |
1672 | |
1673 | /* |
1674 | * Return first live LWP in the process. |
1675 | */ |
1676 | lwp_t * |
1677 | lwp_find_first(proc_t *p) |
1678 | { |
1679 | lwp_t *l; |
1680 | |
1681 | KASSERT(mutex_owned(p->p_lock)); |
1682 | |
1683 | LIST_FOREACH(l, &p->p_lwps, l_sibling) { |
1684 | if (lwp_alive(l)) { |
1685 | return l; |
1686 | } |
1687 | } |
1688 | |
1689 | return NULL; |
1690 | } |
1691 | |
1692 | /* |
1693 | * Allocate a new lwpctl structure for a user LWP. |
1694 | */ |
1695 | int |
1696 | lwp_ctl_alloc(vaddr_t *uaddr) |
1697 | { |
1698 | lcproc_t *lp; |
1699 | u_int bit, i, offset; |
1700 | struct uvm_object *uao; |
1701 | int error; |
1702 | lcpage_t *lcp; |
1703 | proc_t *p; |
1704 | lwp_t *l; |
1705 | |
1706 | l = curlwp; |
1707 | p = l->l_proc; |
1708 | |
1709 | /* don't allow a vforked process to create lwp ctls */ |
1710 | if (p->p_lflag & PL_PPWAIT) |
1711 | return EBUSY; |
1712 | |
1713 | if (l->l_lcpage != NULL) { |
1714 | lcp = l->l_lcpage; |
1715 | *uaddr = lcp->lcp_uaddr + (vaddr_t)l->l_lwpctl - lcp->lcp_kaddr; |
1716 | return 0; |
1717 | } |
1718 | |
1719 | /* First time around, allocate header structure for the process. */ |
1720 | if ((lp = p->p_lwpctl) == NULL) { |
1721 | lp = kmem_alloc(sizeof(*lp), KM_SLEEP); |
1722 | mutex_init(&lp->lp_lock, MUTEX_DEFAULT, IPL_NONE); |
1723 | lp->lp_uao = NULL; |
1724 | TAILQ_INIT(&lp->lp_pages); |
1725 | mutex_enter(p->p_lock); |
1726 | if (p->p_lwpctl == NULL) { |
1727 | p->p_lwpctl = lp; |
1728 | mutex_exit(p->p_lock); |
1729 | } else { |
1730 | mutex_exit(p->p_lock); |
1731 | mutex_destroy(&lp->lp_lock); |
1732 | kmem_free(lp, sizeof(*lp)); |
1733 | lp = p->p_lwpctl; |
1734 | } |
1735 | } |
1736 | |
1737 | /* |
1738 | * Set up an anonymous memory region to hold the shared pages. |
1739 | * Map them into the process' address space. The user vmspace |
1740 | * gets the first reference on the UAO. |
1741 | */ |
1742 | mutex_enter(&lp->lp_lock); |
1743 | if (lp->lp_uao == NULL) { |
1744 | lp->lp_uao = uao_create(LWPCTL_UAREA_SZ, 0); |
1745 | lp->lp_cur = 0; |
1746 | lp->lp_max = LWPCTL_UAREA_SZ; |
1747 | lp->lp_uva = p->p_emul->e_vm_default_addr(p, |
1748 | (vaddr_t)p->p_vmspace->vm_daddr, LWPCTL_UAREA_SZ, |
1749 | p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN); |
1750 | error = uvm_map(&p->p_vmspace->vm_map, &lp->lp_uva, |
1751 | LWPCTL_UAREA_SZ, lp->lp_uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW, |
1752 | UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL, 0)); |
1753 | if (error != 0) { |
1754 | uao_detach(lp->lp_uao); |
1755 | lp->lp_uao = NULL; |
1756 | mutex_exit(&lp->lp_lock); |
1757 | return error; |
1758 | } |
1759 | } |
1760 | |
1761 | /* Get a free block and allocate for this LWP. */ |
1762 | TAILQ_FOREACH(lcp, &lp->lp_pages, lcp_chain) { |
1763 | if (lcp->lcp_nfree != 0) |
1764 | break; |
1765 | } |
1766 | if (lcp == NULL) { |
1767 | /* Nothing available - try to set up a free page. */ |
1768 | if (lp->lp_cur == lp->lp_max) { |
1769 | mutex_exit(&lp->lp_lock); |
1770 | return ENOMEM; |
1771 | } |
1772 | lcp = kmem_alloc(LWPCTL_LCPAGE_SZ, KM_SLEEP); |
1773 | if (lcp == NULL) { |
1774 | mutex_exit(&lp->lp_lock); |
1775 | return ENOMEM; |
1776 | } |
1777 | /* |
1778 | * Wire the next page down in kernel space. Since this |
1779 | * is a new mapping, we must add a reference. |
1780 | */ |
1781 | uao = lp->lp_uao; |
1782 | (*uao->pgops->pgo_reference)(uao); |
1783 | lcp->lcp_kaddr = vm_map_min(kernel_map); |
1784 | error = uvm_map(kernel_map, &lcp->lcp_kaddr, PAGE_SIZE, |
1785 | uao, lp->lp_cur, PAGE_SIZE, |
1786 | UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, |
1787 | UVM_INH_NONE, UVM_ADV_RANDOM, 0)); |
1788 | if (error != 0) { |
1789 | mutex_exit(&lp->lp_lock); |
1790 | kmem_free(lcp, LWPCTL_LCPAGE_SZ); |
1791 | (*uao->pgops->pgo_detach)(uao); |
1792 | return error; |
1793 | } |
1794 | error = uvm_map_pageable(kernel_map, lcp->lcp_kaddr, |
1795 | lcp->lcp_kaddr + PAGE_SIZE, FALSE, 0); |
1796 | if (error != 0) { |
1797 | mutex_exit(&lp->lp_lock); |
1798 | uvm_unmap(kernel_map, lcp->lcp_kaddr, |
1799 | lcp->lcp_kaddr + PAGE_SIZE); |
1800 | kmem_free(lcp, LWPCTL_LCPAGE_SZ); |
1801 | return error; |
1802 | } |
1803 | /* Prepare the page descriptor and link into the list. */ |
1804 | lcp->lcp_uaddr = lp->lp_uva + lp->lp_cur; |
1805 | lp->lp_cur += PAGE_SIZE; |
1806 | lcp->lcp_nfree = LWPCTL_PER_PAGE; |
1807 | lcp->lcp_rotor = 0; |
1808 | memset(lcp->lcp_bitmap, 0xff, LWPCTL_BITMAP_SZ); |
1809 | TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain); |
1810 | } |
1811 | for (i = lcp->lcp_rotor; lcp->lcp_bitmap[i] == 0;) { |
1812 | if (++i >= LWPCTL_BITMAP_ENTRIES) |
1813 | i = 0; |
1814 | } |
1815 | bit = ffs(lcp->lcp_bitmap[i]) - 1; |
1816 | lcp->lcp_bitmap[i] ^= (1 << bit); |
1817 | lcp->lcp_rotor = i; |
1818 | lcp->lcp_nfree--; |
1819 | l->l_lcpage = lcp; |
1820 | offset = (i << 5) + bit; |
1821 | l->l_lwpctl = (lwpctl_t *)lcp->lcp_kaddr + offset; |
1822 | *uaddr = lcp->lcp_uaddr + offset * sizeof(lwpctl_t); |
1823 | mutex_exit(&lp->lp_lock); |
1824 | |
1825 | KPREEMPT_DISABLE(l); |
1826 | l->l_lwpctl->lc_curcpu = (int)curcpu()->ci_data.cpu_index; |
1827 | KPREEMPT_ENABLE(l); |
1828 | |
1829 | return 0; |
1830 | } |
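
/*
 * From userland, the block allocated above is reached via _lwp_ctl(2);
 * a minimal (sketched) consumer looks like:
 *
 *	struct lwpctl *lc;
 *
 *	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &lc) == 0)
 *		printf("running on CPU %d\n", lc->lc_curcpu);
 */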
1831 | |
1832 | /* |
1833 | * Free an lwpctl structure back to the per-process list. |
1834 | */ |
1835 | void |
1836 | lwp_ctl_free(lwp_t *l) |
1837 | { |
1838 | struct proc *p = l->l_proc; |
1839 | lcproc_t *lp; |
1840 | lcpage_t *lcp; |
1841 | u_int map, offset; |
1842 | |
1843 | /* don't free a lwp context we borrowed for vfork */ |
1844 | if (p->p_lflag & PL_PPWAIT) { |
1845 | l->l_lwpctl = NULL; |
1846 | return; |
1847 | } |
1848 | |
1849 | lp = p->p_lwpctl; |
1850 | KASSERT(lp != NULL); |
1851 | |
1852 | lcp = l->l_lcpage; |
1853 | offset = (u_int)((lwpctl_t *)l->l_lwpctl - (lwpctl_t *)lcp->lcp_kaddr); |
1854 | KASSERT(offset < LWPCTL_PER_PAGE); |
1855 | |
1856 | mutex_enter(&lp->lp_lock); |
1857 | lcp->lcp_nfree++; |
1858 | map = offset >> 5; |
1859 | lcp->lcp_bitmap[map] |= (1 << (offset & 31)); |
1860 | if (lcp->lcp_bitmap[lcp->lcp_rotor] == 0) |
1861 | lcp->lcp_rotor = map; |
1862 | if (TAILQ_FIRST(&lp->lp_pages)->lcp_nfree == 0) { |
1863 | TAILQ_REMOVE(&lp->lp_pages, lcp, lcp_chain); |
1864 | TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain); |
1865 | } |
1866 | mutex_exit(&lp->lp_lock); |
1867 | } |
1868 | |
1869 | /* |
1870 | * Process is exiting; tear down lwpctl state. This can only be safely |
1871 | * called by the last LWP in the process. |
1872 | */ |
1873 | void |
1874 | lwp_ctl_exit(void) |
1875 | { |
1876 | lcpage_t *lcp, *next; |
1877 | lcproc_t *lp; |
1878 | proc_t *p; |
1879 | lwp_t *l; |
1880 | |
1881 | l = curlwp; |
1882 | l->l_lwpctl = NULL; |
1883 | l->l_lcpage = NULL; |
1884 | p = l->l_proc; |
1885 | lp = p->p_lwpctl; |
1886 | |
1887 | KASSERT(lp != NULL); |
1888 | KASSERT(p->p_nlwps == 1); |
1889 | |
1890 | for (lcp = TAILQ_FIRST(&lp->lp_pages); lcp != NULL; lcp = next) { |
1891 | next = TAILQ_NEXT(lcp, lcp_chain); |
1892 | uvm_unmap(kernel_map, lcp->lcp_kaddr, |
1893 | lcp->lcp_kaddr + PAGE_SIZE); |
1894 | kmem_free(lcp, LWPCTL_LCPAGE_SZ); |
1895 | } |
1896 | |
1897 | if (lp->lp_uao != NULL) { |
1898 | uvm_unmap(&p->p_vmspace->vm_map, lp->lp_uva, |
1899 | lp->lp_uva + LWPCTL_UAREA_SZ); |
1900 | } |
1901 | |
1902 | mutex_destroy(&lp->lp_lock); |
1903 | kmem_free(lp, sizeof(*lp)); |
1904 | p->p_lwpctl = NULL; |
1905 | } |
1906 | |
1907 | /* |
1908 | * Return the current LWP's "preemption counter". Used to detect |
1909 | * preemption across operations that can tolerate preemption without |
1910 | * crashing, but which may generate incorrect results if preempted. |
1911 | */ |
1912 | uint64_t |
1913 | lwp_pctr(void) |
1914 | { |
1915 | |
1916 | return curlwp->l_ncsw; |
1917 | } |
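
/*
 * A sketched use of lwp_pctr(): snapshot the counter, do work that must
 * detect (but can tolerate) preemption, and retry if it changed:
 *
 *	uint64_t pctr;
 *
 *	do {
 *		pctr = lwp_pctr();
 *		... access per-CPU state ...
 *	} while (lwp_pctr() != pctr);
 */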
1918 | |
1919 | /* |
1920 | * Set an LWP's private data pointer. |
1921 | */ |
1922 | int |
1923 | lwp_setprivate(struct lwp *l, void *ptr) |
1924 | { |
1925 | int error = 0; |
1926 | |
1927 | l->l_private = ptr; |
1928 | #ifdef __HAVE_CPU_LWP_SETPRIVATE |
1929 | error = cpu_lwp_setprivate(l, ptr); |
1930 | #endif |
1931 | return error; |
1932 | } |
1933 | |
1934 | #if defined(DDB) |
1935 | #include <machine/pcb.h> |
1936 | |
1937 | void |
1938 | lwp_whatis(uintptr_t addr, void (*pr)(const char *, ...)) |
1939 | { |
1940 | lwp_t *l; |
1941 | |
1942 | LIST_FOREACH(l, &alllwp, l_list) { |
1943 | uintptr_t stack = (uintptr_t)KSTACK_LOWEST_ADDR(l); |
1944 | |
1945 | if (addr < stack || stack + KSTACK_SIZE <= addr) { |
1946 | continue; |
1947 | } |
		(*pr)("%p is %p+%zu, LWP %p's stack\n",
1949 | (void *)addr, (void *)stack, |
1950 | (size_t)(addr - stack), l); |
1951 | } |
1952 | } |
1953 | #endif /* defined(DDB) */ |
1954 | |