/*	$NetBSD: sys_lwp.c,v 1.57 2015/07/24 13:02:52 maxv Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lightweight process (LWP) system calls.  See kern_lwp.c for a description
 * of LWPs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.57 2015/07/24 13:02:52 maxv Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/sleepq.h>
#include <sys/lwpctl.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#define	LWP_UNPARK_MAX		1024

static syncobj_t lwp_park_sobj = {
	SOBJ_SLEEPQ_LIFO,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

static sleeptab_t lwp_park_tab;

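/*
 * Initialize the LWP park/unpark subsystem: set up the sleep table on
 * which parked LWPs wait.
 */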
void
lwp_sys_init(void)
{
	sleeptab_init(&lwp_park_tab);
}

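/*
 * Common back-end for _lwp_create(): allocate a u-area, create the new
 * LWP via lwp_create(), and set it running unless LWP_SUSPENDED was
 * requested or the process is stopping.
 */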
int
do_lwp_create(lwp_t *l, void *arg, u_long flags, lwpid_t *new_lwp)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	struct schedstate_percpu *spc;
	vaddr_t uaddr;
	int error;

	/* XXX check against resource limits */

	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0))
		return ENOMEM;

	error = lwp_create(l, p, uaddr, flags & LWP_DETACHED,
	    NULL, 0, p->p_emul->e_startlwp, arg, &l2, l->l_class);
	if (__predict_false(error)) {
		uvm_uarea_free(uaddr);
		return error;
	}

	*new_lwp = l2->l_lid;

	/*
	 * Set the new LWP running, unless the caller has requested that
	 * it be created in suspended state.  If the process is stopping,
	 * then the LWP is created stopped.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l2);
	spc = &l2->l_cpu->ci_schedstate;
	if ((flags & LWP_SUSPENDED) == 0 &&
	    (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
			KASSERT(l2->l_wchan == NULL);
			l2->l_stat = LSSTOP;
			p->p_nrlwps--;
			lwp_unlock_to(l2, spc->spc_lwplock);
		} else {
			KASSERT(lwp_locked(l2, spc->spc_mutex));
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
			lwp_unlock(l2);
		}
	} else {
		l2->l_stat = LSSUSPENDED;
		p->p_nrlwps--;
		lwp_unlock_to(l2, spc->spc_lwplock);
	}
	mutex_exit(p->p_lock);

	return 0;
}

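/*
 * _lwp_create(): create a new LWP running the supplied user context,
 * and copy the new LWP's ID out to the caller.
 */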
int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */
	struct proc *p = l->l_proc;
	ucontext_t *newuc;
	lwpid_t lid;
	int error;

	newuc = kmem_alloc(sizeof(ucontext_t), KM_SLEEP);
	error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
	if (error)
		goto fail;

	/* validate the ucontext */
	if ((newuc->uc_flags & _UC_CPU) == 0) {
		error = EINVAL;
		goto fail;
	}
	error = cpu_mcontext_validate(l, &newuc->uc_mcontext);
	if (error)
		goto fail;

	error = do_lwp_create(l, newuc, SCARG(uap, flags), &lid);
	if (error)
		goto fail;

	/*
	 * Do not free the ucontext if the copyout fails: the new LWP
	 * already owns it and will access it when it runs.
	 */
	return copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));

fail:
	kmem_free(newuc, sizeof(ucontext_t));
	return error;
}

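/*
 * _lwp_exit(): exit the calling LWP.  lwp_exit() is not expected to
 * return for the calling LWP, so the return below is for form only.
 */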
int
sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
{

	lwp_exit(l);
	return 0;
}

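/*
 * _lwp_self(): return the LWP ID of the calling LWP.
 */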
int
sys__lwp_self(struct lwp *l, const void *v, register_t *retval)
{

	*retval = l->l_lid;
	return 0;
}

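/*
 * _lwp_getprivate(): return the per-LWP private data pointer previously
 * set with _lwp_setprivate().
 */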
int
sys__lwp_getprivate(struct lwp *l, const void *v, register_t *retval)
{

	*retval = (uintptr_t)l->l_private;
	return 0;
}

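/*
 * _lwp_setprivate(): set the per-LWP private data pointer, typically
 * used by userland for thread-local storage.
 */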
int
sys__lwp_setprivate(struct lwp *l, const struct sys__lwp_setprivate_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) ptr;
	} */

	return lwp_setprivate(l, SCARG(uap, ptr));
}

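/*
 * _lwp_suspend(): suspend execution of the target LWP, waiting until
 * the suspension takes effect (or fails).
 */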
int
sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * Check for deadlock, which is only possible when we're suspending
	 * ourselves.  XXX There is a short race here, as p_nrlwps is only
	 * incremented when an LWP suspends itself on the kernel/user
	 * boundary.  It's still possible to kill -9 the process, so we
	 * don't bother checking further.
	 */
	lwp_lock(t);
	if ((t == l && p->p_nrlwps == 1) ||
	    (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
		lwp_unlock(t);
		mutex_exit(p->p_lock);
		return EDEADLK;
	}

	/*
	 * Suspend the LWP.  XXX If it's on a different CPU, we should wait
	 * for it to be preempted, where it will put itself to sleep.
	 *
	 * Suspension of the current LWP will happen on return to userspace.
	 */
	error = lwp_suspend(l, t);
	if (error) {
		mutex_exit(p->p_lock);
		return error;
	}

	/*
	 * Wait for:
	 *  o process exiting
	 *  o target LWP suspended
	 *  o target LWP not suspended and LW_WSUSPEND clear
	 *  o target LWP exited
	 */
	for (;;) {
		error = cv_wait_sig(&p->p_lwpcv, p->p_lock);
		if (error) {
			error = ERESTART;
			break;
		}
		if (lwp_find(p, SCARG(uap, target)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
			error = ERESTART;
			break;
		}
		if (t->l_stat == LSSUSPENDED ||
		    (t->l_flag & LW_WSUSPEND) == 0)
			break;
	}
	mutex_exit(p->p_lock);

	return error;
}

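/*
 * _lwp_continue(): resume execution of a target LWP previously stopped
 * by _lwp_suspend().
 */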
int
sys__lwp_continue(struct lwp *l, const struct sys__lwp_continue_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	int error;
	struct proc *p = l->l_proc;
	struct lwp *t;

	error = 0;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	lwp_continue(t);
	mutex_exit(p->p_lock);

	return error;
}

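/*
 * _lwp_wakeup(): interrupt the target LWP out of an interruptible
 * sleep, and cancel any pending or future park operation.
 */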
int
sys__lwp_wakeup(struct lwp *l, const struct sys__lwp_wakeup_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct lwp *t;
	struct proc *p;
	int error;

	p = l->l_proc;
	mutex_enter(p->p_lock);

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	t->l_flag |= (LW_CANCELLED | LW_UNPARKED);

	if (t->l_stat != LSSLEEP) {
		lwp_unlock(t);
		error = ENODEV;
	} else if ((t->l_flag & LW_SINTR) == 0) {
		lwp_unlock(t);
		error = EBUSY;
	} else {
		/* Wake it up.  lwp_unsleep() will release the LWP lock. */
		lwp_unsleep(t, true);
		error = 0;
	}

	mutex_exit(p->p_lock);

	return error;
}

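/*
 * _lwp_wait(): wait for a given (or, with wait_for == 0, any) LWP in
 * the process to exit, optionally copying out the departed LWP's ID.
 */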
int
sys__lwp_wait(struct lwp *l, const struct sys__lwp_wait_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */
	struct proc *p = l->l_proc;
	int error;
	lwpid_t dep;

	mutex_enter(p->p_lock);
	error = lwp_wait(l, SCARG(uap, wait_for), &dep, false);
	mutex_exit(p->p_lock);

	if (!error && SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
	}

	return error;
}

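/*
 * _lwp_kill(): direct a signal at a specific LWP within the calling
 * process.  A signal number of zero merely checks that the LWP exists.
 */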
int
sys__lwp_kill(struct lwp *l, const struct sys__lwp_kill_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(int) signo;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	ksiginfo_t ksi;
	int signo = SCARG(uap, signo);
	int error = 0;

	if ((u_int)signo >= NSIG)
		return EINVAL;

	KSI_INIT(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = SI_LWP;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
	ksi.ksi_lid = SCARG(uap, target);

	mutex_enter(proc_lock);
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
		error = ESRCH;
	else if (signo != 0)
		kpsignal2(p, &ksi);
	mutex_exit(p->p_lock);
	mutex_exit(proc_lock);

	return error;
}

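/*
 * _lwp_detach(): mark the target LWP as detached, so that it is
 * reclaimed automatically on exit instead of via _lwp_wait().  If the
 * target is already a zombie, clean up after it here.
 */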
int
sys__lwp_detach(struct lwp *l, const struct sys__lwp_detach_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p;
	struct lwp *t;
	lwpid_t target;
	int error;

	target = SCARG(uap, target);
	p = l->l_proc;

	mutex_enter(p->p_lock);

	if (l->l_lid == target)
		t = l;
	else {
		/*
		 * We can't use lwp_find() here because the target might
		 * be a zombie.
		 */
		LIST_FOREACH(t, &p->p_lwps, l_sibling)
			if (t->l_lid == target)
				break;
	}

	/*
	 * If the LWP is already detached, there's nothing to do.
	 * If it's a zombie, we need to clean up after it.  LSZOMB
	 * is visible with the proc mutex held.
	 *
	 * After we have detached or released the LWP, kick any
	 * other LWPs that may be sitting in _lwp_wait(), waiting
	 * for the target LWP to exit.
	 */
	if (t != NULL && t->l_stat != LSIDL) {
		if ((t->l_prflag & LPR_DETACHED) == 0) {
			p->p_ndlwps++;
			t->l_prflag |= LPR_DETACHED;
			if (t->l_stat == LSZOMB) {
				/* Releases proc mutex. */
				lwp_free(t, false, false);
				return 0;
			}
			error = 0;

			/*
			 * Have any LWPs sleeping in lwp_wait() recheck
			 * for deadlock.
			 */
			cv_broadcast(&p->p_lwpcv);
		} else
			error = EINVAL;
	} else
		error = ESRCH;

	mutex_exit(p->p_lock);

	return error;
}

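/*
 * Compute the sleep-queue wait channel for a park operation: the
 * process pointer XORed with the user-supplied hint, which is normally
 * the address of the user-level synchronisation object.  This spreads
 * park operations from different processes across the sleep table.
 */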
static inline wchan_t
lwp_park_wchan(struct proc *p, const void *hint)
{

	return (wchan_t)((uintptr_t)p ^ (uintptr_t)hint);
}

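/*
 * Unpark a single LWP.  Fast path: find the target on the park sleep
 * queue and wake it directly.  If it has not parked yet, mark the
 * operation pending with LW_UNPARKED so that the target's next
 * _lwp_park() returns immediately.
 */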
int
lwp_unpark(lwpid_t target, const void *hint)
{
	sleepq_t *sq;
	wchan_t wchan;
	kmutex_t *mp;
	proc_t *p;
	lwp_t *t;

	/*
	 * Easy case: search for the LWP on the sleep queue.  If
	 * it's parked, remove it from the queue and set running.
	 */
	p = curproc;
	wchan = lwp_park_wchan(p, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	TAILQ_FOREACH(t, sq, l_sleepchain)
		if (t->l_proc == p && t->l_lid == target)
			break;

	if (__predict_true(t != NULL)) {
		sleepq_remove(sq, t);
		mutex_spin_exit(mp);
		return 0;
	}

	/*
	 * The LWP hasn't parked yet.  Take the hit and mark the
	 * operation as pending.
	 */
	mutex_spin_exit(mp);

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * It may not have parked yet, we may have raced with it, or it
	 * may be parked on a different user sync object.
	 */
	lwp_lock(t);
	if (t->l_syncobj == &lwp_park_sobj) {
		/* Releases the LWP lock. */
		lwp_unsleep(t, true);
	} else {
		/*
		 * Set the operation pending.  The next call to _lwp_park
		 * will return early.
		 */
		t->l_flag |= LW_UNPARKED;
		lwp_unlock(t);
	}

	mutex_exit(p->p_lock);
	return 0;
}

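/*
 * Park the calling LWP until it is unparked, interrupted by a signal,
 * or timed out.  A pending unpark or wakeup (LW_UNPARKED/LW_CANCELLED)
 * is consumed here, returning EALREADY without sleeping.  This is the
 * primitive the userland thread library typically builds mutex and
 * condition variable waits upon.
 */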
int
lwp_park(clockid_t clock_id, int flags, struct timespec *ts, const void *hint)
{
	sleepq_t *sq;
	kmutex_t *mp;
	wchan_t wchan;
	int timo, error;
	lwp_t *l;

	if (ts != NULL) {
		if ((error = ts2timo(clock_id, flags, ts, &timo, NULL)) != 0)
			return error;
		KASSERT(timo != 0);
	} else {
		timo = 0;
	}

	/* Find and lock the sleep queue. */
	l = curlwp;
	wchan = lwp_park_wchan(l->l_proc, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		mutex_spin_exit(mp);
		return EALREADY;
	}
	lwp_unlock_to(l, mp);
	l->l_biglocks = 0;
	sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
	error = sleepq_block(timo, true);
	switch (error) {
	case EWOULDBLOCK:
		error = ETIMEDOUT;
		break;
	case ERESTART:
		error = EINTR;
		break;
	default:
		/* nothing */
		break;
	}
	return error;
}

/*
 * 'park' an LWP waiting on a user-level synchronisation object.  The LWP
 * will remain parked until another LWP in the same process calls in and
 * requests that it be unparked.  If a non-zero unpark target is given,
 * that LWP is unparked first, so a "wake one waiter, then wait" sequence
 * can be performed with a single system call.
 */
int
sys____lwp_park60(struct lwp *l, const struct sys____lwp_park60_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(int) flags;
		syscallarg(const struct timespec *) ts;
		syscallarg(lwpid_t) unpark;
		syscallarg(const void *) hint;
		syscallarg(const void *) unparkhint;
	} */
	struct timespec ts, *tsp;
	int error;

	if (SCARG(uap, ts) == NULL)
		tsp = NULL;
	else {
		error = copyin(SCARG(uap, ts), &ts, sizeof(ts));
		if (error != 0)
			return error;
		tsp = &ts;
	}

	if (SCARG(uap, unpark) != 0) {
		error = lwp_unpark(SCARG(uap, unpark), SCARG(uap, unparkhint));
		if (error != 0)
			return error;
	}

	return lwp_park(SCARG(uap, clock_id), SCARG(uap, flags), tsp,
	    SCARG(uap, hint));
}

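/*
 * _lwp_unpark(): unpark a single LWP parked (or about to park) on the
 * given hint.
 */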
int
sys__lwp_unpark(struct lwp *l, const struct sys__lwp_unpark_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(const void *) hint;
	} */

	return lwp_unpark(SCARG(uap, target), SCARG(uap, hint));
}

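/*
 * _lwp_unpark_all(): unpark a batch of LWPs parked on the given hint.
 * If the target array is NULL, return the maximum batch size
 * (LWP_UNPARK_MAX) so that the caller can unpark larger sets in blocks.
 */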
int
sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const lwpid_t *) targets;
		syscallarg(size_t) ntargets;
		syscallarg(const void *) hint;
	} */
	struct proc *p;
	struct lwp *t;
	sleepq_t *sq;
	wchan_t wchan;
	lwpid_t targets[32], *tp, *tpp, *tmax, target;
	int error;
	kmutex_t *mp;
	u_int ntargets;
	size_t sz;

	p = l->l_proc;
	ntargets = SCARG(uap, ntargets);

	if (SCARG(uap, targets) == NULL) {
		/*
		 * Let the caller know how much we are willing to do, and
		 * let it unpark the LWPs in blocks.
		 */
		*retval = LWP_UNPARK_MAX;
		return 0;
	}
	if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
		return EINVAL;

	/*
	 * Copy in the target array.  If it's a small number of LWPs, then
	 * place the numbers on the stack.
	 */
	sz = sizeof(target) * ntargets;
	if (sz <= sizeof(targets))
		tp = targets;
	else {
		tp = kmem_alloc(sz, KM_SLEEP);
		if (tp == NULL)
			return ENOMEM;
	}
	error = copyin(SCARG(uap, targets), tp, sz);
	if (error != 0) {
		if (tp != targets) {
			kmem_free(tp, sz);
		}
		return error;
	}

	wchan = lwp_park_wchan(p, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) {
		target = *tpp;

		/*
		 * Easy case: search for the LWP on the sleep queue.  If
		 * it's parked, remove it from the queue and set running.
		 */
		TAILQ_FOREACH(t, sq, l_sleepchain)
			if (t->l_proc == p && t->l_lid == target)
				break;

		if (t != NULL) {
			sleepq_remove(sq, t);
			continue;
		}

		/*
		 * The LWP hasn't parked yet.  Take the hit and
		 * mark the operation as pending.
		 */
		mutex_spin_exit(mp);
		mutex_enter(p->p_lock);
		if ((t = lwp_find(p, target)) == NULL) {
			mutex_exit(p->p_lock);
			mutex_spin_enter(mp);
			continue;
		}
		lwp_lock(t);

		/*
		 * It may not have parked yet, we may have raced with it,
		 * or it may be parked on a different user sync object.
		 */
		if (t->l_syncobj == &lwp_park_sobj) {
			/* Releases the LWP lock. */
			lwp_unsleep(t, true);
		} else {
			/*
			 * Set the operation pending.  The next call to
			 * _lwp_park will return early.
			 */
			t->l_flag |= LW_UNPARKED;
			lwp_unlock(t);
		}

		mutex_exit(p->p_lock);
		mutex_spin_enter(mp);
	}

	mutex_spin_exit(mp);
	if (tp != targets)
		kmem_free(tp, sz);

	return 0;
}

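/*
 * _lwp_setname(): set the name of the target LWP (0 means the calling
 * LWP).  Names are silently truncated to MAXCOMLEN bytes, including
 * the terminating NUL.
 */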
int
sys__lwp_setname(struct lwp *l, const struct sys__lwp_setname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(const char *) name;
	} */
	char *name, *oname;
	lwpid_t target;
	proc_t *p;
	lwp_t *t;
	int error;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
	if (name == NULL)
		return ENOMEM;
	error = copyinstr(SCARG(uap, name), name, MAXCOMLEN, NULL);
	switch (error) {
	case ENAMETOOLONG:
	case 0:
		name[MAXCOMLEN - 1] = '\0';
		break;
	default:
		kmem_free(name, MAXCOMLEN);
		return error;
	}

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		kmem_free(name, MAXCOMLEN);
		return ESRCH;
	}
	lwp_lock(t);
	oname = t->l_name;
	t->l_name = name;
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	if (oname != NULL)
		kmem_free(oname, MAXCOMLEN);

	return 0;
}

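/*
 * _lwp_getname(): copy out the name of the target LWP (0 means the
 * calling LWP).  An unnamed LWP yields the empty string.
 */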
int
sys__lwp_getname(struct lwp *l, const struct sys__lwp_getname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(char *) name;
		syscallarg(size_t) len;
	} */
	char name[MAXCOMLEN];
	lwpid_t target;
	proc_t *p;
	lwp_t *t;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}
	lwp_lock(t);
	if (t->l_name == NULL)
		name[0] = '\0';
	else
		strcpy(name, t->l_name);
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	return copyoutstr(name, SCARG(uap, name), SCARG(uap, len), NULL);
}

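/*
 * _lwp_ctl(): allocate an lwpctl structure shared between kernel and
 * userland and copy its address out to the caller.  Requests for
 * unsupported feature bits fail with ENODEV.
 */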
int
sys__lwp_ctl(struct lwp *l, const struct sys__lwp_ctl_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) features;
		syscallarg(struct lwpctl **) address;
	} */
	int error, features;
	vaddr_t vaddr;

	features = SCARG(uap, features);
	features &= ~(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR);
	if (features != 0)
		return ENODEV;
	if ((error = lwp_ctl_alloc(&vaddr)) != 0)
		return error;
	return copyout(&vaddr, SCARG(uap, address), sizeof(void *));
}