/*	$NetBSD: netbsd32_machdep.c,v 1.97 2016/10/19 09:44:00 skrll Exp $	*/

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: netbsd32_machdep.c,v 1.97 2016/10/19 09:44:00 skrll Exp $");

#ifdef _KERNEL_OPT
#include "opt_compat_netbsd.h"
#include "opt_coredump.h"
#include "opt_execfmt.h"
#include "opt_user_ldt.h"
#include "opt_mtrr.h"
#endif

#include <sys/param.h>
#include <sys/exec.h>
#include <sys/exec_aout.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/systm.h>
#include <sys/core.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/ras.h>
#include <sys/ptrace.h>
#include <sys/kauth.h>

#include <x86/fpu.h>
#include <machine/frame.h>
#include <machine/reg.h>
#include <machine/vmparam.h>
#ifdef MTRR
#include <machine/mtrr.h>
#endif
#include <machine/netbsd32_machdep.h>
#include <machine/sysarch.h>
#include <machine/userret.h>

#include <compat/netbsd32/netbsd32.h>
#include <compat/netbsd32/netbsd32_exec.h>
#include <compat/netbsd32/netbsd32_syscallargs.h>

#include <compat/sys/signal.h>
#include <compat/sys/signalvar.h>
/* Provide the name of the architecture we're emulating. */
const char machine32[] = "i386";
const char machine_arch32[] = "i386";

#ifdef MTRR
static int x86_64_get_mtrr32(struct lwp *, void *, register_t *);
static int x86_64_set_mtrr32(struct lwp *, void *, register_t *);
#else
#define x86_64_get_mtrr32(x, y, z)	ENOSYS
#define x86_64_set_mtrr32(x, y, z)	ENOSYS
#endif

static int check_sigcontext32(struct lwp *, const struct netbsd32_sigcontext *);

#ifdef EXEC_AOUT
/*
 * There is no native a.out -- this function is required
 * for i386 a.out emulation (COMPAT_NETBSD32+EXEC_AOUT).
 */
int
cpu_exec_aout_makecmds(struct lwp *p, struct exec_package *e)
{

	return ENOEXEC;
}
#endif

void
netbsd32_setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
	struct pcb *pcb;
	struct trapframe *tf;
	struct proc *p = l->l_proc;

	pcb = lwp_getpcb(l);

#if defined(USER_LDT) && 0
	pmap_ldt_cleanup(l);
#endif

	netbsd32_adjust_limits(p);

	l->l_md.md_flags |= MDL_COMPAT32;	/* Force iret not sysret */
	pcb->pcb_flags = PCB_COMPAT32;

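	/*
	 * Pick the initial x87 control word: binaries built for
	 * NetBSD 6.99.26 or later (osversion 699002600) get the current
	 * default, older ones keep the historical compatibility value.
	 */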
	fpu_save_area_clear(l, pack->ep_osversion >= 699002600
	    ? __NetBSD_NPXCW__ : __NetBSD_COMPAT_NPXCW__);

	p->p_flag |= PK_32;

	tf = l->l_md.md_regs;
	tf->tf_ds = LSEL(LUDATA32_SEL, SEL_UPL);
	tf->tf_es = LSEL(LUDATA32_SEL, SEL_UPL);
	cpu_fsgs_zero(l);
	cpu_fsgs_reload(l, tf->tf_ds, tf->tf_es);
	tf->tf_rdi = 0;
	tf->tf_rsi = 0;
	tf->tf_rbp = 0;
	tf->tf_rbx = (uint32_t)p->p_psstrp;
	tf->tf_rdx = 0;
	tf->tf_rcx = 0;
	tf->tf_rax = 0;
	tf->tf_rip = pack->ep_entry;
	tf->tf_cs = LSEL(LUCODE32_SEL, SEL_UPL);
	tf->tf_rflags = PSL_USERSET;
	tf->tf_rsp = stack;
	tf->tf_ss = LSEL(LUDATA32_SEL, SEL_UPL);
}

#ifdef COMPAT_16
static void
netbsd32_sendsig_sigcontext(const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct trapframe *tf;
	int sig = ksi->ksi_signo;
	sig_t catcher = SIGACTION(p, sig).sa_handler;
	struct netbsd32_sigframe_sigcontext *fp, frame;
	int onstack, error;
	struct sigacts *ps = p->p_sigacts;

	tf = l->l_md.md_regs;

	/* Do we need to jump onto the signal stack? */
	onstack =
	    (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;

	/* Allocate space for the signal handler context. */
	if (onstack)
		fp = (struct netbsd32_sigframe_sigcontext *)
		    ((char *)l->l_sigstk.ss_sp + l->l_sigstk.ss_size);
	else
		fp = (struct netbsd32_sigframe_sigcontext *)tf->tf_rsp;
	fp--;

	/* Build stack frame for signal trampoline. */
	switch (ps->sa_sigdesc[sig].sd_vers) {
	case 0:
		frame.sf_ra = (uint32_t)(u_long)p->p_sigctx.ps_sigcode;
		break;
	case 1:
		frame.sf_ra = (uint32_t)(u_long)ps->sa_sigdesc[sig].sd_tramp;
		break;
	default:
		/* Don't know what trampoline version; kill it. */
		sigexit(l, SIGILL);
	}
	frame.sf_signum = sig;
	frame.sf_code = ksi->ksi_trap;
	frame.sf_scp = (uint32_t)(u_long)&fp->sf_sc;

	frame.sf_sc.sc_ds = tf->tf_ds;
	frame.sf_sc.sc_es = tf->tf_es;
	frame.sf_sc.sc_fs = tf->tf_fs;
	frame.sf_sc.sc_gs = tf->tf_gs;

	frame.sf_sc.sc_eflags = tf->tf_rflags;
	frame.sf_sc.sc_edi = tf->tf_rdi;
	frame.sf_sc.sc_esi = tf->tf_rsi;
	frame.sf_sc.sc_ebp = tf->tf_rbp;
	frame.sf_sc.sc_ebx = tf->tf_rbx;
	frame.sf_sc.sc_edx = tf->tf_rdx;
	frame.sf_sc.sc_ecx = tf->tf_rcx;
	frame.sf_sc.sc_eax = tf->tf_rax;
	frame.sf_sc.sc_eip = tf->tf_rip;
	frame.sf_sc.sc_cs = tf->tf_cs;
	frame.sf_sc.sc_esp = tf->tf_rsp;
	frame.sf_sc.sc_ss = tf->tf_ss;
	frame.sf_sc.sc_trapno = tf->tf_trapno;
	frame.sf_sc.sc_err = tf->tf_err;

	/* Save signal stack. */
	frame.sf_sc.sc_onstack = l->l_sigstk.ss_flags & SS_ONSTACK;

	/* Save signal mask. */
	frame.sf_sc.sc_mask = *mask;

	sendsig_reset(l, sig);

	mutex_exit(p->p_lock);
	error = copyout(&frame, fp, sizeof(frame));
	mutex_enter(p->p_lock);

	if (error != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	/*
	 * Build context to run handler in.
	 */
	tf->tf_ds = GSEL(GUDATA32_SEL, SEL_UPL);
	tf->tf_es = GSEL(GUDATA32_SEL, SEL_UPL);
	tf->tf_fs = GSEL(GUDATA32_SEL, SEL_UPL);
	tf->tf_gs = GSEL(GUDATA32_SEL, SEL_UPL);

	/* Ensure FP state is sane. */
	fpu_save_area_reset(l);

	tf->tf_rip = (uint64_t)catcher;
	tf->tf_cs = GSEL(GUCODE32_SEL, SEL_UPL);
	tf->tf_rflags &= ~PSL_CLEARSIG;
	tf->tf_rsp = (uint64_t)fp;
	tf->tf_ss = GSEL(GUDATA32_SEL, SEL_UPL);

	/* Remember that we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if ((vaddr_t)catcher >= VM_MAXUSER_ADDRESS32) {
		/*
		 * The process has given an invalid address for the
		 * handler.  Stop it, but only at this point, so that
		 * the correct information is returned to userland
		 * (or ends up in the core dump).
		 */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}
}
#endif

static void
netbsd32_sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct sigacts *ps = p->p_sigacts;
	int onstack, error;
	int sig = ksi->ksi_signo;
	struct netbsd32_sigframe_siginfo *fp, frame;
	sig_t catcher = SIGACTION(p, sig).sa_handler;
	struct trapframe *tf = l->l_md.md_regs;

	/* Do we need to jump onto the signal stack? */
	onstack =
	    (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;

	/* Allocate space for the signal handler context. */
	if (onstack)
		fp = (struct netbsd32_sigframe_siginfo *)
		    ((char *)l->l_sigstk.ss_sp + l->l_sigstk.ss_size);
	else
		fp = (struct netbsd32_sigframe_siginfo *)tf->tf_rsp;

	fp--;

	/* Build stack frame for signal trampoline. */
	switch (ps->sa_sigdesc[sig].sd_vers) {
	case 0:		/* handled by sendsig_sigcontext */
	case 1:		/* handled by sendsig_sigcontext */
	default:	/* unknown version */
		printf("nsendsig: bad version %d\n",
		    ps->sa_sigdesc[sig].sd_vers);
		sigexit(l, SIGILL);
	case 2:
		break;
	}

	frame.sf_ra = (uint32_t)(uintptr_t)ps->sa_sigdesc[sig].sd_tramp;
	frame.sf_signum = sig;
	frame.sf_sip = (uint32_t)(uintptr_t)&fp->sf_si;
	frame.sf_ucp = (uint32_t)(uintptr_t)&fp->sf_uc;
	netbsd32_si_to_si32(&frame.sf_si, (const siginfo_t *)&ksi->ksi_info);
	frame.sf_uc.uc_flags = _UC_SIGMASK;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_link = (uint32_t)(uintptr_t)l->l_ctxlink;
	frame.sf_uc.uc_flags |= (l->l_sigstk.ss_flags & SS_ONSTACK)
	    ? _UC_SETSTACK : _UC_CLRSTACK;
	memset(&frame.sf_uc.uc_stack, 0, sizeof(frame.sf_uc.uc_stack));
	sendsig_reset(l, sig);

	mutex_exit(p->p_lock);
	cpu_getmcontext32(l, &frame.sf_uc.uc_mcontext, &frame.sf_uc.uc_flags);
	error = copyout(&frame, fp, sizeof(frame));
	mutex_enter(p->p_lock);

	if (error != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	/*
	 * Build context to run handler in.
	 */
	tf->tf_ds = GSEL(GUDATA32_SEL, SEL_UPL);
	tf->tf_es = GSEL(GUDATA32_SEL, SEL_UPL);
	tf->tf_fs = GSEL(GUDATA32_SEL, SEL_UPL);
	tf->tf_gs = GSEL(GUDATA32_SEL, SEL_UPL);

	tf->tf_rip = (uint64_t)catcher;
	tf->tf_cs = GSEL(GUCODE32_SEL, SEL_UPL);
	tf->tf_rflags &= ~PSL_CLEARSIG;
	tf->tf_rsp = (uint64_t)fp;
	tf->tf_ss = GSEL(GUDATA32_SEL, SEL_UPL);

	/* Ensure FP state is sane. */
	fpu_save_area_reset(l);

	/* Remember that we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if ((vaddr_t)catcher >= VM_MAXUSER_ADDRESS32) {
		/*
		 * The process has given an invalid address for the
		 * handler.  Stop it, but only at this point, so that
		 * the correct information is returned to userland
		 * (or ends up in the core dump).
		 */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}
}

void
netbsd32_sendsig(const ksiginfo_t *ksi, const sigset_t *mask)
{
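	/*
	 * Trampoline versions 0 and 1 use the old sigcontext ABI;
	 * version 2 and later use the siginfo/ucontext ABI.
	 */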
#ifdef COMPAT_16
	if (curproc->p_sigacts->sa_sigdesc[ksi->ksi_signo].sd_vers < 2)
		netbsd32_sendsig_sigcontext(ksi, mask);
	else
#endif
		netbsd32_sendsig_siginfo(ksi, mask);
}

int
compat_16_netbsd32___sigreturn14(struct lwp *l, const struct compat_16_netbsd32___sigreturn14_args *uap, register_t *retval)
{
	/* {
		syscallarg(netbsd32_sigcontextp_t) sigcntxp;
	} */
	struct netbsd32_sigcontext *scp, context;
	struct proc *p = l->l_proc;
	struct trapframe *tf;
	int error;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = NETBSD32PTR64(SCARG(uap, sigcntxp));
	if (copyin(scp, &context, sizeof(*scp)) != 0)
		return (EFAULT);

	/*
	 * Check for security violations.
	 */
	error = check_sigcontext32(l, &context);
	if (error != 0)
		return error;

	/* Restore register context. */
	tf = l->l_md.md_regs;
	tf->tf_ds = context.sc_ds;
	tf->tf_es = context.sc_es;
	cpu_fsgs_reload(l, context.sc_fs, context.sc_gs);
	tf->tf_rflags = context.sc_eflags;
	tf->tf_rdi = context.sc_edi;
	tf->tf_rsi = context.sc_esi;
	tf->tf_rbp = context.sc_ebp;
	tf->tf_rbx = context.sc_ebx;
	tf->tf_rdx = context.sc_edx;
	tf->tf_rcx = context.sc_ecx;
	tf->tf_rax = context.sc_eax;

	tf->tf_rip = context.sc_eip;
	tf->tf_cs = context.sc_cs;
	tf->tf_rsp = context.sc_esp;
	tf->tf_ss = context.sc_ss;

	mutex_enter(p->p_lock);
	/* Restore signal stack. */
	if (context.sc_onstack & SS_ONSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	else
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	/* Restore signal mask. */
	(void) sigprocmask1(l, SIG_SETMASK, &context.sc_mask, 0);
	mutex_exit(p->p_lock);

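	/*
	 * EJUSTRETURN: the trapframe already holds the complete user
	 * state, so the syscall return path must not touch the registers.
	 */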
	return (EJUSTRETURN);
}


#ifdef COREDUMP
/*
 * Dump the machine specific segment at the start of a core dump.
 */
struct md_core32 {
	struct reg32 intreg;
	struct fpreg32 freg;
};

int
cpu_coredump32(struct lwp *l, struct coredump_iostate *iocookie,
    struct core32 *chdr)
{
	struct md_core32 md_core;
	struct coreseg cseg;
	int error;

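	/*
	 * With no I/O cookie we only fill in the header sizes, presumably
	 * so the caller can lay out the core file before writing the data.
	 */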
	if (iocookie == NULL) {
		CORE_SETMAGIC(*chdr, COREMAGIC, MID_I386, 0);
		chdr->c_hdrsize = ALIGN32(sizeof(*chdr));
		chdr->c_seghdrsize = ALIGN32(sizeof(cseg));
		chdr->c_cpusize = sizeof(md_core);
		chdr->c_nseg++;
		return 0;
	}

	/* Save integer registers. */
	error = netbsd32_process_read_regs(l, &md_core.intreg);
	if (error)
		return error;

	/* Save floating point registers. */
	error = netbsd32_process_read_fpregs(l, &md_core.freg, NULL);
	if (error)
		return error;

	CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_I386, CORE_CPU);
	cseg.c_addr = 0;
	cseg.c_size = chdr->c_cpusize;

	error = coredump_write(iocookie, UIO_SYSSPACE, &cseg,
	    chdr->c_seghdrsize);
	if (error)
		return error;

	return coredump_write(iocookie, UIO_SYSSPACE, &md_core,
	    sizeof(md_core));
}
#endif

int
netbsd32_process_read_regs(struct lwp *l, struct reg32 *regs)
{
	struct trapframe *tf = l->l_md.md_regs;

	/* XXX avoid sign extension problems with unknown upper bits? */
	regs->r_gs = tf->tf_gs & 0xffff;
	regs->r_fs = tf->tf_fs & 0xffff;
	regs->r_es = tf->tf_es & 0xffff;
	regs->r_ds = tf->tf_ds & 0xffff;
	regs->r_eflags = tf->tf_rflags;
	regs->r_edi = tf->tf_rdi & 0xffffffff;
	regs->r_esi = tf->tf_rsi & 0xffffffff;
	regs->r_ebp = tf->tf_rbp & 0xffffffff;
	regs->r_ebx = tf->tf_rbx & 0xffffffff;
	regs->r_edx = tf->tf_rdx & 0xffffffff;
	regs->r_ecx = tf->tf_rcx & 0xffffffff;
	regs->r_eax = tf->tf_rax & 0xffffffff;
	regs->r_eip = tf->tf_rip & 0xffffffff;
	regs->r_cs = tf->tf_cs & 0xffff;
	regs->r_esp = tf->tf_rsp & 0xffffffff;
	regs->r_ss = tf->tf_ss & 0xffff;

	return (0);
}

int
netbsd32_process_read_fpregs(struct lwp *l, struct fpreg32 *regs, size_t *sz)
{

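	/* A 32-bit fpreg is the classic i387 save area; reuse the s87 accessors. */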
	__CTASSERT(sizeof *regs == sizeof (struct save87));
	process_read_fpregs_s87(l, (struct save87 *)regs);
	return 0;
}

int
netbsd32_process_write_regs(struct lwp *l, const struct reg32 *regs)
{
	struct trapframe *tf = l->l_md.md_regs;

	/*
	 * Check for security violations. Taken from i386/process_machdep.c.
	 */
	if (((regs->r_eflags ^ tf->tf_rflags) & PSL_USERSTATIC) != 0 ||
	    !VALID_USER_CSEL32(regs->r_cs))
		return EINVAL;

	tf->tf_rax = regs->r_eax;
	tf->tf_rcx = regs->r_ecx;
	tf->tf_rdx = regs->r_edx;
	tf->tf_rbx = regs->r_ebx;
	tf->tf_rsp = regs->r_esp;
	tf->tf_rbp = regs->r_ebp;
	tf->tf_rsi = regs->r_esi;
	tf->tf_rdi = regs->r_edi;
	tf->tf_rip = regs->r_eip;
	tf->tf_rflags = regs->r_eflags;
	tf->tf_cs = regs->r_cs;
	tf->tf_ss = regs->r_ss;
	tf->tf_ds = regs->r_ds;
	tf->tf_es = regs->r_es;
	tf->tf_fs = regs->r_fs;
	tf->tf_gs = regs->r_gs;

	return 0;
}

int
netbsd32_process_write_fpregs(struct lwp *l, const struct fpreg32 *regs,
    size_t sz)
{

	__CTASSERT(sizeof *regs == sizeof (struct save87));
	process_write_fpregs_s87(l, (const struct save87 *)regs);
	return 0;
}

int
netbsd32_sysarch(struct lwp *l, const struct netbsd32_sysarch_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) op;
		syscallarg(netbsd32_voidp) parms;
	} */
	int error;

	switch (SCARG(uap, op)) {
	case X86_IOPL:
		error = x86_iopl(l,
		    NETBSD32PTR64(SCARG(uap, parms)), retval);
		break;
	case X86_GET_MTRR:
		error = x86_64_get_mtrr32(l,
		    NETBSD32PTR64(SCARG(uap, parms)), retval);
		break;
	case X86_SET_MTRR:
		error = x86_64_set_mtrr32(l,
		    NETBSD32PTR64(SCARG(uap, parms)), retval);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}

#ifdef MTRR
static int
x86_64_get_mtrr32(struct lwp *l, void *args, register_t *retval)
{
	struct x86_64_get_mtrr_args32 args32;
	int error, i;
	int32_t n;
	struct mtrr32 *m32p, m32;
	struct mtrr *m64p, *mp;
	size_t size;

	m64p = NULL;

	if (mtrr_funcs == NULL)
		return ENOSYS;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_GET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	error = copyin(args, &args32, sizeof args32);
	if (error != 0)
		return error;

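	/*
	 * A NULL mtrrp means the caller only wants to know how many
	 * MTRRs are available.
	 */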
	if (args32.mtrrp == 0) {
		n = (MTRR_I686_NFIXED_SOFT + MTRR_I686_NVAR_MAX);
		return copyout(&n, (void *)(uintptr_t)args32.n, sizeof n);
	}

	error = copyin((void *)(uintptr_t)args32.n, &n, sizeof n);
	if (error != 0)
		return error;

	if (n <= 0 || n > (MTRR_I686_NFIXED_SOFT + MTRR_I686_NVAR_MAX))
		return EINVAL;

	size = n * sizeof(struct mtrr);
	m64p = kmem_zalloc(size, KM_SLEEP);
	if (m64p == NULL) {
		error = ENOMEM;
		goto fail;
	}
	error = mtrr_get(m64p, &n, l->l_proc, 0);
	if (error != 0)
		goto fail;
	m32p = (struct mtrr32 *)(uintptr_t)args32.mtrrp;
	mp = m64p;
	for (i = 0; i < n; i++) {
		m32.base = mp->base;
		m32.len = mp->len;
		m32.type = mp->type;
		m32.flags = mp->flags;
		m32.owner = mp->owner;
		error = copyout(&m32, m32p, sizeof m32);
		if (error != 0)
			break;
		mp++;
		m32p++;
	}
fail:
	if (m64p != NULL)
		kmem_free(m64p, size);
	if (error != 0)
		n = 0;
	copyout(&n, (void *)(uintptr_t)args32.n, sizeof n);
	return error;
}

static int
x86_64_set_mtrr32(struct lwp *l, void *args, register_t *retval)
{
	struct x86_64_set_mtrr_args32 args32;
	struct mtrr32 *m32p, m32;
	struct mtrr *m64p, *mp;
	int error, i;
	int32_t n;
	size_t size;

	m64p = NULL;

	if (mtrr_funcs == NULL)
		return ENOSYS;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_SET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	error = copyin(args, &args32, sizeof args32);
	if (error != 0)
		return error;

	error = copyin((void *)(uintptr_t)args32.n, &n, sizeof n);
	if (error != 0)
		return error;

	if (n <= 0 || n > (MTRR_I686_NFIXED_SOFT + MTRR_I686_NVAR_MAX)) {
		error = EINVAL;
		goto fail;
	}

	size = n * sizeof(struct mtrr);
	m64p = kmem_zalloc(size, KM_SLEEP);
	if (m64p == NULL) {
		error = ENOMEM;
		goto fail;
	}
	m32p = (struct mtrr32 *)(uintptr_t)args32.mtrrp;
	mp = m64p;
	for (i = 0; i < n; i++) {
		error = copyin(m32p, &m32, sizeof m32);
		if (error != 0)
			goto fail;
		mp->base = m32.base;
		mp->len = m32.len;
		mp->type = m32.type;
		mp->flags = m32.flags;
		mp->owner = m32.owner;
		m32p++;
		mp++;
	}

	error = mtrr_set(m64p, &n, l->l_proc, 0);
fail:
	if (m64p != NULL)
		kmem_free(m64p, size);
	if (error != 0)
		n = 0;
	copyout(&n, (void *)(uintptr_t)args32.n, sizeof n);
	return error;
}
#endif

#if 0
void
netbsd32_mcontext_to_mcontext32(mcontext32_t *m32, mcontext_t *m, int flags)
{
	if ((flags & _UC_CPU) != 0) {
		m32->__gregs[_REG32_GS] = m->__gregs[_REG_GS] & 0xffffffff;
		m32->__gregs[_REG32_FS] = m->__gregs[_REG_FS] & 0xffffffff;
		m32->__gregs[_REG32_ES] = m->__gregs[_REG_ES] & 0xffffffff;
		m32->__gregs[_REG32_DS] = m->__gregs[_REG_DS] & 0xffffffff;
		m32->__gregs[_REG32_EDI] = m->__gregs[_REG_RDI] & 0xffffffff;
		m32->__gregs[_REG32_ESI] = m->__gregs[_REG_RSI] & 0xffffffff;
		m32->__gregs[_REG32_EBP] = m->__gregs[_REG_RBP] & 0xffffffff;
		m32->__gregs[_REG32_ESP] = m->__gregs[_REG_URSP] & 0xffffffff;
		m32->__gregs[_REG32_EBX] = m->__gregs[_REG_RBX] & 0xffffffff;
		m32->__gregs[_REG32_EDX] = m->__gregs[_REG_RDX] & 0xffffffff;
		m32->__gregs[_REG32_ECX] = m->__gregs[_REG_RCX] & 0xffffffff;
		m32->__gregs[_REG32_EAX] = m->__gregs[_REG_RAX] & 0xffffffff;
		m32->__gregs[_REG32_TRAPNO] =
		    m->__gregs[_REG_TRAPNO] & 0xffffffff;
		m32->__gregs[_REG32_ERR] = m->__gregs[_REG_ERR] & 0xffffffff;
		m32->__gregs[_REG32_EIP] = m->__gregs[_REG_RIP] & 0xffffffff;
		m32->__gregs[_REG32_CS] = m->__gregs[_REG_CS] & 0xffffffff;
		m32->__gregs[_REG32_EFL] = m->__gregs[_REG_RFL] & 0xffffffff;
		m32->__gregs[_REG32_UESP] = m->__gregs[_REG_URSP] & 0xffffffff;
		m32->__gregs[_REG32_SS] = m->__gregs[_REG_SS] & 0xffffffff;
	}
	if ((flags & _UC_FPU) != 0)
		memcpy(&m32->__fpregs, &m->__fpregs, sizeof (m32->__fpregs));
}

void
netbsd32_mcontext32_to_mcontext(mcontext_t *m, mcontext32_t *m32, int flags)
{
	if ((flags & _UC_CPU) != 0) {
		m->__gregs[_REG_GS] = m32->__gregs[_REG32_GS];
		m->__gregs[_REG_FS] = m32->__gregs[_REG32_FS];
		m->__gregs[_REG_ES] = m32->__gregs[_REG32_ES];
		m->__gregs[_REG_DS] = m32->__gregs[_REG32_DS];
		m->__gregs[_REG_RDI] = m32->__gregs[_REG32_EDI];
		m->__gregs[_REG_RSI] = m32->__gregs[_REG32_ESI];
		m->__gregs[_REG_RBP] = m32->__gregs[_REG32_EBP];
		m->__gregs[_REG_URSP] = m32->__gregs[_REG32_ESP];
		m->__gregs[_REG_RBX] = m32->__gregs[_REG32_EBX];
		m->__gregs[_REG_RDX] = m32->__gregs[_REG32_EDX];
		m->__gregs[_REG_RCX] = m32->__gregs[_REG32_ECX];
		m->__gregs[_REG_RAX] = m32->__gregs[_REG32_EAX];
		m->__gregs[_REG_TRAPNO] = m32->__gregs[_REG32_TRAPNO];
		m->__gregs[_REG_ERR] = m32->__gregs[_REG32_ERR];
		m->__gregs[_REG_RIP] = m32->__gregs[_REG32_EIP];
		m->__gregs[_REG_CS] = m32->__gregs[_REG32_CS];
		m->__gregs[_REG_RFL] = m32->__gregs[_REG32_EFL];
		m->__gregs[_REG_URSP] = m32->__gregs[_REG32_UESP];
		m->__gregs[_REG_SS] = m32->__gregs[_REG32_SS];
	}
	if (flags & _UC_FPU)
		memcpy(&m->__fpregs, &m32->__fpregs, sizeof (m->__fpregs));
}
#endif


int
cpu_setmcontext32(struct lwp *l, const mcontext32_t *mcp, unsigned int flags)
{
	struct trapframe *tf = l->l_md.md_regs;
	const __greg32_t *gr = mcp->__gregs;
	struct proc *p = l->l_proc;
	int error;

	/* Restore register context, if any. */
	if ((flags & _UC_CPU) != 0) {
		/*
		 * Check for security violations.
		 */
		error = cpu_mcontext32_validate(l, mcp);
		if (error != 0)
			return error;

		cpu_fsgs_reload(l, gr[_REG32_FS], gr[_REG32_GS]);
		tf->tf_es = gr[_REG32_ES];
		tf->tf_ds = gr[_REG32_DS];
		/* Only change the user-alterable part of eflags */
		tf->tf_rflags &= ~PSL_USER;
		tf->tf_rflags |= (gr[_REG32_EFL] & PSL_USER);
		tf->tf_rdi = gr[_REG32_EDI];
		tf->tf_rsi = gr[_REG32_ESI];
		tf->tf_rbp = gr[_REG32_EBP];
		tf->tf_rbx = gr[_REG32_EBX];
		tf->tf_rdx = gr[_REG32_EDX];
		tf->tf_rcx = gr[_REG32_ECX];
		tf->tf_rax = gr[_REG32_EAX];
		tf->tf_rip = gr[_REG32_EIP];
		tf->tf_cs = gr[_REG32_CS];
		tf->tf_rsp = gr[_REG32_UESP];
		tf->tf_ss = gr[_REG32_SS];
	}

	if ((flags & _UC_TLSBASE) != 0)
		lwp_setprivate(l, (void *)(uintptr_t)mcp->_mc_tlsbase);

	/* Restore floating point register context, if any. */
	if ((flags & _UC_FPU) != 0) {
		/* Assume fxsave context */
		process_write_fpregs_xmm(l, (const struct fxsave *)
		    &mcp->__fpregs.__fp_reg_set.__fp_xmm_state);
	}

	mutex_enter(p->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(p->p_lock);

	return (0);
}

void
cpu_getmcontext32(struct lwp *l, mcontext32_t *mcp, unsigned int *flags)
{
	const struct trapframe *tf = l->l_md.md_regs;
	__greg32_t *gr = mcp->__gregs;
	__greg32_t ras_eip;

	/* Save register context. */
	gr[_REG32_GS] = tf->tf_gs;
	gr[_REG32_FS] = tf->tf_fs;
	gr[_REG32_ES] = tf->tf_es;
	gr[_REG32_DS] = tf->tf_ds;
	gr[_REG32_EFL] = tf->tf_rflags;
	gr[_REG32_EDI] = tf->tf_rdi;
	gr[_REG32_ESI] = tf->tf_rsi;
	gr[_REG32_EBP] = tf->tf_rbp;
	gr[_REG32_EBX] = tf->tf_rbx;
	gr[_REG32_EDX] = tf->tf_rdx;
	gr[_REG32_ECX] = tf->tf_rcx;
	gr[_REG32_EAX] = tf->tf_rax;
	gr[_REG32_EIP] = tf->tf_rip;
	gr[_REG32_CS] = tf->tf_cs;
	gr[_REG32_ESP] = tf->tf_rsp;
	gr[_REG32_UESP] = tf->tf_rsp;
	gr[_REG32_SS] = tf->tf_ss;
	gr[_REG32_TRAPNO] = tf->tf_trapno;
	gr[_REG32_ERR] = tf->tf_err;

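	/*
	 * If the interrupted PC lies inside a registered restartable
	 * atomic sequence, report the sequence's restart address instead.
	 */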
	if ((ras_eip = (__greg32_t)(uintptr_t)ras_lookup(l->l_proc,
	    (void *) (uintptr_t)gr[_REG32_EIP])) != -1)
		gr[_REG32_EIP] = ras_eip;

	*flags |= _UC_CPU;

	mcp->_mc_tlsbase = (uint32_t)(uintptr_t)l->l_private;
	*flags |= _UC_TLSBASE;

	/* Save floating point register context. */
	process_read_fpregs_xmm(l, (struct fxsave *)
	    &mcp->__fpregs.__fp_reg_set.__fp_xmm_state);
	memset(&mcp->__fpregs.__fp_pad, 0, sizeof mcp->__fpregs.__fp_pad);
	*flags |= _UC_FXSAVE | _UC_FPU;
}

void
startlwp32(void *arg)
{
	ucontext32_t *uc = arg;
	lwp_t *l = curlwp;
	int error __diagused;

	error = cpu_setmcontext32(l, &uc->uc_mcontext, uc->uc_flags);
	KASSERT(error == 0);

	/* Note: we are freeing ucontext_t, not ucontext32_t. */
	kmem_free(uc, sizeof(ucontext_t));
	userret(l);
}

/*
 * For various reasons, the amd64 port can't do what the i386 port does,
 * and rely on catching invalid user contexts on exit from the kernel.
 * These functions perform the needed checks.
 */

static int
check_sigcontext32(struct lwp *l, const struct netbsd32_sigcontext *scp)
{
	struct trapframe *tf;
	struct pcb *pcb;

	tf = l->l_md.md_regs;
	pcb = lwp_getpcb(curlwp);

	if (((scp->sc_eflags ^ tf->tf_rflags) & PSL_USERSTATIC) != 0 ||
	    !VALID_USER_CSEL32(scp->sc_cs))
		return EINVAL;
	if (scp->sc_fs != 0 && !VALID_USER_DSEL32(scp->sc_fs) &&
	    !(VALID_USER_FSEL32(scp->sc_fs) && pcb->pcb_fs != 0))
		return EINVAL;
	if (scp->sc_gs != 0 && !VALID_USER_DSEL32(scp->sc_gs) &&
	    !(VALID_USER_GSEL32(scp->sc_gs) && pcb->pcb_gs != 0))
		return EINVAL;
	if (scp->sc_es != 0 && !VALID_USER_DSEL32(scp->sc_es))
		return EINVAL;
	if (!VALID_USER_DSEL32(scp->sc_ds) || !VALID_USER_DSEL32(scp->sc_ss))
		return EINVAL;
	if (scp->sc_eip >= VM_MAXUSER_ADDRESS32)
		return EINVAL;
	return 0;
}

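/*
 * Validate a 32-bit mcontext before it is installed; the checks mirror
 * check_sigcontext32 above.
 */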
int
cpu_mcontext32_validate(struct lwp *l, const mcontext32_t *mcp)
{
	const __greg32_t *gr;
	struct trapframe *tf;
	struct pcb *pcb;

	gr = mcp->__gregs;
	tf = l->l_md.md_regs;
	pcb = lwp_getpcb(l);

	if (((gr[_REG32_EFL] ^ tf->tf_rflags) & PSL_USERSTATIC) != 0 ||
	    !VALID_USER_CSEL32(gr[_REG32_CS]))
		return EINVAL;
	if (gr[_REG32_FS] != 0 && !VALID_USER_DSEL32(gr[_REG32_FS]) &&
	    !(VALID_USER_FSEL32(gr[_REG32_FS]) && pcb->pcb_fs != 0))
		return EINVAL;
	if (gr[_REG32_GS] != 0 && !VALID_USER_DSEL32(gr[_REG32_GS]) &&
	    !(VALID_USER_GSEL32(gr[_REG32_GS]) && pcb->pcb_gs != 0))
		return EINVAL;
	if (gr[_REG32_ES] != 0 && !VALID_USER_DSEL32(gr[_REG32_ES]))
		return EINVAL;
	if (!VALID_USER_DSEL32(gr[_REG32_DS]) ||
	    !VALID_USER_DSEL32(gr[_REG32_SS]))
		return EINVAL;
	if (gr[_REG32_EIP] >= VM_MAXUSER_ADDRESS32)
		return EINVAL;
	return 0;
}

vaddr_t
netbsd32_vm_default_addr(struct proc *p, vaddr_t base, vsize_t sz,
    int topdown)
{
	if (topdown)
		return VM_DEFAULT_ADDRESS32_TOPDOWN(base, sz);
	else
		return VM_DEFAULT_ADDRESS32_BOTTOMUP(base, sz);
}

#ifdef COMPAT_13
int
compat_13_netbsd32_sigreturn(struct lwp *l, const struct compat_13_netbsd32_sigreturn_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct netbsd32_sigcontext13 *) sigcntxp;
	} */
	struct proc *p = l->l_proc;
	struct netbsd32_sigcontext13 *scp, context;
	struct trapframe *tf;
	sigset_t mask;
	int error;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = (struct netbsd32_sigcontext13 *)NETBSD32PTR64(SCARG(uap, sigcntxp));
	if (copyin((void *)scp, &context, sizeof(*scp)) != 0)
		return (EFAULT);

	/* Restore register context. */
	tf = l->l_md.md_regs;

	/*
	 * Check for security violations.
	 */
	error = check_sigcontext32(l, (const struct netbsd32_sigcontext *)&context);
	if (error != 0)
		return error;

	tf->tf_gs = context.sc_gs;
	tf->tf_fs = context.sc_fs;
	tf->tf_es = context.sc_es;
	tf->tf_ds = context.sc_ds;
	tf->tf_rflags = context.sc_eflags;
	tf->tf_rdi = context.sc_edi;
	tf->tf_rsi = context.sc_esi;
	tf->tf_rbp = context.sc_ebp;
	tf->tf_rbx = context.sc_ebx;
	tf->tf_rdx = context.sc_edx;
	tf->tf_rcx = context.sc_ecx;
	tf->tf_rax = context.sc_eax;
	tf->tf_rip = context.sc_eip;
	tf->tf_cs = context.sc_cs;
	tf->tf_rsp = context.sc_esp;
	tf->tf_ss = context.sc_ss;

	mutex_enter(p->p_lock);
	/* Restore signal stack. */
	if (context.sc_onstack & SS_ONSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	else
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	/* Restore signal mask. */
	native_sigset13_to_sigset((sigset13_t *)&context.sc_mask, &mask);
	(void) sigprocmask1(l, SIG_SETMASK, &mask, 0);
	mutex_exit(p->p_lock);

	return (EJUSTRETURN);
}
#endif