/* $NetBSD: mtrr_i686.c,v 1.28 2014/05/29 19:08:30 plunky Exp $ */

/*-
 * Copyright (c) 2000, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Bill Sommerfeld.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mtrr_i686.c,v 1.28 2014/05/29 19:08:30 plunky Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <machine/specialreg.h>
#include <machine/cpuvar.h>
#include <machine/cpufunc.h>
#include <machine/mtrr.h>

extern paddr_t avail_end;

static void i686_mtrr_reload(int);
static void i686_mtrr_init_cpu(struct cpu_info *);
static void i686_mtrr_reload_cpu(struct cpu_info *);
static void i686_mtrr_clean(struct proc *p);
static int i686_mtrr_set(struct mtrr *, int *n, struct proc *p, int flags);
static int i686_mtrr_get(struct mtrr *, int *n, struct proc *p, int flags);
static void i686_mtrr_dump(const char *tag);

static int i686_mtrr_validate(struct mtrr *, struct proc *p);
static void i686_soft2raw(void);
static void i686_raw2soft(void);
static void i686_mtrr_commit(void);
static int i686_mtrr_setone(struct mtrr *, struct proc *p);
static int i686_mtrr_conflict(uint8_t, uint8_t);

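/*
 * Raw MSR state, in the order we read and write it: the variable-range
 * base/mask pairs first, then the fixed-range registers, then
 * MTRRdefType.  mtrr_var_raw and mtrr_fixed_raw below point into this
 * array, so the ordering must not change.
 */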
static struct mtrr_state
mtrr_raw[] = {
	{ MSR_MTRRphysBase0, 0 },
	{ MSR_MTRRphysMask0, 0 },
	{ MSR_MTRRphysBase1, 0 },
	{ MSR_MTRRphysMask1, 0 },
	{ MSR_MTRRphysBase2, 0 },
	{ MSR_MTRRphysMask2, 0 },
	{ MSR_MTRRphysBase3, 0 },
	{ MSR_MTRRphysMask3, 0 },
	{ MSR_MTRRphysBase4, 0 },
	{ MSR_MTRRphysMask4, 0 },
	{ MSR_MTRRphysBase5, 0 },
	{ MSR_MTRRphysMask5, 0 },
	{ MSR_MTRRphysBase6, 0 },
	{ MSR_MTRRphysMask6, 0 },
	{ MSR_MTRRphysBase7, 0 },
	{ MSR_MTRRphysMask7, 0 },
	{ MSR_MTRRphysBase8, 0 },
	{ MSR_MTRRphysMask8, 0 },
	{ MSR_MTRRphysBase9, 0 },
	{ MSR_MTRRphysMask9, 0 },
	{ MSR_MTRRphysBase10, 0 },
	{ MSR_MTRRphysMask10, 0 },
	{ MSR_MTRRphysBase11, 0 },
	{ MSR_MTRRphysMask11, 0 },
	{ MSR_MTRRphysBase12, 0 },
	{ MSR_MTRRphysMask12, 0 },
	{ MSR_MTRRphysBase13, 0 },
	{ MSR_MTRRphysMask13, 0 },
	{ MSR_MTRRphysBase14, 0 },
	{ MSR_MTRRphysMask14, 0 },
	{ MSR_MTRRphysBase15, 0 },
	{ MSR_MTRRphysMask15, 0 },
	{ MSR_MTRRfix64K_00000, 0 },
	{ MSR_MTRRfix16K_80000, 0 },
	{ MSR_MTRRfix16K_A0000, 0 },
	{ MSR_MTRRfix4K_C0000, 0 },
	{ MSR_MTRRfix4K_C8000, 0 },
	{ MSR_MTRRfix4K_D0000, 0 },
	{ MSR_MTRRfix4K_D8000, 0 },
	{ MSR_MTRRfix4K_E0000, 0 },
	{ MSR_MTRRfix4K_E8000, 0 },
	{ MSR_MTRRfix4K_F0000, 0 },
	{ MSR_MTRRfix4K_F8000, 0 },
	{ MSR_MTRRdefType, 0 },
};

static const int nmtrr_raw = __arraycount(mtrr_raw);
static int i686_mtrr_vcnt = 0;

static struct mtrr_state *mtrr_var_raw;
static struct mtrr_state *mtrr_fixed_raw;

static struct mtrr *mtrr_fixed;
static struct mtrr *mtrr_var;

struct mtrr_funcs i686_mtrr_funcs = {
	i686_mtrr_init_cpu,
	i686_mtrr_reload_cpu,
	i686_mtrr_clean,
	i686_mtrr_set,
	i686_mtrr_get,
	i686_mtrr_commit,
	i686_mtrr_dump
};

static kcpuset_t *mtrr_waiting;

static uint64_t i686_mtrr_cap;

static void
i686_mtrr_dump(const char *tag)
{
	int i;

	for (i = 0; i < nmtrr_raw; i++)
		printf("%s: %x: %016llx\n",
		    tag, mtrr_raw[i].msraddr,
		    (unsigned long long)rdmsr(mtrr_raw[i].msraddr));
}

/*
 * The Intel Architecture Software Developer's Manual, volume 3 (systems
 * programming), section 9.12.8, describes a simple 15-step process for
 * updating the MTRRs on all processors in a multiprocessor system.
 * If synch is nonzero, assume we're being called from an IPI handler,
 * and synchronize with all running processors.
 */

/*
 * 1. Broadcast to all processors to execute the following code sequence.
 */

static void
i686_mtrr_reload(int synch)
{
	int i;
	/*
	 * XXX: cr0 is 64-bit on amd64 too, but the upper bits are
	 * unused and must be zero, so it does not matter much.
	 * The prototypes of lcr0()/rcr0() would need to change as
	 * well to correct this.
	 */
	uint32_t cr0;
	vaddr_t cr3, cr4;
	uint32_t origcr0;
	vaddr_t origcr4;

	/*
	 * 2. Disable interrupts.
	 */
	x86_disable_intr();

#ifdef MULTIPROCESSOR
	if (synch) {
		/*
		 * 3. Wait for all processors to reach this point.
		 */
		kcpuset_atomic_set(mtrr_waiting, cpu_index(curcpu()));
		while (!kcpuset_match(mtrr_waiting, kcpuset_running)) {
			DELAY(10);
		}
	}
#endif

	/*
	 * 4. Enter the no-fill cache mode (set the CD flag in CR0 to 1 and
	 * the NW flag to 0).
	 */

	origcr0 = cr0 = rcr0();
	cr0 |= CR0_CD;
	cr0 &= ~CR0_NW;
	lcr0(cr0);

	/*
	 * 5. Flush all caches using the WBINVD instruction.
	 */

	wbinvd();

	/*
	 * 6. Clear the PGE flag in control register CR4 (if set).
	 */

	origcr4 = cr4 = rcr4();
	cr4 &= ~CR4_PGE;
	lcr4(cr4);

	/*
	 * 7. Flush all TLBs (execute a MOV from control register CR3
	 * to another register and then a move from that register back
	 * to CR3).
	 */

	cr3 = rcr3();
	lcr3(cr3);

	/*
	 * 8. Disable all range registers (by clearing the E flag in
	 * register MTRRdefType).  If only variable ranges are being
	 * modified, software may clear the valid bits for the
	 * affected register pairs instead.
	 */
	/* disable MTRRs (E = 0) */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~MTRR_I686_ENABLE_MASK);

	/*
	 * 9. Update the MTRRs.
	 */

	for (i = 0; i < nmtrr_raw; i++) {
		uint64_t val = mtrr_raw[i].msrval;
		uint32_t addr = mtrr_raw[i].msraddr;
		if (addr == 0)
			continue;
		if (addr == MSR_MTRRdefType)
			val &= ~MTRR_I686_ENABLE_MASK;
		wrmsr(addr, val);
	}

	/*
	 * 10. Enable all range registers (by setting the E flag in
	 * register MTRRdefType).  If only variable-range registers
	 * were modified and their individual valid bits were cleared,
	 * then set the valid bits for the affected ranges instead.
	 */

	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | MTRR_I686_ENABLE_MASK);

	/*
	 * 11. Flush all caches and all TLBs a second time (repeat
	 * steps 5 and 7).
	 */

	wbinvd();
	lcr3(cr3);

	/*
	 * 12. Enter the normal cache mode to re-enable caching (set the CD
	 * and NW flags in CR0 to 0).
	 */

	lcr0(origcr0);

	/*
	 * 13. Set the PGE flag in control register CR4, if previously
	 * cleared.
	 */

	lcr4(origcr4);

#ifdef MULTIPROCESSOR
	if (synch) {
		/*
		 * 14. Wait for all processors to reach this point.
		 */
		kcpuset_atomic_clear(mtrr_waiting, cpu_index(curcpu()));
		while (!kcpuset_iszero(mtrr_waiting)) {
			DELAY(10);
		}
	}
#endif

	/*
	 * 15. Enable interrupts.
	 */
	x86_enable_intr();
}

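/*
 * mtrr_funcs hook: resynchronize this CPU's MTRRs with the shared raw
 * state, rendezvousing with the other processors.  This is intended to
 * be run from the handler for the X86_IPI_MTRR broadcast issued by
 * i686_mtrr_commit().
 */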
static void
i686_mtrr_reload_cpu(struct cpu_info *ci)
{
	i686_mtrr_reload(1);
}

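/*
 * One-time setup on the boot processor: read MTRRcap to find how many
 * variable ranges this CPU implements, snapshot the MSR state left by
 * the firmware, allocate the software tables and install ourselves as
 * the active mtrr_funcs.
 */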
void
i686_mtrr_init_first(void)
{
	int i;

	i686_mtrr_cap = rdmsr(MSR_MTRRcap);
	i686_mtrr_vcnt = i686_mtrr_cap & MTRR_I686_CAP_VCNT_MASK;

	if (i686_mtrr_vcnt > MTRR_I686_NVAR_MAX)
		printf("%s: FIXME: more than %d MTRRs (%d)\n", __FILE__,
		    MTRR_I686_NVAR_MAX, i686_mtrr_vcnt);
	else if (i686_mtrr_vcnt < MTRR_I686_NVAR_MAX) {
		for (i = MTRR_I686_NVAR_MAX - i686_mtrr_vcnt; i; i--) {
			mtrr_raw[(MTRR_I686_NVAR_MAX - i) * 2].msraddr = 0;
			mtrr_raw[(MTRR_I686_NVAR_MAX - i) * 2 + 1].msraddr = 0;
		}
	}

	for (i = 0; i < nmtrr_raw; i++) {
		if (mtrr_raw[i].msraddr)
			mtrr_raw[i].msrval = rdmsr(mtrr_raw[i].msraddr);
		else
			mtrr_raw[i].msrval = 0;
	}
#if 0
	mtrr_dump("init mtrr");
#endif

	kcpuset_create(&mtrr_waiting, true);

	mtrr_fixed =
	    kmem_zalloc(MTRR_I686_NFIXED_SOFT * sizeof(struct mtrr), KM_SLEEP);
	KASSERT(mtrr_fixed != NULL);

	if (i686_mtrr_vcnt) {
		mtrr_var =
		    kmem_zalloc(i686_mtrr_vcnt * sizeof(struct mtrr), KM_SLEEP);
		KASSERT(mtrr_var != NULL);
	}

	mtrr_var_raw = &mtrr_raw[0];
	mtrr_fixed_raw = &mtrr_raw[MTRR_I686_NVAR_MAX * 2];
	mtrr_funcs = &i686_mtrr_funcs;

	i686_raw2soft();
}

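/*
 * Decode the raw MSR snapshot into the software mtrr_var/mtrr_fixed
 * arrays.  Each fixed-range MSR packs eight one-byte memory types, so
 * one raw entry expands to eight software entries.
 */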
static void
i686_raw2soft(void)
{
	int i, j, idx;
	struct mtrr *mtrrp;
	uint64_t base, mask;

	for (i = 0; i < i686_mtrr_vcnt; i++) {
		mtrrp = &mtrr_var[i];
		memset(mtrrp, 0, sizeof *mtrrp);
		mask = mtrr_var_raw[i * 2 + 1].msrval;
		if (!mtrr_valid(mask))
			continue;
		base = mtrr_var_raw[i * 2].msrval;
		mtrrp->base = mtrr_base(base);
		mtrrp->type = mtrr_type(base);
		mtrrp->len = mtrr_len(mask);
		mtrrp->flags |= MTRR_VALID;
	}

	idx = 0;
	base = 0;
	for (i = 0; i < MTRR_I686_NFIXED_64K; i++, idx++) {
		mask = mtrr_fixed_raw[idx].msrval;
		for (j = 0; j < 8; j++) {
			mtrrp = &mtrr_fixed[idx * 8 + j];
			mtrrp->owner = 0;
			mtrrp->flags = MTRR_FIXED | MTRR_VALID;
			mtrrp->base = base;
			mtrrp->len = 65536;
			mtrrp->type = mask & 0xff;
			mask >>= 8;
			base += 65536;
		}
	}

	for (i = 0; i < MTRR_I686_NFIXED_16K; i++, idx++) {
		mask = mtrr_fixed_raw[idx].msrval;
		for (j = 0; j < 8; j++) {
			mtrrp = &mtrr_fixed[idx * 8 + j];
			mtrrp->owner = 0;
			mtrrp->flags = MTRR_FIXED | MTRR_VALID;
			mtrrp->base = base;
			mtrrp->len = 16384;
			mtrrp->type = mask & 0xff;
			mask >>= 8;
			base += 16384;
		}
	}

	for (i = 0; i < MTRR_I686_NFIXED_4K; i++, idx++) {
		mask = mtrr_fixed_raw[idx].msrval;
		for (j = 0; j < 8; j++) {
			mtrrp = &mtrr_fixed[idx * 8 + j];
			mtrrp->owner = 0;
			mtrrp->flags = MTRR_FIXED | MTRR_VALID;
			mtrrp->base = base;
			mtrrp->len = 4096;
			mtrrp->type = mask & 0xff;
			mask >>= 8;
			base += 4096;
		}
	}
}

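/*
 * Encode the software arrays back into raw MSR values; the inverse of
 * i686_raw2soft().  Nothing is written to the hardware here, that is
 * left to i686_mtrr_reload().
 */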
static void
i686_soft2raw(void)
{
	int i, idx, j;
	uint64_t val;
	struct mtrr *mtrrp;

	for (i = 0; i < i686_mtrr_vcnt; i++) {
		mtrrp = &mtrr_var[i];
		mtrr_var_raw[i * 2].msrval = mtrr_base_value(mtrrp);
		mtrr_var_raw[i * 2 + 1].msrval = mtrr_mask_value(mtrrp);
		if (mtrrp->flags & MTRR_VALID)
			mtrr_var_raw[i * 2 + 1].msrval |= MTRR_I686_MASK_VALID;
	}

	idx = 0;
	for (i = 0; i < MTRR_I686_NFIXED_64K; i++, idx++) {
		val = 0;
		for (j = 0; j < 8; j++) {
			mtrrp = &mtrr_fixed[idx * 8 + j];
			val |= ((uint64_t)mtrrp->type << (j << 3));
		}
		mtrr_fixed_raw[idx].msrval = val;
	}

	for (i = 0; i < MTRR_I686_NFIXED_16K; i++, idx++) {
		val = 0;
		for (j = 0; j < 8; j++) {
			mtrrp = &mtrr_fixed[idx * 8 + j];
			val |= ((uint64_t)mtrrp->type << (j << 3));
		}
		mtrr_fixed_raw[idx].msrval = val;
	}

	for (i = 0; i < MTRR_I686_NFIXED_4K; i++, idx++) {
		val = 0;
		for (j = 0; j < 8; j++) {
			mtrrp = &mtrr_fixed[idx * 8 + j];
			val |= ((uint64_t)mtrrp->type << (j << 3));
		}
		mtrr_fixed_raw[idx].msrval = val;
	}
}

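/*
 * mtrr_funcs hook: load the shared raw MTRR state into the calling CPU
 * only, without rendezvousing with the other processors.
 */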
static void
i686_mtrr_init_cpu(struct cpu_info *ci)
{
	i686_mtrr_reload(0);
#if 0
	mtrr_dump(device_xname(ci->ci_dev));
#endif
}

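/*
 * Sanity-check a single request: alignment, range type and WC
 * capability, plus the extra constraints on fixed ranges, before it is
 * allowed anywhere near the hardware.
 */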
static int
i686_mtrr_validate(struct mtrr *mtrrp, struct proc *p)
{
	uint64_t high;

	/*
	 * Must be at least page-aligned.
	 */
	if (mtrrp->base & 0xfff || mtrrp->len & 0xfff || mtrrp->len == 0)
		return EINVAL;

	/*
	 * Private mappings are bound to a process.
	 */
	if (p == NULL && (mtrrp->flags & MTRR_PRIVATE))
		return EINVAL;

	high = mtrrp->base + mtrrp->len;

	/*
	 * Check for bad types.
	 */
	if ((mtrrp->type == MTRR_TYPE_UNDEF1 || mtrrp->type == MTRR_TYPE_UNDEF2
	    || mtrrp->type > MTRR_TYPE_WB) && (mtrrp->flags & MTRR_VALID))
		return EINVAL;

	/*
	 * If write-combining is requested, make sure that the WC feature
	 * is supported by the processor.
	 */
	if (mtrrp->type == MTRR_TYPE_WC &&
	    !(i686_mtrr_cap & MTRR_I686_CAP_WC_MASK))
		return ENODEV;

	/*
	 * Only use fixed ranges < 1M.
	 */
	if ((mtrrp->flags & MTRR_FIXED) && high > 0x100000)
		return EINVAL;

	/*
	 * Check for the right alignment and size for fixed ranges.
	 * The requested range may span several actual MTRRs, but
	 * it must be properly aligned.
	 */
	if (mtrrp->flags & MTRR_FIXED) {
		if (mtrrp->base < MTRR_I686_16K_START) {
			if ((mtrrp->base & 0xffff) != 0)
				return EINVAL;
		} else if (mtrrp->base < MTRR_I686_4K_START) {
			if ((mtrrp->base & 0x3fff) != 0)
				return EINVAL;
		} else {
			if ((mtrrp->base & 0xfff) != 0)
				return EINVAL;
		}

		if (high < MTRR_I686_16K_START) {
			if ((high & 0xffff) != 0)
				return EINVAL;
		} else if (high < MTRR_I686_4K_START) {
			if ((high & 0x3fff) != 0)
				return EINVAL;
		} else {
			if ((high & 0xfff) != 0)
				return EINVAL;
		}
	}

	return 0;
}

/*
 * Try to find a non-conflicting match on physical MTRRs for the
 * requested range.  For fixed ranges, more than one actual MTRR
 * may be used.
 */
static int
i686_mtrr_setone(struct mtrr *mtrrp, struct proc *p)
{
	int i, error;
	struct mtrr *lowp, *highp, *mp, *freep;
	uint64_t low, high, curlow, curhigh;

	/*
	 * If explicitly requested, or if the range lies below 1M,
	 * try the fixed range MTRRs.
	 */
	if (mtrrp->flags & MTRR_FIXED ||
	    (mtrrp->base + mtrrp->len) <= 0x100000) {
		lowp = highp = NULL;
		for (i = 0; i < MTRR_I686_NFIXED_SOFT; i++) {
			if (mtrr_fixed[i].base == mtrrp->base + mtrrp->len) {
				highp = &mtrr_fixed[i];
				break;
			}
			if (mtrr_fixed[i].base == mtrrp->base) {
				lowp = &mtrr_fixed[i];
				/*
				 * If the requested upper bound is the 1M
				 * limit, search no further.
				 */
				if ((mtrrp->base + mtrrp->len) == 0x100000) {
					highp =
					    &mtrr_fixed[MTRR_I686_NFIXED_SOFT];
					break;
				} else {
					highp = &mtrr_fixed[i + 1];
					continue;
				}
			}
		}
		if (lowp == NULL || highp == NULL)
			panic("mtrr: fixed register screwup");
		error = 0;
		for (mp = lowp; mp < highp; mp++) {
			if ((mp->flags & MTRR_PRIVATE) && p != NULL
			    && p->p_pid != mp->owner) {
				error = EBUSY;
				break;
			}
		}
		if (error != 0) {
			if (mtrrp->flags & MTRR_FIXED)
				return error;
		} else {
			for (mp = lowp; mp < highp; mp++) {
				/*
				 * Can't invalidate fixed ranges, so
				 * just reset the 'private' flag,
				 * making the range available for
				 * changing again.
				 */
				if (!(mtrrp->flags & MTRR_VALID)) {
					mp->flags &= ~MTRR_PRIVATE;
					continue;
				}
				mp->type = mtrrp->type;
				if (mtrrp->flags & MTRR_PRIVATE) {
					/*
					 * Private mappings are bound to a
					 * process.  This has been checked in
					 * i686_mtrr_validate().
					 */
					mp->flags |= MTRR_PRIVATE;
					mp->owner = p->p_pid;
				}
			}
			return 0;
		}
	}

	/*
	 * Try one of the variable range registers.
	 * XXX could be more sophisticated here by merging ranges.
	 */
	low = mtrrp->base;
	high = low + mtrrp->len - 1;
	freep = NULL;
	for (i = 0; i < i686_mtrr_vcnt; i++) {
		if (!(mtrr_var[i].flags & MTRR_VALID)) {
			freep = &mtrr_var[i];
			continue;
		}
		curlow = mtrr_var[i].base;
		curhigh = curlow + mtrr_var[i].len - 1;
		if (low == curlow && high == curhigh &&
		    (!(mtrr_var[i].flags & MTRR_PRIVATE) ||
		     ((mtrrp->flags & MTRR_PRIVATE) && (p != NULL) &&
		      (mtrr_var[i].owner == p->p_pid)))) {
			freep = &mtrr_var[i];
			break;
		}
		if (((high >= curlow && high < curhigh) ||
		     (low >= curlow && low < curhigh)) &&
		    (i686_mtrr_conflict(mtrr_var[i].type, mtrrp->type) ||
		     ((mtrr_var[i].flags & MTRR_PRIVATE) &&
		      (!(mtrrp->flags & MTRR_PRIVATE) || (p == NULL) ||
		       (mtrr_var[i].owner != p->p_pid))))) {
			return EBUSY;
		}
	}
	if (freep == NULL)
		return EBUSY;
	mtrrp->flags &= ~MTRR_CANTSET;
	*freep = *mtrrp;
	freep->owner = (mtrrp->flags & MTRR_PRIVATE) ? p->p_pid : 0;

	return 0;
}

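/*
 * Overlapping variable ranges are only well-defined in two cases: UC
 * combined with anything yields UC, and WT overlapping WB yields WT.
 * Anything else is a conflict.
 */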
static int
i686_mtrr_conflict(uint8_t type1, uint8_t type2)
{
	if (type1 == MTRR_TYPE_UC || type2 == MTRR_TYPE_UC)
		return 0;
	if ((type1 == MTRR_TYPE_WT && type2 == MTRR_TYPE_WB) ||
	    (type1 == MTRR_TYPE_WB && type2 == MTRR_TYPE_WT))
		return 0;
	return 1;
}

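/*
 * mtrr_funcs hook: release all private ranges owned by the given
 * process (fixed ranges merely lose their private flag; variable
 * ranges are invalidated as well) and push the result to the hardware.
 */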
static void
i686_mtrr_clean(struct proc *p)
{
	int i;

	for (i = 0; i < MTRR_I686_NFIXED_SOFT; i++) {
		if ((mtrr_fixed[i].flags & MTRR_PRIVATE) &&
		    (mtrr_fixed[i].owner == p->p_pid))
			mtrr_fixed[i].flags &= ~MTRR_PRIVATE;
	}

	for (i = 0; i < i686_mtrr_vcnt; i++) {
		if ((mtrr_var[i].flags & MTRR_PRIVATE) &&
		    (mtrr_var[i].owner == p->p_pid))
			mtrr_var[i].flags &= ~(MTRR_PRIVATE | MTRR_VALID);
	}

	i686_mtrr_commit();
}

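/*
 * mtrr_funcs hook: validate and stage an array of requests into the
 * software state.  Nothing reaches the MSRs until the caller invokes
 * i686_mtrr_commit(); on failure, *n is set to the number of entries
 * that were accepted beforehand.
 */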
static int
i686_mtrr_set(struct mtrr *mtrrp, int *n, struct proc *p, int flags)
{
	int i, error;
	struct mtrr mtrr;

	if (*n > (MTRR_I686_NFIXED_SOFT + MTRR_I686_NVAR_MAX)) {
		*n = 0;
		return EINVAL;
	}

	error = 0;
	for (i = 0; i < *n; i++) {
		if (flags & MTRR_GETSET_USER) {
			error = copyin(&mtrrp[i], &mtrr, sizeof mtrr);
			if (error != 0)
				break;
		} else
			mtrr = mtrrp[i];
		error = i686_mtrr_validate(&mtrr, p);
		if (error != 0)
			break;
		error = i686_mtrr_setone(&mtrr, p);
		if (error != 0)
			break;
		if (mtrr.flags & MTRR_PRIVATE)
			p->p_md.md_flags |= MDP_USEDMTRR;
	}
	*n = i;
	return error;
}

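/*
 * mtrr_funcs hook: copy out the current software view, fixed ranges
 * first, then the variable ranges.  With mtrrp == NULL, just report
 * how many entries the caller should provide.
 */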
static int
i686_mtrr_get(struct mtrr *mtrrp, int *n, struct proc *p, int flags)
{
	int idx, i, error;

	if (mtrrp == NULL) {
		*n = MTRR_I686_NFIXED_SOFT + MTRR_I686_NVAR_MAX;
		return 0;
	}

	error = 0;

	for (idx = i = 0; i < MTRR_I686_NFIXED_SOFT && idx < *n; idx++, i++) {
		if (flags & MTRR_GETSET_USER) {
			error = copyout(&mtrr_fixed[i], &mtrrp[idx],
			    sizeof *mtrrp);
			if (error != 0)
				break;
		} else
			memcpy(&mtrrp[idx], &mtrr_fixed[i], sizeof *mtrrp);
	}
	if (error != 0) {
		*n = idx;
		return error;
	}

	for (i = 0; i < i686_mtrr_vcnt && idx < *n; idx++, i++) {
		if (flags & MTRR_GETSET_USER) {
			error = copyout(&mtrr_var[i], &mtrrp[idx],
			    sizeof *mtrrp);
			if (error != 0)
				break;
		} else
			memcpy(&mtrrp[idx], &mtrr_var[i], sizeof *mtrrp);
	}
	*n = idx;
	return error;
}

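/*
 * mtrr_funcs hook: encode the software state into raw MSR values and
 * reload it everywhere, broadcasting an IPI so that every processor
 * runs the reload sequence in lockstep.
 */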
static void
i686_mtrr_commit(void)
{

	i686_soft2raw();
	kpreempt_disable();
#ifdef MULTIPROCESSOR
	x86_broadcast_ipi(X86_IPI_MTRR);
#endif
	i686_mtrr_reload(1);
	kpreempt_enable();
}
