/* $NetBSD: msipic.c,v 1.8 2015/11/17 17:51:42 msaitoh Exp $ */

/*
 * Copyright (c) 2015 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: msipic.c,v 1.8 2015/11/17 17:51:42 msaitoh Exp $");

#include "opt_intrdebug.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/malloc.h>
#include <sys/mutex.h>

#include <dev/pci/pcivar.h>

#include <machine/i82489reg.h>
#include <machine/i82093reg.h>
#include <machine/i82093var.h>
#include <machine/pic.h>
#include <machine/lock.h>

#include <x86/pci/msipic.h>

#ifdef INTRDEBUG
#define MSIPICDEBUG
#endif

#ifdef MSIPICDEBUG
#define DPRINTF(msg) printf msg
#else
#define DPRINTF(msg)
#endif

#define BUS_SPACE_WRITE_FLUSH(pc, tag) (void)bus_space_read_4(pc, tag, 0)

#define MSIPICNAMEBUF 16

/*
 * A pseudo pic for a single MSI/MSI-X device.
 * Each pic and its MSI/MSI-X device are distinguished by "devid", which
 * is managed by "dev_seqs" below.
 */
struct msipic {
	int mp_bus;
	int mp_dev;
	int mp_fun;

	int mp_devid; /* The device id for the MSI/MSI-X device. */
	int mp_veccnt; /* The number of MSI/MSI-X vectors. */

	char mp_pic_name[MSIPICNAMEBUF]; /* The MSI/MSI-X device's name. */

	struct pci_attach_args mp_pa;
	bus_space_tag_t mp_bstag;
	bus_space_handle_t mp_bshandle;
	bus_size_t mp_bssize;
	struct pic *mp_pic;

	LIST_ENTRY(msipic) mp_list;
};

static kmutex_t msipic_list_lock;

static LIST_HEAD(, msipic) msipic_list =
	LIST_HEAD_INITIALIZER(msipic_list);

/*
 * This struct manages the "devid" so that the same "devid" is used for a
 * device which is re-attached. If a device's bus number, device number,
 * and function number are all equal, it is assumed to be re-attached.
 */
struct dev_last_used_seq {
	bool ds_using;
	int ds_bus;
	int ds_dev;
	int ds_fun;
};
/* The number of MSI/MSI-X devices supported by the system. */
#define NUM_MSI_DEVS 256
/* Record devids to reuse the same devid when a device is re-attached. */
static struct dev_last_used_seq dev_seqs[NUM_MSI_DEVS];
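
/*
 * Illustrative example (not from the original code): if the device at
 * bus 3, device 0, function 0 is attached first and is handed devid 5,
 * then detached and later re-attached, its dev_seqs[] slot still holds
 * 3/0/0, so the device is handed devid 5 again.
 */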

static int msipic_allocate_common_msi_devid(const struct pci_attach_args *);
static void msipic_release_common_msi_devid(int);

static struct pic *msipic_find_msi_pic_locked(int);
static struct pic *msipic_construct_common_msi_pic(const struct pci_attach_args *,
    struct pic *);
static void msipic_destruct_common_msi_pic(struct pic *);

static void msi_set_msictl_enablebit(struct pic *, int, int);
static void msi_hwmask(struct pic *, int);
static void msi_hwunmask(struct pic *, int);
static void msi_addroute(struct pic *, struct cpu_info *, int, int, int);
static void msi_delroute(struct pic *, struct cpu_info *, int, int, int);

static void msix_set_vecctl_mask(struct pic *, int, int);
static void msix_hwmask(struct pic *, int);
static void msix_hwunmask(struct pic *, int);
static void msix_addroute(struct pic *, struct cpu_info *, int, int, int);
static void msix_delroute(struct pic *, struct cpu_info *, int, int, int);

/*
 * Return a new "devid" for a device attached for the first time.
 * Return the same "devid" for a device re-attached after having been detached.
 * Return -1 if the number of attached MSI/MSI-X devices exceeds NUM_MSI_DEVS.
 */
static int
msipic_allocate_common_msi_devid(const struct pci_attach_args *pa)
{
	pci_chipset_tag_t pc;
	pcitag_t tag;
	int bus, dev, fun, i;

	KASSERT(mutex_owned(&msipic_list_lock));

	pc = pa->pa_pc;
	tag = pa->pa_tag;
	pci_decompose_tag(pc, tag, &bus, &dev, &fun);

	/* If the device was once attached, use the same devid. */
	for (i = 0; i < NUM_MSI_DEVS; i++) {
		/* skip host bridge */
		if (dev_seqs[i].ds_bus == 0
		    && dev_seqs[i].ds_dev == 0
		    && dev_seqs[i].ds_fun == 0)
			break;

		if (dev_seqs[i].ds_bus == bus
		    && dev_seqs[i].ds_dev == dev
		    && dev_seqs[i].ds_fun == fun) {
			dev_seqs[i].ds_using = true;
			return i;
		}
	}

	for (i = 0; i < NUM_MSI_DEVS; i++) {
		if (dev_seqs[i].ds_using == 0) {
			dev_seqs[i].ds_using = true;
			dev_seqs[i].ds_bus = bus;
			dev_seqs[i].ds_dev = dev;
			dev_seqs[i].ds_fun = fun;
			return i;
		}
	}

	DPRINTF(("too many MSI devices.\n"));
	return -1;
}

/*
 * Set the "devid" unused, but keep the "devid" reserved so that it can be
 * reused when the device is re-attached.
 */
static void
msipic_release_common_msi_devid(int devid)
{

	KASSERT(mutex_owned(&msipic_list_lock));

	if (devid < 0 || NUM_MSI_DEVS <= devid) {
		DPRINTF(("%s: invalid devid.\n", __func__));
		return;
	}

	dev_seqs[devid].ds_using = false;
	/* Keep ds_* to reuse the same devid for the same device. */
}

static struct pic *
msipic_find_msi_pic_locked(int devid)
{
	struct msipic *mpp;

	KASSERT(mutex_owned(&msipic_list_lock));

	LIST_FOREACH(mpp, &msipic_list, mp_list) {
		if (mpp->mp_devid == devid)
			return mpp->mp_pic;
	}
	return NULL;
}

/*
 * Return the msi_pic whose device is already registered.
 * If the device is not registered yet, return NULL.
 */
struct pic *
msipic_find_msi_pic(int devid)
{
	struct pic *msipic;

	mutex_enter(&msipic_list_lock);
	msipic = msipic_find_msi_pic_locked(devid);
	mutex_exit(&msipic_list_lock);

	return msipic;
}

/*
 * Common construction routine shared by MSI and MSI-X.
 */
static struct pic *
msipic_construct_common_msi_pic(const struct pci_attach_args *pa,
    struct pic *pic_tmpl)
{
	struct pic *pic;
	struct msipic *msipic;
	int devid;

	pic = kmem_alloc(sizeof(*pic), KM_SLEEP);
	if (pic == NULL)
		return NULL;

	msipic = kmem_zalloc(sizeof(*msipic), KM_SLEEP);
	if (msipic == NULL) {
		kmem_free(pic, sizeof(*pic));
		return NULL;
	}

	mutex_enter(&msipic_list_lock);

	devid = msipic_allocate_common_msi_devid(pa);
	if (devid == -1) {
		mutex_exit(&msipic_list_lock);
		kmem_free(pic, sizeof(*pic));
		kmem_free(msipic, sizeof(*msipic));
		return NULL;
	}

	memcpy(pic, pic_tmpl, sizeof(*pic));
	pic->pic_msipic = msipic;
	msipic->mp_pic = pic;
	pci_decompose_tag(pa->pa_pc, pa->pa_tag,
	    &msipic->mp_bus, &msipic->mp_dev, &msipic->mp_fun);
	memcpy(&msipic->mp_pa, pa, sizeof(msipic->mp_pa));
	msipic->mp_devid = devid;
	/*
	 * pci_msi{,x}_alloc() must be called only once in the device driver.
	 */
	KASSERT(msipic_find_msi_pic_locked(msipic->mp_devid) == NULL);

	LIST_INSERT_HEAD(&msipic_list, msipic, mp_list);

	mutex_exit(&msipic_list_lock);

	return pic;
}

static void
msipic_destruct_common_msi_pic(struct pic *msi_pic)
{
	struct msipic *msipic;

	if (msi_pic == NULL)
		return;

	msipic = msi_pic->pic_msipic;
	mutex_enter(&msipic_list_lock);
	LIST_REMOVE(msipic, mp_list);
	msipic_release_common_msi_devid(msipic->mp_devid);
	mutex_exit(&msipic_list_lock);

	kmem_free(msipic, sizeof(*msipic));
	kmem_free(msi_pic, sizeof(*msi_pic));
}

/*
 * Return true if the pic is an MSI/MSI-X pic.
 */
bool
msipic_is_msi_pic(struct pic *pic)
{

	return (pic->pic_msipic != NULL);
}

/*
 * Return the MSI/MSI-X devid, which is unique to each device.
 */
int
msipic_get_devid(struct pic *pic)
{

	KASSERT(msipic_is_msi_pic(pic));

	return pic->pic_msipic->mp_devid;
}

#define MSI_MSICTL_ENABLE 1
#define MSI_MSICTL_DISABLE 0
static void
msi_set_msictl_enablebit(struct pic *pic, int msi_vec, int flag)
{
	pci_chipset_tag_t pc;
	struct pci_attach_args *pa;
	pcitag_t tag;
	pcireg_t ctl;
	int off, err __diagused;

	pc = NULL;
	pa = &pic->pic_msipic->mp_pa;
	tag = pa->pa_tag;
	err = pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL);
	KASSERT(err != 0);

	/*
	 * MSI can establish only one vector at a time, so mask the whole
	 * device with the MSI enable bit instead of using a per-vector
	 * mask bit.
	 */
	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	if (flag == MSI_MSICTL_ENABLE)
		ctl |= PCI_MSI_CTL_MSI_ENABLE;
	else
		ctl &= ~PCI_MSI_CTL_MSI_ENABLE;

	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);
}

static void
msi_hwmask(struct pic *pic, int msi_vec)
{

	msi_set_msictl_enablebit(pic, msi_vec, MSI_MSICTL_DISABLE);
}

/*
 * Do not call pic->hwunmask() immediately after pic->delroute();
 * pic->addroute() must be called again before pic->hwunmask().
 */
static void
msi_hwunmask(struct pic *pic, int msi_vec)
{

	msi_set_msictl_enablebit(pic, msi_vec, MSI_MSICTL_ENABLE);
}

static void
msi_addroute(struct pic *pic, struct cpu_info *ci,
    int unused, int idt_vec, int type)
{
	pci_chipset_tag_t pc;
	struct pci_attach_args *pa;
	pcitag_t tag;
	pcireg_t addr, data, ctl;
	int off, err __diagused;

	pc = NULL;
	pa = &pic->pic_msipic->mp_pa;
	tag = pa->pa_tag;
	err = pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL);
	KASSERT(err != 0);

	/*
	 * See Intel 64 and IA-32 Architectures Software Developer's Manual
	 * Volume 3 10.11 Message Signalled Interrupts.
	 */
	/*
	 * The "cpuid" in the MSI address is the local APIC ID. In NetBSD,
	 * it is the same as ci->ci_cpuid.
	 */
	addr = LAPIC_MSIADDR_BASE | __SHIFTIN(ci->ci_cpuid,
	    LAPIC_MSIADDR_DSTID_MASK);
	/* The level field is ignored when the trigger mode is edge. */
	data = __SHIFTIN(idt_vec, LAPIC_MSIDATA_VECTOR_MASK)
	    | LAPIC_MSIDATA_TRGMODE_EDGE | LAPIC_MSIDATA_DM_FIXED;
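	/*
	 * Worked example (illustrative only; it assumes the architectural
	 * x86 MSI layout: destination APIC ID in address bits 19:12, vector
	 * in data bits 7:0, edge trigger and fixed delivery encoded as
	 * zero): for ci->ci_cpuid == 2 and idt_vec == 0x60, the result is
	 * addr = 0xfee02000 and data = 0x0060.
	 */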

	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	if (ctl & PCI_MSI_CTL_64BIT_ADDR) {
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_LO, addr);
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_HI, 0);
		pci_conf_write(pc, tag, off + PCI_MSI_MDATA64, data);
	} else {
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR, addr);
		pci_conf_write(pc, tag, off + PCI_MSI_MDATA, data);
	}
	ctl |= PCI_MSI_CTL_MSI_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);
}

/*
 * Do not call pic->hwunmask() immediately after pic->delroute();
 * pic->addroute() must be called again before pic->hwunmask().
 */
static void
msi_delroute(struct pic *pic, struct cpu_info *ci,
    int msi_vec, int idt_vec, int type)
{

	msi_hwmask(pic, msi_vec);
}

/*
 * Template for the MSI pic.
 * .pic_msipic is set later, in msipic_construct_common_msi_pic().
 */
static struct pic msi_pic_tmpl = {
	.pic_type = PIC_MSI,
	.pic_vecbase = 0,
	.pic_apicid = 0,
	.pic_lock = __SIMPLELOCK_UNLOCKED, /* not used for msi_pic */
	.pic_hwmask = msi_hwmask,
	.pic_hwunmask = msi_hwunmask,
	.pic_addroute = msi_addroute,
	.pic_delroute = msi_delroute,
	.pic_edge_stubs = ioapic_edge_stubs,
	.pic_ioapic = NULL,
};
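
/*
 * A minimal call-order sketch (illustrative only, not taken from the MD
 * interrupt code): per the warning above msi_hwunmask(), a route that has
 * been deleted must be re-established before unmasking, e.g.:
 *
 *	(*pic->pic_delroute)(pic, ci, pin, idt_vec, type);
 *	...
 *	(*pic->pic_addroute)(pic, ci, pin, idt_vec, type);
 *	(*pic->pic_hwunmask)(pic, pin);
 */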

/*
 * Create a pseudo pic for an MSI device.
 */
struct pic *
msipic_construct_msi_pic(const struct pci_attach_args *pa)
{
	struct pic *msi_pic;
	char pic_name_buf[MSIPICNAMEBUF];

	msi_pic = msipic_construct_common_msi_pic(pa, &msi_pic_tmpl);
	if (msi_pic == NULL) {
		DPRINTF(("cannot allocate MSI pic.\n"));
		return NULL;
	}

	memset(pic_name_buf, 0, MSIPICNAMEBUF);
	snprintf(pic_name_buf, MSIPICNAMEBUF, "msi%d",
	    msi_pic->pic_msipic->mp_devid);
	strncpy(msi_pic->pic_msipic->mp_pic_name, pic_name_buf,
	    MSIPICNAMEBUF - 1);
	msi_pic->pic_name = msi_pic->pic_msipic->mp_pic_name;

	return msi_pic;
}
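
/*
 * A minimal lifecycle sketch (illustrative only; the actual callers are
 * the MD MSI allocation code): construct the pic, record the vector
 * count, and destruct it on release, e.g.:
 *
 *	struct pic *pic = msipic_construct_msi_pic(pa);
 *	if (pic == NULL)
 *		return ENOMEM;
 *	msipic_set_msi_vectors(pic, NULL, count);
 *	...
 *	msipic_destruct_msi_pic(pic);
 */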

/*
 * Delete the pseudo pic for an MSI device.
 */
void
msipic_destruct_msi_pic(struct pic *msi_pic)
{

	msipic_destruct_common_msi_pic(msi_pic);
}

#define MSIX_VECCTL_HWMASK 1
#define MSIX_VECCTL_HWUNMASK 0
static void
msix_set_vecctl_mask(struct pic *pic, int msix_vec, int flag)
{
	bus_space_tag_t bstag;
	bus_space_handle_t bshandle;
	uint64_t entry_base;
	uint32_t vecctl;

	if (msix_vec < 0) {
		DPRINTF(("%s: invalid MSI-X table index, devid=%d vecid=%d\n",
		    __func__, msipic_get_devid(pic), msix_vec));
		return;
	}

	entry_base = PCI_MSIX_TABLE_ENTRY_SIZE * msix_vec;

	bstag = pic->pic_msipic->mp_bstag;
	bshandle = pic->pic_msipic->mp_bshandle;
	vecctl = bus_space_read_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL);
	if (flag == MSIX_VECCTL_HWMASK)
		vecctl |= PCI_MSIX_VECTCTL_MASK;
	else
		vecctl &= ~PCI_MSIX_VECTCTL_MASK;

	bus_space_write_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL, vecctl);
	BUS_SPACE_WRITE_FLUSH(bstag, bshandle);
}

static void
msix_hwmask(struct pic *pic, int msix_vec)
{

	msix_set_vecctl_mask(pic, msix_vec, MSIX_VECCTL_HWMASK);
}

/*
 * Do not call pic->hwunmask() immediately after pic->delroute();
 * pic->addroute() must be called again before pic->hwunmask().
 */
static void
msix_hwunmask(struct pic *pic, int msix_vec)
{

	msix_set_vecctl_mask(pic, msix_vec, MSIX_VECCTL_HWUNMASK);
}

static void
msix_addroute(struct pic *pic, struct cpu_info *ci,
    int msix_vec, int idt_vec, int type)
{
	pci_chipset_tag_t pc;
	struct pci_attach_args *pa;
	pcitag_t tag;
	bus_space_tag_t bstag;
	bus_space_handle_t bshandle;
	uint64_t entry_base;
	pcireg_t addr, data, ctl;
	int off, err __diagused;

	if (msix_vec < 0) {
		DPRINTF(("%s: invalid MSI-X table index, devid=%d vecid=%d\n",
		    __func__, msipic_get_devid(pic), msix_vec));
		return;
	}

	pa = &pic->pic_msipic->mp_pa;
	pc = pa->pa_pc;
	tag = pa->pa_tag;
	err = pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL);
	KASSERT(err != 0);

	entry_base = PCI_MSIX_TABLE_ENTRY_SIZE * msix_vec;

	/*
	 * See Intel 64 and IA-32 Architectures Software Developer's Manual
	 * Volume 3 10.11 Message Signalled Interrupts.
	 */
	/*
	 * The "cpuid" in the MSI-X address is the local APIC ID. In NetBSD,
	 * it is the same as ci->ci_cpuid.
	 */
	addr = LAPIC_MSIADDR_BASE | __SHIFTIN(ci->ci_cpuid,
	    LAPIC_MSIADDR_DSTID_MASK);
	/* The level field is ignored when the trigger mode is edge. */
	data = __SHIFTIN(idt_vec, LAPIC_MSIDATA_VECTOR_MASK)
	    | LAPIC_MSIDATA_TRGMODE_EDGE | LAPIC_MSIDATA_DM_FIXED;

	bstag = pic->pic_msipic->mp_bstag;
	bshandle = pic->pic_msipic->mp_bshandle;
	bus_space_write_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_LO, addr);
	bus_space_write_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_HI, 0);
	bus_space_write_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_DATA, data);
	bus_space_write_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL, 0);
	BUS_SPACE_WRITE_FLUSH(bstag, bshandle);

	ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
	ctl |= PCI_MSIX_CTL_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
}

/*
 * Do not call pic->hwunmask() immediately after pic->delroute();
 * pic->addroute() must be called again before pic->hwunmask().
 */
static void
msix_delroute(struct pic *pic, struct cpu_info *ci,
    int msix_vec, int vec, int type)
{

	msix_hwmask(pic, msix_vec);
}

/*
 * Template for the MSI-X pic.
 * .pic_msipic is set later, in msipic_construct_common_msi_pic().
 */
static struct pic msix_pic_tmpl = {
	.pic_type = PIC_MSIX,
	.pic_vecbase = 0,
	.pic_apicid = 0,
	.pic_lock = __SIMPLELOCK_UNLOCKED, /* not used for msix_pic */
	.pic_hwmask = msix_hwmask,
	.pic_hwunmask = msix_hwunmask,
	.pic_addroute = msix_addroute,
	.pic_delroute = msix_delroute,
	.pic_edge_stubs = ioapic_edge_stubs,
};

struct pic *
msipic_construct_msix_pic(const struct pci_attach_args *pa)
{
	struct pic *msix_pic;
	pci_chipset_tag_t pc;
	pcitag_t tag;
	pcireg_t tbl;
	bus_space_tag_t bstag;
	bus_space_handle_t bshandle;
	bus_size_t bssize;
	size_t table_size;
	uint32_t table_offset;
	u_int memtype;
	bus_addr_t memaddr;
	int flags;
	int bir, bar, err, off, table_nentry;
	char pic_name_buf[MSIPICNAMEBUF];

	table_nentry = pci_msix_count(pa->pa_pc, pa->pa_tag);
	if (table_nentry == 0) {
		DPRINTF(("the MSI-X table has no entries.\n"));
		return NULL;
	}

	pc = pa->pa_pc;
	tag = pa->pa_tag;
	if (pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL) == 0) {
		DPRINTF(("%s: no MSI-X capability", __func__));
		return NULL;
	}

	msix_pic = msipic_construct_common_msi_pic(pa, &msix_pic_tmpl);
	if (msix_pic == NULL) {
		DPRINTF(("cannot allocate MSI-X pic.\n"));
		return NULL;
	}

	memset(pic_name_buf, 0, MSIPICNAMEBUF);
	snprintf(pic_name_buf, MSIPICNAMEBUF, "msix%d",
	    msix_pic->pic_msipic->mp_devid);
	strncpy(msix_pic->pic_msipic->mp_pic_name, pic_name_buf,
	    MSIPICNAMEBUF - 1);
	msix_pic->pic_name = msix_pic->pic_msipic->mp_pic_name;

	tbl = pci_conf_read(pc, tag, off + PCI_MSIX_TBLOFFSET);
	table_offset = tbl & PCI_MSIX_TBLOFFSET_MASK;
	bir = tbl & PCI_MSIX_TBLBIR_MASK;
	switch (bir) {
	case 0:
		bar = PCI_BAR0;
		break;
	case 1:
		bar = PCI_BAR1;
		break;
	case 2:
		bar = PCI_BAR2;
		break;
	case 3:
		bar = PCI_BAR3;
		break;
	case 4:
		bar = PCI_BAR4;
		break;
	case 5:
		bar = PCI_BAR5;
		break;
	default:
		aprint_error("illegal device! The device uses a reserved BIR value.\n");
		msipic_destruct_common_msi_pic(msix_pic);
		return NULL;
	}
	memtype = pci_mapreg_type(pc, tag, bar);
	/*
	 * A PCI_MSIX_TABLE_ENTRY_SIZE-sized entry consists of:
	 * - Message Lower Address (32bit)
	 * - Message Upper Address (32bit)
	 * - Message Data (32bit)
	 * - Vector Control (32bit)
	 */
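	/*
	 * Per-entry layout used by msix_addroute() above (offsets follow
	 * the PCI MSI-X specification):
	 *	+0x0	Message Address (low 32 bits)
	 *	+0x4	Message Address (high 32 bits)
	 *	+0x8	Message Data
	 *	+0xc	Vector Control
	 * i.e. 16 bytes per entry.
	 */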
	table_size = table_nentry * PCI_MSIX_TABLE_ENTRY_SIZE;
#if 0
	err = pci_mapreg_submap(pa, bar, memtype, BUS_SPACE_MAP_LINEAR,
	    roundup(table_size, PAGE_SIZE), table_offset,
	    &bstag, &bshandle, NULL, &bssize);
#else
	/*
	 * Workaround for the PCI prefetchable bit. With some chips (e.g.
	 * Intel 82599) SERR is reported and MSI-X does not work. This
	 * problem might not be a driver bug but a bug in our PCI common
	 * code or in VMs. Until the real reason is found, ignore the
	 * prefetchable bit.
	 */
	if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, bar, memtype,
	    &memaddr, NULL, &flags) != 0) {
		DPRINTF(("cannot get map info.\n"));
		msipic_destruct_common_msi_pic(msix_pic);
		return NULL;
	}
	if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
		DPRINTF(("clear prefetchable bit\n"));
		flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
	}
	bssize = roundup(table_size, PAGE_SIZE);
	err = bus_space_map(pa->pa_memt, memaddr + table_offset, bssize, flags,
	    &bshandle);
	bstag = pa->pa_memt;
#endif
	if (err) {
		DPRINTF(("cannot map the MSI-X table.\n"));
		msipic_destruct_common_msi_pic(msix_pic);
		return NULL;
	}
	msix_pic->pic_msipic->mp_bstag = bstag;
	msix_pic->pic_msipic->mp_bshandle = bshandle;
	msix_pic->pic_msipic->mp_bssize = bssize;

	return msix_pic;
}

/*
 * Delete the pseudo pic for an MSI-X device.
 */
void
msipic_destruct_msix_pic(struct pic *msix_pic)
{
	struct msipic *msipic;

	KASSERT(msipic_is_msi_pic(msix_pic));
	KASSERT(msix_pic->pic_type == PIC_MSIX);

	msipic = msix_pic->pic_msipic;
	bus_space_unmap(msipic->mp_bstag, msipic->mp_bshandle,
	    msipic->mp_bssize);

	msipic_destruct_common_msi_pic(msix_pic);
}
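
/*
 * A minimal MSI-X lifecycle sketch (illustrative only): construction maps
 * the MSI-X table with bus_space_map(), so the matching destructor must be
 * the MSI-X variant, which unmaps the table before the common teardown:
 *
 *	struct pic *pic = msipic_construct_msix_pic(pa);
 *	if (pic == NULL)
 *		return ENOMEM;
 *	...
 *	msipic_destruct_msix_pic(pic);
 */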

/*
 * Set the number of MSI vectors for a pseudo MSI pic.
 */
int
msipic_set_msi_vectors(struct pic *msi_pic, pci_intr_handle_t *pihs,
    int count)
{

	KASSERT(msipic_is_msi_pic(msi_pic));

	msi_pic->pic_msipic->mp_veccnt = count;
	return 0;
}

/*
 * Initialize the system to use MSI/MSI-X.
 */
void
msipic_init(void)
{

	mutex_init(&msipic_list_lock, MUTEX_DEFAULT, IPL_NONE);
}