1/* $NetBSD: bus_dma.c,v 1.74 2015/10/27 18:49:26 christos Exp $ */
2
3/*-
4 * Copyright (c) 1996, 1997, 1998, 2007 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility NASA Ames Research Center, and by Andrew Doran.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.74 2015/10/27 18:49:26 christos Exp $");
35
36/*
37 * The following is included because _bus_dma_uiomove is derived from
38 * uiomove() in kern_subr.c.
39 */
40
41/*
42 * Copyright (c) 1982, 1986, 1991, 1993
43 * The Regents of the University of California. All rights reserved.
44 * (c) UNIX System Laboratories, Inc.
45 * All or some portions of this file are derived from material licensed
46 * to the University of California by American Telephone and Telegraph
47 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
48 * the permission of UNIX System Laboratories, Inc.
49 *
50 * Copyright (c) 1992, 1993
51 * The Regents of the University of California. All rights reserved.
52 *
53 * This software was developed by the Computer Systems Engineering group
54 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
55 * contributed to Berkeley.
56 *
57 * All advertising materials mentioning features or use of this software
58 * must display the following acknowledgement:
59 * This product includes software developed by the University of
60 * California, Lawrence Berkeley Laboratory.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions
64 * are met:
65 * 1. Redistributions of source code must retain the above copyright
66 * notice, this list of conditions and the following disclaimer.
67 * 2. Redistributions in binary form must reproduce the above copyright
68 * notice, this list of conditions and the following disclaimer in the
69 * documentation and/or other materials provided with the distribution.
70 * 3. Neither the name of the University nor the names of its contributors
71 * may be used to endorse or promote products derived from this software
72 * without specific prior written permission.
73 *
74 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
75 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
76 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
77 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
78 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
79 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
80 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
81 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
82 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
83 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
84 * SUCH DAMAGE.
85 */
86
87#include "ioapic.h"
88#include "isa.h"
89#include "opt_mpbios.h"
90
91#include <sys/param.h>
92#include <sys/systm.h>
93#include <sys/kernel.h>
94#include <sys/kmem.h>
95#include <sys/malloc.h>
96#include <sys/mbuf.h>
97#include <sys/proc.h>
98
99#include <sys/bus.h>
100#include <machine/bus_private.h>
101#if NIOAPIC > 0
102#include <machine/i82093var.h>
103#endif
104#ifdef MPBIOS
105#include <machine/mpbiosvar.h>
106#endif
107
108#if NISA > 0
109#include <dev/isa/isareg.h>
110#include <dev/isa/isavar.h>
111#endif
112
113#include <uvm/uvm.h>
114
115extern paddr_t avail_end;
116
117#define IDTVEC(name) __CONCAT(X,name)
118typedef void (vector)(void);
119extern vector *IDTVEC(intr)[];
120
121#define BUSDMA_BOUNCESTATS
122
123#ifdef BUSDMA_BOUNCESTATS
124#define BUSDMA_EVCNT_DECL(name) \
125static struct evcnt bus_dma_ev_##name = \
126 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "bus_dma", #name); \
127EVCNT_ATTACH_STATIC(bus_dma_ev_##name)
128
129#define STAT_INCR(name) \
130 bus_dma_ev_##name.ev_count++
131#define STAT_DECR(name) \
132 bus_dma_ev_##name.ev_count--
133
134BUSDMA_EVCNT_DECL(nbouncebufs);
135BUSDMA_EVCNT_DECL(loads);
136BUSDMA_EVCNT_DECL(bounces);
137#else
138#define STAT_INCR(x)
139#define STAT_DECR(x)
140#endif
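/*
 * When BUSDMA_BOUNCESTATS is defined (as above), the three counters are
 * attached as static event counters; they can be inspected at run time
 * with vmstat(1) -e, where they appear under the "bus_dma" group.
 */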
141
142static int _bus_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
143 bus_size_t, int, bus_dmamap_t *);
144static void _bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
145static int _bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
146 bus_size_t, struct proc *, int);
147static int _bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
148 struct mbuf *, int);
149static int _bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
150 struct uio *, int);
151static int _bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
152 bus_dma_segment_t *, int, bus_size_t, int);
153static void _bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
154static void _bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
155 bus_size_t, int);
156
157static int _bus_dmamem_alloc(bus_dma_tag_t tag, bus_size_t size,
158 bus_size_t alignment, bus_size_t boundary,
159 bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags);
160static void _bus_dmamem_free(bus_dma_tag_t tag, bus_dma_segment_t *segs,
161 int nsegs);
162static int _bus_dmamem_map(bus_dma_tag_t tag, bus_dma_segment_t *segs,
163 int nsegs, size_t size, void **kvap, int flags);
164static void _bus_dmamem_unmap(bus_dma_tag_t tag, void *kva, size_t size);
165static paddr_t _bus_dmamem_mmap(bus_dma_tag_t tag, bus_dma_segment_t *segs,
166 int nsegs, off_t off, int prot, int flags);
167
168static int _bus_dmatag_subregion(bus_dma_tag_t tag, bus_addr_t min_addr,
169 bus_addr_t max_addr, bus_dma_tag_t *newtag, int flags);
170static void _bus_dmatag_destroy(bus_dma_tag_t tag);
171
172static int _bus_dma_uiomove(void *, struct uio *, size_t, int);
173static int _bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
174 bus_size_t size, int flags);
175static void _bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map);
176static int _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map,
177 void *buf, bus_size_t buflen, struct vmspace *vm, int flags);
178static int _bus_dmamap_load_busaddr(bus_dma_tag_t, bus_dmamap_t,
179 bus_addr_t, bus_size_t);
180
181#ifndef _BUS_DMAMEM_ALLOC_RANGE
182static int _bus_dmamem_alloc_range(bus_dma_tag_t tag, bus_size_t size,
183 bus_size_t alignment, bus_size_t boundary,
184 bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags,
185 bus_addr_t low, bus_addr_t high);
186
187#define _BUS_DMAMEM_ALLOC_RANGE _bus_dmamem_alloc_range
188
189/*
190 * Allocate physical memory from the given physical address range.
191 * Called by DMA-safe memory allocation methods.
192 */
193static int
194_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size,
195 bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
196 int nsegs, int *rsegs, int flags, bus_addr_t low, bus_addr_t high)
197{
198 paddr_t curaddr, lastaddr;
199 struct vm_page *m;
200 struct pglist mlist;
201 int curseg, error;
202 bus_size_t uboundary;
203
204 /* Always round the size. */
205 size = round_page(size);
206
207 KASSERT(boundary >= PAGE_SIZE || boundary == 0);
208
	/*
	 * Allocate pages from the VM system.
	 * We accept boundaries < size, splitting into multiple segments
	 * if needed.  uvm_pglistalloc does not, so compute an appropriate
	 * boundary: the next power of 2 >= size.
	 */
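	/*
	 * Illustrative example: size = 3 * PAGE_SIZE with
	 * boundary = PAGE_SIZE is passed to uvm_pglistalloc() with
	 * uboundary = 4 * PAGE_SIZE (the next power of 2 >= size);
	 * the caller's smaller boundary is then re-checked while the
	 * segments are assembled below.
	 */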
215
216 if (boundary == 0)
217 uboundary = 0;
218 else {
219 uboundary = boundary;
220 while (uboundary < size)
221 uboundary = uboundary << 1;
222 }
223 error = uvm_pglistalloc(size, low, high, alignment, uboundary,
224 &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
225 if (error)
226 return (error);
227
228 /*
229 * Compute the location, size, and number of segments actually
230 * returned by the VM code.
231 */
232 m = TAILQ_FIRST(&mlist);
233 curseg = 0;
234 lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
235 segs[curseg].ds_len = PAGE_SIZE;
236 m = m->pageq.queue.tqe_next;
237
238 for (; m != NULL; m = m->pageq.queue.tqe_next) {
239 curaddr = VM_PAGE_TO_PHYS(m);
240#ifdef DIAGNOSTIC
241 if (curaddr < low || curaddr >= high) {
242 printf("vm_page_alloc_memory returned non-sensical"
243 " address %#" PRIxPADDR "\n", curaddr);
244 panic("_bus_dmamem_alloc_range");
245 }
246#endif
247 if (curaddr == (lastaddr + PAGE_SIZE) &&
248 (lastaddr & boundary) == (curaddr & boundary)) {
249 segs[curseg].ds_len += PAGE_SIZE;
250 } else {
251 curseg++;
252 if (curseg >= nsegs)
253 return EFBIG;
254 segs[curseg].ds_addr = curaddr;
255 segs[curseg].ds_len = PAGE_SIZE;
256 }
257 lastaddr = curaddr;
258 }
259
260 *rsegs = curseg + 1;
261
262 return (0);
263}
264#endif /* _BUS_DMAMEM_ALLOC_RANGE */
265
266/*
267 * Create a DMA map.
268 */
269static int
270_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
271 bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
272{
273 struct x86_bus_dma_cookie *cookie;
274 bus_dmamap_t map;
275 int error, cookieflags;
276 void *cookiestore, *mapstore;
277 size_t cookiesize, mapsize;
278
279 /*
280 * Allocate and initialize the DMA map. The end of the map
281 * is a variable-sized array of segments, so we allocate enough
282 * room for them in one shot.
283 *
284 * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
285 * of ALLOCNOW notifies others that we've reserved these resources,
286 * and they are not to be freed.
287 *
288 * The bus_dmamap_t includes one bus_dma_segment_t, hence
289 * the (nsegments - 1).
290 */
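	/*
	 * Illustrative example: for nsegments = 8, mapsize is
	 * sizeof(struct x86_bus_dmamap) plus room for 7 additional
	 * bus_dma_segment_t's, since the first one is embedded in the
	 * structure itself.
	 */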
291 error = 0;
292 mapsize = sizeof(struct x86_bus_dmamap) +
293 (sizeof(bus_dma_segment_t) * (nsegments - 1));
294 if ((mapstore = malloc(mapsize, M_DMAMAP, M_ZERO |
295 ((flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK))) == NULL)
296 return (ENOMEM);
297
298 map = (struct x86_bus_dmamap *)mapstore;
299 map->_dm_size = size;
300 map->_dm_segcnt = nsegments;
301 map->_dm_maxmaxsegsz = maxsegsz;
302 map->_dm_boundary = boundary;
303 map->_dm_bounce_thresh = t->_bounce_thresh;
304 map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
305 map->dm_maxsegsz = maxsegsz;
306 map->dm_mapsize = 0; /* no valid mappings */
307 map->dm_nsegs = 0;
308
309 *dmamp = map;
310
311 if (t->_bounce_thresh == 0 || _BUS_AVAIL_END <= t->_bounce_thresh)
312 map->_dm_bounce_thresh = 0;
313 cookieflags = 0;
314
315 if (t->_may_bounce != NULL) {
316 error = t->_may_bounce(t, map, flags, &cookieflags);
317 if (error != 0)
318 goto out;
319 }
320
321 if (map->_dm_bounce_thresh != 0)
322 cookieflags |= X86_DMA_MIGHT_NEED_BOUNCE;
323
324 if ((cookieflags & X86_DMA_MIGHT_NEED_BOUNCE) == 0)
325 return 0;
326
327 cookiesize = sizeof(struct x86_bus_dma_cookie) +
328 (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
329
330 /*
331 * Allocate our cookie.
332 */
333 if ((cookiestore = malloc(cookiesize, M_DMAMAP, M_ZERO |
334 ((flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK))) == NULL) {
335 error = ENOMEM;
336 goto out;
337 }
338 cookie = (struct x86_bus_dma_cookie *)cookiestore;
339 cookie->id_flags = cookieflags;
340 map->_dm_cookie = cookie;
341
342 error = _bus_dma_alloc_bouncebuf(t, map, size, flags);
343 out:
344 if (error)
345 _bus_dmamap_destroy(t, map);
346
347 return (error);
348}
349
350/*
351 * Destroy a DMA map.
352 */
353static void
354_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
355{
356 struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
357
358 /*
359 * Free any bounce pages this map might hold.
360 */
361 if (cookie != NULL) {
362 if (cookie->id_flags & X86_DMA_HAS_BOUNCE)
363 _bus_dma_free_bouncebuf(t, map);
364 free(cookie, M_DMAMAP);
365 }
366
367 free(map, M_DMAMAP);
368}
369
370/*
371 * Load a DMA map with a linear buffer.
372 */
373static int
374_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
375 bus_size_t buflen, struct proc *p, int flags)
376{
377 struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
378 int error;
379 struct vmspace *vm;
380
381 STAT_INCR(loads);
382
383 /*
	 * Make sure that on an error condition we return "no valid mappings".
385 */
386 map->dm_mapsize = 0;
387 map->dm_nsegs = 0;
388 KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
389
390 if (buflen > map->_dm_size)
391 return EINVAL;
392
393 if (p != NULL) {
394 vm = p->p_vmspace;
395 } else {
396 vm = vmspace_kernel();
397 }
398 error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags);
399 if (error == 0) {
400 if (cookie != NULL)
401 cookie->id_flags &= ~X86_DMA_IS_BOUNCING;
402 map->dm_mapsize = buflen;
403 return 0;
404 }
405
406 if (cookie == NULL ||
407 (cookie->id_flags & X86_DMA_MIGHT_NEED_BOUNCE) == 0)
408 return error;
409
410 /*
411 * First attempt failed; bounce it.
412 */
413
414 STAT_INCR(bounces);
415
416 /*
417 * Allocate bounce pages, if necessary.
418 */
419 if ((cookie->id_flags & X86_DMA_HAS_BOUNCE) == 0) {
420 error = _bus_dma_alloc_bouncebuf(t, map, buflen, flags);
421 if (error)
422 return (error);
423 }
424
425 /*
426 * Cache a pointer to the caller's buffer and load the DMA map
427 * with the bounce buffer.
428 */
429 cookie->id_origbuf = buf;
430 cookie->id_origbuflen = buflen;
431 cookie->id_buftype = X86_DMA_BUFTYPE_LINEAR;
432 map->dm_nsegs = 0;
433 error = bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen,
434 p, flags);
435 if (error)
436 return (error);
437
438 /* ...so _bus_dmamap_sync() knows we're bouncing */
439 cookie->id_flags |= X86_DMA_IS_BOUNCING;
440 return (0);
441}
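/*
 * Sketch of the resulting data flow when a linear buffer is bounced
 * (the copies themselves are performed by _bus_dmamap_sync() below):
 *
 *	bus_dmamap_load()			map now describes the bounce
 *						buffer, not the caller's buf
 *	bus_dmamap_sync(BUS_DMASYNC_PREWRITE)	copy buf -> bounce buffer
 *	... device performs the DMA ...
 *	bus_dmamap_sync(BUS_DMASYNC_POSTREAD)	copy bounce buffer -> buf
 */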
442
443static int
444_bus_dmamap_load_busaddr(bus_dma_tag_t t, bus_dmamap_t map,
445 bus_addr_t addr, bus_size_t size)
446{
447 bus_dma_segment_t * const segs = map->dm_segs;
448 int nseg = map->dm_nsegs;
449 bus_addr_t bmask = ~(map->_dm_boundary - 1);
450 bus_addr_t lastaddr = 0xdead; /* XXX gcc */
451 bus_size_t sgsize;
452
453 if (nseg > 0)
454 lastaddr = segs[nseg-1].ds_addr + segs[nseg-1].ds_len;
455again:
456 sgsize = size;
457 /*
458 * Make sure we don't cross any boundaries.
459 */
460 if (map->_dm_boundary > 0) {
461 bus_addr_t baddr; /* next boundary address */
462
463 baddr = (addr + map->_dm_boundary) & bmask;
464 if (sgsize > (baddr - addr))
465 sgsize = (baddr - addr);
466 }
467
468 /*
469 * Insert chunk into a segment, coalescing with
470 * previous segment if possible.
471 */
472 if (nseg > 0 && addr == lastaddr &&
473 segs[nseg-1].ds_len + sgsize <= map->dm_maxsegsz &&
474 (map->_dm_boundary == 0 ||
475 (segs[nseg-1].ds_addr & bmask) == (addr & bmask))) {
476 /* coalesce */
477 segs[nseg-1].ds_len += sgsize;
478 } else if (nseg >= map->_dm_segcnt) {
479 return EFBIG;
480 } else {
481 /* new segment */
482 segs[nseg].ds_addr = addr;
483 segs[nseg].ds_len = sgsize;
484 nseg++;
485 }
486
487 lastaddr = addr + sgsize;
488 if (map->_dm_bounce_thresh != 0 && lastaddr > map->_dm_bounce_thresh)
489 return EINVAL;
490
491 addr += sgsize;
492 size -= sgsize;
493 if (size > 0)
494 goto again;
495
496 map->dm_nsegs = nseg;
497 return 0;
498}
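/*
 * Illustrative example of the boundary/coalescing logic above: with
 * _dm_boundary = 0x10000, loading a chunk at bus address 0xf000 of size
 * 0x2000 is clipped at the 0x10000 boundary into a 0x1000 segment at
 * 0xf000 and a second 0x1000 segment at 0x10000; the two are not
 * coalesced (even though they are contiguous and, assuming dm_maxsegsz
 * allows it, small enough) because they fall into different boundary
 * windows.
 */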
499
500/*
501 * Like _bus_dmamap_load(), but for mbufs.
502 */
503static int
504_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
505 int flags)
506{
507 struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
508 int error;
509 struct mbuf *m;
510
511 /*
	 * Make sure that on an error condition we return "no valid mappings".
513 */
514 map->dm_mapsize = 0;
515 map->dm_nsegs = 0;
516 KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
517
518#ifdef DIAGNOSTIC
519 if ((m0->m_flags & M_PKTHDR) == 0)
520 panic("_bus_dmamap_load_mbuf: no packet header");
521#endif
522
523 if (m0->m_pkthdr.len > map->_dm_size)
524 return (EINVAL);
525
526 error = 0;
527 for (m = m0; m != NULL && error == 0; m = m->m_next) {
528 int offset;
529 int remainbytes;
530 const struct vm_page * const *pgs;
531 paddr_t paddr;
532 int size;
533
534 if (m->m_len == 0)
535 continue;
536 switch (m->m_flags & (M_EXT|M_EXT_CLUSTER|M_EXT_PAGES)) {
537 case M_EXT|M_EXT_CLUSTER:
538 /* XXX KDASSERT */
539 KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
540 paddr = m->m_ext.ext_paddr +
541 (m->m_data - m->m_ext.ext_buf);
542 size = m->m_len;
543 error = _bus_dmamap_load_busaddr(t, map,
544 _BUS_PHYS_TO_BUS(paddr), size);
545 break;
546
547 case M_EXT|M_EXT_PAGES:
548 KASSERT(m->m_ext.ext_buf <= m->m_data);
549 KASSERT(m->m_data <=
550 m->m_ext.ext_buf + m->m_ext.ext_size);
551
552 offset = (vaddr_t)m->m_data -
553 trunc_page((vaddr_t)m->m_ext.ext_buf);
554 remainbytes = m->m_len;
555
556 /* skip uninteresting pages */
557 pgs = (const struct vm_page * const *)
558 m->m_ext.ext_pgs + (offset >> PAGE_SHIFT);
559
560 offset &= PAGE_MASK; /* offset in the first page */
561
		/* load each page */
563 while (remainbytes > 0) {
564 const struct vm_page *pg;
565 bus_addr_t busaddr;
566
567 size = MIN(remainbytes, PAGE_SIZE - offset);
568
569 pg = *pgs++;
570 KASSERT(pg);
571 busaddr = _BUS_VM_PAGE_TO_BUS(pg) + offset;
572
573 error = _bus_dmamap_load_busaddr(t, map,
574 busaddr, size);
575 if (error)
576 break;
577 offset = 0;
578 remainbytes -= size;
579 }
580 break;
581
582 case 0:
583 paddr = m->m_paddr + M_BUFOFFSET(m) +
584 (m->m_data - M_BUFADDR(m));
585 size = m->m_len;
586 error = _bus_dmamap_load_busaddr(t, map,
587 _BUS_PHYS_TO_BUS(paddr), size);
588 break;
589
590 default:
591 error = _bus_dmamap_load_buffer(t, map, m->m_data,
592 m->m_len, vmspace_kernel(), flags);
593 }
594 }
595 if (error == 0) {
596 map->dm_mapsize = m0->m_pkthdr.len;
597 return 0;
598 }
599
600 map->dm_nsegs = 0;
601
602 if (cookie == NULL ||
603 (cookie->id_flags & X86_DMA_MIGHT_NEED_BOUNCE) == 0)
604 return error;
605
606 /*
607 * First attempt failed; bounce it.
608 */
609
610 STAT_INCR(bounces);
611
612 /*
613 * Allocate bounce pages, if necessary.
614 */
615 if ((cookie->id_flags & X86_DMA_HAS_BOUNCE) == 0) {
616 error = _bus_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
617 flags);
618 if (error)
619 return (error);
620 }
621
622 /*
623 * Cache a pointer to the caller's buffer and load the DMA map
624 * with the bounce buffer.
625 */
626 cookie->id_origbuf = m0;
627 cookie->id_origbuflen = m0->m_pkthdr.len; /* not really used */
628 cookie->id_buftype = X86_DMA_BUFTYPE_MBUF;
629 error = bus_dmamap_load(t, map, cookie->id_bouncebuf,
630 m0->m_pkthdr.len, NULL, flags);
631 if (error)
632 return (error);
633
634 /* ...so _bus_dmamap_sync() knows we're bouncing */
635 cookie->id_flags |= X86_DMA_IS_BOUNCING;
636 return (0);
637}
638
639/*
640 * Like _bus_dmamap_load(), but for uios.
641 */
642static int
643_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
644 int flags)
645{
646 int i, error;
647 bus_size_t minlen, resid;
648 struct vmspace *vm;
649 struct iovec *iov;
650 void *addr;
651 struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
652
653 /*
	 * Make sure that on an error condition we return "no valid mappings".
655 */
656 map->dm_mapsize = 0;
657 map->dm_nsegs = 0;
658 KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
659
660 resid = uio->uio_resid;
661 iov = uio->uio_iov;
662
663 vm = uio->uio_vmspace;
664
665 error = 0;
666 for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
667 /*
668 * Now at the first iovec to load. Load each iovec
669 * until we have exhausted the residual count.
670 */
671 minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
672 addr = (void *)iov[i].iov_base;
673
674 error = _bus_dmamap_load_buffer(t, map, addr, minlen,
675 vm, flags);
676
677 resid -= minlen;
678 }
679 if (error == 0) {
680 map->dm_mapsize = uio->uio_resid;
681 return 0;
682 }
683
684 map->dm_nsegs = 0;
685
686 if (cookie == NULL ||
687 (cookie->id_flags & X86_DMA_MIGHT_NEED_BOUNCE) == 0)
688 return error;
689
690 STAT_INCR(bounces);
691
692 /*
693 * Allocate bounce pages, if necessary.
694 */
695 if ((cookie->id_flags & X86_DMA_HAS_BOUNCE) == 0) {
696 error = _bus_dma_alloc_bouncebuf(t, map, uio->uio_resid,
697 flags);
698 if (error)
699 return (error);
700 }
701
702 /*
703 * Cache a pointer to the caller's buffer and load the DMA map
704 * with the bounce buffer.
705 */
706 cookie->id_origbuf = uio;
707 cookie->id_origbuflen = uio->uio_resid;
708 cookie->id_buftype = X86_DMA_BUFTYPE_UIO;
709 error = bus_dmamap_load(t, map, cookie->id_bouncebuf,
710 uio->uio_resid, NULL, flags);
711 if (error)
712 return (error);
713
714 /* ...so _bus_dmamap_sync() knows we're bouncing */
715 cookie->id_flags |= X86_DMA_IS_BOUNCING;
716 return (0);
717}
718
719/*
720 * Like _bus_dmamap_load(), but for raw memory allocated with
721 * bus_dmamem_alloc().
722 */
723static int
724_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
725 bus_dma_segment_t *segs, int nsegs, bus_size_t size0, int flags)
726{
727 bus_size_t size;
728 int i, error = 0;
729
730 /*
	 * Make sure that on an error condition we return "no valid mappings".
732 */
733 map->dm_mapsize = 0;
734 map->dm_nsegs = 0;
735 KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
736
737 if (size0 > map->_dm_size)
738 return EINVAL;
739
740 for (i = 0, size = size0; i < nsegs && size > 0; i++) {
741 bus_dma_segment_t *ds = &segs[i];
742 bus_size_t sgsize;
743
744 sgsize = MIN(ds->ds_len, size);
745 if (sgsize == 0)
746 continue;
747 error = _bus_dmamap_load_busaddr(t, map, ds->ds_addr, sgsize);
748 if (error != 0)
749 break;
750 size -= sgsize;
751 }
752
753 if (error != 0) {
754 map->dm_mapsize = 0;
755 map->dm_nsegs = 0;
756 return error;
757 }
758
759 /* XXX TBD bounce */
760
761 map->dm_mapsize = size0;
762 return 0;
763}
764
765/*
766 * Unload a DMA map.
767 */
768static void
769_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
770{
771 struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
772
	/*
	 * Any bounce pages stay allocated for reuse until the map is
	 * destroyed; here we just clear the bouncing state and reset
	 * the map to "no valid mappings".
	 */
777 if (cookie != NULL) {
778 cookie->id_flags &= ~X86_DMA_IS_BOUNCING;
779 cookie->id_buftype = X86_DMA_BUFTYPE_INVALID;
780 }
781 map->dm_maxsegsz = map->_dm_maxmaxsegsz;
782 map->dm_mapsize = 0;
783 map->dm_nsegs = 0;
784}
785
786/*
787 * Synchronize a DMA map.
788 */
789static void
790_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
791 bus_size_t len, int ops)
792{
793 struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
794
795 /*
796 * Mixing PRE and POST operations is not allowed.
797 */
798 if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
799 (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
800 panic("%s: mix PRE and POST", __func__);
801
802#ifdef DIAGNOSTIC
803 if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
804 if (offset >= map->dm_mapsize)
805 panic("%s: bad offset 0x%jx >= 0x%jx", __func__,
806 (intmax_t)offset, (intmax_t)map->dm_mapsize);
807 if ((offset + len) > map->dm_mapsize)
808 panic("%s: bad length 0x%jx + 0x%jx > 0x%jx", __func__,
809 (intmax_t)offset, (intmax_t)len,
810 (intmax_t)map->dm_mapsize);
811 }
812#endif
813
	/*
	 * If we're not bouncing there is nothing to copy; just fall
	 * through to the memory fences below.
	 */
817 if (len == 0 || cookie == NULL ||
818 (cookie->id_flags & X86_DMA_IS_BOUNCING) == 0)
819 goto end;
820
821 switch (cookie->id_buftype) {
822 case X86_DMA_BUFTYPE_LINEAR:
823 /*
824 * Nothing to do for pre-read.
825 */
826
827 if (ops & BUS_DMASYNC_PREWRITE) {
828 /*
829 * Copy the caller's buffer to the bounce buffer.
830 */
831 memcpy((char *)cookie->id_bouncebuf + offset,
832 (char *)cookie->id_origbuf + offset, len);
833 }
834
835 if (ops & BUS_DMASYNC_POSTREAD) {
836 /*
837 * Copy the bounce buffer to the caller's buffer.
838 */
839 memcpy((char *)cookie->id_origbuf + offset,
840 (char *)cookie->id_bouncebuf + offset, len);
841 }
842
843 /*
844 * Nothing to do for post-write.
845 */
846 break;
847
848 case X86_DMA_BUFTYPE_MBUF:
849 {
850 struct mbuf *m, *m0 = cookie->id_origbuf;
851 bus_size_t minlen, moff;
852
853 /*
854 * Nothing to do for pre-read.
855 */
856
857 if (ops & BUS_DMASYNC_PREWRITE) {
858 /*
859 * Copy the caller's buffer to the bounce buffer.
860 */
861 m_copydata(m0, offset, len,
862 (char *)cookie->id_bouncebuf + offset);
863 }
864
865 if (ops & BUS_DMASYNC_POSTREAD) {
866 /*
867 * Copy the bounce buffer to the caller's buffer.
868 */
869 for (moff = offset, m = m0; m != NULL && len != 0;
870 m = m->m_next) {
871 /* Find the beginning mbuf. */
872 if (moff >= m->m_len) {
873 moff -= m->m_len;
874 continue;
875 }
876
877 /*
878 * Now at the first mbuf to sync; nail
879 * each one until we have exhausted the
880 * length.
881 */
882 minlen = len < m->m_len - moff ?
883 len : m->m_len - moff;
884
885 memcpy(mtod(m, char *) + moff,
886 (char *)cookie->id_bouncebuf + offset,
887 minlen);
888
889 moff = 0;
890 len -= minlen;
891 offset += minlen;
892 }
893 }
894
895 /*
896 * Nothing to do for post-write.
897 */
898 break;
899 }
900 case X86_DMA_BUFTYPE_UIO:
901 {
902 struct uio *uio;
903
904 uio = (struct uio *)cookie->id_origbuf;
905
906 /*
907 * Nothing to do for pre-read.
908 */
909
910 if (ops & BUS_DMASYNC_PREWRITE) {
911 /*
912 * Copy the caller's buffer to the bounce buffer.
913 */
914 _bus_dma_uiomove((char *)cookie->id_bouncebuf + offset,
915 uio, len, UIO_WRITE);
916 }
917
918 if (ops & BUS_DMASYNC_POSTREAD) {
919 _bus_dma_uiomove((char *)cookie->id_bouncebuf + offset,
920 uio, len, UIO_READ);
921 }
922
923 /*
924 * Nothing to do for post-write.
925 */
926 break;
927 }
928
929 case X86_DMA_BUFTYPE_RAW:
930 panic("%s: X86_DMA_BUFTYPE_RAW", __func__);
931 break;
932
933 case X86_DMA_BUFTYPE_INVALID:
934 panic("%s: X86_DMA_BUFTYPE_INVALID", __func__);
935 break;
936
937 default:
938 panic("%s: unknown buffer type %d", __func__,
939 cookie->id_buftype);
940 break;
941 }
942end:
943 if (ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTWRITE)) {
		/*
		 * From the memory's point of view a load can be reordered
		 * ahead of a store (a load can fetch data from the write
		 * buffers before the data hits the cache or memory); an
		 * mfence prevents that.
		 */
949 x86_mfence();
950 } else if (ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_POSTREAD)) {
		/*
		 * All prior reads must have completed before this point,
		 * and later reads must not have started yet.
		 */
955 x86_lfence();
956 }
957}
958
959/*
960 * Allocate memory safe for DMA.
961 */
962static int
963_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
964 bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
965 int flags)
966{
967 bus_addr_t high;
968
969 if (t->_bounce_alloc_hi != 0 && _BUS_AVAIL_END > t->_bounce_alloc_hi)
970 high = trunc_page(t->_bounce_alloc_hi);
971 else
972 high = trunc_page(_BUS_AVAIL_END);
973
974 return (_BUS_DMAMEM_ALLOC_RANGE(t, size, alignment, boundary,
975 segs, nsegs, rsegs, flags, t->_bounce_alloc_lo, high));
976}
977
978static int
979_bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
980 bus_size_t size, int flags)
981{
982 struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
983 int error = 0;
984
985#ifdef DIAGNOSTIC
986 if (cookie == NULL)
987 panic("_bus_dma_alloc_bouncebuf: no cookie");
988#endif
989
990 cookie->id_bouncebuflen = round_page(size);
991 error = _bus_dmamem_alloc(t, cookie->id_bouncebuflen,
992 PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
993 map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
994 if (error) {
995 cookie->id_bouncebuflen = 0;
996 cookie->id_nbouncesegs = 0;
997 return error;
998 }
999
1000 error = _bus_dmamem_map(t, cookie->id_bouncesegs,
1001 cookie->id_nbouncesegs, cookie->id_bouncebuflen,
1002 (void **)&cookie->id_bouncebuf, flags);
1003
1004 if (error) {
1005 _bus_dmamem_free(t, cookie->id_bouncesegs,
1006 cookie->id_nbouncesegs);
1007 cookie->id_bouncebuflen = 0;
1008 cookie->id_nbouncesegs = 0;
1009 } else {
1010 cookie->id_flags |= X86_DMA_HAS_BOUNCE;
1011 STAT_INCR(nbouncebufs);
1012 }
1013
1014 return (error);
1015}
1016
1017static void
1018_bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
1019{
1020 struct x86_bus_dma_cookie *cookie = map->_dm_cookie;
1021
1022#ifdef DIAGNOSTIC
1023 if (cookie == NULL)
1024 panic("_bus_dma_free_bouncebuf: no cookie");
1025#endif
1026
1027 STAT_DECR(nbouncebufs);
1028
1029 _bus_dmamem_unmap(t, cookie->id_bouncebuf, cookie->id_bouncebuflen);
1030 _bus_dmamem_free(t, cookie->id_bouncesegs,
1031 cookie->id_nbouncesegs);
1032 cookie->id_bouncebuflen = 0;
1033 cookie->id_nbouncesegs = 0;
1034 cookie->id_flags &= ~X86_DMA_HAS_BOUNCE;
1035}
1036
1037
1038/*
1039 * This function does the same as uiomove, but takes an explicit
1040 * direction, and does not update the uio structure.
1041 */
1042static int
1043_bus_dma_uiomove(void *buf, struct uio *uio, size_t n, int direction)
1044{
1045 struct iovec *iov;
1046 int error;
1047 struct vmspace *vm;
1048 char *cp;
1049 size_t resid, cnt;
1050 int i;
1051
1052 iov = uio->uio_iov;
1053 vm = uio->uio_vmspace;
1054 cp = buf;
1055 resid = n;
1056
1057 for (i = 0; i < uio->uio_iovcnt && resid > 0; i++) {
1058 iov = &uio->uio_iov[i];
1059 if (iov->iov_len == 0)
1060 continue;
1061 cnt = MIN(resid, iov->iov_len);
1062
1063 if (!VMSPACE_IS_KERNEL_P(vm) &&
1064 (curlwp->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
1065 != 0) {
1066 preempt();
1067 }
1068 if (direction == UIO_READ) {
1069 error = copyout_vmspace(vm, cp, iov->iov_base, cnt);
1070 } else {
1071 error = copyin_vmspace(vm, iov->iov_base, cp, cnt);
1072 }
1073 if (error)
1074 return (error);
1075 cp += cnt;
1076 resid -= cnt;
1077 }
1078 return (0);
1079}
1080
1081/*
1082 * Common function for freeing DMA-safe memory. May be called by
1083 * bus-specific DMA memory free functions.
1084 */
1085static void
1086_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
1087{
1088 struct vm_page *m;
1089 bus_addr_t addr;
1090 struct pglist mlist;
1091 int curseg;
1092
1093 /*
1094 * Build a list of pages to free back to the VM system.
1095 */
1096 TAILQ_INIT(&mlist);
1097 for (curseg = 0; curseg < nsegs; curseg++) {
1098 for (addr = segs[curseg].ds_addr;
1099 addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
1100 addr += PAGE_SIZE) {
1101 m = _BUS_BUS_TO_VM_PAGE(addr);
1102 TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
1103 }
1104 }
1105
1106 uvm_pglistfree(&mlist);
1107}
1108
1109/*
1110 * Common function for mapping DMA-safe memory. May be called by
1111 * bus-specific DMA memory map functions.
1112 * This supports BUS_DMA_NOCACHE.
1113 */
1114static int
1115_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
1116 size_t size, void **kvap, int flags)
1117{
1118 vaddr_t va;
1119 bus_addr_t addr;
1120 int curseg;
1121 const uvm_flag_t kmflags =
1122 (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
1123 u_int pmapflags = PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE;
1124
1125 size = round_page(size);
1126 if (flags & BUS_DMA_NOCACHE)
1127 pmapflags |= PMAP_NOCACHE;
1128
1129 va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
1130
1131 if (va == 0)
1132 return ENOMEM;
1133
1134 *kvap = (void *)va;
1135
1136 for (curseg = 0; curseg < nsegs; curseg++) {
1137 for (addr = segs[curseg].ds_addr;
1138 addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
1139 addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
1140 if (size == 0)
1141 panic("_bus_dmamem_map: size botch");
1142 _BUS_PMAP_ENTER(pmap_kernel(), va, addr,
1143 VM_PROT_READ | VM_PROT_WRITE,
1144 pmapflags);
1145 }
1146 }
1147 pmap_update(pmap_kernel());
1148
1149 return 0;
1150}
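/*
 * Sketch (hypothetical driver fragment) of how DMA-safe control memory
 * is normally obtained through the public wrappers below: allocate raw
 * segments, map them into kernel virtual space, then hand the segments
 * to bus_dmamap_load_raw():
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	void *kva;
 *
 *	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT) != 0)
 *		return ENOMEM;
 *	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE, &kva,
 *	    BUS_DMA_NOWAIT) != 0)
 *		goto fail;
 *	... bus_dmamap_create() and bus_dmamap_load_raw() with &seg ...
 */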
1151
1152/*
1153 * Common function for unmapping DMA-safe memory. May be called by
1154 * bus-specific DMA memory unmapping functions.
1155 */
1156
1157static void
1158_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
1159{
1160 pt_entry_t *pte, opte;
1161 vaddr_t va, sva, eva;
1162
1163#ifdef DIAGNOSTIC
1164 if ((u_long)kva & PGOFSET)
1165 panic("_bus_dmamem_unmap");
1166#endif
1167
1168 size = round_page(size);
1169 sva = (vaddr_t)kva;
1170 eva = sva + size;
1171
1172 /*
1173 * mark pages cacheable again.
1174 */
1175 for (va = sva; va < eva; va += PAGE_SIZE) {
1176 pte = kvtopte(va);
1177 opte = *pte;
1178 if ((opte & PG_N) != 0)
1179 pmap_pte_clearbits(pte, PG_N);
1180 }
1181 pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
1182 pmap_update(pmap_kernel());
1183 uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
1184}
1185
1186/*
1187 * Common function for mmap(2)'ing DMA-safe memory. May be called by
1188 * bus-specific DMA mmap(2)'ing functions.
1189 */
1190static paddr_t
1191_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
1192 off_t off, int prot, int flags)
1193{
1194 int i;
1195
1196 for (i = 0; i < nsegs; i++) {
1197#ifdef DIAGNOSTIC
1198 if (off & PGOFSET)
1199 panic("_bus_dmamem_mmap: offset unaligned");
1200 if (segs[i].ds_addr & PGOFSET)
1201 panic("_bus_dmamem_mmap: segment unaligned");
1202 if (segs[i].ds_len & PGOFSET)
1203 panic("_bus_dmamem_mmap: segment size not multiple"
1204 " of page size");
1205#endif
1206 if (off >= segs[i].ds_len) {
1207 off -= segs[i].ds_len;
1208 continue;
1209 }
1210
1211 return (x86_btop(_BUS_BUS_TO_PHYS(segs[i].ds_addr + off)));
1212 }
1213
1214 /* Page not found. */
1215 return (-1);
1216}
1217
1218/**********************************************************************
1219 * DMA utility functions
1220 **********************************************************************/
1221
1222/*
1223 * Utility function to load a linear buffer.
1224 */
1225static int
1226_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
1227 bus_size_t buflen, struct vmspace *vm, int flags)
1228{
1229 bus_size_t sgsize;
1230 bus_addr_t curaddr;
1231 vaddr_t vaddr = (vaddr_t)buf;
1232 pmap_t pmap;
1233
1234 if (vm != NULL)
1235 pmap = vm_map_pmap(&vm->vm_map);
1236 else
1237 pmap = pmap_kernel();
1238
1239 while (buflen > 0) {
1240 int error;
1241
1242 /*
1243 * Get the bus address for this segment.
1244 */
1245 curaddr = _BUS_VIRT_TO_BUS(pmap, vaddr);
1246
1247 /*
1248 * Compute the segment size, and adjust counts.
1249 */
1250 sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
1251 if (buflen < sgsize)
1252 sgsize = buflen;
1253
1254 /*
1255 * If we're beyond the bounce threshold, notify
1256 * the caller.
1257 */
1258 if (map->_dm_bounce_thresh != 0 &&
1259 curaddr + sgsize >= map->_dm_bounce_thresh)
1260 return (EINVAL);
1261
1262
1263 error = _bus_dmamap_load_busaddr(t, map, curaddr, sgsize);
1264 if (error)
1265 return error;
1266
1267 vaddr += sgsize;
1268 buflen -= sgsize;
1269 }
1270
1271 return (0);
1272}
1273
1274static int
1275_bus_dmatag_subregion(bus_dma_tag_t tag, bus_addr_t min_addr,
1276 bus_addr_t max_addr, bus_dma_tag_t *newtag, int flags)
1277{
1278
1279 if ((tag->_bounce_thresh != 0 && max_addr >= tag->_bounce_thresh) &&
1280 (tag->_bounce_alloc_hi != 0 && max_addr >= tag->_bounce_alloc_hi) &&
1281 (min_addr <= tag->_bounce_alloc_lo)) {
1282 *newtag = tag;
1283 /* if the tag must be freed, add a reference */
1284 if (tag->_tag_needs_free)
1285 (tag->_tag_needs_free)++;
1286 return 0;
1287 }
1288
1289 if ((*newtag = malloc(sizeof(struct x86_bus_dma_tag), M_DMAMAP,
1290 (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
1291 return ENOMEM;
1292
1293 **newtag = *tag;
1294 (*newtag)->_tag_needs_free = 1;
1295
1296 if (tag->_bounce_thresh == 0 || max_addr < tag->_bounce_thresh)
1297 (*newtag)->_bounce_thresh = max_addr;
1298 if (tag->_bounce_alloc_hi == 0 || max_addr < tag->_bounce_alloc_hi)
1299 (*newtag)->_bounce_alloc_hi = max_addr;
1300 if (min_addr > tag->_bounce_alloc_lo)
1301 (*newtag)->_bounce_alloc_lo = min_addr;
1302
1303 return 0;
1304}
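/*
 * Sketch (hypothetical caller): a device limited to 24-bit DMA
 * addressing (e.g. ISA-style DMA) can derive a restricted tag instead
 * of using its parent's tag directly:
 *
 *	bus_dma_tag_t dmat24;
 *
 *	error = bus_dmatag_subregion(parent_dmat, 0, 0xffffff, &dmat24,
 *	    BUS_DMA_NOWAIT);
 */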
1305
1306static void
1307_bus_dmatag_destroy(bus_dma_tag_t tag)
1308{
1309
1310 switch (tag->_tag_needs_free) {
1311 case 0:
1312 break; /* not allocated with malloc */
1313 case 1:
1314 free(tag, M_DMAMAP); /* last reference to tag */
1315 break;
1316 default:
1317 (tag->_tag_needs_free)--; /* one less reference */
1318 }
1319}
1320
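/*
 * Public interface.  Each wrapper below first walks the tag's override
 * chain (bdt_super) and, if no override claims the operation, falls back
 * to the corresponding _bus_* implementation above.
 *
 * Sketch (hypothetical driver fragment) of a transfer life cycle as seen
 * through this interface:
 *
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT, &map) != 0)
 *		return ENOMEM;
 *	if (bus_dmamap_load(sc->sc_dmat, map, buf, len, NULL,
 *	    BUS_DMA_NOWAIT | BUS_DMA_WRITE) == 0) {
 *		bus_dmamap_sync(sc->sc_dmat, map, 0, len,
 *		    BUS_DMASYNC_PREWRITE);
 *		... start the transfer and wait for it to complete ...
 *		bus_dmamap_sync(sc->sc_dmat, map, 0, len,
 *		    BUS_DMASYNC_POSTWRITE);
 *		bus_dmamap_unload(sc->sc_dmat, map);
 *	}
 *	bus_dmamap_destroy(sc->sc_dmat, map);
 */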
1321
1322void
1323bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t p, bus_addr_t o, bus_size_t l,
1324 int ops)
1325{
1326 bus_dma_tag_t it;
1327
1328 if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_SYNC) == 0)
1329 ; /* skip override */
1330 else for (it = t; it != NULL; it = it->bdt_super) {
1331 if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_SYNC) == 0)
1332 continue;
1333 (*it->bdt_ov->ov_dmamap_sync)(it->bdt_ctx, t, p, o,
1334 l, ops);
1335 return;
1336 }
1337
1338 if (ops & BUS_DMASYNC_POSTREAD)
1339 x86_lfence();
1340
1341 _bus_dmamap_sync(t, p, o, l, ops);
1342}
1343
1344int
1345bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
1346 bus_size_t maxsegsz, bus_size_t boundary, int flags,
1347 bus_dmamap_t *dmamp)
1348{
1349 bus_dma_tag_t it;
1350
1351 if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_CREATE) == 0)
1352 ; /* skip override */
1353 else for (it = t; it != NULL; it = it->bdt_super) {
1354 if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_CREATE) == 0)
1355 continue;
1356 return (*it->bdt_ov->ov_dmamap_create)(it->bdt_ctx, t, size,
1357 nsegments, maxsegsz, boundary, flags, dmamp);
1358 }
1359
1360 return _bus_dmamap_create(t, size, nsegments, maxsegsz,
1361 boundary, flags, dmamp);
1362}
1363
1364void
1365bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t dmam)
1366{
1367 bus_dma_tag_t it;
1368
1369 if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_DESTROY) == 0)
1370 ; /* skip override */
1371 else for (it = t; it != NULL; it = it->bdt_super) {
1372 if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_DESTROY) == 0)
1373 continue;
1374 (*it->bdt_ov->ov_dmamap_destroy)(it->bdt_ctx, t, dmam);
1375 return;
1376 }
1377
1378 _bus_dmamap_destroy(t, dmam);
1379}
1380
1381int
1382bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t dmam, void *buf,
1383 bus_size_t buflen, struct proc *p, int flags)
1384{
1385 bus_dma_tag_t it;
1386
1387 if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_LOAD) == 0)
1388 ; /* skip override */
1389 else for (it = t; it != NULL; it = it->bdt_super) {
1390 if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_LOAD) == 0)
1391 continue;
1392 return (*it->bdt_ov->ov_dmamap_load)(it->bdt_ctx, t, dmam,
1393 buf, buflen, p, flags);
1394 }
1395
1396 return _bus_dmamap_load(t, dmam, buf, buflen, p, flags);
1397}
1398
1399int
1400bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t dmam,
1401 struct mbuf *chain, int flags)
1402{
1403 bus_dma_tag_t it;
1404
1405 if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_LOAD_MBUF) == 0)
1406 ; /* skip override */
1407 else for (it = t; it != NULL; it = it->bdt_super) {
1408 if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_LOAD_MBUF) == 0)
1409 continue;
1410 return (*it->bdt_ov->ov_dmamap_load_mbuf)(it->bdt_ctx, t, dmam,
1411 chain, flags);
1412 }
1413
1414 return _bus_dmamap_load_mbuf(t, dmam, chain, flags);
1415}
1416
1417int
1418bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t dmam,
1419 struct uio *uio, int flags)
1420{
1421 bus_dma_tag_t it;
1422
1423 if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_LOAD_UIO) == 0)
1424 ; /* skip override */
1425 else for (it = t; it != NULL; it = it->bdt_super) {
1426 if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_LOAD_UIO) == 0)
1427 continue;
1428 return (*it->bdt_ov->ov_dmamap_load_uio)(it->bdt_ctx, t, dmam,
1429 uio, flags);
1430 }
1431
1432 return _bus_dmamap_load_uio(t, dmam, uio, flags);
1433}
1434
1435int
1436bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t dmam,
1437 bus_dma_segment_t *segs, int nsegs,
1438 bus_size_t size, int flags)
1439{
1440 bus_dma_tag_t it;
1441
1442 if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_LOAD_RAW) == 0)
1443 ; /* skip override */
1444 else for (it = t; it != NULL; it = it->bdt_super) {
1445 if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_LOAD_RAW) == 0)
1446 continue;
1447 return (*it->bdt_ov->ov_dmamap_load_raw)(it->bdt_ctx, t, dmam,
1448 segs, nsegs, size, flags);
1449 }
1450
1451 return _bus_dmamap_load_raw(t, dmam, segs, nsegs, size, flags);
1452}
1453
1454void
1455bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t dmam)
1456{
1457 bus_dma_tag_t it;
1458
1459 if ((t->bdt_exists & BUS_DMAMAP_OVERRIDE_UNLOAD) == 0)
1460 ; /* skip override */
1461 else for (it = t; it != NULL; it = it->bdt_super) {
1462 if ((it->bdt_present & BUS_DMAMAP_OVERRIDE_UNLOAD) == 0)
1463 continue;
1464 (*it->bdt_ov->ov_dmamap_unload)(it->bdt_ctx, t, dmam);
1465 return;
1466 }
1467
1468 _bus_dmamap_unload(t, dmam);
1469}
1470
1471int
1472bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
1473 bus_size_t boundary, bus_dma_segment_t *segs, int nsegs,
1474 int *rsegs, int flags)
1475{
1476 bus_dma_tag_t it;
1477
1478 if ((t->bdt_exists & BUS_DMAMEM_OVERRIDE_ALLOC) == 0)
1479 ; /* skip override */
1480 else for (it = t; it != NULL; it = it->bdt_super) {
1481 if ((it->bdt_present & BUS_DMAMEM_OVERRIDE_ALLOC) == 0)
1482 continue;
1483 return (*it->bdt_ov->ov_dmamem_alloc)(it->bdt_ctx, t, size,
1484 alignment, boundary, segs, nsegs, rsegs, flags);
1485 }
1486
1487 return _bus_dmamem_alloc(t, size, alignment, boundary, segs,
1488 nsegs, rsegs, flags);
1489}
1490
1491void
1492bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
1493{
1494 bus_dma_tag_t it;
1495
1496 if ((t->bdt_exists & BUS_DMAMEM_OVERRIDE_FREE) == 0)
1497 ; /* skip override */
1498 else for (it = t; it != NULL; it = it->bdt_super) {
1499 if ((it->bdt_present & BUS_DMAMEM_OVERRIDE_FREE) == 0)
1500 continue;
1501 (*it->bdt_ov->ov_dmamem_free)(it->bdt_ctx, t, segs, nsegs);
1502 return;
1503 }
1504
1505 _bus_dmamem_free(t, segs, nsegs);
1506}
1507
1508int
1509bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
1510 size_t size, void **kvap, int flags)
1511{
1512 bus_dma_tag_t it;
1513
1514 if ((t->bdt_exists & BUS_DMAMEM_OVERRIDE_MAP) == 0)
1515 ; /* skip override */
1516 else for (it = t; it != NULL; it = it->bdt_super) {
1517 if ((it->bdt_present & BUS_DMAMEM_OVERRIDE_MAP) == 0)
1518 continue;
1519 return (*it->bdt_ov->ov_dmamem_map)(it->bdt_ctx, t,
1520 segs, nsegs, size, kvap, flags);
1521 }
1522
1523 return _bus_dmamem_map(t, segs, nsegs, size, kvap, flags);
1524}
1525
1526void
1527bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
1528{
1529 bus_dma_tag_t it;
1530
1531 if ((t->bdt_exists & BUS_DMAMEM_OVERRIDE_UNMAP) == 0)
1532 ; /* skip override */
1533 else for (it = t; it != NULL; it = it->bdt_super) {
1534 if ((it->bdt_present & BUS_DMAMEM_OVERRIDE_UNMAP) == 0)
1535 continue;
1536 (*it->bdt_ov->ov_dmamem_unmap)(it->bdt_ctx, t, kva, size);
1537 return;
1538 }
1539
1540 _bus_dmamem_unmap(t, kva, size);
1541}
1542
1543paddr_t
1544bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
1545 off_t off, int prot, int flags)
1546{
1547 bus_dma_tag_t it;
1548
1549 if ((t->bdt_exists & BUS_DMAMEM_OVERRIDE_MMAP) == 0)
1550 ; /* skip override */
1551 else for (it = t; it != NULL; it = it->bdt_super) {
1552 if ((it->bdt_present & BUS_DMAMEM_OVERRIDE_MMAP) == 0)
1553 continue;
1554 return (*it->bdt_ov->ov_dmamem_mmap)(it->bdt_ctx, t, segs,
1555 nsegs, off, prot, flags);
1556 }
1557
1558 return _bus_dmamem_mmap(t, segs, nsegs, off, prot, flags);
1559}
1560
1561int
1562bus_dmatag_subregion(bus_dma_tag_t t, bus_addr_t min_addr,
1563 bus_addr_t max_addr, bus_dma_tag_t *newtag, int flags)
1564{
1565 bus_dma_tag_t it;
1566
1567 if ((t->bdt_exists & BUS_DMATAG_OVERRIDE_SUBREGION) == 0)
1568 ; /* skip override */
1569 else for (it = t; it != NULL; it = it->bdt_super) {
1570 if ((it->bdt_present & BUS_DMATAG_OVERRIDE_SUBREGION) == 0)
1571 continue;
1572 return (*it->bdt_ov->ov_dmatag_subregion)(it->bdt_ctx, t,
1573 min_addr, max_addr, newtag, flags);
1574 }
1575
1576 return _bus_dmatag_subregion(t, min_addr, max_addr, newtag, flags);
1577}
1578
1579void
1580bus_dmatag_destroy(bus_dma_tag_t t)
1581{
1582 bus_dma_tag_t it;
1583
1584 if ((t->bdt_exists & BUS_DMATAG_OVERRIDE_DESTROY) == 0)
1585 ; /* skip override */
1586 else for (it = t; it != NULL; it = it->bdt_super) {
1587 if ((it->bdt_present & BUS_DMATAG_OVERRIDE_DESTROY) == 0)
1588 continue;
1589 (*it->bdt_ov->ov_dmatag_destroy)(it->bdt_ctx, t);
1590 return;
1591 }
1592
1593 _bus_dmatag_destroy(t);
1594}
1595
1596static const void *
1597bit_to_function_pointer(const struct bus_dma_overrides *ov, uint64_t bit)
1598{
1599 switch (bit) {
1600 case BUS_DMAMAP_OVERRIDE_CREATE:
1601 return ov->ov_dmamap_create;
1602 case BUS_DMAMAP_OVERRIDE_DESTROY:
1603 return ov->ov_dmamap_destroy;
1604 case BUS_DMAMAP_OVERRIDE_LOAD:
1605 return ov->ov_dmamap_load;
1606 case BUS_DMAMAP_OVERRIDE_LOAD_MBUF:
1607 return ov->ov_dmamap_load_mbuf;
1608 case BUS_DMAMAP_OVERRIDE_LOAD_UIO:
1609 return ov->ov_dmamap_load_uio;
1610 case BUS_DMAMAP_OVERRIDE_LOAD_RAW:
1611 return ov->ov_dmamap_load_raw;
1612 case BUS_DMAMAP_OVERRIDE_UNLOAD:
1613 return ov->ov_dmamap_unload;
1614 case BUS_DMAMAP_OVERRIDE_SYNC:
1615 return ov->ov_dmamap_sync;
1616 case BUS_DMAMEM_OVERRIDE_ALLOC:
1617 return ov->ov_dmamem_alloc;
1618 case BUS_DMAMEM_OVERRIDE_FREE:
1619 return ov->ov_dmamem_free;
1620 case BUS_DMAMEM_OVERRIDE_MAP:
1621 return ov->ov_dmamem_map;
1622 case BUS_DMAMEM_OVERRIDE_UNMAP:
1623 return ov->ov_dmamem_unmap;
1624 case BUS_DMAMEM_OVERRIDE_MMAP:
1625 return ov->ov_dmamem_mmap;
1626 case BUS_DMATAG_OVERRIDE_SUBREGION:
1627 return ov->ov_dmatag_subregion;
1628 case BUS_DMATAG_OVERRIDE_DESTROY:
1629 return ov->ov_dmatag_destroy;
1630 default:
1631 return NULL;
1632 }
1633}
1634
1635void
1636bus_dma_tag_destroy(bus_dma_tag_t bdt)
1637{
1638 if (bdt->bdt_super != NULL)
1639 bus_dmatag_destroy(bdt->bdt_super);
1640 kmem_free(bdt, sizeof(struct x86_bus_dma_tag));
1641}
1642
1643int
1644bus_dma_tag_create(bus_dma_tag_t obdt, const uint64_t present,
1645 const struct bus_dma_overrides *ov, void *ctx, bus_dma_tag_t *bdtp)
1646{
1647 uint64_t bit, bits, nbits;
1648 bus_dma_tag_t bdt;
1649 const void *fp;
1650
1651 if (ov == NULL || present == 0)
1652 return EINVAL;
1653
1654 bdt = kmem_alloc(sizeof(struct x86_bus_dma_tag), KM_SLEEP);
1655
1656 if (bdt == NULL)
1657 return ENOMEM;
1658
1659 *bdt = *obdt;
1660 /* don't let bus_dmatag_destroy free these */
1661 bdt->_tag_needs_free = 0;
1662
1663 bdt->bdt_super = obdt;
1664
1665 for (bits = present; bits != 0; bits = nbits) {
1666 nbits = bits & (bits - 1);
1667 bit = nbits ^ bits;
1668 if ((fp = bit_to_function_pointer(ov, bit)) == NULL) {
1669#ifdef DEBUG
1670 printf("%s: missing bit %" PRIx64 "\n", __func__, bit);
1671#endif
1672 goto einval;
1673 }
1674 }
1675
1676 bdt->bdt_ov = ov;
1677 bdt->bdt_exists = obdt->bdt_exists | present;
1678 bdt->bdt_present = present;
1679 bdt->bdt_ctx = ctx;
1680
1681 *bdtp = bdt;
1682 if (obdt->_tag_needs_free)
1683 obdt->_tag_needs_free++;
1684
1685 return 0;
1686einval:
1687 kmem_free(bdt, sizeof(struct x86_bus_dma_tag));
1688 return EINVAL;
1689}
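/*
 * Sketch (hypothetical bus front-end) of stacking a tag that intercepts
 * only map syncs.  Note that when an override is present the wrappers
 * above return immediately after calling it, so the override must carry
 * out (or explicitly forward) the underlying operation itself:
 *
 *	static void
 *	mybus_dmamap_sync(void *ctx, bus_dma_tag_t t, bus_dmamap_t map,
 *	    bus_addr_t offset, bus_size_t len, int ops)
 *	{
 *		struct mybus_softc *sc = ctx;
 *
 *		... device-specific work, then perform the actual sync ...
 *	}
 *
 *	static const struct bus_dma_overrides mybus_dma_ov = {
 *		.ov_dmamap_sync = mybus_dmamap_sync,
 *	};
 *
 *	error = bus_dma_tag_create(parent_tag, BUS_DMAMAP_OVERRIDE_SYNC,
 *	    &mybus_dma_ov, sc, &sc->sc_dmat);
 */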
1690