/*	src/sys/arch/arm/arm32/bus_dma.c, revision 1.34	*/
/*	$NetBSD: bus_dma.c,v 1.33 2003/07/15 00:24:40 lukem Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.33 2003/07/15 00:24:40 lukem Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#define _ARM32_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <machine/cpu.h>

#include <arm/cpufunc.h>

int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int, paddr_t *, int *, int);
struct arm32_dma_range *_bus_dma_inrange(struct arm32_dma_range *,
	    int, bus_addr_t);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
__inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}
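
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): how the loaders in this file use _bus_dma_inrange() to turn
 * a CPU physical address into a bus address inside a DMA window.  The
 * helper name and its arguments are hypothetical; the block is
 * compiled out.
 */
#if 0
static int
example_phys_to_bus(bus_dma_tag_t t, paddr_t pa, bus_addr_t *bap)
{
	struct arm32_dma_range *dr;

	if (t->_ranges == NULL) {
		/* No restrictions; bus address == physical address. */
		*bap = pa;
		return (0);
	}
	dr = _bus_dma_inrange(t->_ranges, t->_nranges, pa);
	if (dr == NULL)
		return (EINVAL);	/* page not DMA-able on this bus */
	/* Keep the offset within the range, rebased onto the window. */
	*bap = (pa - dr->dr_sysbase) + dr->dr_busbase;
	return (0);
}
#endif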

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct arm32_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

#ifdef DEBUG_DMA
	printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx flags=%x\n",
	    t, size, nsegments, maxsegsz, boundary, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct arm32_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (struct arm32_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->_dm_origbuf = NULL;
	map->_dm_buftype = ARM32_BUFTYPE_INVALID;
	map->_dm_proc = NULL;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
#ifdef DEBUG_DMA
	printf("dmamap_create:map=%p\n", map);
#endif	/* DEBUG_DMA */
	return (0);
}
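
/*
 * Illustrative sketch (editor's addition): typical driver-side use of
 * bus_dmamap_create() through the bus tag.  An unaligned MAXPHYS-sized
 * buffer can touch one page more than MAXPHYS / PAGE_SIZE, hence the
 * "+ 1".  "sc" and its members are hypothetical; compiled out.
 */
#if 0
	error = bus_dmamap_create(sc->sc_dmat,	/* tag from attach args */
	    MAXPHYS,				/* largest load we'll do */
	    MAXPHYS / PAGE_SIZE + 1,		/* worst-case segment count */
	    MAXPHYS,				/* max size of one segment */
	    0,					/* no boundary restriction */
	    BUS_DMA_WAITOK, &sc->sc_dmamap);
	if (error)
		return (error);
#endif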

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_destroy: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */

	/*
	 * Explicit unload.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_origbuf = NULL;
	map->_dm_buftype = ARM32_BUFTYPE_INVALID;
	map->_dm_proc = NULL;

	free(map, M_DMAMAP);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr;
	int seg, error;

#ifdef DEBUG_DMA
	printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
	    t, map, buf, buflen, p, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	/*
	 * Assume coherent until _bus_dmamap_load_buffer() finds a
	 * cacheable mapping and clears this flag.
	 */
	map->_dm_flags |= ARM32_DMAMAP_COHERENT;

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_origbuf = buf;
		map->_dm_buftype = ARM32_BUFTYPE_LINEAR;
		map->_dm_proc = p;
	}
#ifdef DEBUG_DMA
	printf("dmamap_load: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
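
/*
 * Illustrative sketch (editor's addition): the usual life cycle of a
 * linear-buffer mapping as seen from a driver.  "sc", "buf" and "len"
 * are hypothetical; error handling is elided and the block is
 * compiled out.
 */
#if 0
	/* Load: translates buf into bus-visible segments. */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, len,
	    NULL /* kernel buffer */, BUS_DMA_NOWAIT);

	/* Before the device reads the buffer: push dirty lines out. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
	    BUS_DMASYNC_PREWRITE);

	/* ... start the DMA transfer and wait for completion ... */

	/*
	 * POSTWRITE is a no-op in this implementation, but keeps the
	 * driver portable to bus_dma back-ends that bounce.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
#endif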

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	struct arm32_dma_range *dr;
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
	    t, map, m0, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif	/* DIAGNOSTIC */

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Mbuf chains should almost never have coherent (i.e.
	 * un-cached) mappings, so clear that flag now.
	 */
	map->_dm_flags &= ~ARM32_DMAMAP_COHERENT;

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		/* XXX Could be better about coalescing. */
		/* XXX Doesn't check boundaries. */
		switch (m->m_flags & (M_EXT|M_CLUSTER)) {
		case M_EXT|M_CLUSTER:
			/* Cluster: physical address cached in the mbuf. */
			/* XXX KDASSERT */
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			lastaddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
 have_addr:
			if (first == 0 &&
			    ++seg >= map->_dm_segcnt) {
				error = EFBIG;
				break;
			}
			/*
			 * Make sure we're in an allowed DMA range.
			 */
			if (t->_ranges != NULL) {
				/* XXX cache last result? */
				dr = _bus_dma_inrange(t->_ranges, t->_nranges,
				    lastaddr);
				if (dr == NULL) {
					error = EINVAL;
					break;
				}

				/*
				 * In a valid DMA range.  Translate the
				 * physical memory address to an address
				 * in the DMA window.
				 */
				lastaddr = (lastaddr - dr->dr_sysbase) +
				    dr->dr_busbase;
			}
			map->dm_segs[seg].ds_addr = lastaddr;
			map->dm_segs[seg].ds_len = m->m_len;
			lastaddr += m->m_len;
			break;

		case 0:
			/*
			 * Data in the mbuf itself; derive its physical
			 * address from the mbuf's own.
			 */
			lastaddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			goto have_addr;

		default:
			/* No known physical address; take the slow path. */
			error = _bus_dmamap_load_buffer(t, map, m->m_data,
			    m->m_len, NULL, flags, &lastaddr, &seg, first);
		}
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
		map->_dm_origbuf = m0;
		map->_dm_buftype = ARM32_BUFTYPE_MBUF;
		map->_dm_proc = NULL;	/* always kernel */
	}
#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
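
/*
 * Illustrative sketch (editor's addition): loading an outgoing packet
 * for a network interface.  EFBIG means the chain has more fragments
 * than the map has segments; a real driver would coalesce the chain
 * into a fresh mbuf and retry.  "sc" and "txs" are hypothetical;
 * compiled out.
 */
#if 0
	error = bus_dmamap_load_mbuf(sc->sc_dmat, txs->txs_dmamap, m0,
	    BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too fragmented for the hardware; coalesce and retry. */
	}
	if (error == 0)
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 0,
		    txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
#endif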

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	/*
	 * Assume coherent until _bus_dmamap_load_buffer() finds a
	 * cacheable mapping and clears this flag.
	 */
	map->_dm_flags |= ARM32_DMAMAP_COHERENT;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
		map->_dm_origbuf = uio;
		map->_dm_buftype = ARM32_BUFTYPE_UIO;
		map->_dm_proc = p;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_unload: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_origbuf = NULL;
	map->_dm_buftype = ARM32_BUFTYPE_INVALID;
	map->_dm_proc = NULL;
}
413:
1.19 briggs 414: static __inline void
1.14 thorpej 415: _bus_dmamap_sync_linear(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
416: bus_size_t len, int ops)
417: {
418: vaddr_t addr = (vaddr_t) map->_dm_origbuf;
419:
420: addr += offset;
421:
422: switch (ops) {
423: case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
424: cpu_dcache_wbinv_range(addr, len);
425: break;
426:
427: case BUS_DMASYNC_PREREAD:
1.18 thorpej 428: if (((addr | len) & arm_dcache_align_mask) == 0)
429: cpu_dcache_inv_range(addr, len);
430: else
431: cpu_dcache_wbinv_range(addr, len);
1.14 thorpej 432: break;
433:
434: case BUS_DMASYNC_PREWRITE:
435: cpu_dcache_wb_range(addr, len);
436: break;
437: }
438: }
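
/*
 * Worked example (editor's addition), assuming a 32-byte cache line
 * (arm_dcache_align_mask == 0x1f): a PREREAD of addr 0x1000, len 0x40
 * gives ((0x1000 | 0x40) & 0x1f) == 0, so the lines can simply be
 * invalidated.  A PREREAD of addr 0x1004, len 0x18 touches partial
 * lines; invalidating those would also discard unrelated dirty data
 * sharing the same lines, so that range is written back and
 * invalidated instead.
 */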
439:
1.19 briggs 440: static __inline void
1.14 thorpej 441: _bus_dmamap_sync_mbuf(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
442: bus_size_t len, int ops)
443: {
444: struct mbuf *m, *m0 = map->_dm_origbuf;
445: bus_size_t minlen, moff;
446: vaddr_t maddr;
447:
448: for (moff = offset, m = m0; m != NULL && len != 0;
449: m = m->m_next) {
450: /* Find the beginning mbuf. */
451: if (moff >= m->m_len) {
452: moff -= m->m_len;
453: continue;
454: }
455:
456: /*
457: * Now at the first mbuf to sync; nail each one until
458: * we have exhausted the length.
459: */
460: minlen = m->m_len - moff;
461: if (len < minlen)
462: minlen = len;
463:
464: maddr = mtod(m, vaddr_t);
465: maddr += moff;
466:
1.28 thorpej 467: /*
468: * We can save a lot of work here if we know the mapping
469: * is read-only at the MMU:
470: *
471: * If a mapping is read-only, no dirty cache blocks will
472: * exist for it. If a writable mapping was made read-only,
473: * we know any dirty cache lines for the range will have
474: * been cleaned for us already. Therefore, if the upper
475: * layer can tell us we have a read-only mapping, we can
476: * skip all cache cleaning.
477: *
478: * NOTE: This only works if we know the pmap cleans pages
479: * before making a read-write -> read-only transition. If
480: * this ever becomes non-true (e.g. Physically Indexed
481: * cache), this will have to be revisited.
482: */
1.14 thorpej 483: switch (ops) {
484: case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
1.28 thorpej 485: if (! M_ROMAP(m)) {
486: cpu_dcache_wbinv_range(maddr, minlen);
487: break;
488: }
489: /* else FALLTHROUGH */
1.14 thorpej 490:
491: case BUS_DMASYNC_PREREAD:
1.18 thorpej 492: if (((maddr | minlen) & arm_dcache_align_mask) == 0)
493: cpu_dcache_inv_range(maddr, minlen);
494: else
495: cpu_dcache_wbinv_range(maddr, minlen);
1.14 thorpej 496: break;
497:
498: case BUS_DMASYNC_PREWRITE:
1.28 thorpej 499: if (! M_ROMAP(m))
500: cpu_dcache_wb_range(maddr, minlen);
1.14 thorpej 501: break;
502: }
503: moff = 0;
504: len -= minlen;
505: }
506: }
507:
1.19 briggs 508: static __inline void
1.14 thorpej 509: _bus_dmamap_sync_uio(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
510: bus_size_t len, int ops)
511: {
512: struct uio *uio = map->_dm_origbuf;
513: struct iovec *iov;
514: bus_size_t minlen, ioff;
515: vaddr_t addr;
516:
517: for (iov = uio->uio_iov, ioff = offset; len != 0; iov++) {
518: /* Find the beginning iovec. */
519: if (ioff >= iov->iov_len) {
520: ioff -= iov->iov_len;
521: continue;
522: }
523:
524: /*
525: * Now at the first iovec to sync; nail each one until
526: * we have exhausted the length.
527: */
528: minlen = iov->iov_len - ioff;
529: if (len < minlen)
530: minlen = len;
531:
532: addr = (vaddr_t) iov->iov_base;
533: addr += ioff;
534:
535: switch (ops) {
536: case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
537: cpu_dcache_wbinv_range(addr, minlen);
538: break;
539:
540: case BUS_DMASYNC_PREREAD:
1.18 thorpej 541: if (((addr | minlen) & arm_dcache_align_mask) == 0)
542: cpu_dcache_inv_range(addr, minlen);
543: else
544: cpu_dcache_wbinv_range(addr, minlen);
1.14 thorpej 545: break;
546:
547: case BUS_DMASYNC_PREWRITE:
548: cpu_dcache_wb_range(addr, minlen);
549: break;
550: }
551: ioff = 0;
552: len -= minlen;
553: }
554: }
555:
1.1 chris 556: /*
557: * Common function for DMA map synchronization. May be called
558: * by bus-specific DMA map synchronization functions.
1.8 thorpej 559: *
560: * This version works for the Virtually Indexed Virtually Tagged
561: * cache found on 32-bit ARM processors.
562: *
563: * XXX Should have separate versions for write-through vs.
564: * XXX write-back caches. We currently assume write-back
565: * XXX here, which is not as efficient as it could be for
566: * XXX the write-through case.
1.1 chris 567: */
568: void
1.7 thorpej 569: _bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
570: bus_size_t len, int ops)
1.1 chris 571: {
572:
573: #ifdef DEBUG_DMA
574: printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
575: t, map, offset, len, ops);
576: #endif /* DEBUG_DMA */
577:
1.8 thorpej 578: /*
579: * Mixing of PRE and POST operations is not allowed.
580: */
581: if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
582: (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
583: panic("_bus_dmamap_sync: mix PRE and POST");
584:
585: #ifdef DIAGNOSTIC
586: if (offset >= map->dm_mapsize)
587: panic("_bus_dmamap_sync: bad offset %lu (map size is %lu)",
588: offset, map->dm_mapsize);
589: if (len == 0 || (offset + len) > map->dm_mapsize)
590: panic("_bus_dmamap_sync: bad length");
591: #endif
592:
593: /*
594: * For a virtually-indexed write-back cache, we need
595: * to do the following things:
596: *
597: * PREREAD -- Invalidate the D-cache. We do this
598: * here in case a write-back is required by the back-end.
599: *
600: * PREWRITE -- Write-back the D-cache. Note that if
601: * we are doing a PREREAD|PREWRITE, we can collapse
602: * the whole thing into a single Wb-Inv.
603: *
604: * POSTREAD -- Nothing.
605: *
606: * POSTWRITE -- Nothing.
607: */
608:
609: ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
610: if (ops == 0)
611: return;
612:
1.17 thorpej 613: /* Skip cache frobbing if mapping was COHERENT. */
614: if (map->_dm_flags & ARM32_DMAMAP_COHERENT) {
615: /* Drain the write buffer. */
616: cpu_drain_writebuf();
617: return;
618: }
1.8 thorpej 619:
620: /*
621: * If the mapping is not the kernel's and also not the
622: * current process's (XXX actually, vmspace), then we
623: * don't have anything to do, since the cache is Wb-Inv'd
624: * on context switch.
625: *
626: * XXX REVISIT WHEN WE DO FCSE!
627: */
1.23 thorpej 628: if (__predict_false(map->_dm_proc != NULL &&
629: curlwp != NULL && map->_dm_proc != curproc))
1.8 thorpej 630: return;
631:
1.14 thorpej 632: switch (map->_dm_buftype) {
633: case ARM32_BUFTYPE_LINEAR:
634: _bus_dmamap_sync_linear(t, map, offset, len, ops);
635: break;
636:
637: case ARM32_BUFTYPE_MBUF:
638: _bus_dmamap_sync_mbuf(t, map, offset, len, ops);
639: break;
640:
641: case ARM32_BUFTYPE_UIO:
642: _bus_dmamap_sync_uio(t, map, offset, len, ops);
643: break;
644:
645: case ARM32_BUFTYPE_RAW:
646: panic("_bus_dmamap_sync: ARM32_BUFTYPE_RAW");
647: break;
648:
649: case ARM32_BUFTYPE_INVALID:
650: panic("_bus_dmamap_sync: ARM32_BUFTYPE_INVALID");
651: break;
652:
653: default:
654: printf("unknown buffer type %d\n", map->_dm_buftype);
655: panic("_bus_dmamap_sync");
1.8 thorpej 656: }
1.1 chris 657:
1.8 thorpej 658: /* Drain the write buffer. */
659: cpu_drain_writebuf();
1.1 chris 660: }
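
/*
 * Illustrative sketch (editor's addition): how the PRE/POST pairs
 * bracket a device-to-memory ("read") transfer from a driver's point
 * of view.  "sc", "map" and "len" are hypothetical; compiled out.
 */
#if 0
	/* Device will write into the buffer. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREREAD);
	/* ... start the device, wait for the completion interrupt ... */
	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTREAD);
	/* The CPU may now look at the data. */
#endif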

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */

extern paddr_t physical_start;
extern paddr_t physical_end;

int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	struct arm32_dma_range *dr;
	int error, i;

#ifdef DEBUG_DMA
	printf("dmamem_alloc t=%p size=%lx align=%lx boundary=%lx "
	    "segs=%p nsegs=%x rsegs=%p flags=%x\n", t, size, alignment,
	    boundary, segs, nsegs, rsegs, flags);
#endif

	if ((dr = t->_ranges) != NULL) {
		for (i = 0; i < t->_nranges; i++, dr++) {
			if (dr->dr_len == 0) {
				error = ENOMEM;
				continue;
			}
			error = _bus_dmamem_alloc_range(t, size, alignment,
			    boundary, segs, nsegs, rsegs, flags,
			    trunc_page(dr->dr_sysbase),
			    trunc_page(dr->dr_sysbase + dr->dr_len));
			if (error == 0)
				break;
		}
	} else {
		error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
		    segs, nsegs, rsegs, flags, trunc_page(physical_start),
		    trunc_page(physical_end));
	}

#ifdef DEBUG_DMA
	printf("dmamem_alloc: =%d\n", error);
#endif

	return (error);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

#ifdef DEBUG_DMA
	printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
#endif	/* DEBUG_DMA */

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}
	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	pt_entry_t *ptep/*, pte*/;

#ifdef DEBUG_DMA
	printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
	    segs, nsegs, (unsigned long)size, flags);
#endif	/* DEBUG_DMA */

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
#ifdef DEBUG_DMA
			printf("wiring p%lx to v%lx", addr, va);
#endif	/* DEBUG_DMA */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
			/*
			 * If the memory must remain coherent with the
			 * cache then we must make the memory uncacheable
			 * in order to maintain virtual cache coherency.
			 * We must also guarantee the cache does not already
			 * contain the virtual addresses we are making
			 * uncacheable.
			 */
			if (flags & BUS_DMA_COHERENT) {
				cpu_dcache_wbinv_range(va, PAGE_SIZE);
				cpu_drain_writebuf();
				ptep = vtopte(va);
				*ptep &= ~L2_S_CACHE_MASK;
				PTE_SYNC(ptep);
				tlb_flush();
			}
#ifdef DEBUG_DMA
			ptep = vtopte(va);
			printf(" pte=v%p *pte=%x\n", ptep, *ptep);
#endif	/* DEBUG_DMA */
		}
	}
	pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
	printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
	return (0);
}
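
/*
 * Illustrative sketch (editor's addition): allocating and mapping a
 * DMA-safe descriptor ring.  BUS_DMA_COHERENT makes the kernel mapping
 * uncacheable (see above), so CPU and device see each other's updates
 * without explicit cache maintenance.  "sc" is hypothetical; compiled
 * out.
 */
#if 0
	bus_dma_segment_t seg;
	int rseg, error;
	caddr_t kva;

	error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error == 0)
		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
		    &kva, BUS_DMA_COHERENT | BUS_DMA_NOWAIT);
	/* kva now points at an uncached mapping of the ring. */
#endif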

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

#ifdef DEBUG_DMA
	printf("dmamem_unmap: t=%p kva=%p size=%lx\n", t, kva,
	    (unsigned long)size);
#endif	/* DEBUG_DMA */
#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif	/* DIAGNOSTIC */

	size = round_page(size);
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif	/* DIAGNOSTIC */
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (arm_btop((u_long)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
	struct arm32_dma_range *dr;
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	pd_entry_t *pde;
	pt_entry_t pte;
	int seg;
	pmap_t pmap;
	pt_entry_t *ptep;

#ifdef DEBUG_DMA
	printf("_bus_dmamap_load_buffer(buf=%p, len=%lx, flags=%d, 1st=%d)\n",
	    buf, buflen, flags, first);
#endif	/* DEBUG_DMA */

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		if (__predict_true(pmap == pmap_kernel())) {
			(void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
			if (__predict_false(pmap_pde_section(pde))) {
				curaddr = (*pde & L1_S_FRAME) |
				    (vaddr & L1_S_OFFSET);
				if (*pde & L1_S_CACHE_MASK) {
					map->_dm_flags &=
					    ~ARM32_DMAMAP_COHERENT;
				}
			} else {
				pte = *ptep;
				KDASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV);
				if (__predict_false((pte & L2_TYPE_MASK)
				    == L2_TYPE_L)) {
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					if (pte & L2_L_CACHE_MASK) {
						map->_dm_flags &=
						    ~ARM32_DMAMAP_COHERENT;
					}
				} else {
					curaddr = (pte & L2_S_FRAME) |
					    (vaddr & L2_S_OFFSET);
					if (pte & L2_S_CACHE_MASK) {
						map->_dm_flags &=
						    ~ARM32_DMAMAP_COHERENT;
					}
				}
			}
		} else {
			(void) pmap_extract(pmap, vaddr, &curaddr);
			map->_dm_flags &= ~ARM32_DMAMAP_COHERENT;
		}

		/*
		 * Make sure we're in an allowed DMA range.
		 */
		if (t->_ranges != NULL) {
			/* XXX cache last result? */
			dr = _bus_dma_inrange(t->_ranges, t->_nranges,
			    curaddr);
			if (dr == NULL)
				return (EINVAL);

			/*
			 * In a valid DMA range.  Translate the physical
			 * memory address to an address in the DMA window.
			 */
			curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			    map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			    (map->dm_segs[seg].ds_addr & bmask) ==
			    (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */
	return (0);
}
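
/*
 * Worked example (editor's addition) of the boundary clamp above:
 * with _dm_boundary == 0x10000 (64KB) and curaddr == 0x2f000, bmask is
 * ~0xffff, so baddr = (0x2f000 + 0x10000) & ~0xffff = 0x30000 and the
 * segment is clamped to baddr - curaddr = 0x1000 bytes, keeping it
 * from straddling the 64KB line at 0x30000.
 */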

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

#ifdef DEBUG_DMA
	printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
	    t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
#endif	/* DEBUG_DMA */

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
#ifdef DEBUG_DMA
	printf("alloc: page %lx\n", lastaddr);
#endif	/* DEBUG_DMA */
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc_range");
		}
#endif	/* DIAGNOSTIC */
#ifdef DEBUG_DMA
		printf("alloc: page %lx\n", curaddr);
#endif	/* DEBUG_DMA */
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}

/*
 * Check if a memory region intersects with a DMA range, and return the
 * page-rounded intersection if it does.
 */
int
arm32_dma_range_intersect(struct arm32_dma_range *ranges, int nranges,
    paddr_t pa, psize_t size, paddr_t *pap, psize_t *sizep)
{
	struct arm32_dma_range *dr;
	int i;

	if (ranges == NULL)
		return (0);

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (dr->dr_sysbase <= pa &&
		    pa < (dr->dr_sysbase + dr->dr_len)) {
			/*
			 * Beginning of region intersects with this range.
			 */
			*pap = trunc_page(pa);
			*sizep = round_page(min(pa + size,
			    dr->dr_sysbase + dr->dr_len) - pa);
			return (1);
		}
		if (pa < dr->dr_sysbase && dr->dr_sysbase < (pa + size)) {
			/*
			 * End of region intersects with this range.
			 */
			*pap = trunc_page(dr->dr_sysbase);
			*sizep = round_page(min((pa + size) - dr->dr_sysbase,
			    dr->dr_len));
			return (1);
		}
	}

	/* No intersection found. */
	return (0);
}
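
/*
 * Worked example (editor's addition): with a single range covering
 * sysbase 0x00000000 for 0x02000000 bytes (32MB), a region at
 * pa == 0x01fff000 with size == 0x2000 begins inside the range, so
 * *pap == 0x01fff000 and *sizep == round_page(min(0x02001000,
 * 0x02000000) - 0x01fff000) == 0x1000: only the first page of the
 * region is reachable through this range.
 */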