
src/sys/arch/arm/arm32/bus_dma.c, revision 1.54

/*	$NetBSD: bus_dma.c,v 1.53 2010/11/06 11:46:00 uebayasi Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _ARM32_BUS_DMA_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.53 2010/11/06 11:46:00 uebayasi Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/device.h>

#include <uvm/uvm.h>

#include <sys/bus.h>
#include <machine/cpu.h>

#include <arm/cpufunc.h>

int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct vmspace *, int);
struct arm32_dma_range *_bus_dma_inrange(struct arm32_dma_range *,
	    int, bus_addr_t);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}
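
/*
 * Illustrative sketch, not part of this file: a port that exposes a DMA
 * window would describe it with an arm32_dma_range table in its bus tag,
 * which _bus_dma_inrange() consults above.  The table name and the
 * addresses below are hypothetical.
 */
#if 0
static struct arm32_dma_range example_dma_ranges[] = {
	{
		.dr_sysbase = 0x80000000,	/* CPU view of the RAM */
		.dr_busbase = 0x00000000,	/* device view of same RAM */
		.dr_len     = 0x04000000,	/* 64MB DMA window */
	},
};
/*
 * A page at system address 0x80001000 then translates to bus address
 * 0x00001000 in _bus_dmamap_load_paddr() below.
 */
#endif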

/*
 * Common function to load the specified physical address into the
 * DMA map, coalescing segments and boundary checking as necessary.
 */
static int
_bus_dmamap_load_paddr(bus_dma_tag_t t, bus_dmamap_t map,
    bus_addr_t paddr, bus_size_t size)
{
	bus_dma_segment_t * const segs = map->dm_segs;
	int nseg = map->dm_nsegs;
	bus_addr_t lastaddr = 0xdead;	/* XXX gcc */
	bus_addr_t bmask = ~(map->_dm_boundary - 1);
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if (nseg > 0)
		lastaddr = segs[nseg-1].ds_addr + segs[nseg-1].ds_len;
 again:
	sgsize = size;

	/* Make sure we're in an allowed DMA range. */
	if (t->_ranges != NULL) {
		/* XXX cache last result? */
		const struct arm32_dma_range * const dr =
		    _bus_dma_inrange(t->_ranges, t->_nranges, paddr);
		if (dr == NULL)
			return (EINVAL);

		/*
		 * In a valid DMA range.  Translate the physical
		 * memory address to an address in the DMA window.
		 */
		curaddr = (paddr - dr->dr_sysbase) + dr->dr_busbase;
	} else
		curaddr = paddr;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	if (map->_dm_boundary > 0) {
		bus_addr_t baddr;	/* next boundary address */

		baddr = (curaddr + map->_dm_boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}

	/*
	 * Insert chunk into a segment, coalescing with the
	 * previous segment if possible.
	 */
	if (nseg > 0 && curaddr == lastaddr &&
	    segs[nseg-1].ds_len + sgsize <= map->dm_maxsegsz &&
	    (map->_dm_boundary == 0 ||
	     (segs[nseg-1].ds_addr & bmask) == (curaddr & bmask))) {
		/* coalesce */
		segs[nseg-1].ds_len += sgsize;
	} else if (nseg >= map->_dm_segcnt) {
		return (EFBIG);
	} else {
		/* new segment */
		segs[nseg].ds_addr = curaddr;
		segs[nseg].ds_len = sgsize;
		nseg++;
	}

	lastaddr = curaddr + sgsize;

	paddr += sgsize;
	size -= sgsize;
	if (size > 0)
		goto again;

	map->dm_nsegs = nseg;
	return (0);
}
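
/*
 * Worked example of the boundary clamp above, for concreteness: with
 * _dm_boundary = 0x10000, bmask = ~0xffff.  For curaddr = 0x2c000 and
 * size = 0x8000, baddr = (0x2c000 + 0x10000) & bmask = 0x30000, so
 * sgsize is clamped to 0x30000 - 0x2c000 = 0x4000; the remaining 0x4000
 * bytes are picked up by the next pass through the "again" loop.
 */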

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct arm32_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

#ifdef DEBUG_DMA
	printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx flags=%x\n",
	    t, size, nsegments, maxsegsz, boundary, flags);
#endif /* DEBUG_DMA */

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct arm32_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (struct arm32_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->_dm_origbuf = NULL;
	map->_dm_buftype = ARM32_BUFTYPE_INVALID;
	map->_dm_vmspace = vmspace_kernel();
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
#ifdef DEBUG_DMA
	printf("dmamap_create:map=%p\n", map);
#endif /* DEBUG_DMA */
	return (0);
}
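
/*
 * Illustrative sketch, not part of this file: a typical driver creates a
 * map through the MI bus_dmamap_create(9) wrapper, which lands here on
 * arm32.  "sc" and its fields are hypothetical.
 */
#if 0
	/* One map covering up to MAXPHYS, in at most 8 segments of 64KB. */
	error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 8, 0x10000, 0,
	    BUS_DMA_WAITOK, &sc->sc_dmamap);
	if (error)
		return (error);
#endif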

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_destroy: t=%p map=%p\n", t, map);
#endif /* DEBUG_DMA */

	/*
	 * Explicit unload.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_origbuf = NULL;
	map->_dm_buftype = ARM32_BUFTYPE_INVALID;
	map->_dm_vmspace = NULL;

	free(map, M_DMAMAP);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	int error;
	struct vmspace *vm;

#ifdef DEBUG_DMA
	printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
	    t, map, buf, buflen, p, flags);
#endif /* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	/* _bus_dmamap_load_buffer() clears this if we're not... */
	map->_dm_flags |= ARM32_DMAMAP_COHERENT;

	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->_dm_origbuf = buf;
		map->_dm_buftype = ARM32_BUFTYPE_LINEAR;
		map->_dm_vmspace = vm;
	}
#ifdef DEBUG_DMA
	printf("dmamap_load: error=%d\n", error);
#endif /* DEBUG_DMA */
	return (error);
}
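
/*
 * Illustrative sketch, not part of this file: the canonical load/sync
 * sequence a driver performs around a device-bound transfer of a kernel
 * buffer.  "sc", "buf" and "len" are hypothetical.
 */
#if 0
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, len,
	    NULL /* kernel buffer, not a process's */, BUS_DMA_NOWAIT);
	if (error)
		return (error);
	/* Write back the cache before the device reads the buffer. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
	    BUS_DMASYNC_PREWRITE);
	/* ... start the transfer and wait for completion ... */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
#endif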

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	int error;
	struct mbuf *m;

#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
	    t, map, m0, flags);
#endif /* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif /* DIAGNOSTIC */

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Mbuf chains should almost never have coherent (i.e.
	 * un-cached) mappings, so clear that flag now.
	 */
	map->_dm_flags &= ~ARM32_DMAMAP_COHERENT;

	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		int offset;
		int remainbytes;
		const struct vm_page * const *pgs;
		paddr_t paddr;
		int size;

		if (m->m_len == 0)
			continue;
		switch (m->m_flags & (M_EXT|M_CLUSTER|M_EXT_PAGES)) {
		case M_EXT|M_CLUSTER:
			/* XXX KDASSERT */
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			paddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
			size = m->m_len;
			error = _bus_dmamap_load_paddr(t, map, paddr, size);
			break;

		case M_EXT|M_EXT_PAGES:
			KASSERT(m->m_ext.ext_buf <= m->m_data);
			KASSERT(m->m_data <=
			    m->m_ext.ext_buf + m->m_ext.ext_size);

			offset = (vaddr_t)m->m_data -
			    trunc_page((vaddr_t)m->m_ext.ext_buf);
			remainbytes = m->m_len;

			/* skip uninteresting pages */
			pgs = (const struct vm_page * const *)
			    m->m_ext.ext_pgs + (offset >> PAGE_SHIFT);

			offset &= PAGE_MASK;	/* offset in the first page */

			/* load each page */
			while (remainbytes > 0) {
				const struct vm_page *pg;

				size = MIN(remainbytes, PAGE_SIZE - offset);

				pg = *pgs++;
				KASSERT(pg);
				paddr = VM_PAGE_TO_PHYS(pg) + offset;

				error = _bus_dmamap_load_paddr(t, map,
				    paddr, size);
				if (error)
					break;
				offset = 0;
				remainbytes -= size;
			}
			break;

		case 0:
			paddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			size = m->m_len;
			error = _bus_dmamap_load_paddr(t, map, paddr, size);
			break;

		default:
			error = _bus_dmamap_load_buffer(t, map, m->m_data,
			    m->m_len, vmspace_kernel(), flags);
		}
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->_dm_origbuf = m0;
		map->_dm_buftype = ARM32_BUFTYPE_MBUF;
		map->_dm_vmspace = vmspace_kernel();	/* always kernel */
	}
#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: error=%d\n", error);
#endif /* DEBUG_DMA */
	return (error);
}
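
/*
 * Illustrative sketch, not part of this file: a network driver's transmit
 * path built on the mbuf loader above.  "sc" and "txmap" are hypothetical,
 * and real drivers usually retry or defragment on EFBIG rather than drop.
 */
#if 0
	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m0);		/* drop the packet */
		return (error);
	}
	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	/* hand txmap->dm_segs[0 .. dm_nsegs-1] to the descriptor ring */
#endif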

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	int i, error;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	/* _bus_dmamap_load_buffer() clears this if we're not... */
	map->_dm_flags |= ARM32_DMAMAP_COHERENT;

	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags);

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->_dm_origbuf = uio;
		map->_dm_buftype = ARM32_BUFTYPE_UIO;
		map->_dm_vmspace = uio->uio_vmspace;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_unload: t=%p map=%p\n", t, map);
#endif /* DEBUG_DMA */

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_origbuf = NULL;
	map->_dm_buftype = ARM32_BUFTYPE_INVALID;
	map->_dm_vmspace = NULL;
}

static inline void
_bus_dmamap_sync_linear(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	vaddr_t addr = (vaddr_t) map->_dm_origbuf;

	addr += offset;

	switch (ops) {
	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
		cpu_dcache_wbinv_range(addr, len);
		break;

	case BUS_DMASYNC_PREREAD:
		if (((addr | len) & arm_dcache_align_mask) == 0)
			cpu_dcache_inv_range(addr, len);
		else
			cpu_dcache_wbinv_range(addr, len);
		break;

	case BUS_DMASYNC_PREWRITE:
		cpu_dcache_wb_range(addr, len);
		break;
	}
}

static inline void
_bus_dmamap_sync_mbuf(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct mbuf *m, *m0 = map->_dm_origbuf;
	bus_size_t minlen, moff;
	vaddr_t maddr;

	for (moff = offset, m = m0; m != NULL && len != 0; m = m->m_next) {
		/* Find the beginning mbuf. */
		if (moff >= m->m_len) {
			moff -= m->m_len;
			continue;
		}

		/*
		 * Now at the first mbuf to sync; nail each one until
		 * we have exhausted the length.
		 */
		minlen = m->m_len - moff;
		if (len < minlen)
			minlen = len;

		maddr = mtod(m, vaddr_t);
		maddr += moff;

		/*
		 * We can save a lot of work here if we know the mapping
		 * is read-only at the MMU:
		 *
		 * If a mapping is read-only, no dirty cache blocks will
		 * exist for it.  If a writable mapping was made read-only,
		 * we know any dirty cache lines for the range will have
		 * been cleaned for us already.  Therefore, if the upper
		 * layer can tell us we have a read-only mapping, we can
		 * skip all cache cleaning.
		 *
		 * NOTE: This only works if we know the pmap cleans pages
		 * before making a read-write -> read-only transition.  If
		 * this ever becomes non-true (e.g. Physically Indexed
		 * cache), this will have to be revisited.
		 */
		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			if (! M_ROMAP(m)) {
				cpu_dcache_wbinv_range(maddr, minlen);
				break;
			}
			/* else FALLTHROUGH */

		case BUS_DMASYNC_PREREAD:
			if (((maddr | minlen) & arm_dcache_align_mask) == 0)
				cpu_dcache_inv_range(maddr, minlen);
			else
				cpu_dcache_wbinv_range(maddr, minlen);
			break;

		case BUS_DMASYNC_PREWRITE:
			if (! M_ROMAP(m))
				cpu_dcache_wb_range(maddr, minlen);
			break;
		}
		moff = 0;
		len -= minlen;
	}
}

static inline void
_bus_dmamap_sync_uio(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct uio *uio = map->_dm_origbuf;
	struct iovec *iov;
	bus_size_t minlen, ioff;
	vaddr_t addr;

	for (iov = uio->uio_iov, ioff = offset; len != 0; iov++) {
		/* Find the beginning iovec. */
		if (ioff >= iov->iov_len) {
			ioff -= iov->iov_len;
			continue;
		}

		/*
		 * Now at the first iovec to sync; nail each one until
		 * we have exhausted the length.
		 */
		minlen = iov->iov_len - ioff;
		if (len < minlen)
			minlen = len;

		addr = (vaddr_t) iov->iov_base;
		addr += ioff;

		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			cpu_dcache_wbinv_range(addr, minlen);
			break;

		case BUS_DMASYNC_PREREAD:
			if (((addr | minlen) & arm_dcache_align_mask) == 0)
				cpu_dcache_inv_range(addr, minlen);
			else
				cpu_dcache_wbinv_range(addr, minlen);
			break;

		case BUS_DMASYNC_PREWRITE:
			cpu_dcache_wb_range(addr, minlen);
			break;
		}
		ioff = 0;
		len -= minlen;
	}
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 *
 * This version works for the Virtually Indexed Virtually Tagged
 * cache found on 32-bit ARM processors.
 *
 * XXX Should have separate versions for write-through vs.
 * XXX write-back caches.  We currently assume write-back
 * XXX here, which is not as efficient as it could be for
 * XXX the write-through case.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{

#ifdef DEBUG_DMA
	printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
	    t, map, offset, len, ops);
#endif /* DEBUG_DMA */

	/*
	 * Mixing of PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync: bad offset %lu (map size is %lu)",
		    offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync: bad length");
#endif

	/*
	 * For a virtually-indexed write-back cache, we need
	 * to do the following things:
	 *
	 *	PREREAD -- Invalidate the D-cache.  We do this
	 *	here in case a write-back is required by the back-end.
	 *
	 *	PREWRITE -- Write-back the D-cache.  Note that if
	 *	we are doing a PREREAD|PREWRITE, we can collapse
	 *	the whole thing into a single Wb-Inv.
	 *
	 *	POSTREAD -- Nothing.
	 *
	 *	POSTWRITE -- Nothing.
	 */

	ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (ops == 0)
		return;

	/* Skip cache frobbing if mapping was COHERENT. */
	if (map->_dm_flags & ARM32_DMAMAP_COHERENT) {
		/* Drain the write buffer. */
		cpu_drain_writebuf();
		return;
	}

	/*
	 * If the mapping belongs to a non-kernel vmspace, and the
	 * vmspace has not been active since the last time a full
	 * cache flush was performed, we don't need to do anything.
	 */
	if (__predict_false(!VMSPACE_IS_KERNEL_P(map->_dm_vmspace) &&
	    vm_map_pmap(&map->_dm_vmspace->vm_map)->pm_cstate.cs_cache_d == 0))
		return;

	switch (map->_dm_buftype) {
	case ARM32_BUFTYPE_LINEAR:
		_bus_dmamap_sync_linear(t, map, offset, len, ops);
		break;

	case ARM32_BUFTYPE_MBUF:
		_bus_dmamap_sync_mbuf(t, map, offset, len, ops);
		break;

	case ARM32_BUFTYPE_UIO:
		_bus_dmamap_sync_uio(t, map, offset, len, ops);
		break;

	case ARM32_BUFTYPE_RAW:
		panic("_bus_dmamap_sync: ARM32_BUFTYPE_RAW");
		break;

	case ARM32_BUFTYPE_INVALID:
		panic("_bus_dmamap_sync: ARM32_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", map->_dm_buftype);
		panic("_bus_dmamap_sync");
	}

	/* Drain the write buffer. */
	cpu_drain_writebuf();
}
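
/*
 * Illustrative sketch, not part of this file: how the PRE/POST operations
 * above bracket the two transfer directions.  "sc", "txmap" and "rxmap"
 * are hypothetical.
 */
#if 0
	/* Device will read this memory: write back dirty lines first. */
	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Device will write this memory: invalidate stale lines first. */
	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	/* ... DMA completes ... */
	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
#endif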

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */

extern paddr_t physical_start;
extern paddr_t physical_end;

int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	struct arm32_dma_range *dr;
	int error, i;

#ifdef DEBUG_DMA
	printf("dmamem_alloc t=%p size=%lx align=%lx boundary=%lx "
	    "segs=%p nsegs=%x rsegs=%p flags=%x\n", t, size, alignment,
	    boundary, segs, nsegs, rsegs, flags);
#endif

	if ((dr = t->_ranges) != NULL) {
		error = ENOMEM;
		for (i = 0; i < t->_nranges; i++, dr++) {
			if (dr->dr_len == 0)
				continue;
			error = _bus_dmamem_alloc_range(t, size, alignment,
			    boundary, segs, nsegs, rsegs, flags,
			    trunc_page(dr->dr_sysbase),
			    trunc_page(dr->dr_sysbase + dr->dr_len));
			if (error == 0)
				break;
		}
	} else {
		error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
		    segs, nsegs, rsegs, flags, trunc_page(physical_start),
		    trunc_page(physical_end));
	}

#ifdef DEBUG_DMA
	printf("dmamem_alloc: =%d\n", error);
#endif

	return (error);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

#ifdef DEBUG_DMA
	printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
#endif /* DEBUG_DMA */

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}
	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	pt_entry_t *ptep/*, pte*/;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

#ifdef DEBUG_DMA
	printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
	    segs, nsegs, (unsigned long)size, flags);
#endif /* DEBUG_DMA */

	size = round_page(size);
	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
#ifdef DEBUG_DMA
			printf("wiring p%lx to v%lx", addr, va);
#endif /* DEBUG_DMA */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
			/*
			 * If the memory must remain coherent with the
			 * cache then we must make the memory uncacheable
			 * in order to maintain virtual cache coherency.
			 * We must also guarantee the cache does not already
			 * contain the virtual addresses we are making
			 * uncacheable.
			 */
			if (flags & BUS_DMA_COHERENT) {
				cpu_dcache_wbinv_range(va, PAGE_SIZE);
				cpu_drain_writebuf();
				ptep = vtopte(va);
				*ptep &= ~L2_S_CACHE_MASK;
				PTE_SYNC(ptep);
				tlb_flush();
			}
#ifdef DEBUG_DMA
			ptep = vtopte(va);
			printf(" pte=v%p *pte=%x\n", ptep, *ptep);
#endif /* DEBUG_DMA */
		}
	}
	pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
	printf("dmamem_map: =%p\n", *kvap);
#endif /* DEBUG_DMA */
	return (0);
}
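
/*
 * Illustrative sketch, not part of this file: allocating and mapping an
 * uncached descriptor ring with the two functions above, via the MI
 * wrappers.  "sc" and "RING_SIZE" are hypothetical.
 */
#if 0
	bus_dma_segment_t seg;
	int rseg, error;
	void *kva;

	error = bus_dmamem_alloc(sc->sc_dmat, RING_SIZE, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error == 0)
		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, RING_SIZE,
		    &kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	/* BUS_DMA_COHERENT makes the mapping uncacheable, per the code above. */
#endif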

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DEBUG_DMA
	printf("dmamem_unmap: t=%p kva=%p size=%lx\n", t, kva,
	    (unsigned long)size);
#endif /* DEBUG_DMA */
#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif /* DIAGNOSTIC */

	size = round_page(size);
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif /* DIAGNOSTIC */
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (arm_btop((u_long)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  The segment state already
 * recorded in the map (dm_segs/dm_nsegs) carries over between calls, so
 * this may be invoked repeatedly for multiple-buffer loads.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct vmspace *vm, int flags)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vaddr_t vaddr = (vaddr_t)buf;
	pd_entry_t *pde;
	pt_entry_t pte;
	int error;
	pmap_t pmap;
	pt_entry_t *ptep;

#ifdef DEBUG_DMA
	printf("_bus_dmamap_load_buffer(buf=%p, len=%lx, flags=%d)\n",
                    956:            buf, buflen, flags);
1.1       chris     957: #endif /* DEBUG_DMA */
                    958:
1.48      yamt      959:        pmap = vm_map_pmap(&vm->vm_map);
1.1       chris     960:
1.41      thorpej   961:        while (buflen > 0) {
1.1       chris     962:                /*
                    963:                 * Get the physical address for this segment.
1.17      thorpej   964:                 *
                    965:                 * XXX Don't support checking for coherent mappings
                    966:                 * XXX in user address space.
1.1       chris     967:                 */
1.17      thorpej   968:                if (__predict_true(pmap == pmap_kernel())) {
1.29      scw       969:                        (void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
1.17      thorpej   970:                        if (__predict_false(pmap_pde_section(pde))) {
                    971:                                curaddr = (*pde & L1_S_FRAME) |
                    972:                                    (vaddr & L1_S_OFFSET);
                    973:                                if (*pde & L1_S_CACHE_MASK) {
                    974:                                        map->_dm_flags &=
                    975:                                            ~ARM32_DMAMAP_COHERENT;
                    976:                                }
                    977:                        } else {
1.29      scw       978:                                pte = *ptep;
1.17      thorpej   979:                                KDASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV);
                    980:                                if (__predict_false((pte & L2_TYPE_MASK)
                    981:                                                    == L2_TYPE_L)) {
                    982:                                        curaddr = (pte & L2_L_FRAME) |
                    983:                                            (vaddr & L2_L_OFFSET);
                    984:                                        if (pte & L2_L_CACHE_MASK) {
                    985:                                                map->_dm_flags &=
                    986:                                                    ~ARM32_DMAMAP_COHERENT;
                    987:                                        }
                    988:                                } else {
                    989:                                        curaddr = (pte & L2_S_FRAME) |
                    990:                                            (vaddr & L2_S_OFFSET);
                    991:                                        if (pte & L2_S_CACHE_MASK) {
                    992:                                                map->_dm_flags &=
                    993:                                                    ~ARM32_DMAMAP_COHERENT;
                    994:                                        }
                    995:                                }
                    996:                        }
1.34      briggs    997:                } else {
1.17      thorpej   998:                        (void) pmap_extract(pmap, vaddr, &curaddr);
1.34      briggs    999:                        map->_dm_flags &= ~ARM32_DMAMAP_COHERENT;
                   1000:                }
1.1       chris    1001:
                   1002:                /*
                   1003:                 * Compute the segment size, and adjust counts.
                   1004:                 */
1.27      thorpej  1005:                sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
1.1       chris    1006:                if (buflen < sgsize)
                   1007:                        sgsize = buflen;
                   1008:
1.41      thorpej  1009:                error = _bus_dmamap_load_paddr(t, map, curaddr, sgsize);
                   1010:                if (error)
                   1011:                        return (error);
1.1       chris    1012:
                   1013:                vaddr += sgsize;
                   1014:                buflen -= sgsize;
                   1015:        }
                   1016:
                   1017:        return (0);
                   1018: }
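                          /*
                           * A minimal sketch, assuming the standard bus_dma(9) entry
                           * points that reach _bus_dmamap_load_buffer() above.  The
                           * helper name and its arguments are hypothetical, not part
                           * of this file.
                           */
                          #if 0
                          static int
                          example_dma_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                              bus_size_t len)
                          {
                                  int error;
                          
                                  /* Load a linear kernel buffer (kernel pmap, so NULL proc). */
                                  error = bus_dmamap_load(dmat, map, buf, len, NULL,
                                      BUS_DMA_NOWAIT);
                                  if (error != 0)
                                          return (error);
                          
                                  /* Flush the CPU's view before the device reads the buffer. */
                                  bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_PREWRITE);
                          
                                  /* ... program the device with map->dm_segs ... */
                                  return (0);
                          }
                          #endif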
                   1019:
                   1020: /*
                   1021:  * Allocate physical memory from the given physical address range.
                   1022:  * Called by DMA-safe memory allocation methods.
                   1023:  */
                   1024: int
1.7       thorpej  1025: _bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
                   1026:     bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
1.11      thorpej  1027:     int flags, paddr_t low, paddr_t high)
1.1       chris    1028: {
1.11      thorpej  1029:        paddr_t curaddr, lastaddr;
1.1       chris    1030:        struct vm_page *m;
                   1031:        struct pglist mlist;
                   1032:        int curseg, error;
                   1033:
                   1034: #ifdef DEBUG_DMA
                   1035:        printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
                   1036:            t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
                   1037: #endif /* DEBUG_DMA */
                   1038:
                   1039:        /* Always round the size. */
                   1040:        size = round_page(size);
                   1041:
                   1042:        /*
                   1043:         * Allocate pages from the VM system.
                   1044:         */
                   1045:        error = uvm_pglistalloc(size, low, high, alignment, boundary,
                   1046:            &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
                   1047:        if (error)
                   1048:                return (error);
                   1049:
                   1050:        /*
                   1051:         * Compute the location, size, and number of segments actually
                   1052:         * returned by the VM code.
                   1053:         */
1.42      chris    1054:        m = TAILQ_FIRST(&mlist);
1.1       chris    1055:        curseg = 0;
                   1056:        lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
                   1057:        segs[curseg].ds_len = PAGE_SIZE;
                   1058: #ifdef DEBUG_DMA
                    1059:        printf("alloc: page %lx\n", lastaddr);
                   1060: #endif /* DEBUG_DMA */
1.52      ad       1061:        m = TAILQ_NEXT(m, pageq.queue);
1.1       chris    1062:
1.52      ad       1063:        for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
1.1       chris    1064:                curaddr = VM_PAGE_TO_PHYS(m);
                   1065: #ifdef DIAGNOSTIC
                   1066:                if (curaddr < low || curaddr >= high) {
                    1067:                        printf("uvm_pglistalloc returned nonsensical"
                    1068:                            " address 0x%lx\n", curaddr);
                   1069:                        panic("_bus_dmamem_alloc_range");
                   1070:                }
                   1071: #endif /* DIAGNOSTIC */
                   1072: #ifdef DEBUG_DMA
                   1073:                printf("alloc: page %lx\n", curaddr);
                   1074: #endif /* DEBUG_DMA */
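                                          /*
                                           * Coalesce physically contiguous pages into the
                                           * current segment; any discontiguity starts a
                                           * new segment.
                                           */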
                   1075:                if (curaddr == (lastaddr + PAGE_SIZE))
                   1076:                        segs[curseg].ds_len += PAGE_SIZE;
                   1077:                else {
                   1078:                        curseg++;
                   1079:                        segs[curseg].ds_addr = curaddr;
                   1080:                        segs[curseg].ds_len = PAGE_SIZE;
                   1081:                }
                   1082:                lastaddr = curaddr;
                   1083:        }
                   1084:
                   1085:        *rsegs = curseg + 1;
                   1086:
1.15      thorpej  1087:        return (0);
                   1088: }
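                          /*
                           * A minimal sketch of the consumer path: drivers call
                           * bus_dmamem_alloc()/bus_dmamem_map(), which on this port
                           * bottom out in _bus_dmamem_alloc_range() above.  The helper
                           * name and its arguments are hypothetical.
                           */
                          #if 0
                          static int
                          example_dma_buffer(bus_dma_tag_t dmat, bus_size_t size,
                              bus_dma_segment_t *seg, void **kvap)
                          {
                                  int rseg, error;
                          
                                  /* Ask for one physically contiguous segment of pages. */
                                  error = bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0,
                                      seg, 1, &rseg, BUS_DMA_NOWAIT);
                                  if (error != 0)
                                          return (error);
                          
                                  /* Map the pages into kernel virtual address space. */
                                  return (bus_dmamem_map(dmat, seg, rseg, size, kvap,
                                      BUS_DMA_NOWAIT | BUS_DMA_COHERENT));
                          }
                          #endif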
                   1089:
                   1090: /*
                   1091:  * Check if a memory region intersects with a DMA range, and return the
                   1092:  * page-rounded intersection if it does.
                   1093:  */
                   1094: int
                   1095: arm32_dma_range_intersect(struct arm32_dma_range *ranges, int nranges,
                   1096:     paddr_t pa, psize_t size, paddr_t *pap, psize_t *sizep)
                   1097: {
                   1098:        struct arm32_dma_range *dr;
                   1099:        int i;
                   1100:
                   1101:        if (ranges == NULL)
                   1102:                return (0);
                   1103:
                   1104:        for (i = 0, dr = ranges; i < nranges; i++, dr++) {
                   1105:                if (dr->dr_sysbase <= pa &&
                   1106:                    pa < (dr->dr_sysbase + dr->dr_len)) {
                   1107:                        /*
                   1108:                         * Beginning of region intersects with this range.
                   1109:                         */
                   1110:                        *pap = trunc_page(pa);
                   1111:                        *sizep = round_page(min(pa + size,
                   1112:                            dr->dr_sysbase + dr->dr_len) - pa);
                   1113:                        return (1);
                   1114:                }
                   1115:                if (pa < dr->dr_sysbase && dr->dr_sysbase < (pa + size)) {
                   1116:                        /*
                   1117:                         * End of region intersects with this range.
                   1118:                         */
                   1119:                        *pap = trunc_page(dr->dr_sysbase);
                   1120:                        *sizep = round_page(min((pa + size) - dr->dr_sysbase,
                   1121:                            dr->dr_len));
                   1122:                        return (1);
                   1123:                }
                   1124:        }
                   1125:
                   1126:        /* No intersection found. */
1.1       chris    1127:        return (0);
                   1128: }
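                          /*
                           * A minimal sketch of one way the intersection test above
                           * can be used, assuming a tag whose _ranges/_nranges
                           * describe the DMA-able windows.  The helper name is
                           * hypothetical.
                           */
                          #if 0
                          static bool
                          example_outside_dma_ranges(bus_dma_tag_t t, paddr_t pa, psize_t len)
                          {
                                  paddr_t dpa;
                                  psize_t dlen;
                          
                                  if (t->_nranges == 0)
                                          return (false); /* No ranges: all memory is DMA-able. */
                          
                                  /* Zero means [pa, pa+len) misses every range entirely. */
                                  return (arm32_dma_range_intersect(t->_ranges, t->_nranges,
                                      pa, len, &dpa, &dlen) == 0);
                          }
                          #endif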
