
Annotation of src/sys/arch/arm/arm32/bus_dma.c, Revision 1.1.4.1

1.1.4.1 ! fvdl        1: /*     $NetBSD: bus_dma.c,v 1.3 2001/09/12 09:41:50 rearnsha Exp $     */
1.1       chris       2:
                      3: /*-
                      4:  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
                      5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
                      8:  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
                      9:  * NASA Ames Research Center.
                     10:  *
                     11:  * Redistribution and use in source and binary forms, with or without
                     12:  * modification, are permitted provided that the following conditions
                     13:  * are met:
                     14:  * 1. Redistributions of source code must retain the above copyright
                     15:  *    notice, this list of conditions and the following disclaimer.
                     16:  * 2. Redistributions in binary form must reproduce the above copyright
                     17:  *    notice, this list of conditions and the following disclaimer in the
                     18:  *    documentation and/or other materials provided with the distribution.
                     19:  * 3. All advertising materials mentioning features or use of this software
                     20:  *    must display the following acknowledgement:
                     21:  *     This product includes software developed by the NetBSD
                     22:  *     Foundation, Inc. and its contributors.
                     23:  * 4. Neither the name of The NetBSD Foundation nor the names of its
                     24:  *    contributors may be used to endorse or promote products derived
                     25:  *    from this software without specific prior written permission.
                     26:  *
                     27:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     28:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     29:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     30:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     31:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     32:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     33:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     34:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     35:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     36:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     37:  * POSSIBILITY OF SUCH DAMAGE.
                     38:  */
                     39:
                     40: #include <sys/param.h>
                     41: #include <sys/systm.h>
                     42: #include <sys/kernel.h>
                     43: #include <sys/map.h>
                     44: #include <sys/proc.h>
                     45: #include <sys/buf.h>
                     46: #include <sys/reboot.h>
                     47: #include <sys/conf.h>
                     48: #include <sys/file.h>
                     49: #include <sys/malloc.h>
                     50: #include <sys/mbuf.h>
                     51: #include <sys/vnode.h>
                     52: #include <sys/device.h>
                     53:
                     54: #include <uvm/uvm_extern.h>
                     55:
                     56: #define _ARM32_BUS_DMA_PRIVATE
                     57: #include <machine/bus.h>
                     58:
                     59: #include <machine/cpu.h>
                     60: #include <machine/cpufunc.h>
                     61: #include <machine/psl.h>
                     62:
                     63: int    _bus_dmamap_load_buffer __P((bus_dma_tag_t, bus_dmamap_t, void *,
                     64:            bus_size_t, struct proc *, int, vm_offset_t *, int *, int));
                     65: int    _bus_dma_inrange __P((bus_dma_segment_t *, int, bus_addr_t));
                     66:
                     67: /*
                     68:  * Common function for DMA map creation.  May be called by bus-specific
                     69:  * DMA map creation functions.
                     70:  */
                     71: int
                     72: _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
                     73:        bus_dma_tag_t t;
                     74:        bus_size_t size;
                     75:        int nsegments;
                     76:        bus_size_t maxsegsz;
                     77:        bus_size_t boundary;
                     78:        int flags;
                     79:        bus_dmamap_t *dmamp;
                     80: {
                     81:        struct arm32_bus_dmamap *map;
                     82:        void *mapstore;
                     83:        size_t mapsize;
                     84:
                     85: #ifdef DEBUG_DMA
                     86:        printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx flags=%x\n",
                     87:            t, size, nsegments, maxsegsz, boundary, flags);
                     88: #endif /* DEBUG_DMA */
                     89:
                     90:        /*
                     91:         * Allocate and initialize the DMA map.  The end of the map
                     92:         * is a variable-sized array of segments, so we allocate enough
                     93:         * room for them in one shot.
                     94:         *
                     95:         * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
                     96:         * of ALLOCNOW notifies others that we've reserved these resources,
                     97:         * and they are not to be freed.
                     98:         *
                     99:         * The bus_dmamap_t includes one bus_dma_segment_t, hence
                    100:         * the (nsegments - 1).
                    101:         */
                    102:        mapsize = sizeof(struct arm32_bus_dmamap) +
                    103:            (sizeof(bus_dma_segment_t) * (nsegments - 1));
                    104:        if ((mapstore = malloc(mapsize, M_DMAMAP,
                    105:            (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
                    106:                return (ENOMEM);
                    107:
                    108:        memset(mapstore, 0, mapsize);
                    109:        map = (struct arm32_bus_dmamap *)mapstore;
                    110:        map->_dm_size = size;
                    111:        map->_dm_segcnt = nsegments;
                    112:        map->_dm_maxsegsz = maxsegsz;
                    113:        map->_dm_boundary = boundary;
                    114:        map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
                    115:        map->dm_mapsize = 0;            /* no valid mappings */
                    116:        map->dm_nsegs = 0;
                    117:
                    118:        *dmamp = map;
                    119: #ifdef DEBUG_DMA
                    120:        printf("dmamap_create:map=%p\n", map);
                    121: #endif /* DEBUG_DMA */
                    122:        return (0);
                    123: }
                    124:
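For reference, drivers normally reach the functions above indirectly, through the
machine-independent bus_dmamap_*() interface documented in bus_dma(9).  A rough
sketch of the map lifecycle from a driver's point of view, assuming a hypothetical
example_softc with a DMA tag sc_dmat and a kernel buffer buf of len bytes:

	/*
	 * Hypothetical driver fragment (sketch only): typical bus_dma map
	 * lifecycle.  "example_softc" and "sc_dmat" are illustrative names.
	 */
	static int
	example_dma_xfer(struct example_softc *sc, void *buf, bus_size_t len)
	{
		bus_dmamap_t map;
		int error;

		/* (len / NBPG) + 1 is a rough per-page segment upper bound. */
		error = bus_dmamap_create(sc->sc_dmat, len,
		    (len / NBPG) + 1, len, 0, BUS_DMA_NOWAIT, &map);
		if (error)
			return (error);

		/* Translate the kernel buffer into physical segments. */
		error = bus_dmamap_load(sc->sc_dmat, map, buf, len, NULL,
		    BUS_DMA_NOWAIT);
		if (error) {
			bus_dmamap_destroy(sc->sc_dmat, map);
			return (error);
		}

		/* ... program the device from map->dm_segs[] and start DMA ... */

		bus_dmamap_unload(sc->sc_dmat, map);
		bus_dmamap_destroy(sc->sc_dmat, map);
		return (0);
	}

The mbuf and uio variants below (_bus_dmamap_load_mbuf() and
_bus_dmamap_load_uio()) follow the same pattern, differing only in how the
buffer is described.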
                    125: /*
                    126:  * Common function for DMA map destruction.  May be called by bus-specific
                    127:  * DMA map destruction functions.
                    128:  */
                    129: void
                    130: _bus_dmamap_destroy(t, map)
                    131:        bus_dma_tag_t t;
                    132:        bus_dmamap_t map;
                    133: {
                    134:
                    135: #ifdef DEBUG_DMA
                    136:        printf("dmamap_destroy: t=%p map=%p\n", t, map);
                    137: #endif /* DEBUG_DMA */
                    138: #ifdef DIAGNOSTIC
                    139:        if (map->dm_nsegs > 0)
                    140:                printf("bus_dmamap_destroy() called for map with valid mappings\n");
                    141: #endif /* DIAGNOSTIC */
                     142:        free(map, M_DMAMAP);
                    143: }
                    144:
                    145: /*
                    146:  * Common function for loading a DMA map with a linear buffer.  May
                    147:  * be called by bus-specific DMA map load functions.
                    148:  */
                    149: int
                    150: _bus_dmamap_load(t, map, buf, buflen, p, flags)
                    151:        bus_dma_tag_t t;
                    152:        bus_dmamap_t map;
                    153:        void *buf;
                    154:        bus_size_t buflen;
                    155:        struct proc *p;
                    156:        int flags;
                    157: {
                    158:        vm_offset_t lastaddr;
                    159:        int seg, error;
                    160:
                    161: #ifdef DEBUG_DMA
                    162:        printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
                    163:            t, map, buf, buflen, p, flags);
                    164: #endif /* DEBUG_DMA */
                    165:
                    166:        /*
                    167:         * Make sure that on error condition we return "no valid mappings".
                    168:         */
                    169:        map->dm_mapsize = 0;
                    170:        map->dm_nsegs = 0;
                    171:
                    172:        if (buflen > map->_dm_size)
                    173:                return (EINVAL);
                    174:
                    175:        seg = 0;
                    176:        error = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags,
                    177:            &lastaddr, &seg, 1);
                    178:        if (error == 0) {
                    179:                map->dm_mapsize = buflen;
                    180:                map->dm_nsegs = seg + 1;
                    181:        }
                    182: #ifdef DEBUG_DMA
                    183:        printf("dmamap_load: error=%d\n", error);
                    184: #endif /* DEBUG_DMA */
                    185:        return (error);
                    186: }
                    187:
                    188: /*
                    189:  * Like _bus_dmamap_load(), but for mbufs.
                    190:  */
                    191: int
                    192: _bus_dmamap_load_mbuf(t, map, m0, flags)
                    193:        bus_dma_tag_t t;
                    194:        bus_dmamap_t map;
                    195:        struct mbuf *m0;
                    196:        int flags;
                    197: {
                    198:        vm_offset_t lastaddr;
                    199:        int seg, error, first;
                    200:        struct mbuf *m;
                    201:
                    202: #ifdef DEBUG_DMA
                    203:        printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
                    204:            t, map, m0, flags);
                    205: #endif /* DEBUG_DMA */
                    206:
                    207:        /*
                    208:         * Make sure that on error condition we return "no valid mappings."
                    209:         */
                    210:        map->dm_mapsize = 0;
                    211:        map->dm_nsegs = 0;
                    212:
                    213: #ifdef DIAGNOSTIC
                    214:        if ((m0->m_flags & M_PKTHDR) == 0)
                    215:                panic("_bus_dmamap_load_mbuf: no packet header");
                    216: #endif /* DIAGNOSTIC */
                    217:
                    218:        if (m0->m_pkthdr.len > map->_dm_size)
                    219:                return (EINVAL);
                    220:
                    221:        first = 1;
                    222:        seg = 0;
                    223:        error = 0;
                    224:        for (m = m0; m != NULL && error == 0; m = m->m_next) {
                    225:                error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
                    226:                    NULL, flags, &lastaddr, &seg, first);
                    227:                first = 0;
                    228:        }
                    229:        if (error == 0) {
                    230:                map->dm_mapsize = m0->m_pkthdr.len;
                    231:                map->dm_nsegs = seg + 1;
                    232:        }
                    233: #ifdef DEBUG_DMA
                    234:        printf("dmamap_load_mbuf: error=%d\n", error);
                    235: #endif /* DEBUG_DMA */
                    236:        return (error);
                    237: }
                    238:
                    239: /*
                    240:  * Like _bus_dmamap_load(), but for uios.
                    241:  */
                    242: int
                    243: _bus_dmamap_load_uio(t, map, uio, flags)
                    244:        bus_dma_tag_t t;
                    245:        bus_dmamap_t map;
                    246:        struct uio *uio;
                    247:        int flags;
                    248: {
                    249:        vm_offset_t lastaddr;
                    250:        int seg, i, error, first;
                    251:        bus_size_t minlen, resid;
                    252:        struct proc *p = NULL;
                    253:        struct iovec *iov;
                    254:        caddr_t addr;
                    255:
                    256:        /*
                    257:         * Make sure that on error condition we return "no valid mappings."
                    258:         */
                    259:        map->dm_mapsize = 0;
                    260:        map->dm_nsegs = 0;
                    261:
                    262:        resid = uio->uio_resid;
                    263:        iov = uio->uio_iov;
                    264:
                    265:        if (uio->uio_segflg == UIO_USERSPACE) {
                    266:                p = uio->uio_procp;
                    267: #ifdef DIAGNOSTIC
                    268:                if (p == NULL)
                    269:                        panic("_bus_dmamap_load_uio: USERSPACE but no proc");
                    270: #endif
                    271:        }
                    272:
                    273:        first = 1;
                    274:        seg = 0;
                    275:        error = 0;
                    276:        for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
                    277:                /*
                    278:                 * Now at the first iovec to load.  Load each iovec
                    279:                 * until we have exhausted the residual count.
                    280:                 */
                    281:                minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
                    282:                addr = (caddr_t)iov[i].iov_base;
                    283:
                    284:                error = _bus_dmamap_load_buffer(t, map, addr, minlen,
                    285:                    p, flags, &lastaddr, &seg, first);
                    286:                first = 0;
                    287:
                    288:                resid -= minlen;
                    289:        }
                    290:        if (error == 0) {
                    291:                map->dm_mapsize = uio->uio_resid;
                    292:                map->dm_nsegs = seg + 1;
                    293:        }
                    294:        return (error);
                    295: }
                    296:
                    297: /*
                    298:  * Like _bus_dmamap_load(), but for raw memory allocated with
                    299:  * bus_dmamem_alloc().
                    300:  */
                    301: int
                    302: _bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
                    303:        bus_dma_tag_t t;
                    304:        bus_dmamap_t map;
                    305:        bus_dma_segment_t *segs;
                    306:        int nsegs;
                    307:        bus_size_t size;
                    308:        int flags;
                    309: {
                    310:
                    311:        panic("_bus_dmamap_load_raw: not implemented");
                    312: }
                    313:
                    314: /*
                    315:  * Common function for unloading a DMA map.  May be called by
                    316:  * bus-specific DMA map unload functions.
                    317:  */
                    318: void
                    319: _bus_dmamap_unload(t, map)
                    320:        bus_dma_tag_t t;
                    321:        bus_dmamap_t map;
                    322: {
                    323:
                    324: #ifdef DEBUG_DMA
                    325:        printf("dmamap_unload: t=%p map=%p\n", t, map);
                    326: #endif /* DEBUG_DMA */
                    327:
                    328:        /*
                    329:         * No resources to free; just mark the mappings as
                    330:         * invalid.
                    331:         */
                    332:        map->dm_mapsize = 0;
                    333:        map->dm_nsegs = 0;
                    334: }
                    335:
                    336: /*
                    337:  * Common function for DMA map synchronization.  May be called
                    338:  * by bus-specific DMA map synchronization functions.
                    339:  */
                    340: void
                    341: _bus_dmamap_sync(t, map, offset, len, ops)
                    342:        bus_dma_tag_t t;
                    343:        bus_dmamap_t map;
                    344:        bus_addr_t offset;
                    345:        bus_size_t len;
                    346:        int ops;
                    347: {
                    348:        int loop;
                    349:        bus_addr_t vaddr;
                    350:        bus_size_t length;
                    351:        bus_dma_segment_t *seg;
                    352:
                    353: #ifdef DEBUG_DMA
                    354:        printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
                    355:            t, map, offset, len, ops);
                    356: #endif /* DEBUG_DMA */
                    357:
                    358:        if (ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) {
                    359:                /* Quick exit if length is zero */
                    360:                if (len == 0)
                    361:                        return;
                    362:
                    363:                /* Find the segment pointed to by offset */
                    364:                loop = map->dm_nsegs;
                    365:                seg = &map->dm_segs[0];
                    366:                while (offset >= seg->ds_len) {
                    367:                        offset -= seg->ds_len;
                    368:                        ++seg;
                    369:                        /* Got any more segments ? */
                    370:                        --loop;
                    371:                        if (loop == 0)
                    372:                                return;
                    373:                }
                    374:
                    375:                /* Set the starting address and maximum length */
                    376:                vaddr = seg->_ds_vaddr + offset;
                    377:                length = seg->ds_len - offset;
                    378:                do {
                    379:                        /* Limit the length if not the whole segment */
                    380:                        if (len < length)
                    381:                                length = len;
                    382: #ifdef DEBUG_DMA
                    383:                        printf("syncing: %lx,%lx\n", vaddr, length);
                    384: #endif /* DEBUG_DMA */
                    385:                        /* Actually sync the cache */
                    386:                        cpu_cache_purgeD_rng(vaddr, length);
                    387:
                    388:                        /* Adjust the length */
                    389:                        len -= length;
                    390:
                    391:                        /* sync complete ? */
                    392:                        if (len > 0) {
                    393:                                /* Got any more segments ? */
                    394:                                --loop;
                    395:                                if (loop == 0)
                    396:                                        return;
                    397:                                ++seg;
                    398:                                vaddr = seg->_ds_vaddr;
                    399:                                length = seg->ds_len;
                    400:                        }
                    401:                } while (len > 0);
                    402:
                    403:                cpu_drain_writebuf();
                    404:        }
                    405: }
                    406:
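Note that in this implementation only the PRE operations do any work: the
affected range is purged from the data cache and the write buffer is drained
before the transfer, while the POST operations fall straight through.  A rough
sketch of how a driver brackets a transfer with bus_dmamap_sync(), assuming a
loaded map and a hypothetical sc->sc_dmat tag:

	/* Hypothetical fragment (sketch only): sync around one DMA transfer. */

	/* Before handing the buffer to the device. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* ... start the device; wait for the completion interrupt ... */

	/* After the device is done, before the CPU touches the data again. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);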
                    407: /*
                    408:  * Common function for DMA-safe memory allocation.  May be called
                    409:  * by bus-specific DMA memory allocation functions.
                    410:  */
                    411:
                    412: extern vm_offset_t physical_start;
                    413: extern vm_offset_t physical_freestart;
                    414: extern vm_offset_t physical_freeend;
                    415: extern vm_offset_t physical_end;
                    416:
                    417: int
                    418: _bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
                    419:        bus_dma_tag_t t;
                    420:        bus_size_t size, alignment, boundary;
                    421:        bus_dma_segment_t *segs;
                    422:        int nsegs;
                    423:        int *rsegs;
                    424:        int flags;
                    425: {
                    426:        int error;
                    427: #ifdef DEBUG_DMA
                    428:        printf("dmamem_alloc t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x\n",
                    429:            t, size, alignment, boundary, segs, nsegs, rsegs, flags);
                    430: #endif /* DEBUG_DMA */
                    431:        error =  (_bus_dmamem_alloc_range(t, size, alignment, boundary,
                    432:            segs, nsegs, rsegs, flags, trunc_page(physical_start), trunc_page(physical_end)));
                    433: #ifdef DEBUG_DMA
                    434:        printf("dmamem_alloc: =%d\n", error);
                    435: #endif /* DEBUG_DMA */
                    436:        return(error);
                    437: }
                    438:
                    439: /*
                    440:  * Common function for freeing DMA-safe memory.  May be called by
                    441:  * bus-specific DMA memory free functions.
                    442:  */
                    443: void
                    444: _bus_dmamem_free(t, segs, nsegs)
                    445:        bus_dma_tag_t t;
                    446:        bus_dma_segment_t *segs;
                    447:        int nsegs;
                    448: {
                    449:        struct vm_page *m;
                    450:        bus_addr_t addr;
                    451:        struct pglist mlist;
                    452:        int curseg;
                    453:
                    454: #ifdef DEBUG_DMA
                    455:        printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
                    456: #endif /* DEBUG_DMA */
                    457:
                    458:        /*
                    459:         * Build a list of pages to free back to the VM system.
                    460:         */
                    461:        TAILQ_INIT(&mlist);
                    462:        for (curseg = 0; curseg < nsegs; curseg++) {
                    463:                for (addr = segs[curseg].ds_addr;
                    464:                    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
                    465:                    addr += PAGE_SIZE) {
                    466:                        m = PHYS_TO_VM_PAGE(addr);
                    467:                        TAILQ_INSERT_TAIL(&mlist, m, pageq);
                    468:                }
                    469:        }
                    470:        uvm_pglistfree(&mlist);
                    471: }
                    472:
                    473: /*
                    474:  * Common function for mapping DMA-safe memory.  May be called by
                    475:  * bus-specific DMA memory map functions.
                    476:  */
                    477: int
                    478: _bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
                    479:        bus_dma_tag_t t;
                    480:        bus_dma_segment_t *segs;
                    481:        int nsegs;
                    482:        size_t size;
                    483:        caddr_t *kvap;
                    484:        int flags;
                    485: {
                    486:        vm_offset_t va;
                    487:        bus_addr_t addr;
                    488:        int curseg;
                    489:        pt_entry_t *ptep/*, pte*/;
                    490:
                    491: #ifdef DEBUG_DMA
1.1.4.1 ! fvdl      492:        printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
        !           493:            segs, nsegs, (unsigned long)size, flags);
1.1       chris     494: #endif /* DEBUG_DMA */
                    495:
                    496:        size = round_page(size);
                    497:        va = uvm_km_valloc(kernel_map, size);
                    498:
                    499:        if (va == 0)
                    500:                return (ENOMEM);
                    501:
                    502:        *kvap = (caddr_t)va;
                    503:
                    504:        for (curseg = 0; curseg < nsegs; curseg++) {
                    505:                for (addr = segs[curseg].ds_addr;
                    506:                    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
                    507:                    addr += NBPG, va += NBPG, size -= NBPG) {
                    508: #ifdef DEBUG_DMA
                    509:                        printf("wiring p%lx to v%lx", addr, va);
                    510: #endif /* DEBUG_DMA */
                    511:                        if (size == 0)
                    512:                                panic("_bus_dmamem_map: size botch");
                    513:                        pmap_enter(pmap_kernel(), va, addr,
                    514:                            VM_PROT_READ | VM_PROT_WRITE,
                    515:                            VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
                    516:                        /*
                    517:                         * If the memory must remain coherent with the
                    518:                         * cache then we must make the memory uncacheable
                    519:                         * in order to maintain virtual cache coherency.
                     520:         * We must also guarantee the cache does not already
                     521:         * contain the virtual addresses we are making
                    522:                         * uncacheable.
                    523:                         */
                    524:                        if (flags & BUS_DMA_COHERENT) {
                    525:                                cpu_cache_purgeD_rng(va, NBPG);
                    526:                                cpu_drain_writebuf();
                    527:                                ptep = vtopte(va);
                     528:                *ptep = ((*ptep) & ~(PT_C | PT_B));
                    529:                                tlb_flush();
                    530:                        }
                    531: #ifdef DEBUG_DMA
                    532:                        ptep = vtopte(va);
                    533:                        printf(" pte=v%p *pte=%x\n", ptep, *ptep);
                    534: #endif /* DEBUG_DMA */
                    535:                }
                    536:        }
1.1.4.1 ! fvdl      537:        pmap_update(pmap_kernel());
1.1       chris     538: #ifdef DEBUG_DMA
                    539:        printf("dmamem_map: =%p\n", *kvap);
                    540: #endif /* DEBUG_DMA */
                    541:        return (0);
                    542: }
                    543:
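Together with _bus_dmamem_alloc() and _bus_dmamem_free() above, this implements
the usual allocate/map/unmap/free sequence for DMA-safe memory.  A rough sketch
of driver usage, assuming a hypothetical sc->sc_dmat tag and a single segment
for brevity:

	/* Hypothetical fragment (sketch only): DMA-safe memory lifecycle. */
	bus_dma_segment_t seg;
	caddr_t kva;
	int rseg, error;

	error = bus_dmamem_alloc(sc->sc_dmat, size, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT);
	if (error)
		return (error);

	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size, &kva,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		return (error);
	}

	/* ... use kva; the physical address is seg.ds_addr ... */

	bus_dmamem_unmap(sc->sc_dmat, kva, size);
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);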
                    544: /*
                    545:  * Common function for unmapping DMA-safe memory.  May be called by
                    546:  * bus-specific DMA memory unmapping functions.
                    547:  */
                    548: void
                    549: _bus_dmamem_unmap(t, kva, size)
                    550:        bus_dma_tag_t t;
                    551:        caddr_t kva;
                    552:        size_t size;
                    553: {
                    554:
                    555: #ifdef DEBUG_DMA
1.1.4.1 ! fvdl      556:        printf("dmamem_unmap: t=%p kva=%p size=%lx\n", t, kva,
        !           557:            (unsigned long)size);
1.1       chris     558: #endif /* DEBUG_DMA */
                    559: #ifdef DIAGNOSTIC
                    560:        if ((u_long)kva & PGOFSET)
                    561:                panic("_bus_dmamem_unmap");
                    562: #endif /* DIAGNOSTIC */
                    563:
                    564:        size = round_page(size);
                    565:        uvm_km_free(kernel_map, (vm_offset_t)kva, size);
                    566: }
                    567:
                    568: /*
                     569:  * Common function for mmap(2)'ing DMA-safe memory.  May be called by
                    570:  * bus-specific DMA mmap(2)'ing functions.
                    571:  */
                    572: paddr_t
                    573: _bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
                    574:        bus_dma_tag_t t;
                    575:        bus_dma_segment_t *segs;
                    576:        int nsegs;
                    577:        off_t off;
                    578:        int prot, flags;
                    579: {
                    580:        int i;
                    581:
                    582:        for (i = 0; i < nsegs; i++) {
                    583: #ifdef DIAGNOSTIC
                    584:                if (off & PGOFSET)
                    585:                        panic("_bus_dmamem_mmap: offset unaligned");
                    586:                if (segs[i].ds_addr & PGOFSET)
                    587:                        panic("_bus_dmamem_mmap: segment unaligned");
                    588:                if (segs[i].ds_len & PGOFSET)
                    589:                        panic("_bus_dmamem_mmap: segment size not multiple"
                    590:                            " of page size");
                    591: #endif /* DIAGNOSTIC */
                    592:                if (off >= segs[i].ds_len) {
                    593:                        off -= segs[i].ds_len;
                    594:                        continue;
                    595:                }
                    596:
                    597:                return (arm_byte_to_page((u_long)segs[i].ds_addr + off));
                    598:        }
                    599:
                    600:        /* Page not found. */
                    601:        return (-1);
                    602: }
                    603:
                    604: /**********************************************************************
                    605:  * DMA utility functions
                    606:  **********************************************************************/
                    607:
                    608: /*
                    609:  * Utility function to load a linear buffer.  lastaddrp holds state
                    610:  * between invocations (for multiple-buffer loads).  segp contains
                     611:  * the starting segment on entrance, and the ending segment on exit.
                    612:  * first indicates if this is the first invocation of this function.
                    613:  */
                    614: int
                    615: _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags, lastaddrp, segp, first)
                    616:        bus_dma_tag_t t;
                    617:        bus_dmamap_t map;
                    618:        void *buf;
                    619:        bus_size_t buflen;
                    620:        struct proc *p;
                    621:        int flags;
                    622:        vm_offset_t *lastaddrp;
                    623:        int *segp;
                    624:        int first;
                    625: {
                    626:        bus_size_t sgsize;
                    627:        bus_addr_t curaddr, lastaddr, baddr, bmask;
                    628:        vm_offset_t vaddr = (vm_offset_t)buf;
                    629:        int seg;
                    630:        pmap_t pmap;
                    631:
                    632: #ifdef DEBUG_DMA
                     633:        printf("_bus_dmamap_load_buffer(buf=%p, len=%lx, flags=%d, 1st=%d)\n",
                    634:            buf, buflen, flags, first);
                    635: #endif /* DEBUG_DMA */
                    636:
                    637:        if (p != NULL)
                    638:                pmap = p->p_vmspace->vm_map.pmap;
                    639:        else
                    640:                pmap = pmap_kernel();
                    641:
                    642:        lastaddr = *lastaddrp;
                    643:        bmask  = ~(map->_dm_boundary - 1);
                    644:
                    645:        for (seg = *segp; buflen > 0; ) {
                    646:                /*
                    647:                 * Get the physical address for this segment.
                    648:                 */
                    649:                (void) pmap_extract(pmap, (vaddr_t)vaddr, &curaddr);
                    650:
                    651:                /*
                    652:                 * Make sure we're in an allowed DMA range.
                    653:                 */
                    654:                if (t->_ranges != NULL &&
                    655:                    _bus_dma_inrange(t->_ranges, t->_nranges, curaddr) == 0)
                    656:                        return (EINVAL);
                    657:
                    658:                /*
                    659:                 * Compute the segment size, and adjust counts.
                    660:                 */
                    661:                sgsize = NBPG - ((u_long)vaddr & PGOFSET);
                    662:                if (buflen < sgsize)
                    663:                        sgsize = buflen;
                    664:
                    665:                /*
                    666:                 * Make sure we don't cross any boundaries.
                    667:                 */
                    668:                if (map->_dm_boundary > 0) {
                    669:                        baddr = (curaddr + map->_dm_boundary) & bmask;
                    670:                        if (sgsize > (baddr - curaddr))
                    671:                                sgsize = (baddr - curaddr);
                    672:                }
                    673:
                    674:                /*
                    675:                 * Insert chunk into a segment, coalescing with
                    676:                 * previous segment if possible.
                    677:                 */
                    678:                if (first) {
                    679:                        map->dm_segs[seg].ds_addr = curaddr;
                    680:                        map->dm_segs[seg].ds_len = sgsize;
                    681:                        map->dm_segs[seg]._ds_vaddr = vaddr;
                    682:                        first = 0;
                    683:                } else {
                    684:                        if (curaddr == lastaddr &&
                    685:                            (map->dm_segs[seg].ds_len + sgsize) <=
                    686:                             map->_dm_maxsegsz &&
                    687:                            (map->_dm_boundary == 0 ||
                    688:                             (map->dm_segs[seg].ds_addr & bmask) ==
                    689:                             (curaddr & bmask)))
                    690:                                map->dm_segs[seg].ds_len += sgsize;
                    691:                        else {
                    692:                                if (++seg >= map->_dm_segcnt)
                    693:                                        break;
                    694:                                map->dm_segs[seg].ds_addr = curaddr;
                    695:                                map->dm_segs[seg].ds_len = sgsize;
                    696:                                map->dm_segs[seg]._ds_vaddr = vaddr;
                    697:                        }
                    698:                }
                    699:
                    700:                lastaddr = curaddr + sgsize;
                    701:                vaddr += sgsize;
                    702:                buflen -= sgsize;
                    703:        }
                    704:
                    705:        *segp = seg;
                    706:        *lastaddrp = lastaddr;
                    707:
                    708:        /*
                    709:         * Did we fit?
                    710:         */
                    711:        if (buflen != 0)
                    712:                return (EFBIG);         /* XXX better return value here? */
                    713:        return (0);
                    714: }
                    715:
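The boundary clipping above is easiest to follow with concrete numbers
(hypothetical values, assuming a 64KB DMA boundary):

	/* Sketch: how a segment is clipped at a 64KB boundary. */
	bus_size_t boundary = 0x10000;			/* _dm_boundary */
	bus_addr_t bmask = ~(boundary - 1);		/* 0xffff0000 */
	bus_addr_t curaddr = 0x3ff00;			/* current physical addr */
	bus_addr_t baddr = (curaddr + boundary) & bmask;	/* 0x40000 */
	bus_size_t sgsize = baddr - curaddr;		/* 0x100 bytes remain */

so the segment is ended 0x100 bytes in, exactly at the 64KB line, and the next
chunk starts a new segment on the far side of the boundary.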
                    716: /*
                    717:  * Check to see if the specified page is in an allowed DMA range.
                    718:  */
                    719: int
                    720: _bus_dma_inrange(ranges, nranges, curaddr)
                    721:        bus_dma_segment_t *ranges;
                    722:        int nranges;
                    723:        bus_addr_t curaddr;
                    724: {
                    725:        bus_dma_segment_t *ds;
                    726:        int i;
                    727:
                    728:        for (i = 0, ds = ranges; i < nranges; i++, ds++) {
                    729:                if (curaddr >= ds->ds_addr &&
                    730:                    round_page(curaddr) <= (ds->ds_addr + ds->ds_len))
                    731:                        return (1);
                    732:        }
                    733:
                    734:        return (0);
                    735: }
                    736:
                    737: /*
                    738:  * Allocate physical memory from the given physical address range.
                    739:  * Called by DMA-safe memory allocation methods.
                    740:  */
                    741: int
                    742: _bus_dmamem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs,
                    743:     flags, low, high)
                    744:        bus_dma_tag_t t;
                    745:        bus_size_t size, alignment, boundary;
                    746:        bus_dma_segment_t *segs;
                    747:        int nsegs;
                    748:        int *rsegs;
                    749:        int flags;
                    750:        vm_offset_t low;
                    751:        vm_offset_t high;
                    752: {
                    753:        vm_offset_t curaddr, lastaddr;
                    754:        struct vm_page *m;
                    755:        struct pglist mlist;
                    756:        int curseg, error;
                    757:
                    758: #ifdef DEBUG_DMA
                    759:        printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
                    760:            t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
                    761: #endif /* DEBUG_DMA */
                    762:
                    763:        /* Always round the size. */
                    764:        size = round_page(size);
                    765:
                    766:        /*
                    767:         * Allocate pages from the VM system.
                    768:         */
                    769:        TAILQ_INIT(&mlist);
                    770:        error = uvm_pglistalloc(size, low, high, alignment, boundary,
                    771:            &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
                    772:        if (error)
                    773:                return (error);
                    774:
                    775:        /*
                    776:         * Compute the location, size, and number of segments actually
                    777:         * returned by the VM code.
                    778:         */
                    779:        m = mlist.tqh_first;
                    780:        curseg = 0;
                    781:        lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
                    782:        segs[curseg].ds_len = PAGE_SIZE;
                    783: #ifdef DEBUG_DMA
                    784:                printf("alloc: page %lx\n", lastaddr);
                    785: #endif /* DEBUG_DMA */
                    786:        m = m->pageq.tqe_next;
                    787:
                    788:        for (; m != NULL; m = m->pageq.tqe_next) {
                    789:                curaddr = VM_PAGE_TO_PHYS(m);
                    790: #ifdef DIAGNOSTIC
                    791:                if (curaddr < low || curaddr >= high) {
                    792:                        printf("uvm_pglistalloc returned non-sensical"
                    793:                            " address 0x%lx\n", curaddr);
                    794:                        panic("_bus_dmamem_alloc_range");
                    795:                }
                    796: #endif /* DIAGNOSTIC */
                    797: #ifdef DEBUG_DMA
                    798:                printf("alloc: page %lx\n", curaddr);
                    799: #endif /* DEBUG_DMA */
                    800:                if (curaddr == (lastaddr + PAGE_SIZE))
                    801:                        segs[curseg].ds_len += PAGE_SIZE;
                    802:                else {
                    803:                        curseg++;
                    804:                        segs[curseg].ds_addr = curaddr;
                    805:                        segs[curseg].ds_len = PAGE_SIZE;
                    806:                }
                    807:                lastaddr = curaddr;
                    808:        }
                    809:
                    810:        *rsegs = curseg + 1;
                    811:
                    812:        return (0);
                    813: }
