

Diff for /src/sys/arch/arm/arm32/bus_dma.c between versions 1.75 and 1.76

--- bus_dma.c  version 1.75  2013/02/14 01:12:39
+++ bus_dma.c  version 1.76  2013/02/14 08:07:35
@@ v1.75 line 55 / v1.76 line 55 @@ __KERNEL_RCSID(0, "$NetBSD$");
 
 #include <arm/cpufunc.h>
 
+#ifdef BUSDMA_COUNTERS
 static struct evcnt bus_dma_creates =
         EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "creates");
 static struct evcnt bus_dma_bounced_creates =
@@ v1.75 line 75 / v1.76 line 76 @@ static struct evcnt bus_dma_bounced_dest
         EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced destroys");
 static struct evcnt bus_dma_destroys =
         EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "destroys");
+static struct evcnt bus_dma_sync_prereadwrite =
+        EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync prereadwrite");
+static struct evcnt bus_dma_sync_preread_begin =
+        EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread begin");
+static struct evcnt bus_dma_sync_preread =
+        EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread");
+static struct evcnt bus_dma_sync_preread_tail =
+        EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread tail");
+static struct evcnt bus_dma_sync_prewrite =
+        EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync prewrite");
+static struct evcnt bus_dma_sync_postread =
+        EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postread");
+static struct evcnt bus_dma_sync_postreadwrite =
+        EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postreadwrite");
+static struct evcnt bus_dma_sync_postwrite =
+        EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postwrite");
+
 EVCNT_ATTACH_STATIC(bus_dma_creates);
 EVCNT_ATTACH_STATIC(bus_dma_bounced_creates);
@@ v1.75 line 86 / v1.76 line 103 @@ EVCNT_ATTACH_STATIC(bus_dma_unloads);
 EVCNT_ATTACH_STATIC(bus_dma_bounced_unloads);
 EVCNT_ATTACH_STATIC(bus_dma_destroys);
 EVCNT_ATTACH_STATIC(bus_dma_bounced_destroys);
+EVCNT_ATTACH_STATIC(bus_dma_sync_prereadwrite);
+EVCNT_ATTACH_STATIC(bus_dma_sync_preread_begin);
+EVCNT_ATTACH_STATIC(bus_dma_sync_preread);
+EVCNT_ATTACH_STATIC(bus_dma_sync_preread_tail);
+EVCNT_ATTACH_STATIC(bus_dma_sync_prewrite);
+EVCNT_ATTACH_STATIC(bus_dma_sync_postread);
+EVCNT_ATTACH_STATIC(bus_dma_sync_postreadwrite);
+EVCNT_ATTACH_STATIC(bus_dma_sync_postwrite);
+
 #define STAT_INCR(x)    (bus_dma_ ## x.ev_count++)
+#else
+#define STAT_INCR(x)    /*(bus_dma_ ## x.ev_count++)*/
+#endif
 
 int     _bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
             bus_size_t, struct vmspace *, int);
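
These statistics use the kernel's evcnt(9) event-counter facility: a statically initialized struct evcnt is attached via EVCNT_ATTACH_STATIC and its counts show up in vmstat -e output under the "busdma" group. A minimal sketch of the same pattern, with a hypothetical counter name:

    #include <sys/evcnt.h>

    /* Hypothetical counter following the pattern used above. */
    static struct evcnt example_counter =
            EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "example");
    EVCNT_ATTACH_STATIC(example_counter);

    /* With BUSDMA_COUNTERS defined, STAT_INCR(example) expands to: */
    example_counter.ev_count++;
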
@@ v1.75 line 724 / v1.76 line 752 @@ _bus_dmamap_sync_segment(vaddr_t va, pad
         switch (ops) {
         case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
                 if (!readonly_p) {
+                        STAT_INCR(sync_prereadwrite);
                         cpu_dcache_wbinv_range(va, len);
                         cpu_sdcache_wbinv_range(va, pa, len);
                         break;
@@ v1.75 line 734 / v1.76 line 763 @@ _bus_dmamap_sync_segment(vaddr_t va, pad
                 const size_t line_size = arm_dcache_align;
                 const size_t line_mask = arm_dcache_align_mask;
                 vsize_t misalignment = va & line_mask;
+                STAT_INCR(sync_preread);
                 if (misalignment) {
                         va -= misalignment;
                         pa -= misalignment;
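
The PREREAD path below this hunk rounds the buffer out to cache-line boundaries before invalidating, because partially owned lines at the head and tail must be written back rather than discarded; the "sync preread begin" and "sync preread tail" counters added above presumably count those cases. A hedged sketch of the alignment arithmetic, reusing line_mask (arm_dcache_align_mask) from the surrounding code:

    vsize_t misalignment = va & line_mask;  /* offset into the first line */
    va  -= misalignment;                    /* round the start down */
    len += misalignment;
    len  = (len + line_mask) & ~line_mask;  /* round the length up */
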
@@ v1.75 line 762 / v1.76 line 792 @@ _bus_dmamap_sync_segment(vaddr_t va, pad
         }
 
         case BUS_DMASYNC_PREWRITE:
+                STAT_INCR(sync_prewrite);
                 cpu_dcache_wb_range(va, len);
                 cpu_sdcache_wb_range(va, pa, len);
                 break;
@@ v1.75 line 774 / v1.76 line 805 @@ _bus_dmamap_sync_segment(vaddr_t va, pad
          * have to worry about having to write back their contents.
          */
         case BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE:
+                STAT_INCR(sync_postreadwrite);
+                cpu_dcache_inv_range(va, len);
+                cpu_sdcache_inv_range(va, pa, len);
+                break;
         case BUS_DMASYNC_POSTREAD:
+                STAT_INCR(sync_postread);
                 cpu_dcache_inv_range(va, len);
                 cpu_sdcache_inv_range(va, pa, len);
                 break;
@@ v1.75 line 975 / v1.76 line 1011 @@ _bus_dmamap_sync(bus_dma_tag_t t, bus_dm
         const int post_ops = 0;
 #endif
         if (!bouncing && pre_ops == 0 && post_ops == BUS_DMASYNC_POSTWRITE) {
+                STAT_INCR(sync_postwrite);
                 return;
         }
         KASSERTMSG(bouncing || pre_ops != 0 || (post_ops & BUS_DMASYNC_POSTREAD),
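
For context, drivers reach this code through the machine-independent bus_dma(9) interface, bracketing each transfer with pre- and post-sync calls. A minimal sketch of a device-to-memory (read) transfer, where sc, map, and len are hypothetical driver state and the map is assumed already created and loaded:

    /* Hand the buffer to the device before starting DMA. */
    bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREREAD);

    /* ... program the device, wait for the transfer to complete ... */

    /* Reclaim the buffer for the CPU; stale cache lines are invalidated. */
    bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTREAD);
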
@@ v1.75 line 1076 / v1.76 line 1113 @@ _bus_dmamap_sync(bus_dma_tag_t t, bus_dm
 
 #ifdef _ARM32_NEED_BUS_DMA_BOUNCE
   bounce_it:
-        if ((ops & BUS_DMASYNC_POSTREAD) == 0
-            || (map->_dm_flags & _BUS_DMAMAP_IS_BOUNCING) == 0)
+        if (!bouncing || (ops & BUS_DMASYNC_POSTREAD) == 0)
                 return;
 
         struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
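
When the map is bouncing, the code after the bounce_it label copies DMA'd data from the bounce buffer back to the caller's buffer on POSTREAD. A rough sketch of the linear-buffer case only (the real code also handles mbuf and uio buffers; id_origbuf is assumed here to be the cookie field holding the original buffer pointer, and offset/len are illustrative):

    memcpy((char *)cookie->id_origbuf + offset,
        (char *)cookie->id_bouncebuf + offset, len);
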
@@ v1.75 line 1496 / v1.76 line 1532 @@ _bus_dmamem_alloc_range(bus_dma_tag_t t,
         struct pglist mlist;
         int curseg, error;
 
+        KASSERTMSG(boundary == 0 || (boundary & (boundary-1)) == 0,
+            "invalid boundary %#lx", boundary);
+
 #ifdef DEBUG_DMA
         printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
             t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
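
The new assertion accepts a boundary that is zero or a power of two: a power of two has exactly one bit set, so clearing its lowest set bit with boundary & (boundary-1) yields zero. For example:

    0x1000 & 0x0fff == 0        /* power of two: passes */
    0x1800 & 0x17ff == 0x1000   /* not a power of two: asserts */
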
@@ v1.75 line 1505 / v1.76 line 1544 @@ _bus_dmamem_alloc_range(bus_dma_tag_t t,
         size = round_page(size);
 
         /*
+         * We accept boundaries < size, splitting in multiple segments
+         * if needed. uvm_pglistalloc does not, so compute an appropriate
+         * boundary: next power of 2 >= size
+         */
+        bus_size_t uboundary = boundary;
+        if (uboundary <= PAGE_SIZE) {
+                uboundary = 0;
+        } else {
+                while (uboundary < size) {
+                        uboundary <<= 1;
+                }
+        }
+
+        /*
          * Allocate pages from the VM system.
          */
         error = uvm_pglistalloc(size, low, high, alignment, boundary,
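
The loop above computes the next power of two >= size by repeated doubling, which preserves the power-of-two property checked at function entry. A worked example, assuming boundary = 0x2000 and size = 0x5000 after round_page():

    uboundary: 0x2000 -> 0x4000 -> 0x8000   /* stops once 0x8000 >= 0x5000 */
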
@@ v1.75 line 1527 / v1.76 line 1580 @@ _bus_dmamem_alloc_range(bus_dma_tag_t t,
 
         for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
                 curaddr = VM_PAGE_TO_PHYS(m);
-#ifdef DIAGNOSTIC
-                if (curaddr < low || curaddr >= high) {
-                        printf("uvm_pglistalloc returned non-sensical"
-                            " address 0x%lx\n", curaddr);
-                        panic("_bus_dmamem_alloc_range");
-                }
-#endif  /* DIAGNOSTIC */
+                KASSERTMSG(low <= curaddr && curaddr < high,
+                    "uvm_pglistalloc returned non-sensical address %#lx "
+                    "(low=%#lx, high=%#lx\n", curaddr, low, high);
+
 #ifdef DEBUG_DMA
                 printf("alloc: page %lx\n", curaddr);
 #endif  /* DEBUG_DMA */
-                if (curaddr == (lastaddr + PAGE_SIZE))
+                if (curaddr == lastaddr + PAGE_SIZE
+                    && (lastaddr & boundary) == (curaddr & boundary))
                         segs[curseg].ds_len += PAGE_SIZE;
                 else {
                         curseg++;
+                        if (curseg >= nsegs) {
+                                uvm_pglistfree(&mlist);
+                                return EFBIG;
+                        }
                         segs[curseg].ds_addr = curaddr;
                         segs[curseg].ds_len = PAGE_SIZE;
                 }
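
The extra merge condition works because boundary is a power of two: for consecutive pages, the single address bit selected by boundary flips exactly when a multiple of boundary is crossed, and when boundary is zero the test is vacuously true. A worked example with PAGE_SIZE = 0x1000 and boundary = 0x2000:

    lastaddr = 0x1000:  0x1000 & 0x2000 == 0
    curaddr  = 0x2000:  0x2000 & 0x2000 == 0x2000   /* differs: start a new segment */
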
@@ v1.75 line 1609 / v1.76 line 1663 @@ _bus_dma_alloc_bouncebuf(bus_dma_tag_t t
         error = _bus_dmamem_alloc(t, cookie->id_bouncebuflen,
             PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
             map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
-        if (error)
-                goto out;
-        error = _bus_dmamem_map(t, cookie->id_bouncesegs,
-            cookie->id_nbouncesegs, cookie->id_bouncebuflen,
-            (void **)&cookie->id_bouncebuf, flags);
-
- out:
-        if (error) {
-                _bus_dmamem_free(t, cookie->id_bouncesegs,
-                    cookie->id_nbouncesegs);
-                cookie->id_bouncebuflen = 0;
-                cookie->id_nbouncesegs = 0;
-        } else {
-                cookie->id_flags |= _BUS_DMA_HAS_BOUNCE;
-        }
+        if (error == 0) {
+                error = _bus_dmamem_map(t, cookie->id_bouncesegs,
+                    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
+                    (void **)&cookie->id_bouncebuf, flags);
+                if (error) {
+                        _bus_dmamem_free(t, cookie->id_bouncesegs,
+                            cookie->id_nbouncesegs);
+                        cookie->id_bouncebuflen = 0;
+                        cookie->id_nbouncesegs = 0;
+                } else {
+                        cookie->id_flags |= _BUS_DMA_HAS_BOUNCE;
+                }
+        } else {
+                cookie->id_bouncebuflen = 0;
+                cookie->id_nbouncesegs = 0;
+        }
 
         return (error);
