Linux Kernel 3.7.1
iommu-helpers.h
#include <linux/prefetch.h>

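/*
** iommu_fill_pdir - second pass over the scatterlist: walk the chunks
** marked by iommu_coalesce_chunks() (PIDE_FLAG set in the dma_address)
** and fill the allocated I/O Pdir entries, one per IOVP_SIZE page,
** using the driver-supplied iommu_io_pdir_entry callback.
** Returns the number of DMA chunks filled.
*/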
static inline unsigned int
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
		unsigned long hint,
		void (*iommu_io_pdir_entry)(u64 *, space_t, unsigned long,
					    unsigned long))
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	unsigned int n_mappings = 0;
	unsigned long dma_offset = 0, dma_len = 0;
	u64 *pdirp = NULL;

	/* Horrible hack. For efficiency's sake, dma_sg starts one
	 * entry below the true start (it is immediately incremented
	 * in the loop) */
	dma_sg--;

	while (nents-- > 0) {
		unsigned long vaddr;
		long size;

		DBG_RUN_SG(" %d : %08lx/%05x %08lx/%05x\n", nents,
			   (unsigned long)sg_dma_address(startsg), cnt,
			   sg_virt_addr(startsg), startsg->length
		);


		/*
		** Look for the start of a new DMA stream
		*/

		if (sg_dma_address(startsg) & PIDE_FLAG) {
			u32 pide = sg_dma_address(startsg) & ~PIDE_FLAG;

			BUG_ON(pdirp && (dma_len != sg_dma_len(dma_sg)));

			dma_sg++;

			dma_len = sg_dma_len(startsg);
			sg_dma_len(startsg) = 0;
			dma_offset = (unsigned long) pide & ~IOVP_MASK;
			n_mappings++;
#if defined(ZX1_SUPPORT)
			/* Pluto IOMMU IO Virt Address is not zero based */
			sg_dma_address(dma_sg) = pide | ioc->ibase;
#else
			/* SBA, ccio, and dino are zero based.
			 * Trying to save a few CPU cycles for most users.
			 */
			sg_dma_address(dma_sg) = pide;
#endif
			pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
			prefetchw(pdirp);
		}

		BUG_ON(pdirp == NULL);

		vaddr = sg_virt_addr(startsg);
		sg_dma_len(dma_sg) += startsg->length;
		size = startsg->length + dma_offset;
		dma_offset = 0;
#ifdef IOMMU_MAP_STATS
		ioc->msg_pages += startsg->length >> IOVP_SHIFT;
#endif
		do {
			iommu_io_pdir_entry(pdirp, KERNEL_SPACE,
					    vaddr, hint);
			vaddr += IOVP_SIZE;
			size -= IOVP_SIZE;
			pdirp++;
		} while(unlikely(size > 0));
		startsg++;
	}
	return(n_mappings);
}
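The iommu_io_pdir_entry callback passed to iommu_fill_pdir() is supplied by the individual IOMMU driver (sba_iommu.c and ccio-dma.c provide sba_io_pdir_entry() and ccio_io_pdir_entry(), respectively) and writes one 64-bit I/O Pdir entry per IOVP_SIZE page. The sketch below only illustrates the shape of such a callback: the bit layout is an assumption, and the real drivers additionally encode hardware-specific fields such as the PA-RISC coherence index and the DMA hint bits.

/* Illustrative sketch only -- not the actual sba/ccio implementation.
 * It merely matches the function-pointer type expected by
 * iommu_fill_pdir(); the "valid" bit position is a made-up example.
 * The sid and hint arguments are ignored here; real drivers use them
 * for the coherence index and DMA hint bits.
 */
static void example_io_pdir_entry(u64 *pdir_ptr, space_t sid,
				  unsigned long vba, unsigned long hint)
{
	u64 pa = virt_to_phys((void *) vba);	/* physical address of the page */

	pa &= IOVP_MASK;		/* keep only the page-frame bits */
	pa |= 0x1ULL;			/* hypothetical "entry valid" bit */
	*pdir_ptr = cpu_to_le64(pa);	/* store the I/O Pdir entry */
}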


/*
** First pass is to walk the SG list and determine where the breaks are
** in the DMA stream. Allocates PDIR entries but does not fill them.
** Returns the number of DMA chunks.
**
** Doing the fill separately from the coalescing/allocation keeps the
** code simpler. Future enhancement could make one pass through
** the sglist do both.
*/

static inline unsigned int
iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
		      struct scatterlist *startsg, int nents,
		      int (*iommu_alloc_range)(struct ioc *, struct device *, size_t))
{
	struct scatterlist *contig_sg;	   /* contig chunk head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	unsigned int n_mappings = 0;
	unsigned int max_seg_size = dma_get_max_seg_size(dev);

	while (nents > 0) {

		/*
		** Prepare for first/next DMA stream
		*/
		contig_sg = startsg;
		dma_len = startsg->length;
		dma_offset = sg_virt_addr(startsg) & ~IOVP_MASK;

		/* PARANOID: clear entries */
		sg_dma_address(startsg) = 0;
		sg_dma_len(startsg) = 0;

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while(--nents > 0) {
			unsigned long prevstartsg_end, startsg_end;

			prevstartsg_end = sg_virt_addr(startsg) +
				startsg->length;

			startsg++;
			startsg_end = sg_virt_addr(startsg) +
				startsg->length;

			/* PARANOID: clear entries */
			sg_dma_address(startsg) = 0;
			sg_dma_len(startsg) = 0;

			/*
			** First make sure current dma stream won't
			** exceed DMA_CHUNK_SIZE if we coalesce the
			** next entry.
			*/
			if (unlikely(ALIGN(dma_len + dma_offset + startsg->length,
					   IOVP_SIZE) > DMA_CHUNK_SIZE))
				break;

			if (startsg->length + dma_len > max_seg_size)
				break;

			/*
			** Next see if we can append the next chunk (i.e.
			** the previous chunk must end on a page boundary
			** and this one must begin on one).
			*/
			if (unlikely(((prevstartsg_end | sg_virt_addr(startsg)) & ~PAGE_MASK) != 0))
				break;

			dma_len += startsg->length;
		}

		/*
		** End of DMA Stream
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		sg_dma_len(contig_sg) = dma_len;
		dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE);
		sg_dma_address(contig_sg) =
			PIDE_FLAG
			| (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT)
			| dma_offset;
		n_mappings++;
	}

	return n_mappings;
}
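As the block comment above iommu_coalesce_chunks() explains, coalescing/allocation and filling are deliberately done in two passes. For context, the sketch below shows roughly how the two helpers fit together in a driver's map_sg path (compare sba_map_sg() in sba_iommu.c and ccio_map_sg() in ccio-dma.c). It is a simplified outline, not the actual driver code: GET_IOC(), ioc_alloc_range() and ioc_io_pdir_entry() stand in for the driver-specific lookup and callbacks, and all locking, statistics and error handling are omitted.

/* Simplified outline of a map_sg implementation built on these helpers.
 * GET_IOC(), ioc_alloc_range() and ioc_io_pdir_entry() are stand-ins
 * for the driver-specific lookup and callbacks (e.g. sba_alloc_range()
 * and sba_io_pdir_entry() in sba_iommu.c).
 */
static int example_map_sg(struct device *dev, struct scatterlist *sglist,
			  int nents, enum dma_data_direction direction)
{
	struct ioc *ioc = GET_IOC(dev);
	int coalesced, filled;

	/* Pass 1: find the breaks in the DMA stream, allocate an I/O Pdir
	 * range per chunk and mark each chunk head with PIDE_FLAG. */
	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents,
					  ioc_alloc_range);

	/* Pass 2: walk the marked chunks and fill the allocated entries. */
	filled = iommu_fill_pdir(ioc, sglist, nents, 0, ioc_io_pdir_entry);

	/* Both passes must agree on the number of DMA chunks. */
	BUG_ON(coalesced != filled);

	return filled;	/* number of DMA mappings handed back to the caller */
}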