Linux Kernel 3.7.1
scatterlist.h

#ifndef _LINUX_SCATTERLIST_H
#define _LINUX_SCATTERLIST_H

#include <linux/string.h>
#include <linux/bug.h>
#include <linux/mm.h>

#include <asm/types.h>
#include <asm/scatterlist.h>
#include <asm/io.h>

struct sg_table {
        struct scatterlist *sgl;        /* the list */
        unsigned int nents;             /* number of mapped entries */
        unsigned int orig_nents;        /* original size of list */
};

/*
 * Notes on SG table design.
 *
 * Architectures must provide an unsigned long page_link field in the
 * scatterlist struct. We use that to place the page pointer AND encode
 * information about the sg table as well. The two lower bits are reserved
 * for this information.
 *
 * If bit 0 is set, then the page_link contains a pointer to the next sg
 * table list. Otherwise the next entry is at sg + 1.
 *
 * If bit 1 is set, then this sg entry is the last element in a list.
 *
 * See sg_next().
 */

#define SG_MAGIC        0x87654321

/*
 * We overload the LSB of the page pointer to indicate whether it's
 * a valid sg entry, or whether it points to the start of a new scatterlist.
 * Those low bits are there for everyone! (thanks mason :-)
 */
#define sg_is_chain(sg)         ((sg)->page_link & 0x01)
#define sg_is_last(sg)          ((sg)->page_link & 0x02)
#define sg_chain_ptr(sg)        \
        ((struct scatterlist *) ((sg)->page_link & ~0x03))
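
/*
 * Illustrative sketch (not part of the original header): roughly what
 * sg_next() does with the two low bits when walking a list. If bit 1 is
 * set the list ends here; otherwise advance, and if the next slot has
 * bit 0 set it holds a link to another chunk rather than a data entry.
 * The helper name my_sg_next is hypothetical; real code should simply
 * call sg_next().
 *
 *      static struct scatterlist *my_sg_next(struct scatterlist *sg)
 *      {
 *              if (sg_is_last(sg))
 *                      return NULL;
 *              sg++;
 *              if (sg_is_chain(sg))
 *                      sg = sg_chain_ptr(sg);
 *              return sg;
 *      }
 */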

static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
{
        unsigned long page_link = sg->page_link & 0x3;

        /*
         * In order for the low bit stealing approach to work, pages
         * must be aligned at a 32-bit boundary as a minimum.
         */
        BUG_ON((unsigned long) page & 0x03);
#ifdef CONFIG_DEBUG_SG
        BUG_ON(sg->sg_magic != SG_MAGIC);
        BUG_ON(sg_is_chain(sg));
#endif
        sg->page_link = page_link | (unsigned long) page;
}

static inline void sg_set_page(struct scatterlist *sg, struct page *page,
                               unsigned int len, unsigned int offset)
{
        sg_assign_page(sg, page);
        sg->offset = offset;
        sg->length = len;
}

static inline struct page *sg_page(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
        BUG_ON(sg->sg_magic != SG_MAGIC);
        BUG_ON(sg_is_chain(sg));
#endif
        return (struct page *)((sg)->page_link & ~0x3);
}

static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
                              unsigned int buflen)
{
        sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
}
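
/*
 * Illustrative sketch (not part of the original header): describing a
 * single lowmem buffer with one sg entry. sg_init_table() marks the entry
 * as the last one (and sets SG_MAGIC when debugging is enabled), then
 * sg_set_buf() fills in page, offset and length. buf and len are
 * hypothetical, and buf must live in the kernel's linear mapping for
 * virt_to_page() to be valid. sg_init_one(), declared further down,
 * wraps roughly these two steps.
 *
 *      struct scatterlist sg;
 *
 *      sg_init_table(&sg, 1);
 *      sg_set_buf(&sg, buf, len);
 */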

/*
 * Loop over each sg element, following the pointer to a new list if necessary
 */
#define for_each_sg(sglist, sg, nr, __i)        \
        for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
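
/*
 * Illustrative sketch (not part of the original header): totalling the
 * byte count of an already set up table. "table" is a hypothetical,
 * previously filled struct sg_table; chained lists are followed
 * transparently because the macro advances with sg_next().
 *
 *      struct scatterlist *sg;
 *      unsigned int i, total = 0;
 *
 *      for_each_sg(table->sgl, sg, table->nents, i)
 *              total += sg->length;
 */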

static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
                            struct scatterlist *sgl)
{
#ifndef ARCH_HAS_SG_CHAIN
        BUG();
#endif

        /*
         * offset and length are unused for chain entry. Clear them.
         */
        prv[prv_nents - 1].offset = 0;
        prv[prv_nents - 1].length = 0;

        /*
         * Set lowest bit to indicate a link pointer, and make sure to clear
         * the termination bit if it happens to be set.
         */
        prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02;
}
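
/*
 * Illustrative sketch (not part of the original header): linking two
 * fixed-size arrays into one logical list on an architecture that defines
 * ARCH_HAS_SG_CHAIN. The last slot of the first array is consumed by the
 * link, so only 15 of its 16 entries can carry data; the array names and
 * sizes are made up for the example.
 *
 *      struct scatterlist first[16], second[8];
 *
 *      sg_init_table(first, 16);
 *      sg_init_table(second, 8);
 *      sg_chain(first, 16, second);
 */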

static inline void sg_mark_end(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
        BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
        /*
         * Set termination bit, clear potential chain bit
         */
        sg->page_link |= 0x02;
        sg->page_link &= ~0x01;
}
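
/*
 * Illustrative sketch (not part of the original header): if only the first
 * three entries of an eight-entry array end up being used, the shortened
 * list is re-terminated like this before being handed on. p0..p2 are
 * hypothetical struct page pointers.
 *
 *      struct scatterlist sg[8];
 *
 *      sg_init_table(sg, 8);
 *      sg_set_page(&sg[0], p0, PAGE_SIZE, 0);
 *      sg_set_page(&sg[1], p1, PAGE_SIZE, 0);
 *      sg_set_page(&sg[2], p2, PAGE_SIZE, 0);
 *      sg_mark_end(&sg[2]);
 */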

static inline dma_addr_t sg_phys(struct scatterlist *sg)
{
        return page_to_phys(sg_page(sg)) + sg->offset;
}

static inline void *sg_virt(struct scatterlist *sg)
{
        return page_address(sg_page(sg)) + sg->offset;
}

int sg_nents(struct scatterlist *sg);
struct scatterlist *sg_next(struct scatterlist *);
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
void sg_init_table(struct scatterlist *, unsigned int);
void sg_init_one(struct scatterlist *, const void *, unsigned int);

typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);

void __sg_free_table(struct sg_table *, unsigned int, sg_free_fn *);
void sg_free_table(struct sg_table *);
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t,
                     sg_alloc_fn *);
int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
int sg_alloc_table_from_pages(struct sg_table *sgt,
        struct page **pages, unsigned int n_pages,
        unsigned long offset, unsigned long size,
        gfp_t gfp_mask);

size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                           void *buf, size_t buflen);
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                         void *buf, size_t buflen);
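
/*
 * Illustrative sketch (not part of the original header): allocating a
 * table, pointing its entries at pages, and copying a linear buffer into
 * it. "pages", "npages", "data" and "len" are hypothetical, and error
 * handling is reduced to a bare return.
 *
 *      struct sg_table table;
 *      struct scatterlist *sg;
 *      unsigned int i;
 *      size_t copied;
 *
 *      if (sg_alloc_table(&table, npages, GFP_KERNEL))
 *              return -ENOMEM;
 *
 *      for_each_sg(table.sgl, sg, table.nents, i)
 *              sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *
 *      copied = sg_copy_from_buffer(table.sgl, table.nents, data, len);
 *
 *      sg_free_table(&table);
 */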

/*
 * Maximum number of entries that will be allocated in one piece, if
 * a list larger than this is required then chaining will be utilized.
 */
#define SG_MAX_SINGLE_ALLOC             (PAGE_SIZE / sizeof(struct scatterlist))
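/*
 * As a rough worked example (both values are configuration and architecture
 * dependent): with 4 KiB pages and a 32-byte struct scatterlist this gives
 * 4096 / 32 = 128 entries per allocation; longer lists are chained together
 * from such chunks.
 */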

/*
 * Mapping sg iterator
 *
 * Iterates over sg entries mapping page-by-page. On each successful
 * iteration, @miter->page points to the mapped page and
 * @miter->length bytes of data can be accessed at @miter->addr. As
 * long as an iteration is enclosed between start and stop, the user
 * is free to choose control structure and when to stop.
 *
 * @miter->consumed is set to @miter->length on each iteration. It
 * can be adjusted if the user can't consume all the bytes in one go.
 * Also, a stopped iteration can be resumed by calling next on it.
 * This is useful when iteration needs to release all resources and
 * continue later (e.g. at the next interrupt).
 */

#define SG_MITER_ATOMIC         (1 << 0)        /* use kmap_atomic */
#define SG_MITER_TO_SG          (1 << 1)        /* flush back to phys on unmap */
#define SG_MITER_FROM_SG        (1 << 2)        /* nop */

struct sg_mapping_iter {
        /* the following three fields can be accessed directly */
        struct page             *page;          /* currently mapped page */
        void                    *addr;          /* pointer to the mapped area */
        size_t                  length;         /* length of the mapped area */
        size_t                  consumed;       /* number of consumed bytes */

        /* these are internal states, keep away */
        struct scatterlist      *__sg;          /* current entry */
        unsigned int            __nents;        /* nr of remaining entries */
        unsigned int            __offset;       /* offset within sg */
        unsigned int            __flags;
};

void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
                    unsigned int nents, unsigned int flags);
bool sg_miter_next(struct sg_mapping_iter *miter);
void sg_miter_stop(struct sg_mapping_iter *miter);
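
/*
 * Illustrative sketch (not part of the original header): zero-filling the
 * memory behind a scatterlist with the mapping iterator. SG_MITER_ATOMIC
 * maps with kmap_atomic, so nothing between start and stop may sleep, and
 * SG_MITER_TO_SG is set because data is being written to the sg pages;
 * sgl and nents are hypothetical.
 *
 *      struct sg_mapping_iter miter;
 *
 *      sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_TO_SG);
 *      while (sg_miter_next(&miter))
 *              memset(miter.addr, 0, miter.length);
 *      sg_miter_stop(&miter);
 */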

#endif /* _LINUX_SCATTERLIST_H */