Linux Kernel 3.7.1
scatterlist.c

/*
 * Copyright (C) 2007 Jens Axboe <[email protected]>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/*
 * sg_next - return the next scatterlist entry.  Usually that is just
 * sg + 1, but if the current entry is a chain entry the chain pointer is
 * followed into the next scatterlist array.
 */
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
        BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
        if (sg_is_last(sg))
                return NULL;

        sg++;
        if (unlikely(sg_is_chain(sg)))
                sg = sg_chain_ptr(sg);

        return sg;
}
EXPORT_SYMBOL(sg_next);

/*
 * sg_nents - return the total number of entries in a (possibly chained)
 * scatterlist by walking it with sg_next() until the end marker.
 */
int sg_nents(struct scatterlist *sg)
{
        int nents;
        for (nents = 0; sg; sg = sg_next(sg))
                nents++;
        return nents;
}
EXPORT_SYMBOL(sg_nents);

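/*
 * Illustration (not part of scatterlist.c): a minimal sketch of how a
 * caller might walk a scatterlist with for_each_sg(), which uses
 * sg_next() internally, so chained lists work transparently.  The
 * function name is hypothetical.
 */
static unsigned int example_total_length(struct scatterlist *sgl,
                                         unsigned int nents)
{
        struct scatterlist *sg;
        unsigned int i, total = 0;

        /* sum the byte length of every entry in the list */
        for_each_sg(sgl, sg, nents, i)
                total += sg->length;

        return total;
}
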
/*
 * sg_last - return the last entry of a scatterlist.  Without chaining
 * support this is just &sgl[nents - 1]; with chaining the whole list has
 * to be walked, so use it sparingly.
 */
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef ARCH_HAS_SG_CHAIN
        struct scatterlist *ret = &sgl[nents - 1];
#else
        struct scatterlist *sg, *ret = NULL;
        unsigned int i;

        for_each_sg(sgl, sg, nents, i)
                ret = sg;

#endif
#ifdef CONFIG_DEBUG_SG
        BUG_ON(sgl[0].sg_magic != SG_MAGIC);
        BUG_ON(!sg_is_last(ret));
#endif
        return ret;
}
EXPORT_SYMBOL(sg_last);

/*
 * sg_init_table - initialize an sg table: zero all entries, set the debug
 * magic when CONFIG_DEBUG_SG is enabled and mark the last entry as the end.
 */
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
        memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
        {
                unsigned int i;
                for (i = 0; i < nents; i++)
                        sgl[i].sg_magic = SG_MAGIC;
        }
#endif
        sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);

/*
 * sg_init_one - initialize a single-entry sg list describing one kernel
 * buffer.
 */
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
        sg_init_table(sg, 1);
        sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);

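/*
 * Illustration (not part of scatterlist.c): a sketch of describing a
 * kmalloc'ed buffer with a one-entry scatterlist before handing it to a
 * consumer that expects scatterlists.  The 512-byte size and the function
 * name are arbitrary choices for the example.
 */
static int example_single_entry(void)
{
        struct scatterlist sg;
        void *buf = kmalloc(512, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;

        /* one entry covering the whole buffer */
        sg_init_one(&sg, buf, 512);

        /* ... pass &sg with nents == 1 to a consumer here ... */

        kfree(buf);
        return 0;
}
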
/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
        if (nents == SG_MAX_SINGLE_ALLOC) {
                /*
                 * Kmemleak doesn't track page allocations as they are not
                 * commonly used (in a raw form) for kernel data structures.
                 * As we chain together a list of pages and then a normal
                 * kmalloc (tracked by kmemleak), in order for that last
                 * allocation not to become decoupled (and thus a
                 * false-positive) we need to inform kmemleak of all the
                 * intermediate allocations.
                 */
                void *ptr = (void *) __get_free_page(gfp_mask);
                kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
                return ptr;
        } else
                return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
        if (nents == SG_MAX_SINGLE_ALLOC) {
                kmemleak_free(sg);
                free_page((unsigned long) sg);
        } else
                kfree(sg);
}

/*
 * __sg_free_table - free a previously allocated (possibly chained) sg
 * table, releasing each chunk of up to max_ents entries through free_fn.
 */
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
                     sg_free_fn *free_fn)
{
        struct scatterlist *sgl, *next;

        if (unlikely(!table->sgl))
                return;

        sgl = table->sgl;
        while (table->orig_nents) {
                unsigned int alloc_size = table->orig_nents;
                unsigned int sg_size;

                /*
                 * If we have more than max_ents segments left,
                 * then assign 'next' to the sg table after the current one.
                 * sg_size is then one less than alloc size, since the last
                 * element is the chain pointer.
                 */
                if (alloc_size > max_ents) {
                        next = sg_chain_ptr(&sgl[max_ents - 1]);
                        alloc_size = max_ents;
                        sg_size = alloc_size - 1;
                } else {
                        sg_size = alloc_size;
                        next = NULL;
                }

                table->orig_nents -= sg_size;
                free_fn(sgl, alloc_size);
                sgl = next;
        }

        table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

void sg_free_table(struct sg_table *table)
{
        __sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/*
 * __sg_alloc_table - allocate a (possibly chained) sg table with nents
 * entries, in chunks of at most max_ents entries each, obtained from
 * alloc_fn.  Returns 0 on success or -ENOMEM on failure; a failed table
 * is expected to be cleaned up with __sg_free_table().
 */
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
                     unsigned int max_ents, gfp_t gfp_mask,
                     sg_alloc_fn *alloc_fn)
{
        struct scatterlist *sg, *prv;
        unsigned int left;

#ifndef ARCH_HAS_SG_CHAIN
        BUG_ON(nents > max_ents);
#endif

        memset(table, 0, sizeof(*table));

        left = nents;
        prv = NULL;
        do {
                unsigned int sg_size, alloc_size = left;

                if (alloc_size > max_ents) {
                        alloc_size = max_ents;
                        sg_size = alloc_size - 1;
                } else
                        sg_size = alloc_size;

                left -= sg_size;

                sg = alloc_fn(alloc_size, gfp_mask);
                if (unlikely(!sg)) {
                        /*
                         * Adjust entry count to reflect that the last
                         * entry of the previous table won't be used for
                         * linkage.  Without this, sg_kfree() may get
                         * confused.
                         */
                        if (prv)
                                table->nents = ++table->orig_nents;

                        return -ENOMEM;
                }

                sg_init_table(sg, alloc_size);
                table->nents = table->orig_nents += sg_size;

                /*
                 * If this is the first mapping, assign the sg table header.
                 * If this is not the first mapping, chain previous part.
                 */
                if (prv)
                        sg_chain(prv, max_ents, sg);
                else
                        table->sgl = sg;

                /*
                 * If no more entries after this one, mark the end
                 */
                if (!left)
                        sg_mark_end(&sg[sg_size - 1]);

                prv = sg;
        } while (left);

        return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/*
 * sg_alloc_table - allocate an sg table with nents entries using the
 * default kmalloc/page helpers, chaining as needed.  A partially built
 * table is freed before the error is returned.
 */
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
        int ret;

        ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
                               gfp_mask, sg_kmalloc);
        if (unlikely(ret))
                __sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);

        return ret;
}
EXPORT_SYMBOL(sg_alloc_table);

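/*
 * Illustration (not part of scatterlist.c): a sketch of allocating a
 * table for a few pages, filling each entry with sg_set_page(), and
 * tearing everything down again.  The page source (alloc_page) and the
 * entry count of four are arbitrary choices for the example.
 */
static int example_build_table(void)
{
        struct sg_table table;
        struct scatterlist *sg;
        struct page *pages[4];
        unsigned int i;
        int ret = -ENOMEM;

        for (i = 0; i < 4; i++) {
                pages[i] = alloc_page(GFP_KERNEL);
                if (!pages[i])
                        goto free_pages;
        }

        if (sg_alloc_table(&table, 4, GFP_KERNEL))
                goto free_pages;

        /* one full page per entry */
        for_each_sg(table.sgl, sg, table.nents, i)
                sg_set_page(sg, pages[i], PAGE_SIZE, 0);

        /* ... dma_map_sg() and use the table here ... */

        sg_free_table(&table);
        ret = 0;
free_pages:
        while (i--)
                __free_page(pages[i]);
        return ret;
}
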
/*
 * sg_alloc_table_from_pages - build an sg table from an array of pages,
 * merging physically contiguous pages into single entries.  offset is the
 * byte offset into the first page, size the total number of bytes covered.
 */
int sg_alloc_table_from_pages(struct sg_table *sgt,
                              struct page **pages, unsigned int n_pages,
                              unsigned long offset, unsigned long size,
                              gfp_t gfp_mask)
{
        unsigned int chunks;
        unsigned int i;
        unsigned int cur_page;
        int ret;
        struct scatterlist *s;

        /* compute number of contiguous chunks */
        chunks = 1;
        for (i = 1; i < n_pages; ++i)
                if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
                        ++chunks;

        ret = sg_alloc_table(sgt, chunks, gfp_mask);
        if (unlikely(ret))
                return ret;

        /* merging chunks and putting them into the scatterlist */
        cur_page = 0;
        for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
                unsigned long chunk_size;
                unsigned int j;

                /* look for the end of the current chunk */
                for (j = cur_page + 1; j < n_pages; ++j)
                        if (page_to_pfn(pages[j]) !=
                            page_to_pfn(pages[j - 1]) + 1)
                                break;

                chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
                sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
                size -= chunk_size;
                offset = 0;
                cur_page = j;
        }

        return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);

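/*
 * Illustration (not part of scatterlist.c): assuming a caller already
 * holds an array of pinned pages (for instance from get_user_pages()),
 * a table covering 'size' bytes starting 'offset' bytes into the first
 * page could be built like this.  All names besides the exported helper
 * are hypothetical.
 */
static int example_table_from_pages(struct page **pages, unsigned int n_pages,
                                    unsigned long offset, unsigned long size,
                                    struct sg_table *sgt)
{
        int ret;

        /* contiguous pages are merged, so sgt->nents may be < n_pages */
        ret = sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
                                        GFP_KERNEL);
        if (ret)
                return ret;

        /* ... dma_map_sg() the table, do I/O, then sg_free_table(sgt) ... */
        return 0;
}
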
/*
 * sg_miter_start - start a mapping iteration over a scatterlist.  flags
 * must include SG_MITER_TO_SG or SG_MITER_FROM_SG to state the intended
 * data direction.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
                    unsigned int nents, unsigned int flags)
{
        memset(miter, 0, sizeof(struct sg_mapping_iter));

        miter->__sg = sgl;
        miter->__nents = nents;
        miter->__offset = 0;
        WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
        miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

/*
 * sg_miter_next - advance the iterator and map the next page.  Returns
 * true with miter->addr, page, length and consumed set up, or false when
 * the list has been exhausted.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
        unsigned int off, len;

        /* check for end and drop resources from the last iteration */
        if (!miter->__nents)
                return false;

        sg_miter_stop(miter);

        /* get to the next sg if necessary.  __offset is adjusted by stop */
        while (miter->__offset == miter->__sg->length) {
                if (--miter->__nents) {
                        miter->__sg = sg_next(miter->__sg);
                        miter->__offset = 0;
                } else
                        return false;
        }

        /* map the next page */
        off = miter->__sg->offset + miter->__offset;
        len = miter->__sg->length - miter->__offset;

        miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
        off &= ~PAGE_MASK;
        miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
        miter->consumed = miter->length;

        if (miter->__flags & SG_MITER_ATOMIC)
                miter->addr = kmap_atomic(miter->page) + off;
        else
                miter->addr = kmap(miter->page) + off;

        return true;
}
EXPORT_SYMBOL(sg_miter_next);

/*
 * sg_miter_stop - unmap the current page and account the bytes consumed.
 * Also needs to be called when the iteration loop is left early.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
        WARN_ON(miter->consumed > miter->length);

        /* drop resources from the last iteration */
        if (miter->addr) {
                miter->__offset += miter->consumed;

                if (miter->__flags & SG_MITER_TO_SG)
                        flush_kernel_dcache_page(miter->page);

                if (miter->__flags & SG_MITER_ATOMIC) {
                        WARN_ON(!irqs_disabled());
                        kunmap_atomic(miter->addr);
                } else
                        kunmap(miter->page);

                miter->page = NULL;
                miter->addr = NULL;
                miter->length = 0;
                miter->consumed = 0;
        }
}
EXPORT_SYMBOL(sg_miter_stop);

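/*
 * Illustration (not part of scatterlist.c): a sketch of zero-filling a
 * scatterlist with the mapping iterator, mirroring the IRQ handling that
 * sg_copy_buffer() below uses for SG_MITER_ATOMIC in this kernel version.
 * The function name is hypothetical.
 */
static void example_zero_sg(struct scatterlist *sgl, unsigned int nents)
{
        struct sg_mapping_iter miter;
        unsigned long flags;

        sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_TO_SG);

        /* atomic mappings are held with IRQs disabled here */
        local_irq_save(flags);

        /* each iteration maps at most one page worth of data */
        while (sg_miter_next(&miter))
                memset(miter.addr, 0, miter.length);

        sg_miter_stop(&miter);
        local_irq_restore(flags);
}
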
/*
 * sg_copy_buffer - copy between a linear kernel buffer and an sg list in
 * the direction selected by to_buffer; returns the number of bytes copied.
 */
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
                             void *buf, size_t buflen, int to_buffer)
{
        unsigned int offset = 0;
        struct sg_mapping_iter miter;
        unsigned long flags;
        unsigned int sg_flags = SG_MITER_ATOMIC;

        if (to_buffer)
                sg_flags |= SG_MITER_FROM_SG;
        else
                sg_flags |= SG_MITER_TO_SG;

        sg_miter_start(&miter, sgl, nents, sg_flags);

        local_irq_save(flags);

        while (sg_miter_next(&miter) && offset < buflen) {
                unsigned int len;

                len = min(miter.length, buflen - offset);

                if (to_buffer)
                        memcpy(buf + offset, miter.addr, len);
                else
                        memcpy(miter.addr, buf + offset, len);

                offset += len;
        }

        sg_miter_stop(&miter);

        local_irq_restore(flags);
        return offset;
}

/*
 * sg_copy_from_buffer - copy from a linear buffer into an sg list; returns
 * the number of bytes copied.
 */
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                           void *buf, size_t buflen)
{
        return sg_copy_buffer(sgl, nents, buf, buflen, 0);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/*
 * sg_copy_to_buffer - copy from an sg list into a linear buffer; returns
 * the number of bytes copied.
 */
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                         void *buf, size_t buflen)
{
        return sg_copy_buffer(sgl, nents, buf, buflen, 1);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
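
/*
 * Illustration (not part of scatterlist.c): a sketch of bouncing a small
 * header out of an sg list, modifying it, and writing it back.  The
 * 64-byte size and the function name are arbitrary for the example.
 */
static void example_patch_header(struct scatterlist *sgl, unsigned int nents)
{
        u8 header[64];
        size_t got;

        /* pull the first bytes of the sg list into a linear buffer */
        got = sg_copy_to_buffer(sgl, nents, header, sizeof(header));
        if (got < sizeof(header))
                return;

        header[0] ^= 0x80;      /* flip an arbitrary flag bit */

        /* push the modified bytes back into the sg list */
        sg_copy_from_buffer(sgl, nents, header, sizeof(header));
}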