Linux Kernel 3.7.1
blk-lib.c
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

struct bio_batch {
	atomic_t		done;
	unsigned long		flags;
	struct completion	*wait;
};

static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	if (err && (err != -EOPNOTSUPP))
		clear_bit(BIO_UPTODATE, &bb->flags);
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}
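
/*
 * All helpers below share the same completion pattern: the submitter primes
 * bb.done with 1, takes one extra reference per bio it submits, then drops
 * its own reference.  bio_batch_end_io() above completes bb.wait only when
 * the last outstanding bio finishes, so the submitter either sees the count
 * reach zero itself (nothing left in flight) or sleeps in
 * wait_for_completion().
 */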

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int max_discard_sectors;
	unsigned int granularity, alignment, mask;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	mask = granularity - 1;
	alignment = (bdev_discard_alignment(bdev) >> 9) & mask;

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity, so that requests stay aligned after a split.
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors = round_down(max_discard_sectors, granularity);
	if (unlikely(!max_discard_sectors)) {
		/* Avoid infinite loop below. Being cautious never hurts. */
		return -EOPNOTSUPP;
	}

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect;

		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		if (req_sects < nr_sects && (end_sect & mask) != alignment) {
			end_sect =
				round_down(end_sect - alignment, granularity)
				+ alignment;
			req_sects = end_sect - sector;
		}
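
		/*
		 * Worked example: with granularity = 8, mask = 7, alignment = 2,
		 * sector = 0, nr_sects = 100 and max_discard_sectors = 48,
		 * req_sects and end_sect start at 48.  Since 48 & 7 == 0 != 2,
		 * end_sect becomes round_down(46, 8) + 2 = 42 and req_sects 42,
		 * so the next chunk starts on a granularity-aligned boundary.
		 */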

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;

		bio->bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		atomic_inc(&bb.done);
		submit_bio(type, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
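
/*
 * Illustrative sketch (not part of blk-lib.c): a minimal caller of
 * blkdev_issue_discard().  The helper name and the byte-to-sector conversion
 * below are assumptions made for the example; the BLKDISCARD ioctl path in
 * block/ioctl.c performs the real equivalent of this call.
 */
static inline int example_discard_bytes(struct block_device *bdev,
					u64 start, u64 len)
{
	/* convert byte offsets to 512-byte sectors; no secure discard */
	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, 0);
}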

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	max_write_same_sectors = q->limits.max_write_same_sectors;

	if (max_write_same_sectors == 0)
		return -EOPNOTSUPP;

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		atomic_inc(&bb.done);
		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -ENOTSUPP;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
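
/*
 * Illustrative sketch (not part of blk-lib.c): using the WRITE SAME helper to
 * replicate the shared zero page across a range, which is essentially what
 * blkdev_issue_zeroout() below tries first.  The helper name is an assumption
 * made for the example.
 */
static inline int example_write_same_zeroes(struct block_device *bdev,
					    sector_t sector, sector_t nr_sects)
{
	return blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
				       ZERO_PAGE(0));
}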

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 */

int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_bdev   = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
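
		/*
		 * bio_add_page() returns the number of bytes actually added,
		 * which can be less than sz << 9 once the bio is full; the
		 * break above then lets this bio be submitted and the outer
		 * loop start a new one.  ret is reset below so a partial add
		 * is not mistaken for an error in the return value.
		 */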
		ret = 0;
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
		ret = -EIO;

	return ret;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Zero-fill a block range, using WRITE SAME when the device supports it and
 *  falling back to explicitly writing zero pages otherwise.
 */

int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask)
{
	if (bdev_write_same(bdev)) {
		unsigned char bdn[BDEVNAME_SIZE];

		if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
					     ZERO_PAGE(0)))
			return 0;

		bdevname(bdev, bdn);
		pr_err("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
	}

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
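
/*
 * Illustrative sketch (not part of blk-lib.c): zeroing the first 1 MiB of a
 * device with the exported helper.  The helper name and the GFP_NOIO choice
 * are assumptions made for the example.
 */
static inline int example_zero_first_mib(struct block_device *bdev)
{
	/* 1 MiB = 2048 sectors of 512 bytes */
	return blkdev_issue_zeroout(bdev, 0, 2048, GFP_NOIO);
}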