#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS BITS_PER_LONG
        client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
/*
 * Store the io and region number in bio->bi_private: the low bits of the
 * (suitably aligned) io pointer are free to carry the region index.
 */
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
                                       unsigned region)
{
        if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
                DMCRIT("Unaligned struct io pointer %p", io);
                BUG();
        }

        bio->bi_private = (void *)((unsigned long)io | region);
}
static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
                                            unsigned *region)
{
        unsigned long val = (unsigned long)bio->bi_private;

        *io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
        *region = val & (DM_IO_MAX_REGIONS - 1);
}
static void dec_count(struct io *io, unsigned int region, int error)
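/*
 * Hedged sketch, not part of the original file: what a completion helper with
 * this signature typically does is remember which region failed, drop the
 * count of outstanding bios, and finish the whole io once it reaches zero.
 * The struct io fields used here (error_bits, count, callback, context) are
 * assumptions; the real dec_count() must also handle the synchronous case by
 * waking the sleeping caller instead of invoking a callback.
 */
static void example_dec_count(struct io *io, unsigned int region, int error)
{
        if (error)
                set_bit(region, &io->error_bits);       /* one bit per region */

        if (atomic_dec_and_test(&io->count))            /* last bio finished */
                io->callback(io->error_bits, io->context);
}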
static void endio(struct bio *bio, int error)
{
        struct io *io;
        unsigned region;

        /* zero the buffer so a failed READ cannot leak stale data */
        if (error && bio_data_dir(bio) == READ)
                zero_fill_bio(bio);

        retrieve_io_and_region_from_bio(bio, &io, &region);
        bio_put(bio);
        dec_count(io, region, error);
}
/*
 * The dpages helpers below provide a uniform way of walking the pages behind
 * the different memory types dm-io accepts: a page list, a bio_vec array,
 * vmalloc'd memory and kmalloc'd memory.
 */
static void list_get_page(struct dpages *dp,
                          struct page **p, unsigned long *len, unsigned *offset)
static void list_next_page(struct dpages *dp)
static void list_dp_init(struct dpages *dp, struct page_list *pl,
                         unsigned offset)
static void bvec_get_page(struct dpages *dp,
                          struct page **p, unsigned long *len, unsigned *offset)
{
        struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;

        *p = bvec->bv_page;
        *len = bvec->bv_len;
        *offset = bvec->bv_offset;
}
static void bvec_next_page(struct dpages *dp)
{
        struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;

        dp->context_ptr = bvec + 1;
}
static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
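/*
 * Hedged sketch, not in the excerpt: a *_dp_init() helper of this kind
 * usually just records the iteration callbacks and the starting cursor in
 * the struct dpages.  context_ptr and the get_page hook appear elsewhere in
 * this file; the next_page hook is assumed by symmetry.
 */
static void example_bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
        dp->get_page = bvec_get_page;   /* fetch page/len/offset of the current entry */
        dp->next_page = bvec_next_page; /* advance to the next bio_vec */
        dp->context_ptr = bvec;         /* cursor starts at the first bio_vec */
}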
static void vm_get_page(struct dpages *dp,
                        struct page **p, unsigned long *len, unsigned *offset)
static void vm_next_page(struct dpages *dp)
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
                        unsigned *offset)
static void km_next_page(struct dpages *dp)
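/*
 * Hedged sketch, not from the original file: one plausible shape for a
 * get_page/next_page pair over a kmalloc'd (kernel-virtual, physically
 * contiguous) buffer, deriving the page and offset from the address itself.
 * The real km_* helpers may track the intra-page offset differently.
 */
static void example_km_get_page(struct dpages *dp, struct page **p,
                                unsigned long *len, unsigned *offset)
{
        *p = virt_to_page(dp->context_ptr);             /* page backing the address */
        *offset = offset_in_page(dp->context_ptr);      /* offset within that page */
        *len = PAGE_SIZE - *offset;                     /* bytes left in this page */
}

static void example_km_next_page(struct dpages *dp)
{
        /* step to the start of the following page */
        dp->context_ptr += PAGE_SIZE - offset_in_page(dp->context_ptr);
}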
static void do_region(int rw, unsigned region, struct dm_io_region *where,
                      struct dpages *dp, struct io *io)
{
        /*
         * Locals (bio, q, remaining, discard_sectors, page, len, offset),
         * bio allocation and the outer submission loop are elided here.
         */
        bio->bi_sector = where->sector + (where->count - remaining);
        bio->bi_bdev = where->bdev;
        bio->bi_end_io = endio;
        store_io_and_region_in_bio(bio, io, region);

        if (rw & REQ_DISCARD) {
                /* a discard bio carries no data pages, only a sector count */
                discard_sectors = min_t(sector_t,
                                        q->limits.max_discard_sectors,
                                        remaining);
                remaining -= discard_sectors;
        } else while (remaining) {
                /* add as many pages as the bio will take */
                dp->get_page(dp, &page, &len, &offset);
                len = min(len, to_bytes(remaining));
                /* ... adding the page to the bio and advancing dp elided ... */
                remaining -= to_sector(len);
        }

        /* ... bio submission elided ... */
}
static void dispatch_io(int rw, unsigned int num_regions,
                        struct dm_io_region *where, struct dpages *dp,
                        struct io *io, int sync)
{
        /* ... locals and sanity checks elided ... */

        /* dp must be rewound to its initial state before each region */
        for (i = 0; i < num_regions; i++) {
                if (where[i].count || (rw & REQ_FLUSH))
                        do_region(rw, i, where + i, dp, io);
        }

        /* drop the initial reference so the io can complete */
        dec_count(io, 0, 0);
}
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
                   struct dm_io_region *where, int rw, struct dpages *dp,
                   unsigned long *error_bits)
{
        /*
         * The compiler cannot be trusted to align a stack variable this
         * strictly, so over-allocate a char array and align the struct io
         * pointer by hand; volatile keeps the optimizer from reusing io_.
         */
        volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
        struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));

        /* ... initialise *io and take the initial reference (elided) ... */

        dispatch_io(rw, num_regions, where, dp, io, 1);

        /* ... wait for completion, report *error_bits and return (elided) ... */
}
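/*
 * Hedged illustration, not part of the original file: the stack-alignment
 * idiom used in sync_io() in isolation.  PTR_ALIGN() (from <linux/kernel.h>)
 * rounds a pointer up to the requested alignment, so over-allocating by
 * __alignof__ - 1 bytes guarantees a correctly aligned object somewhere in
 * the buffer.  "struct demo" is purely hypothetical.
 */
struct demo {
        unsigned long bits;
} __aligned(DM_IO_MAX_REGIONS);

static void example_stack_align(void)
{
        volatile char buf[sizeof(struct demo) + __alignof__(struct demo) - 1];
        struct demo *d = (struct demo *)PTR_ALIGN(&buf, __alignof__(struct demo));

        d->bits = 0;    /* d now satisfies the declared alignment */
}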
static int async_io(struct dm_io_client *client, unsigned int num_regions,
                    struct dm_io_region *where, int rw, struct dpages *dp,
                    io_notify_fn fn, void *context)
{
        struct io *io;

        if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
                WARN_ON(1);     /* only writes may span multiple regions */
                fn(1, context);
                return -EIO;
        }

        /* ... allocate *io from the client mempool and initialise it ... */
        dispatch_io(rw, num_regions, where, dp, io, 0);
        return 0;
}
static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
                   unsigned long size)
{
        switch (io_req->mem.type) {
        case DM_IO_PAGE_LIST:
                list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
                break;
        case DM_IO_BVEC:
                bvec_dp_init(dp, io_req->mem.ptr.bvec);
                break;
        case DM_IO_VMA:
                flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
                if ((io_req->bi_rw & RW_MASK) == READ) {
                        /* ... record vma and size for post-read invalidation ... */
                }
                vm_dp_init(dp, io_req->mem.ptr.vma);
                break;
        case DM_IO_KMEM:
                km_dp_init(dp, io_req->mem.ptr.addr);
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
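/*
 * For reference, not part of this file: the memory types dispatched on above
 * come from <linux/dm-io.h>.  In kernels of this vintage the enum provides
 * DM_IO_PAGE_LIST (a page_list chain), DM_IO_BVEC (a bio_vec array),
 * DM_IO_VMA (vmalloc'd memory) and DM_IO_KMEM (kmalloc'd memory).
 */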
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
          struct dm_io_region *where, unsigned long *sync_error_bits)
{
        int r;
        struct dpages dp;

        r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
        if (r)
                return r;

        if (!io_req->notify.fn)
                return sync_io(io_req->client, num_regions, where,
                               io_req->bi_rw, &dp, sync_error_bits);

        return async_io(io_req->client, num_regions, where, io_req->bi_rw,
                        &dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
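/*
 * Hedged usage sketch, not part of the original file: a synchronous read of
 * "count" sectors into a kmalloc'd buffer via dm_io().  Leaving notify.fn
 * NULL selects the sync_io() path above; supplying a callback would take the
 * async_io() path instead.  The caller is assumed to have obtained "client"
 * from dm_io_client_create() and to provide bdev/sector/count/buf itself.
 */
static int example_sync_read(struct dm_io_client *client,
                             struct block_device *bdev, sector_t sector,
                             sector_t count, void *buf)
{
        unsigned long error_bits = 0;
        struct dm_io_region where = {
                .bdev   = bdev,
                .sector = sector,
                .count  = count,
        };
        struct dm_io_request io_req = {
                .bi_rw        = READ,
                .mem.type     = DM_IO_KMEM,
                .mem.ptr.addr = buf,
                .notify.fn    = NULL,   /* NULL means: block until completion */
                .client       = client,
        };
        int r;

        r = dm_io(&io_req, 1, &where, &error_bits);
        if (r)
                return r;

        /* error_bits has one bit set per failed region (only region 0 here) */
        return error_bits ? -EIO : 0;
}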