#include <linux/list.h>
#include <xen/events.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
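/*
 * Tunables: the size of the shared pending_req pool (i.e. how many
 * requests may be in flight across all backends at once) and a flag
 * enabling periodic per-interface I/O statistics; both are presumably
 * exposed as module parameters.
 */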
static int xen_blkif_reqs = 64;
static unsigned int log_stats;
#define BLKBACK_INVALID_HANDLE (~0)

#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]
        unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
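/*
 * One grant handle is kept per (request, segment) slot, indexed the
 * same way as the pending pages, so map and unmap can find each other.
 */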
#define pending_handle(_req, _seg) \
        (blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])
static int do_block_io_op(struct xen_blkif *blkif);
static int dispatch_rw_block_io(struct xen_blkif *blkif,
                                struct blkif_request *req,
                                struct pending_req *pending_req);
static void make_response(struct xen_blkif *blkif, u64 id,
                          unsigned short op, int st);
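/*
 * The backing device changed size: pick up the new size, record it in
 * the vbd, and advertise it to the frontend (the "sectors" xenstore
 * node it watches).
 */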
static void xen_vbd_resize(struct xen_blkif *blkif)
        unsigned long long new_size = vbd_sz(vbd);
        vbd->size = new_size;
                            (unsigned long long)vbd_sz(vbd));
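/*
 * Wake the per-interface kernel thread; invoked from the event-channel
 * interrupt handler when the frontend signals that new requests are on
 * the ring.
 */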
static void blkif_notify_work(struct xen_blkif *blkif)
        blkif_notify_work(dev_id);
static void print_stats(struct xen_blkif *blkif)
        pr_info("xen-blkback (%s): oo %3d | rd %4d | wr %4d | f %4d"
        xen_vbd_resize(blkif);
        if (do_block_io_op(blkif))
                blkif->waiting_reqs = 1;
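/*
 * Unmap the grant mappings of a finished request so the pages are
 * released back to the frontend.
 */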
static void xen_blkbk_unmap(struct pending_req *req)
        unsigned int i, invcount = 0;
        for (i = 0; i < req->nr_pages; i++) {
                gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
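/*
 * xen_blkbk_map(): map the request's grant references into this domain
 * so the bios built later can address the frontend's data pages.
 */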
        int nseg = req->u.rw.nr_segments;
        for (i = 0; i < nseg; i++) {
                gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
                                  req->u.rw.seg[i].gref,
                                  pending_req->blkif->domid);
        for (i = 0; i < nseg; i++) {
                        (req->u.rw.seg[i].first_sect << 9);
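/*
 * BLKIF_OP_DISCARD: forward a frontend discard (trim) request to the
 * backing device, optionally as a secure discard.
 */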
static int dispatch_discard_io(struct xen_blkif *blkif,
                               struct blkif_request *req)
        unsigned long secure;
        secure = (blkif->vbd.discard_secure &&
                  (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
                 BLKDEV_DISCARD_SECURE : 0;
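/*
 * Wait until all in-flight requests on this interface have completed;
 * used to order drain-style (barrier/flush) requests behind earlier
 * writes.
 */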
static void xen_blk_drain_io(struct xen_blkif *blkif)
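/*
 * Per-bio completion bookkeeping: record any error, and when the last
 * bio of the request finishes, unmap the grants, send the response and
 * recycle the pending_req.
 */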
static void __end_block_io_op(struct pending_req *pending_req, int error)
462 " error=%d\n", error);
                xen_blkbk_unmap(pending_req);
                make_response(pending_req->blkif, pending_req->id,
                free_req(pending_req);
static void end_block_io_op(struct bio *bio, int error)
        __end_block_io_op(bio->bi_private, error);
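/*
 * Consume requests from the shared ring and dispatch them until the
 * ring is empty or we run out of pending_req slots.
 */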
static int
__do_block_io_op(struct xen_blkif *blkif)
        struct pending_req *pending_req;
        rc = blk_rings->common.req_cons;
        rp = blk_rings->common.sring->req_prod;
                pending_req = alloc_req();
                if (NULL == pending_req) {
                        free_req(pending_req);
                        if (dispatch_discard_io(blkif, &req))
                                break;
                } else if (dispatch_rw_block_io(blkif, &req, pending_req))
                more_to_do = __do_block_io_op(blkif);
        } while (more_to_do);
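/*
 * Translate one ring request into bios: validate and map the segments,
 * build the bios, and submit them to the block layer.
 */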
static int dispatch_rw_block_io(struct xen_blkif *blkif,
                                struct blkif_request *req,
                                struct pending_req *pending_req)
        struct bio *bio = NULL;
        nseg = req->u.rw.nr_segments;
        preq.dev           = req->u.rw.handle;
        preq.sector_number = req->u.rw.sector_number;
        pending_req->blkif = blkif;
        pending_req->id    = req->u.rw.id;
        for (i = 0; i < nseg; i++) {
                seg[i].nsec = req->u.rw.seg[i].last_sect -
                        req->u.rw.seg[i].first_sect + 1;
                if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
                    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
                        goto fail_response;
                preq.nr_sects += seg[i].nsec;
        if (xen_vbd_translate(&preq, blkif, operation) != 0) {
                         operation == READ ? "read" : "write",
                         preq.sector_number,
                         preq.sector_number + preq.nr_sects, preq.dev);
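        /*
         * Both the start sector and each segment length must be aligned
         * to the backing device's logical block size (expressed here in
         * 512-byte units), or the request is rejected.
         */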
        for (i = 0; i < nseg; i++) {
                if (((int)preq.sector_number|(int)seg[i].nsec) &
                    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
                xen_blk_drain_io(pending_req->blkif);
        if (xen_blkbk_map(req, pending_req, seg))
                goto fail_flush;
        for (i = 0; i < nseg; i++) {
                while ((bio == NULL) ||
                       (bio_add_page(bio,
                                     blkbk->pending_page(pending_req, i),
                        biolist[nbio++] = bio;
                        bio->bi_bdev    = preq.bdev;
                        bio->bi_private = pending_req;
                        bio->bi_end_io  = end_block_io_op;
                        bio->bi_sector  = preq.sector_number;
                preq.sector_number += seg[i].nsec;
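        /*
         * A request that carries no data (e.g. a pure flush) still needs
         * one empty bio so completion flows through end_block_io_op.
         */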
                biolist[nbio++] = bio;
                bio->bi_bdev    = preq.bdev;
                bio->bi_private = pending_req;
                bio->bi_end_io  = end_block_io_op;
        for (i = 0; i < nbio; i++)
                submit_bio(operation, biolist[i]);
        if (operation == READ)
                blkif->st_rd_sect += preq.nr_sects;
        else if (operation & WRITE)
                blkif->st_wr_sect += preq.nr_sects;
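        /*
         * Error paths: undo any grant mappings, answer the frontend
         * with an error, and put any bios already allocated.
         */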
        xen_blkbk_unmap(pending_req);
        free_req(pending_req);
        for (i = 0; i < nbio; i++)
                bio_put(biolist[i]);
        __end_block_io_op(pending_req, -EINVAL);
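/*
 * Put a response on the shared ring and, if the frontend is waiting,
 * notify it through the event channel.
 */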
static void make_response(struct xen_blkif *blkif, u64 id,
                          unsigned short op, int st)
        blk_rings->common.rsp_prod_pvt++;
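/*
 * Module init: allocate the shared pool of pending_reqs, grant-handle
 * slots and pages, seed the free list, and register the backend.
 */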
static int __init xen_blkif_init(void)
        for (i = 0; i < mmap_pages; i++) {
        for (i = 0; i < xen_blkif_reqs; i++)
                list_add_tail(&blkbk->pending_reqs[i].free_list,
                              &blkbk->pending_free);
        for (i = 0; i < mmap_pages; i++) {