67 #include <linux/kernel.h>
68 #include <linux/module.h>
/*
 * blk_flush_policy - work out which flush-sequence steps @rq requires.
 * @fflags: flush capability flags of the queue (REQ_FLUSH/REQ_FUA support)
 * @rq: request being inserted
 *
 * Builds and returns a bitmask of required steps.  NOTE(review): the
 * statements that OR the individual step bits into @policy, and the final
 * return, are missing from this excerpt -- the summaries below are
 * inferred from the visible conditionals; confirm against the full file.
 */
94 static unsigned int blk_flush_policy(
unsigned int fflags,
struct request *
rq)
/* Accumulates the required step bits. */
96 unsigned int policy = 0;
/* A request that actually carries data needs a data step. */
98 if (blk_rq_sectors(rq))
/* The submitter asked for a cache flush ahead of the data. */
102 if (rq->cmd_flags & REQ_FLUSH)
/*
 * FUA was requested but the queue cannot do it natively (bit absent from
 * @fflags) -- presumably emulated with a flush after the data; confirm.
 */
104 if (!(fflags &
REQ_FUA) && (rq->cmd_flags & REQ_FUA))
110 static unsigned int blk_flush_cur_seq(
struct request *rq)
112 return 1 <<
ffz(rq->flush.seq);
/*
 * blk_flush_restore_request - undo flush-machinery changes on @rq so it
 * can be completed as a normal request again.
 *
 * NOTE(review): lines between the visible statements (including whichever
 * one clears the flush-related cmd_flags) are missing from this excerpt.
 */
115 static void blk_flush_restore_request(
struct request *rq)
/*
 * rq->biotail is used to restore rq->bio; presumably the flush data
 * completion cleared rq->bio while biotail still points at the original
 * bio -- confirm against the full file.
 */
122 rq->bio = rq->biotail;
/* Put back the completion callback saved when the request was hooked. */
126 rq->end_io = rq->flush.saved_end_io;
/*
 * blk_flush_complete_seq - record completion of step @seq for @rq and
 * move the request to the list appropriate for its next step.
 *
 * Returns a bool telling the caller whether anything was (re)queued and
 * the queue should be run.  NOTE(review): the switch over the next step,
 * plus the declarations of q/queued and the error parameter, are missing
 * from this excerpt; comments below describe only the visible lines.
 */
144 static bool blk_flush_complete_seq(
struct request *rq,
unsigned int seq,
/* Double-buffered pending list selected by flush_pending_idx. */
148 struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
/* Each step may complete at most once per request. */
151 BUG_ON(rq->flush.seq & seq);
152 rq->flush.seq |= seq;
/* Advance to the next step this request still needs. */
155 seq = blk_flush_cur_seq(rq);
/*
 * Pre/post-flush step: park on the pending list; stamp the moment the
 * list went non-empty (used elsewhere to decide when to issue a flush).
 */
163 if (list_empty(pending))
164 q->flush_pending_since =
jiffies;
165 list_move_tail(&rq->flush.list, pending);
/*
 * Data step: track the request as in-flight flush data and put it at
 * the head of the dispatch queue for immediate issue.
 */
169 list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
170 list_add(&rq->queuelist, &q->queue_head);
/*
 * Sequence finished: detach from the flush machinery and restore the
 * request before it is completed.
 */
181 BUG_ON(!list_empty(&rq->queuelist));
182 list_del_init(&rq->flush.list);
183 blk_flush_restore_request(rq);
/* Kick the flush machinery; report whether anything was queued. */
191 return blk_kick_flush(q) | queued;
/*
 * flush_end_io - completion handler for the queue's internal flush request.
 *
 * NOTE(review): the declarations of q/queued/rq and the loop walking the
 * just-completed running list are missing from this excerpt.
 */
194 static void flush_end_io(
struct request *flush_rq,
int error)
/*
 * Pending and running sides of the double buffer must differ while a
 * flush is in flight.
 */
201 BUG_ON(q->flush_pending_idx == q->flush_running_idx);
/* Flip: the finished running side becomes available for pending again. */
204 q->flush_running_idx ^= 1;
/* For each request that waited on this flush, mark its current step done. */
209 unsigned int seq = blk_flush_cur_seq(rq);
212 queued |= blk_flush_complete_seq(rq, seq, error);
/*
 * Re-run the queue if completions queued new work, or if a queue run was
 * deliberately delayed while the flush was in flight.
 */
226 if (queued || q->flush_queue_delayed)
228 q->flush_queue_delayed = 0;
/*
 * NOTE(review): interior of the flush-issuing helper (presumably
 * blk_kick_flush); its signature and return statements are not visible in
 * this excerpt.
 */
/* Double-buffered pending list selected by flush_pending_idx. */
246 struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
/* Nothing to do while a flush is already running or nothing is pending. */
251 if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
/*
 * Hold off while flush data writes are still in flight (the rest of this
 * condition is truncated in the excerpt -- confirm in the full file).
 */
255 if (!list_empty(&q->flush_data_in_flight) &&
/* Set up the queue-owned flush request; completion goes to flush_end_io. */
265 q->flush_rq.cmd_type = REQ_TYPE_FS;
267 q->flush_rq.rq_disk = first_rq->rq_disk;
268 q->flush_rq.end_io = flush_end_io;
/* The pending side is now being serviced; flip to the other buffer. */
270 q->flush_pending_idx ^= 1;
/*
 * flush_data_end_io - completion handler installed on the data step of a
 * flush-sequenced request (rq->end_io is redirected here at insert time,
 * with the original callback saved in rq->flush.saved_end_io).
 * NOTE(review): the function body is not visible in this excerpt.
 */
275 static void flush_data_end_io(
struct request *rq,
int error)
/*
 * NOTE(review): interior of the request-insert path for flush/FUA requests
 * (presumably blk_insert_flush); its signature and the branches choosing
 * between fast paths and the state machine are not visible in this excerpt.
 */
301 unsigned int fflags = q->flush_flags;
/* Which flush-sequence steps does this request need? */
302 unsigned int policy = blk_flush_policy(fflags, rq);
/*
 * Strip flush/FUA bits the device does not need to see (the conditions
 * deciding which bit to strip are missing from this excerpt).
 */
308 rq->cmd_flags &= ~REQ_FLUSH;
310 rq->cmd_flags &= ~REQ_FUA;
/* The flush machinery assumes a single-bio request at this point. */
323 BUG_ON(rq->bio != rq->biotail);
/* Initialize per-request flush state before entering the state machine. */
340 memset(&rq->flush, 0,
sizeof(rq->flush));
341 INIT_LIST_HEAD(&rq->flush.list);
/*
 * Divert completion through flush_data_end_io, saving the original
 * end_io so blk_flush_restore_request() can put it back later.
 */
343 rq->flush.saved_end_io = rq->end_io;
344 rq->end_io = flush_data_end_io;
/*
 * NOTE(review): interior of the flush-abort path (presumably
 * blk_abort_flushes); the list-iteration statements wrapping these bodies
 * are not visible in this excerpt.
 */
/*
 * Detach an in-flight request from the flush machinery and restore it to
 * a normal request.
 */
369 list_del_init(&rq->flush.list);
370 blk_flush_restore_request(rq);
/*
 * Do the same for every request parked on either side of the
 * double-buffered flush_queue.
 */
377 for (i = 0; i <
ARRAY_SIZE(q->flush_queue); i++) {
380 list_del_init(&rq->flush.list);
381 blk_flush_restore_request(rq);
/*
 * bio_end_flush - bi_end_io callback for the empty flush bio issued by the
 * block-device flush path below (it is wired up via bio->bi_end_io there).
 * NOTE(review): the function body is not visible in this excerpt.
 */
387 static void bio_end_flush(
struct bio *bio,
int err)
/*
 * NOTE(review): interior of the block-device flush issuer (presumably
 * blkdev_issue_flush); its signature, error returns and the wait logic
 * are not visible in this excerpt.
 */
419 q = bdev_get_queue(bdev);
/*
 * A queue without a make_request_fn cannot accept bios -- bail out (the
 * value returned on this path is not visible here).
 */
429 if (!q->make_request_fn)
/*
 * An empty bio (zero vecs) with the flush end_io acts as a pure cache
 * flush; completion is signalled through the on-stack 'wait' object.
 */
432 bio = bio_alloc(gfp_mask, 0);
433 bio->bi_end_io = bio_end_flush;
435 bio->bi_private = &
wait;
/* Report where the flush failed, if the caller asked for a sector. */
447 *error_sector = bio->bi_sector;
/* !BIO_UPTODATE means the flush failed -- presumably mapped to -EIO. */
449 if (!bio_flagged(bio, BIO_UPTODATE))