#include <linux/module.h>
#include <linux/slab.h>
#include <linux/list.h>

static unsigned int write_cluster_size = 64;
module_param(write_cluster_size, uint, S_IRUGO);
MODULE_PARM_DESC(write_cluster_size,
                 "Number of pages used for contiguous writes.");

#define CLUSTER_SIZE (write_cluster_size * PAGE_SIZE)

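/*
 * Writes smaller than a cluster are handled as read-modify-write: the
 * enclosing cluster(s) are first read into a per-request bounce buffer,
 * the new payload is merged in, and the result is written back. The
 * helpers below manage that buffer and the cluster reservation list.
 */
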
void __scm_free_rq_cluster(struct scm_request *scmrq)
{
        int i;

        if (!scmrq->cluster.buf)
                return;

        for (i = 0; i < 2 * write_cluster_size; i++)
                free_page((unsigned long) scmrq->cluster.buf[i]);

        kfree(scmrq->cluster.buf);
}

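/*
 * A request may straddle a cluster boundary, so the bounce buffer holds
 * two clusters' worth of pages (2 * write_cluster_size).
 */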
int __scm_alloc_rq_cluster(struct scm_request *scmrq)
{
        int i;

        scmrq->cluster.buf = kzalloc(sizeof(void *) * 2 * write_cluster_size,
                                     GFP_KERNEL);
        if (!scmrq->cluster.buf)
                return -ENOMEM;

        for (i = 0; i < 2 * write_cluster_size; i++) {
                /* bounce pages are zeroed and come from ZONE_DMA */
                scmrq->cluster.buf[i] = (void *) get_zeroed_page(GFP_DMA);
                if (!scmrq->cluster.buf[i])
                        return -ENOMEM;
        }
        INIT_LIST_HEAD(&scmrq->cluster.list);
        return 0;
}

void scm_request_cluster_init(struct scm_request *scmrq)
{
        scmrq->cluster.state = CLUSTER_NONE;
}

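/*
 * blk_rq_pos() counts 512-byte sectors, hence the << 9 below to convert
 * to byte offsets before computing the first and last cluster index each
 * request touches.
 */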
static bool clusters_intersect(struct scm_request *A, struct scm_request *B)
{
        unsigned long firstA, lastA, firstB, lastB;

        firstA = ((u64) blk_rq_pos(A->request) << 9) / CLUSTER_SIZE;
        lastA = (((u64) blk_rq_pos(A->request) << 9) +
                 blk_rq_bytes(A->request) - 1) / CLUSTER_SIZE;
        firstB = ((u64) blk_rq_pos(B->request) << 9) / CLUSTER_SIZE;
        lastB = (((u64) blk_rq_pos(B->request) << 9) +
                 blk_rq_bytes(B->request) - 1) / CLUSTER_SIZE;

        return (firstB <= lastA && firstA <= lastB);
}

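/*
 * Reserve the clusters a request touches. Overlapping readers may proceed
 * concurrently, but as soon as one of two intersecting requests is a
 * write, the later one must back off and retry.
 */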
bool scm_reserve_cluster(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        struct scm_request *iter;

        if (write_cluster_size == 0)
                return true;

        spin_lock(&bdev->lock);
        list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
                if (clusters_intersect(scmrq, iter) &&
                    (rq_data_dir(scmrq->request) == WRITE ||
                     rq_data_dir(iter->request) == WRITE)) {
                        spin_unlock(&bdev->lock);
                        return false;
                }
        }
        list_add(&scmrq->cluster.list, &bdev->cluster_list);
        spin_unlock(&bdev->lock);

        return true;
}

void scm_release_cluster(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        unsigned long flags;

        if (write_cluster_size == 0)
                return;

        spin_lock_irqsave(&bdev->lock, flags);
        list_del(&scmrq->cluster.list);
        spin_unlock_irqrestore(&bdev->lock, flags);
}

void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
{
        INIT_LIST_HEAD(&bdev->cluster_list);
}

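/*
 * Fill the AOB according to the cluster state: CLUSTER_READ fetches the
 * affected cluster(s) into the bounce buffer; CLUSTER_WRITE writes back
 * the buffer with the request's payload merged in.
 */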
static void scm_prepare_cluster_request(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        struct scm_device *scmdev = bdev->gendisk->private_data;
        struct request *req = scmrq->request;
        struct msb *msb = &scmrq->aob->msb[0];
        struct req_iterator iter;
        struct bio_vec *bv;
        u64 addr;

        switch (scmrq->cluster.state) {
        case CLUSTER_NONE:
                scmrq->cluster.state = CLUSTER_READ;
                /* fall through */
        case CLUSTER_READ:
                scmrq->aob->request.msb_count = 1;
                /* read the enclosing cluster(s) into the bounce buffer */
                addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
                msb->scm_addr = round_down(addr, CLUSTER_SIZE);
                /* ... remaining msb setup for the read ... */
                break;
        case CLUSTER_WRITE:
                /* pages in front of the payload come from the buffer */
                for (addr = msb->scm_addr;
                     addr < scmdev->address + ((u64) blk_rq_pos(req) << 9);
                     addr += PAGE_SIZE) {
                        /* ... map the next bounce buffer page ... */
                }
                /* then the request's own data */
                rq_for_each_segment(bv, req, iter) {
                        /* ... map the bio segment's page ... */
                }
                /* ... trailing bounce buffer pages ... */
                break;
        }
}

void scm_initiate_cluster_request(struct scm_request *scmrq)
{
        scm_prepare_cluster_request(scmrq);
        if (scm_start_aob(scmrq->aob))
                scm_request_requeue(scmrq);
}

bool scm_test_cluster_request(struct scm_request *scmrq)
{
        return scmrq->cluster.state != CLUSTER_NONE;
}

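/*
 * Interrupt path for cluster requests: once the CLUSTER_READ half has
 * completed, the state machine advances to CLUSTER_WRITE and the write
 * is started; after the write, the request is finished.
 */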
void scm_cluster_request_irq(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        unsigned long flags;

        switch (scmrq->cluster.state) {
        case CLUSTER_NONE:
                BUG();
                break;
        case CLUSTER_READ:
                if (scmrq->error) {
                        scm_request_finish(scmrq);
                        break;
                }
                scmrq->cluster.state = CLUSTER_WRITE;
                spin_lock_irqsave(&bdev->rq_lock, flags);
                scm_initiate_cluster_request(scmrq);
                spin_unlock_irqrestore(&bdev->rq_lock, flags);
                break;
        case CLUSTER_WRITE:
                scm_request_finish(scmrq);
                break;
        }
}

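/*
 * Only 0 (clustering disabled), 32, 64, or 128 pages are accepted; with
 * 4 KiB pages that is a 128 KiB, 256 KiB, or 512 KiB cluster.
 */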
bool scm_cluster_size_valid(void)
{
        return write_cluster_size == 0 || write_cluster_size == 32 ||
               write_cluster_size == 64 || write_cluster_size == 128;
}
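
/*
 * The driver core is expected to call scm_reserve_cluster() before
 * starting a request and scm_release_cluster() once it completes;
 * scm_cluster_request_irq() then drives the read-modify-write sequence.
 */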