Linux Kernel 3.7.1
scm_blk.c
/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <[email protected]>
 */

#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

debug_info_t *scm_debug;
static int scm_major;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");

MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");

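/*
 * Free one preallocated request: the AOB page, the aidaw page, the
 * cluster state and the surrounding aob_rq_header.
 */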
static void __scm_free_rq(struct scm_request *scmrq)
{
        struct aob_rq_header *aobrq = to_aobrq(scmrq);

        free_page((unsigned long) scmrq->aob);
        free_page((unsigned long) scmrq->aidaw);
        __scm_free_rq_cluster(scmrq);
        kfree(aobrq);
}

static void scm_free_rqs(void)
{
        struct list_head *iter, *safe;
        struct scm_request *scmrq;

        spin_lock_irq(&list_lock);
        list_for_each_safe(iter, safe, &inactive_requests) {
                scmrq = list_entry(iter, struct scm_request, list);
                list_del(&scmrq->list);
                __scm_free_rq(scmrq);
        }
        spin_unlock_irq(&list_lock);
}

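/*
 * Allocate one request: an aob_rq_header with the scm_request embedded
 * in its data area, plus one zeroed GFP_DMA page each for the AOB and
 * the aidaw list, and add it to the pool of inactive requests.
 */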
static int __scm_alloc_rq(void)
{
        struct aob_rq_header *aobrq;
        struct scm_request *scmrq;

        aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
        if (!aobrq)
                return -ENOMEM;

        scmrq = (void *) aobrq->data;
        scmrq->aidaw = (void *) get_zeroed_page(GFP_DMA);
        scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
        if (!scmrq->aob || !scmrq->aidaw) {
                __scm_free_rq(scmrq);
                return -ENOMEM;
        }

        if (__scm_alloc_rq_cluster(scmrq)) {
                __scm_free_rq(scmrq);
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&scmrq->list);
        spin_lock_irq(&list_lock);
        list_add(&scmrq->list, &inactive_requests);
        spin_unlock_irq(&list_lock);

        return 0;
}

static int scm_alloc_rqs(unsigned int nrqs)
{
        int ret = 0;

        while (nrqs-- && !ret)
                ret = __scm_alloc_rq();

        return ret;
}

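/*
 * Take a request from the pool of inactive requests; returns NULL when
 * the pool is exhausted. scm_request_done() puts a request back.
 */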
static struct scm_request *scm_request_fetch(void)
{
        struct scm_request *scmrq = NULL;

        spin_lock(&list_lock);
        if (list_empty(&inactive_requests))
                goto out;
        scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
        list_del(&scmrq->list);
out:
        spin_unlock(&list_lock);
        return scmrq;
}

static void scm_request_done(struct scm_request *scmrq)
{
        unsigned long flags;

        spin_lock_irqsave(&list_lock, flags);
        list_add(&scmrq->list, &inactive_requests);
        spin_unlock_irqrestore(&list_lock, flags);
}

static int scm_open(struct block_device *blkdev, fmode_t mode)
{
        return scm_get_ref();
}

static int scm_release(struct gendisk *gendisk, fmode_t mode)
{
        scm_put_ref();
        return 0;
}

static const struct block_device_operations scm_blk_devops = {
        .owner = THIS_MODULE,
        .open = scm_open,
        .release = scm_release,
};

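/*
 * Translate a block layer request into an AOB: set up a single message
 * sub block for a 4K block size transfer starting at the device address
 * that corresponds to the request's 512-byte sector offset, and fill
 * the indirect data address list with one aidaw per request segment.
 */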
static void scm_request_prepare(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        struct scm_device *scmdev = bdev->gendisk->private_data;
        struct aidaw *aidaw = scmrq->aidaw;
        struct msb *msb = &scmrq->aob->msb[0];
        struct req_iterator iter;
        struct bio_vec *bv;

        msb->bs = MSB_BS_4K;
        scmrq->aob->request.msb_count = 1;
        msb->scm_addr = scmdev->address +
                ((u64) blk_rq_pos(scmrq->request) << 9);
        msb->oc = (rq_data_dir(scmrq->request) == READ) ?
                MSB_OC_READ : MSB_OC_WRITE;
        msb->flags |= MSB_FLAG_IDA;
        msb->data_addr = (u64) aidaw;

        rq_for_each_segment(bv, scmrq->request, iter) {
                WARN_ON(bv->bv_offset);
                msb->blk_count += bv->bv_len >> 12;
                aidaw->data_addr = (u64) page_address(bv->bv_page);
                aidaw++;
        }
}

static inline void scm_request_init(struct scm_blk_dev *bdev,
                                    struct scm_request *scmrq,
                                    struct request *req)
{
        struct aob_rq_header *aobrq = to_aobrq(scmrq);
        struct aob *aob = scmrq->aob;

        memset(aob, 0, sizeof(*aob));
        memset(scmrq->aidaw, 0, PAGE_SIZE);
        aobrq->scmdev = bdev->scmdev;
        aob->request.cmd_code = ARQB_CMD_MOVE;
        aob->request.data = (u64) aobrq;
        scmrq->request = req;
        scmrq->bdev = bdev;
        scmrq->retries = 4;
        scmrq->error = 0;
        scm_request_cluster_init(scmrq);
}

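/*
 * Make sure the queue is run again: with requests still in flight the
 * next interrupt triggers the restart, otherwise schedule a delayed
 * queue run.
 */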
static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
{
        if (atomic_read(&bdev->queued_reqs)) {
                /* Queue restart is triggered by the next interrupt. */
                return;
        }
        blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
}

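/*
 * scm_request_requeue() puts the block layer request back on the queue,
 * scm_request_finish() completes it; both release the cluster and
 * return the scm_request to the pool.
 */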
void scm_request_requeue(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;

        scm_release_cluster(scmrq);
        blk_requeue_request(bdev->rq, scmrq->request);
        scm_request_done(scmrq);
        scm_ensure_queue_restart(bdev);
}

void scm_request_finish(struct scm_request *scmrq)
{
        scm_release_cluster(scmrq);
        blk_end_request_all(scmrq->request, scmrq->error);
        scm_request_done(scmrq);
}

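/*
 * Request function of the block queue: turn each filesystem request
 * into a preallocated scm_request and start the AOB. Returns early
 * when no scm_request is free, the affected cluster is busy, or
 * scm_start_aob() fails; the queue is then run again later.
 */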
static void scm_blk_request(struct request_queue *rq)
{
        struct scm_device *scmdev = rq->queuedata;
        struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
        struct scm_request *scmrq;
        struct request *req;
        int ret;

        while ((req = blk_peek_request(rq))) {
                if (req->cmd_type != REQ_TYPE_FS)
                        continue;

                scmrq = scm_request_fetch();
                if (!scmrq) {
                        SCM_LOG(5, "no request");
                        scm_ensure_queue_restart(bdev);
                        return;
                }
                scm_request_init(bdev, scmrq, req);
                if (!scm_reserve_cluster(scmrq)) {
                        SCM_LOG(5, "cluster busy");
                        scm_request_done(scmrq);
                        return;
                }
                if (scm_need_cluster_request(scmrq)) {
                        blk_start_request(req);
                        scm_initiate_cluster_request(scmrq);
                        return;
                }
                scm_request_prepare(scmrq);
                blk_start_request(req);

                ret = scm_start_aob(scmrq->aob);
                if (ret) {
                        SCM_LOG(5, "no subchannel");
                        scm_request_requeue(scmrq);
                        return;
                }
                atomic_inc(&bdev->queued_reqs);
        }
}

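/*
 * Log why a request failed: a timeout or the raw AOB response block;
 * complain via pr_err() once all retries are used up.
 */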
static void __scmrq_log_error(struct scm_request *scmrq)
{
        struct aob *aob = scmrq->aob;

        if (scmrq->error == -ETIMEDOUT)
                SCM_LOG(1, "Request timeout");
        else {
                SCM_LOG(1, "Request error");
                SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
        }
        if (scmrq->retries)
                SCM_LOG(1, "Retry request");
        else
                pr_err("An I/O operation to SCM failed with rc=%d\n",
                       scmrq->error);
}

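/*
 * Completion callback for a started AOB: record the error, move the
 * request to the finished list and kick the tasklet. Runs in interrupt
 * context.
 */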
void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
{
        struct scm_request *scmrq = data;
        struct scm_blk_dev *bdev = scmrq->bdev;

        scmrq->error = error;
        if (error)
                __scmrq_log_error(scmrq);

        spin_lock(&bdev->lock);
        list_add_tail(&scmrq->list, &bdev->finished_requests);
        spin_unlock(&bdev->lock);
        tasklet_hi_schedule(&bdev->tasklet);
}

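/*
 * Tasklet that drains the finished list: restart failed requests while
 * retries remain, start pending cluster requests, complete everything
 * else, and finally run the queue to pick up waiting requests.
 */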
static void scm_blk_tasklet(struct scm_blk_dev *bdev)
{
        struct scm_request *scmrq;
        unsigned long flags;

        spin_lock_irqsave(&bdev->lock, flags);
        while (!list_empty(&bdev->finished_requests)) {
                scmrq = list_first_entry(&bdev->finished_requests,
                                         struct scm_request, list);
                list_del(&scmrq->list);
                spin_unlock_irqrestore(&bdev->lock, flags);

                if (scmrq->error && scmrq->retries-- > 0) {
                        if (scm_start_aob(scmrq->aob)) {
                                spin_lock_irqsave(&bdev->rq_lock, flags);
                                scm_request_requeue(scmrq);
                                spin_unlock_irqrestore(&bdev->rq_lock, flags);
                        }
                        /* Request restarted or requeued, handle next. */
                        spin_lock_irqsave(&bdev->lock, flags);
                        continue;
                }

                if (scm_test_cluster_request(scmrq)) {
                        scm_initiate_cluster_request(scmrq);
                        spin_lock_irqsave(&bdev->lock, flags);
                        continue;
                }

                scm_request_finish(scmrq);
                atomic_dec(&bdev->queued_reqs);
                spin_lock_irqsave(&bdev->lock, flags);
        }
        spin_unlock_irqrestore(&bdev->lock, flags);
        /* Look out for more requests. */
        blk_run_queue(bdev->rq);
}

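/*
 * Set up the block device for one SCM increment: initialize locks,
 * tasklet and request queue (4K logical blocks, segments limited so a
 * single aidaw page suffices), allocate the gendisk, derive its name
 * (scma..scmz, then scmaa..scmzz) from the device index, and register
 * the disk with a capacity given in 512-byte sectors.
 */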
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
        struct request_queue *rq;
        int len, ret = -ENOMEM;
        unsigned int devindex, nr_max_blk;

        devindex = atomic_inc_return(&nr_devices) - 1;
        /* scma..scmz + scmaa..scmzz */
        if (devindex > 701) {
                ret = -ENODEV;
                goto out;
        }

        bdev->scmdev = scmdev;
        spin_lock_init(&bdev->rq_lock);
        spin_lock_init(&bdev->lock);
        INIT_LIST_HEAD(&bdev->finished_requests);
        atomic_set(&bdev->queued_reqs, 0);
        tasklet_init(&bdev->tasklet,
                     (void (*)(unsigned long)) scm_blk_tasklet,
                     (unsigned long) bdev);

        rq = blk_init_queue(scm_blk_request, &bdev->rq_lock);
        if (!rq)
                goto out;

        bdev->rq = rq;
        nr_max_blk = min(scmdev->nr_max_block,
                         (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));

        blk_queue_logical_block_size(rq, 1 << 12);
        blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
        blk_queue_max_segments(rq, nr_max_blk);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
        scm_blk_dev_cluster_setup(bdev);

        bdev->gendisk = alloc_disk(SCM_NR_PARTS);
        if (!bdev->gendisk)
                goto out_queue;

        rq->queuedata = scmdev;
        bdev->gendisk->driverfs_dev = &scmdev->dev;
        bdev->gendisk->private_data = scmdev;
        bdev->gendisk->fops = &scm_blk_devops;
        bdev->gendisk->queue = rq;
        bdev->gendisk->major = scm_major;
        bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;

        len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
        if (devindex > 25) {
                len += snprintf(bdev->gendisk->disk_name + len,
                                DISK_NAME_LEN - len, "%c",
                                'a' + (devindex / 26) - 1);
                devindex = devindex % 26;
        }
        snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
                 'a' + devindex);

        /* 512 byte sectors */
        set_capacity(bdev->gendisk, scmdev->size >> 9);
        add_disk(bdev->gendisk);
        return 0;

out_queue:
        blk_cleanup_queue(rq);
out:
        atomic_dec(&nr_devices);
        return ret;
}

void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
        tasklet_kill(&bdev->tasklet);
        del_gendisk(bdev->gendisk);
        blk_cleanup_queue(bdev->gendisk->queue);
        put_disk(bdev->gendisk);
}

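/*
 * Module init: verify the configured cluster size, register the block
 * major, preallocate nr_requests requests, set up the s390 debug
 * feature and register the SCM device driver.
 */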
static int __init scm_blk_init(void)
{
        int ret = -EINVAL;

        if (!scm_cluster_size_valid())
                goto out;

        ret = register_blkdev(0, "scm");
        if (ret < 0)
                goto out;

        scm_major = ret;
        if (scm_alloc_rqs(nr_requests))
                goto out_unreg;

        scm_debug = debug_register("scm_log", 16, 1, 16);
        if (!scm_debug)
                goto out_free;

        debug_set_level(scm_debug, 2);

        ret = scm_drv_init();
        if (ret)
                goto out_dbf;

        return ret;

out_dbf:
        debug_unregister(scm_debug);
out_free:
        scm_free_rqs();
out_unreg:
        unregister_blkdev(scm_major, "scm");
out:
        return ret;
}
module_init(scm_blk_init);

static void __exit scm_blk_cleanup(void)
{
        scm_drv_cleanup();
        debug_unregister(scm_debug);
        scm_free_rqs();
        unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);