Linux Kernel 3.7.1
blk-tag.c
/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "blk.h"

/* blk_queue_find_tag - find the request currently holding @tag on @q, if any. */
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
        return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);

static int __blk_free_tags(struct blk_queue_tag *bqt)
{
        int retval;

        retval = atomic_dec_and_test(&bqt->refcnt);
        if (retval) {
                BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
                                                        bqt->max_depth);

                kfree(bqt->tag_index);
                bqt->tag_index = NULL;

                kfree(bqt->tag_map);
                bqt->tag_map = NULL;

                kfree(bqt);
        }

        return retval;
}

/* __blk_queue_free_tags - release @q's tag maintenance info. */
void __blk_queue_free_tags(struct request_queue *q)
{
        struct blk_queue_tag *bqt = q->queue_tags;

        if (!bqt)
                return;

        __blk_free_tags(bqt);

        q->queue_tags = NULL;
        queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}

/* blk_free_tags - free an externally managed tag map; all users must be gone. */
void blk_free_tags(struct blk_queue_tag *bqt)
{
        if (unlikely(!__blk_free_tags(bqt)))
                BUG();
}
EXPORT_SYMBOL(blk_free_tags);

/* blk_queue_free_tags - disable tagged queuing on @q, leaving the queue usable. */
void blk_queue_free_tags(struct request_queue *q)
{
        queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);

static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
        struct request **tag_index;
        unsigned long *tag_map;
        int nr_ulongs;

        if (q && depth > q->nr_requests * 2) {
                depth = q->nr_requests * 2;
                printk(KERN_ERR "%s: adjusted depth to %d\n",
                       __func__, depth);
        }

        tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
        if (!tag_index)
                goto fail;

        nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
        tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
        if (!tag_map)
                goto fail;

        tags->real_max_depth = depth;
        tags->max_depth = depth;
        tags->tag_index = tag_index;
        tags->tag_map = tag_map;

        return 0;
fail:
        kfree(tag_index);
        return -ENOMEM;
}

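/*
 * Note on the bitmap sizing in init_tag_map() (illustrative figures, not
 * from the original file): with depth = 70 and BITS_PER_LONG = 64,
 * ALIGN(70, 64) = 128 and nr_ulongs = 128 / 64 = 2, so two unsigned longs
 * (128 bits) back the 70 possible tags -- one bit per tag, rounded up to
 * whole words.
 */
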
static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
                                                   int depth)
{
        struct blk_queue_tag *tags;

        tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
        if (!tags)
                goto fail;

        if (init_tag_map(q, tags, depth))
                goto fail;

        atomic_set(&tags->refcnt, 1);
        return tags;
fail:
        kfree(tags);
        return NULL;
}

/* blk_init_tags - initialize a stand-alone tag map for an external user. */
struct blk_queue_tag *blk_init_tags(int depth)
{
        return __blk_queue_init_tags(NULL, depth);
}
EXPORT_SYMBOL(blk_init_tags);

/*
 * blk_queue_init_tags - initialize the queue tag info.  The queue lock must
 * be held if this is called to resize an existing map.
 */
int blk_queue_init_tags(struct request_queue *q, int depth,
                        struct blk_queue_tag *tags)
{
        int rc;

        BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

        if (!tags && !q->queue_tags) {
                tags = __blk_queue_init_tags(q, depth);

                if (!tags)
                        return -ENOMEM;

        } else if (q->queue_tags) {
                rc = blk_queue_resize_tags(q, depth);
                if (rc)
                        return rc;
                queue_flag_set(QUEUE_FLAG_QUEUED, q);
                return 0;
        } else
                atomic_inc(&tags->refcnt);

        /*
         * assign it, all done
         */
        q->queue_tags = tags;
        queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
        INIT_LIST_HEAD(&q->tag_busy_list);
        return 0;
}
EXPORT_SYMBOL(blk_queue_init_tags);

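/*
 * Example (not part of the original file): a minimal sketch of enabling
 * tagged queuing at probe time.  The depth of 64 and the example_ prefix
 * are hypothetical; passing a NULL tag map asks the block layer to allocate
 * one, while a map obtained from blk_init_tags() could instead be shared
 * between several queues.
 */
#if 0
static int example_enable_tcq(struct request_queue *q)
{
        /* allocate and attach a per-queue tag map 64 entries deep */
        return blk_queue_init_tags(q, 64, NULL);
}
#endif
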
/*
 * blk_queue_resize_tags - change the queueing depth.  Must be called with
 * the queue lock held.
 */
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
        struct blk_queue_tag *bqt = q->queue_tags;
        struct request **tag_index;
        unsigned long *tag_map;
        int max_depth, nr_ulongs;

        if (!bqt)
                return -ENXIO;

        /*
         * If real_max_depth is already large enough, just adjust
         * max_depth.  *NOTE* requests with tag values between new_depth
         * and real_max_depth can be in flight, so the tag map cannot be
         * shrunk blindly here.
         */
        if (new_depth <= bqt->real_max_depth) {
                bqt->max_depth = new_depth;
                return 0;
        }

        /*
         * Currently cannot replace a shared tag map with a new
         * one, so error out if this is the case
         */
        if (atomic_read(&bqt->refcnt) != 1)
                return -EBUSY;

        /*
         * save the old state info, so we can copy it back
         */
        tag_index = bqt->tag_index;
        tag_map = bqt->tag_map;
        max_depth = bqt->real_max_depth;

        if (init_tag_map(q, bqt, new_depth))
                return -ENOMEM;

        memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
        nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
        memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

        kfree(tag_index);
        kfree(tag_map);
        return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);

/*
 * blk_queue_end_tag - end tag operations for a request.  The queue lock
 * must be held when calling this.
 */
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
        struct blk_queue_tag *bqt = q->queue_tags;
        unsigned tag = rq->tag; /* negative tags invalid */

        BUG_ON(tag >= bqt->real_max_depth);

        list_del_init(&rq->queuelist);
        rq->cmd_flags &= ~REQ_QUEUED;
        rq->tag = -1;

        if (unlikely(bqt->tag_index[tag] == NULL))
                printk(KERN_ERR "%s: tag %d is missing\n",
                       __func__, tag);

        bqt->tag_index[tag] = NULL;

        if (unlikely(!test_bit(tag, bqt->tag_map))) {
                printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
                       __func__, tag);
                return;
        }
        /*
         * The tag_map bit acts as a lock for tag_index[bit], so we need
         * unlock memory barrier semantics.
         */
        clear_bit_unlock(tag, bqt->tag_map);
}
EXPORT_SYMBOL(blk_queue_end_tag);

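/*
 * Example (not part of the original file): a minimal completion-path sketch,
 * assuming the hardware reports the finished tag.  example_complete is a
 * hypothetical name; the queue lock is expected to be held, and the tag is
 * released before the request itself is ended.
 */
#if 0
static void example_complete(struct request_queue *q, int tag, int error)
{
        struct request *rq = blk_queue_find_tag(q, tag);

        if (!rq)
                return;                 /* unknown or already completed tag */

        blk_queue_end_tag(q, rq);       /* clears REQ_QUEUED and the bitmap bit */
        __blk_end_request_all(rq, error);
}
#endif
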
/*
 * blk_queue_start_tag - find a free tag and assign it to @rq.  Returns 0 if
 * a tag was assigned, 1 if the queue is full.  The queue lock must be held.
 */
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
        struct blk_queue_tag *bqt = q->queue_tags;
        unsigned max_depth;
        int tag;

        if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
                printk(KERN_ERR
                       "%s: request %p for device [%s] already tagged %d",
                       __func__, rq,
                       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
                BUG();
        }

        /*
         * Protect against shared tag maps, as we may not have exclusive
         * access to the tag map.
         *
         * We reserve a few tags just for sync IO, since we don't want
         * to starve sync IO on behalf of flooding async IO.
         */
        max_depth = bqt->max_depth;
        if (!rq_is_sync(rq) && max_depth > 1) {
                max_depth -= 2;
                if (!max_depth)
                        max_depth = 1;
                if (q->in_flight[BLK_RW_ASYNC] > max_depth)
                        return 1;
        }

        do {
                tag = find_first_zero_bit(bqt->tag_map, max_depth);
                if (tag >= max_depth)
                        return 1;

        } while (test_and_set_bit_lock(tag, bqt->tag_map));
        /*
         * We need lock ordering semantics given by test_and_set_bit_lock.
         * See blk_queue_end_tag for details.
         */

        rq->cmd_flags |= REQ_QUEUED;
        rq->tag = tag;
        bqt->tag_index[tag] = rq;
        blk_start_request(rq);
        list_add(&rq->queuelist, &q->tag_busy_list);
        return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);

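/*
 * Example (not part of the original file): a minimal sketch of a driver
 * request_fn handing out tags.  example_request_fn is a hypothetical name;
 * since blk_queue_start_tag() calls blk_start_request() itself, the driver
 * peeks at the queue head instead of fetching it.
 */
#if 0
static void example_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_peek_request(q)) != NULL) {
                if (blk_queue_start_tag(q, rq))
                        break;  /* all tags in use; retry after a completion */
                /* rq->tag now identifies rq to the hardware; issue it */
        }
}
#endif
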
/*
 * blk_queue_invalidate_tags - clear the block-layer side of the tag map and
 * requeue all tagged requests, e.g. when the hardware must abort everything
 * it has pending.  The queue lock must be held.
 */
void blk_queue_invalidate_tags(struct request_queue *q)
{
        struct list_head *tmp, *n;

        list_for_each_safe(tmp, n, &q->tag_busy_list)
                blk_requeue_request(q, list_entry_rq(tmp));
}
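
/*
 * Example (not part of the original file): a sketch of calling
 * blk_queue_invalidate_tags() from an error handler, assuming the caller
 * does not already hold the queue lock.  Every tagged request is pushed
 * back onto the request queue to be re-issued later.
 */
#if 0
static void example_abort_all(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_queue_invalidate_tags(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
#endif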