blk-ioc.c
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);

static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/* Exit an icq.  Called with both ioc and q locked. */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.elevator_exit_icq_fn)
		et->ops.elevator_exit_icq_fn(icq);

	icq->flags |= ICQ_EXITED;
}

/* Release an icq.  Called with both ioc and q locked. */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(q->queue_lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting icq may call into put_io_context() through elevator
	 * which will trigger lockdep warning.  The ioc's are guaranteed to
	 * be different, use a different locking subclass here.  Use
	 * irqsave variant as there's no spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			schedule_work(&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);

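/*
 * Illustrative sketch, not part of the original file: get_io_context()
 * and put_io_context() pair to manage @refcount.  A caller handing an
 * ioc to an asynchronous consumer might do, roughly (the hand-off helper
 * is hypothetical):
 *
 *	get_io_context(ioc);		- take an extra reference
 *	hand_off_to_consumer(ioc);	- consumer calls put_io_context()
 *					  when it is done with the ioc
 *
 * Note that the final put may either free the ioc directly or, if icq's
 * are still linked, punt the release to the workqueue above.
 */
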
/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active().  If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioc_exit_icq() is called for
 * each icq.
 */
void put_io_context_active(struct io_context *ioc)
{
	struct hlist_node *n;
	unsigned long flags;
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need ioc lock to walk icq_list and q lock to exit icq.  Perform
	 * reverse double locking.  Read comment in ioc_release_fn() for
	 * explanation on the nested locking annotation.
	 */
retry:
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;
		if (spin_trylock(icq->q->queue_lock)) {
			ioc_exit_icq(icq);
			spin_unlock(icq->q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			goto retry;
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
 */
void ioc_clear_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	while (!list_empty(&q->icq_list)) {
		struct io_cq *icq = list_entry(q->icq_list.next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock(&ioc->lock);
		ioc_destroy_icq(icq);
		spin_unlock(&ioc->lock);
	}
}
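
/*
 * Illustrative sketch, not part of the original file: a caller such as an
 * elevator switch breaks all ioc associations with the queue lock held,
 * roughly:
 *
 *	spin_lock_irq(q->queue_lock);
 *	ioc_clear_queue(q);
 *	spin_unlock_irq(q->queue_lock);
 */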

int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files().  The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);

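/*
 * Illustrative sketch, not part of the original file: a typical caller
 * pins the submitting task's io_context and drops the reference when it
 * is done, roughly:
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(current, GFP_NOIO, NUMA_NO_NODE);
 *	if (ioc) {
 *		- associate @ioc with a request or look up its icq -
 *		put_io_context(ioc);
 *	}
 */
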
/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find a icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, it
 * will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.elevator_init_icq_fn)
			et->ops.elevator_init_icq_fn(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}
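
/*
 * Illustrative sketch, not part of the original file: request setup in an
 * elevator path typically tries a lookup first and only creates the icq
 * on a miss, roughly:
 *
 *	spin_lock_irq(q->queue_lock);
 *	icq = ioc_lookup_icq(ioc, q);
 *	spin_unlock_irq(q->queue_lock);
 *	if (!icq)
 *		icq = ioc_create_icq(ioc, q, GFP_NOIO);
 */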

static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
					     sizeof(struct io_context), 0,
					     SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);