Linux Kernel 3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
blk-iopoll.c
Go to the documentation of this file.
1 /*
2  * Functions related to interrupt-poll handling in the block layer. This
3  * is similar to NAPI for network devices.
4  */
5 #include <linux/kernel.h>
6 #include <linux/module.h>
7 #include <linux/init.h>
8 #include <linux/bio.h>
9 #include <linux/blkdev.h>
10 #include <linux/interrupt.h>
11 #include <linux/cpu.h>
12 #include <linux/blk-iopoll.h>
13 #include <linux/delay.h>
14 
15 #include "blk.h"
16 
19 
/* Max number of ->poll() work units consumed per softirq invocation. */
20 static unsigned int blk_iopoll_budget __read_mostly = 256;
21 
/* Per-CPU list of blk_iopoll instances awaiting a ->poll() run. */
22 static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);
23 
33 void blk_iopoll_sched(struct blk_iopoll *iop)
34 {
35  unsigned long flags;
36 
37  local_irq_save(flags);
38  list_add_tail(&iop->list, &__get_cpu_var(blk_cpu_iopoll));
40  local_irq_restore(flags);
41 }
43 
53 {
54  list_del(&iop->list);
57 }
59 
/**
 * blk_iopoll_complete - Mark this @iopoll as completed
 * @iopoll:   The parent iopoll structure
 *
 * Description:
 *     If a driver consumes less than the assigned budget in its run of the
 *     iopoll handler, it'll end the polled mode by calling this function. The
 *     iopoll handler will not be invoked again before blk_iopoll_sched_prep()
 *     is called.
 **/
void blk_iopoll_complete(struct blk_iopoll *iopoll)
{
	unsigned long flags;

	/* Irq-safe wrapper around __blk_iopoll_complete(). */
	local_irq_save(flags);
	__blk_iopoll_complete(iopoll);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(blk_iopoll_complete);
79 
80 static void blk_iopoll_softirq(struct softirq_action *h)
81 {
82  struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
83  int rearm = 0, budget = blk_iopoll_budget;
84  unsigned long start_time = jiffies;
85 
87 
88  while (!list_empty(list)) {
89  struct blk_iopoll *iop;
90  int work, weight;
91 
92  /*
93  * If softirq window is exhausted then punt.
94  */
95  if (budget <= 0 || time_after(jiffies, start_time)) {
96  rearm = 1;
97  break;
98  }
99 
101 
102  /* Even though interrupts have been re-enabled, this
103  * access is safe because interrupts can only add new
104  * entries to the tail of this list, and only ->poll()
105  * calls can remove this head entry from the list.
106  */
107  iop = list_entry(list->next, struct blk_iopoll, list);
108 
109  weight = iop->weight;
110  work = 0;
111  if (test_bit(IOPOLL_F_SCHED, &iop->state))
112  work = iop->poll(iop, weight);
113 
114  budget -= work;
115 
117 
118  /*
119  * Drivers must not modify the iopoll state, if they
120  * consume their assigned weight (or more, some drivers can't
121  * easily just stop processing, they have to complete an
122  * entire mask of commands).In such cases this code
123  * still "owns" the iopoll instance and therefore can
124  * move the instance around on the list at-will.
125  */
126  if (work >= weight) {
127  if (blk_iopoll_disable_pending(iop))
129  else
130  list_move_tail(&iop->list, list);
131  }
132  }
133 
134  if (rearm)
136 
138 }
139 
148 {
150  while (test_and_set_bit(IOPOLL_F_SCHED, &iop->state))
151  msleep(1);
153 }
155 
164 void blk_iopoll_enable(struct blk_iopoll *iop)
165 {
169 }
171 
182 void blk_iopoll_init(struct blk_iopoll *iop, int weight, blk_iopoll_fn *poll_fn)
183 {
184  memset(iop, 0, sizeof(*iop));
185  INIT_LIST_HEAD(&iop->list);
186  iop->weight = weight;
187  iop->poll = poll_fn;
188  set_bit(IOPOLL_F_SCHED, &iop->state);
189 }
191 
192 static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
193  unsigned long action, void *hcpu)
194 {
195  /*
196  * If a CPU goes away, splice its entries to the current CPU
197  * and trigger a run of the softirq
198  */
199  if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
200  int cpu = (unsigned long) hcpu;
201 
203  list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
204  &__get_cpu_var(blk_cpu_iopoll));
207  }
208 
209  return NOTIFY_OK;
210 }
211 
/* Registered at init; routes CPU hotplug events to blk_iopoll_cpu_notify(). */
212 static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
213  .notifier_call = blk_iopoll_cpu_notify,
214 };
215 
216 static __init int blk_iopoll_setup(void)
217 {
218  int i;
219 
221  INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));
222 
223  open_softirq(BLOCK_IOPOLL_SOFTIRQ, blk_iopoll_softirq);
224  register_hotcpu_notifier(&blk_iopoll_cpu_notifier);
225  return 0;
226 }
227 subsys_initcall(blk_iopoll_setup);