Linux Kernel 3.7.1
block/blk-iopoll.c
/*
 * Functions related to interrupt-poll handling in the block layer. This
 * is similar to NAPI for network devices.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/blk-iopoll.h>
#include <linux/delay.h>

#include "blk.h"

int blk_iopoll_enabled = 1;
EXPORT_SYMBOL(blk_iopoll_enabled);

static unsigned int blk_iopoll_budget __read_mostly = 256;

static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);
/*
 * blk_iopoll_sched - add this blk_iopoll structure to the pending poll
 * list for this CPU and raise the block iopoll softirq.
 */
void blk_iopoll_sched(struct blk_iopoll *iop)
{
	unsigned long flags;

	local_irq_save(flags);
	list_add_tail(&iop->list, &__get_cpu_var(blk_cpu_iopoll));
	__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(blk_iopoll_sched);
/*
 * __blk_iopoll_complete - take this blk_iopoll structure off the pending
 * list and clear its scheduled state. Must be called with interrupts
 * disabled.
 */
void __blk_iopoll_complete(struct blk_iopoll *iop)
{
	list_del(&iop->list);
	smp_mb__before_clear_bit();
	clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(__blk_iopoll_complete);
/*
 * blk_iopoll_complete - interrupt-safe wrapper around
 * __blk_iopoll_complete(). Drivers call this from their poll function
 * when they have consumed less than their budget and want to leave
 * polled mode.
 */
void blk_iopoll_complete(struct blk_iopoll *iopoll)
{
	unsigned long flags;

	local_irq_save(flags);
	__blk_iopoll_complete(iopoll);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(blk_iopoll_complete);
static void blk_iopoll_softirq(struct softirq_action *h)
{
	struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
	int rearm = 0, budget = blk_iopoll_budget;
	unsigned long start_time = jiffies;

	local_irq_disable();

	while (!list_empty(list)) {
		struct blk_iopoll *iop;
		int work, weight;

		/*
		 * If softirq window is exhausted then punt.
		 */
		if (budget <= 0 || time_after(jiffies, start_time)) {
			rearm = 1;
			break;
		}

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		iop = list_entry(list->next, struct blk_iopoll, list);

		weight = iop->weight;
		work = 0;
		if (test_bit(IOPOLL_F_SCHED, &iop->state))
			work = iop->poll(iop, weight);

		budget -= work;

		local_irq_disable();

		/*
		 * Drivers must not modify the iopoll state, if they
		 * consume their assigned weight (or more, some drivers can't
		 * easily just stop processing, they have to complete an
		 * entire mask of commands). In such cases this code
		 * still "owns" the iopoll instance and therefore can
		 * move the instance around on the list at-will.
		 */
		if (work >= weight) {
			if (blk_iopoll_disable_pending(iop))
				__blk_iopoll_complete(iop);
			else
				list_move_tail(&iop->list, list);
		}
	}

	if (rearm)
		__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);

	local_irq_enable();
}
/*
 * blk_iopoll_disable - disable io polling on this blk_iopoll structure
 * and wait for any pending poll callback to finish.
 */
void blk_iopoll_disable(struct blk_iopoll *iop)
{
	set_bit(IOPOLL_F_DISABLE, &iop->state);
	while (test_and_set_bit(IOPOLL_F_SCHED, &iop->state))
		msleep(1);
	clear_bit(IOPOLL_F_DISABLE, &iop->state);
}
EXPORT_SYMBOL(blk_iopoll_disable);
/*
 * blk_iopoll_enable - mark this blk_iopoll structure as enabled. No
 * handler run is scheduled; the structure is only made eligible for
 * scheduling again.
 */
void blk_iopoll_enable(struct blk_iopoll *iop)
{
	BUG_ON(!test_bit(IOPOLL_F_SCHED, &iop->state));
	smp_mb__before_clear_bit();
	clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(blk_iopoll_enable);
/*
 * blk_iopoll_init - initialize this blk_iopoll structure with the given
 * weight (per-run completion budget) and poll handler. The structure
 * starts out in the scheduled state, so the driver must call
 * blk_iopoll_enable() before it can be used.
 */
void blk_iopoll_init(struct blk_iopoll *iop, int weight, blk_iopoll_fn *poll_fn)
{
	memset(iop, 0, sizeof(*iop));
	INIT_LIST_HEAD(&iop->list);
	iop->weight = weight;
	iop->poll = poll_fn;
	set_bit(IOPOLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(blk_iopoll_init);
static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
					   unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;

		local_irq_disable();
		list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
				 &__get_cpu_var(blk_cpu_iopoll));
		__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
	.notifier_call	= blk_iopoll_cpu_notify,
};
static __init int blk_iopoll_setup(void)
{
	int i;

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));

	open_softirq(BLOCK_IOPOLL_SOFTIRQ, blk_iopoll_softirq);
	register_hotcpu_notifier(&blk_iopoll_cpu_notifier);
	return 0;
}
subsys_initcall(blk_iopoll_setup);
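
The listing above is the complete implementation; a driver only touches the small exported API (blk_iopoll_init(), blk_iopoll_enable(), blk_iopoll_sched(), blk_iopoll_complete() and blk_iopoll_disable()). As a rough illustration of the intended call pattern, here is a minimal, hypothetical driver sketch. The names my_dev, my_poll, my_irq_handler and my_process_completions() are invented for the example; blk_iopoll_sched_prep() is the inline helper from <linux/blk-iopoll.h> that refuses to schedule while IOPOLL_F_DISABLE is set and atomically sets IOPOLL_F_SCHED.

/*
 * Hypothetical usage sketch, not part of blk-iopoll.c: my_dev, my_poll,
 * my_irq_handler and my_process_completions() stand in for a real
 * driver's completion machinery.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/blk-iopoll.h>

#define MY_IOPOLL_WEIGHT	64	/* completion budget per poll run */

struct my_dev {
	struct blk_iopoll iopoll;
	/* ... device state ... */
};

/* Assumed driver helper: reap up to @budget completions, return the count. */
int my_process_completions(struct my_dev *dev, int budget);

/* Poll callback, invoked from BLOCK_IOPOLL_SOFTIRQ context. */
static int my_poll(struct blk_iopoll *iop, int budget)
{
	struct my_dev *dev = container_of(iop, struct my_dev, iopoll);
	int done = my_process_completions(dev, budget);

	/*
	 * Consumed less than the budget: the queue is drained, so leave
	 * polled mode. A real driver would typically also unmask its
	 * completion interrupt here.
	 */
	if (done < budget)
		blk_iopoll_complete(iop);

	return done;
}

/*
 * Interrupt handler: a real driver would mask further completion
 * interrupts on the device, then punt the work to the iopoll softirq.
 */
static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dev = data;

	if (blk_iopoll_sched_prep(&dev->iopoll))
		blk_iopoll_sched(&dev->iopoll);

	return IRQ_HANDLED;
}

/* Setup: initialize with a weight and handler, then enable before use. */
static void my_setup_iopoll(struct my_dev *dev)
{
	blk_iopoll_init(&dev->iopoll, MY_IOPOLL_WEIGHT, my_poll);
	blk_iopoll_enable(&dev->iopoll);
}

With the default blk_iopoll_budget of 256 and a weight of 64 as above, one run of blk_iopoll_softirq() processes at most four full poll rounds, or roughly one jiffy of work, before rearming itself and yielding the CPU.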