Linux Kernel
3.7.1
drivers/md/md.h
/*
   md.h : kernel internal structure of the Linux MD driver
          Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#ifndef _MD_MD_H
#define _MD_MD_H

#include <linux/blkdev.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#define MaxSector (~(sector_t)0)

/* Bad block numbers are stored sorted in a single page.
 * 64 bits are used for each block or extent.
 * 54 bits are sector number, 9 bits are extent size,
 * 1 bit is an 'acknowledged' flag.
 */
#define MD_MAX_BADBLOCKS	(PAGE_SIZE/8)
/*
 * MD's 'extended' device
 */
struct md_rdev {
	struct list_head same_set;	/* RAID devices within the same set */

	sector_t sectors;		/* Device size (in 512-byte sectors) */
	struct mddev *mddev;		/* RAID array if running */
	int last_events;		/* IO event timestamp */

	/*
	 * If meta_bdev is non-NULL, it means that a separate device is
	 * being used to store the metadata (superblock/bitmap) which
	 * would otherwise be contained on the same device as the data (bdev).
	 */
	struct block_device *meta_bdev;
	struct block_device *bdev;	/* block device handle */

	struct page *sb_page, *bb_page;
	int sb_loaded;
	__u64 sb_events;
	sector_t data_offset;		/* start of data in array */
	sector_t new_data_offset;	/* only relevant while reshaping */
	sector_t sb_start;		/* offset of the super block (in 512-byte sectors) */
	int sb_size;			/* bytes in the superblock */
	int preferred_minor;		/* autorun support */

	struct kobject kobj;

	/* A device can be in one of three states based on two flags:
	 * Not working:			faulty==1 in_sync==0
	 * Fully working:		faulty==0 in_sync==1
	 * Working, but not in sync
	 * with the array:		faulty==0 in_sync==0
	 *
	 * It can never have faulty==1, in_sync==1
	 * This reduces the burden of testing multiple flags in many cases
	 */

	unsigned long flags;		/* bit set of 'enum flag_bits' bits. */
	wait_queue_head_t blocked_wait;

	int desc_nr;			/* descriptor index in the superblock */
	int raid_disk;			/* role of device in array */
	int new_raid_disk;		/* role that the device will have in
					 * the array after a level-change completes.
					 */
	int saved_raid_disk;		/* role that device used to have in the
					 * array and could again if we did a partial
					 * resync from the bitmap
					 */
	sector_t recovery_offset;	/* If this device has been partially
					 * recovered, this is where we were
					 * up to.
					 */

	atomic_t nr_pending;		/* number of pending requests.
					 * only maintained for arrays that
					 * support hot removal
					 */
	atomic_t read_errors;		/* number of consecutive read errors that
					 * we have tried to ignore.
					 */
	struct timespec last_read_error;	/* monotonic time since our
						 * last read error
						 */
	atomic_t corrected_errors;	/* number of corrected read errors,
					 * for reporting to userspace and storing
					 * in superblock.
					 */
	struct work_struct del_work;	/* used for delayed sysfs removal */

	struct sysfs_dirent *sysfs_state;	/* handle for 'state'
						 * sysfs entry */

	struct badblocks {
		int count;		/* count of bad blocks */
		int unacked_exist;	/* there probably are unacknowledged
					 * bad blocks. This is only cleared
					 * when a read discovers none
					 */
		int shift;		/* shift from sectors to block size
					 * a -ve shift means badblocks are
					 * disabled.*/
		u64 *page;		/* badblock list */
		int changed;
		seqlock_t lock;

		sector_t sector;
		sector_t size;		/* in sectors */
	} badblocks;
};
enum flag_bits {
	Faulty,			/* device is known to have a fault */
	In_sync,		/* device is in_sync with rest of array */
	Unmerged,		/* device is being added to array and should
				 * be considered for bvec_merge_fn but not
				 * yet for actual IO
				 */
	WriteMostly,		/* Avoid reading if at all possible */
	AutoDetected,		/* added by auto-detect */
	Blocked,		/* An error occurred but has not yet
				 * been acknowledged by the metadata
				 * handler, so don't allow writes
				 * until it is cleared */
	WriteErrorSeen,		/* A write error has been seen on this
				 * device
				 */
	FaultRecorded,		/* Intermediate state for clearing
				 * Blocked. The Fault is/will-be
				 * recorded in the metadata, but that
				 * metadata hasn't been stored safely
				 * on disk yet.
				 */
	BlockedBadBlocks,	/* A writer is blocked because they
				 * found an unacknowledged bad-block.
				 * This can safely be cleared at any
				 * time, and the writer will re-check.
				 * It may be set at any time, and at
				 * worst the writer will timeout and
				 * re-check. So setting it as
				 * accurately as possible is good, but
				 * not absolutely critical.
				 */
	WantReplacement,	/* This device is a candidate to be
				 * hot-replaced, either because it has
				 * reported some faults, or because
				 * of explicit request.
				 */
	Replacement,		/* This device is a replacement for
				 * a want_replacement device with same
				 * raid_disk number.
				 */
};
#define BB_LEN_MASK	(0x00000000000001FFULL)
#define BB_OFFSET_MASK	(0x7FFFFFFFFFFFFE00ULL)
#define BB_ACK_MASK	(0x8000000000000000ULL)
#define BB_MAX_LEN	512
#define BB_OFFSET(x)	(((x) & BB_OFFSET_MASK) >> 9)
#define BB_LEN(x)	(((x) & BB_LEN_MASK) + 1)
#define BB_ACK(x)	(!!((x) & BB_ACK_MASK))
#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63))
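/*
 * Illustrative sketch (not part of md.h): how a single 64-bit bad-block
 * entry round-trips through the packing macros above.  The values are
 * made up for the example, and the helper name is hypothetical.
 */
static inline int bb_entry_example(void)
{
	u64 ent = BB_MAKE(123456ULL, 8, 1);	/* 8 sectors bad at sector 123456, acked */

	/* 54-bit start sector, 9 bits hold (length - 1), top bit is the ack flag */
	return BB_OFFSET(ent) == 123456 && BB_LEN(ent) == 8 && BB_ACK(ent);
}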
extern int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
			  sector_t *first_bad, int *bad_sectors);
static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
			      sector_t *first_bad, int *bad_sectors)
{
	if (unlikely(rdev->badblocks.count)) {
		int rv = md_is_badblock(&rdev->badblocks, rdev->data_offset + s,
					sectors,
					first_bad, bad_sectors);
		if (rv)
			*first_bad -= rdev->data_offset;
		return rv;
	}
	return 0;
}
extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			      int is_new);
extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
				int is_new);
extern void md_ack_all_badblocks(struct badblocks *bb);
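/*
 * Illustrative sketch (not part of md.h): probing a read range before
 * issuing IO, in the style of the raid1/raid10 callers of is_badblock().
 * A positive return from is_badblock() means acknowledged bad blocks
 * overlap the range; a negative return means unacknowledged ones might.
 * The helper name is hypothetical.
 */
static inline int example_range_clean(struct md_rdev *rdev,
				      sector_t sector, int sectors)
{
	sector_t first_bad;
	int bad_sectors;

	return is_badblock(rdev, sector, sectors,
			   &first_bad, &bad_sectors) == 0;
}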
struct mddev {
	void *private;
	struct md_personality *pers;
	dev_t unit;
	int md_minor;
	struct list_head disks;
	unsigned long flags;
#define MD_CHANGE_DEVS	0	/* Some device status has changed */
#define MD_CHANGE_CLEAN	1	/* transition to or from 'clean' */
#define MD_CHANGE_PENDING 2	/* switch from 'clean' to 'active' in progress */
#define MD_ARRAY_FIRST_USE 3	/* First use of array, needs initialization */

	int suspended;
	atomic_t active_io;
	int ro;
	int sysfs_active;		/* set when sysfs deletes
					 * are happening, so run/
					 * takeover/stop are not safe
					 */
	int ready;			/* See when safe to pass
					 * IO requests down */
	struct gendisk *gendisk;

	struct kobject kobj;
	int hold_active;
#define	UNTIL_IOCTL	1
#define	UNTIL_STOP	2

	/* Superblock information */
	int major_version,
	    minor_version,
	    patch_version;
	int persistent;
	int external;			/* metadata is
					 * managed externally */
	char metadata_type[17];		/* externally set*/
	int chunk_sectors;
	time_t ctime, utime;
	int level, layout;
	char clevel[16];
	int raid_disks;
	int max_disks;
	sector_t dev_sectors;		/* used size of
					 * component devices */
	sector_t array_sectors;		/* exported array size */
	int external_size;		/* size managed
					 * externally */
	__u64 events;
	/* If the last 'event' was simply a clean->dirty transition, and
	 * we didn't write it to the spares, then it is safe and simple
	 * to just decrement the event count on a dirty->clean transition.
	 * So we record that possibility here.
	 */
	int can_decrease_events;

	char uuid[16];

	/* If the array is being reshaped, we need to record the
	 * new shape and an indication of where we are up to.
	 * This is written to the superblock.
	 * If reshape_position is MaxSector, then no reshape is happening (yet).
	 */
	sector_t reshape_position;
	int delta_disks, new_level, new_layout;
	int new_chunk_sectors;
	int reshape_backwards;

	struct md_thread *thread;	/* management thread */
	struct md_thread *sync_thread;	/* doing resync or reconstruct */
	sector_t curr_resync;		/* last block scheduled */
	/* As resync requests can complete out of order, we cannot easily track
	 * how much resync has been completed. So we occasionally pause until
	 * everything completes, then set curr_resync_completed to curr_resync.
	 * As such it may be well behind the real resync mark, but it is a value
	 * we are certain of.
	 */
	sector_t curr_resync_completed;
	unsigned long resync_mark;	/* a recent timestamp */
	sector_t resync_mark_cnt;	/* blocks written at resync_mark */
	sector_t curr_mark_cnt;		/* blocks scheduled now */

	sector_t resync_max_sectors;	/* may be set by personality */

	atomic64_t resync_mismatches;	/* count of sectors where
					 * parity/replica mismatch found
					 */

	/* allow user-space to request suspension of IO to regions of the array */
	sector_t suspend_lo;
	sector_t suspend_hi;
	/* if zero, use the system-wide default */
	int sync_speed_min;
	int sync_speed_max;

	/* resync even though the same disks are shared among md-devices */
	int parallel_resync;

	int ok_start_degraded;
	/* recovery/resync flags
	 * NEEDED:   we might need to start a resync/recover
	 * RUNNING:  a thread is running, or about to be started
	 * SYNC:     actually doing a resync, not a recovery
	 * RECOVER:  doing recovery, or need to try it.
	 * INTR:     resync needs to be aborted for some reason
	 * DONE:     thread is done and is waiting to be reaped
	 * REQUEST:  user-space has requested a sync (used with SYNC)
	 * CHECK:    user-space request for check-only, no repair
	 * RESHAPE:  A reshape is happening
	 *
	 * If neither SYNC nor RESHAPE is set, then it is a recovery.
	 */
#define	MD_RECOVERY_RUNNING	0
#define	MD_RECOVERY_SYNC	1
#define	MD_RECOVERY_RECOVER	2
#define	MD_RECOVERY_INTR	3
#define	MD_RECOVERY_DONE	4
#define	MD_RECOVERY_NEEDED	5
#define	MD_RECOVERY_REQUESTED	6
#define	MD_RECOVERY_CHECK	7
#define	MD_RECOVERY_RESHAPE	8
#define	MD_RECOVERY_FROZEN	9

	unsigned long recovery;
	/* If a RAID personality determines that recovery (of a particular
	 * device) will fail due to a read error on the source device, it
	 * takes a copy of this number and does not attempt recovery again
	 * until this number changes.
	 */
	int recovery_disabled;

	int in_sync;			/* know to not need resync */
	/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
	 * that we are never stopping an array while it is open.
	 * 'reconfig_mutex' protects all other reconfiguration.
	 * These locks are separate due to conflicting interactions
	 * with bdev->bd_mutex.
	 * Lock ordering is:
	 *  reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk
	 *  bd_mutex -> open_mutex:  e.g. __blkdev_get -> md_open
	 */
	struct mutex open_mutex;
	struct mutex reconfig_mutex;
	atomic_t active;		/* general refcount */
	atomic_t openers;		/* number of active opens */

	int changed;			/* True if we might need to
					 * reread partition info */
	int degraded;			/* whether md should consider
					 * adding a spare
					 */
	int merge_check_needed;		/* at least one
					 * member device
					 * has a
					 * merge_bvec_fn */

	atomic_t recovery_active;	/* blocks scheduled, but not written */
	wait_queue_head_t recovery_wait;
	sector_t recovery_cp;
	sector_t resync_min;		/* user requested sync
					 * starts here */
	sector_t resync_max;		/* resync should pause
					 * when it gets here */

	struct sysfs_dirent *sysfs_state;	/* handle for 'array_state'
						 * file in sysfs.
						 */
	struct sysfs_dirent *sysfs_action;	/* handle for 'sync_action' */

	struct work_struct del_work;	/* used for delayed sysfs removal */

	spinlock_t write_lock;
	wait_queue_head_t sb_wait;	/* for waiting on superblock updates */
	atomic_t pending_writes;	/* number of active superblock writes */

	unsigned int safemode;		/* if set, update "clean" superblock
					 * when no writes pending.
					 */
	unsigned int safemode_delay;
	struct timer_list safemode_timer;
	atomic_t writes_pending;
	struct request_queue *queue;	/* for plugging ... */

	struct bitmap *bitmap;		/* the bitmap for the device */
	struct {
		struct file *file;	/* the bitmap file */
		loff_t offset;		/* offset from superblock of
					 * start of bitmap. May be
					 * negative, but not '0'
					 * For external metadata, offset
					 * from start of device.
					 */
		unsigned long space;	/* space available at this offset */
		loff_t default_offset;	/* this is the offset to use when
					 * hot-adding a bitmap. It should
					 * eventually be settable by sysfs.
					 */
		unsigned long default_space;	/* space available at
						 * default offset */
		struct mutex mutex;
		unsigned long chunksize;
		unsigned long daemon_sleep;	/* how many jiffies between updates? */
		unsigned long max_write_behind;	/* write-behind mode */
		int external;
	} bitmap_info;

	atomic_t max_corr_read_errors;	/* max read retries */
	struct list_head all_mddevs;

	struct attribute_group *to_remove;

	struct bio_set *bio_set;

	/* Generic flush handling.
	 * The last to finish preflush schedules a worker to submit
	 * the rest of the request (without the REQ_FLUSH flag).
	 */
	struct bio *flush_bio;
	atomic_t flush_pending;
	struct work_struct flush_work;
	struct work_struct event_work;	/* used by dm to report failure event */
	void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
};
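/*
 * Illustrative note (not part of md.h): mddev->flags and mddev->recovery
 * are manipulated with the atomic bit helpers.  A sketch of the common
 * pattern for asking the core to (re)consider resync/recovery:
 *
 *	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 *	md_wakeup_thread(mddev->thread);
 */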
static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
	int faulty = test_bit(Faulty, &rdev->flags);
	if (atomic_dec_and_test(&rdev->nr_pending) && faulty)
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
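/*
 * Illustrative sketch (not part of md.h): the usual pairing around IO
 * submission.  A personality bumps nr_pending while it uses the device
 * and drops it through rdev_dec_pending() on completion, so a Faulty
 * device can be reaped once its last request finishes.  The function
 * names are hypothetical.
 */
static inline void example_submit(struct md_rdev *rdev, struct bio *bio)
{
	atomic_inc(&rdev->nr_pending);
	bio->bi_bdev = rdev->bdev;
	generic_make_request(bio);
}

static inline void example_end_io(struct md_rdev *rdev, struct mddev *mddev)
{
	rdev_dec_pending(rdev, mddev);	/* may set MD_RECOVERY_NEEDED */
}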
433
static
inline
void
md_sync_acct(
struct
block_device
*
bdev
,
unsigned
long
nr_sectors
)
434
{
435
atomic_add
(nr_sectors, &bdev->
bd_contains
->bd_disk->sync_io);
436
}
437
438
struct md_personality
{
	char *name;
	int level;
	struct list_head list;
	struct module *owner;
	void (*make_request)(struct mddev *mddev, struct bio *bio);
	int (*run)(struct mddev *mddev);
	int (*stop)(struct mddev *mddev);
	void (*status)(struct seq_file *seq, struct mddev *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
	 * if appropriate, and should abort recovery if needed
	 */
	void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
	int (*spare_active) (struct mddev *mddev);
	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr,
				 int *skipped, int go_faster);
	int (*resize) (struct mddev *mddev, sector_t sectors);
	sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
	int (*check_reshape) (struct mddev *mddev);
	int (*start_reshape) (struct mddev *mddev);
	void (*finish_reshape) (struct mddev *mddev);
	/* quiesce moves between quiescence states
	 * 0 - fully active
	 * 1 - no new requests allowed
	 * others - reserved
	 */
	void (*quiesce) (struct mddev *mddev, int state);
	/* takeover is used to transition an array from one
	 * personality to another. The new personality must be able
	 * to handle the data in the current layout.
	 * e.g. 2drive raid1 -> 2drive raid5
	 *      ndrive raid5 -> degraded n+1drive raid6 with special layout
	 * If the takeover succeeds, a new 'private' structure is returned.
	 * This needs to be installed and then ->run used to activate the
	 * array.
	 */
	void *(*takeover) (struct mddev *mddev);
};
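/*
 * Illustrative sketch (not part of md.h): how a personality module fills
 * in struct md_personality and registers it, in the style of linear.c or
 * raid1.c.  The "noraid" name, the level number and the stub callbacks
 * are hypothetical.
 */
static void noraid_make_request(struct mddev *mddev, struct bio *bio);
static int noraid_run(struct mddev *mddev);
static int noraid_stop(struct mddev *mddev);

static struct md_personality noraid_personality = {
	.name		= "noraid",
	.level		= -7,		/* hypothetical, must be unique */
	.owner		= THIS_MODULE,
	.make_request	= noraid_make_request,
	.run		= noraid_run,
	.stop		= noraid_stop,
};

static int __init noraid_init(void)
{
	return register_md_personality(&noraid_personality);
}

static void __exit noraid_exit(void)
{
	unregister_md_personality(&noraid_personality);
}
module_init(noraid_init);
module_exit(noraid_exit);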
struct md_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct mddev *, char *);
	ssize_t (*store)(struct mddev *, const char *, size_t);
};
extern struct attribute_group md_bitmap_group;
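/*
 * Illustrative sketch (not part of md.h): defining a read-only attribute
 * with struct md_sysfs_entry, the way md.c builds its sysfs files.  The
 * "degraded_example" attribute and its show handler are hypothetical.
 */
static ssize_t degraded_example_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->degraded);
}

static struct md_sysfs_entry md_degraded_example =
	__ATTR(degraded_example, S_IRUGO, degraded_example_show, NULL);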
static inline struct sysfs_dirent *sysfs_get_dirent_safe(struct sysfs_dirent *sd, char *name)
{
	if (sd)
		return sysfs_get_dirent(sd, NULL, name);
	return sd;
}
static inline void sysfs_notify_dirent_safe(struct sysfs_dirent *sd)
{
	if (sd)
		sysfs_notify_dirent(sd);
}
static inline char *mdname(struct mddev *mddev)
{
	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}
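/*
 * Illustrative note (not part of md.h): mdname() is the conventional
 * prefix for log messages about an array, e.g.
 *
 *	printk(KERN_INFO "%s: example event\n", mdname(mddev));
 */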
static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];
	if (!test_bit(Replacement, &rdev->flags)) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
	} else
		return 0;
}

static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];
	if (!test_bit(Replacement, &rdev->flags)) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		sysfs_remove_link(&mddev->kobj, nm);
	}
}
/*
 * iterates through some rdev ringlist. It's safe to remove the
 * current 'rdev'. Don't touch 'tmp' though.
 */
#define rdev_for_each_list(rdev, tmp, head)				\
	list_for_each_entry_safe(rdev, tmp, head, same_set)

/*
 * iterates through the 'same array disks' ringlist
 */
#define rdev_for_each(rdev, mddev)					\
	list_for_each_entry(rdev, &((mddev)->disks), same_set)

#define rdev_for_each_safe(rdev, tmp, mddev)				\
	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

#define rdev_for_each_rcu(rdev, mddev)					\
	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
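/*
 * Illustrative sketch (not part of md.h): walking an array's members with
 * rdev_for_each().  The caller is assumed to hold a lock that keeps the
 * disks list stable (e.g. reconfig_mutex); the helper name is hypothetical.
 */
static inline int example_count_in_sync(struct mddev *mddev)
{
	struct md_rdev *rdev;
	int cnt = 0;

	rdev_for_each(rdev, mddev)
		if (test_bit(In_sync, &rdev->flags))
			cnt++;
	return cnt;
}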
struct md_thread {
	void (*run) (struct md_thread *thread);
	struct mddev *mddev;
	wait_queue_head_t wqueue;
	unsigned long flags;
	struct task_struct *tsk;
	unsigned long timeout;
	void *private;
};

#define THREAD_WAKEUP  0
#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
do {									\
	wait_queue_t __wait;						\
	init_waitqueue_entry(&__wait, current);				\
									\
	add_wait_queue(&wq, &__wait);					\
	for (;;) {							\
		set_current_state(TASK_UNINTERRUPTIBLE);		\
		if (condition)						\
			break;						\
		spin_unlock_irq(&lock);					\
		cmd;							\
		schedule();						\
		spin_lock_irq(&lock);					\
	}								\
	current->state = TASK_RUNNING;					\
	remove_wait_queue(&wq, &__wait);				\
} while (0)

#define wait_event_lock_irq(wq, condition, lock, cmd)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)
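/*
 * Illustrative sketch (not part of md.h): wait_event_lock_irq() is entered
 * with the spinlock held and irqs disabled; it drops the lock around
 * schedule() and retakes it before re-testing the condition.  This mirrors
 * how md.c waits for superblock writes to drain; the helper name is
 * hypothetical.
 */
static inline void example_wait_pending_writes(struct mddev *mddev)
{
	spin_lock_irq(&mddev->write_lock);
	wait_event_lock_irq(mddev->sb_wait,
			    atomic_read(&mddev->pending_writes) == 0,
			    mddev->write_lock,
			    md_wakeup_thread(mddev->thread));
	spin_unlock_irq(&mddev->write_lock);
}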
static inline void safe_put_page(struct page *p)
{
	if (p) put_page(p);
}
extern int register_md_personality(struct md_personality *p);
extern int unregister_md_personality(struct md_personality *p);
extern struct md_thread *md_register_thread(
	void (*run)(struct md_thread *thread),
	struct mddev *mddev,
	const char *name);
extern void md_unregister_thread(struct md_thread **threadp);
extern void md_wakeup_thread(struct md_thread *thread);
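/*
 * Illustrative sketch (not part of md.h): starting and poking a management
 * thread with the helpers above, as a personality's ->run() method would.
 * The function names are hypothetical.
 */
static void example_threadfn(struct md_thread *thread)
{
	/* runs whenever md_wakeup_thread() sets THREAD_WAKEUP, and at
	 * most every thread->timeout jiffies otherwise */
	pr_debug("%s: woke up\n", mdname(thread->mddev));
}

static inline int example_start_thread(struct mddev *mddev)
{
	mddev->thread = md_register_thread(example_threadfn, mddev, "example");
	if (!mddev->thread)
		return -ENOMEM;
	md_wakeup_thread(mddev->thread);
	return 0;
}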
extern void md_check_recovery(struct mddev *mddev);
extern void md_write_start(struct mddev *mddev, struct bio *bi);
extern void md_write_end(struct mddev *mddev);
extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);

extern int mddev_congested(struct mddev *mddev, int bits);
extern void md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
			   sector_t sector, int size, struct page *page);
extern void md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
			struct page *page, int rw, bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
extern void md_new_event(struct mddev *mddev);
extern int md_allow_write(struct mddev *mddev);
extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(struct mddev *mddev);
extern int md_integrity_register(struct mddev *mddev);
extern void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void restore_bitmap_write_access(struct file *file);
extern void mddev_init(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
extern void md_stop(struct mddev *mddev);
extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
extern void md_rdev_clear(struct md_rdev *rdev);

extern void mddev_suspend(struct mddev *mddev);
extern void mddev_resume(struct mddev *mddev);
extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
				   struct mddev *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
				   struct mddev *mddev);
extern void md_trim_bio(struct bio *bio, int offset, int size);

extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
static inline int mddev_check_plugged(struct mddev *mddev)
{
	return !!blk_check_plugged(md_unplug, mddev,
				   sizeof(struct blk_plug_cb));
}
#endif /* _MD_MD_H */