Linux Kernel 3.7.1 - fs/gfs2/incore.h
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __INCORE_DOT_H__
#define __INCORE_DOT_H__

#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/workqueue.h>
#include <linux/dlm.h>
#include <linux/buffer_head.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/completion.h>
#include <linux/rbtree.h>
#include <linux/ktime.h>
#include <linux/percpu.h>

#define DIO_WAIT	0x00000010
#define DIO_METADATA	0x00000020

struct gfs2_log_operations;
struct gfs2_bufdata;
struct gfs2_holder;
struct gfs2_glock;
struct gfs2_quota_data;
struct gfs2_trans;
struct gfs2_ail;
struct gfs2_jdesc;
struct gfs2_sbd;
struct lm_lockops;

typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);

struct gfs2_log_header_host {
	u64 lh_sequence;	/* Sequence number of this transaction */
	u32 lh_flags;		/* GFS2_LOG_HEAD_... */
	u32 lh_tail;		/* Block number of log tail */
	u32 lh_blkno;
	u32 lh_hash;
};

/*
 * Structure of operations that are associated with each
 * type of element in the log.
 */

struct gfs2_log_operations {
	void (*lo_add) (struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
	void (*lo_before_commit) (struct gfs2_sbd *sdp);
	void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_ail *ai);
	void (*lo_before_scan) (struct gfs2_jdesc *jd,
				struct gfs2_log_header_host *head, int pass);
	int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start,
				 struct gfs2_log_descriptor *ld, __be64 *ptr,
				 int pass);
	void (*lo_after_scan) (struct gfs2_jdesc *jd, int error, int pass);
	const char *lo_name;
};

#define GBF_FULL 1

struct gfs2_bitmap {
	struct buffer_head *bi_bh;
	char *bi_clone;
	unsigned long bi_flags;
	u32 bi_offset;
	u32 bi_start;
	u32 bi_len;
};

struct gfs2_rgrpd {
	struct rb_node rd_node;		/* Link with superblock */
	struct gfs2_glock *rd_gl;	/* Glock for this rgrp */
	u64 rd_addr;			/* grp block disk address */
	u64 rd_data0;			/* first data location */
	u32 rd_length;			/* length of rgrp header in fs blocks */
	u32 rd_data;			/* num of data blocks in rgrp */
	u32 rd_bitbytes;		/* number of bytes in data bitmaps */
	u32 rd_free;
	u32 rd_reserved;		/* number of blocks reserved */
	u32 rd_free_clone;
	u32 rd_dinodes;
	u64 rd_igeneration;
	struct gfs2_bitmap *rd_bits;
	struct gfs2_sbd *rd_sbd;
	struct gfs2_rgrp_lvb *rd_rgl;
	u32 rd_last_alloc;
	u32 rd_flags;
#define GFS2_RDF_CHECK		0x10000000 /* check for unlinked inodes */
#define GFS2_RDF_UPTODATE	0x20000000 /* rg is up to date */
#define GFS2_RDF_ERROR		0x40000000 /* error in rg */
#define GFS2_RDF_MASK		0xf0000000 /* mask for internal flags */
	spinlock_t rd_rsspin;		/* protects reservation related vars */
	struct rb_root rd_rstree;	/* multi-block reservation tree */
};

struct gfs2_rbm {
	struct gfs2_rgrpd *rgd;
	struct gfs2_bitmap *bi;		/* Bitmap must belong to the rgd */
	u32 offset;			/* The offset is bitmap relative */
};

static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
{
	return rbm->rgd->rd_data0 + (rbm->bi->bi_start * GFS2_NBBY) + rbm->offset;
}

static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1,
			       const struct gfs2_rbm *rbm2)
{
	return (rbm1->rgd == rbm2->rgd) && (rbm1->bi == rbm2->bi) &&
	       (rbm1->offset == rbm2->offset);
}

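/*
 * Illustrative note (not part of the original header): GFS2_NBBY comes from
 * gfs2_ondisk.h and is the number of blocks described by one byte of a
 * resource group bitmap (4, i.e. two bits per block).  Assuming a gfs2_rbm
 * with rgd->rd_data0 = 1000, bi->bi_start = 2 and offset = 5,
 * gfs2_rbm_to_block() would return 1000 + (2 * 4) + 5 = 1013.
 */
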
enum gfs2_state_bits {
	BH_Pinned = BH_PrivateStart,
	BH_Escaped = BH_PrivateStart + 1,
	BH_Zeronew = BH_PrivateStart + 2,
};

BUFFER_FNS(Pinned, pinned)
TAS_BUFFER_FNS(Pinned, pinned)
BUFFER_FNS(Escaped, escaped)
TAS_BUFFER_FNS(Escaped, escaped)
BUFFER_FNS(Zeronew, zeronew)
TAS_BUFFER_FNS(Zeronew, zeronew)

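/*
 * Illustrative note (not part of the original header): BUFFER_FNS() and
 * TAS_BUFFER_FNS() from <linux/buffer_head.h> generate per-bit buffer_head
 * accessors, so the invocations above should provide helpers along the
 * lines of:
 *
 *	set_buffer_pinned(bh);			set BH_Pinned
 *	clear_buffer_pinned(bh);		clear BH_Pinned
 *	buffer_pinned(bh);			test BH_Pinned
 *	test_set_buffer_pinned(bh);		atomic test-and-set
 *	test_clear_buffer_pinned(bh);		atomic test-and-clear
 *
 * and likewise for the Escaped and Zeronew bits.
 */
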
struct gfs2_bufdata {
	struct buffer_head *bd_bh;
	struct gfs2_glock *bd_gl;
	u64 bd_blkno;

	struct list_head bd_list;
	const struct gfs2_log_operations *bd_ops;

	struct gfs2_ail *bd_ail;
	struct list_head bd_ail_st_list;
	struct list_head bd_ail_gl_list;
};

/*
 * Internally, we prefix things with gdlm_ and GDLM_ (for gfs-dlm) since a
 * prefix of lock_dlm_ gets awkward.
 */

#define GDLM_STRNAME_BYTES	25
#define GDLM_LVB_SIZE		32

/*
 * ls_recover_flags:
 *
 * DFL_BLOCK_LOCKS: dlm is in recovery and will grant locks that had been
 * held by failed nodes whose journals need recovery.  Those locks should
 * only be used for journal recovery until the journal recovery is done.
 * This is set by the dlm recover_prep callback and cleared by the
 * gfs2_control thread when journal recovery is complete.  To avoid
 * races between recover_prep setting and gfs2_control clearing, recover_spin
 * is held while changing this bit and reading/writing recover_block
 * and recover_start.
 *
 * DFL_NO_DLM_OPS: dlm lockspace ops/callbacks are not being used.
 *
 * DFL_FIRST_MOUNT: this node is the first to mount this fs and is doing
 * recovery of all journals before allowing other nodes to mount the fs.
 * This is cleared when FIRST_MOUNT_DONE is set.
 *
 * DFL_FIRST_MOUNT_DONE: this node was the first mounter, and has finished
 * recovery of all journals, and now allows other nodes to mount the fs.
 *
 * DFL_MOUNT_DONE: gdlm_mount has completed successfully and cleared
 * BLOCK_LOCKS for the first time.  The gfs2_control thread should now
 * control clearing BLOCK_LOCKS for further recoveries.
 *
 * DFL_UNMOUNT: gdlm_unmount sets to keep sdp off gfs2_control_wq.
 *
 * DFL_DLM_RECOVERY: set while dlm is in recovery, between recover_prep()
 * and recover_done(), i.e. set while recover_block == recover_start.
 */

enum {
	DFL_BLOCK_LOCKS		= 0,
	DFL_NO_DLM_OPS		= 1,
	DFL_FIRST_MOUNT		= 2,
	DFL_FIRST_MOUNT_DONE	= 3,
	DFL_MOUNT_DONE		= 4,
	DFL_UNMOUNT		= 5,
	DFL_DLM_RECOVERY	= 6,
};

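/*
 * Illustrative sketch (not part of the original header): the DFL_* values
 * are bit numbers for ls_recover_flags in struct lm_lockstruct below, so
 * they would typically be manipulated with the atomic bitops, e.g.:
 *
 *	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
 *
 *	set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
 *	if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))
 *		;	(grant locks for journal recovery only)
 *	clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
 */
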
struct lm_lockname {
	u64 ln_number;
	unsigned int ln_type;
};

#define lm_name_equal(name1, name2) \
        (((name1)->ln_number == (name2)->ln_number) && \
         ((name1)->ln_type == (name2)->ln_type))

struct gfs2_glock_operations {
	void (*go_xmote_th) (struct gfs2_glock *gl);
	int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
	void (*go_inval) (struct gfs2_glock *gl, int flags);
	int (*go_demote_ok) (const struct gfs2_glock *gl);
	int (*go_lock) (struct gfs2_holder *gh);
	void (*go_unlock) (struct gfs2_holder *gh);
	int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
	void (*go_callback) (struct gfs2_glock *gl);
	const int go_type;
	const unsigned long go_flags;
#define GLOF_ASPACE 1
};

enum {
	GFS2_LKS_SRTT = 0,	/* Non blocking smoothed round trip time */
	GFS2_LKS_SRTTVAR = 1,	/* Non blocking smoothed variance */
	GFS2_LKS_SRTTB = 2,	/* Blocking smoothed round trip time */
	GFS2_LKS_SRTTVARB = 3,	/* Blocking smoothed variance */
	GFS2_LKS_SIRT = 4,	/* Smoothed Inter-request time */
	GFS2_LKS_SIRTVAR = 5,	/* Smoothed Inter-request variance */
	GFS2_LKS_DCOUNT = 6,	/* Count of dlm requests */
	GFS2_LKS_QCOUNT = 7,	/* Count of gfs2_holder queues */
	GFS2_NR_LKSTATS
};

struct gfs2_lkstats {
	s64 stats[GFS2_NR_LKSTATS];
};

enum {
	/* States */
	HIF_HOLDER		= 6,  /* Set for gh that "holds" the glock */
	HIF_FIRST		= 7,
	HIF_WAIT		= 10,
};

struct gfs2_holder {
	struct list_head gh_list;

	struct gfs2_glock *gh_gl;
	struct pid *gh_owner_pid;
	unsigned int gh_state;
	unsigned gh_flags;

	int gh_error;
	unsigned long gh_iflags;	/* HIF_... */
	unsigned long gh_ip;
};

/* Resource group multi-block reservation, in order of appearance:

   Step 1. Function prepares to write, allocates a mb, sets the size hint.
   Step 2. User calls inplace_reserve to target an rgrp, sets the rgrp info
   Step 3. Function get_local_rgrp locks the rgrp, determines which bits to use
   Step 4. Bits are assigned from the rgrp based on either the reservation
           or wherever it can.
*/

struct gfs2_blkreserv {
	/* components used during write (step 1): */
	atomic_t rs_sizehint;		/* hint of the write size */

	struct gfs2_holder rs_rgd_gh;	/* Filled in by get_local_rgrp */
	struct rb_node rs_node;		/* link to other block reservations */
	struct gfs2_rbm rs_rbm;		/* Start of reservation */
	u32 rs_free;			/* how many blocks are still free */
	u64 rs_inum;			/* Inode number for reservation */

	/* ancillary quota stuff */
	struct gfs2_quota_data *rs_qa_qd[2 * MAXQUOTAS];
	struct gfs2_holder rs_qa_qd_ghs[2 * MAXQUOTAS];
	unsigned int rs_qa_qd_num;
};

enum {
	GLF_LOCK			= 1,
	GLF_DEMOTE			= 3,
	GLF_PENDING_DEMOTE		= 4,
	GLF_DEMOTE_IN_PROGRESS		= 5,
	GLF_DIRTY			= 6,
	GLF_LFLUSH			= 7,
	GLF_INVALIDATE_IN_PROGRESS	= 8,
	GLF_REPLY_PENDING		= 9,
	GLF_INITIAL			= 10,
	GLF_FROZEN			= 11,
	GLF_QUEUED			= 12,
	GLF_LRU				= 13,
	GLF_OBJECT			= 14,	/* Used only for tracing */
	GLF_BLOCKING			= 15,
};

struct gfs2_glock {
	struct hlist_bl_node gl_list;
	struct gfs2_sbd *gl_sbd;
	unsigned long gl_flags;		/* GLF_... */
	struct lm_lockname gl_name;
	atomic_t gl_ref;

	spinlock_t gl_spin;

	/* State fields protected by gl_spin */
	unsigned int gl_state:2,	/* Current state */
		     gl_target:2,	/* Target state */
		     gl_demote_state:2,	/* State requested by remote node */
		     gl_req:2,		/* State in last dlm request */
		     gl_reply:8;	/* Last reply from the dlm */

	unsigned int gl_hash;
	unsigned long gl_demote_time;	/* time of first demote request */
	long gl_hold_time;
	struct list_head gl_holders;

	const struct gfs2_glock_operations *gl_ops;
	ktime_t gl_dstamp;
	struct gfs2_lkstats gl_stats;
	struct dlm_lksb gl_lksb;
	char gl_lvb[32];
	unsigned long gl_tchange;
	void *gl_object;

	struct list_head gl_lru;
	struct list_head gl_ail_list;
	atomic_t gl_ail_count;
	atomic_t gl_revokes;
	struct delayed_work gl_work;
	struct work_struct gl_delete;
	struct rcu_head gl_rcu;
};

#define GFS2_MIN_LVB_SIZE 32	/* Min size of LVB that gfs2 supports */

enum {
	GIF_INVALID		= 0,
	GIF_QD_LOCKED		= 1,
	GIF_ALLOC_FAILED	= 2,
	GIF_SW_PAGED		= 3,
};

struct gfs2_inode {
	struct inode i_inode;
	u64 i_no_addr;
	u64 i_no_formal_ino;
	u64 i_generation;
	u64 i_eattr;
	unsigned long i_flags;		/* GIF_... */
	struct gfs2_glock *i_gl;	/* Move into i_gh? */
	struct gfs2_holder i_iopen_gh;
	struct gfs2_holder i_gh;	/* for prepare/commit_write only */
	struct gfs2_blkreserv *i_res;	/* rgrp multi-block reservation */
	struct gfs2_rgrpd *i_rgd;
	u64 i_goal;			/* goal block for allocations */
	struct rw_semaphore i_rw_mutex;
	struct list_head i_trunc_list;
	__be64 *i_hash_cache;
	u32 i_entries;
	u32 i_diskflags;
	u8 i_height;
	u8 i_depth;
};

/*
 * Since i_inode is the first element of struct gfs2_inode,
 * this is effectively a cast.
 */
static inline struct gfs2_inode *GFS2_I(struct inode *inode)
{
	return container_of(inode, struct gfs2_inode, i_inode);
}

static inline struct gfs2_sbd *GFS2_SB(const struct inode *inode)
{
	return inode->i_sb->s_fs_info;
}

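/*
 * Illustrative sketch (not part of the original header): callers convert
 * between the VFS inode and the GFS2-specific structures with these two
 * helpers, e.g.:
 *
 *	struct gfs2_inode *ip = GFS2_I(inode);	(container_of() cast)
 *	struct gfs2_sbd *sdp = GFS2_SB(inode);	(per-mount data via i_sb)
 *
 * after which fields such as ip->i_gl or sdp->sd_sb can be used directly.
 */
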
struct gfs2_file {
	struct mutex f_fl_mutex;
	struct gfs2_holder f_fl_gh;
};

struct gfs2_revoke_replay {
	struct list_head rr_list;
	u64 rr_blkno;
	unsigned int rr_where;
};

enum {
	QDF_USER		= 0,
	QDF_CHANGE		= 1,
	QDF_LOCKED		= 2,
	QDF_REFRESH		= 3,
};

struct gfs2_quota_data {
	struct list_head qd_list;
	struct list_head qd_reclaim;

	atomic_t qd_count;

	u32 qd_id;
	unsigned long qd_flags;		/* QDF_... */

	s64 qd_change;
	s64 qd_change_sync;

	unsigned int qd_slot;
	unsigned int qd_slot_count;

	struct buffer_head *qd_bh;
	struct gfs2_quota_change *qd_bh_qc;
	unsigned int qd_bh_count;

	struct gfs2_glock *qd_gl;
	struct gfs2_quota_lvb qd_qb;

	u64 qd_sync_gen;
	unsigned long qd_last_warn;
};

struct gfs2_trans {
	unsigned long tr_ip;

	unsigned int tr_blocks;
	unsigned int tr_revokes;
	unsigned int tr_reserved;

	struct gfs2_holder tr_t_gh;

	int tr_touched;

	unsigned int tr_num_buf_new;
	unsigned int tr_num_databuf_new;
	unsigned int tr_num_buf_rm;
	unsigned int tr_num_databuf_rm;
	unsigned int tr_num_revoke;
	unsigned int tr_num_revoke_rm;
};

struct gfs2_ail {
	struct list_head ai_list;

	unsigned int ai_first;
	struct list_head ai_ail1_list;
	struct list_head ai_ail2_list;
};

struct gfs2_journal_extent {
	struct list_head extent_list;

	unsigned int lblock;	/* First logical block */
	u64 dblock;		/* First disk block */
	u64 blocks;
};

struct gfs2_jdesc {
	struct list_head jd_list;
	struct list_head extent_list;
	struct work_struct jd_work;
	struct inode *jd_inode;
	unsigned long jd_flags;
#define JDF_RECOVERY 1
	unsigned int jd_jid;
	unsigned int jd_blocks;
	int jd_recover_error;
};

struct gfs2_statfs_change_host {
	s64 sc_total;
	s64 sc_free;
	s64 sc_dinodes;
};

#define GFS2_QUOTA_DEFAULT	GFS2_QUOTA_OFF
#define GFS2_QUOTA_OFF		0
#define GFS2_QUOTA_ACCOUNT	1
#define GFS2_QUOTA_ON		2

#define GFS2_DATA_DEFAULT	GFS2_DATA_ORDERED
#define GFS2_DATA_WRITEBACK	1
#define GFS2_DATA_ORDERED	2

#define GFS2_ERRORS_DEFAULT	GFS2_ERRORS_WITHDRAW
#define GFS2_ERRORS_WITHDRAW	0
#define GFS2_ERRORS_CONTINUE	1	/* place holder for future feature */
#define GFS2_ERRORS_RO		2	/* place holder for future feature */
#define GFS2_ERRORS_PANIC	3

struct gfs2_args {
	char ar_lockproto[GFS2_LOCKNAME_LEN];	/* Name of the Lock Protocol */
	char ar_locktable[GFS2_LOCKNAME_LEN];	/* Name of the Lock Table */
	char ar_hostdata[GFS2_LOCKNAME_LEN];	/* Host specific data */
	unsigned int ar_spectator:1;		/* Don't get a journal */
	unsigned int ar_localflocks:1;		/* Let the VFS do flock|fcntl */
	unsigned int ar_debug:1;		/* Oops on errors */
	unsigned int ar_posix_acl:1;		/* Enable posix acls */
	unsigned int ar_quota:2;		/* off/account/on */
	unsigned int ar_suiddir:1;		/* suiddir support */
	unsigned int ar_data:2;			/* ordered/writeback */
	unsigned int ar_meta:1;			/* mount metafs */
	unsigned int ar_discard:1;		/* discard requests */
	unsigned int ar_errors:2;		/* errors=withdraw | panic */
	unsigned int ar_nobarrier:1;		/* do not send barriers */
	unsigned int ar_rgrplvb:1;		/* use lvbs for rgrp info */
	int ar_commit;				/* Commit interval */
	int ar_statfs_quantum;			/* The fast statfs interval */
	int ar_quota_quantum;			/* The quota interval */
	int ar_statfs_percent;			/* The % change to force sync */
};

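/*
 * Illustrative sketch (not part of the original header): the gfs2_args
 * fields mirror mount options.  Assuming the usual gfs2 option names, a
 * mount exercising several of them might look like:
 *
 *	mount -t gfs2 -o lockproto=lock_dlm,locktable=mycluster:myfs,\
 *		quota=on,data=ordered,errors=withdraw,discard,commit=30 \
 *		/dev/vg0/gfs2lv /mnt/gfs2
 *
 * which would populate ar_lockproto, ar_locktable, ar_quota, ar_data,
 * ar_errors, ar_discard and ar_commit respectively.
 */
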
struct gfs2_tune {
	spinlock_t gt_spin;

	unsigned int gt_logd_secs;

	unsigned int gt_quota_simul_sync;	/* Max quotavals to sync at once */
	unsigned int gt_quota_warn_period;	/* Secs between quota warn msgs */
	unsigned int gt_quota_scale_num;	/* Numerator */
	unsigned int gt_quota_scale_den;	/* Denominator */
	unsigned int gt_quota_quantum;		/* Secs between syncs to quota file */
	unsigned int gt_new_files_jdata;
	unsigned int gt_max_readahead;		/* Max bytes to read-ahead from disk */
	unsigned int gt_complain_secs;
	unsigned int gt_statfs_quantum;
	unsigned int gt_statfs_slow;
};

enum {
	SDF_JOURNAL_CHECKED	= 0,
	SDF_JOURNAL_LIVE	= 1,
	SDF_SHUTDOWN		= 2,
	SDF_NOBARRIERS		= 3,
	SDF_NORECOVERY		= 4,
	SDF_DEMOTE		= 5,
	SDF_NOJOURNALID		= 6,
	SDF_RORECOVERY		= 7,	/* read only recovery */
};

#define GFS2_FSNAME_LEN		256

struct gfs2_inum_host {
	u64 no_formal_ino;
	u64 no_addr;
};

struct gfs2_sb_host {
	u32 sb_magic;
	u32 sb_type;
	u32 sb_format;

	u32 sb_fs_format;
	u32 sb_multihost_format;
	u32 sb_bsize;
	u32 sb_bsize_shift;

	struct gfs2_inum_host sb_master_dir;
	struct gfs2_inum_host sb_root_dir;

	char sb_lockproto[GFS2_LOCKNAME_LEN];
	char sb_locktable[GFS2_LOCKNAME_LEN];
};

/*
 * lm_mount() return values
 *
 * ls_jid - the journal ID this node should use
 * ls_first - this node is the first to mount the file system
 * ls_lockspace - lock module's context for this file system
 * ls_ops - lock module's functions
 */

struct lm_lockstruct {
	int ls_jid;
	unsigned int ls_first;
	const struct lm_lockops *ls_ops;
	dlm_lockspace_t *ls_dlm;

	int ls_recover_jid_done;	/* These two are deprecated, */
	int ls_recover_jid_status;	/* used previously by gfs_controld */

	struct dlm_lksb ls_mounted_lksb;	/* mounted_lock */
	struct dlm_lksb ls_control_lksb;	/* control_lock */
	char ls_control_lvb[GDLM_LVB_SIZE];	/* control_lock lvb */
	struct completion ls_sync_wait;		/* {control,mounted}_{lock,unlock} */

	spinlock_t ls_recover_spin;	/* protects following fields */
	unsigned long ls_recover_flags;	/* DFL_ */
	uint32_t ls_recover_mount;	/* gen in first recover_done cb */
	uint32_t ls_recover_start;	/* gen in last recover_done cb */
	uint32_t ls_recover_block;	/* copy recover_start in last recover_prep */
	uint32_t ls_recover_size;	/* size of recover_submit, recover_result */
	uint32_t *ls_recover_submit;	/* gen in last recover_slot cb per jid */
	uint32_t *ls_recover_result;	/* result of last jid recovery */
};

struct gfs2_pcpu_lkstats {
	/* One struct for each glock type */
	struct gfs2_lkstats lkstats[10];
};

struct gfs2_sbd {
	struct super_block *sd_vfs;
	struct gfs2_pcpu_lkstats __percpu *sd_lkstats;
	struct kobject sd_kobj;
	unsigned long sd_flags;	/* SDF_... */
	struct gfs2_sb_host sd_sb;

	/* Constants computed on mount */

	u32 sd_fsb2bb;
	u32 sd_fsb2bb_shift;
	u32 sd_diptrs;		/* Number of pointers in a dinode */
	u32 sd_inptrs;		/* Number of pointers in an indirect block */
	u32 sd_jbsize;		/* Size of a journaled data block */
	u32 sd_hash_bsize;	/* sizeof(exhash block) */
	u32 sd_hash_bsize_shift;
	u32 sd_hash_ptrs;	/* Number of pointers in a hash block */
	u32 sd_qc_per_block;
	u32 sd_max_dirres;	/* Max blocks needed to add a directory entry */
	u32 sd_max_height;	/* Max height of a file's metadata tree */
	u64 sd_heightsize[GFS2_MAX_META_HEIGHT + 1];
	u32 sd_max_jheight;	/* Max height of journaled file's meta tree */
	u64 sd_jheightsize[GFS2_MAX_META_HEIGHT + 1];

	struct gfs2_args sd_args;	/* Mount arguments */
	struct gfs2_tune sd_tune;	/* Filesystem tuning structure */

	/* Lock Stuff */

	struct lm_lockstruct sd_lockstruct;
	struct gfs2_holder sd_live_gh;
	struct gfs2_glock *sd_rename_gl;
	struct gfs2_glock *sd_trans_gl;
	wait_queue_head_t sd_glock_wait;
	atomic_t sd_glock_disposal;
	struct completion sd_locking_init;
	struct delayed_work sd_control_work;

	/* Inode Stuff */

	struct dentry *sd_master_dir;
	struct dentry *sd_root_dir;

	struct inode *sd_jindex;
	struct inode *sd_statfs_inode;
	struct inode *sd_sc_inode;
	struct inode *sd_qc_inode;
	struct inode *sd_rindex;
	struct inode *sd_quota_inode;

	/* StatFS stuff */

	spinlock_t sd_statfs_spin;
	struct gfs2_statfs_change_host sd_statfs_master;
	struct gfs2_statfs_change_host sd_statfs_local;
	int sd_statfs_force_sync;

	/* Resource group stuff */

	int sd_rindex_uptodate;
	spinlock_t sd_rindex_spin;
	struct rb_root sd_rindex_tree;
	unsigned int sd_rgrps;
	unsigned int sd_max_rg_data;

	/* Journal index stuff */

	struct list_head sd_jindex_list;
	spinlock_t sd_jindex_spin;
	struct mutex sd_jindex_mutex;
	unsigned int sd_journals;

	struct gfs2_jdesc *sd_jdesc;
	struct gfs2_holder sd_journal_gh;
	struct gfs2_holder sd_jinode_gh;

	struct gfs2_holder sd_sc_gh;
	struct gfs2_holder sd_qc_gh;

	/* Daemon stuff */

	struct task_struct *sd_logd_process;
	struct task_struct *sd_quotad_process;

	/* Quota stuff */

	struct list_head sd_quota_list;
	atomic_t sd_quota_count;
	struct mutex sd_quota_mutex;
	wait_queue_head_t sd_quota_wait;
	struct list_head sd_trunc_list;
	spinlock_t sd_trunc_lock;

	unsigned int sd_quota_slots;
	unsigned int sd_quota_chunks;
	unsigned char **sd_quota_bitmap;

	u64 sd_quota_sync_gen;

	/* Log stuff */

	spinlock_t sd_log_lock;

	unsigned int sd_log_blks_reserved;
	unsigned int sd_log_commited_buf;
	unsigned int sd_log_commited_databuf;
	int sd_log_commited_revoke;

	atomic_t sd_log_pinned;
	unsigned int sd_log_num_buf;
	unsigned int sd_log_num_revoke;
	unsigned int sd_log_num_rg;
	unsigned int sd_log_num_databuf;

	struct list_head sd_log_le_buf;
	struct list_head sd_log_le_revoke;
	struct list_head sd_log_le_databuf;
	struct list_head sd_log_le_ordered;

	atomic_t sd_log_thresh1;
	atomic_t sd_log_thresh2;
	atomic_t sd_log_blks_free;
	wait_queue_head_t sd_log_waitq;
	wait_queue_head_t sd_logd_waitq;

	u64 sd_log_sequence;
	unsigned int sd_log_head;
	unsigned int sd_log_tail;
	int sd_log_idle;

	struct rw_semaphore sd_log_flush_lock;
	atomic_t sd_log_in_flight;
	struct bio *sd_log_bio;
	wait_queue_head_t sd_log_flush_wait;
	int sd_log_error;

	unsigned int sd_log_flush_head;
	u64 sd_log_flush_wrapped;

	spinlock_t sd_ail_lock;
	struct list_head sd_ail1_list;
	struct list_head sd_ail2_list;

	/* Replay stuff */

	struct list_head sd_revoke_list;
	unsigned int sd_replay_tail;

	unsigned int sd_found_blocks;
	unsigned int sd_found_revokes;
	unsigned int sd_replayed_blocks;

	/* For quiescing the filesystem */

	struct gfs2_holder sd_freeze_gh;
	struct mutex sd_freeze_lock;
	unsigned int sd_freeze_count;

	char sd_fsname[GFS2_FSNAME_LEN];
	char sd_table_name[GFS2_FSNAME_LEN];
	char sd_proto_name[GFS2_FSNAME_LEN];

	/* Debugging crud */

	unsigned long sd_last_warning;
	struct dentry *debugfs_dir;	/* debugfs directory */
	struct dentry *debugfs_dentry_glocks;
	struct dentry *debugfs_dentry_glstats;
	struct dentry *debugfs_dentry_sbstats;
};

static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which)
{
	gl->gl_stats.stats[which]++;
}

static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
{
	const struct gfs2_sbd *sdp = gl->gl_sbd;
	preempt_disable();
	this_cpu_ptr(sdp->sd_lkstats)->lkstats[gl->gl_name.ln_type].stats[which]++;
	preempt_enable();
}

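/*
 * Illustrative sketch (not part of the original header): these helpers bump
 * the lock statistics indexed by the GFS2_LKS_* values above, e.g.:
 *
 *	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);	per-glock holder-queue count
 *	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);	per-cpu count per glock type
 */
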
#endif /* __INCORE_DOT_H__ */