Linux Kernel 3.7.1
fs/gfs2/glock.h (source listing)
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __GLOCK_DOT_H__
#define __GLOCK_DOT_H__

#include <linux/sched.h>
#include <linux/parser.h>
#include "incore.h"

/* Options for hostdata parser */

enum {
        Opt_jid,
        Opt_id,
        Opt_first,
        Opt_nodir,
        Opt_err,
};

/*
 * lm_lockname types
 */

#define LM_TYPE_RESERVED        0x00
#define LM_TYPE_NONDISK         0x01
#define LM_TYPE_INODE           0x02
#define LM_TYPE_RGRP            0x03
#define LM_TYPE_META            0x04
#define LM_TYPE_IOPEN           0x05
#define LM_TYPE_FLOCK           0x06
#define LM_TYPE_PLOCK           0x07
#define LM_TYPE_QUOTA           0x08
#define LM_TYPE_JOURNAL         0x09

/*
 * lm_lock() states
 *
 * SHARED is compatible with SHARED, not with DEFERRED or EX.
 * DEFERRED is compatible with DEFERRED, not with SHARED or EX.
 */

#define LM_ST_UNLOCKED          0
#define LM_ST_EXCLUSIVE         1
#define LM_ST_DEFERRED          2
#define LM_ST_SHARED            3
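
/*
 * A minimal sketch of the compatibility rule described above, expressed as a
 * helper; the function name is illustrative and is not part of this header.
 */
#if 0
static inline int lm_state_compatible(unsigned int held, unsigned int requested)
{
        if (held == LM_ST_UNLOCKED || requested == LM_ST_UNLOCKED)
                return 1;
        if (held == LM_ST_EXCLUSIVE || requested == LM_ST_EXCLUSIVE)
                return 0;
        /* SHARED only shares with SHARED, DEFERRED only with DEFERRED */
        return held == requested;
}
#endif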

/*
 * lm_lock() flags
 *
 * LM_FLAG_TRY
 * Don't wait to acquire the lock if it can't be granted immediately.
 *
 * LM_FLAG_TRY_1CB
 * Send one blocking callback if TRY is set and the lock is not granted.
 *
 * LM_FLAG_NOEXP
 * GFS sets this flag on lock requests it makes while doing journal recovery.
 * These special requests should not be blocked due to the recovery like
 * ordinary locks would be.
 *
 * LM_FLAG_ANY
 * A SHARED request may also be granted in DEFERRED, or a DEFERRED request may
 * also be granted in SHARED.  The preferred state is whichever is compatible
 * with other granted locks, or the specified state if no other locks exist.
 *
 * LM_FLAG_PRIORITY
 * Override fairness considerations.  Suppose a lock is held in a shared state
 * and there is a pending request for the deferred state.  A shared lock
 * request with the priority flag would be allowed to bypass the deferred
 * request and directly join the other shared lock.  A shared lock request
 * without the priority flag might be forced to wait until the deferred
 * request had acquired and released the lock.
 */

#define LM_FLAG_TRY             0x00000001
#define LM_FLAG_TRY_1CB         0x00000002
#define LM_FLAG_NOEXP           0x00000004
#define LM_FLAG_ANY             0x00000008
#define LM_FLAG_PRIORITY        0x00000010
#define GL_ASYNC                0x00000040
#define GL_EXACT                0x00000080
#define GL_SKIP                 0x00000100
#define GL_NOCACHE              0x00000400
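
/*
 * A minimal usage sketch for the flags above: they are OR'ed together and
 * passed with the requested state when a holder is queued (see
 * gfs2_glock_nq_init() later in this header). The glock pointer "gl" and the
 * surrounding error handling are assumed.
 */
#if 0
struct gfs2_holder gh;
int error;

/* Try to take the glock shared, without waiting and without caching it */
error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_TRY | GL_NOCACHE, &gh);
if (error == GLR_TRYFAILED) {
        /* the lock could not be granted immediately */
}
#endif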

/*
 * lm_async_cb return flags
 *
 * LM_OUT_ST_MASK
 * Masks the lower two bits of lock state in the returned value.
 *
 * LM_OUT_CANCELED
 * The lock request was canceled.
 *
 */

#define LM_OUT_ST_MASK          0x00000003
#define LM_OUT_CANCELED         0x00000008
#define LM_OUT_ERROR            0x00000004
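
/*
 * A minimal sketch of decoding an asynchronous completion value with the
 * masks above, such as the value handed to gfs2_glock_complete(); the
 * variable names are illustrative.
 */
#if 0
unsigned int new_state = ret & LM_OUT_ST_MASK;

if (ret & LM_OUT_CANCELED) {
        /* the lock request was canceled */
} else if (ret & LM_OUT_ERROR) {
        /* the lock operation failed */
} else {
        /* the request completed; the granted state is in new_state */
}
#endif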

/*
 * lm_recovery_done() messages
 */

#define LM_RD_GAVEUP            308
#define LM_RD_SUCCESS           309

#define GLR_TRYFAILED           13

#define GL_GLOCK_MAX_HOLD       (long)(HZ / 5)
#define GL_GLOCK_DFT_HOLD       (long)(HZ / 5)
#define GL_GLOCK_MIN_HOLD       (long)(10)
#define GL_GLOCK_HOLD_INCR      (long)(HZ / 20)
#define GL_GLOCK_HOLD_DECR      (long)(HZ / 40)
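
/*
 * The GL_GLOCK_* values above are expressed in jiffies (HZ-based). A minimal
 * sketch of clamping an adjusted hold time to the MIN/MAX bounds above; the
 * helper is hypothetical and not part of the glock code.
 */
#if 0
static inline long gl_clamp_hold_time(long hold)
{
        if (hold > GL_GLOCK_MAX_HOLD)
                return GL_GLOCK_MAX_HOLD;
        if (hold < GL_GLOCK_MIN_HOLD)
                return GL_GLOCK_MIN_HOLD;
        return hold;
}
#endif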

struct lm_lockops {
        const char *lm_proto_name;
        int (*lm_mount) (struct gfs2_sbd *sdp, const char *table);
        void (*lm_first_done) (struct gfs2_sbd *sdp);
        void (*lm_recovery_result) (struct gfs2_sbd *sdp, unsigned int jid,
                                    unsigned int result);
        void (*lm_unmount) (struct gfs2_sbd *sdp);
        void (*lm_withdraw) (struct gfs2_sbd *sdp);
        void (*lm_put_lock) (struct gfs2_glock *gl);
        int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
                        unsigned int flags);
        void (*lm_cancel) (struct gfs2_glock *gl);
        const match_table_t *lm_tokens;
};
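
/*
 * A minimal sketch of how a lock module might fill in struct lm_lockops. The
 * example_* names are hypothetical; the in-tree implementation exported at
 * the bottom of this header is gfs2_dlm_ops.
 */
#if 0
static int example_mount(struct gfs2_sbd *sdp, const char *table);
static void example_unmount(struct gfs2_sbd *sdp);
static void example_put_lock(struct gfs2_glock *gl);
static int example_lock(struct gfs2_glock *gl, unsigned int req_state,
                        unsigned int flags);
static void example_cancel(struct gfs2_glock *gl);

static const struct lm_lockops example_lockops = {
        .lm_proto_name  = "example",
        .lm_mount       = example_mount,
        .lm_unmount     = example_unmount,
        .lm_put_lock    = example_put_lock,
        .lm_lock        = example_lock,
        .lm_cancel      = example_cancel,
};
#endif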

extern struct workqueue_struct *gfs2_delete_workqueue;
static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        struct pid *pid;

        /* Look in glock's list of holders for one with current task as owner */
        spin_lock(&gl->gl_spin);
        pid = task_pid(current);
        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
                        break;
                if (gh->gh_owner_pid == pid)
                        goto out;
        }
        gh = NULL;
out:
        spin_unlock(&gl->gl_spin);

        return gh;
}

static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
{
        return gl->gl_state == LM_ST_EXCLUSIVE;
}

static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl)
{
        return gl->gl_state == LM_ST_DEFERRED;
}

static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
{
        return gl->gl_state == LM_ST_SHARED;
}

static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
{
        if (gl->gl_ops->go_flags & GLOF_ASPACE)
                return (struct address_space *)(gl + 1);
        return NULL;
}

int gfs2_glock_get(struct gfs2_sbd *sdp,
                   u64 number, const struct gfs2_glock_operations *glops,
                   int create, struct gfs2_glock **glp);
void gfs2_glock_hold(struct gfs2_glock *gl);
void gfs2_glock_put_nolock(struct gfs2_glock *gl);
void gfs2_glock_put(struct gfs2_glock *gl);
void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                      struct gfs2_holder *gh);
void gfs2_holder_reinit(unsigned int state, unsigned flags,
                        struct gfs2_holder *gh);
void gfs2_holder_uninit(struct gfs2_holder *gh);
int gfs2_glock_nq(struct gfs2_holder *gh);
int gfs2_glock_poll(struct gfs2_holder *gh);
int gfs2_glock_wait(struct gfs2_holder *gh);
void gfs2_glock_dq(struct gfs2_holder *gh);
void gfs2_glock_dq_wait(struct gfs2_holder *gh);

void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
int gfs2_glock_nq_num(struct gfs2_sbd *sdp,
                      u64 number, const struct gfs2_glock_operations *glops,
                      unsigned int state, int flags, struct gfs2_holder *gh);

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);

__printf(2, 3)
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);

static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
                                     unsigned int state, int flags,
                                     struct gfs2_holder *gh)
{
        int error;

        gfs2_holder_init(gl, state, flags, gh);

        error = gfs2_glock_nq(gh);
        if (error)
                gfs2_holder_uninit(gh);

        return error;
}
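
/*
 * A minimal sketch of the usual acquire/use/release pattern built on
 * gfs2_glock_nq_init() above and gfs2_glock_dq_uninit(); the inode pointer
 * "ip" and the work done under the lock are assumed.
 */
#if 0
struct gfs2_holder gh;
int error;

error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
if (error)
        return error;

/* ... operate on the inode while its glock is held exclusively ... */

gfs2_glock_dq_uninit(&gh);
#endif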

extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
extern void gfs2_glock_free(struct gfs2_glock *gl);

extern int __init gfs2_glock_init(void);
extern void gfs2_glock_exit(void);

extern int gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
extern void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
extern int gfs2_register_debugfs(void);
extern void gfs2_unregister_debugfs(void);

extern const struct lm_lockops gfs2_dlm_ops;

#endif /* __GLOCK_DOT_H__ */