Linux Kernel 3.7.1
fs/ocfs2/dlm/dlmcommon.h
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmcommon.h
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */

#ifndef DLMCOMMON_H
#define DLMCOMMON_H

#include <linux/kref.h>

#define DLM_HB_NODE_DOWN_PRI     (0xf000000)
#define DLM_HB_NODE_UP_PRI       (0x8000000)

#define DLM_LOCKID_NAME_MAX    32

#define DLM_DOMAIN_NAME_MAX_LEN    255
#define DLM_LOCK_RES_OWNER_UNKNOWN     O2NM_MAX_NODES
#define DLM_THREAD_SHUFFLE_INTERVAL    5     // flush everything every 5 passes
#define DLM_THREAD_MS                  200   // flush at least every 200 ms

#define DLM_HASH_SIZE_DEFAULT	(1 << 17)
#if DLM_HASH_SIZE_DEFAULT < PAGE_SIZE
# define DLM_HASH_PAGES		1
#else
# define DLM_HASH_PAGES		(DLM_HASH_SIZE_DEFAULT / PAGE_SIZE)
#endif
#define DLM_BUCKETS_PER_PAGE	(PAGE_SIZE / sizeof(struct hlist_head))
#define DLM_HASH_BUCKETS	(DLM_HASH_PAGES * DLM_BUCKETS_PER_PAGE)
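/*
 * Worked example (illustrative, not in the original source): on a
 * 64-bit build with 4096-byte pages, DLM_HASH_PAGES = (1 << 17) / 4096
 * = 32 and DLM_BUCKETS_PER_PAGE = 4096 / 8 = 512, so DLM_HASH_BUCKETS
 * = 32 * 512 = 16384 hash chain heads spread over 32 separate pages.
 */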

/* Intended to make it easier for us to switch out hash functions */
#define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l)

enum dlm_mle_type {
	DLM_MLE_BLOCK = 0,
	DLM_MLE_MASTER = 1,
	DLM_MLE_MIGRATION = 2,
	DLM_MLE_NUM_TYPES = 3,
};

struct dlm_master_list_entry {
	struct hlist_node master_hash_node;
	struct list_head hb_events;
	struct dlm_ctxt *dlm;
	spinlock_t spinlock;
	wait_queue_head_t wq;
	atomic_t woken;
	struct kref mle_refs;
	int inuse;
	unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	u8 master;
	u8 new_master;
	enum dlm_mle_type type;
	struct o2hb_callback_func mle_hb_up;
	struct o2hb_callback_func mle_hb_down;
	struct dlm_lock_resource *mleres;
	unsigned char mname[DLM_LOCKID_NAME_MAX];
	unsigned int mnamelen;
	unsigned int mnamehash;
};

enum dlm_ast_type {
	DLM_AST = 0,
	DLM_BAST = 1,
	DLM_ASTUNLOCK = 2,
};


#define LKM_VALID_FLAGS (LKM_VALBLK | LKM_CONVERT | LKM_UNLOCK | \
			 LKM_CANCEL | LKM_INVVALBLK | LKM_FORCE | \
			 LKM_RECOVERY | LKM_LOCAL | LKM_NOQUEUE)

#define DLM_RECOVERY_LOCK_NAME		"$RECOVERY"
#define DLM_RECOVERY_LOCK_NAME_LEN	9

static inline int dlm_is_recovery_lock(const char *lock_name, int name_len)
{
	if (name_len == DLM_RECOVERY_LOCK_NAME_LEN &&
	    memcmp(lock_name, DLM_RECOVERY_LOCK_NAME, name_len) == 0)
		return 1;
	return 0;
}

#define DLM_RECO_STATE_ACTIVE    0x0001
#define DLM_RECO_STATE_FINALIZE  0x0002

struct dlm_recovery_ctxt
{
	struct list_head resources;
	struct list_head received;
	struct list_head node_data;
	u8  new_master;
	u8  dead_node;
	u16 state;
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	wait_queue_head_t event;
};

enum dlm_ctxt_state {
	DLM_CTXT_NEW = 0,
	DLM_CTXT_JOINED = 1,
	DLM_CTXT_IN_SHUTDOWN = 2,
	DLM_CTXT_LEAVING = 3,
};

struct dlm_ctxt
{
	struct list_head list;
	struct hlist_head **lockres_hash;
	struct list_head dirty_list;
	struct list_head purge_list;
	struct list_head pending_asts;
	struct list_head pending_basts;
	struct list_head tracking_list;
	unsigned int purge_count;
	spinlock_t spinlock;
	spinlock_t ast_lock;
	spinlock_t track_lock;
	char *name;
	u8 node_num;
	u32 key;
	u8  joining_node;
	wait_queue_head_t dlm_join_events;
	unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long exit_domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	struct dlm_recovery_ctxt reco;
	spinlock_t master_lock;
	struct hlist_head **master_hash;
	struct list_head mle_hb_events;

	/* these give a really vague idea of the system load */
	atomic_t mle_tot_count[DLM_MLE_NUM_TYPES];
	atomic_t mle_cur_count[DLM_MLE_NUM_TYPES];
	atomic_t res_tot_count;
	atomic_t res_cur_count;

	struct dlm_debug_ctxt *dlm_debug_ctxt;
	struct dentry *dlm_debugfs_subroot;

	/* NOTE: Next three are protected by dlm_domain_lock */
	struct kref dlm_refs;
	enum dlm_ctxt_state dlm_state;
	unsigned int num_joins;

	struct o2hb_callback_func dlm_hb_up;
	struct o2hb_callback_func dlm_hb_down;
	struct task_struct *dlm_thread_task;
	struct task_struct *dlm_reco_thread_task;
	struct workqueue_struct *dlm_worker;
	wait_queue_head_t dlm_thread_wq;
	wait_queue_head_t dlm_reco_thread_wq;
	wait_queue_head_t ast_wq;
	wait_queue_head_t migration_wq;

	struct work_struct dispatched_work;
	struct list_head work_list;
	spinlock_t work_lock;
	struct list_head dlm_domain_handlers;
	struct list_head dlm_eviction_callbacks;

	/* The filesystem specifies this at domain registration.  We
	 * cache it here to know what to tell other nodes. */
	struct dlm_protocol_version fs_locking_proto;
	/* This is the inter-dlm communication version */
	struct dlm_protocol_version dlm_locking_proto;
};

static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i)
{
	return dlm->lockres_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] +
			(i % DLM_BUCKETS_PER_PAGE);
}

static inline struct hlist_head *dlm_master_hash(struct dlm_ctxt *dlm,
						 unsigned i)
{
	return dlm->master_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] +
			(i % DLM_BUCKETS_PER_PAGE);
}
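/*
 * Illustrative index math (not in the original source): with the 32
 * pages of 512 buckets from the example above, bucket i = 1000 resolves
 * to lockres_hash[(1000 / 512) % 32] + (1000 % 512), i.e. entry 488 of
 * page 1.  The modulo keeps the lookup in range even if i exceeds
 * DLM_HASH_BUCKETS.
 */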

/* these keventd work queue items are for less-frequently
 * called functions that cannot be directly called from the
 * net message handlers for some reason, usually because
 * they need to send net messages of their own. */
void dlm_dispatch_work(struct work_struct *work);

struct dlm_lock_resource;
struct dlm_work_item;

typedef void (dlm_workfunc_t)(struct dlm_work_item *, void *);

struct dlm_request_all_locks_priv
{
	u8 reco_master;
	u8 dead_node;
};

struct dlm_mig_lockres_priv
{
	struct dlm_lock_resource *lockres;
	u8 real_master;
	u8 extra_ref;
};

struct dlm_assert_master_priv
{
	struct dlm_lock_resource *lockres;
	u8 request_from;
	u32 flags;
	unsigned ignore_higher:1;
};

struct dlm_deref_lockres_priv
{
	struct dlm_lock_resource *deref_res;
	u8 deref_node;
};

struct dlm_work_item
{
	struct list_head list;
	dlm_workfunc_t *func;
	struct dlm_ctxt *dlm;
	void *data;
	union {
		struct dlm_request_all_locks_priv ral;
		struct dlm_mig_lockres_priv ml;
		struct dlm_assert_master_priv am;
		struct dlm_deref_lockres_priv dl;
	} u;
};

static inline void dlm_init_work_item(struct dlm_ctxt *dlm,
				      struct dlm_work_item *i,
				      dlm_workfunc_t *f, void *data)
{
	memset(i, 0, sizeof(*i));
	i->func = f;
	INIT_LIST_HEAD(&i->list);
	i->data = data;
	i->dlm = dlm;    /* must have already done a dlm_grab on this! */
}
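/*
 * Usage sketch (illustrative, not part of this header): a message
 * handler that must send messages of its own defers the work.  This
 * assumes the convention in the dlm .c files of queueing the item on
 * dlm->work_list and kicking dlm->dlm_worker; "my_worker_func" is a
 * hypothetical dlm_workfunc_t.
 *
 *	struct dlm_work_item *item = kzalloc(sizeof(*item), GFP_NOFS);
 *
 *	dlm_init_work_item(dlm, item, my_worker_func, NULL);
 *	spin_lock(&dlm->work_lock);
 *	list_add_tail(&item->list, &dlm->work_list);
 *	spin_unlock(&dlm->work_lock);
 *	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
 */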


static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
					  u8 node)
{
	assert_spin_locked(&dlm->spinlock);

	dlm->joining_node = node;
	wake_up(&dlm->dlm_join_events);
}

#define DLM_LOCK_RES_UNINITED             0x00000001
#define DLM_LOCK_RES_RECOVERING           0x00000002
#define DLM_LOCK_RES_READY                0x00000004
#define DLM_LOCK_RES_DIRTY                0x00000008
#define DLM_LOCK_RES_IN_PROGRESS          0x00000010
#define DLM_LOCK_RES_MIGRATING            0x00000020
#define DLM_LOCK_RES_DROPPING_REF         0x00000040
#define DLM_LOCK_RES_BLOCK_DIRTY          0x00001000
#define DLM_LOCK_RES_SETREF_INPROG        0x00002000

/* max milliseconds to wait to sync up a network failure with a node death */
#define DLM_NODE_DEATH_WAIT_MAX (5 * 1000)

#define DLM_PURGE_INTERVAL_MS   (8 * 1000)

struct dlm_lock_resource
{
	/* WARNING: Please see the comment in dlm_init_lockres before
	 * adding fields here. */
	struct hlist_node hash_node;
	struct qstr lockname;
	struct kref refs;

	/*
	 * Please keep granted, converting, and blocked in this order,
	 * as some funcs want to iterate over all lists.
	 *
	 * All four lists are protected by the hash's reference.
	 */
	struct list_head granted;
	struct list_head converting;
	struct list_head blocked;
	struct list_head purge;

	/*
	 * These two lists require you to hold an additional reference
	 * while they are on the list.
	 */
	struct list_head dirty;
	struct list_head recovering; // dlm_recovery_ctxt.resources list

	/* Added during init and removed during release */
	struct list_head tracking;	/* dlm->tracking_list */

	/* unused lock resources have their last_used stamped and are
	 * put on a list for the dlm thread to run. */
	unsigned long    last_used;

	struct dlm_ctxt *dlm;

	unsigned migration_pending:1;
	atomic_t asts_reserved;
	spinlock_t spinlock;
	wait_queue_head_t wq;
	u8  owner;              //node which owns the lock resource, or unknown
	u16 state;
	char lvb[DLM_LVB_LEN];
	unsigned int inflight_locks;
	unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

struct dlm_migratable_lock
{
	__be64 cookie;

	/* these 3 are just padding for the in-memory structure, but
	 * list and flags are actually used when sent over the wire */
	__be16 pad1;
	u8 list;  // 0=granted, 1=converting, 2=blocked
	u8 flags;

	s8 type;
	s8 convert_type;
	s8 highest_blocked;
	u8 node;
};  // 16 bytes

struct dlm_lock
{
	struct dlm_migratable_lock ml;

	struct list_head list;
	struct list_head ast_list;
	struct list_head bast_list;
	struct dlm_lock_resource *lockres;
	spinlock_t spinlock;
	struct kref lock_refs;

	// ast and bast must be callable while holding a spinlock!
	dlm_astlockfunc_t *ast;
	dlm_bastlockfunc_t *bast;
	void *astdata;
	struct dlm_lockstatus *lksb;
	unsigned ast_pending:1,
		 bast_pending:1,
		 convert_pending:1,
		 lock_pending:1,
		 cancel_pending:1,
		 unlock_pending:1,
		 lksb_kernel_allocated:1;
};


#define DLM_LKSB_UNUSED1           0x01
#define DLM_LKSB_PUT_LVB           0x02
#define DLM_LKSB_GET_LVB           0x04
#define DLM_LKSB_UNUSED2           0x08
#define DLM_LKSB_UNUSED3           0x10
#define DLM_LKSB_UNUSED4           0x20
#define DLM_LKSB_UNUSED5           0x40
#define DLM_LKSB_UNUSED6           0x80


enum dlm_lockres_list {
	DLM_GRANTED_LIST = 0,
	DLM_CONVERTING_LIST = 1,
	DLM_BLOCKED_LIST = 2,
};

static inline int dlm_lvb_is_empty(char *lvb)
{
	int i;
	for (i = 0; i < DLM_LVB_LEN; i++)
		if (lvb[i])
			return 0;
	return 1;
}

static inline char *dlm_list_in_text(enum dlm_lockres_list idx)
{
	if (idx == DLM_GRANTED_LIST)
		return "granted";
	else if (idx == DLM_CONVERTING_LIST)
		return "converting";
	else if (idx == DLM_BLOCKED_LIST)
		return "blocked";
	else
		return "unknown";
}

static inline struct list_head *
dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx)
{
	struct list_head *ret = NULL;

	if (idx == DLM_GRANTED_LIST)
		ret = &res->granted;
	else if (idx == DLM_CONVERTING_LIST)
		ret = &res->converting;
	else if (idx == DLM_BLOCKED_LIST)
		ret = &res->blocked;
	else
		BUG();
	return ret;
}




struct dlm_node_iter
{
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int curnode;
};


enum {
	DLM_MASTER_REQUEST_MSG		= 500,
	DLM_UNUSED_MSG1			= 501,
	DLM_ASSERT_MASTER_MSG		= 502,
	DLM_CREATE_LOCK_MSG		= 503,
	DLM_CONVERT_LOCK_MSG		= 504,
	DLM_PROXY_AST_MSG		= 505,
	DLM_UNLOCK_LOCK_MSG		= 506,
	DLM_DEREF_LOCKRES_MSG		= 507,
	DLM_MIGRATE_REQUEST_MSG		= 508,
	DLM_MIG_LOCKRES_MSG		= 509,
	DLM_QUERY_JOIN_MSG		= 510,
	DLM_ASSERT_JOINED_MSG		= 511,
	DLM_CANCEL_JOIN_MSG		= 512,
	DLM_EXIT_DOMAIN_MSG		= 513,
	DLM_MASTER_REQUERY_MSG		= 514,
	DLM_LOCK_REQUEST_MSG		= 515,
	DLM_RECO_DATA_DONE_MSG		= 516,
	DLM_BEGIN_RECO_MSG		= 517,
	DLM_FINALIZE_RECO_MSG		= 518,
	DLM_QUERY_REGION		= 519,
	DLM_QUERY_NODEINFO		= 520,
	DLM_BEGIN_EXIT_DOMAIN_MSG	= 521,
};

struct dlm_reco_node_data
{
	int state;
	u8 node_num;
	struct list_head list;
};

enum {
	DLM_RECO_NODE_DATA_DEAD = -1,
	DLM_RECO_NODE_DATA_INIT = 0,
	DLM_RECO_NODE_DATA_REQUESTING = 1,
	DLM_RECO_NODE_DATA_REQUESTED = 2,
	DLM_RECO_NODE_DATA_RECEIVING = 3,
	DLM_RECO_NODE_DATA_DONE = 4,
	DLM_RECO_NODE_DATA_FINALIZE_SENT = 5,
};


enum {
	DLM_MASTER_RESP_NO = 0,
	DLM_MASTER_RESP_YES = 1,
	DLM_MASTER_RESP_MAYBE = 2,
	DLM_MASTER_RESP_ERROR = 3,
};


struct dlm_master_request
{
	u8 node_idx;
	u8 namelen;
	__be16 pad1;
	__be32 flags;

	u8 name[O2NM_MAX_NAME_LEN];
};

#define DLM_ASSERT_RESPONSE_REASSERT       0x00000001
#define DLM_ASSERT_RESPONSE_MASTERY_REF    0x00000002

#define DLM_ASSERT_MASTER_MLE_CLEANUP      0x00000001
#define DLM_ASSERT_MASTER_REQUERY          0x00000002
#define DLM_ASSERT_MASTER_FINISH_MIGRATION 0x00000004
struct dlm_assert_master
{
	u8 node_idx;
	u8 namelen;
	__be16 pad1;
	__be32 flags;

	u8 name[O2NM_MAX_NAME_LEN];
};

#define DLM_MIGRATE_RESPONSE_MASTERY_REF   0x00000001

struct dlm_migrate_request
{
	u8 master;
	u8 new_master;
	u8 namelen;
	u8 pad1;
	__be32 pad2;
	u8 name[O2NM_MAX_NAME_LEN];
};

struct dlm_master_requery
{
	u8 pad1;
	u8 pad2;
	u8 node_idx;
	u8 namelen;
	__be32 pad3;
	u8 name[O2NM_MAX_NAME_LEN];
};

#define DLM_MRES_RECOVERY 0x01
#define DLM_MRES_MIGRATION 0x02
#define DLM_MRES_ALL_DONE 0x04

/*
 * We would like to get one whole lockres into a single network
 * message whenever possible.  Generally speaking, there will be
 * at most one dlm_lock on a lockres for each node in the cluster,
 * plus (infrequently) any additional locks coming in from userdlm.
 *
 *  struct _dlm_lockres_page
 *  {
 *	dlm_migratable_lockres mres;
 *	dlm_migratable_lock ml[DLM_MAX_MIGRATABLE_LOCKS];
 *	u8 pad[DLM_MIG_LOCKRES_RESERVED];
 *  };
 *
 *  from ../cluster/tcp.h
 *    NET_MAX_PAYLOAD_BYTES  (4096 - sizeof(net_msg))
 *    (roughly 4080 bytes)
 *  and sizeof(dlm_migratable_lockres) = 112 bytes
 *  and sizeof(dlm_migratable_lock) = 16 bytes
 *
 *  Choosing DLM_MAX_MIGRATABLE_LOCKS=240 and
 *  DLM_MIG_LOCKRES_RESERVED=128 means we have this:
 *
 *  (DLM_MAX_MIGRATABLE_LOCKS * sizeof(dlm_migratable_lock)) +
 *     sizeof(dlm_migratable_lockres) + DLM_MIG_LOCKRES_RESERVED =
 *        NET_MAX_PAYLOAD_BYTES
 *  (240 * 16) + 112 + 128 = 4080
 *
 *  So a lockres would need more than 240 locks before it would
 *  use more than one network packet to recover.  Not too bad.
 */
#define DLM_MAX_MIGRATABLE_LOCKS   240

struct dlm_migratable_lockres
{
	u8 master;
	u8 lockname_len;
	u8 num_locks;       // locks sent in this structure
	u8 flags;
	__be32 total_locks; // locks to be sent for this migration cookie
	__be64 mig_cookie;  // cookie for this lockres migration
			    // or zero if not needed
	// 16 bytes
	u8 lockname[DLM_LOCKID_NAME_MAX];
	// 48 bytes
	u8 lvb[DLM_LVB_LEN];
	// 112 bytes
	struct dlm_migratable_lock ml[0];  // 16 bytes each, begins at byte 112
};
#define DLM_MIG_LOCKRES_MAX_LEN  \
	(sizeof(struct dlm_migratable_lockres) + \
	 (sizeof(struct dlm_migratable_lock) * \
	  DLM_MAX_MIGRATABLE_LOCKS) )

/* from above, 128 bytes
 * for some undetermined future use */
#define DLM_MIG_LOCKRES_RESERVED   (NET_MAX_PAYLOAD_BYTES - \
				    DLM_MIG_LOCKRES_MAX_LEN)
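/*
 * Worked check of the sizing above (illustrative, not in the original
 * source): with NET_MAX_PAYLOAD_BYTES at roughly 4080,
 * DLM_MIG_LOCKRES_MAX_LEN = 112 + (240 * 16) = 3952 bytes, which
 * leaves DLM_MIG_LOCKRES_RESERVED = 4080 - 3952 = 128 bytes, matching
 * the comment before DLM_MAX_MIGRATABLE_LOCKS.
 */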

struct dlm_create_lock
{
	__be64 cookie;

	__be32 flags;
	u8 pad1;
	u8 node_idx;
	s8 requested_type;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];
};

struct dlm_convert_lock
{
	__be64 cookie;

	__be32 flags;
	u8 pad1;
	u8 node_idx;
	s8 requested_type;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];

	s8 lvb[0];
};
#define DLM_CONVERT_LOCK_MAX_LEN  (sizeof(struct dlm_convert_lock)+DLM_LVB_LEN)

struct dlm_unlock_lock
{
	__be64 cookie;

	__be32 flags;
	__be16 pad1;
	u8 node_idx;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];

	s8 lvb[0];
};
#define DLM_UNLOCK_LOCK_MAX_LEN  (sizeof(struct dlm_unlock_lock)+DLM_LVB_LEN)

struct dlm_proxy_ast
{
	__be64 cookie;

	__be32 flags;
	u8 node_idx;
	u8 type;
	u8 blocked_type;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];

	s8 lvb[0];
};
#define DLM_PROXY_AST_MAX_LEN  (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN)

#define DLM_MOD_KEY (0x666c6172)
enum dlm_query_join_response_code {
	JOIN_DISALLOW = 0,
	JOIN_OK = 1,
	JOIN_OK_NO_MAP = 2,
	JOIN_PROTOCOL_MISMATCH = 3,
};

struct dlm_query_join_packet {
	u8 code;	/* Response code.  dlm_minor and fs_minor
			   are only valid if this is JOIN_OK */
	u8 dlm_minor;	/* The minor version of the protocol the
			   dlm is speaking. */
	u8 fs_minor;	/* The minor version of the protocol the
			   filesystem is speaking. */
	u8 reserved;
};

union dlm_query_join_response {
	__be32 intval;
	struct dlm_query_join_packet packet;
};

struct dlm_lock_request
{
	u8 node_idx;
	u8 dead_node;
	__be16 pad1;
	__be32 pad2;
};

struct dlm_reco_data_done
{
	u8 node_idx;
	u8 dead_node;
	__be16 pad1;
	__be32 pad2;

	/* unused for now */
	/* eventually we can use this to attempt
	 * lvb recovery based on each node's info */
	u8 reco_lvb[DLM_LVB_LEN];
};

struct dlm_begin_reco
{
	u8 node_idx;
	u8 dead_node;
	__be16 pad1;
	__be32 pad2;
};


#define BITS_PER_BYTE 8
#define BITS_TO_BYTES(bits) (((bits)+BITS_PER_BYTE-1)/BITS_PER_BYTE)
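/*
 * Illustrative example (not in the original source): assuming
 * O2NM_MAX_NODES is 255 as in the o2 nodemanager, BITS_TO_BYTES(255)
 * = (255 + 7) / 8 = 32, so the on-the-wire node_map below occupies
 * 32 bytes.
 */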

struct dlm_query_join_request
{
	u8 node_idx;
	u8 pad1[2];
	u8 name_len;
	struct dlm_protocol_version dlm_proto;
	struct dlm_protocol_version fs_proto;
	u8 domain[O2NM_MAX_NAME_LEN];
	u8 node_map[BITS_TO_BYTES(O2NM_MAX_NODES)];
};

struct dlm_assert_joined
{
	u8 node_idx;
	u8 pad1[2];
	u8 name_len;
	u8 domain[O2NM_MAX_NAME_LEN];
};

struct dlm_cancel_join
{
	u8 node_idx;
	u8 pad1[2];
	u8 name_len;
	u8 domain[O2NM_MAX_NAME_LEN];
};

struct dlm_query_region {
	u8 qr_node;
	u8 qr_numregions;
	u8 qr_namelen;
	u8 pad1;
	u8 qr_domain[O2NM_MAX_NAME_LEN];
	u8 qr_regions[O2HB_MAX_REGION_NAME_LEN * O2NM_MAX_REGIONS];
};

struct dlm_node_info {
	u8 ni_nodenum;
	u8 pad1;
	__be16 ni_ipv4_port;
	__be32 ni_ipv4_address;
};

struct dlm_query_nodeinfo {
	u8 qn_nodenum;
	u8 qn_numnodes;
	u8 qn_namelen;
	u8 pad1;
	u8 qn_domain[O2NM_MAX_NAME_LEN];
	struct dlm_node_info qn_nodes[O2NM_MAX_NODES];
};

struct dlm_exit_domain
{
	u8 node_idx;
	u8 pad1[3];
};

struct dlm_finalize_reco
{
	u8 node_idx;
	u8 dead_node;
	u8 flags;
	u8 pad1;
	__be32 pad2;
};

struct dlm_deref_lockres
{
	u32 pad1;
	u16 pad2;
	u8 node_idx;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];
};

static inline enum dlm_status
__dlm_lockres_state_to_status(struct dlm_lock_resource *res)
{
	enum dlm_status status = DLM_NORMAL;

	assert_spin_locked(&res->spinlock);

	if (res->state & DLM_LOCK_RES_RECOVERING)
		status = DLM_RECOVERING;
	else if (res->state & DLM_LOCK_RES_MIGRATING)
		status = DLM_MIGRATING;
	else if (res->state & DLM_LOCK_RES_IN_PROGRESS)
		status = DLM_FORWARD;

	return status;
}
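/*
 * Note (illustrative, not in the original source): the checks above
 * are ordered, so a resource whose state has both
 * DLM_LOCK_RES_RECOVERING and DLM_LOCK_RES_MIGRATING set reports
 * DLM_RECOVERING; recovery takes priority over migration, which takes
 * priority over in-progress.
 */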

static inline u8 dlm_get_lock_cookie_node(u64 cookie)
{
	u8 ret;
	cookie >>= 56;
	ret = (u8)(cookie & 0xffULL);
	return ret;
}

static inline unsigned long long dlm_get_lock_cookie_seq(u64 cookie)
{
	unsigned long long ret;
	ret = ((unsigned long long)cookie) & 0x00ffffffffffffffULL;
	return ret;
}
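/*
 * Illustrative example (not in the original source): a lock cookie
 * packs the node number into the top 8 bits and a sequence number
 * into the low 56 bits.  For cookie 0x0500000000000007ULL,
 * dlm_get_lock_cookie_node() returns 5 and dlm_get_lock_cookie_seq()
 * returns 7.
 */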

struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
			       struct dlm_lockstatus *lksb);
void dlm_lock_get(struct dlm_lock *lock);
void dlm_lock_put(struct dlm_lock *lock);

void dlm_lock_attach_lockres(struct dlm_lock *lock,
			     struct dlm_lock_resource *res);

int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data);
int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			     void **ret_data);
int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
			  void **ret_data);

void dlm_revert_pending_convert(struct dlm_lock_resource *res,
				struct dlm_lock *lock);
void dlm_revert_pending_lock(struct dlm_lock_resource *res,
			     struct dlm_lock *lock);

int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data);
void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
			       struct dlm_lock *lock);
void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
			       struct dlm_lock *lock);

int dlm_launch_thread(struct dlm_ctxt *dlm);
void dlm_complete_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);

void dlm_put(struct dlm_ctxt *dlm);
struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
int dlm_domain_fully_joined(struct dlm_ctxt *dlm);

void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res);
void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res);
static inline void dlm_lockres_get(struct dlm_lock_resource *res)
{
	/* This is called on every lookup, so it might be worth
	 * inlining. */
	kref_get(&res->refs);
}
void dlm_lockres_put(struct dlm_lock_resource *res);
void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
						     const char *name,
						     unsigned int len,
						     unsigned int hash);
struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
						const char *name,
						unsigned int len,
						unsigned int hash);
struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
					      const char *name,
					      unsigned int len);

int dlm_is_host_down(int errno);

struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
						 const char *lockid,
						 int namelen,
						 int flags);
struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
					  const char *name,
					  unsigned int namelen);

void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res, int bit);
void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res, int bit);

void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
				   struct dlm_lock_resource *res);
void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
				   struct dlm_lock_resource *res);

void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_do_local_ast(struct dlm_ctxt *dlm,
		      struct dlm_lock_resource *res,
		      struct dlm_lock *lock);
int dlm_do_remote_ast(struct dlm_ctxt *dlm,
		      struct dlm_lock_resource *res,
		      struct dlm_lock *lock);
void dlm_do_local_bast(struct dlm_ctxt *dlm,
		       struct dlm_lock_resource *res,
		       struct dlm_lock *lock,
		       int blocked_type);
int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm,
			   struct dlm_lock_resource *res,
			   struct dlm_lock *lock,
			   int msg_type,
			   int blocked_type, int flags);
static inline int dlm_send_proxy_bast(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      struct dlm_lock *lock,
				      int blocked_type)
{
	return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_BAST,
				      blocked_type, 0);
}

static inline int dlm_send_proxy_ast(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_lock *lock,
				     int flags)
{
	return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_AST,
				      0, flags);
}

void dlm_print_one_lock_resource(struct dlm_lock_resource *res);
void __dlm_print_one_lock_resource(struct dlm_lock_resource *res);

u8 dlm_nm_this_node(struct dlm_ctxt *dlm);
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);


int dlm_nm_init(struct dlm_ctxt *dlm);
int dlm_heartbeat_init(struct dlm_ctxt *dlm);
void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);

int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
int dlm_finish_migration(struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 u8 old_master);
void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res);
void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res);

int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data);
int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data);
void dlm_assert_master_post_handler(int status, void *data, void *ret_data);
int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data);
int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
				void **ret_data);
int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data);
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data);
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
				  void **ret_data);
int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data);
int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			   void **ret_data);
int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data);
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			  u8 nodenum, u8 *real_master);


int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res,
			       int ignore_higher,
			       u8 request_from,
			       u32 flags);


int dlm_send_one_lockres(struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to,
			 u8 flags);
void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res);

/* will exit holding res->spinlock, but may drop in function */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags);
void __dlm_wait_on_lockres_flags_set(struct dlm_lock_resource *res, int flags);

/* will exit holding res->spinlock, but may drop in function */
static inline void __dlm_wait_on_lockres(struct dlm_lock_resource *res)
{
	__dlm_wait_on_lockres_flags(res, (DLM_LOCK_RES_IN_PROGRESS|
					  DLM_LOCK_RES_RECOVERING|
					  DLM_LOCK_RES_MIGRATING));
}

void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle);
void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle);

/* create/destroy slab caches */
int dlm_init_master_caches(void);
void dlm_destroy_master_caches(void);

int dlm_init_lock_cache(void);
void dlm_destroy_lock_cache(void);

int dlm_init_mle_cache(void);
void dlm_destroy_mle_cache(void);

void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up);
int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res);
void dlm_clean_master_list(struct dlm_ctxt *dlm,
			   u8 dead_node);
void dlm_force_free_mles(struct dlm_ctxt *dlm);
int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
int __dlm_lockres_unused(struct dlm_lock_resource *res);

static inline const char *dlm_lock_mode_name(int mode)
{
	switch (mode) {
	case LKM_EXMODE:
		return "EX";
	case LKM_PRMODE:
		return "PR";
	case LKM_NLMODE:
		return "NL";
	}
	return "UNKNOWN";
}


static inline int dlm_lock_compatible(int existing, int request)
{
	/* NO_LOCK compatible with all */
	if (request == LKM_NLMODE || existing == LKM_NLMODE)
		return 1;

	/* EX incompatible with all non-NO_LOCK */
	if (request == LKM_EXMODE)
		return 0;

	/* request must be PR, which is compatible with PR */
	if (existing == LKM_PRMODE)
		return 1;

	return 0;
}
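/*
 * The resulting compatibility matrix (illustrative, not in the
 * original source; 1 = compatible):
 *
 *	existing \ request	NL	PR	EX
 *	NL			 1	 1	 1
 *	PR			 1	 1	 0
 *	EX			 1	 0	 0
 */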

static inline int dlm_lock_on_list(struct list_head *head,
				   struct dlm_lock *lock)
{
	struct list_head *iter;
	struct dlm_lock *tmplock;

	list_for_each(iter, head) {
		tmplock = list_entry(iter, struct dlm_lock, list);
		if (tmplock == lock)
			return 1;
	}
	return 0;
}


static inline enum dlm_status dlm_err_to_dlm_status(int err)
{
	enum dlm_status ret;
	if (err == -ENOMEM)
		ret = DLM_SYSERR;
	else if (err == -ETIMEDOUT || o2net_link_down(err, NULL))
		ret = DLM_NOLOCKMGR;
	else if (err == -EINVAL)
		ret = DLM_BADPARAM;
	else if (err == -ENAMETOOLONG)
		ret = DLM_IVBUFLEN;
	else
		ret = DLM_BADARGS;
	return ret;
}


static inline void dlm_node_iter_init(unsigned long *map,
				      struct dlm_node_iter *iter)
{
	memcpy(iter->node_map, map, sizeof(iter->node_map));
	iter->curnode = -1;
}

static inline int dlm_node_iter_next(struct dlm_node_iter *iter)
{
	int bit;
	bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1);
	if (bit >= O2NM_MAX_NODES) {
		iter->curnode = O2NM_MAX_NODES;
		return -ENOENT;
	}
	iter->curnode = bit;
	return bit;
}
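/*
 * Usage sketch (illustrative, not part of this header): walk every
 * node in a map.  The iterator snapshots the map at init time, so
 * callers typically copy it under the lock that protects the map
 * (e.g. dlm->spinlock for dlm->domain_map).
 *
 *	struct dlm_node_iter iter;
 *	int nodenum;
 *
 *	spin_lock(&dlm->spinlock);
 *	dlm_node_iter_init(dlm->domain_map, &iter);
 *	spin_unlock(&dlm->spinlock);
 *	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
 *		... contact nodenum ...
 *	}
 */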

static inline void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
					 struct dlm_lock_resource *res,
					 u8 owner)
{
	assert_spin_locked(&res->spinlock);

	res->owner = owner;
}

static inline void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
					    struct dlm_lock_resource *res,
					    u8 owner)
{
	assert_spin_locked(&res->spinlock);

	if (owner != res->owner)
		dlm_set_lockres_owner(dlm, res, owner);
}

#endif /* DLMCOMMON_H */