dlmrecovery.c (Linux Kernel 3.7.1)
1 /* -*- mode: c; c-basic-offset: 8; -*-
2  * vim: noexpandtab sw=8 ts=8 sts=0:
3  *
4  * dlmrecovery.c
5  *
6  * recovery stuff
7  *
8  * Copyright (C) 2004 Oracle. All rights reserved.
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public
12  * License as published by the Free Software Foundation; either
13  * version 2 of the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public
21  * License along with this program; if not, write to the
22  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23  * Boston, MA 02111-1307, USA.
24  *
25  */
26 
27 
28 #include <linux/module.h>
29 #include <linux/fs.h>
30 #include <linux/types.h>
31 #include <linux/slab.h>
32 #include <linux/highmem.h>
33 #include <linux/init.h>
34 #include <linux/sysctl.h>
35 #include <linux/random.h>
36 #include <linux/blkdev.h>
37 #include <linux/socket.h>
38 #include <linux/inet.h>
39 #include <linux/timer.h>
40 #include <linux/kthread.h>
41 #include <linux/delay.h>
42 
43 
44 #include "cluster/heartbeat.h"
45 #include "cluster/nodemanager.h"
46 #include "cluster/tcp.h"
47 
48 #include "dlmapi.h"
49 #include "dlmcommon.h"
50 #include "dlmdomain.h"
51 
52 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
53 #include "cluster/masklog.h"
54 
55 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);
56 
57 static int dlm_recovery_thread(void *data);
58 void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
59 int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
60 void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
61 static int dlm_do_recovery(struct dlm_ctxt *dlm);
62 
63 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
64 static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
65 static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
66 static int dlm_request_all_locks(struct dlm_ctxt *dlm,
67  u8 request_from, u8 dead_node);
68 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
69 
70 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
71 static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
72  const char *lockname, int namelen,
73  int total_locks, u64 cookie,
74  u8 flags, u8 master);
75 static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
76  struct dlm_migratable_lockres *mres,
77  u8 send_to,
78  struct dlm_lock_resource *res,
79  int total_locks);
80 static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
81  struct dlm_lock_resource *res,
82  struct dlm_migratable_lockres *mres);
83 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
84 static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
85  u8 dead_node, u8 send_to);
86 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
87 static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
88  struct list_head *list, u8 dead_node);
89 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
90  u8 dead_node, u8 new_master);
91 static void dlm_reco_ast(void *astdata);
92 static void dlm_reco_bast(void *astdata, int blocked_type);
93 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
94 static void dlm_request_all_locks_worker(struct dlm_work_item *item,
95  void *data);
96 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
97 static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
98  struct dlm_lock_resource *res,
99  u8 *real_master);
100 
101 static u64 dlm_get_next_mig_cookie(void);
102 
103 static DEFINE_SPINLOCK(dlm_reco_state_lock);
104 static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
105 static u64 dlm_mig_cookie = 1;
106 
107 static u64 dlm_get_next_mig_cookie(void)
108 {
109  u64 c;
110  spin_lock(&dlm_mig_cookie_lock);
111  c = dlm_mig_cookie;
112  if (dlm_mig_cookie == (~0ULL))
113  dlm_mig_cookie = 1;
114  else
115  dlm_mig_cookie++;
116  spin_unlock(&dlm_mig_cookie_lock);
117  return c;
118 }
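/*
 * Editor's illustration (not part of the original file): a minimal
 * user-space model of the cookie allocator above. It shows the one
 * subtle property of dlm_get_next_mig_cookie(): the counter wraps from
 * ~0ULL back to 1, so a cookie of 0 is never handed out. The spinlock
 * is omitted because this sketch is single-threaded.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t mig_cookie = 1;	/* models dlm_mig_cookie */

static uint64_t get_next_mig_cookie(void)
{
	uint64_t c = mig_cookie;

	if (mig_cookie == UINT64_MAX)	/* models (~0ULL) */
		mig_cookie = 1;
	else
		mig_cookie++;
	return c;
}

int main(void)
{
	mig_cookie = UINT64_MAX;
	assert(get_next_mig_cookie() == UINT64_MAX);
	assert(get_next_mig_cookie() == 1);	/* wrapped, zero skipped */
	return 0;
}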
119 
120 static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
121  u8 dead_node)
122 {
123  assert_spin_locked(&dlm->spinlock);
124  if (dlm->reco.dead_node != dead_node)
125  mlog(0, "%s: changing dead_node from %u to %u\n",
126  dlm->name, dlm->reco.dead_node, dead_node);
127  dlm->reco.dead_node = dead_node;
128 }
129 
130 static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
131  u8 master)
132 {
134  mlog(0, "%s: changing new_master from %u to %u\n",
135  dlm->name, dlm->reco.new_master, master);
136  dlm->reco.new_master = master;
137 }
138 
139 static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
140 {
141  assert_spin_locked(&dlm->spinlock);
142  clear_bit(dlm->reco.dead_node, dlm->recovery_map);
143  dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
144  dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
145 }
146 
147 static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
148 {
149  spin_lock(&dlm->spinlock);
150  __dlm_reset_recovery(dlm);
151  spin_unlock(&dlm->spinlock);
152 }
153 
154 /* Worker function used during recovery. */
155 void dlm_dispatch_work(struct work_struct *work)
156 {
157  struct dlm_ctxt *dlm =
158  container_of(work, struct dlm_ctxt, dispatched_work);
159  LIST_HEAD(tmp_list);
160  struct dlm_work_item *item, *next;
161  dlm_workfunc_t *workfunc;
162  int tot=0;
163 
164  spin_lock(&dlm->work_lock);
165  list_splice_init(&dlm->work_list, &tmp_list);
166  spin_unlock(&dlm->work_lock);
167 
168  list_for_each_entry(item, &tmp_list, list) {
169  tot++;
170  }
171  mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);
172 
173  list_for_each_entry_safe(item, next, &tmp_list, list) {
174  workfunc = item->func;
175  list_del_init(&item->list);
176 
177  /* already have ref on dlm to avoid having
178  * it disappear. just double-check. */
179  BUG_ON(item->dlm != dlm);
180 
181  /* this is allowed to sleep and
182  * call network stuff */
183  workfunc(item, item->data);
184 
185  dlm_put(dlm);
186  kfree(item);
187  }
188 }
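/*
 * Editor's note (not part of the original file): dlm_dispatch_work()
 * uses the common "splice under lock, process unlocked" pattern:
 * list_splice_init() moves the entire work_list onto a private list
 * while work_lock is held, so each work function can then sleep or do
 * network I/O without holding the lock and without racing against new
 * items being queued behind it.
 */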
189 
190 /*
191  * RECOVERY THREAD
192  */
193 
194 void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
195 {
196  /* wake the recovery thread
197  * this will wake the reco thread in one of three places
198  * 1) sleeping with no recovery happening
199  * 2) sleeping with recovery mastered elsewhere
200  * 3) recovery mastered here, waiting on reco data */
201 
202  wake_up(&dlm->dlm_reco_thread_wq);
203 }
204 
205 /* Launch the recovery thread */
206 int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
207 {
208  mlog(0, "starting dlm recovery thread...\n");
209 
210  dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
211  "dlm_reco_thread");
212  if (IS_ERR(dlm->dlm_reco_thread_task)) {
213  mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
214  dlm->dlm_reco_thread_task = NULL;
215  return -EINVAL;
216  }
217 
218  return 0;
219 }
220 
221 void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
222 {
223  if (dlm->dlm_reco_thread_task) {
224  mlog(0, "waiting for dlm recovery thread to exit\n");
225  kthread_stop(dlm->dlm_reco_thread_task);
226  dlm->dlm_reco_thread_task = NULL;
227  }
228 }
229 
230 
231 
232 /*
233  * this is lame, but here's how recovery works...
234  * 1) all recovery threads cluster wide will work on recovering
235  * ONE node at a time
236  * 2) negotiate who will take over all the locks for the dead node.
237  * that's right... ALL the locks.
238  * 3) once a new master is chosen, everyone scans all locks
239  * and moves aside those mastered by the dead guy
240  * 4) each of these locks should be locked until recovery is done
241  * 5) the new master collects up all of secondary lock queue info
242  * one lock at a time, forcing each node to communicate back
243  * before continuing
244  * 6) each secondary lock queue responds with the full known lock info
245  * 7) once the new master has run all its locks, it sends an ALLDONE!
246  * message to everyone
247  * 8) upon receiving this message, the secondary queue node unlocks
248  * and responds to the ALLDONE
249  * 9) once the new master gets responses from everyone, he unlocks
250  * everything and recovery for this dead node is done
251  *10) go back to 2) while there are still dead nodes
252  *
253  */
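/*
 * Editor's sketch (not part of the original file): the per-node state
 * progression implied by the steps above, using the
 * DLM_RECO_NODE_DATA_* states kept in struct dlm_reco_node_data and
 * printed by dlm_print_reco_node_status() below:
 *
 *   INIT -> REQUESTING -> REQUESTED -> RECEIVING -> DONE -> FINALIZE_SENT
 *               |
 *               +-> DEAD  (the node died while we were requesting its locks)
 */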
254 
255 static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
256 {
257  struct dlm_reco_node_data *ndata;
258  struct dlm_lock_resource *res;
259 
260  mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
261  dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
262  dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
263  dlm->reco.dead_node, dlm->reco.new_master);
264 
265  list_for_each_entry(ndata, &dlm->reco.node_data, list) {
266  char *st = "unknown";
267  switch (ndata->state) {
268  case DLM_RECO_NODE_DATA_INIT:
269  st = "init";
270  break;
271  case DLM_RECO_NODE_DATA_REQUESTING:
272  st = "requesting";
273  break;
274  case DLM_RECO_NODE_DATA_DEAD:
275  st = "dead";
276  break;
277  case DLM_RECO_NODE_DATA_RECEIVING:
278  st = "receiving";
279  break;
280  case DLM_RECO_NODE_DATA_REQUESTED:
281  st = "requested";
282  break;
283  case DLM_RECO_NODE_DATA_DONE:
284  st = "done";
285  break;
286  case DLM_RECO_NODE_DATA_FINALIZE_SENT:
287  st = "finalize-sent";
288  break;
289  default:
290  st = "bad";
291  break;
292  }
293  mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
294  dlm->name, ndata->node_num, st);
295  }
296  list_for_each_entry(res, &dlm->reco.resources, recovering) {
297  mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
298  dlm->name, res->lockname.len, res->lockname.name);
299  }
300 }
301 
302 #define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)
303 
304 static int dlm_recovery_thread(void *data)
305 {
306  int status;
307  struct dlm_ctxt *dlm = data;
308  unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);
309
310  mlog(0, "dlm thread running for %s...\n", dlm->name);
311 
312  while (!kthread_should_stop()) {
313  if (dlm_domain_fully_joined(dlm)) {
314  status = dlm_do_recovery(dlm);
315  if (status == -EAGAIN) {
316  /* do not sleep, recheck immediately. */
317  continue;
318  }
319  if (status < 0)
320  mlog_errno(status);
321  }
322 
323  wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
324  kthread_should_stop(),
325  timeout);
326  }
327 
328  mlog(0, "quitting DLM recovery thread\n");
329  return 0;
330 }
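/*
 * Editor's note (not part of the original file): the loop above sleeps
 * for DLM_RECO_THREAD_TIMEOUT_MS (or until dlm_kick_recovery_thread()
 * wakes it), except when dlm_do_recovery() returns -EAGAIN, which means
 * "one dead node was just recovered, recheck the recovery map
 * immediately in case more nodes died in the meantime".
 */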
331 
332 /* returns true when the recovery master has contacted us */
333 static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
334 {
335  int ready;
336  spin_lock(&dlm->spinlock);
337  ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
338  spin_unlock(&dlm->spinlock);
339  return ready;
340 }
341 
342 /* returns true if node is no longer in the domain
343  * could be dead or just not joined */
344 int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
345 {
346  int dead;
347  spin_lock(&dlm->spinlock);
348  dead = !test_bit(node, dlm->domain_map);
349  spin_unlock(&dlm->spinlock);
350  return dead;
351 }
352 
353 /* returns true if node is no longer marked for recovery, i.e.
354  * its recovery has completed (or it was never in the recovery map) */
355 static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
356 {
357  int recovered;
358  spin_lock(&dlm->spinlock);
359  recovered = !test_bit(node, dlm->recovery_map);
360  spin_unlock(&dlm->spinlock);
361  return recovered;
362 }
363 
364 
365 void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
366 {
367  if (dlm_is_node_dead(dlm, node))
368  return;
369 
370  printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
371  "domain %s\n", node, dlm->name);
372 
373  if (timeout)
374  wait_event_timeout(dlm->dlm_reco_thread_wq,
375  dlm_is_node_dead(dlm, node),
376  msecs_to_jiffies(timeout));
377  else
378  wait_event(dlm->dlm_reco_thread_wq,
379  dlm_is_node_dead(dlm, node));
380 }
381 
382 void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
383 {
384  if (dlm_is_node_recovered(dlm, node))
385  return;
386 
387  printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
388  "domain %s\n", node, dlm->name);
389 
390  if (timeout)
391  wait_event_timeout(dlm->dlm_reco_thread_wq,
392  dlm_is_node_recovered(dlm, node),
393  msecs_to_jiffies(timeout));
394  else
395  wait_event(dlm->dlm_reco_thread_wq,
396  dlm_is_node_recovered(dlm, node));
397 }
398 
399 /* callers of the top-level api calls (dlmlock/dlmunlock) should
400  * block on the dlm->reco.event when recovery is in progress.
401  * the dlm recovery thread will set this state when it begins
402  * recovering a dead node (as the new master or not) and clear
403  * the state and wake as soon as all affected lock resources have
404  * been marked with the RECOVERY flag */
405 static int dlm_in_recovery(struct dlm_ctxt *dlm)
406 {
407  int in_recovery;
408  spin_lock(&dlm->spinlock);
409  in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
410  spin_unlock(&dlm->spinlock);
411  return in_recovery;
412 }
413 
414 
415 void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
416 {
417  if (dlm_in_recovery(dlm)) {
418  mlog(0, "%s: reco thread %d in recovery: "
419  "state=%d, master=%u, dead=%u\n",
420  dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
421  dlm->reco.state, dlm->reco.new_master,
422  dlm->reco.dead_node);
423  }
424  wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
425 }
426 
427 static void dlm_begin_recovery(struct dlm_ctxt *dlm)
428 {
429  spin_lock(&dlm->spinlock);
430  BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
431  printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
432  dlm->name, dlm->reco.dead_node);
433  dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
434  spin_unlock(&dlm->spinlock);
435 }
436 
437 static void dlm_end_recovery(struct dlm_ctxt *dlm)
438 {
439  spin_lock(&dlm->spinlock);
440  BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
441  dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
442  spin_unlock(&dlm->spinlock);
443  printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
444  wake_up(&dlm->reco.event);
445 }
446 
447 static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
448 {
449  printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
450  "dead node %u in domain %s\n", dlm->reco.new_master,
451  (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
452  dlm->reco.dead_node, dlm->name);
453 }
454 
455 static int dlm_do_recovery(struct dlm_ctxt *dlm)
456 {
457  int status = 0;
458  int ret;
459 
460  spin_lock(&dlm->spinlock);
461 
462  /* check to see if the new master has died */
463  if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
464  test_bit(dlm->reco.new_master, dlm->recovery_map)) {
465  mlog(0, "new master %u died while recovering %u!\n",
466  dlm->reco.new_master, dlm->reco.dead_node);
467  /* unset the new_master, leave dead_node */
468  dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
469  }
470 
471  /* select a target to recover */
472  if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
473  int bit;
474 
475  bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES, 0);
476  if (bit >= O2NM_MAX_NODES || bit < 0)
477  dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
478  else
479  dlm_set_reco_dead_node(dlm, bit);
480  } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
481  /* BUG? */
482  mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
483  dlm->reco.dead_node);
484  dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
485  }
486 
487  if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
488  // mlog(0, "nothing to recover! sleeping now!\n");
489  spin_unlock(&dlm->spinlock);
490  /* return to main thread loop and sleep. */
491  return 0;
492  }
493  mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
494  dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
495  dlm->reco.dead_node);
496  spin_unlock(&dlm->spinlock);
497 
498  /* take write barrier */
499  /* (stops the list reshuffling thread, proxy ast handling) */
500  dlm_begin_recovery(dlm);
501 
502  if (dlm->reco.new_master == dlm->node_num)
503  goto master_here;
504 
505  if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
506  /* choose a new master, returns 0 if this node
507  * is the master, -EEXIST if it's another node.
508  * this does not return until a new master is chosen
509  * or recovery completes entirely. */
510  ret = dlm_pick_recovery_master(dlm);
511  if (!ret) {
512  /* already notified everyone. go. */
513  goto master_here;
514  }
515  mlog(0, "another node will master this recovery session.\n");
516  }
517 
518  dlm_print_recovery_master(dlm);
519 
520  /* it is safe to start everything back up here
521  * because all of the dead node's lock resources
522  * have been marked as in-recovery */
523  dlm_end_recovery(dlm);
524 
525  /* sleep out in main dlm_recovery_thread loop. */
526  return 0;
527 
528 master_here:
529  dlm_print_recovery_master(dlm);
530 
531  status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
532  if (status < 0) {
533  /* we should never hit this anymore */
534  mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
535  "retrying.\n", dlm->name, status, dlm->reco.dead_node);
536  /* yield a bit to allow any final network messages
537  * to get handled on remaining nodes */
538  msleep(100);
539  } else {
540  /* success! see if any other nodes need recovery */
541  mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
542  dlm->name, dlm->reco.dead_node, dlm->node_num);
543  dlm_reset_recovery(dlm);
544  }
545  dlm_end_recovery(dlm);
546 
547  /* continue and look for another dead node */
548  return -EAGAIN;
549 }
550 
551 static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
552 {
553  int status = 0;
554  struct dlm_reco_node_data *ndata;
555  int all_nodes_done;
556  int destroy = 0;
557  int pass = 0;
558 
559  do {
560  /* we have become recovery master. there is no escaping
561  * this, so just keep trying until we get it. */
562  status = dlm_init_recovery_area(dlm, dead_node);
563  if (status < 0) {
564  mlog(ML_ERROR, "%s: failed to alloc recovery area, "
565  "retrying\n", dlm->name);
566  msleep(1000);
567  }
568  } while (status != 0);
569 
570  /* safe to access the node data list without a lock, since this
571  * process is the only one to change the list */
572  list_for_each_entry(ndata, &dlm->reco.node_data, list) {
573  BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
574  ndata->state = DLM_RECO_NODE_DATA_REQUESTING;
575
576  mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
577  ndata->node_num);
578 
579  if (ndata->node_num == dlm->node_num) {
580  ndata->state = DLM_RECO_NODE_DATA_DONE;
581  continue;
582  }
583 
584  do {
585  status = dlm_request_all_locks(dlm, ndata->node_num,
586  dead_node);
587  if (status < 0) {
588  mlog_errno(status);
589  if (dlm_is_host_down(status)) {
590  /* node died, ignore it for recovery */
591  status = 0;
592  ndata->state = DLM_RECO_NODE_DATA_DEAD;
593  /* wait for the domain map to catch up
594  * with the network state. */
595  wait_event_timeout(dlm->dlm_reco_thread_wq,
596  dlm_is_node_dead(dlm,
597  ndata->node_num),
598  msecs_to_jiffies(1000));
599  mlog(0, "waited 1 sec for %u, "
600  "dead? %s\n", ndata->node_num,
601  dlm_is_node_dead(dlm, ndata->node_num) ?
602  "yes" : "no");
603  } else {
604  /* -ENOMEM on the other node */
605  mlog(0, "%s: node %u returned "
606  "%d during recovery, retrying "
607  "after a short wait\n",
608  dlm->name, ndata->node_num,
609  status);
610  msleep(100);
611  }
612  }
613  } while (status != 0);
614 
615  spin_lock(&dlm_reco_state_lock);
616  switch (ndata->state) {
617  case DLM_RECO_NODE_DATA_INIT:
618  case DLM_RECO_NODE_DATA_FINALIZE_SENT:
619  case DLM_RECO_NODE_DATA_REQUESTED:
620  BUG();
621  break;
623  mlog(0, "node %u died after requesting "
624  "recovery info for node %u\n",
625  ndata->node_num, dead_node);
626  /* fine. don't need this node's info.
627  * continue without it. */
628  break;
631  mlog(0, "now receiving recovery data from "
632  "node %u for dead node %u\n",
633  ndata->node_num, dead_node);
634  break;
636  mlog(0, "already receiving recovery data from "
637  "node %u for dead node %u\n",
638  ndata->node_num, dead_node);
639  break;
641  mlog(0, "already DONE receiving recovery data "
642  "from node %u for dead node %u\n",
643  ndata->node_num, dead_node);
644  break;
645  }
646  spin_unlock(&dlm_reco_state_lock);
647  }
648 
649  mlog(0, "%s: Done requesting all lock info\n", dlm->name);
650 
651  /* nodes should be sending reco data now
652  * just need to wait */
653 
654  while (1) {
655  /* check all the nodes now to see if we are
656  * done, or if anyone died */
657  all_nodes_done = 1;
658  spin_lock(&dlm_reco_state_lock);
659  list_for_each_entry(ndata, &dlm->reco.node_data, list) {
660  mlog(0, "checking recovery state of node %u\n",
661  ndata->node_num);
662  switch (ndata->state) {
665  mlog(ML_ERROR, "bad ndata state for "
666  "node %u: state=%d\n",
667  ndata->node_num, ndata->state);
668  BUG();
669  break;
671  mlog(0, "node %u died after "
672  "requesting recovery info for "
673  "node %u\n", ndata->node_num,
674  dead_node);
675  break;
678  mlog(0, "%s: node %u still in state %s\n",
679  dlm->name, ndata->node_num,
681  "receiving" : "requested");
682  all_nodes_done = 0;
683  break;
685  mlog(0, "%s: node %u state is done\n",
686  dlm->name, ndata->node_num);
687  break;
689  mlog(0, "%s: node %u state is finalize\n",
690  dlm->name, ndata->node_num);
691  break;
692  }
693  }
694  spin_unlock(&dlm_reco_state_lock);
695 
696  mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
697  all_nodes_done?"yes":"no");
698  if (all_nodes_done) {
699  int ret;
700 
701  /* all nodes are now in DLM_RECO_NODE_DATA_DONE state
702  * just send a finalize message to everyone and
703  * clean up */
704  mlog(0, "all nodes are done! send finalize\n");
705  ret = dlm_send_finalize_reco_message(dlm);
706  if (ret < 0)
707  mlog_errno(ret);
708 
709  spin_lock(&dlm->spinlock);
710  dlm_finish_local_lockres_recovery(dlm, dead_node,
711  dlm->node_num);
712  spin_unlock(&dlm->spinlock);
713  mlog(0, "should be done with recovery!\n");
714 
715  mlog(0, "finishing recovery of %s at %lu, "
716  "dead=%u, this=%u, new=%u\n", dlm->name,
717  jiffies, dlm->reco.dead_node,
718  dlm->node_num, dlm->reco.new_master);
719  destroy = 1;
720  status = 0;
721  /* rescan everything marked dirty along the way */
722  dlm_kick_thread(dlm, NULL);
723  break;
724  }
725  /* wait to be signalled, with periodic timeout
726  * to check for node death */
727  wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
728  kthread_should_stop(),
729  msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));
730
731  }
732 
733  if (destroy)
734  dlm_destroy_recovery_area(dlm, dead_node);
735 
736  return status;
737 }
738 
739 static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
740 {
741  int num=0;
742  struct dlm_reco_node_data *ndata;
743 
744  spin_lock(&dlm->spinlock);
745  memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
746  /* nodes can only be removed (by dying) after dropping
747  * this lock, and death will be trapped later, so this should do */
748  spin_unlock(&dlm->spinlock);
749 
750  while (1) {
751  num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
752  if (num >= O2NM_MAX_NODES) {
753  break;
754  }
755  BUG_ON(num == dead_node);
756 
757  ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
758  if (!ndata) {
759  dlm_destroy_recovery_area(dlm, dead_node);
760  return -ENOMEM;
761  }
762  ndata->node_num = num;
763  ndata->state = DLM_RECO_NODE_DATA_INIT;
764  spin_lock(&dlm_reco_state_lock);
765  list_add_tail(&ndata->list, &dlm->reco.node_data);
766  spin_unlock(&dlm_reco_state_lock);
767  num++;
768  }
769 
770  return 0;
771 }
772 
773 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
774 {
775  struct dlm_reco_node_data *ndata, *next;
776  LIST_HEAD(tmplist);
777 
778  spin_lock(&dlm_reco_state_lock);
779  list_splice_init(&dlm->reco.node_data, &tmplist);
780  spin_unlock(&dlm_reco_state_lock);
781 
782  list_for_each_entry_safe(ndata, next, &tmplist, list) {
783  list_del_init(&ndata->list);
784  kfree(ndata);
785  }
786 }
787 
788 static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
789  u8 dead_node)
790 {
791  struct dlm_lock_request lr;
792  enum dlm_status ret;
793 
794  mlog(0, "\n");
795 
796 
797  mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
798  "to %u\n", dead_node, request_from);
799 
800  memset(&lr, 0, sizeof(lr));
801  lr.node_idx = dlm->node_num;
802  lr.dead_node = dead_node;
803 
804  // send message
805  ret = DLM_NOLOCKMGR;
806  ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
807  &lr, sizeof(lr), request_from, NULL);
808 
809  /* negative status is handled by caller */
810  if (ret < 0)
811  mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
812  "to recover dead node %u\n", dlm->name, ret,
813  request_from, dead_node);
814  // return from here, then
815  // sleep until all received or error
816  return ret;
817 
818 }
819 
820 int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
821  void **ret_data)
822 {
823  struct dlm_ctxt *dlm = data;
824  struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
825  char *buf = NULL;
826  struct dlm_work_item *item = NULL;
827 
828  if (!dlm_grab(dlm))
829  return -EINVAL;
830 
831  if (lr->dead_node != dlm->reco.dead_node) {
832  mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
833  "dead_node is %u\n", dlm->name, lr->node_idx,
834  lr->dead_node, dlm->reco.dead_node);
835  dlm_print_reco_node_status(dlm);
836  /* this is a hack */
837  dlm_put(dlm);
838  return -ENOMEM;
839  }
840  BUG_ON(lr->dead_node != dlm->reco.dead_node);
841 
842  item = kzalloc(sizeof(*item), GFP_NOFS);
843  if (!item) {
844  dlm_put(dlm);
845  return -ENOMEM;
846  }
847 
848  /* this will get freed by dlm_request_all_locks_worker */
849  buf = (char *) __get_free_page(GFP_NOFS);
850  if (!buf) {
851  kfree(item);
852  dlm_put(dlm);
853  return -ENOMEM;
854  }
855 
856  /* queue up work for dlm_request_all_locks_worker */
857  dlm_grab(dlm); /* get an extra ref for the work item */
858  dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
859  item->u.ral.reco_master = lr->node_idx;
860  item->u.ral.dead_node = lr->dead_node;
861  spin_lock(&dlm->work_lock);
862  list_add_tail(&item->list, &dlm->work_list);
863  spin_unlock(&dlm->work_lock);
864  queue_work(dlm->dlm_worker, &dlm->dispatched_work);
865
866  dlm_put(dlm);
867  return 0;
868 }
869 
870 static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
871 {
872  struct dlm_migratable_lockres *mres;
873  struct dlm_lock_resource *res;
874  struct dlm_ctxt *dlm;
875  LIST_HEAD(resources);
876  int ret;
877  u8 dead_node, reco_master;
878  int skip_all_done = 0;
879 
880  dlm = item->dlm;
881  dead_node = item->u.ral.dead_node;
882  reco_master = item->u.ral.reco_master;
883  mres = (struct dlm_migratable_lockres *)data;
884 
885  mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
886  dlm->name, dead_node, reco_master);
887 
888  if (dead_node != dlm->reco.dead_node ||
889  reco_master != dlm->reco.new_master) {
890  /* worker could have been created before the recovery master
891  * died. if so, do not continue, but do not error. */
892  if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
893  mlog(ML_NOTICE, "%s: will not send recovery state, "
894  "recovery master %u died, thread=(dead=%u,mas=%u)"
895  " current=(dead=%u,mas=%u)\n", dlm->name,
896  reco_master, dead_node, reco_master,
897  dlm->reco.dead_node, dlm->reco.new_master);
898  } else {
899  mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
900  "master=%u), request(dead=%u, master=%u)\n",
901  dlm->name, dlm->reco.dead_node,
902  dlm->reco.new_master, dead_node, reco_master);
903  }
904  goto leave;
905  }
906 
907  /* lock resources should have already been moved to the
908  * dlm->reco.resources list. now move items from that list
909  * to a temp list if the dead owner matches. note that the
910  * whole cluster recovers only one node at a time, so we
911  * can safely move UNKNOWN lock resources for each recovery
912  * session. */
913  dlm_move_reco_locks_to_list(dlm, &resources, dead_node);
914 
915  /* now we can begin blasting lockreses without the dlm lock */
916 
917  /* any errors returned will be due to the new_master dying,
918  * the dlm_reco_thread should detect this */
919  list_for_each_entry(res, &resources, recovering) {
920  ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
921  DLM_MRES_RECOVERY);
922  if (ret < 0) {
923  mlog(ML_ERROR, "%s: node %u went down while sending "
924  "recovery state for dead node %u, ret=%d\n", dlm->name,
925  reco_master, dead_node, ret);
926  skip_all_done = 1;
927  break;
928  }
929  }
930 
931  /* move the resources back to the list */
932  spin_lock(&dlm->spinlock);
933  list_splice_init(&resources, &dlm->reco.resources);
934  spin_unlock(&dlm->spinlock);
935 
936  if (!skip_all_done) {
937  ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
938  if (ret < 0) {
939  mlog(ML_ERROR, "%s: node %u went down while sending "
940  "recovery all-done for dead node %u, ret=%d\n",
941  dlm->name, reco_master, dead_node, ret);
942  }
943  }
944 leave:
945  free_page((unsigned long)data);
946 }
947 
948 
949 static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
950 {
951  int ret, tmpret;
952  struct dlm_reco_data_done done_msg;
953 
954  memset(&done_msg, 0, sizeof(done_msg));
955  done_msg.node_idx = dlm->node_num;
956  done_msg.dead_node = dead_node;
957  mlog(0, "sending DATA DONE message to %u, "
958  "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
959  done_msg.dead_node);
960 
961  ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
962  sizeof(done_msg), send_to, &tmpret);
963  if (ret < 0) {
964  mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
965  "to recover dead node %u\n", dlm->name, ret, send_to,
966  dead_node);
967  if (!dlm_is_host_down(ret)) {
968  BUG();
969  }
970  } else
971  ret = tmpret;
972  return ret;
973 }
974 
975 
976 int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
977  void **ret_data)
978 {
979  struct dlm_ctxt *dlm = data;
980  struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
981  struct dlm_reco_node_data *ndata = NULL;
982  int ret = -EINVAL;
983 
984  if (!dlm_grab(dlm))
985  return -EINVAL;
986 
987  mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
988  "node_idx=%u, this node=%u\n", done->dead_node,
989  dlm->reco.dead_node, done->node_idx, dlm->node_num);
990 
991  mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
992  "Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
993  "node_idx=%u, this node=%u\n", done->dead_node,
994  dlm->reco.dead_node, done->node_idx, dlm->node_num);
995 
996  spin_lock(&dlm_reco_state_lock);
997  list_for_each_entry(ndata, &dlm->reco.node_data, list) {
998  if (ndata->node_num != done->node_idx)
999  continue;
1000 
1001  switch (ndata->state) {
1002  /* should have moved beyond INIT but not to FINALIZE yet */
1006  mlog(ML_ERROR, "bad ndata state for node %u:"
1007  " state=%d\n", ndata->node_num,
1008  ndata->state);
1009  BUG();
1010  break;
1011  /* these states are possible at this point, anywhere along
1012  * the line of recovery */
1017  mlog(0, "node %u is DONE sending "
1018  "recovery data!\n",
1019  ndata->node_num);
1020 
1021  ndata->state = DLM_RECO_NODE_DATA_DONE;
1022  ret = 0;
1023  break;
1024  }
1025  }
1026  spin_unlock(&dlm_reco_state_lock);
1027 
1028  /* wake the recovery thread, some node is done */
1029  if (!ret)
1030  dlm_kick_recovery_thread(dlm);
1031
1032  if (ret < 0)
1033  mlog(ML_ERROR, "failed to find recovery node data for node "
1034  "%u\n", done->node_idx);
1035  dlm_put(dlm);
1036 
1037  mlog(0, "leaving reco data done handler, ret=%d\n", ret);
1038  return ret;
1039 }
1040 
1041 static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
1042  struct list_head *list,
1043  u8 dead_node)
1044 {
1045  struct dlm_lock_resource *res, *next;
1046  struct dlm_lock *lock;
1047 
1048  spin_lock(&dlm->spinlock);
1049  list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
1050  /* always prune any $RECOVERY entries for dead nodes,
1051  * otherwise hangs can occur during later recovery */
1052  if (dlm_is_recovery_lock(res->lockname.name,
1053  res->lockname.len)) {
1054  spin_lock(&res->spinlock);
1055  list_for_each_entry(lock, &res->granted, list) {
1056  if (lock->ml.node == dead_node) {
1057  mlog(0, "AHA! there was "
1058  "a $RECOVERY lock for dead "
1059  "node %u (%s)!\n",
1060  dead_node, dlm->name);
1061  list_del_init(&lock->list);
1062  dlm_lock_put(lock);
1063  break;
1064  }
1065  }
1066  spin_unlock(&res->spinlock);
1067  continue;
1068  }
1069 
1070  if (res->owner == dead_node) {
1071  mlog(0, "found lockres owned by dead node while "
1072  "doing recovery for node %u. sending it.\n",
1073  dead_node);
1074  list_move_tail(&res->recovering, list);
1075  } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
1076  mlog(0, "found UNKNOWN owner while doing recovery "
1077  "for node %u. sending it.\n", dead_node);
1078  list_move_tail(&res->recovering, list);
1079  }
1080  }
1081  spin_unlock(&dlm->spinlock);
1082 }
1083 
1084 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
1085 {
1086  int total_locks = 0;
1087  struct list_head *iter, *queue = &res->granted;
1088  int i;
1089 
1090  for (i=0; i<3; i++) {
1091  list_for_each(iter, queue)
1092  total_locks++;
1093  queue++;
1094  }
1095  return total_locks;
1096 }
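/*
 * Editor's illustration (not part of the original file): the `queue++`
 * in dlm_num_locks_in_lockres() relies on the granted, converting and
 * blocked list heads being declared as consecutive members of struct
 * dlm_lock_resource, so incrementing a struct list_head pointer steps
 * from one queue to the next. A minimal user-space model of that trick
 * follows; the member names and int counters are stand-ins:
 */
#include <stdio.h>

struct fake_lockres {
	int granted;	/* stands in for struct list_head granted */
	int converting;	/* must immediately follow granted */
	int blocked;	/* must immediately follow converting */
};

int main(void)
{
	struct fake_lockres res = { 3, 1, 2 };	/* per-queue lock counts */
	int *queue = &res.granted;
	int i, total = 0;

	for (i = 0; i < 3; i++) {	/* mirrors the i<3 loop above */
		total += *queue;
		queue++;		/* advance to the adjacent member */
	}
	printf("total_locks = %d\n", total);	/* prints 6 */
	return 0;
}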
1097 
1098 
1099 static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
1100  struct dlm_migratable_lockres *mres,
1101  u8 send_to,
1102  struct dlm_lock_resource *res,
1103  int total_locks)
1104 {
1105  u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
1106  int mres_total_locks = be32_to_cpu(mres->total_locks);
1107  int sz, ret = 0, status = 0;
1108  u8 orig_flags = mres->flags,
1109  orig_master = mres->master;
1110 
1111  BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
1112  if (!mres->num_locks)
1113  return 0;
1114 
1115  sz = sizeof(struct dlm_migratable_lockres) +
1116  (mres->num_locks * sizeof(struct dlm_migratable_lock));
1117 
1118  /* add an all-done flag if we reached the last lock */
1119  orig_flags = mres->flags;
1120  BUG_ON(total_locks > mres_total_locks);
1121  if (total_locks == mres_total_locks)
1122  mres->flags |= DLM_MRES_ALL_DONE;
1123 
1124  mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
1125  dlm->name, res->lockname.len, res->lockname.name,
1126  orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
1127  send_to);
1128 
1129  /* send it */
1130  ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
1131  sz, send_to, &status);
1132  if (ret < 0) {
1133  /* XXX: negative status is not handled.
1134  * this will end up killing this node. */
1135  mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
1136  "node %u (%s)\n", dlm->name, mres->lockname_len,
1137  mres->lockname, ret, send_to,
1138  (orig_flags & DLM_MRES_MIGRATION ?
1139  "migration" : "recovery"));
1140  } else {
1141  /* might get an -ENOMEM back here */
1142  ret = status;
1143  if (ret < 0) {
1144  mlog_errno(ret);
1145 
1146  if (ret == -EFAULT) {
1147  mlog(ML_ERROR, "node %u told me to kill "
1148  "myself!\n", send_to);
1149  BUG();
1150  }
1151  }
1152  }
1153 
1154  /* zero and reinit the message buffer */
1155  dlm_init_migratable_lockres(mres, res->lockname.name,
1156  res->lockname.len, mres_total_locks,
1157  mig_cookie, orig_flags, orig_master);
1158  return ret;
1159 }
1160 
1161 static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
1162  const char *lockname, int namelen,
1163  int total_locks, u64 cookie,
1164  u8 flags, u8 master)
1165 {
1166  /* mres here is one full page */
1167  clear_page(mres);
1168  mres->lockname_len = namelen;
1169  memcpy(mres->lockname, lockname, namelen);
1170  mres->num_locks = 0;
1171  mres->total_locks = cpu_to_be32(total_locks);
1172  mres->mig_cookie = cpu_to_be64(cookie);
1173  mres->flags = flags;
1174  mres->master = master;
1175 }
1176 
1177 static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
1178  struct dlm_migratable_lockres *mres,
1179  int queue)
1180 {
1181  if (!lock->lksb)
1182  return;
1183 
1184  /* Ignore lvb in all locks in the blocked list */
1185  if (queue == DLM_BLOCKED_LIST)
1186  return;
1187 
1188  /* Only consider lvbs in locks with granted EX or PR lock levels */
1189  if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
1190  return;
1191 
1192  if (dlm_lvb_is_empty(mres->lvb)) {
1193  memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
1194  return;
1195  }
1196 
1197  /* Ensure the lvb copied for migration matches in other valid locks */
1198  if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
1199  return;
1200 
1201  mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
1202  "node=%u\n",
1203  dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
1204  dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
1205  lock->lockres->lockname.len, lock->lockres->lockname.name,
1206  lock->ml.node);
1207  dlm_print_one_lock_resource(lock->lockres);
1208  BUG();
1209 }
1210 
1211 /* returns 1 if this lock fills the network structure,
1212  * 0 otherwise */
1213 static int dlm_add_lock_to_array(struct dlm_lock *lock,
1214  struct dlm_migratable_lockres *mres, int queue)
1215 {
1216  struct dlm_migratable_lock *ml;
1217  int lock_num = mres->num_locks;
1218 
1219  ml = &(mres->ml[lock_num]);
1220  ml->cookie = lock->ml.cookie;
1221  ml->type = lock->ml.type;
1222  ml->convert_type = lock->ml.convert_type;
1223  ml->highest_blocked = lock->ml.highest_blocked;
1224  ml->list = queue;
1225  if (lock->lksb) {
1226  ml->flags = lock->lksb->flags;
1227  dlm_prepare_lvb_for_migration(lock, mres, queue);
1228  }
1229  ml->node = lock->ml.node;
1230  mres->num_locks++;
1231  /* we reached the max, send this network message */
1232  if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
1233  return 1;
1234  return 0;
1235 }
1236 
1237 static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
1238  struct dlm_migratable_lockres *mres)
1239 {
1240  struct dlm_lock dummy;
1241  memset(&dummy, 0, sizeof(dummy));
1242  dummy.ml.cookie = 0;
1243  dummy.ml.type = LKM_IVMODE;
1244  dummy.ml.convert_type = LKM_IVMODE;
1245  dummy.ml.highest_blocked = LKM_IVMODE;
1246  dummy.lksb = NULL;
1247  dummy.ml.node = dlm->node_num;
1248  dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
1249 }
1250 
1251 static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
1252  struct dlm_migratable_lock *ml,
1253  u8 *nodenum)
1254 {
1255  if (unlikely(ml->cookie == 0 &&
1256  ml->type == LKM_IVMODE &&
1257  ml->convert_type == LKM_IVMODE &&
1258  ml->highest_blocked == LKM_IVMODE &&
1259  ml->list == DLM_BLOCKED_LIST)) {
1260  *nodenum = ml->node;
1261  return 1;
1262  }
1263  return 0;
1264 }
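/*
 * Editor's note (not part of the original file): the "dummy" lock built
 * by dlm_add_dummy_lock() is recognizable because no real lock can have
 * a zero cookie with every mode set to LKM_IVMODE while sitting on the
 * blocked list. It carries no lock state at all; its only payload is
 * ml->node, which tells the receiver to set that node's bit in the
 * lockres refmap (see the dlm_is_dummy_lock() branch in
 * dlm_process_recovery_data() below).
 */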
1265 
1266 int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1267  struct dlm_migratable_lockres *mres,
1268  u8 send_to, u8 flags)
1269 {
1270  struct list_head *queue;
1271  int total_locks, i;
1272  u64 mig_cookie = 0;
1273  struct dlm_lock *lock;
1274  int ret = 0;
1275 
1276  BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
1277
1278  mlog(0, "sending to %u\n", send_to);
1279 
1280  total_locks = dlm_num_locks_in_lockres(res);
1281  if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
1282  /* rare, but possible */
1283  mlog(0, "argh. lockres has %d locks. this will "
1284  "require more than one network packet to "
1285  "migrate\n", total_locks);
1286  mig_cookie = dlm_get_next_mig_cookie();
1287  }
1288 
1289  dlm_init_migratable_lockres(mres, res->lockname.name,
1290  res->lockname.len, total_locks,
1291  mig_cookie, flags, res->owner);
1292 
1293  total_locks = 0;
1294  for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
1295  queue = dlm_list_idx_to_ptr(res, i);
1296  list_for_each_entry(lock, queue, list) {
1297  /* add another lock. */
1298  total_locks++;
1299  if (!dlm_add_lock_to_array(lock, mres, i))
1300  continue;
1301 
1302  /* this filled the lock message,
1303  * we must send it immediately. */
1304  ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
1305  res, total_locks);
1306  if (ret < 0)
1307  goto error;
1308  }
1309  }
1310  if (total_locks == 0) {
1311  /* send a dummy lock to indicate a mastery reference only */
1312  mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
1313  dlm->name, res->lockname.len, res->lockname.name,
1314  send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
1315  "migration");
1316  dlm_add_dummy_lock(dlm, mres);
1317  }
1318  /* flush any remaining locks */
1319  ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
1320  if (ret < 0)
1321  goto error;
1322  return ret;
1323 
1324 error:
1325  mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
1326  dlm->name, ret);
1327  if (!dlm_is_host_down(ret))
1328  BUG();
1329  mlog(0, "%s: node %u went down while sending %s "
1330  "lockres %.*s\n", dlm->name, send_to,
1331  flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
1332  res->lockname.len, res->lockname.name);
1333  return ret;
1334 }
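/*
 * Editor's note (not part of the original file): dlm_send_one_lockres()
 * streams a lockres in page-sized chunks. dlm_add_lock_to_array()
 * returns 1 each time the dlm_migratable_lockres buffer fills up
 * (DLM_MAX_MIGRATABLE_LOCKS entries), triggering an immediate
 * dlm_send_mig_lockres_msg(), which also re-initializes the buffer for
 * the next chunk. The receiver stitches the chunks back together using
 * mig_cookie, and DLM_MRES_ALL_DONE is set on whichever message carries
 * the final lock.
 */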
1335 
1336 
1337 
1338 /*
1339  * this message will contain no more than one page worth of
1340  * recovery data, and it will work on only one lockres.
1341  * there may be many locks in this page, and we may need to wait
1342  * for additional packets to complete all the locks (rare, but
1343  * possible).
1344  */
1345 /*
1346  * NOTE: the allocation error cases here are scary
1347  * we really cannot afford to fail an alloc in recovery
1348  * do we spin? returning an error only delays the problem really
1349  */
1350 
1351 int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
1352  void **ret_data)
1353 {
1354  struct dlm_ctxt *dlm = data;
1355  struct dlm_migratable_lockres *mres =
1356  (struct dlm_migratable_lockres *)msg->buf;
1357  int ret = 0;
1358  u8 real_master;
1359  u8 extra_refs = 0;
1360  char *buf = NULL;
1361  struct dlm_work_item *item = NULL;
1362  struct dlm_lock_resource *res = NULL;
1363 
1364  if (!dlm_grab(dlm))
1365  return -EINVAL;
1366 
1367  BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
1368
1369  real_master = mres->master;
1370  if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1371  /* cannot migrate a lockres with no master */
1372  BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
1373  }
1374 
1375  mlog(0, "%s message received from node %u\n",
1376  (mres->flags & DLM_MRES_RECOVERY) ?
1377  "recovery" : "migration", mres->master);
1378  if (mres->flags & DLM_MRES_ALL_DONE)
1379  mlog(0, "all done flag. all lockres data received!\n");
1380 
1381  ret = -ENOMEM;
1382  buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
1383  item = kzalloc(sizeof(*item), GFP_NOFS);
1384  if (!buf || !item)
1385  goto leave;
1386 
1387  /* lookup the lock to see if we have a secondary queue for this
1388  * already... just add the locks in and this will have its owner
1389  * and RECOVERY flag changed when it completes. */
1390  res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
1391  if (res) {
1392  /* this will get a ref on res */
1393  /* mark it as recovering/migrating and hash it */
1394  spin_lock(&res->spinlock);
1395  if (mres->flags & DLM_MRES_RECOVERY) {
1396  res->state |= DLM_LOCK_RES_RECOVERING;
1397  } else {
1398  if (res->state & DLM_LOCK_RES_MIGRATING) {
1399  /* this is at least the second
1400  * lockres message */
1401  mlog(0, "lock %.*s is already migrating\n",
1402  mres->lockname_len,
1403  mres->lockname);
1404  } else if (res->state & DLM_LOCK_RES_RECOVERING) {
1405  /* caller should BUG */
1406  mlog(ML_ERROR, "node is attempting to migrate "
1407  "lock %.*s, but marked as recovering!\n",
1408  mres->lockname_len, mres->lockname);
1409  ret = -EFAULT;
1410  spin_unlock(&res->spinlock);
1411  goto leave;
1412  }
1413  res->state |= DLM_LOCK_RES_MIGRATING;
1414  }
1415  spin_unlock(&res->spinlock);
1416  } else {
1417  /* need to allocate, just like if it was
1418  * mastered here normally */
1419  res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
1420  if (!res)
1421  goto leave;
1422 
1423  /* to match the ref that we would have gotten if
1424  * dlm_lookup_lockres had succeeded */
1425  dlm_lockres_get(res);
1426 
1427  /* mark it as recovering/migrating and hash it */
1428  if (mres->flags & DLM_MRES_RECOVERY)
1429  res->state |= DLM_LOCK_RES_RECOVERING;
1430  else
1431  res->state |= DLM_LOCK_RES_MIGRATING;
1432 
1433  spin_lock(&dlm->spinlock);
1434  __dlm_insert_lockres(dlm, res);
1435  spin_unlock(&dlm->spinlock);
1436 
1437  /* Add an extra ref for this lock-less lockres lest the
1438  * dlm_thread purges it before we get the chance to add
1439  * locks to it */
1440  dlm_lockres_get(res);
1441 
1442  /* There are three refs that need to be put.
1443  * 1. Taken above.
1444  * 2. kref_init in dlm_new_lockres()->dlm_init_lockres().
1445  * 3. dlm_lookup_lockres()
1446  * The first one is handled at the end of this function. The
1447  * other two are handled in the worker thread after locks have
1448  * been attached. Yes, we don't wait for purge time to match
1449  * kref_init. The lockres will still have at least one ref
1450  * added because it is in the hash __dlm_insert_lockres() */
1451  extra_refs++;
1452 
1453  /* now that the new lockres is inserted,
1454  * make it usable by other processes */
1455  spin_lock(&res->spinlock);
1456  res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
1457  spin_unlock(&res->spinlock);
1458  wake_up(&res->wq);
1459  }
1460 
1461  /* at this point we have allocated everything we need,
1462  * and we have a hashed lockres with an extra ref and
1463  * the proper res->state flags. */
1464  ret = 0;
1465  spin_lock(&res->spinlock);
1466  /* drop this either when master requery finds a different master
1467  * or when a lock is added by the recovery worker */
1468  dlm_lockres_grab_inflight_ref(dlm, res);
1469  if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1470  /* migration cannot have an unknown master */
1471  BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
1472  mlog(0, "recovery has passed me a lockres with an "
1473  "unknown owner.. will need to requery: "
1474  "%.*s\n", mres->lockname_len, mres->lockname);
1475  } else {
1476  /* take a reference now to pin the lockres, drop it
1477  * when locks are added in the worker */
1478  dlm_change_lockres_owner(dlm, res, dlm->node_num);
1479  }
1480  spin_unlock(&res->spinlock);
1481 
1482  /* queue up work for dlm_mig_lockres_worker */
1483  dlm_grab(dlm); /* get an extra ref for the work item */
1484  memcpy(buf, msg->buf, be16_to_cpu(msg->data_len)); /* copy the whole message */
1485  dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
1486  item->u.ml.lockres = res; /* already have a ref */
1487  item->u.ml.real_master = real_master;
1488  item->u.ml.extra_ref = extra_refs;
1489  spin_lock(&dlm->work_lock);
1490  list_add_tail(&item->list, &dlm->work_list);
1491  spin_unlock(&dlm->work_lock);
1492  queue_work(dlm->dlm_worker, &dlm->dispatched_work);
1493 
1494 leave:
1495  /* One extra ref taken needs to be put here */
1496  if (extra_refs)
1497  dlm_lockres_put(res);
1498 
1499  dlm_put(dlm);
1500  if (ret < 0) {
1501  if (buf)
1502  kfree(buf);
1503  if (item)
1504  kfree(item);
1505  mlog_errno(ret);
1506  }
1507 
1508  return ret;
1509 }
1510 
1511 
1512 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
1513 {
1514  struct dlm_ctxt *dlm;
1515  struct dlm_migratable_lockres *mres;
1516  int ret = 0;
1517  struct dlm_lock_resource *res;
1518  u8 real_master;
1519  u8 extra_ref;
1520 
1521  dlm = item->dlm;
1522  mres = (struct dlm_migratable_lockres *)data;
1523 
1524  res = item->u.ml.lockres;
1525  real_master = item->u.ml.real_master;
1526  extra_ref = item->u.ml.extra_ref;
1527 
1528  if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1529  /* this case is super-rare. only occurs if
1530  * node death happens during migration. */
1531 again:
1532  ret = dlm_lockres_master_requery(dlm, res, &real_master);
1533  if (ret < 0) {
1534  mlog(0, "dlm_lockres_master_requery ret=%d\n",
1535  ret);
1536  goto again;
1537  }
1538  if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1539  mlog(0, "lockres %.*s not claimed. "
1540  "this node will take it.\n",
1541  res->lockname.len, res->lockname.name);
1542  } else {
1543  spin_lock(&res->spinlock);
1544  dlm_lockres_drop_inflight_ref(dlm, res);
1545  spin_unlock(&res->spinlock);
1546  mlog(0, "master needs to respond to sender "
1547  "that node %u still owns %.*s\n",
1548  real_master, res->lockname.len,
1549  res->lockname.name);
1550  /* cannot touch this lockres */
1551  goto leave;
1552  }
1553  }
1554 
1555  ret = dlm_process_recovery_data(dlm, res, mres);
1556  if (ret < 0)
1557  mlog(0, "dlm_process_recovery_data returned %d\n", ret);
1558  else
1559  mlog(0, "dlm_process_recovery_data succeeded\n");
1560 
1561  if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
1562  (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
1563  ret = dlm_finish_migration(dlm, res, mres->master);
1564  if (ret < 0)
1565  mlog_errno(ret);
1566  }
1567 
1568 leave:
1569  /* See comment in dlm_mig_lockres_handler() */
1570  if (res) {
1571  if (extra_ref)
1572  dlm_lockres_put(res);
1573  dlm_lockres_put(res);
1574  }
1575  kfree(data);
1576 }
1577 
1578 
1579 
1580 static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
1581  struct dlm_lock_resource *res,
1582  u8 *real_master)
1583 {
1584  struct dlm_node_iter iter;
1585  int nodenum;
1586  int ret = 0;
1587 
1588  *real_master = DLM_LOCK_RES_OWNER_UNKNOWN;
1589 
1590  /* we only reach here if one of the two nodes in a
1591  * migration died while the migration was in progress.
1592  * at this point we need to requery the master. we
1593  * know that the new_master got as far as creating
1594  * an mle on at least one node, but we do not know
1595  * if any nodes had actually cleared the mle and set
1596  * the master to the new_master. the old master
1597  * is supposed to set the owner to UNKNOWN in the
1598  * event of a new_master death, so the only possible
1599  * responses that we can get from nodes here are
1600  * that the master is new_master, or that the master
1601  * is UNKNOWN.
1602  * if all nodes come back with UNKNOWN then we know
1603  * the lock needs remastering here.
1604  * if any node comes back with a valid master, check
1605  * to see if that master is the one that we are
1606  * recovering. if so, then the new_master died and
1607  * we need to remaster this lock. if not, then the
1608  * new_master survived and that node will respond to
1609  * other nodes about the owner.
1610  * if there is an owner, this node needs to dump this
1611  * lockres and alert the sender that this lockres
1612  * was rejected. */
1613  spin_lock(&dlm->spinlock);
1614  dlm_node_iter_init(dlm->domain_map, &iter);
1615  spin_unlock(&dlm->spinlock);
1616 
1617  while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
1618  /* do not send to self */
1619  if (nodenum == dlm->node_num)
1620  continue;
1621  ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
1622  if (ret < 0) {
1623  mlog_errno(ret);
1624  if (!dlm_is_host_down(ret))
1625  BUG();
1626  /* host is down, so answer for that node would be
1627  * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
1628  }
1629  if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1630  mlog(0, "lock master is %u\n", *real_master);
1631  break;
1632  }
1633  }
1634  return ret;
1635 }
1636 
1637 
1638 int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1639  u8 nodenum, u8 *real_master)
1640 {
1641  int ret = -EINVAL;
1642  struct dlm_master_requery req;
1643  int status = DLM_LOCK_RES_OWNER_UNKNOWN;
1644 
1645  memset(&req, 0, sizeof(req));
1646  req.node_idx = dlm->node_num;
1647  req.namelen = res->lockname.len;
1648  memcpy(req.name, res->lockname.name, res->lockname.len);
1649 
1650  ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
1651  &req, sizeof(req), nodenum, &status);
1652  /* XXX: negative status not handled properly here. */
1653  if (ret < 0)
1654  mlog(ML_ERROR, "Error %d when sending message %u (key "
1655  "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG,
1656  dlm->key, nodenum);
1657  else {
1658  BUG_ON(status < 0);
1659  BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
1660  *real_master = (u8) (status & 0xff);
1661  mlog(0, "node %u responded to master requery with %u\n",
1662  nodenum, *real_master);
1663  ret = 0;
1664  }
1665  return ret;
1666 }
1667 
1668 
1669 /* this function cannot error, so unless the sending
1670  * or receiving of the message failed, the owner can
1671  * be trusted */
1672 int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
1673  void **ret_data)
1674 {
1675  struct dlm_ctxt *dlm = data;
1676  struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
1677  struct dlm_lock_resource *res = NULL;
1678  unsigned int hash;
1679  int master = DLM_LOCK_RES_OWNER_UNKNOWN;
1680  u32 flags = DLM_ASSERT_MASTER_REQUERY;
1681
1682  if (!dlm_grab(dlm)) {
1683  /* since the domain has gone away on this
1684  * node, the proper response is UNKNOWN */
1685  return master;
1686  }
1687 
1688  hash = dlm_lockid_hash(req->name, req->namelen);
1689 
1690  spin_lock(&dlm->spinlock);
1691  res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
1692  if (res) {
1693  spin_lock(&res->spinlock);
1694  master = res->owner;
1695  if (master == dlm->node_num) {
1696  int ret = dlm_dispatch_assert_master(dlm, res,
1697  0, 0, flags);
1698  if (ret < 0) {
1699  mlog_errno(-ENOMEM);
1700  /* retry!? */
1701  BUG();
1702  }
1703  } else /* put... in case we are not the master */
1704  dlm_lockres_put(res);
1705  spin_unlock(&res->spinlock);
1706  }
1707  spin_unlock(&dlm->spinlock);
1708 
1709  dlm_put(dlm);
1710  return master;
1711 }
1712 
1713 static inline struct list_head *
1714 dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
1715 {
1716  struct list_head *ret;
1717  BUG_ON(list_num < 0);
1718  BUG_ON(list_num > 2);
1719  ret = &(res->granted);
1720  ret += list_num;
1721  return ret;
1722 }
1723 /* TODO: do ast flush business
1724  * TODO: do MIGRATING and RECOVERING spinning
1725  */
1726 
1727 /*
1728 * NOTE about in-flight requests during migration:
1729 *
1730 * Before attempting the migrate, the master has marked the lockres as
1731 * MIGRATING and then flushed all of its pending ASTS. So any in-flight
1732 * requests either got queued before the MIGRATING flag got set, in which
1733 * case the lock data will reflect the change and a return message is on
1734 * the way, or the request failed to get in before MIGRATING got set. In
1735 * this case, the caller will be told to spin and wait for the MIGRATING
1736 * flag to be dropped, then recheck the master.
1737 * This holds true for the convert, cancel and unlock cases, and since lvb
1738 * updates are tied to these same messages, it applies to lvb updates as
1739 * well. For the lock case, there is no way a lock can be on the master
1740 * queue and not be on the secondary queue since the lock is always added
1741 * locally first. This means that the new target node will never be sent
1742 * a lock that he doesn't already have on the list.
1743 * In total, this means that the local lock is correct and should not be
1744 * updated to match the one sent by the master. Any messages sent back
1745 * from the master before the MIGRATING flag will bring the lock properly
1746 * up-to-date, and the change will be ordered properly for the waiter.
1747 * We will *not* attempt to modify the lock underneath the waiter.
1748 */
1749 
1750 static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1751  struct dlm_lock_resource *res,
1752  struct dlm_migratable_lockres *mres)
1753 {
1754  struct dlm_migratable_lock *ml;
1755  struct list_head *queue;
1756  struct list_head *tmpq = NULL;
1757  struct dlm_lock *newlock = NULL;
1758  struct dlm_lockstatus *lksb = NULL;
1759  int ret = 0;
1760  int i, j, bad;
1761  struct dlm_lock *lock = NULL;
1762  u8 from = O2NM_MAX_NODES;
1763  unsigned int added = 0;
1764  __be64 c;
1765 
1766  mlog(0, "running %d locks for this lockres\n", mres->num_locks);
1767  for (i=0; i<mres->num_locks; i++) {
1768  ml = &(mres->ml[i]);
1769 
1770  if (dlm_is_dummy_lock(dlm, ml, &from)) {
1771  /* placeholder, just need to set the refmap bit */
1772  BUG_ON(mres->num_locks != 1);
1773  mlog(0, "%s:%.*s: dummy lock for %u\n",
1774  dlm->name, mres->lockname_len, mres->lockname,
1775  from);
1776  spin_lock(&res->spinlock);
1777  dlm_lockres_set_refmap_bit(dlm, res, from);
1778  spin_unlock(&res->spinlock);
1779  added++;
1780  break;
1781  }
1782  BUG_ON(ml->highest_blocked != LKM_IVMODE);
1783  newlock = NULL;
1784  lksb = NULL;
1785 
1786  queue = dlm_list_num_to_pointer(res, ml->list);
1787  tmpq = NULL;
1788 
1789  /* if the lock is for the local node it needs to
1790  * be moved to the proper location within the queue.
1791  * do not allocate a new lock structure. */
1792  if (ml->node == dlm->node_num) {
1793  /* MIGRATION ONLY! */
1794  BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
1795 
1796  spin_lock(&res->spinlock);
1797  for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
1798  tmpq = dlm_list_idx_to_ptr(res, j);
1799  list_for_each_entry(lock, tmpq, list) {
1800  if (lock->ml.cookie != ml->cookie)
1801  lock = NULL;
1802  else
1803  break;
1804  }
1805  if (lock)
1806  break;
1807  }
1808 
1809  /* lock is always created locally first, and
1810  * destroyed locally last. it must be on the list */
1811  if (!lock) {
1812  c = ml->cookie;
1813  mlog(ML_ERROR, "Could not find local lock "
1814  "with cookie %u:%llu, node %u, "
1815  "list %u, flags 0x%x, type %d, "
1816  "conv %d, highest blocked %d\n",
1817  dlm_get_lock_cookie_node(be64_to_cpu(c)),
1818  dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1819  ml->node, ml->list, ml->flags, ml->type,
1820  ml->convert_type, ml->highest_blocked);
1821  __dlm_print_one_lock_resource(res);
1822  BUG();
1823  }
1824 
1825  if (lock->ml.node != ml->node) {
1826  c = lock->ml.cookie;
1827  mlog(ML_ERROR, "Mismatched node# in lock "
1828  "cookie %u:%llu, name %.*s, node %u\n",
1829  dlm_get_lock_cookie_node(be64_to_cpu(c)),
1830  dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1831  res->lockname.len, res->lockname.name,
1832  lock->ml.node);
1833  c = ml->cookie;
1834  mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
1835  "node %u, list %u, flags 0x%x, type %d, "
1836  "conv %d, highest blocked %d\n",
1837  dlm_get_lock_cookie_node(be64_to_cpu(c)),
1838  dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1839  ml->node, ml->list, ml->flags, ml->type,
1840  ml->convert_type, ml->highest_blocked);
1841  __dlm_print_one_lock_resource(res);
1842  BUG();
1843  }
1844 
1845  if (tmpq != queue) {
1846  c = ml->cookie;
1847  mlog(0, "Lock cookie %u:%llu was on list %u "
1848  "instead of list %u for %.*s\n",
1849  dlm_get_lock_cookie_node(be64_to_cpu(c)),
1850  dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1851  j, ml->list, res->lockname.len,
1852  res->lockname.name);
1853  __dlm_print_one_lock_resource(res);
1854  spin_unlock(&res->spinlock);
1855  continue;
1856  }
1857 
1858  /* see NOTE above about why we do not update
1859  * to match the master here */
1860 
1861  /* move the lock to its proper place */
1862  /* do not alter lock refcount. switching lists. */
1863  list_move_tail(&lock->list, queue);
1864  spin_unlock(&res->spinlock);
1865  added++;
1866 
1867  mlog(0, "just reordered a local lock!\n");
1868  continue;
1869  }
1870 
1871  /* lock is for another node. */
1872  newlock = dlm_new_lock(ml->type, ml->node,
1873  be64_to_cpu(ml->cookie), NULL);
1874  if (!newlock) {
1875  ret = -ENOMEM;
1876  goto leave;
1877  }
1878  lksb = newlock->lksb;
1879  dlm_lock_attach_lockres(newlock, res);
1880 
1881  if (ml->convert_type != LKM_IVMODE) {
1882  BUG_ON(queue != &res->converting);
1883  newlock->ml.convert_type = ml->convert_type;
1884  }
1885  lksb->flags |= (ml->flags &
1886  (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
1887 
1888  if (ml->type == LKM_NLMODE)
1889  goto skip_lvb;
1890 
1891  if (!dlm_lvb_is_empty(mres->lvb)) {
1892  if (lksb->flags & DLM_LKSB_PUT_LVB) {
1893  /* other node was trying to update
1894  * lvb when node died. recreate the
1895  * lksb with the updated lvb. */
1896  memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
1897  /* the lock resource lvb update must happen
1898  * NOW, before the spinlock is dropped.
1899  * we no longer wait for the AST to update
1900  * the lvb. */
1901  memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1902  } else {
1903  /* otherwise, the node is sending its
1904  * most recent valid lvb info */
1905  BUG_ON(ml->type != LKM_EXMODE &&
1906  ml->type != LKM_PRMODE);
1907  if (!dlm_lvb_is_empty(res->lvb) &&
1908  (ml->type == LKM_EXMODE ||
1909  memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
1910  int i;
1911  mlog(ML_ERROR, "%s:%.*s: received bad "
1912  "lvb! type=%d\n", dlm->name,
1913  res->lockname.len,
1914  res->lockname.name, ml->type);
1915  printk("lockres lvb=[");
1916  for (i=0; i<DLM_LVB_LEN; i++)
1917  printk("%02x", res->lvb[i]);
1918  printk("]\nmigrated lvb=[");
1919  for (i=0; i<DLM_LVB_LEN; i++)
1920  printk("%02x", mres->lvb[i]);
1921  printk("]\n");
1922  dlm_print_one_lock_resource(res);
1923  BUG();
1924  }
1925  memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1926  }
1927  }
1928 skip_lvb:
1929 
1930  /* NOTE:
1931  * wrt lock queue ordering and recovery:
1932  * 1. order of locks on granted queue is
1933  * meaningless.
1934  * 2. order of locks on converting queue is
1935  * LOST with the node death. sorry charlie.
1936  * 3. order of locks on the blocked queue is
1937  * also LOST.
1938  * order of locks does not affect integrity, it
1939  * just means that a lock request may get pushed
1940  * back in line as a result of the node death.
1941  * also note that for a given node the lock order
1942  * for its secondary queue locks is preserved
1943  * relative to each other, but clearly *not*
1944  * preserved relative to locks from other nodes.
1945  */
1946  bad = 0;
1947  spin_lock(&res->spinlock);
1948  list_for_each_entry(lock, queue, list) {
1949  if (lock->ml.cookie == ml->cookie) {
1950  c = lock->ml.cookie;
1951  mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
1952  "exists on this lockres!\n", dlm->name,
1953  res->lockname.len, res->lockname.name,
1954  dlm_get_lock_cookie_node(be64_to_cpu(c)),
1955  dlm_get_lock_cookie_seq(be64_to_cpu(c)));
1956 
1957  mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
1958  "node=%u, cookie=%u:%llu, queue=%d\n",
1959  ml->type, ml->convert_type, ml->node,
1960  dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)),
1961  dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)),
1962  ml->list);
1963 
1964  __dlm_print_one_lock_resource(res);
1965  bad = 1;
1966  break;
1967  }
1968  }
1969  if (!bad) {
1970  dlm_lock_get(newlock);
1971  list_add_tail(&newlock->list, queue);
1972  mlog(0, "%s:%.*s: added lock for node %u, "
1973  "setting refmap bit\n", dlm->name,
1974  res->lockname.len, res->lockname.name, ml->node);
1975  dlm_lockres_set_refmap_bit(dlm, res, ml->node);
1976  added++;
1977  }
1978  spin_unlock(&res->spinlock);
1979  }
1980  mlog(0, "done running all the locks\n");
1981 
1982 leave:
1983  /* balance the ref taken when the work was queued */
1984  spin_lock(&res->spinlock);
1985  dlm_lockres_drop_inflight_ref(dlm, res);
1986  spin_unlock(&res->spinlock);
1987 
1988  if (ret < 0) {
1989  mlog_errno(ret);
1990  if (newlock)
1991  dlm_lock_put(newlock);
1992  }
1993 
1994  return ret;
1995 }
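/*
 * A sketch of the list-index mapping used above: ml->list travels over
 * the wire as a plain u8 and is rehydrated into a queue pointer on the
 * receiving node. Assuming the enum dlm_lockres_list numbering from
 * dlmcommon.h (granted = 0, converting = 1, blocked = 2), a hypothetical
 * equivalent of dlm_list_num_to_pointer() would be:
 */
static inline struct list_head *
dlm_example_list_num_to_ptr(struct dlm_lock_resource *res, u8 list_num)
{
	switch (list_num) {
	case DLM_GRANTED_LIST:
		return &res->granted;
	case DLM_CONVERTING_LIST:
		return &res->converting;
	case DLM_BLOCKED_LIST:
		return &res->blocked;
	default:
		return NULL;	/* malformed message */
	}
}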
1996 
1997  void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
1998  struct dlm_lock_resource *res)
1999 {
2000  int i;
2001  struct list_head *queue;
2002  struct dlm_lock *lock, *next;
2003 
2004  assert_spin_locked(&dlm->spinlock);
2005  assert_spin_locked(&res->spinlock);
2006  res->state |= DLM_LOCK_RES_RECOVERING;
2007  if (!list_empty(&res->recovering)) {
2008  mlog(0,
2009  "Recovering res %s:%.*s, is already on recovery list!\n",
2010  dlm->name, res->lockname.len, res->lockname.name);
2011  list_del_init(&res->recovering);
2012  dlm_lockres_put(res);
2013  }
2014  /* We need to hold a reference while on the recovery list */
2015  dlm_lockres_get(res);
2016  list_add_tail(&res->recovering, &dlm->reco.resources);
2017 
2018  /* find any pending locks and put them back on proper list */
2019  for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
2020  queue = dlm_list_idx_to_ptr(res, i);
2021  list_for_each_entry_safe(lock, next, queue, list) {
2022  dlm_lock_get(lock);
2023  if (lock->convert_pending) {
2024  /* move converting lock back to granted */
2026  mlog(0, "node died with convert pending "
2027  "on %.*s. move back to granted list.\n",
2028  res->lockname.len, res->lockname.name);
2029  dlm_revert_pending_convert(res, lock);
2030  lock->convert_pending = 0;
2031  } else if (lock->lock_pending) {
2032  /* remove pending lock requests completely */
2033  BUG_ON(i != DLM_BLOCKED_LIST);
2034  mlog(0, "node died with lock pending "
2035  "on %.*s. remove from blocked list and skip.\n",
2036  res->lockname.len, res->lockname.name);
2037  /* lock will be floating until ref in
2038  * dlmlock_remote is freed after the network
2039  * call returns. ok for it to not be on any
2040  * list since no ast can be called
2041  * (the master is dead). */
2042  dlm_revert_pending_lock(res, lock);
2043  lock->lock_pending = 0;
2044  } else if (lock->unlock_pending) {
2045  /* if an unlock was in progress, treat as
2046  * if this had completed successfully
2047  * before sending this lock state to the
2048  * new master. note that the dlm_unlock
2049  * call is still responsible for calling
2050  * the unlockast. that will happen after
2051  * the network call times out. for now,
2052  * just move lists to prepare the new
2053  * recovery master. */
2054  BUG_ON(i != DLM_GRANTED_LIST);
2055  mlog(0, "node died with unlock pending "
2056  "on %.*s. remove from granted list and skip.\n",
2057  res->lockname.len, res->lockname.name);
2058  dlm_commit_pending_unlock(res, lock);
2059  lock->unlock_pending = 0;
2060  } else if (lock->cancel_pending) {
2061  /* if a cancel was in progress, treat as
2062  * if this had completed successfully
2063  * before sending this lock state to the
2064  * new master */
2065  BUG_ON(i != DLM_CONVERTING_LIST);
2066  mlog(0, "node died with cancel pending "
2067  "on %.*s. move back to granted list.\n",
2068  res->lockname.len, res->lockname.name);
2069  dlm_commit_pending_cancel(res, lock);
2070  lock->cancel_pending = 0;
2071  }
2072  dlm_lock_put(lock);
2073  }
2074  }
2075 }
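/*
 * An illustrative summary of the four pending-state dispositions above
 * (the function body is authoritative):
 *
 *	convert_pending -> dlm_revert_pending_convert(): the in-flight
 *		convert is abandoned; the lock stays granted at its old
 *		level on the granted queue
 *	lock_pending    -> dlm_revert_pending_lock(): the request is
 *		dropped entirely; the caller's dlmlock() sees the network
 *		timeout and releases its own reference
 *	unlock_pending  -> dlm_commit_pending_unlock(): treated as if the
 *		unlock had completed before the master died
 *	cancel_pending  -> dlm_commit_pending_cancel(): treated as
 *		complete; the lock moves back to the granted queue
 */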
2076 
2077 
2078 
2079 /* removes all recovered locks from the recovery list.
2080  * sets the res->owner to the new master.
2081  * unsets the RECOVERY flag and wakes waiters. */
2082 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
2083  u8 dead_node, u8 new_master)
2084 {
2085  int i;
2086  struct hlist_node *hash_iter;
2087  struct hlist_head *bucket;
2088  struct dlm_lock_resource *res, *next;
2089 
2090  assert_spin_locked(&dlm->spinlock);
2091 
2092  list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
2093  if (res->owner == dead_node) {
2094  mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
2095  dlm->name, res->lockname.len, res->lockname.name,
2096  res->owner, new_master);
2097  list_del_init(&res->recovering);
2098  spin_lock(&res->spinlock);
2099  /* new_master has our reference from
2100  * the lock state sent during recovery */
2101  dlm_change_lockres_owner(dlm, res, new_master);
2102  res->state &= ~DLM_LOCK_RES_RECOVERING;
2103  if (__dlm_lockres_has_locks(res))
2104  __dlm_dirty_lockres(dlm, res);
2105  spin_unlock(&res->spinlock);
2106  wake_up(&res->wq);
2107  dlm_lockres_put(res);
2108  }
2109  }
2110 
2111  /* this will become unnecessary eventually, but
2112  * for now we need to run the whole hash, clear
2113  * the RECOVERING state and set the owner
2114  * if necessary */
2115  for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2116  bucket = dlm_lockres_hash(dlm, i);
2117  hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
2118  if (!(res->state & DLM_LOCK_RES_RECOVERING))
2119  continue;
2120 
2121  if (res->owner != dead_node &&
2122  res->owner != dlm->node_num)
2123  continue;
2124 
2125  if (!list_empty(&res->recovering)) {
2126  list_del_init(&res->recovering);
2127  dlm_lockres_put(res);
2128  }
2129 
2130  /* new_master has our reference from
2131  * the lock state sent during recovery */
2132  mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
2133  dlm->name, res->lockname.len, res->lockname.name,
2134  res->owner, new_master);
2135  spin_lock(&res->spinlock);
2136  dlm_change_lockres_owner(dlm, res, new_master);
2137  res->state &= ~DLM_LOCK_RES_RECOVERING;
2138  if (__dlm_lockres_has_locks(res))
2139  __dlm_dirty_lockres(dlm, res);
2140  spin_unlock(&res->spinlock);
2141  wake_up(&res->wq);
2142  }
2143  }
2144 }
2145 
2146 static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
2147 {
2148  if (local) {
2149  if (lock->ml.type != LKM_EXMODE &&
2150  lock->ml.type != LKM_PRMODE)
2151  return 1;
2152  } else if (lock->ml.type == LKM_EXMODE)
2153  return 1;
2154  return 0;
2155 }
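/*
 * The helper above is a two-row truth table. Roughly (its caller,
 * dlm_revalidate_lvb() below, is authoritative):
 *
 *	local != 0: this node's own lock on a lockres mastered by the
 *		dead node -- invalidate unless we hold PR or EX, since
 *		anything below PR never guaranteed a valid cached lvb
 *	local == 0: the dead node's lock on a lockres we master --
 *		invalidate only for EX, since only an EX holder could
 *		have been writing the lvb when it died
 */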
2156 
2157 static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
2158  struct dlm_lock_resource *res, u8 dead_node)
2159 {
2160  struct list_head *queue;
2161  struct dlm_lock *lock;
2162  int blank_lvb = 0, local = 0;
2163  int i;
2164  u8 search_node;
2165 
2166  assert_spin_locked(&dlm->spinlock);
2167  assert_spin_locked(&res->spinlock);
2168 
2169  if (res->owner == dlm->node_num)
2170  /* if this node owned the lockres, and if the dead node
2171  * had an EX when he died, blank out the lvb */
2172  search_node = dead_node;
2173  else {
2174  /* if this is a secondary lockres, and we had no EX or PR
2175  * locks granted, we can no longer trust the lvb */
2176  search_node = dlm->node_num;
2177  local = 1; /* check local state for valid lvb */
2178  }
2179 
2180  for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
2181  queue = dlm_list_idx_to_ptr(res, i);
2182  list_for_each_entry(lock, queue, list) {
2183  if (lock->ml.node == search_node) {
2184  if (dlm_lvb_needs_invalidation(lock, local)) {
2185  /* zero the lksb lvb and lockres lvb */
2186  blank_lvb = 1;
2187  memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
2188  }
2189  }
2190  }
2191  }
2192 
2193  if (blank_lvb) {
2194  mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
2195  res->lockname.len, res->lockname.name, dead_node);
2196  memset(res->lvb, 0, DLM_LVB_LEN);
2197  }
2198 }
2199 
2200 static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2201  struct dlm_lock_resource *res, u8 dead_node)
2202 {
2203  struct dlm_lock *lock, *next;
2204  unsigned int freed = 0;
2205 
2206  /* this node is the lockres master:
2207  * 1) remove any stale locks for the dead node
2208  * 2) if the dead node had an EX when he died, blank out the lvb
2209  */
2210  assert_spin_locked(&dlm->spinlock);
2211  assert_spin_locked(&res->spinlock);
2212 
2213  /* We do two dlm_lock_put(). One for removing from list and the other is
2214  * to force the DLM_UNLOCK_FREE_LOCK action so as to free the locks */
2215 
2216  /* TODO: check pending_asts, pending_basts here */
2217  list_for_each_entry_safe(lock, next, &res->granted, list) {
2218  if (lock->ml.node == dead_node) {
2219  list_del_init(&lock->list);
2220  dlm_lock_put(lock);
2221  /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2222  dlm_lock_put(lock);
2223  freed++;
2224  }
2225  }
2226  list_for_each_entry_safe(lock, next, &res->converting, list) {
2227  if (lock->ml.node == dead_node) {
2228  list_del_init(&lock->list);
2229  dlm_lock_put(lock);
2230  /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2231  dlm_lock_put(lock);
2232  freed++;
2233  }
2234  }
2235  list_for_each_entry_safe(lock, next, &res->blocked, list) {
2236  if (lock->ml.node == dead_node) {
2237  list_del_init(&lock->list);
2238  dlm_lock_put(lock);
2239  /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2240  dlm_lock_put(lock);
2241  freed++;
2242  }
2243  }
2244 
2245  if (freed) {
2246  mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
2247  "dropping ref from lockres\n", dlm->name,
2248  res->lockname.len, res->lockname.name, freed, dead_node);
2249  if (!test_bit(dead_node, res->refmap)) {
2250  mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
2251  "but ref was not set\n", dlm->name,
2252  res->lockname.len, res->lockname.name, freed, dead_node);
2253  __dlm_print_one_lock_resource(res);
2254  }
2255  dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2256  } else if (test_bit(dead_node, res->refmap)) {
2257  mlog(0, "%s:%.*s: dead node %u had a ref, but had "
2258  "no locks and had not purged before dying\n", dlm->name,
2259  res->lockname.len, res->lockname.name, dead_node);
2260  dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2261  }
2262 
2263  /* do not kick thread yet */
2264  __dlm_dirty_lockres(dlm, res);
2265 }
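/*
 * Refcount sketch for the paired dlm_lock_put() calls above
 * (illustrative; the exact lifetimes live in the unlock paths): a lock
 * normally holds one reference for queue membership and one creation
 * reference that the DLM_UNLOCK_FREE_LOCK action would drop during a
 * regular unlock. The owner is dead, so no unlock will ever arrive; the
 * first put balances the list_del_init() and the second stands in for
 * DLM_UNLOCK_FREE_LOCK, letting the lock actually be freed.
 */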
2266 
2267 /* if this node is the recovery master, and there are no
2268  * locks for a given lockres owned by this node that are in
2269  * either PR or EX mode, zero out the lvb before requesting.
2270  *
2271  */
2272 
2273 
2274 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
2275 {
2276  struct hlist_node *iter;
2277  struct dlm_lock_resource *res;
2278  int i;
2279  struct hlist_head *bucket;
2280  struct dlm_lock *lock;
2281 
2282 
2283  /* purge any stale mles */
2284  dlm_clean_master_list(dlm, dead_node);
2285 
2286  /*
2287  * now clean up all lock resources. there are two rules:
2288  *
2289  * 1) if the dead node was the master, move the lockres
2290  * to the recovering list. set the RECOVERING flag.
2291  * this lockres needs to be cleaned up before it can
2292  * be used further.
2293  *
2294  * 2) if this node was the master, remove all locks from
2295  * each of the lockres queues that were owned by the
2296  * dead node. once recovery finishes, the dlm thread
2297  * can be kicked again to see if any ASTs or BASTs
2298  * need to be fired as a result.
2299  */
2300  for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2301  bucket = dlm_lockres_hash(dlm, i);
2302  hlist_for_each_entry(res, iter, bucket, hash_node) {
2303  /* always prune any $RECOVERY entries for dead nodes,
2304  * otherwise hangs can occur during later recovery */
2305  if (dlm_is_recovery_lock(res->lockname.name,
2306  res->lockname.len)) {
2307  spin_lock(&res->spinlock);
2308  list_for_each_entry(lock, &res->granted, list) {
2309  if (lock->ml.node == dead_node) {
2310  mlog(0, "AHA! there was "
2311  "a $RECOVERY lock for dead "
2312  "node %u (%s)!\n",
2313  dead_node, dlm->name);
2314  list_del_init(&lock->list);
2315  dlm_lock_put(lock);
2316  break;
2317  }
2318  }
2319  spin_unlock(&res->spinlock);
2320  continue;
2321  }
2322  spin_lock(&res->spinlock);
2323  /* zero the lvb if necessary */
2324  dlm_revalidate_lvb(dlm, res, dead_node);
2325  if (res->owner == dead_node) {
2326  if (res->state & DLM_LOCK_RES_DROPPING_REF) {
2327  mlog(ML_NOTICE, "%s: res %.*s, Skip "
2328  "recovery as it is being freed\n",
2329  dlm->name, res->lockname.len,
2330  res->lockname.name);
2331  } else
2332  dlm_move_lockres_to_recovery_list(dlm,
2333  res);
2334 
2335  } else if (res->owner == dlm->node_num) {
2336  dlm_free_dead_locks(dlm, res, dead_node);
2337  __dlm_lockres_calc_usage(dlm, res);
2338  }
2339  spin_unlock(&res->spinlock);
2340  }
2341  }
2342 
2343 }
2344 
2345 static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
2346 {
2347  assert_spin_locked(&dlm->spinlock);
2348 
2349  if (dlm->reco.new_master == idx) {
2350  mlog(0, "%s: recovery master %d just died\n",
2351  dlm->name, idx);
2352  if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2353  /* finalize1 was reached, so it is safe to clear
2354  * the new_master and dead_node. that recovery
2355  * is complete. */
2356  mlog(0, "%s: dead master %d had reached "
2357  "finalize1 state, clearing\n", dlm->name, idx);
2358  dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2359  __dlm_reset_recovery(dlm);
2360  }
2361  }
2362 
2363  /* Clean up join state on node death. */
2364  if (dlm->joining_node == idx) {
2365  mlog(0, "Clearing join state for node %u\n", idx);
2366  __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
2367  }
2368 
2369  /* check to see if the node is already considered dead */
2370  if (!test_bit(idx, dlm->live_nodes_map)) {
2371  mlog(0, "for domain %s, node %d is already dead. "
2372  "another node likely did recovery already.\n",
2373  dlm->name, idx);
2374  return;
2375  }
2376 
2377  /* check to see if we do not care about this node */
2378  if (!test_bit(idx, dlm->domain_map)) {
2379  /* This also catches the case that we get a node down
2380  * but haven't joined the domain yet. */
2381  mlog(0, "node %u already removed from domain!\n", idx);
2382  return;
2383  }
2384 
2385  clear_bit(idx, dlm->live_nodes_map);
2386 
2387  /* make sure local cleanup occurs before the heartbeat events */
2388  if (!test_bit(idx, dlm->recovery_map))
2389  dlm_do_local_recovery_cleanup(dlm, idx);
2390 
2391  /* notify anything attached to the heartbeat events */
2392  dlm_hb_event_notify_attached(dlm, idx, 0);
2393 
2394  mlog(0, "node %u being removed from domain map!\n", idx);
2395  clear_bit(idx, dlm->domain_map);
2396  clear_bit(idx, dlm->exit_domain_map);
2397  /* wake up migration waiters if a node goes down.
2398  * perhaps later we can genericize this for other waiters. */
2399  wake_up(&dlm->migration_wq);
2400 
2401  if (test_bit(idx, dlm->recovery_map))
2402  mlog(0, "domain %s, node %u already added "
2403  "to recovery map!\n", dlm->name, idx);
2404  else
2405  set_bit(idx, dlm->recovery_map);
2406 }
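/*
 * A sketch of the bitmap transitions a dead node takes through the
 * function above, assuming the usual heartbeat-callback ordering:
 *
 *	live_nodes_map:   1 -> 0   (stopped heartbeating)
 *	domain_map:       1 -> 0   (evicted from the domain)
 *	exit_domain_map:  x -> 0
 *	recovery_map:     0 -> 1   (now owed a recovery pass)
 *
 * Local cleanup runs before dlm_hb_event_notify_attached() so that
 * anything watching heartbeat events never sees a half-cleaned domain.
 */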
2407 
2408 void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
2409 {
2410  struct dlm_ctxt *dlm = data;
2411 
2412  if (!dlm_grab(dlm))
2413  return;
2414 
2415  /*
2416  * This will notify any dlm users that a node in our domain
2417  * went away without notifying us first.
2418  */
2419  if (test_bit(idx, dlm->domain_map))
2420  dlm_fire_domain_eviction_callbacks(dlm, idx);
2421 
2422  spin_lock(&dlm->spinlock);
2423  __dlm_hb_node_down(dlm, idx);
2424  spin_unlock(&dlm->spinlock);
2425 
2426  dlm_put(dlm);
2427 }
2428 
2429 void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
2430 {
2431  struct dlm_ctxt *dlm = data;
2432 
2433  if (!dlm_grab(dlm))
2434  return;
2435 
2436  spin_lock(&dlm->spinlock);
2437  set_bit(idx, dlm->live_nodes_map);
2438  /* do NOT notify mle attached to the heartbeat events.
2439  * new nodes are not interested in mastery until joined. */
2440  spin_unlock(&dlm->spinlock);
2441 
2442  dlm_put(dlm);
2443 }
2444 
2445 static void dlm_reco_ast(void *astdata)
2446 {
2447  struct dlm_ctxt *dlm = astdata;
2448  mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
2449  dlm->node_num, dlm->name);
2450 }
2451 static void dlm_reco_bast(void *astdata, int blocked_type)
2452 {
2453  struct dlm_ctxt *dlm = astdata;
2454  mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
2455  dlm->node_num, dlm->name);
2456 }
2457 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
2458 {
2459  mlog(0, "unlockast for recovery lock fired!\n");
2460 }
2461 
2462 /*
2463  * dlm_pick_recovery_master will continually attempt to use
2464  * dlmlock() on the special "$RECOVERY" lockres with the
2465  * LKM_NOQUEUE flag to get an EX. every thread that enters
2466  * this function on each node racing to become the recovery
2467  * master will not stop attempting this until either:
2468  * a) this node gets the EX (and becomes the recovery master),
2469  * or b) dlm->reco.new_master gets set to some nodenum
2470  * != O2NM_INVALID_NODE_NUM (another node will do the reco).
2471  * so each time a recovery master is needed, the entire cluster
2472  * will sync at this point. if the new master dies, that will
2473  * be detected in dlm_do_recovery */
2474 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
2475 {
2476  enum dlm_status ret;
2477  struct dlm_lockstatus lksb;
2478  int status = -EINVAL;
2479 
2480  mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
2481  dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
2482 again:
2483  memset(&lksb, 0, sizeof(lksb));
2484 
2485  ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
2486  DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN,
2487  dlm_reco_ast, dlm, dlm_reco_bast);
2488 
2489  mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
2490  dlm->name, ret, lksb.status);
2491 
2492  if (ret == DLM_NORMAL) {
2493  mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
2494  dlm->name, dlm->node_num);
2495 
2496  /* got the EX lock. check to see if another node
2497  * just became the reco master */
2498  if (dlm_reco_master_ready(dlm)) {
2499  mlog(0, "%s: got reco EX lock, but %u will "
2500  "do the recovery\n", dlm->name,
2501  dlm->reco.new_master);
2502  status = -EEXIST;
2503  } else {
2504  status = 0;
2505 
2506  /* see if recovery was already finished elsewhere */
2507  spin_lock(&dlm->spinlock);
2508  if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
2509  status = -EINVAL;
2510  mlog(0, "%s: got reco EX lock, but "
2511  "node got recovered already\n", dlm->name);
2512  if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2513  mlog(ML_ERROR, "%s: new master is %u "
2514  "but no dead node!\n",
2515  dlm->name, dlm->reco.new_master);
2516  BUG();
2517  }
2518  }
2519  spin_unlock(&dlm->spinlock);
2520  }
2521 
2522  /* if this node has actually become the recovery master,
2523  * set the master and send the messages to begin recovery */
2524  if (!status) {
2525  mlog(0, "%s: dead=%u, this=%u, sending "
2526  "begin_reco now\n", dlm->name,
2527  dlm->reco.dead_node, dlm->node_num);
2528  status = dlm_send_begin_reco_message(dlm,
2529  dlm->reco.dead_node);
2530  /* this always succeeds */
2531  BUG_ON(status);
2532 
2533  /* set the new_master to this node */
2534  spin_lock(&dlm->spinlock);
2535  dlm_set_reco_master(dlm, dlm->node_num);
2536  spin_unlock(&dlm->spinlock);
2537  }
2538 
2539  /* recovery lock is a special case. ast will not get fired,
2540  * so just go ahead and unlock it. */
2541  ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
2542  if (ret == DLM_DENIED) {
2543  mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
2544  ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
2545  }
2546  if (ret != DLM_NORMAL) {
2547  /* this would really suck. this could only happen
2548  * if there was a network error during the unlock
2549  * because of node death. this means the unlock
2550  * is actually "done" and the lock structure is
2551  * even freed. we can continue, but only
2552  * because this specific lock name is special. */
2553  mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
2554  }
2555  } else if (ret == DLM_NOTQUEUED) {
2556  mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
2557  dlm->name, dlm->node_num);
2558  /* another node is master. wait on
2559  * reco.new_master != O2NM_INVALID_NODE_NUM
2560  * for at most one second */
2561  wait_event_timeout(dlm->dlm_reco_thread_wq,
2562  dlm_reco_master_ready(dlm),
2563  msecs_to_jiffies(1000));
2564  if (!dlm_reco_master_ready(dlm)) {
2565  mlog(0, "%s: reco master taking awhile\n",
2566  dlm->name);
2567  goto again;
2568  }
2569  /* another node has informed this one that it is reco master */
2570  mlog(0, "%s: reco master %u is ready to recover %u\n",
2571  dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
2572  status = -EEXIST;
2573  } else if (ret == DLM_RECOVERING) {
2574  mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
2575  dlm->name, dlm->node_num);
2576  goto again;
2577  } else {
2578  struct dlm_lock_resource *res;
2579 
2580  /* dlmlock returned something other than NOTQUEUED or NORMAL */
2581  mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
2582  "lksb.status=%s\n", dlm->name, dlm_errname(ret),
2583  dlm_errname(lksb.status));
2584  res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2585  DLM_RECOVERY_LOCK_NAME_LEN);
2586  if (res) {
2587  dlm_print_one_lock_resource(res);
2588  dlm_lockres_put(res);
2589  } else {
2590  mlog(ML_ERROR, "recovery lock not found\n");
2591  }
2592  BUG();
2593  }
2594 
2595  return status;
2596 }
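/*
 * For reference, the dlmlock($RECOVERY) outcomes the function above
 * distinguishes, sketched as a table (see the block comment before the
 * function for the full contract):
 *
 *	DLM_NORMAL      won the EX: either become recovery master
 *	                (status 0) or discover another node beat us to it
 *	                (-EEXIST); the lock is dropped again immediately
 *	DLM_NOTQUEUED   another node holds the EX: wait up to one second
 *	                for reco.new_master to be set, else retry
 *	DLM_RECOVERING  the lock master itself died mid-race: retry
 *	anything else   treated as fatal (BUG)
 */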
2597 
2598 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
2599 {
2600  struct dlm_begin_reco br;
2601  int ret = 0;
2602  struct dlm_node_iter iter;
2603  int nodenum;
2604  int status;
2605 
2606  mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);
2607 
2608  spin_lock(&dlm->spinlock);
2609  dlm_node_iter_init(dlm->domain_map, &iter);
2610  spin_unlock(&dlm->spinlock);
2611 
2612  clear_bit(dead_node, iter.node_map);
2613 
2614  memset(&br, 0, sizeof(br));
2615  br.node_idx = dlm->node_num;
2616  br.dead_node = dead_node;
2617 
2618  while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2619  ret = 0;
2620  if (nodenum == dead_node) {
2621  mlog(0, "not sending begin reco to dead node "
2622  "%u\n", dead_node);
2623  continue;
2624  }
2625  if (nodenum == dlm->node_num) {
2626  mlog(0, "not sending begin reco to self\n");
2627  continue;
2628  }
2629 retry:
2630  ret = -EINVAL;
2631  mlog(0, "attempting to send begin reco msg to %d\n",
2632  nodenum);
2633  ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
2634  &br, sizeof(br), nodenum, &status);
2635  /* negative status is handled ok by caller here */
2636  if (ret >= 0)
2637  ret = status;
2638  if (dlm_is_host_down(ret)) {
2639  /* node is down. not involved in recovery
2640  * so just keep going */
2641  mlog(ML_NOTICE, "%s: node %u was down when sending "
2642  "begin reco msg (%d)\n", dlm->name, nodenum, ret);
2643  ret = 0;
2644  }
2645 
2646  /*
2647  * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
2648  * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
2649  * We are handling both for compatibility reasons.
2650  */
2651  if (ret == -EAGAIN || ret == EAGAIN) {
2652  mlog(0, "%s: trying to start recovery of node "
2653  "%u, but node %u is waiting for last recovery "
2654  "to complete, backoff for a bit\n", dlm->name,
2655  dead_node, nodenum);
2656  msleep(100);
2657  goto retry;
2658  }
2659  if (ret < 0) {
2660  struct dlm_lock_resource *res;
2661 
2662  /* this is now a serious problem, possibly ENOMEM
2663  * in the network stack. must retry */
2664  mlog_errno(ret);
2665  mlog(ML_ERROR, "begin reco of dlm %s to node %u "
2666  "returned %d\n", dlm->name, nodenum, ret);
2667  res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2668  DLM_RECOVERY_LOCK_NAME_LEN);
2669  if (res) {
2670  dlm_print_one_lock_resource(res);
2671  dlm_lockres_put(res);
2672  } else {
2673  mlog(ML_ERROR, "recovery lock not found\n");
2674  }
2675  /* sleep for a bit in hopes that we can avoid
2676  * another ENOMEM */
2677  msleep(100);
2678  goto retry;
2679  }
2680  }
2681 
2682  return ret;
2683 }
2684 
2685 int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2686  void **ret_data)
2687 {
2688  struct dlm_ctxt *dlm = data;
2689  struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;
2690 
2691  /* ok to return 0, domain has gone away */
2692  if (!dlm_grab(dlm))
2693  return 0;
2694 
2695  spin_lock(&dlm->spinlock);
2696  if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2697  mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
2698  "but this node is in finalize state, waiting on finalize2\n",
2699  dlm->name, br->node_idx, br->dead_node,
2700  dlm->reco.dead_node, dlm->reco.new_master);
2701  spin_unlock(&dlm->spinlock);
2702  return -EAGAIN;
2703  }
2704  spin_unlock(&dlm->spinlock);
2705 
2706  mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
2707  dlm->name, br->node_idx, br->dead_node,
2708  dlm->reco.dead_node, dlm->reco.new_master);
2709 
2710  dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);
2711 
2712  spin_lock(&dlm->spinlock);
2713  if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2714  if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
2715  mlog(0, "%s: new_master %u died, changing "
2716  "to %u\n", dlm->name, dlm->reco.new_master,
2717  br->node_idx);
2718  } else {
2719  mlog(0, "%s: new_master %u NOT DEAD, changing "
2720  "to %u\n", dlm->name, dlm->reco.new_master,
2721  br->node_idx);
2722  /* may not have seen the new master as dead yet */
2723  }
2724  }
2725  if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
2726  mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
2727  "node %u changing it to %u\n", dlm->name,
2728  dlm->reco.dead_node, br->node_idx, br->dead_node);
2729  }
2730  dlm_set_reco_master(dlm, br->node_idx);
2731  dlm_set_reco_dead_node(dlm, br->dead_node);
2732  if (!test_bit(br->dead_node, dlm->recovery_map)) {
2733  mlog(0, "recovery master %u sees %u as dead, but this "
2734  "node has not yet. marking %u as dead\n",
2735  br->node_idx, br->dead_node, br->dead_node);
2736  if (!test_bit(br->dead_node, dlm->domain_map) ||
2737  !test_bit(br->dead_node, dlm->live_nodes_map))
2738  mlog(0, "%u not in domain/live_nodes map "
2739  "so setting it in reco map manually\n",
2740  br->dead_node);
2741  /* force the recovery cleanup in __dlm_hb_node_down
2742  * both of these will be cleared in a moment */
2743  set_bit(br->dead_node, dlm->domain_map);
2744  set_bit(br->dead_node, dlm->live_nodes_map);
2745  __dlm_hb_node_down(dlm, br->dead_node);
2746  }
2747  spin_unlock(&dlm->spinlock);
2748 
2749  dlm_kick_recovery_thread(dlm);
2750 
2751  mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
2752  dlm->name, br->node_idx, br->dead_node,
2753  dlm->reco.dead_node, dlm->reco.new_master);
2754 
2755  dlm_put(dlm);
2756  return 0;
2757 }
2758 
2759 #define DLM_FINALIZE_STAGE2 0x01
2760 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
2761 {
2762  int ret = 0;
2763  struct dlm_finalize_reco fr;
2764  struct dlm_node_iter iter;
2765  int nodenum;
2766  int status;
2767  int stage = 1;
2768 
2769  mlog(0, "finishing recovery for node %s:%u, "
2770  "stage %d\n", dlm->name, dlm->reco.dead_node, stage);
2771 
2772  spin_lock(&dlm->spinlock);
2773  dlm_node_iter_init(dlm->domain_map, &iter);
2774  spin_unlock(&dlm->spinlock);
2775 
2776 stage2:
2777  memset(&fr, 0, sizeof(fr));
2778  fr.node_idx = dlm->node_num;
2779  fr.dead_node = dlm->reco.dead_node;
2780  if (stage == 2)
2781  fr.flags |= DLM_FINALIZE_STAGE2;
2782 
2783  while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2784  if (nodenum == dlm->node_num)
2785  continue;
2786  ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
2787  &fr, sizeof(fr), nodenum, &status);
2788  if (ret >= 0)
2789  ret = status;
2790  if (ret < 0) {
2791  mlog(ML_ERROR, "Error %d when sending message %u (key "
2792  "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG,
2793  dlm->key, nodenum);
2794  if (dlm_is_host_down(ret)) {
2795  /* this has no effect on this recovery
2796  * session, so set the status to zero to
2797  * finish out the last recovery */
2798  mlog(ML_ERROR, "node %u went down after this "
2799  "node finished recovery.\n", nodenum);
2800  ret = 0;
2801  continue;
2802  }
2803  break;
2804  }
2805  }
2806  if (stage == 1) {
2807  /* reset the node_iter back to the top and send finalize2 */
2808  iter.curnode = -1;
2809  stage = 2;
2810  goto stage2;
2811  }
2812 
2813  return ret;
2814 }
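/*
 * A sketch of the two-stage finalize handshake driven above; the two
 * messages share struct dlm_finalize_reco and differ only in the
 * DLM_FINALIZE_STAGE2 flag:
 *
 *	recovery master                 every other node
 *	---------------                 ----------------
 *	send finalize1 (flags = 0) ->   reassign lockres owners, set
 *	                                DLM_RECO_STATE_FINALIZE
 *	send finalize2 (STAGE2)    ->   clear DLM_RECO_STATE_FINALIZE,
 *	                                dlm_reset_recovery()
 *
 * The intermediate FINALIZE state is what makes a node answer a new
 * begin_reco with -EAGAIN until all nodes have seen finalize1.
 */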
2815 
2816 int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2817  void **ret_data)
2818 {
2819  struct dlm_ctxt *dlm = data;
2820  struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
2821  int stage = 1;
2822 
2823  /* ok to return 0, domain has gone away */
2824  if (!dlm_grab(dlm))
2825  return 0;
2826 
2827  if (fr->flags & DLM_FINALIZE_STAGE2)
2828  stage = 2;
2829 
2830  mlog(0, "%s: node %u finalizing recovery stage%d of "
2831  "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
2832  fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
2833 
2834  spin_lock(&dlm->spinlock);
2835 
2836  if (dlm->reco.new_master != fr->node_idx) {
2837  mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
2838  "%u is supposed to be the new master, dead=%u\n",
2839  fr->node_idx, dlm->reco.new_master, fr->dead_node);
2840  BUG();
2841  }
2842  if (dlm->reco.dead_node != fr->dead_node) {
2843  mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
2844  "node %u, but node %u is supposed to be dead\n",
2845  fr->node_idx, fr->dead_node, dlm->reco.dead_node);
2846  BUG();
2847  }
2848 
2849  switch (stage) {
2850  case 1:
2851  dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
2852  if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2853  mlog(ML_ERROR, "%s: received finalize1 from "
2854  "new master %u for dead node %u, but "
2855  "this node has already received it!\n",
2856  dlm->name, fr->node_idx, fr->dead_node);
2857  dlm_print_reco_node_status(dlm);
2858  BUG();
2859  }
2860  dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
2861  spin_unlock(&dlm->spinlock);
2862  break;
2863  case 2:
2864  if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
2865  mlog(ML_ERROR, "%s: received finalize2 from "
2866  "new master %u for dead node %u, but "
2867  "this node did not have finalize1!\n",
2868  dlm->name, fr->node_idx, fr->dead_node);
2869  dlm_print_reco_node_status(dlm);
2870  BUG();
2871  }
2872  dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2873  spin_unlock(&dlm->spinlock);
2874  dlm_reset_recovery(dlm);
2875  dlm_kick_recovery_thread(dlm);
2876  break;
2877  default:
2878  BUG();
2879  }
2880 
2881  mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
2882  dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);
2883 
2884  dlm_put(dlm);
2885  return 0;
2886 }