kexec.c (Linux Kernel 3.7.1)
1 /*
2  * kexec.c - kexec system call
3  * Copyright (C) 2002-2004 Eric Biederman <[email protected]>
4  *
5  * This source code is licensed under the GNU General Public License,
6  * Version 2. See the file COPYING for more details.
7  */
8 
9 #include <linux/capability.h>
10 #include <linux/mm.h>
11 #include <linux/file.h>
12 #include <linux/slab.h>
13 #include <linux/fs.h>
14 #include <linux/kexec.h>
15 #include <linux/mutex.h>
16 #include <linux/list.h>
17 #include <linux/highmem.h>
18 #include <linux/syscalls.h>
19 #include <linux/reboot.h>
20 #include <linux/ioport.h>
21 #include <linux/hardirq.h>
22 #include <linux/elf.h>
23 #include <linux/elfcore.h>
24 #include <linux/utsname.h>
25 #include <linux/numa.h>
26 #include <linux/suspend.h>
27 #include <linux/device.h>
28 #include <linux/freezer.h>
29 #include <linux/pm.h>
30 #include <linux/cpu.h>
31 #include <linux/console.h>
32 #include <linux/vmalloc.h>
33 #include <linux/swap.h>
34 #include <linux/syscore_ops.h>
35 
36 #include <asm/page.h>
37 #include <asm/uaccess.h>
38 #include <asm/io.h>
39 #include <asm/sections.h>
40 
41 /* Per cpu memory for storing cpu states in case of system crash. */
42 note_buf_t __percpu *crash_notes;
43 
44 /* vmcoreinfo stuff */
45 static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
46 u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
47 size_t vmcoreinfo_size;
48 size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
49 
50 /* Location of the reserved area for the crash kernel */
51 struct resource crashk_res = {
52  .name = "Crash kernel",
53  .start = 0,
54  .end = 0,
55  .flags = IORESOURCE_BUSY | IORESOURCE_MEM
56 };
57 
58 int kexec_should_crash(struct task_struct *p)
59 {
60  if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
61  return 1;
62  return 0;
63 }
64 
65 /*
66  * When kexec transitions to the new kernel there is a one-to-one
67  * mapping between physical and virtual addresses. On processors
68  * where you can disable the MMU this is trivial and easy. For
69  * others it is still a simple predictable page table to setup.
70  *
71  * In that environment kexec copies the new kernel to its final
72  * resting place. This means I can only support memory whose
73  * physical address can fit in an unsigned long. In particular
74  * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
75  * If the assembly stub has more restrictive requirements
76  * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
77  * defined more restrictively in <asm/kexec.h>.
78  *
79  * The code for the transition from the current kernel to
80  * the new kernel is placed in the control_code_buffer, whose size
81  * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single
82  * page of memory is necessary, but some architectures require more.
83  * Because this memory must be identity mapped in the transition from
84  * virtual to physical addresses it must live in the range
85  * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
86  * modifiable.
87  *
88  * The assembly stub in the control code buffer is passed a linked list
89  * of descriptor pages detailing the source pages of the new kernel,
90  * and the destination addresses of those source pages. As this data
91  * structure is not used in the context of the current OS, it must
92  * be self-contained.
93  *
94  * The code has been made to work with highmem pages and will use a
95  * destination page in its final resting place (if it happens
96  * to allocate it). The end product of this is that most of the
97  * physical address space, and most of RAM can be used.
98  *
99  * Future directions include:
100  * - allocating a page table with the control code buffer identity
101  * mapped, to simplify machine_kexec and make kexec_on_panic more
102  * reliable.
103  */
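/*
 * [Editor's sketch -- not part of kexec.c.] The descriptor list described
 * above is a chain of kimage_entry_t values: physical addresses tagged in
 * their low bits with IND_DESTINATION, IND_SOURCE, IND_INDIRECTION or
 * IND_DONE (from <linux/kexec.h>). A minimal walk of that list, mirroring
 * the for_each_kimage_entry() macro defined later in this file, looks
 * roughly like this:
 */
#if 0	/* illustration only */
static void example_walk_kimage_entries(kimage_entry_t *ptr)
{
	kimage_entry_t entry;

	while ((entry = *ptr) && !(entry & IND_DONE)) {
		if (entry & IND_INDIRECTION)
			/* chase the chain into the next indirection page */
			ptr = phys_to_virt(entry & PAGE_MASK);
		else
			/* IND_DESTINATION or IND_SOURCE: consume and advance */
			ptr++;
	}
}
#endif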
104 
105 /*
106  * KIMAGE_NO_DEST is an impossible destination address, used for
107  * allocating pages whose destination address we do not care about.
108  */
109 #define KIMAGE_NO_DEST (-1UL)
110 
111 static int kimage_is_destination_range(struct kimage *image,
112  unsigned long start, unsigned long end);
113 static struct page *kimage_alloc_page(struct kimage *image,
114  gfp_t gfp_mask,
115  unsigned long dest);
116 
117 static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
118  unsigned long nr_segments,
119  struct kexec_segment __user *segments)
120 {
121  size_t segment_bytes;
122  struct kimage *image;
123  unsigned long i;
124  int result;
125 
126  /* Allocate a controlling structure */
127  result = -ENOMEM;
128  image = kzalloc(sizeof(*image), GFP_KERNEL);
129  if (!image)
130  goto out;
131 
132  image->head = 0;
133  image->entry = &image->head;
134  image->last_entry = &image->head;
135  image->control_page = ~0; /* By default this does not apply */
136  image->start = entry;
137  image->type = KEXEC_TYPE_DEFAULT;
138 
139  /* Initialize the list of control pages */
140  INIT_LIST_HEAD(&image->control_pages);
141 
142  /* Initialize the list of destination pages */
143  INIT_LIST_HEAD(&image->dest_pages);
144 
145  /* Initialize the list of unusable pages */
146  INIT_LIST_HEAD(&image->unuseable_pages);
147 
148  /* Read in the segments */
149  image->nr_segments = nr_segments;
150  segment_bytes = nr_segments * sizeof(*segments);
151  result = copy_from_user(image->segment, segments, segment_bytes);
152  if (result) {
153  result = -EFAULT;
154  goto out;
155  }
156 
157  /*
158  * Verify we have good destination addresses. The caller is
159  * responsible for making certain we don't attempt to load
160  * the new image into invalid or reserved areas of RAM. This
161  * just verifies it is an address we can use.
162  *
163  * Since the kernel does everything in page size chunks, ensure
164  * the destination addresses are page aligned. Too many
165  * special cases crop up when we don't do this. The most
166  * insidious is getting overlapping destination addresses
167  * simply because addresses are changed to page size
168  * granularity.
169  */
170  result = -EADDRNOTAVAIL;
171  for (i = 0; i < nr_segments; i++) {
172  unsigned long mstart, mend;
173 
174  mstart = image->segment[i].mem;
175  mend = mstart + image->segment[i].memsz;
176  if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
177  goto out;
178  if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
179  goto out;
180  }
181 
182  /* Verify our destination addresses do not overlap.
183  * If we allowed overlapping destination addresses
184  * through, very weird things can happen with no
185  * easy explanation as one segment stomps on another.
186  */
187  result = -EINVAL;
188  for (i = 0; i < nr_segments; i++) {
189  unsigned long mstart, mend;
190  unsigned long j;
191 
192  mstart = image->segment[i].mem;
193  mend = mstart + image->segment[i].memsz;
194  for (j = 0; j < i; j++) {
195  unsigned long pstart, pend;
196  pstart = image->segment[j].mem;
197  pend = pstart + image->segment[j].memsz;
198  /* Do the segments overlap ? */
199  if ((mend > pstart) && (mstart < pend))
200  goto out;
201  }
202  }
203 
204  /* Ensure our buffer sizes are strictly less than
205  * our memory sizes. This should always be the case,
206  * and it is easier to check up front than to be surprised
207  * later on.
208  */
209  result = -EINVAL;
210  for (i = 0; i < nr_segments; i++) {
211  if (image->segment[i].bufsz > image->segment[i].memsz)
212  goto out;
213  }
214 
215  result = 0;
216 out:
217  if (result == 0)
218  *rimage = image;
219  else
220  kfree(image);
221 
222  return result;
223 
224 }
225 
226 static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
227  unsigned long nr_segments,
228  struct kexec_segment __user *segments)
229 {
230  int result;
231  struct kimage *image;
232 
233  /* Allocate and initialize a controlling structure */
234  image = NULL;
235  result = do_kimage_alloc(&image, entry, nr_segments, segments);
236  if (result)
237  goto out;
238 
239  *rimage = image;
240 
241  /*
242  * Find a location for the control code buffer, and add it
243  * to the vector of segments so that its pages will also be
244  * counted as destination pages.
245  */
246  result = -ENOMEM;
247  image->control_code_page = kimage_alloc_control_pages(image,
248  get_order(KEXEC_CONTROL_PAGE_SIZE));
249  if (!image->control_code_page) {
250  printk(KERN_ERR "Could not allocate control_code_buffer\n");
251  goto out;
252  }
253 
254  image->swap_page = kimage_alloc_control_pages(image, 0);
255  if (!image->swap_page) {
256  printk(KERN_ERR "Could not allocate swap buffer\n");
257  goto out;
258  }
259 
260  result = 0;
261  out:
262  if (result == 0)
263  *rimage = image;
264  else
265  kfree(image);
266 
267  return result;
268 }
269 
270 static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
271  unsigned long nr_segments,
272  struct kexec_segment __user *segments)
273 {
274  int result;
275  struct kimage *image;
276  unsigned long i;
277 
278  image = NULL;
279  /* Verify we have a valid entry point */
280  if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
281  result = -EADDRNOTAVAIL;
282  goto out;
283  }
284 
285  /* Allocate and initialize a controlling structure */
286  result = do_kimage_alloc(&image, entry, nr_segments, segments);
287  if (result)
288  goto out;
289 
290  /* Enable the special crash kernel control page
291  * allocation policy.
292  */
293  image->control_page = crashk_res.start;
294  image->type = KEXEC_TYPE_CRASH;
295 
296  /*
297  * Verify we have good destination addresses. Normally
298  * the caller is responsible for making certain we don't
299  * attempt to load the new image into invalid or reserved
300  * areas of RAM. But crash kernels are preloaded into a
301  * reserved area of ram. We must ensure the addresses
302  * are in the reserved area otherwise preloading the
303  * kernel could corrupt things.
304  */
305  result = -EADDRNOTAVAIL;
306  for (i = 0; i < nr_segments; i++) {
307  unsigned long mstart, mend;
308 
309  mstart = image->segment[i].mem;
310  mend = mstart + image->segment[i].memsz - 1;
311  /* Ensure we are within the crash kernel limits */
312  if ((mstart < crashk_res.start) || (mend > crashk_res.end))
313  goto out;
314  }
315 
316  /*
317  * Find a location for the control code buffer, and add it
318  * to the vector of segments so that its pages will also be
319  * counted as destination pages.
320  */
321  result = -ENOMEM;
322  image->control_code_page = kimage_alloc_control_pages(image,
323  get_order(KEXEC_CONTROL_PAGE_SIZE));
324  if (!image->control_code_page) {
325  printk(KERN_ERR "Could not allocate control_code_buffer\n");
326  goto out;
327  }
328 
329  result = 0;
330 out:
331  if (result == 0)
332  *rimage = image;
333  else
334  kfree(image);
335 
336  return result;
337 }
338 
339 static int kimage_is_destination_range(struct kimage *image,
340  unsigned long start,
341  unsigned long end)
342 {
343  unsigned long i;
344 
345  for (i = 0; i < image->nr_segments; i++) {
346  unsigned long mstart, mend;
347 
348  mstart = image->segment[i].mem;
349  mend = mstart + image->segment[i].memsz;
350  if ((end > mstart) && (start < mend))
351  return 1;
352  }
353 
354  return 0;
355 }
356 
357 static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
358 {
359  struct page *pages;
360 
361  pages = alloc_pages(gfp_mask, order);
362  if (pages) {
363  unsigned int count, i;
364  pages->mapping = NULL;
365  set_page_private(pages, order);
366  count = 1 << order;
367  for (i = 0; i < count; i++)
368  SetPageReserved(pages + i);
369  }
370 
371  return pages;
372 }
373 
374 static void kimage_free_pages(struct page *page)
375 {
376  unsigned int order, count, i;
377 
378  order = page_private(page);
379  count = 1 << order;
380  for (i = 0; i < count; i++)
381  ClearPageReserved(page + i);
382  __free_pages(page, order);
383 }
384 
385 static void kimage_free_page_list(struct list_head *list)
386 {
387  struct list_head *pos, *next;
388 
389  list_for_each_safe(pos, next, list) {
390  struct page *page;
391 
392  page = list_entry(pos, struct page, lru);
393  list_del(&page->lru);
394  kimage_free_pages(page);
395  }
396 }
397 
398 static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
399  unsigned int order)
400 {
401  /* Control pages are special; they are the intermediaries
402  * that are needed while we copy the rest of the pages
403  * to their final resting place. As such they must
404  * not conflict with either the destination addresses
405  * or memory the kernel is already using.
406  *
407  * The only case where we really need more than one of
408  * these is for architectures where we cannot disable
409  * the MMU and must instead generate an identity mapped
410  * page table for all of the memory.
411  *
412  * At worst this runs in O(N) of the image size.
413  */
414  struct list_head extra_pages;
415  struct page *pages;
416  unsigned int count;
417 
418  count = 1 << order;
419  INIT_LIST_HEAD(&extra_pages);
420 
421  /* Loop while I can allocate a page and the page allocated
422  * is a destination page.
423  */
424  do {
425  unsigned long pfn, epfn, addr, eaddr;
426 
427  pages = kimage_alloc_pages(GFP_KERNEL, order);
428  if (!pages)
429  break;
430  pfn = page_to_pfn(pages);
431  epfn = pfn + count;
432  addr = pfn << PAGE_SHIFT;
433  eaddr = epfn << PAGE_SHIFT;
434  if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
435  kimage_is_destination_range(image, addr, eaddr)) {
436  list_add(&pages->lru, &extra_pages);
437  pages = NULL;
438  }
439  } while (!pages);
440 
441  if (pages) {
442  /* Remember the allocated page... */
443  list_add(&pages->lru, &image->control_pages);
444 
445  * Because the page is already in its destination
446  * location we will never allocate another page at
447  * that address. Therefore kimage_alloc_pages
448  * will not return it (again) and we don't need
449  * to give it an entry in image->segment[].
450  */
451  }
452  /* Deal with the destination pages I have inadvertently allocated.
453  *
454  * Ideally I would convert multi-page allocations into single
455  * page allocations, and add everything to image->dest_pages.
456  *
457  * For now it is simpler to just free the pages.
458  */
459  kimage_free_page_list(&extra_pages);
460 
461  return pages;
462 }
463 
464 static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
465  unsigned int order)
466 {
467  /* Control pages are special; they are the intermediaries
468  * that are needed while we copy the rest of the pages
469  * to their final resting place. As such they must
470  * not conflict with either the destination addresses
471  * or memory the kernel is already using.
472  *
473  * Control pages are also the only pages we must allocate
474  * when loading a crash kernel. All of the other pages
475  * are specified by the segments and we just memcpy
476  * into them directly.
477  *
478  * The only case where we really need more than one of
479  * these is for architectures where we cannot disable
480  * the MMU and must instead generate an identity mapped
481  * page table for all of the memory.
482  *
483  * Given the low demand this implements a very simple
484  * allocator that finds the first hole of the appropriate
485  * size in the reserved memory region, and allocates all
486  * of the memory up to and including the hole.
487  */
488  unsigned long hole_start, hole_end, size;
489  struct page *pages;
490 
491  pages = NULL;
492  size = (1 << order) << PAGE_SHIFT;
493  hole_start = (image->control_page + (size - 1)) & ~(size - 1);
494  hole_end = hole_start + size - 1;
495  while (hole_end <= crashk_res.end) {
496  unsigned long i;
497 
498  if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
499  break;
500  if (hole_end > crashk_res.end)
501  break;
502  /* See if I overlap any of the segments */
503  for (i = 0; i < image->nr_segments; i++) {
504  unsigned long mstart, mend;
505 
506  mstart = image->segment[i].mem;
507  mend = mstart + image->segment[i].memsz - 1;
508  if ((hole_end >= mstart) && (hole_start <= mend)) {
509  /* Advance the hole to the end of the segment */
510  hole_start = (mend + (size - 1)) & ~(size - 1);
511  hole_end = hole_start + size - 1;
512  break;
513  }
514  }
515  /* If I don't overlap any segments I have found my hole! */
516  if (i == image->nr_segments) {
517  pages = pfn_to_page(hole_start >> PAGE_SHIFT);
518  break;
519  }
520  }
521  if (pages)
522  image->control_page = hole_end;
523 
524  return pages;
525 }
526 
527 
528 struct page *kimage_alloc_control_pages(struct kimage *image,
529  unsigned int order)
530 {
531  struct page *pages = NULL;
532 
533  switch (image->type) {
534  case KEXEC_TYPE_DEFAULT:
535  pages = kimage_alloc_normal_control_pages(image, order);
536  break;
537  case KEXEC_TYPE_CRASH:
538  pages = kimage_alloc_crash_control_pages(image, order);
539  break;
540  }
541 
542  return pages;
543 }
544 
545 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
546 {
547  if (*image->entry != 0)
548  image->entry++;
549 
550  if (image->entry == image->last_entry) {
551  kimage_entry_t *ind_page;
552  struct page *page;
553 
554  page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
555  if (!page)
556  return -ENOMEM;
557 
558  ind_page = page_address(page);
559  *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
560  image->entry = ind_page;
561  image->last_entry = ind_page +
562  ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
563  }
564  *image->entry = entry;
565  image->entry++;
566  *image->entry = 0;
567 
568  return 0;
569 }
570 
571 static int kimage_set_destination(struct kimage *image,
572  unsigned long destination)
573 {
574  int result;
575 
576  destination &= PAGE_MASK;
577  result = kimage_add_entry(image, destination | IND_DESTINATION);
578  if (result == 0)
579  image->destination = destination;
580 
581  return result;
582 }
583 
584 
585 static int kimage_add_page(struct kimage *image, unsigned long page)
586 {
587  int result;
588 
589  page &= PAGE_MASK;
590  result = kimage_add_entry(image, page | IND_SOURCE);
591  if (result == 0)
592  image->destination += PAGE_SIZE;
593 
594  return result;
595 }
596 
597 
598 static void kimage_free_extra_pages(struct kimage *image)
599 {
600  /* Walk through and free any extra destination pages I may have */
601  kimage_free_page_list(&image->dest_pages);
602 
603  /* Walk through and free any unusable pages I have cached */
604  kimage_free_page_list(&image->unuseable_pages);
605 
606 }
607 static void kimage_terminate(struct kimage *image)
608 {
609  if (*image->entry != 0)
610  image->entry++;
611 
612  *image->entry = IND_DONE;
613 }
614 
615 #define for_each_kimage_entry(image, ptr, entry) \
616  for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
617  ptr = (entry & IND_INDIRECTION)? \
618  phys_to_virt((entry & PAGE_MASK)): ptr +1)
619 
620 static void kimage_free_entry(kimage_entry_t entry)
621 {
622  struct page *page;
623 
624  page = pfn_to_page(entry >> PAGE_SHIFT);
625  kimage_free_pages(page);
626 }
627 
628 static void kimage_free(struct kimage *image)
629 {
630  kimage_entry_t *ptr, entry;
631  kimage_entry_t ind = 0;
632 
633  if (!image)
634  return;
635 
636  kimage_free_extra_pages(image);
637  for_each_kimage_entry(image, ptr, entry) {
638  if (entry & IND_INDIRECTION) {
639  /* Free the previous indirection page */
640  if (ind & IND_INDIRECTION)
641  kimage_free_entry(ind);
642  /* Save this indirection page until we are
643  * done with it.
644  */
645  ind = entry;
646  }
647  else if (entry & IND_SOURCE)
648  kimage_free_entry(entry);
649  }
650  /* Free the final indirection page */
651  if (ind & IND_INDIRECTION)
652  kimage_free_entry(ind);
653 
654  /* Handle any machine specific cleanup */
655  machine_kexec_cleanup(image);
656 
657  /* Free the kexec control pages... */
658  kimage_free_page_list(&image->control_pages);
659  kfree(image);
660 }
661 
662 static kimage_entry_t *kimage_dst_used(struct kimage *image,
663  unsigned long page)
664 {
665  kimage_entry_t *ptr, entry;
666  unsigned long destination = 0;
667 
668  for_each_kimage_entry(image, ptr, entry) {
669  if (entry & IND_DESTINATION)
670  destination = entry & PAGE_MASK;
671  else if (entry & IND_SOURCE) {
672  if (page == destination)
673  return ptr;
674  destination += PAGE_SIZE;
675  }
676  }
677 
678  return NULL;
679 }
680 
681 static struct page *kimage_alloc_page(struct kimage *image,
682  gfp_t gfp_mask,
683  unsigned long destination)
684 {
685  /*
686  * Here we implement safeguards to ensure that a source page
687  * is not copied to its destination page before the data on
688  * the destination page is no longer useful.
689  *
690  * To do this we maintain the invariant that a source page is
691  * either its own destination page, or it is not a
692  * destination page at all.
693  *
694  * That is slightly stronger than required, but the proof
695  * that no problems will occur is trivial, and the
696  * implementation is simple to verify.
697  *
698  * When allocating all pages normally this algorithm will run
699  * in O(N) time, but in the worst case it will run in O(N^2)
700  * time. If the runtime is a problem the data structures can
701  * be fixed.
702  */
703  struct page *page;
704  unsigned long addr;
705 
706  /*
707  * Walk through the list of destination pages, and see if I
708  * have a match.
709  */
710  list_for_each_entry(page, &image->dest_pages, lru) {
711  addr = page_to_pfn(page) << PAGE_SHIFT;
712  if (addr == destination) {
713  list_del(&page->lru);
714  return page;
715  }
716  }
717  page = NULL;
718  while (1) {
719  kimage_entry_t *old;
720 
721  /* Allocate a page, if we run out of memory give up */
722  page = kimage_alloc_pages(gfp_mask, 0);
723  if (!page)
724  return NULL;
725  /* If the page cannot be used, file it away */
726  if (page_to_pfn(page) >
727  (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
728  list_add(&page->lru, &image->unuseable_pages);
729  continue;
730  }
731  addr = page_to_pfn(page) << PAGE_SHIFT;
732 
733  /* If it is the destination page we want, use it */
734  if (addr == destination)
735  break;
736 
737  /* If the page is not a destination page, use it */
738  if (!kimage_is_destination_range(image, addr,
739  addr + PAGE_SIZE))
740  break;
741 
742  /*
743  * I know that the page is someone's destination page.
744  * See if there is already a source page for this
745  * destination page. And if so swap the source pages.
746  */
747  old = kimage_dst_used(image, addr);
748  if (old) {
749  /* If so move it */
750  unsigned long old_addr;
751  struct page *old_page;
752 
753  old_addr = *old & PAGE_MASK;
754  old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
755  copy_highpage(page, old_page);
756  *old = addr | (*old & ~PAGE_MASK);
757 
758  /* The old page I have found cannot be a
759  * destination page, so return it if its
760  * gfp_flags honor the ones passed in.
761  */
762  if (!(gfp_mask & __GFP_HIGHMEM) &&
763  PageHighMem(old_page)) {
764  kimage_free_pages(old_page);
765  continue;
766  }
767  addr = old_addr;
768  page = old_page;
769  break;
770  }
771  else {
772  /* Place the page on the destination list; I
773  * will use it later.
774  */
775  list_add(&page->lru, &image->dest_pages);
776  }
777  }
778 
779  return page;
780 }
781 
782 static int kimage_load_normal_segment(struct kimage *image,
783  struct kexec_segment *segment)
784 {
785  unsigned long maddr;
786  unsigned long ubytes, mbytes;
787  int result;
788  unsigned char __user *buf;
789 
790  result = 0;
791  buf = segment->buf;
792  ubytes = segment->bufsz;
793  mbytes = segment->memsz;
794  maddr = segment->mem;
795 
796  result = kimage_set_destination(image, maddr);
797  if (result < 0)
798  goto out;
799 
800  while (mbytes) {
801  struct page *page;
802  char *ptr;
803  size_t uchunk, mchunk;
804 
805  page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
806  if (!page) {
807  result = -ENOMEM;
808  goto out;
809  }
810  result = kimage_add_page(image, page_to_pfn(page)
811  << PAGE_SHIFT);
812  if (result < 0)
813  goto out;
814 
815  ptr = kmap(page);
816  /* Start with a clear page */
817  clear_page(ptr);
818  ptr += maddr & ~PAGE_MASK;
819  mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
820  if (mchunk > mbytes)
821  mchunk = mbytes;
822 
823  uchunk = mchunk;
824  if (uchunk > ubytes)
825  uchunk = ubytes;
826 
827  result = copy_from_user(ptr, buf, uchunk);
828  kunmap(page);
829  if (result) {
830  result = -EFAULT;
831  goto out;
832  }
833  ubytes -= uchunk;
834  maddr += mchunk;
835  buf += mchunk;
836  mbytes -= mchunk;
837  }
838 out:
839  return result;
840 }
841 
842 static int kimage_load_crash_segment(struct kimage *image,
843  struct kexec_segment *segment)
844 {
845  /* For crash dump kernels we simply copy the data from
846  * user space to its destination.
847  * We do things a page at a time for the sake of kmap.
848  */
849  unsigned long maddr;
850  unsigned long ubytes, mbytes;
851  int result;
852  unsigned char __user *buf;
853 
854  result = 0;
855  buf = segment->buf;
856  ubytes = segment->bufsz;
857  mbytes = segment->memsz;
858  maddr = segment->mem;
859  while (mbytes) {
860  struct page *page;
861  char *ptr;
862  size_t uchunk, mchunk;
863 
864  page = pfn_to_page(maddr >> PAGE_SHIFT);
865  if (!page) {
866  result = -ENOMEM;
867  goto out;
868  }
869  ptr = kmap(page);
870  ptr += maddr & ~PAGE_MASK;
871  mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
872  if (mchunk > mbytes)
873  mchunk = mbytes;
874 
875  uchunk = mchunk;
876  if (uchunk > ubytes) {
877  uchunk = ubytes;
878  /* Zero the trailing part of the page */
879  memset(ptr + uchunk, 0, mchunk - uchunk);
880  }
881  result = copy_from_user(ptr, buf, uchunk);
882  kexec_flush_icache_page(page);
883  kunmap(page);
884  if (result) {
885  result = -EFAULT;
886  goto out;
887  }
888  ubytes -= uchunk;
889  maddr += mchunk;
890  buf += mchunk;
891  mbytes -= mchunk;
892  }
893 out:
894  return result;
895 }
896 
897 static int kimage_load_segment(struct kimage *image,
898  struct kexec_segment *segment)
899 {
900  int result = -ENOMEM;
901 
902  switch (image->type) {
903  case KEXEC_TYPE_DEFAULT:
904  result = kimage_load_normal_segment(image, segment);
905  break;
906  case KEXEC_TYPE_CRASH:
907  result = kimage_load_crash_segment(image, segment);
908  break;
909  }
910 
911  return result;
912 }
913 
914 /*
915  * Exec Kernel system call: for obvious reasons only root may call it.
916  *
917  * This call breaks up into three pieces.
918  * - A generic part which loads the new kernel from the current
919  * address space, and very carefully places the data in the
920  * allocated pages.
921  *
922  * - A generic part that interacts with the kernel and tells all of
923  * the devices to shut down. Preventing on-going dmas, and placing
924  * the devices in a consistent state so a later kernel can
925  * reinitialize them.
926  *
927  * - A machine specific part that includes the syscall number
928  * and then copies the image to its final destination, and
929  * jumps into the image at entry.
930  *
931  * kexec does not sync or unmount filesystems, so if you need
932  * that to happen you need to do it yourself.
933  */
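/*
 * [Editor's sketch -- not part of kexec.c.] kexec_load has no glibc
 * wrapper, so user space (kexec-tools, for instance) reaches it through
 * syscall(2). A minimal, hypothetical single-segment load looks roughly
 * like this; the entry point and addresses below are placeholders:
 */
#if 0	/* user-space illustration only */
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/kexec.h>

static long load_one_segment(void *buf, size_t bufsz,
			     unsigned long dest, size_t memsz)
{
	struct kexec_segment seg = {
		.buf   = buf,		/* source buffer in user space */
		.bufsz = bufsz,
		.mem   = (void *)dest,	/* page-aligned physical target */
		.memsz = memsz,		/* page-aligned, >= bufsz */
	};

	/* entry is assumed here to equal the segment's load address */
	return syscall(SYS_kexec_load, dest, 1UL, &seg, KEXEC_ARCH_DEFAULT);
}
#endif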
934 struct kimage *kexec_image;
935 struct kimage *kexec_crash_image;
936 
937 static DEFINE_MUTEX(kexec_mutex);
938 
939 SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
940  struct kexec_segment __user *, segments, unsigned long, flags)
941 {
942  struct kimage **dest_image, *image;
943  int result;
944 
945  /* We only trust the superuser with rebooting the system. */
946  if (!capable(CAP_SYS_BOOT))
947  return -EPERM;
948 
949  /*
950  * Verify we have a legal set of flags
951  * This leaves us room for future extensions.
952  */
953  if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
954  return -EINVAL;
955 
956  /* Verify we are on the appropriate architecture */
957  if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
958  ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
959  return -EINVAL;
960 
961  /* Put an artificial cap on the number
962  * of segments passed to kexec_load.
963  */
964  if (nr_segments > KEXEC_SEGMENT_MAX)
965  return -EINVAL;
966 
967  image = NULL;
968  result = 0;
969 
970  /* Because we write directly to the reserved memory
971  * region when loading crash kernels we need a mutex here to
972  * prevent multiple crash kernels from attempting to load
973  * simultaneously, and to prevent a crash kernel from loading
974  * over the top of an in-use crash kernel.
975  *
976  * KISS: always take the mutex.
977  */
978  if (!mutex_trylock(&kexec_mutex))
979  return -EBUSY;
980 
981  dest_image = &kexec_image;
982  if (flags & KEXEC_ON_CRASH)
983  dest_image = &kexec_crash_image;
984  if (nr_segments > 0) {
985  unsigned long i;
986 
987  /* Loading another kernel to reboot into */
988  if ((flags & KEXEC_ON_CRASH) == 0)
989  result = kimage_normal_alloc(&image, entry,
990  nr_segments, segments);
991  /* Loading another kernel to switch to if this one crashes */
992  else if (flags & KEXEC_ON_CRASH) {
993  /* Free any current crash dump kernel before
994  * we corrupt it.
995  */
996  kimage_free(xchg(&kexec_crash_image, NULL));
997  result = kimage_crash_alloc(&image, entry,
998  nr_segments, segments);
999  crash_map_reserved_pages();
1000  }
1001  if (result)
1002  goto out;
1003 
1004  if (flags & KEXEC_PRESERVE_CONTEXT)
1005  image->preserve_context = 1;
1006  result = machine_kexec_prepare(image);
1007  if (result)
1008  goto out;
1009 
1010  for (i = 0; i < nr_segments; i++) {
1011  result = kimage_load_segment(image, &image->segment[i]);
1012  if (result)
1013  goto out;
1014  }
1015  kimage_terminate(image);
1016  if (flags & KEXEC_ON_CRASH)
1017  crash_unmap_reserved_pages();
1018  }
1019  /* Install the new kernel, and Uninstall the old */
1020  image = xchg(dest_image, image);
1021 
1022 out:
1023  mutex_unlock(&kexec_mutex);
1024  kimage_free(image);
1025 
1026  return result;
1027 }
1028 
1029 /*
1030  * Add and remove page tables for crashkernel memory
1031  *
1032  * Provide an empty default implementation here -- architecture
1033  * code may override this
1034  */
1035 void __weak crash_map_reserved_pages(void)
1036 {}
1037 
1038 void __weak crash_unmap_reserved_pages(void)
1039 {}
1040 
1041 #ifdef CONFIG_COMPAT
1042 asmlinkage long compat_sys_kexec_load(unsigned long entry,
1043  unsigned long nr_segments,
1044  struct compat_kexec_segment __user *segments,
1045  unsigned long flags)
1046 {
1047  struct compat_kexec_segment in;
1048  struct kexec_segment out, __user *ksegments;
1049  unsigned long i, result;
1050 
1051  /* Don't allow clients that don't understand the native
1052  * architecture to do anything.
1053  */
1054  if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
1055  return -EINVAL;
1056 
1057  if (nr_segments > KEXEC_SEGMENT_MAX)
1058  return -EINVAL;
1059 
1060  ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
1061  for (i=0; i < nr_segments; i++) {
1062  result = copy_from_user(&in, &segments[i], sizeof(in));
1063  if (result)
1064  return -EFAULT;
1065 
1066  out.buf = compat_ptr(in.buf);
1067  out.bufsz = in.bufsz;
1068  out.mem = in.mem;
1069  out.memsz = in.memsz;
1070 
1071  result = copy_to_user(&ksegments[i], &out, sizeof(out));
1072  if (result)
1073  return -EFAULT;
1074  }
1075 
1076  return sys_kexec_load(entry, nr_segments, ksegments, flags);
1077 }
1078 #endif
1079 
1080 void crash_kexec(struct pt_regs *regs)
1081 {
1082  /* Take the kexec_mutex here to prevent sys_kexec_load
1083  * running on one cpu from replacing the crash kernel
1084  * we are using after a panic on a different cpu.
1085  *
1086  * If the crash kernel was not located in a fixed area
1087  * of memory the xchg(&kexec_crash_image) would be
1088  * sufficient. But since I reuse the memory...
1089  */
1090  if (mutex_trylock(&kexec_mutex)) {
1091  if (kexec_crash_image) {
1092  struct pt_regs fixed_regs;
1093 
1094  crash_setup_regs(&fixed_regs, regs);
1095  crash_save_vmcoreinfo();
1096  machine_crash_shutdown(&fixed_regs);
1097  machine_kexec(kexec_crash_image);
1098  }
1099  mutex_unlock(&kexec_mutex);
1100  }
1101 }
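/*
 * [Editor's note -- not part of kexec.c.] crash_kexec() is the entry
 * point for the crash path: panic() calls it with regs == NULL, and
 * architecture oops/die handlers call it with the faulting register
 * state; crash_setup_regs() above fills in whatever the caller could
 * not provide from the current CPU.
 */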
1102 
1103 size_t crash_get_memory_size(void)
1104 {
1105  size_t size = 0;
1106  mutex_lock(&kexec_mutex);
1107  if (crashk_res.end != crashk_res.start)
1108  size = resource_size(&crashk_res);
1109  mutex_unlock(&kexec_mutex);
1110  return size;
1111 }
1112 
1113 void __weak crash_free_reserved_phys_range(unsigned long begin,
1114  unsigned long end)
1115 {
1116  unsigned long addr;
1117 
1118  for (addr = begin; addr < end; addr += PAGE_SIZE) {
1119  ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
1120  init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
1121  free_page((unsigned long)__va(addr));
1122  totalram_pages++;
1123  }
1124 }
1125 
1126 int crash_shrink_memory(unsigned long new_size)
1127 {
1128  int ret = 0;
1129  unsigned long start, end;
1130  unsigned long old_size;
1131  struct resource *ram_res;
1132 
1133  mutex_lock(&kexec_mutex);
1134 
1135  if (kexec_crash_image) {
1136  ret = -ENOENT;
1137  goto unlock;
1138  }
1139  start = crashk_res.start;
1140  end = crashk_res.end;
1141  old_size = (end == 0) ? 0 : end - start + 1;
1142  if (new_size >= old_size) {
1143  ret = (new_size == old_size) ? 0 : -EINVAL;
1144  goto unlock;
1145  }
1146 
1147  ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
1148  if (!ram_res) {
1149  ret = -ENOMEM;
1150  goto unlock;
1151  }
1152 
1153  start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
1154  end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
1155 
1156  crash_map_reserved_pages();
1157  crash_free_reserved_phys_range(end, crashk_res.end);
1158 
1159  if ((start == end) && (crashk_res.parent != NULL))
1160  release_resource(&crashk_res);
1161 
1162  ram_res->start = end;
1163  ram_res->end = crashk_res.end;
1164  ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
1165  ram_res->name = "System RAM";
1166 
1167  crashk_res.end = end - 1;
1168 
1169  insert_resource(&iomem_resource, ram_res);
1170  crash_unmap_reserved_pages();
1171 
1172 unlock:
1173  mutex_unlock(&kexec_mutex);
1174  return ret;
1175 }
1176 
1177 static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
1178  size_t data_len)
1179 {
1180  struct elf_note note;
1181 
1182  note.n_namesz = strlen(name) + 1;
1183  note.n_descsz = data_len;
1184  note.n_type = type;
1185  memcpy(buf, &note, sizeof(note));
1186  buf += (sizeof(note) + 3)/4;
1187  memcpy(buf, name, note.n_namesz);
1188  buf += (note.n_namesz + 3)/4;
1189  memcpy(buf, data, note.n_descsz);
1190  buf += (note.n_descsz + 3)/4;
1191 
1192  return buf;
1193 }
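/*
 * [Editor's note -- not part of kexec.c.] The buffer built above follows
 * the standard ELF note layout: a 12-byte header (n_namesz, n_descsz,
 * n_type), then the name and the descriptor data, each padded to a
 * 4-byte boundary -- hence the (len + 3)/4 arithmetic on the u32 cursor.
 */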
1194 
1195 static void final_note(u32 *buf)
1196 {
1197  struct elf_note note;
1198 
1199  note.n_namesz = 0;
1200  note.n_descsz = 0;
1201  note.n_type = 0;
1202  memcpy(buf, &note, sizeof(note));
1203 }
1204 
1205 void crash_save_cpu(struct pt_regs *regs, int cpu)
1206 {
1207  struct elf_prstatus prstatus;
1208  u32 *buf;
1209 
1210  if ((cpu < 0) || (cpu >= nr_cpu_ids))
1211  return;
1212 
1213  /* Using ELF notes here is opportunistic.
1214  * I need a well defined structure format
1215  * for the data I pass, and I need tags
1216  * on the data to indicate what information I have
1217  * squirrelled away. ELF notes happen to provide
1218  * all of that, so there is no need to invent something new.
1219  */
1220  buf = (u32*)per_cpu_ptr(crash_notes, cpu);
1221  if (!buf)
1222  return;
1223  memset(&prstatus, 0, sizeof(prstatus));
1224  prstatus.pr_pid = current->pid;
1225  elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
1226  buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
1227  &prstatus, sizeof(prstatus));
1228  final_note(buf);
1229 }
1230 
1231 static int __init crash_notes_memory_init(void)
1232 {
1233  /* Allocate memory for saving cpu registers. */
1234  crash_notes = alloc_percpu(note_buf_t);
1235  if (!crash_notes) {
1236  printk("Kexec: Memory allocation for saving cpu register"
1237  " states failed\n");
1238  return -ENOMEM;
1239  }
1240  return 0;
1241 }
1242 module_init(crash_notes_memory_init)
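/*
 * [Editor's note -- not part of kexec.c.] The per-cpu crash_notes buffers
 * allocated above are consumed from user space: the kernel exposes each
 * buffer's physical address via /sys/devices/system/cpu/cpuN/crash_notes,
 * and kexec-tools records those addresses in the capture kernel's ELF
 * core headers.
 */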
1243 
1244 
1245 /*
1246  * parsing the "crashkernel" commandline
1247  *
1248  * this code is intended to be called from architecture specific code
1249  */
1250 
1251 
1252 /*
1253  * This function parses command lines in the format
1254  *
1255  * crashkernel=ramsize-range:size[,...][@offset]
1256  *
1257  * The function returns 0 on success and -EINVAL on failure.
1258  */
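/*
 * [Illustrative example, added by the editor.] With the range syntax, a
 * command line such as
 *
 *	crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * reserves 64M when system RAM is between 512M and 2G, 128M when it is
 * 2G or more, and places the reservation at physical offset 16M.
 */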
1259 static int __init parse_crashkernel_mem(char *cmdline,
1260  unsigned long long system_ram,
1261  unsigned long long *crash_size,
1262  unsigned long long *crash_base)
1263 {
1264  char *cur = cmdline, *tmp;
1265 
1266  /* for each entry of the comma-separated list */
1267  do {
1268  unsigned long long start, end = ULLONG_MAX, size;
1269 
1270  /* get the start of the range */
1271  start = memparse(cur, &tmp);
1272  if (cur == tmp) {
1273  pr_warning("crashkernel: Memory value expected\n");
1274  return -EINVAL;
1275  }
1276  cur = tmp;
1277  if (*cur != '-') {
1278  pr_warning("crashkernel: '-' expected\n");
1279  return -EINVAL;
1280  }
1281  cur++;
1282 
1283  /* if no ':' is here, then we read the end */
1284  if (*cur != ':') {
1285  end = memparse(cur, &tmp);
1286  if (cur == tmp) {
1287  pr_warning("crashkernel: Memory "
1288  "value expected\n");
1289  return -EINVAL;
1290  }
1291  cur = tmp;
1292  if (end <= start) {
1293  pr_warning("crashkernel: end <= start\n");
1294  return -EINVAL;
1295  }
1296  }
1297 
1298  if (*cur != ':') {
1299  pr_warning("crashkernel: ':' expected\n");
1300  return -EINVAL;
1301  }
1302  cur++;
1303 
1304  size = memparse(cur, &tmp);
1305  if (cur == tmp) {
1306  pr_warning("Memory value expected\n");
1307  return -EINVAL;
1308  }
1309  cur = tmp;
1310  if (size >= system_ram) {
1311  pr_warning("crashkernel: invalid size\n");
1312  return -EINVAL;
1313  }
1314 
1315  /* match ? */
1316  if (system_ram >= start && system_ram < end) {
1317  *crash_size = size;
1318  break;
1319  }
1320  } while (*cur++ == ',');
1321 
1322  if (*crash_size > 0) {
1323  while (*cur && *cur != ' ' && *cur != '@')
1324  cur++;
1325  if (*cur == '@') {
1326  cur++;
1327  *crash_base = memparse(cur, &tmp);
1328  if (cur == tmp) {
1329  pr_warning("Memory value expected "
1330  "after '@'\n");
1331  return -EINVAL;
1332  }
1333  }
1334  }
1335 
1336  return 0;
1337 }
1338 
1339 /*
1340  * This function parses "simple" (old) crashkernel command lines like
1341  *
1342  * crashkernel=size[@offset]
1343  *
1344  * It returns 0 on success and -EINVAL on failure.
1345  */
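/*
 * [Illustrative example, added by the editor.] For instance,
 * crashkernel=128M@16M reserves 128M of RAM for the crash kernel,
 * starting at physical address 16M.
 */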
1346 static int __init parse_crashkernel_simple(char *cmdline,
1347  unsigned long long *crash_size,
1348  unsigned long long *crash_base)
1349 {
1350  char *cur = cmdline;
1351 
1352  *crash_size = memparse(cmdline, &cur);
1353  if (cmdline == cur) {
1354  pr_warning("crashkernel: memory value expected\n");
1355  return -EINVAL;
1356  }
1357 
1358  if (*cur == '@')
1359  *crash_base = memparse(cur+1, &cur);
1360  else if (*cur != ' ' && *cur != '\0') {
1361  pr_warning("crashkernel: unrecognized char\n");
1362  return -EINVAL;
1363  }
1364 
1365  return 0;
1366 }
1367 
1368 /*
1369  * This function is the entry point for command line parsing and should be
1370  * called from the arch-specific code.
1371  */
1372 int __init parse_crashkernel(char *cmdline,
1373  unsigned long long system_ram,
1374  unsigned long long *crash_size,
1375  unsigned long long *crash_base)
1376 {
1377  char *p = cmdline, *ck_cmdline = NULL;
1378  char *first_colon, *first_space;
1379 
1380  BUG_ON(!crash_size || !crash_base);
1381  *crash_size = 0;
1382  *crash_base = 0;
1383 
1384  /* find crashkernel and use the last one if there are more */
1385  p = strstr(p, "crashkernel=");
1386  while (p) {
1387  ck_cmdline = p;
1388  p = strstr(p+1, "crashkernel=");
1389  }
1390 
1391  if (!ck_cmdline)
1392  return -EINVAL;
1393 
1394  ck_cmdline += 12; /* strlen("crashkernel=") */
1395 
1396  /*
1397  * if the commandline contains a ':', then that's the extended
1398  * syntax -- if not, it must be the classic syntax
1399  */
1400  first_colon = strchr(ck_cmdline, ':');
1401  first_space = strchr(ck_cmdline, ' ');
1402  if (first_colon && (!first_space || first_colon < first_space))
1403  return parse_crashkernel_mem(ck_cmdline, system_ram,
1404  crash_size, crash_base);
1405  else
1406  return parse_crashkernel_simple(ck_cmdline, crash_size,
1407  crash_base);
1408 
1409  return 0;
1410 }
1411 
1412 
1413 static void update_vmcoreinfo_note(void)
1414 {
1415  u32 *buf = vmcoreinfo_note;
1416 
1417  if (!vmcoreinfo_size)
1418  return;
1419  buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
1420  vmcoreinfo_size);
1421  final_note(buf);
1422 }
1423 
1424 void crash_save_vmcoreinfo(void)
1425 {
1426  vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
1427  update_vmcoreinfo_note();
1428 }
1429 
1430 void vmcoreinfo_append_str(const char *fmt, ...)
1431 {
1432  va_list args;
1433  char buf[0x50];
1434  int r;
1435 
1436  va_start(args, fmt);
1437  r = vsnprintf(buf, sizeof(buf), fmt, args);
1438  va_end(args);
1439 
1440  if (r + vmcoreinfo_size > vmcoreinfo_max_size)
1441  r = vmcoreinfo_max_size - vmcoreinfo_size;
1442 
1443  memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
1444 
1445  vmcoreinfo_size += r;
1446 }
1447 
1448 /*
1449  * provide an empty default implementation here -- architecture
1450  * code may override this
1451  */
1452 void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
1453 {}
1454 
1455 unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
1456 {
1457  return __pa((unsigned long)(char *)&vmcoreinfo_note);
1458 }
1459 
1460 static int __init crash_save_vmcoreinfo_init(void)
1461 {
1462  VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
1463  VMCOREINFO_PAGESIZE(PAGE_SIZE);
1464 
1465  VMCOREINFO_SYMBOL(init_uts_ns);
1466  VMCOREINFO_SYMBOL(node_online_map);
1467 #ifdef CONFIG_MMU
1468  VMCOREINFO_SYMBOL(swapper_pg_dir);
1469 #endif
1470  VMCOREINFO_SYMBOL(_stext);
1471  VMCOREINFO_SYMBOL(vmlist);
1472 
1473 #ifndef CONFIG_NEED_MULTIPLE_NODES
1474  VMCOREINFO_SYMBOL(mem_map);
1475  VMCOREINFO_SYMBOL(contig_page_data);
1476 #endif
1477 #ifdef CONFIG_SPARSEMEM
1478  VMCOREINFO_SYMBOL(mem_section);
1479  VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
1480  VMCOREINFO_STRUCT_SIZE(mem_section);
1481  VMCOREINFO_OFFSET(mem_section, section_mem_map);
1482 #endif
1483  VMCOREINFO_STRUCT_SIZE(page);
1484  VMCOREINFO_STRUCT_SIZE(pglist_data);
1485  VMCOREINFO_STRUCT_SIZE(zone);
1486  VMCOREINFO_STRUCT_SIZE(free_area);
1487  VMCOREINFO_STRUCT_SIZE(list_head);
1488  VMCOREINFO_SIZE(nodemask_t);
1489  VMCOREINFO_OFFSET(page, flags);
1490  VMCOREINFO_OFFSET(page, _count);
1491  VMCOREINFO_OFFSET(page, mapping);
1492  VMCOREINFO_OFFSET(page, lru);
1493  VMCOREINFO_OFFSET(pglist_data, node_zones);
1494  VMCOREINFO_OFFSET(pglist_data, nr_zones);
1495 #ifdef CONFIG_FLAT_NODE_MEM_MAP
1496  VMCOREINFO_OFFSET(pglist_data, node_mem_map);
1497 #endif
1498  VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
1499  VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
1500  VMCOREINFO_OFFSET(pglist_data, node_id);
1501  VMCOREINFO_OFFSET(zone, free_area);
1502  VMCOREINFO_OFFSET(zone, vm_stat);
1503  VMCOREINFO_OFFSET(zone, spanned_pages);
1504  VMCOREINFO_OFFSET(free_area, free_list);
1505  VMCOREINFO_OFFSET(list_head, next);
1506  VMCOREINFO_OFFSET(list_head, prev);
1507  VMCOREINFO_OFFSET(vm_struct, addr);
1508  VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
1509  log_buf_kexec_setup();
1510  VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
1511  VMCOREINFO_NUMBER(NR_FREE_PAGES);
1512  VMCOREINFO_NUMBER(PG_lru);
1513  VMCOREINFO_NUMBER(PG_private);
1514  VMCOREINFO_NUMBER(PG_swapcache);
1515 
1516  arch_crash_save_vmcoreinfo();
1517  update_vmcoreinfo_note();
1518 
1519  return 0;
1520 }
1521 
1522 module_init(crash_save_vmcoreinfo_init)
1523 
1524 /*
1525  * Move into place and start executing a preloaded standalone
1526  * executable. If nothing was preloaded return an error.
1527  */
1528 int kernel_kexec(void)
1529 {
1530  int error = 0;
1531 
1532  if (!mutex_trylock(&kexec_mutex))
1533  return -EBUSY;
1534  if (!kexec_image) {
1535  error = -EINVAL;
1536  goto Unlock;
1537  }
1538 
1539 #ifdef CONFIG_KEXEC_JUMP
1540  if (kexec_image->preserve_context) {
1541  lock_system_sleep();
1542  pm_prepare_console();
1543  error = freeze_processes();
1544  if (error) {
1545  error = -EBUSY;
1546  goto Restore_console;
1547  }
1548  suspend_console();
1549  error = dpm_suspend_start(PMSG_FREEZE);
1550  if (error)
1551  goto Resume_console;
1552  /* At this point, dpm_suspend_start() has been called,
1553  * but *not* dpm_suspend_end(). We *must* call
1554  * dpm_suspend_end() now. Otherwise, drivers for
1555  * some devices (e.g. interrupt controllers) become
1556  * desynchronized with the actual state of the
1557  * hardware at resume time, and evil weirdness ensues.
1558  */
1559  error = dpm_suspend_end(PMSG_FREEZE);
1560  if (error)
1561  goto Resume_devices;
1562  error = disable_nonboot_cpus();
1563  if (error)
1564  goto Enable_cpus;
1565  local_irq_disable();
1566  error = syscore_suspend();
1567  if (error)
1568  goto Enable_irqs;
1569  } else
1570 #endif
1571  {
1572  kernel_restart_prepare(NULL);
1573  printk(KERN_EMERG "Starting new kernel\n");
1574  machine_shutdown();
1575  }
1576 
1577  machine_kexec(kexec_image);
1578 
1579 #ifdef CONFIG_KEXEC_JUMP
1580  if (kexec_image->preserve_context) {
1581  syscore_resume();
1582  Enable_irqs:
1583  local_irq_enable();
1584  Enable_cpus:
1585  enable_nonboot_cpus();
1586  dpm_resume_start(PMSG_RESTORE);
1587  Resume_devices:
1588  dpm_resume_end(PMSG_RESTORE);
1589  Resume_console:
1590  resume_console();
1591  thaw_processes();
1592  Restore_console:
1593  pm_restore_console();
1594  unlock_system_sleep();
1595  }
1596 #endif
1597 
1598  Unlock:
1599  mutex_unlock(&kexec_mutex);
1600  return error;
1601 }