13 #include <linux/elf.h>
14 #include <linux/elfcore.h>
15 #include <linux/export.h>
16 #include <linux/slab.h>
21 #include <linux/list.h>
22 #include <asm/uaccess.h>
31 static char *elfcorebuf;
32 static size_t elfcorebuf_sz;
35 static u64 vmcore_size;
43 static int (*oldmem_pfn_is_ram)(unsigned long pfn);
47 if (oldmem_pfn_is_ram)
49 oldmem_pfn_is_ram = fn;
56 oldmem_pfn_is_ram = NULL;
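Lines 43-56 are the single hook that lets a back end (typically a hypervisor driver) tell the dump code which old-kernel pages are really backed by RAM. A minimal sketch of how a hypothetical back end module might use the register/unregister pair; the names my_pfn_is_backed and my_backend_* are illustrative, not from the source:

#include <linux/crash_dump.h>
#include <linux/init.h>
#include <linux/module.h>

/* Hypothetical callback: report whether a PFN of the crashed kernel is backed. */
static int my_pfn_is_backed(unsigned long pfn)
{
	return 1;	/* e.g. consult a hypervisor-maintained bitmap here */
}

static int __init my_backend_init(void)
{
	/* Only one callback can be installed (see the guard at line 47). */
	return register_oldmem_pfn_is_ram(my_pfn_is_backed);
}

static void __exit my_backend_exit(void)
{
	unregister_oldmem_pfn_is_ram();
}

module_init(my_backend_init);
module_exit(my_backend_exit);
MODULE_LICENSE("GPL");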
61 static int pfn_is_ram(unsigned long pfn)
63 int (*fn)(unsigned long pfn);
72 fn = oldmem_pfn_is_ram;
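pfn_is_ram() only consults the registered callback; the lines elided from the excerpt default to treating the page as RAM when nothing is registered. Roughly, as a sketch of the whole function:

static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long pfn);
	int ret = 1;			/* assume RAM unless a back end says otherwise */

	/* Ask the registered back end (e.g. a hypervisor) whether the pfn is real RAM. */
	fn = oldmem_pfn_is_ram;
	if (fn)
		ret = fn(pfn);

	return ret;
}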
81 u64 *ppos, int userbuf)
91 pfn = (unsigned long)(*ppos / PAGE_SIZE);
94 if (count > (PAGE_SIZE - offset))
95 nr_bytes = PAGE_SIZE - offset;
100 if (pfn_is_ram(pfn) == 0)
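The branch after line 100 (elided above) is what keeps /proc/vmcore readable when a page is not backed: non-RAM pages are zero-filled, everything else is copied with the arch helper copy_oldmem_page(). A sketch of that per-page step, assuming the buf/tmp/nr_bytes/offset/userbuf locals of read_from_oldmem():

		if (pfn_is_ram(pfn) == 0)
			memset(buf, 0, nr_bytes);	/* e.g. a ballooned page: return zeros */
		else {
			tmp = copy_oldmem_page(pfn, buf, nr_bytes,
					       offset, userbuf);
			if (tmp < 0)
				return tmp;
		}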
120 static u64 map_offset_to_paddr(loff_t offset, struct list_head *vc_list,
130 if (offset >= start && offset <= end) {
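map_offset_to_paddr() translates a file offset in /proc/vmcore into a physical address of the old kernel by walking vmcore_list. A sketch of the whole helper, assuming the struct vmcore fields (offset, size, paddr, list) used elsewhere in this file:

static u64 map_offset_to_paddr(loff_t offset, struct list_head *vc_list,
			       struct vmcore **m_ptr)
{
	struct vmcore *m;

	list_for_each_entry(m, vc_list, list) {
		u64 start = m->offset;
		u64 end = m->offset + m->size - 1;

		if (offset >= start && offset <= end) {
			*m_ptr = m;	/* remember the chunk for the caller's read loop */
			return m->paddr + (offset - start);
		}
	}
	*m_ptr = NULL;
	return 0;
}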
144 size_t buflen, loff_t *fpos)
151 if (buflen == 0 || *fpos >= vmcore_size)
155 if (buflen > vmcore_size - *fpos)
156 buflen = vmcore_size - *fpos;
159 if (*fpos < elfcorebuf_sz) {
160 tsz = elfcorebuf_sz - *fpos;
175 start = map_offset_to_paddr(*fpos, &vmcore_list, &curr_m);
178 if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
182 nr_bytes = (curr_m->size - (start - curr_m->paddr));
187 tmp = read_from_oldmem(buffer, tsz, &start, 1);
194 if (start >= (curr_m->paddr + curr_m->size)) {
195 if (curr_m->list.next == &vmcore_list)
199 start = curr_m->paddr;
201 if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
204 nr_bytes = (curr_m->size - (start - curr_m->paddr));
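From userspace the result of this read path is an ordinary ELF core file. A small userspace sketch (error handling kept minimal) that reads the ELF header of /proc/vmcore in the kdump kernel:

#include <elf.h>
#include <stdio.h>

int main(void)
{
	Elf64_Ehdr ehdr;
	FILE *f = fopen("/proc/vmcore", "r");

	if (!f || fread(&ehdr, sizeof(ehdr), 1, f) != 1) {
		perror("/proc/vmcore");
		return 1;
	}
	printf("e_phnum=%u e_phoff=%llu\n", ehdr.e_phnum,
	       (unsigned long long)ehdr.e_phoff);
	fclose(f);
	return 0;
}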
221 static u64 __init get_vmcore_size_elf64(char *elfptr)
231 for (i = 0; i < ehdr_ptr->e_phnum; i++) {
238 static u64 __init get_vmcore_size_elf32(char *elfptr)
248 for (i = 0; i < ehdr_ptr->e_phnum; i++) {
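Both size helpers compute the same quantity: the ELF header plus all program headers, plus the p_memsz of every program header (the merged note segment and each PT_LOAD chunk). A sketch of the 64-bit variant under that reading:

static u64 __init get_vmcore_size_elf64(char *elfptr)
{
	int i;
	u64 size;
	Elf64_Ehdr *ehdr_ptr = (Elf64_Ehdr *)elfptr;
	Elf64_Phdr *phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

	/* The headers themselves ... */
	size = sizeof(Elf64_Ehdr) + ehdr_ptr->e_phnum * sizeof(Elf64_Phdr);
	/* ... plus the memory each program header covers. */
	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
		size += phdr_ptr->p_memsz;
		phdr_ptr++;
	}
	return size;
}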
256 static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
259 int i, nr_ptnote=0, rc=0;
264 u64 phdr_sz = 0, note_off;
268 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
281 rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
283 kfree(notes_section);
286 nhdr_ptr = notes_section;
287 for (j = 0; j < max_sz; j += sz) {
294 nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
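The step `sz` used by the loop at line 287 is the size of one note entry; the lines elided between 287 and 294 presumably compute it as the note header plus the name and descriptor, each padded to a 4-byte boundary:

			/* One note = header + 4-byte-aligned name + 4-byte-aligned desc. */
			sz = sizeof(Elf64_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);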
298 new = get_new_element();
300 kfree(notes_section);
307 kfree(notes_section);
322 memcpy(tmp, &phdr, sizeof(phdr));
337 static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
340 int i, nr_ptnote=0, rc=0;
345 u64 phdr_sz = 0, note_off;
349 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
362 rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
364 kfree(notes_section);
367 nhdr_ptr = notes_section;
368 for (j = 0; j < max_sz; j += sz) {
375 nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
379 new = get_new_element();
381 kfree(notes_section);
388 kfree(notes_section);
403 memcpy(tmp, &phdr, sizeof(phdr));
419 static int __init process_ptload_program_headers_elf64(char *elfptr,
437 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
442 new = get_new_element();
451 vmcore_off = vmcore_off + phdr_ptr->p_memsz;
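Each PT_LOAD header describes one contiguous chunk of old memory. The loop records the chunk in vmcore_list (its physical address and size) and then rewrites p_offset so the header points at the chunk's position inside /proc/vmcore. A condensed sketch of the loop body, with error handling trimmed:

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* Remember where this chunk lives in old memory ... */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = phdr_ptr->p_offset;
		new->size = phdr_ptr->p_memsz;
		list_add_tail(&new->list, vc_list);

		/* ... and where it will appear in the exported file. */
		phdr_ptr->p_offset = vmcore_off;
		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
	}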
456 static int __init process_ptload_program_headers_elf32(char *elfptr,
474 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
479 new = get_new_element();
488 vmcore_off = vmcore_off + phdr_ptr->p_memsz;
494 static void __init set_vmcore_list_offsets_elf64(char *elfptr,
509 vmcore_off += m->size;
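What the excerpt omits around line 509 is the per-entry assignment: each chunk is stamped with the running file offset before that offset is advanced by the chunk's size, so vmcore_list ends up mirroring the final file layout. Sketch:

	/* Start past the ELF header and the program header table. */
	vmcore_off = sizeof(Elf64_Ehdr) +
			ehdr_ptr->e_phnum * sizeof(Elf64_Phdr);

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;	/* where this chunk starts in /proc/vmcore */
		vmcore_off += m->size;
	}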
514 static void __init set_vmcore_list_offsets_elf32(char *elfptr,
529 vmcore_off += m->size;
533 static int __init parse_crash_elf64_headers(void)
542 rc = read_from_oldmem((char*)&ehdr, sizeof(Elf64_Ehdr), &addr, 0);
567 rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
574 rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
579 rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
585 set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list);
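Between reading the ELF header at line 542 and copying the header block at line 567, the function sanity-checks what it read. A sketch of the kind of checks involved, using the usual <linux/elf.h> constants (the exact list may differ by kernel version):

	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
	    ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
	    ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
	    ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
	    ehdr.e_phnum == 0) {
		printk(KERN_WARNING "Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}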
589 static int __init parse_crash_elf32_headers(void)
598 rc = read_from_oldmem((char*)&ehdr, sizeof(Elf32_Ehdr), &addr, 0);
623 rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
630 rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
635 rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
641 set_vmcore_list_offsets_elf32(elfcorebuf, &vmcore_list);
645 static int __init parse_crash_elf_headers(void)
652 rc = read_from_oldmem(e_ident, EI_NIDENT, &addr, 0);
662 rc = parse_crash_elf64_headers();
667 vmcore_size = get_vmcore_size_elf64(elfcorebuf);
669 rc = parse_crash_elf32_headers();
674 vmcore_size = get_vmcore_size_elf32(elfcorebuf);
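parse_crash_elf_headers() reads only e_ident (line 652) and branches on the ELF class byte, so exactly one of the two parsers above runs. The shape of that dispatch, sketched:

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
		vmcore_size = get_vmcore_size_elf64(elfcorebuf);
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
		vmcore_size = get_vmcore_size_elf32(elfcorebuf);
	} else {
		return -EINVAL;
	}
	return 0;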
684 static int __init vmcore_init(void)
689 if (!(is_vmcore_usable()))
691 rc = parse_crash_elf_headers();
697 proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
699 proc_vmcore->size = vmcore_size;
705 void vmcore_cleanup(void)
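vmcore_cleanup() is the teardown counterpart: once the dump has been saved, it removes the proc entry and frees every struct vmcore collected at init time. A sketch, assuming the proc and list helpers already used above (the real function may release things in a slightly different order):

void vmcore_cleanup(void)
{
	struct list_head *pos, *next;

	if (proc_vmcore) {
		remove_proc_entry("vmcore", NULL);
		proc_vmcore = NULL;
	}

	/* Release every chunk descriptor on vmcore_list. */
	list_for_each_safe(pos, next, &vmcore_list) {
		struct vmcore *m = list_entry(pos, struct vmcore, list);

		list_del(&m->list);
		kfree(m);
	}
	elfcorebuf = NULL;
}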