#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/kexec.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
static u64 mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;

#define efi_call_virt(f, args...)	(*(f))(args)
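/*
 * The STUB_* macros below each generate a wrapper for one EFI runtime
 * service.  Every wrapper saves the ia64 scratch floating-point registers,
 * calls the firmware entry point taken from the runtime services table,
 * restores the registers, and returns the EFI status.  The adjust_arg()
 * hook translates pointer arguments so the same body can be instantiated
 * for physical-mode and virtual-mode firmware calls.
 */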
#define STUB_GET_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_time_cap_t *atc = NULL; \
	efi_status_t ret; \
	if (tc) \
		atc = adjust_arg(tc); \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), \
				adjust_arg(tm), atc); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}
#define STUB_SET_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_time (efi_time_t *tm) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), \
				adjust_arg(tm)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}
#define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, \
			  efi_time_t *tm) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix( \
		(efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \
		adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}
#define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_time_t *atm = NULL; \
	efi_status_t ret; \
	if (tm) \
		atm = adjust_arg(tm); \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix( \
		(efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \
		enabled, atm); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}
#define STUB_GET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \
		       unsigned long *data_size, void *data) \
{ \
	struct ia64_fpreg fr[6]; \
	u32 *aattr = NULL; \
	efi_status_t ret; \
	if (attr) \
		aattr = adjust_arg(attr); \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix( \
		(efi_get_variable_t *) __va(runtime->get_variable), \
		adjust_arg(name), adjust_arg(vendor), aattr, \
		adjust_arg(data_size), adjust_arg(data)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}
#define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, \
			    efi_guid_t *vendor) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix( \
		(efi_get_next_variable_t *) __va(runtime->get_next_variable), \
		adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}
#define STUB_SET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, \
		       u32 attr, unsigned long data_size, \
		       void *data) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix( \
		(efi_set_variable_t *) __va(runtime->set_variable), \
		adjust_arg(name), adjust_arg(vendor), attr, data_size, \
		adjust_arg(data)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}
#define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_high_mono_count (u32 *count) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \
				__va(runtime->get_next_high_mono_count), \
				adjust_arg(count)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}
#define STUB_RESET_SYSTEM(prefix, adjust_arg) \
static void \
prefix##_reset_system (int reset_type, efi_status_t status, \
		       unsigned long data_size, efi_char16_t *data) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_char16_t *adata = NULL; \
	if (data) \
		adata = adjust_arg(data); \
	ia64_save_scratch_fpregs(fr); \
	efi_call_##prefix( \
		(efi_reset_system_t *) __va(runtime->reset_system), \
		reset_type, status, data_size, adata); \
	ia64_load_scratch_fpregs(fr); \
}
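/*
 * Pointer-translation helper used by the physical-mode stub instances:
 * ia64_tpa() converts a kernel virtual address to its physical equivalent
 * while __typeof__ preserves the pointer's type.
 */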
#define phys_ptr(arg)	((__typeof__(arg)) ia64_tpa(arg))
		memset(ts, 0, sizeof(*ts));
#define efi_md_size(md)	(md->num_pages << EFI_PAGE_SHIFT)
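/*
 * kern_memmap is a table of kernel memory descriptors terminated by a
 * sentinel entry whose start field is ~0UL; the walkers below stop when
 * they reach it.
 */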
	for (k = kern_memmap; k->start != ~0UL; k++) {
		if ((*callback)(start + voff, end + voff, arg) < 0)
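/*
 * Locate the EFI_PAL_CODE memory descriptor.  More than one PAL code
 * range is unexpected, and the whole region must fit within a single
 * granule because it is pinned with one ITR translation (hence the
 * IA64_GRANULE_SHIFT mask and the panic below).
 */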
	void *efi_map_start, *efi_map_end, *p;
	int pal_code_count = 0;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		if (++pal_code_count > 1) {

		panic("Whoa! PAL code size bigger than a granule!");

	mask = ~((1 << IA64_GRANULE_SHIFT) - 1);
	       "[0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
		sum = (u8) (sum + *(buffer++));

static void __init handle_palo(unsigned long palo_phys)

	checksum = palo_checksum((u8 *)palo, palo->length);

	psr = ia64_clear_ic();
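/*
 * efi_init(): parse the "mem=", "max_addr=" and "min_addr=" boot options,
 * sanity-check the EFI system table signature and revision, print the
 * firmware vendor string, and scan the configuration tables for entries
 * such as the SAL system table and the PALO table.
 */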
	void *efi_map_start, *efi_map_end;
	unsigned long palo_phys;

		if (memcmp(cp, "mem=", 4) == 0) {
		} else if (memcmp(cp, "max_addr=", 9) == 0) {
		} else if (memcmp(cp, "min_addr=", 9) == 0) {
			while (*cp != ' ' && *cp)

	if (max_addr != ~0UL)

		panic("Whoa! Can't find EFI system table.\n");
		panic("Whoa! EFI system table signature incorrect\n");
	       "%d.%02d, expected 1.00 or greater\n",

	for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)

			printk(" SALsystab=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid,
			palo_phys = config_tables[i].table;

	handle_palo(palo_phys);
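/*
 * Dump the boot-time EFI memory map: one line per descriptor, with the
 * size scaled to the largest convenient unit (the >> 40 / >> 30 / >> 20
 * tests pick TB, GB or MB).
 */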
	for (i = 0, p = efi_map_start; p < efi_map_end;
	     ++i, p += efi_desc_size)

		if ((size >> 40) > 0) {
		} else if ((size >> 30) > 0) {
		} else if ((size >> 20) > 0) {

		printk("mem%02d: type=%2u, attr=0x%016lx, "
		       "range=[0x%016lx-0x%016lx) (%4lu%s)\n",
	void *efi_map_start, *efi_map_end, *p;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {

		       "virtual mode (status=%lu)\n", status);
	void *efi_map_start, *efi_map_end, *p;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
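/*
 * Descriptor lookup helpers: find the kernel memory descriptor or the EFI
 * memory descriptor that covers a given physical address, and test whether
 * a physical range overlaps any descriptor in the EFI memory map.
 */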
kern_memory_descriptor (unsigned long phys_addr)

	for (md = kern_memmap; md->start != ~0UL; md++) {
efi_memory_descriptor (unsigned long phys_addr)

	void *efi_map_start, *efi_map_end, *p;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
efi_memmap_intersects (unsigned long phys_addr, unsigned long size)

	void *efi_map_start, *efi_map_end, *p;

	end = phys_addr + size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		if (md->phys_addr < end && efi_md_end(md) > phys_addr)
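/*
 * The attribute checks walk consecutive descriptors so that the whole
 * range [phys_addr, phys_addr + size) is verified, not just the first
 * descriptor that contains phys_addr.
 */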
	unsigned long end = phys_addr + size;

		unsigned long md_end = efi_md_end(md);
		md = efi_memory_descriptor(md_end);
	unsigned long end = phys_addr + size;

	md = kern_memory_descriptor(phys_addr);

		unsigned long md_end = kmd_end(md);
		md = kern_memory_descriptor(md_end);
	if (efi_memmap_intersects(phys_addr, size))
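/*
 * Inspect the "ConOut" EFI variable to see what the console output device
 * is.  The ASCII variable name is converted to UTF-16 in place (the & 0x7f
 * masks each character), and the returned device path is then walked node
 * by node.
 */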
	char *s, name[] = "ConOut";
	unsigned char data[1024];
	unsigned long size = sizeof(data);

		*utf16++ = *s++ & 0x7f;

	while (hdr < end_addr) {
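/*
 * find_memmap_space(): scan the EFI memory map for a contiguous run of
 * write-back cacheable memory large enough to hold the kernel's own
 * memory-descriptor array, clipping the candidate range against min_addr,
 * max_addr and the mem= limit.
 */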
	u64 contig_low = 0, contig_high = 0;
	void *efi_map_start, *efi_map_end, *p, *q;
	u64 space_needed, efi_desc_size;
	unsigned long total_mem = 0;

	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {

		if (pmd == NULL || !efi_wb(pmd) ||
			contig_high = efi_md_end(md);
			for (q = p + efi_desc_size; q < efi_map_end;
			     q += efi_desc_size) {
				if (!efi_wb(check_md))
				if (contig_high != check_md->phys_addr)
				contig_high = efi_md_end(check_md);

		ae = min(contig_high, efi_md_end(md));
		as = max(as, min_addr);

		if (total_mem + (ae - as) > mem_limit)
			ae -= total_mem + (ae - as) - mem_limit;

		if (ae - as > space_needed)

	if (p >= efi_map_end)
		panic("Can't allocate space for kernel memory descriptors");
	u64 contig_low = 0, contig_high = 0;
	void *efi_map_start, *efi_map_end, *p, *q;
	unsigned long total_mem = 0;

	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {

		if (pmd == NULL || !efi_wb(pmd) ||
			contig_high = efi_md_end(md);
			for (q = p + efi_desc_size; q < efi_map_end;
			     q += efi_desc_size) {
				if (!efi_wb(check_md))
				if (contig_high != check_md->phys_addr)
				contig_high = efi_md_end(check_md);

		if (!is_memory_available(md))
#ifdef CONFIG_CRASH_DUMP

			lim = min(efi_md_end(md), contig_low);
			if (k > kern_memmap &&

		if (efi_md_end(md) > contig_high) {
			if (lim == md->phys_addr && k > kern_memmap &&

		ae = efi_md_end(md);

		as = max(as, min_addr);
		ae = min(ae, max_addr);

		if (total_mem + (ae - as) > mem_limit)
			ae -= total_mem + (ae - as) - mem_limit;

		if (prev && kmd_end(prev) == md->phys_addr) {
			total_mem += ae - as;
		total_mem += ae - as;

	*s = (u64)kern_memmap;
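/*
 * Register the EFI memory ranges in the iomem resource tree, naming each
 * resource after its EFI type ("System RAM", "System ROM", "Uncached RAM",
 * "ACPI Non-volatile Storage", ...).  Each entry gets a kzalloc'd
 * struct resource; an allocation failure is reported by the printk below.
 */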
	void *efi_map_start, *efi_map_end, *p;
	unsigned long flags;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {

			name = "System ROM";
			name = "Uncached RAM";
			name = "System RAM";
			name = "ACPI Non-volatile Storage";

		if ((res = kzalloc(sizeof(struct resource),
			       "failed to allocate resource for iomem\n");
	void *efi_map_start, *efi_map_end, *p;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {

		end = efi_md_end(md);
		for (i = 0; i < n; i++) {
			if (__pa(r[i].start) >= start && __pa(r[i].end) < end) {
				if (__pa(r[i].start) > start + size)
				start = ALIGN(__pa(r[i].end), alignment);
				    __pa(r[i+1].start) < start + size)

		if (end > start + size)

	       "Cannot reserve 0x%lx byte of memory for crashdump\n", size);
#ifdef CONFIG_CRASH_DUMP

	void *efi_map_start, *efi_map_end, *p;
	unsigned long ret = 0;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {