#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include <asm/patch.h>
#include <asm/unaligned.h>
#define ARCH_MODULE_DEBUG 0

#if ARCH_MODULE_DEBUG
# define DEBUGP printk
#else
# define DEBUGP(fmt , a...)
#endif
#define MAX_LTOFF	((uint64_t) (1 << 22))	/* max. allowable linkage-table offset */
#define FORMAT_SHIFT	0
#define FORMAT_BITS	3
#define FORMAT_MASK	((1 << FORMAT_BITS) - 1)
#define VALUE_SHIFT	3
#define VALUE_BITS	5
#define VALUE_MASK	((1 << VALUE_BITS) - 1)
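/*
 * An IA-64 relocation type encodes two independent things: the low FORMAT_BITS
 * select the target *format* (which instruction immediate or data word gets
 * patched) and the next VALUE_BITS select the *formula* used to compute the
 * value (direct, @gprel, @ltoff, @pltoff, @fptr, pc-relative, ...).  do_reloc()
 * below decodes r_type with these shift/mask pairs.
 */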
#define N(reloc)	[R_IA64_##reloc] = #reloc

static const char *reloc_name[256] = {
	N(NONE),		N(IMM14),		N(IMM22),		N(IMM64),
	N(DIR32MSB),		N(DIR32LSB),		N(DIR64MSB),		N(DIR64LSB),
	N(GPREL22),		N(GPREL64I),		N(GPREL32MSB),		N(GPREL32LSB),
	N(GPREL64MSB),		N(GPREL64LSB),		N(LTOFF22),		N(LTOFF64I),
	N(PLTOFF22),		N(PLTOFF64I),		N(PLTOFF64MSB),		N(PLTOFF64LSB),
	N(FPTR64I),		N(FPTR32MSB),		N(FPTR32LSB),		N(FPTR64MSB),
	N(FPTR64LSB),		N(PCREL60B),		N(PCREL21B),		N(PCREL21M),
	N(PCREL21F),		N(PCREL32MSB),		N(PCREL32LSB),		N(PCREL64MSB),
	N(PCREL64LSB),		N(LTOFF_FPTR22),	N(LTOFF_FPTR64I),	N(LTOFF_FPTR32MSB),
	N(LTOFF_FPTR32LSB),	N(LTOFF_FPTR64MSB),	N(LTOFF_FPTR64LSB),	N(SEGREL32MSB),
	N(SEGREL32LSB),		N(SEGREL64MSB),		N(SEGREL64LSB),		N(SECREL32MSB),
	N(SECREL32LSB),		N(SECREL64MSB),		N(SECREL64LSB),		N(REL32MSB),
	N(REL32LSB),		N(REL64MSB),		N(REL64LSB),		N(LTV32MSB),
	N(LTV32LSB),		N(LTV64MSB),		N(LTV64LSB),		N(PCREL21BI),
	N(PCREL22),		N(PCREL64I),		N(IPLTMSB),		N(IPLTLSB),
	N(COPY),		N(LTOFF22X),		N(LDXMOV),		N(TPREL14),
	N(TPREL22),		N(TPREL64I),		N(TPREL64MSB),		N(TPREL64LSB),
	N(LTOFF_TPREL22),	N(DTPMOD64MSB),		N(DTPMOD64LSB),		N(LTOFF_DTPMOD22),
	N(DTPREL14),		N(DTPREL22),		N(DTPREL64I),		N(DTPREL32MSB),
	N(DTPREL32LSB),		N(DTPREL64MSB),		N(DTPREL64LSB),		N(LTOFF_DTPREL22)
};

#undef N
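/*
 * IA-64 instructions are packed three to a 16-byte bundle.  A (struct insn *)
 * below is really a bundle address with the slot number (0-2) in its low bits;
 * bundle() and slot() split those apart, and the apply_imm*() helpers patch
 * the various immediate encodings in place via the ia64_patch*() routines.
 */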
static inline uint64_t
bundle (const struct insn *insn)
{
	return (uint64_t) insn & ~0xfUL;
}

static inline int
slot (const struct insn *insn)
{
	return (uint64_t) insn & 0x3;
}

static int
apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (slot(insn) != 2) {
		printk(KERN_ERR "%s: invalid slot number %d for IMM64\n", mod->name, slot(insn));
		return 0;
	}
	ia64_patch_imm64((u64) insn, val);
	return 1;
}

static int
apply_imm60 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (slot(insn) != 2) {
		printk(KERN_ERR "%s: invalid slot number %d for IMM60\n", mod->name, slot(insn));
		return 0;
	}
	if (val + ((uint64_t) 1 << 59) >= (1UL << 60)) {
		printk(KERN_ERR "%s: value %ld out of IMM60 range\n", mod->name, (long) val);
		return 0;
	}
	ia64_patch_imm60((u64) insn, val);
	return 1;
}

static int
apply_imm22 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (val + (1 << 21) >= (1 << 22)) {
		printk(KERN_ERR "%s: value %li out of IMM22 range\n", mod->name, (long)val);
		return 0;
	}
	ia64_patch((u64) insn, 0x01fffcfe000UL, (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
						 | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
						 | ((val & 0x00ff80UL) << 20) /* bit  7 -> 27 */
						 | ((val & 0x00007fUL) << 13) /* bit  0 -> 13 */));
	return 1;
}

static int
apply_imm21b (struct module *mod, struct insn *insn, uint64_t val)
{
	if (val + (1 << 20) >= (1 << 21)) {
		printk(KERN_ERR "%s: value %li out of IMM21b range\n", mod->name, (long)val);
		return 0;
	}
	ia64_patch((u64) insn, 0x11ffffe000UL, (  ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
						| ((val & 0x0fffffUL) << 13) /* bit  0 -> 13 */));
	return 1;
}
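/*
 * A PLT stub is used when a call cannot be resolved directly: the target lives
 * outside the module (its function descriptor is dereferenced to get the real
 * ip/gp), or a 21-bit pc-relative branch between the init and core images
 * would not reach.  Two stub flavours follow: with USE_BRL, a two-bundle stub
 * that sets gp and uses a long branch (brl); otherwise a three-bundle stub
 * that avoids brl by branching indirectly through b6.  plt_target() recovers
 * the target address from an already-patched stub so get_plt() can reuse
 * existing entries.
 */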
#ifdef CONFIG_ITANIUM
# define USE_BRL	0
#else
# define USE_BRL	1
#endif

#if USE_BRL

struct plt_entry {
	/* Two 16-byte instruction bundles per PLT entry: */
	unsigned char bundle[2][16];
};

static const struct plt_entry ia64_plt_template = {
	{
		{
			0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*	      movl gp=TARGET_GP */
			0x00, 0x00, 0x00, 0x60
		},
		{
			0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*	      brl.many TARGET_IP */
			0x08, 0x00, 0x00, 0xc0
		}
	}
};

static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
	if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_gp)
	    && apply_imm60(mod, (struct insn *) (plt->bundle[1] + 2),
			   (target_ip - (int64_t) plt->bundle[1]) / 16))
		return 1;
	return 0;
}
unsigned long
plt_target (struct plt_entry *plt)
{
	uint64_t b0, b1, *b = (uint64_t *) plt->bundle[1];
	long off;

	b0 = b[0]; b1 = b[1];
	off = (  ((b1 & 0x00fffff000000000UL) >> 36)		/* imm20b -> bit 0 */
	       | ((b0 >> 48) << 20) | ((b1 & 0x7fffffUL) << 36)	/* imm39 -> bit 20 */
	       | ((b1 & 0x0800000000000000UL) << 0));		/* i -> bit 59 */
	return (long) plt->bundle[1] + 16*off;
}

#else /* !USE_BRL */
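/*
 * Fallback PLT layout for configurations that avoid brl (USE_BRL == 0, i.e.
 * CONFIG_ITANIUM): two movl bundles load the target ip into r16 and set gp,
 * then a third bundle branches indirectly through b6.
 */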
struct plt_entry {
	/* Three 16-byte instruction bundles per PLT entry: */
	unsigned char bundle[3][16];
};

static const struct plt_entry ia64_plt_template = {
	{
		{
			0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*	      movl r16=TARGET_IP */
			0x02, 0x00, 0x00, 0x60
		},
		{
			0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*	      movl gp=TARGET_GP */
			0x00, 0x00, 0x00, 0x60
		},
		{
			0x11, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MIB] nop.m 0 */
			0x60, 0x80, 0x04, 0x80, 0x03, 0x00, /*	      mov b6=r16 */
			0x60, 0x00, 0x80, 0x00		    /*	      br.few b6 */
		}
	}
};

static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
	if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_ip)
	    && apply_imm64(mod, (struct insn *) (plt->bundle[1] + 2), target_gp))
		return 1;
	return 0;
}
unsigned long
plt_target (struct plt_entry *plt)
{
	uint64_t b0, b1, *b = (uint64_t *) plt->bundle[0];

	b0 = b[0]; b1 = b[1];
	return (  ((b1 & 0x000007f000000000) >> 36)		/* imm7b -> bit 0 */
		| ((b1 & 0x07fc000000000000) >> 43)		/* imm9d -> bit 7 */
		| ((b1 & 0x0003e00000000000) >> 29)		/* imm5c -> bit 16 */
		| ((b1 & 0x0000100000000000) >> 23)		/* ic -> bit 21 */
		| ((b0 >> 46) << 22) | ((b1 & 0x7fffff) << 40)	/* imm41 -> bit 22 */
		| ((b1 & 0x0800000000000000) <<  4));		/* i -> bit 63 */
}

#endif /* !USE_BRL */
void
module_free (struct module *mod, void *module_region)
{
	if (mod && mod->arch.init_unw_table &&
	    module_region == mod->module_init) {
		unw_remove_unwind_table(mod->arch.init_unw_table);
		mod->arch.init_unw_table = NULL;
	}
	vfree(module_region);
}
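/*
 * Section sizing: before the module image is allocated we count how many GOT
 * entries, PLT entries and function descriptors the relocations may require,
 * so that module_frob_arch_sections() can size .got, .core.plt, .init.plt and
 * .opd up front.  duplicate_reloc() keeps a symbol/addend pair from being
 * counted twice.
 */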
static int
duplicate_reloc (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		if (rela[i].r_info == rela[num].r_info && rela[i].r_addend == rela[num].r_addend)
			return 1;
	}
	return 0;
}
static unsigned int
count_gots (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		      case R_IA64_LTOFF22: case R_IA64_LTOFF22X: case R_IA64_LTOFF64I:
		      case R_IA64_LTOFF_FPTR22: case R_IA64_LTOFF_FPTR64I:
		      case R_IA64_LTOFF_FPTR32MSB: case R_IA64_LTOFF_FPTR32LSB:
		      case R_IA64_LTOFF_FPTR64MSB: case R_IA64_LTOFF_FPTR64LSB:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}
static unsigned int
count_plts (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		      case R_IA64_PCREL21B: case R_IA64_PLTOFF22: case R_IA64_PLTOFF64I:
		      case R_IA64_PLTOFF64MSB: case R_IA64_PLTOFF64LSB:
		      case R_IA64_IPLTMSB: case R_IA64_IPLTLSB:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}
static unsigned int
count_fdescs (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		      case R_IA64_FPTR64I: case R_IA64_FPTR32LSB: case R_IA64_FPTR32MSB:
		      case R_IA64_FPTR64LSB: case R_IA64_FPTR64MSB:
		      case R_IA64_LTOFF_FPTR22: case R_IA64_LTOFF_FPTR32LSB:
		      case R_IA64_LTOFF_FPTR32MSB: case R_IA64_LTOFF_FPTR64I:
		      case R_IA64_LTOFF_FPTR64LSB: case R_IA64_LTOFF_FPTR64MSB:
		      case R_IA64_IPLTMSB: case R_IA64_IPLTLSB:
		      /* PCREL21B jumps between init and core may need an fdesc + PLT: */
		      case R_IA64_PCREL21B:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}
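/*
 * module_frob_arch_sections() runs before the module image is allocated: it
 * locates the IA-64 specific sections (.core.plt, .init.plt, .got, .opd,
 * .IA_64.unwind) and sets their sizes from the counts above; the entries
 * themselves are filled in lazily by get_ltoff()/get_plt()/get_fdesc() while
 * the relocations are applied.
 */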
int
module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
			   struct module *mod)
{
	unsigned long core_plts = 0, init_plts = 0, gots = 0, fdescs = 0;
	Elf64_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;

	/* Locate the architecture-specific sections: */
	for (s = sechdrs; s < sechdrs_end; ++s)
		if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
			mod->arch.core_plt = s;
		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
			mod->arch.init_plt = s;
		else if (strcmp(".got", secstrings + s->sh_name) == 0)
			mod->arch.got = s;
		else if (strcmp(".opd", secstrings + s->sh_name) == 0)
			mod->arch.opd = s;
		else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0)
			mod->arch.unwind = s;
#ifdef CONFIG_PARAVIRT
		else if (strcmp(".paravirt_bundles",
				secstrings + s->sh_name) == 0)
			mod->arch.paravirt_bundles = s;
		else if (strcmp(".paravirt_insts",
				secstrings + s->sh_name) == 0)
			mod->arch.paravirt_insts = s;
#endif

	if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) {
		printk(KERN_ERR "%s: sections missing\n", mod->name);
		return -ENOEXEC;
	}

	/* GOT and PLTs can occur in any relocated section: */
	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
		const Elf64_Rela *rels = (void *)ehdr + s->sh_offset;
		unsigned long numrels = s->sh_size/sizeof(Elf64_Rela);

		if (s->sh_type != SHT_RELA)
			continue;

		gots += count_gots(rels, numrels);
		fdescs += count_fdescs(rels, numrels);
		if (strstr(secstrings + s->sh_name, ".init"))
			init_plts += count_plts(rels, numrels);
		else
			core_plts += count_plts(rels, numrels);
	}

	mod->arch.core_plt->sh_type = SHT_NOBITS;
	mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.core_plt->sh_addralign = 16;
	mod->arch.core_plt->sh_size = core_plts * sizeof(struct plt_entry);
	mod->arch.init_plt->sh_type = SHT_NOBITS;
	mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.init_plt->sh_addralign = 16;
	mod->arch.init_plt->sh_size = init_plts * sizeof(struct plt_entry);
	mod->arch.got->sh_type = SHT_NOBITS;
	mod->arch.got->sh_flags = ARCH_SHF_SMALL | SHF_ALLOC;
	mod->arch.got->sh_addralign = 8;
	mod->arch.got->sh_size = gots * sizeof(struct got_entry);
	mod->arch.opd->sh_type = SHT_NOBITS;
	mod->arch.opd->sh_flags = SHF_ALLOC;
	mod->arch.opd->sh_addralign = 8;
	mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc);
	DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n",
	       __func__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
	       mod->arch.got->sh_size, mod->arch.opd->sh_size);
	return 0;
}
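/*
 * in_init()/in_core() classify an address as belonging to the module's init or
 * core image.  Several decisions below (branch reach for PCREL21B, @fptr
 * handling, unwind-table splitting) depend on which image an address is in.
 */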
static inline int
in_init (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->module_init < mod->init_size;
}

static inline int
in_core (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->module_core < mod->core_size;
}

static inline int
is_internal (const struct module *mod, uint64_t value)
{
	return in_init(mod, value) || in_core(mod, value);
}
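/*
 * get_ltoff() returns the gp-relative offset of the linkage-table (GOT) slot
 * holding "value", allocating a new 8-byte entry on first use.  Only the slot,
 * not the symbol itself, has to be gp-addressable, which is what makes @ltoff
 * references to arbitrary addresses work.
 */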
static uint64_t
get_ltoff (struct module *mod, uint64_t value, int *okp)
{
	struct got_entry *got, *e;

	if (!*okp)
		return 0;

	got = (void *) mod->arch.got->sh_addr;
	for (e = got; e < got + mod->arch.next_got_entry; ++e)
		if (e->val == value)
			goto found;
	/* not there yet: take the next free slot (BUG if .got was sized too small) */
	BUG_ON(e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size));
	e->val = value;
	++mod->arch.next_got_entry;
  found:
	return (uint64_t) e - mod->arch.gp;
}
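/*
 * gp_addressable() below tests whether a value lies within the MAX_LTOFF-sized
 * window centred on the module's gp, i.e. whether it can be reached with a
 * gp-relative immediate.  get_plt() finds or creates a PLT stub for a function
 * descriptor: it reads the target ip/gp out of the descriptor, scans the
 * existing stubs via plt_target(), and patches a fresh copy of
 * ia64_plt_template when no match is found.
 */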
static inline int
gp_addressable (struct module *mod, uint64_t value)
{
	return value - mod->arch.gp + MAX_LTOFF/2 < MAX_LTOFF;
}

/* Get PC-relative PLT entry for this value.  Returns 0 on failure. */
static uint64_t
get_plt (struct module *mod, const struct insn *insn, uint64_t value, int *okp)
{
	struct plt_entry *plt, *plt_end;
	uint64_t target_ip, target_gp;

	if (!*okp)
		return 0;

	if (in_init(mod, (uint64_t) insn)) {
		plt = (void *) mod->arch.init_plt->sh_addr;
		plt_end = (void *) plt + mod->arch.init_plt->sh_size;
	} else {
		plt = (void *) mod->arch.core_plt->sh_addr;
		plt_end = (void *) plt + mod->arch.core_plt->sh_size;
	}

	/* "value" is a pointer to a function-descriptor; fetch the target ip/gp from it: */
	target_ip = ((uint64_t *) value)[0];
	target_gp = ((uint64_t *) value)[1];

	/* Look for an existing PLT entry: */
	while (plt->bundle[0][0]) {
		if (plt_target(plt) == target_ip)
			goto found;
		if (++plt >= plt_end)
			BUG();
	}
	*plt = ia64_plt_template;
	if (!patch_plt(mod, plt, target_ip, target_gp)) {
		*okp = 0;
		return 0;
	}
#if ARCH_MODULE_DEBUG
	if (plt_target(plt) != target_ip) {
		printk("%s: mistargeted PLT: wanted %lx, got %lx\n",
		       __func__, target_ip, plt_target(plt));
		*okp = 0;
		return 0;
	}
#endif
  found:
	return (uint64_t) plt;
}
/* Get function descriptor for VALUE. */
static uint64_t
get_fdesc (struct module *mod, uint64_t value, int *okp)
{
	struct fdesc *fdesc = (void *) mod->arch.opd->sh_addr;

	if (!*okp)
		return 0;
	if (!is_internal(mod, value))
		/* not module-local: "value" already points to a function descriptor */
		return value;

	/* Look for an existing function descriptor: */
	while (fdesc->ip) {
		if (fdesc->ip == value)
			return (uint64_t) fdesc;
		if ((uint64_t) ++fdesc >= mod->arch.opd->sh_addr + mod->arch.opd->sh_size)
			BUG();
	}
	/* Create a new one: */
	fdesc->ip = value;
	fdesc->gp = mod->arch.gp;
	return (uint64_t) fdesc;
}
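/*
 * do_reloc() is the workhorse: it splits r_type into a value formula and a
 * target format, computes the value (going through the GOT, a PLT stub or a
 * function descriptor where the formula requires it) and finally patches the
 * instruction or data word at "location" with the matching apply_imm*() helper
 * or put_unaligned().
 */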
static inline int
do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
	  Elf64_Shdr *sec, void *location)
{
	enum reloc_target_format format = (r_type >> FORMAT_SHIFT) & FORMAT_MASK;
	enum reloc_value_formula formula = (r_type >> VALUE_SHIFT) & VALUE_MASK;
	uint64_t val = sym->st_value + addend;
	int ok = 1;

	switch (formula) {
	      case RV_SEGREL:	/* segment base is arbitrarily chosen to be 0 for kernel modules */
	      case RV_DIRECT:
		break;

	      case RV_GPREL:	  val -= mod->arch.gp; break;
	      case RV_LTREL:	  val = get_ltoff(mod, val, &ok); break;
	      case RV_PLTREL:	  val = get_plt(mod, location, val, &ok); break;
	      case RV_FPTR:	  val = get_fdesc(mod, val, &ok); break;
	      case RV_SECREL:	  val -= sec->sh_addr; break;
	      case RV_LTREL_FPTR: val = get_ltoff(mod, get_fdesc(mod, val, &ok), &ok); break;

	      case RV_PCREL:
		switch (r_type) {
		      case R_IA64_PCREL21B:
			if ((in_init(mod, val) && in_core(mod, (uint64_t)location)) ||
			    (in_core(mod, val) && in_init(mod, (uint64_t)location))) {
				/* init may be allocated far from core: if the branch
				   won't reach, go through an fdesc and a PLT stub */
				uint64_t delta = ((int64_t)val - (int64_t)location) / 16;
				if (delta + (1 << 20) >= (1 << 21)) {
					val = get_fdesc(mod, val, &ok);
					val = get_plt(mod, location, val, &ok);
				}
			} else if (!is_internal(mod, val))
				val = get_plt(mod, location, val, &ok);
			/* FALL THROUGH */
		      default:
			val -= bundle(location);
			break;
		}
		switch (r_type) {
		      case R_IA64_PCREL60B: format = RF_INSN60; break;
		      case R_IA64_PCREL21B: format = RF_INSN21B; break;
		      case R_IA64_PCREL21M: format = RF_INSN21M; break;
		      case R_IA64_PCREL21F: format = RF_INSN21F; break;
		      default: break;
		}
		break;
	      case RV_BDREL:
		val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
		break;

	      case RV_PCREL2:
		if (r_type == R_IA64_PCREL21BI) {
			if (!is_internal(mod, val)) {
				printk(KERN_ERR "%s: %s reloc against non-local symbol (%lx)\n",
				       __func__, reloc_name[r_type], (unsigned long)val);
				return -ENOEXEC;
			}
			format = RF_INSN21B;
		}
		val -= bundle(location);
		break;

	      case RV_SPECIAL:
		switch (r_type) {
		      case R_IA64_IPLTMSB:
		      case R_IA64_IPLTLSB:
			val = get_fdesc(mod, get_plt(mod, location, val, &ok), &ok);
			format = (r_type == R_IA64_IPLTMSB) ? RF_64MSB : RF_64LSB;
			break;
		      case R_IA64_LTOFF22X:
			if (gp_addressable(mod, val))
				val -= mod->arch.gp;
			else
				val = get_ltoff(mod, val, &ok);
			format = RF_INSN22;
			break;
		      case R_IA64_LDXMOV:
			if (gp_addressable(mod, val)) {
				/* turn "ld8" into "mov": */
				DEBUGP("%s: patching ld8 at %p to mov\n", __func__, location);
				ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL);
			}
			return 0;
		      default:
			if (reloc_name[r_type])
				printk(KERN_ERR "%s: special reloc %s not supported",
				       mod->name, reloc_name[r_type]);
			else
				printk(KERN_ERR "%s: unknown special reloc %x\n", mod->name, r_type);
			return -ENOEXEC;
		}
		break;

	      case RV_TPREL: case RV_LTREL_TPREL: case RV_DTPMOD:
	      case RV_LTREL_DTPMOD: case RV_DTPREL: case RV_LTREL_DTPREL:
		printk(KERN_ERR "%s: %s reloc not supported\n",
		       mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?");
		return -ENOEXEC;

	      default:
		printk(KERN_ERR "%s: unknown reloc %x\n", mod->name, r_type);
		return -ENOEXEC;
	}

	if (!ok)
		return -ENOEXEC;

	DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __func__, location, val,
	       reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend);

	switch (format) {
	      case RF_INSN21B:	ok = apply_imm21b(mod, location, (int64_t) val / 16); break;
	      case RF_INSN22:	ok = apply_imm22(mod, location, val); break;
	      case RF_INSN64:	ok = apply_imm64(mod, location, val); break;
	      case RF_INSN60:	ok = apply_imm60(mod, location, (int64_t) val / 16); break;
	      case RF_32LSB:	put_unaligned(val, (uint32_t *) location); break;
	      case RF_64LSB:	put_unaligned(val, (uint64_t *) location); break;
	      case RF_32MSB:	/* ia64 Linux is little-endian... */
	      case RF_64MSB:	/* ia64 Linux is little-endian... */
	      case RF_INSN14:	/* must be within-module, i.e., resolved by "ld -r" */
	      case RF_INSN21M:	/* must be within-module, i.e., resolved by "ld -r" */
	      case RF_INSN21F:	/* must be within-module, i.e., resolved by "ld -r" */
		printk(KERN_ERR "%s: format %u needed by %s reloc is not supported\n",
		       mod->name, format, reloc_name[r_type] ? reloc_name[r_type] : "?");
		return -ENOEXEC;

	      default:
		printk(KERN_ERR "%s: relocation %s resulted in unknown format %u\n",
		       mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?", format);
		return -ENOEXEC;
	}
	return ok ? 0 : -ENOEXEC;
}
int
apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
		    unsigned int relsec, struct module *mod)
{
	unsigned int i, n = sechdrs[relsec].sh_size / sizeof(Elf64_Rela);
	Elf64_Rela *rela = (void *) sechdrs[relsec].sh_addr;
	Elf64_Shdr *target_sec;
	int ret;

	DEBUGP("%s: applying section %u (%u relocs) to %u\n", __func__,
	       relsec, n, sechdrs[relsec].sh_info);

	target_sec = sechdrs + sechdrs[relsec].sh_info;

	if (target_sec->sh_entsize == ~0UL)
		/* non-allocated target section (e.g., debug info): nothing to relocate */
		return 0;

	if (!mod->arch.gp) {
		/* place gp so the MAX_LTOFF window covers the small-data/GOT area: */
		uint64_t gp;
		if (mod->core_size > MAX_LTOFF)
			gp = mod->core_size - MAX_LTOFF / 2;
		else
			gp = mod->core_size / 2;
		gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
		mod->arch.gp = gp;
		DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
	}

	for (i = 0; i < n; i++) {
		ret = do_reloc(mod, ELF64_R_TYPE(rela[i].r_info),
			       ((Elf64_Sym *) sechdrs[symindex].sh_addr
				+ ELF64_R_SYM(rela[i].r_info)),
			       rela[i].r_addend, target_sec,
			       (void *) target_sec->sh_addr + rela[i].r_offset);
		if (ret < 0)
			return ret;
	}
	return 0;
}
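/*
 * A module carries a single .IA_64.unwind table that covers both its init and
 * core text, but the two images are not contiguous.  register_unwind_table()
 * therefore sorts the entries, splits them into an init part and a core part,
 * and registers each separately so that the init part can be removed again
 * when the init text is freed (see module_free() and module_arch_cleanup()).
 */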
static void
register_unwind_table (struct module *mod)
{
	struct unw_table_entry *start = (void *) mod->arch.unwind->sh_addr;
	struct unw_table_entry *end = start + mod->arch.unwind->sh_size / sizeof (*start);
	struct unw_table_entry tmp, *e1, *e2, *core, *init;
	unsigned long num_init = 0, num_core = 0;

	/* First, count how many init and core unwind-table entries there are: */
	for (e1 = start; e1 < end; ++e1)
		if (in_init(mod, e1->start_offset))
			++num_init;
		else
			++num_core;
	/* Second, sort the entries so the init and core parts are separated
	   (a simple bubble sort; unwind tables don't get ridiculously big): */
	for (e1 = start; e1 < end; ++e1) {
		for (e2 = e1 + 1; e2 < end; ++e2) {
			if (e2->start_offset < e1->start_offset) {
				tmp = *e1;
				*e1 = *e2;
				*e2 = tmp;
			}
		}
	}
	/* Third, locate the init and core segments in the sorted table: */
	if (in_init(mod, start->start_offset)) {
		init = start;
		core = start + num_init;
	} else {
		core = start;
		init = start + num_core;
	}

	DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __func__,
	       mod->name, mod->arch.gp, num_init, num_core);

	/* Fourth, register both tables (if not empty): */
	if (num_core > 0) {
		mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
								core, core + num_core);
		DEBUGP("%s:  core: handle=%p [%p-%p)\n", __func__,
		       mod->arch.core_unw_table, core, core + num_core);
	}
	if (num_init > 0) {
		mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
								init, init + num_init);
		DEBUGP("%s:  init: handle=%p [%p-%p)\n", __func__,
		       mod->arch.init_unw_table, init, init + num_init);
	}
}
int
module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
{
	DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
	if (mod->arch.unwind)
		register_unwind_table(mod);
#ifdef CONFIG_PARAVIRT
	if (mod->arch.paravirt_bundles) {
		struct paravirt_patch_site_bundle *start = (void *)
			mod->arch.paravirt_bundles->sh_addr;
		struct paravirt_patch_site_bundle *end = (void *)
			(mod->arch.paravirt_bundles->sh_addr +
			 mod->arch.paravirt_bundles->sh_size);
		paravirt_patch_apply_bundle(start, end);
	}
	if (mod->arch.paravirt_insts) {
		struct paravirt_patch_site_inst *start = (void *)
			mod->arch.paravirt_insts->sh_addr;
		struct paravirt_patch_site_inst *end = (void *)
			(mod->arch.paravirt_insts->sh_addr +
			 mod->arch.paravirt_insts->sh_size);
		paravirt_patch_apply_inst(start, end);
	}
#endif
	return 0;
}
void
module_arch_cleanup (struct module *mod)
{
	if (mod->arch.init_unw_table)
		unw_remove_unwind_table(mod->arch.init_unw_table);
	if (mod->arch.core_unw_table)
		unw_remove_unwind_table(mod->arch.core_unw_table);
}