11 #include <asm/cacheflush.h>
12 #include <linux/netdevice.h>
13 #include <linux/filter.h>
/* Append "len" (1..4) instruction bytes, packed little-endian into one
 * integer, to the JIT output via emit_code(); "prog" is advanced. */
51 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
/* Convenience wrappers: pack 1..4 literal opcode bytes, lowest byte first. */
53 #define EMIT1(b1) EMIT(b1, 1)
54 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
55 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
56 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
/* One opcode byte followed by a raw 32-bit immediate/displacement. */
57 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
/* 0x31 0xc0 = "xor %eax,%eax": zero the BPF A accumulator (kept in eax). */
59 #define CLEAR_A() EMIT2(0x31, 0xc0)
/* 0x31 0xdb = "xor %ebx,%ebx": zero the BPF X index register (kept in ebx). */
60 #define CLEAR_X() EMIT2(0x31, 0xdb)
/*
 * is_imm8 - check whether @value fits a signed 8-bit x86 immediate or
 * displacement (imm8/disp8, range -128..127), allowing the shorter
 * instruction encodings to be emitted.
 *
 * The surrounding braces were lost in this extracted fragment; the
 * function is reconstructed here with the visible return expression
 * unchanged.
 */
static inline bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}
/*
 * is_near - check whether a jump displacement fits a rel8 operand
 * (-128..127), i.e. whether the short form of jmp/jcc can be used
 * (see EMIT_JMP / EMIT_COND_JMP).
 *
 * NOTE(review): the body is not visible in this extracted fragment; it
 * is reconstructed to mirror is_imm8() -- confirm against the full file.
 */
static inline bool is_near(int offset)
{
	return offset <= 127 && offset >= -128;
}
/* Unconditional jump: short form "eb rel8" when the displacement fits in
 * 8 bits, otherwise near form "e9 rel32".  (Fragmented macro -- the
 * do/while wrapper and else-branch lines are not visible in this chunk.) */
72 #define EMIT_JMP(offset) \
75 if (is_near(offset)) \
76 EMIT2(0xeb, offset); \
78 EMIT1_off32(0xe9, offset); \
/* Conditional jump: short "op rel8" when near, otherwise the two-byte
 * near form "0f (op+0x10) rel32" (near Jcc opcode = short Jcc + 0x10). */
92 #define EMIT_COND_JMP(op, offset) \
94 if (is_near(offset)) \
97 EMIT2(0x0f, op + 0x10); \
/* Select branch opcodes for a BPF conditional CODE.  NOTE(review): TOP/FOP
 * presumably stand for the true-branch/false-branch opcodes -- confirm
 * against the full macro body, which is not visible here. */
102 #define COND_SEL(CODE, TOP, FOP) \
/* Flag bit recorded in "seen".  NOTE(review): presumably marks programs
 * that dereference packet data -- confirm against the full file. */
109 #define SEEN_DATAREF 1
/*
 * bpf_flush_icache - called on [start, end) after the generated image has
 * been written (see the call at the end of the compile function), so the
 * CPU will fetch the fresh instructions.
 * NOTE(review): the body is not visible in this extracted chunk.
 */
113 static inline void bpf_flush_icache(
void *
start,
void *
end)
/* Pick the skb-load helper for constant offset K: for negative K, use the
 * negative-offset variant when K >= SKF_LL_OFF, otherwise fall back to the
 * generic "func"; non-negative K always uses the positive-offset variant. */
123 #define CHOOSE_LOAD_FUNC(K, func) \
124 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
/*
 * Fragment of the classic-BPF JIT compiler main function (x86-64).
 * NOTE(review): this chunk is a sparse extraction -- the function
 * signature and most of the body are missing, so the annotations below
 * cover only the surviving lines.  Code is left byte-identical.
 */
130 unsigned int proglen, oldproglen = 0; /* bytes emitted this pass / previous pass */
132 int t_offset, f_offset;  /* jump displacements for true/false branches */
133 u8 t_op, f_op, seen = 0, pass;  /* branch opcodes, feature flags, pass counter */
137 unsigned int cleanup_addr;  /* offset of the shared epilogue (set from proglen below) */
/* Initial sizing walk over all filter instructions. */
152 for (proglen = 0, i = 0; i < flen; i++) {
156 cleanup_addr = proglen;
/* Iterate until the code layout converges, capped at 10 passes. */
158 for (pass = 0; pass < 10; pass++) {
165 EMIT4(0x55, 0x48, 0x89, 0xe5);  /* push %rbp; mov %rsp,%rbp (prologue) */
166 EMIT4(0x48, 0x83, 0xec, 96);    /* sub $96,%rsp: stack scratch area */
169 EMIT4(0x48, 0x89, 0x5d, 0xf8);  /* mov %rbx,-8(%rbp): save callee-saved rbx */
185 EMIT3(0x44, 0x8b, 0x8f);        /* mov disp32(%rdi),%r9d (disp32 follows) */
192 EMIT3(0x44, 0x2b, 0x8f);        /* sub disp32(%rdi),%r9d */
201 EMIT3(0x4c, 0x8b, 0x87);        /* mov disp32(%rdi),%r8 */
/* Peephole on the program's first instruction (fragment). */
207 switch (filter[0].
code) {
/* Main translation loop: one classic-BPF instruction per iteration. */
226 for (i = 0; i < flen; i++) {
227 unsigned int K = filter[
i].
k;
229 switch (filter[i].code) {
238 EMIT3(0x83, 0xc0, K);           /* add $K,%eax (imm8 form) */
250 EMIT3(0x83, 0xe8, K);           /* sub $K,%eax (imm8 form) */
256 EMIT3(0x0f, 0xaf, 0xc3);        /* imul %ebx,%eax */
260 EMIT3(0x6b, 0xc0, K);           /* imul $K,%eax,%eax (imm8 form) */
281 EMIT4(0x31, 0xd2, 0xf7, 0xf3);  /* xor %edx,%edx; div %ebx */
309 EMIT3(0x48, 0x69, 0xc0);        /* imul $imm32,%rax,%rax (imm32 follows) */
311 EMIT4(0x48, 0xc1, 0xe8, 0x20);  /* shr $32,%rax */
/* AND: choose the shortest encoding the shape of K allows. */
318 if (K >= 0xFFFFFF00) {
319 EMIT2(0x24, K & 0xFF);          /* and $imm8,%al */
320 }
else if (K >= 0xFFFF0000) {
333 EMIT3(0x83, 0xc8, K);           /* or $K,%eax (imm8 form) */
346 EMIT3(0x83, 0xf0, K);           /* xor $K,%eax (imm8 form) */
352 EMIT4(0x89, 0xd9, 0xd3, 0xe0);  /* mov %ebx,%ecx; shl %cl,%eax */
360 EMIT3(0xc1, 0xe0, K);           /* shl $K,%eax */
364 EMIT4(0x89, 0xd9, 0xd3, 0xe8);  /* mov %ebx,%ecx; shr %cl,%eax */
372 EMIT3(0xc1, 0xe8, K);           /* shr $K,%eax */
393 EMIT4(0x48, 0x8b, 0x5d, 0xf8);  /* mov -8(%rbp),%rbx: restore before return */
/* Scratch memory M[K] lives on the stack: disp8 = 0xf0 - 4K, i.e. -16-4K(%rbp). */
421 EMIT3(0x8b, 0x45, 0xf0 - K*4);  /* mov -16-4K(%rbp),%eax */
425 EMIT3(0x8b, 0x5d, 0xf0 - K*4);  /* mov -16-4K(%rbp),%ebx */
429 EMIT3(0x89, 0x45, 0xf0 - K*4);  /* mov %eax,-16-4K(%rbp) */
433 EMIT3(0x89, 0x5d, 0xf0 - K*4);  /* mov %ebx,-16-4K(%rbp) */
461 EMIT3(0x0f, 0xb7, 0x87);        /* movzwl disp32(%rdi),%eax */
471 EMIT3(0x48, 0x8b, 0x87);        /* mov disp32(%rdi),%rax */
474 EMIT3(0x48, 0x85, 0xc0);        /* test %rax,%rax */
506 EMIT3(0x0f, 0xb7, 0x87);        /* movzwl disp32(%rdi),%eax */
/* Per-CPU read: "mov %gs:addr32,%eax"; the absolute address of cpu_number
 * is emitted as the following 32-bit operand. */
512 EMIT4(0x65, 0x8b, 0x04, 0x25);
513 EMIT((
u32)(
unsigned long)&cpu_number, 4);
/* Relative displacement from the current insn to an out-of-line helper. */
521 t_offset = func - (image + addrs[
i]);
534 t_offset = func - (image + addrs[
i]);
541 t_offset = func - (image + addrs[
i]);
544 EMIT3(0x8d, 0x73, K);           /* lea K(%rbx),%esi */
556 goto common_load_ind;
559 goto common_load_ind;
/* Unconditional JA: displacement between the resolved insn addresses. */
561 t_offset = addrs[i +
K] - addrs[
i];
/* Conditional branches: compute both target displacements up front. */
573 cond_branch: f_offset = addrs[i + filter[
i].
jf] - addrs[
i];
574 t_offset = addrs[i + filter[
i].
jt] - addrs[
i];
/* jt == jf: the branch goes the same way regardless of the test result. */
577 if (filter[i].
jt == filter[i].
jf) {
582 switch (filter[i].code) {
601 EMIT3(0x83, 0xf8, K);           /* cmp $K,%eax (imm8 form) */
/* JSET whose mask only touches bits 8..15: test %ah with one byte. */
608 else if (!(K & 0xFFFF00FF))
609 EMIT3(0xf6, 0xc4, K >> 8);      /* test $imm8,%ah */
610 else if (K <= 0xFFFF) {
/* The true-branch displacement must skip the false-branch jump that is
 * emitted first: 2 bytes for the short form, 5 for the near form. */
618 if (filter[i].
jt != 0) {
619 if (filter[i].jf && f_offset)
620 t_offset += is_near(f_offset) ? 2 : 5;
/* Safety check: a later pass must never produce more code than the
 * image sized from the previous pass. */
634 if (
unlikely(proglen + ilen > oldproglen)) {
/* NOTE(review): "bpb" looks like a typo for "bpf" in this message. */
635 pr_err(
"bpb_jit_compile fatal error\n");
640 memcpy(image + proglen, temp, ilen);
/* Epilogue entry is one byte before the end -- presumably the final ret. */
649 cleanup_addr = proglen - 1;
656 if (proglen != oldproglen)
657 pr_err(
"bpb_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
/* Layout converged: the image can be emitted/used (fragment). */
660 if (proglen == oldproglen) {
667 oldproglen = proglen;
/* Debug dump of the generated image. */
670 pr_err(
"flen=%d proglen=%u pass=%d image=%p\n",
671 flen, proglen, pass, image);
676 16, 1, image, proglen,
false);
/* Make sure the CPU sees the freshly written instructions. */
678 bpf_flush_icache(image, image + proglen);