bpf_jit_comp.c
/* bpf_jit_comp.c: BPF JIT compiler for PPC64
 *
 * Copyright 2011 Matt Evans <[email protected]>, IBM Corporation
 *
 * Based on the x86 BPF compiler, by Eric Dumazet ([email protected])
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include "bpf_jit.h"

#ifndef __BIG_ENDIAN
/* There are endianness assumptions herein. */
#error "Little-endian PPC not supported in BPF compiler"
#endif

int bpf_jit_enable __read_mostly;

static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
				   struct codegen_context *ctx)
{
	int i;
	const struct sock_filter *filter = fp->insns;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		/* Make stackframe */
		if (ctx->seen & SEEN_DATAREF) {
			/* If we call any helpers (for loads), save LR */
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_STD(0, 1, 16);

			/* Back up non-volatile regs. */
			PPC_STD(r_D, 1, -(8*(32-r_D)));
			PPC_STD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/*
			 * Conditionally save regs r15-r31 as some will be used
			 * for M[] data.
			 */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_STD(i, 1, -(8*(32-i)));
			}
		}
		EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) |
		     (-BPF_PPC_STACKFRAME & 0xfffc));
	}

	if (ctx->seen & SEEN_DATAREF) {
		/*
		 * If this filter needs to access skb data,
		 * prepare r_D and r_HL:
		 *  r_HL = skb->len - skb->data_len
		 *  r_D  = skb->data
		 */
		PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
							 data_len));
		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
		PPC_SUB(r_HL, r_HL, r_scratch1);
		PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
	}
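	/*
	 * Illustrative note (not in the original source): the two values
	 * prepared above describe the skb's linear region, i.e. in C:
	 *
	 *	unsigned int headlen = skb->len - skb->data_len;
	 *	unsigned char *data  = skb->data;
	 *
	 * headlen is what skb_headlen() returns; packet loads below that
	 * bound can be served straight off r_D, while anything beyond it
	 * must go through the out-of-line load helpers.
	 */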

	if (ctx->seen & SEEN_XREG) {
		/*
		 * TODO: Could also detect whether first instr. sets X and
		 * avoid this (as below, with A).
		 */
		PPC_LI(r_X, 0);
	}

	switch (filter[0].code) {
	case BPF_S_RET_K:
	case BPF_S_LD_W_LEN:
	case BPF_S_ANC_PROTOCOL:
	case BPF_S_ANC_IFINDEX:
	case BPF_S_ANC_MARK:
	case BPF_S_ANC_RXHASH:
	case BPF_S_ANC_CPU:
	case BPF_S_ANC_QUEUE:
	case BPF_S_LD_W_ABS:
	case BPF_S_LD_H_ABS:
	case BPF_S_LD_B_ABS:
		/* first instruction sets A register (or is RET 'constant') */
		break;
	default:
		/* make sure we don't leak kernel information to user */
		PPC_LI(r_A, 0);
	}
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_DATAREF) {
			PPC_LD(0, 1, 16);
			PPC_MTLR(0);
			PPC_LD(r_D, 1, -(8*(32-r_D)));
			PPC_LD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/* Restore any saved non-vol registers */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_LD(i, 1, -(8*(32-i)));
			}
		}
	}
	/* The RETs have left a return value in R3. */

	PPC_BLR();
}

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
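/*
 * Illustrative note (not in the original source): the macro picks one of
 * three helper entry points based on the sign of the constant offset K,
 * e.g. for a word load:
 *
 *	CHOOSE_LOAD_FUNC(22, sk_load_word)
 *		-> sk_load_word_positive_offset   (K >= 0, packet data)
 *	CHOOSE_LOAD_FUNC(SKF_NET_OFF + 4, sk_load_word)
 *		-> sk_load_word_negative_offset   (SKF_LL_OFF <= K < 0)
 *
 * A negative K below SKF_LL_OFF falls back to the generic sk_load_word,
 * which rejects the out-of-range offset at run time.
 */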

/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;
	u8 *func;
	unsigned int true_cond;
	int i;

	/* Start of epilogue code */
	unsigned int exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		unsigned int K = filter[i].k;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;
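		/*
		 * Illustrative note (not in the original source): ctx->idx
		 * counts 32-bit instruction words emitted so far, so if the
		 * first two bytecode insns assemble to one and three PPC
		 * instructions respectively, addrs[] begins 0, 4, 16, ...
		 */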

		switch (filter[i].code) {
			/*** ALU ops ***/
		case BPF_S_ALU_ADD_X: /* A += X; */
			ctx->seen |= SEEN_XREG;
			PPC_ADD(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_ADD_K: /* A += K; */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(K));
			break;
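		/*
		 * Illustrative note (not in the original source): addi takes
		 * a sign-extended 16-bit immediate, so IMM_HA pre-adjusts the
		 * high half to compensate. E.g. K = 0x00018000:
		 *	IMM_L(K)  = 0x8000 -> addi adds -0x8000
		 *	IMM_HA(K) = 0x0002 -> addis adds 0x20000
		 * net effect: 0x20000 - 0x8000 = 0x18000 = K.
		 */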
		case BPF_S_ALU_SUB_X: /* A -= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SUB(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_SUB_K: /* A -= K */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(-K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
			break;
		case BPF_S_ALU_MUL_X: /* A *= X; */
			ctx->seen |= SEEN_XREG;
			PPC_MUL(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_MUL_K: /* A *= K */
			if (K < 32768)
				PPC_MULI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_MUL(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_S_ALU_DIV_X: /* A /= X; */
			ctx->seen |= SEEN_XREG;
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/*
				 * Exit, returning 0; first pass hits here
				 * (longer worst-case code size).
				 */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			PPC_DIVWU(r_A, r_A, r_X);
			break;
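		/*
		 * Illustrative note (not in the original source): the +12 in
		 * the short branch above is measured from the bcc itself, so
		 * it skips the bcc plus the li/jmp pair and lands on the
		 * divwu when X is non-zero; when X is zero we fall into
		 * li r_ret,0 and jump to the epilogue, matching the
		 * interpreter's divide-by-zero behaviour of returning 0.
		 */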
		case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
			PPC_LI32(r_scratch1, K);
			/* Top 32 bits of 64bit result -> A */
			PPC_MULHWU(r_A, r_A, r_scratch1);
			break;
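		/*
		 * Illustrative note (not in the original source): by the time
		 * the JIT runs, sk_chk_filter() has already replaced the
		 * divisor in K with its 2^32-scaled reciprocal, so
		 *	A / d  ==  (u32)(((u64)A * K) >> 32)
		 * which is exactly the high word that mulhwu computes.
		 */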
		case BPF_S_ALU_AND_X:
			ctx->seen |= SEEN_XREG;
			PPC_AND(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_AND_K:
			if (!IMM_H(K))
				PPC_ANDI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_AND(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_S_ALU_OR_X:
			ctx->seen |= SEEN_XREG;
			PPC_OR(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_OR_K:
			if (IMM_L(K))
				PPC_ORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_ORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_S_ALU_LSH_X: /* A <<= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SLW(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_LSH_K:
			if (K == 0)
				break;
			else
				PPC_SLWI(r_A, r_A, K);
			break;
		case BPF_S_ALU_RSH_X: /* A >>= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SRW(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_RSH_K: /* A >>= K; */
			if (K == 0)
				break;
			else
				PPC_SRWI(r_A, r_A, K);
			break;
		case BPF_S_ALU_NEG:
			PPC_NEG(r_A, r_A);
			break;
		case BPF_S_RET_K:
			PPC_LI32(r_ret, K);
			if (!K) {
				if (ctx->pc_ret0 == -1)
					ctx->pc_ret0 = i;
			}
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue if we have stuff to clean up.
			 * Otherwise, if there's nothing to tidy, just return.
			 * If we /are/ the last instruction, we're about to
			 * fall through to the epilogue to return.
			 */
			if (i != flen - 1) {
				/*
				 * Note: 'seen' is properly valid only on pass
				 * #2. Both parts of this conditional are the
				 * same instruction size though, meaning the
				 * first pass will still correctly determine
				 * the code size/addresses.
				 */
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_S_RET_A:
			PPC_MR(r_ret, r_A);
			if (i != flen - 1) {
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_S_MISC_TAX: /* X = A */
			PPC_MR(r_X, r_A);
			break;
		case BPF_S_MISC_TXA: /* A = X */
			ctx->seen |= SEEN_XREG;
			PPC_MR(r_A, r_X);
			break;

			/*** Constant loads/M[] access ***/
		case BPF_S_LD_IMM: /* A = K */
			PPC_LI32(r_A, K);
			break;
		case BPF_S_LDX_IMM: /* X = K */
			PPC_LI32(r_X, K);
			break;
		case BPF_S_LD_MEM: /* A = mem[K] */
			PPC_MR(r_A, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_LDX_MEM: /* X = mem[K] */
			PPC_MR(r_X, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_ST: /* mem[K] = A */
			PPC_MR(r_M + (K & 0xf), r_A);
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_STX: /* mem[K] = X */
			PPC_MR(r_M + (K & 0xf), r_X);
			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
			break;
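		/*
		 * Illustrative note (not in the original source): the BPF
		 * scratch store M[] never touches memory here; with r_M = 16
		 * (see bpf_jit.h), M[0] lives in r16 and M[15] in r31. The
		 * per-slot bits OR'd into ctx->seen are what drive the
		 * conditional save/restore loops in the prologue/epilogue.
		 */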
		case BPF_S_LD_W_LEN: /* A = skb->len; */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
		case BPF_S_LDX_W_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;

			/*** Ancillary info loads ***/

			/* None of the BPF_S_ANC* codes appear to be passed by
			 * sk_chk_filter(). The interpreter and the x86 BPF
			 * compiler implement them so we do too -- they may be
			 * planted in the future.
			 */
		case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  protocol));
			/* ntohs is a NOP with BE loads. */
			break;
		case BPF_S_ANC_IFINDEX:
			PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
								dev));
			PPC_CMPDI(r_scratch1, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/* Exit, returning 0; first pass hits here. */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			PPC_LWZ_OFFS(r_A, r_scratch1,
				     offsetof(struct net_device, ifindex));
			break;
		case BPF_S_ANC_MARK:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  mark));
			break;
		case BPF_S_ANC_RXHASH:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  rxhash));
			break;
		case BPF_S_ANC_QUEUE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  queue_mapping));
			break;
		case BPF_S_ANC_CPU:
#ifdef CONFIG_SMP
			/*
			 * PACA ptr is r13:
			 * raw_smp_processor_id() = local_paca->paca_index
			 */
			BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
						  paca_index) != 2);
			PPC_LHZ_OFFS(r_A, 13,
				     offsetof(struct paca_struct, paca_index));
#else
			PPC_LI(r_A, 0);
#endif
			break;

			/*** Absolute loads from packet header/data ***/
		case BPF_S_LD_W_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
			goto common_load;
		case BPF_S_LD_H_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
			goto common_load;
		case BPF_S_LD_B_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
		common_load:
			/* Load from [K]. */
			ctx->seen |= SEEN_DATAREF;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_LI32(r_addr, K);
			PPC_BLRL();
			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in r3
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;
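		/*
		 * Illustrative note (not in the original source): the call
		 * sequence above is "li64 helper; mtlr; blrl" -- the helper
		 * address travels via the link register rather than a
		 * relative bl, since module_alloc() may place the JIT image
		 * beyond bl's +/-32MB reach from the fixed helper code. On
		 * return, cr0.LT signals failure and we bail to the epilogue
		 * with the helper's return value already in r3.
		 */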

			/*** Indirect loads from packet header/data ***/
		case BPF_S_LD_W_IND:
			func = sk_load_word;
			goto common_load_ind;
		case BPF_S_LD_H_IND:
			func = sk_load_half;
			goto common_load_ind;
		case BPF_S_LD_B_IND:
			func = sk_load_byte;
		common_load_ind:
			/*
			 * Load from [X + K]. Negative offsets are tested for
			 * in the helper functions.
			 */
			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_ADDI(r_addr, r_X, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
			PPC_BLRL();
			/* If error, cr0.LT set */
			PPC_BCC(COND_LT, exit_addr);
			break;

		case BPF_S_LDX_B_MSH:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
			goto common_load;
			break;

			/*** Jump and branches ***/
		case BPF_S_JMP_JA:
			if (K != 0)
				PPC_JMP(addrs[i + 1 + K]);
			break;

		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_S_JMP_JSET_K:
		case BPF_S_JMP_JSET_X:
			true_cond = COND_NE;
			/* Fall through */
		cond_branch:
			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				if (filter[i].jt > 0)
					PPC_JMP(addrs[i + 1 + filter[i].jt]);
				break;
			}

			switch (filter[i].code) {
			case BPF_S_JMP_JGT_X:
			case BPF_S_JMP_JGE_X:
			case BPF_S_JMP_JEQ_X:
				ctx->seen |= SEEN_XREG;
				PPC_CMPLW(r_A, r_X);
				break;
			case BPF_S_JMP_JSET_X:
				ctx->seen |= SEEN_XREG;
				PPC_AND_DOT(r_scratch1, r_A, r_X);
				break;
			case BPF_S_JMP_JEQ_K:
			case BPF_S_JMP_JGT_K:
			case BPF_S_JMP_JGE_K:
				if (K < 32768)
					PPC_CMPLWI(r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_CMPLW(r_A, r_scratch1);
				}
				break;
			case BPF_S_JMP_JSET_K:
				if (K < 32768)
					/* PPC_ANDI is /only/ dot-form */
					PPC_ANDI(r_scratch1, r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_AND_DOT(r_scratch1, r_A,
						    r_scratch1);
				}
				break;
			}
			/* Sometimes branches are constructed "backward", with
			 * the false path being the branch and true path being
			 * a fallthrough to the next instruction.
			 */
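			/*
			 * Illustrative note (not in the original source):
			 * e.g. JGT with jt == 0, jf == 2 means "if true, fall
			 * through". XORing true_cond with COND_CMP_TRUE flips
			 * the branch-on-true bit, so a single branch on the
			 * *false* condition (here effectively a "ble") goes
			 * to the jf target instead of emitting a conditional
			 * branch plus an unconditional jump.
			 */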
			if (filter[i].jt == 0)
				/* Swap the sense of the branch */
				PPC_BCC(true_cond ^ COND_CMP_TRUE,
					addrs[i + 1 + filter[i].jf]);
			else {
				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
				if (filter[i].jf != 0)
					PPC_JMP(addrs[i + 1 + filter[i].jf]);
			}
			break;
		default:
			/* The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			if (printk_ratelimit())
				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
				       filter[i].code, i);
			return -ENOTSUPP;
		}

	}
	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

void bpf_jit_compile(struct sk_filter *fp)
{
	unsigned int proglen;
	unsigned int alloclen;
	u32 *image = NULL;
	u32 *code_base;
	unsigned int *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen = fp->len;

	if (!bpf_jit_enable)
		return;

	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/*
	 * There are multiple assembly passes as the generated code will change
	 * size as it settles down, figuring out the max branch offsets/exit
	 * paths required.
	 *
	 * The range of standard conditional branches is +/- 32Kbytes. Since
	 * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
	 * finish with 8 bytes/instruction. Not feasible, so long jumps are
	 * used, distinct from short branches.
	 *
	 * Current:
	 *
	 * For now, both branch types assemble to 2 words (short branches padded
	 * with a NOP); this is less efficient, but assembly will always complete
	 * after exactly 3 passes:
	 *
	 * First pass: No code buffer; Program is "faux-generated" -- no code
	 * emitted but maximum size of output determined (and addrs[] filled
	 * in). Also, we note whether we use M[], whether we use skb data, etc.
	 * All generation choices assumed to be 'worst-case', e.g. branches all
	 * far (2 instructions), return path code reduction not available, etc.
	 *
	 * Second pass: Code buffer allocated with size determined previously.
	 * Prologue generated to support features we have seen used. Exit paths
	 * determined and addrs[] is filled in again, as code may be slightly
	 * smaller as a result.
	 *
	 * Third pass: Code generated 'for real', and branch destinations
	 * determined from now-accurate addrs[] map.
	 *
	 * Ideal:
	 *
	 * If we optimise this, near branches will be shorter. On the
	 * first assembly pass, we should err on the side of caution and
	 * generate the biggest code. On subsequent passes, branches will be
	 * generated short or long and code size will reduce. With smaller
	 * code, more branches may fall into the short category, and code will
	 * reduce more.
	 *
	 * Finally, if we see one pass generate code the same size as the
	 * previous pass we have converged and should now generate code for
	 * real. Allocating at the end will also save the memory that would
	 * otherwise be wasted by the (small) current code shrinkage.
	 * Preferably, we should do a small number of passes (e.g. 5) and if we
	 * haven't converged by then, get impatient and force code to generate
	 * as-is, even if the odd branch would be left long. The chances of a
	 * long jump are tiny with all but the most enormous of BPF filter
	 * inputs, so we should usually converge on the third pass.
	 */

	cgctx.idx = 0;
	cgctx.seen = 0;
	cgctx.pc_ret0 = -1;
	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
		/* We hit something illegal or unsupported. */
		goto out;

	/*
	 * Pretend to build prologue, given the features we've seen. This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(fp, 0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;
	image = module_alloc(max_t(unsigned int, alloclen,
				   sizeof(struct work_struct)));
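	/*
	 * Illustrative note (not in the original source): the allocation is
	 * padded to at least sizeof(struct work_struct) because bpf_jit_free()
	 * below reuses this very buffer as a work_struct to defer
	 * module_free() to process context.
	 */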
	if (!image)
		goto out;

	code_base = image + (FUNCTION_DESCR_SIZE/4);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(fp, code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		pr_info("flen=%d proglen=%u pass=%d image=%p\n",
			flen, proglen, pass, image);

	if (image) {
		if (bpf_jit_enable > 1)
			print_hex_dump(KERN_ERR, "JIT code: ",
				       DUMP_PREFIX_ADDRESS,
				       16, 1, code_base,
				       proglen, false);

		bpf_flush_icache(code_base, code_base + (proglen/4));
		/* Function descriptor nastiness: Address + TOC */
		((u64 *)image)[0] = (u64)code_base;
		((u64 *)image)[1] = local_paca->kernel_toc;
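		/*
		 * Illustrative note (not in the original source): under the
		 * ELFv1 PPC64 ABI a function pointer refers to a two-word
		 * descriptor {entry address, TOC pointer} rather than to the
		 * code itself, which is why the image reserves
		 * FUNCTION_DESCR_SIZE bytes ahead of code_base.
		 */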
		fp->bpf_func = (void *)image;
	}
out:
	kfree(addrs);
	return;
}

static void jit_free_defer(struct work_struct *arg)
{
	module_free(NULL, arg);
}

/* Run from softirq; we must use a work_struct to call
 * module_free() from process context.
 */
void bpf_jit_free(struct sk_filter *fp)
{
	if (fp->bpf_func != sk_run_filter) {
		struct work_struct *work = (struct work_struct *)fp->bpf_func;

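		/*
		 * Illustrative note (not in the original source): the JIT
		 * image itself doubles as the work_struct here; the
		 * allocation in bpf_jit_compile() was padded to guarantee
		 * it is large enough.
		 */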
		INIT_WORK(work, jit_free_defer);
		schedule_work(work);
	}
}