Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
bpf_jit_comp.c
Go to the documentation of this file.
1 /*
2  * BPF Jit compiler for s390.
3  *
4  * Copyright IBM Corp. 2012
5  *
6  * Author(s): Martin Schwidefsky <[email protected]>
7  */
8 #include <linux/moduleloader.h>
9 #include <linux/netdevice.h>
10 #include <linux/filter.h>
11 #include <asm/cacheflush.h>
12 #include <asm/processor.h>
13 #include <asm/facility.h>
14 
15 /*
16  * Conventions:
17  * %r2 = skb pointer
18  * %r3 = offset parameter
19  * %r4 = scratch register / length parameter
20  * %r5 = BPF A accumulator
21  * %r8 = return address
22  * %r9 = save register for skb pointer
23  * %r10 = skb->data
24  * %r11 = skb->len - skb->data_len (headlen)
25  * %r12 = BPF X accumulator
26  * %r13 = literal pool pointer
27  * 0(%r15) - 63(%r15) scratch memory array with BPF_MEMWORDS
28  */
30 
31 /*
32  * assembly code in arch/x86/net/bpf_jit.S
33  */
36 
37 struct bpf_jit {
38  unsigned int seen;
40  u8 *prg;
41  u8 *mid;
42  u8 *lit;
43  u8 *end;
47  unsigned int off_load_word;
48  unsigned int off_load_half;
49  unsigned int off_load_byte;
50  unsigned int off_load_bmsh;
51  unsigned int off_load_iword;
52  unsigned int off_load_ihalf;
53  unsigned int off_load_ibyte;
54 };
55 
56 #define BPF_SIZE_MAX 4096 /* Max size for program */
57 
58 #define SEEN_DATAREF 1 /* might call external helpers */
59 #define SEEN_XREG 2 /* ebx is used */
60 #define SEEN_MEM 4 /* use mem[] for temporary storage */
61 #define SEEN_RET0 8 /* pc_ret0 points to a valid return 0 */
62 #define SEEN_LITERAL 16 /* code uses literals */
63 #define SEEN_LOAD_WORD 32 /* code uses sk_load_word */
64 #define SEEN_LOAD_HALF 64 /* code uses sk_load_half */
65 #define SEEN_LOAD_BYTE 128 /* code uses sk_load_byte */
66 #define SEEN_LOAD_BMSH 256 /* code uses sk_load_byte_msh */
67 #define SEEN_LOAD_IWORD 512 /* code uses sk_load_word_ind */
68 #define SEEN_LOAD_IHALF 1024 /* code uses sk_load_half_ind */
69 #define SEEN_LOAD_IBYTE 2048 /* code uses sk_load_byte_ind */
70 
71 #define EMIT2(op) \
72 ({ \
73  if (jit->prg + 2 <= jit->mid) \
74  *(u16 *) jit->prg = op; \
75  jit->prg += 2; \
76 })
77 
78 #define EMIT4(op) \
79 ({ \
80  if (jit->prg + 4 <= jit->mid) \
81  *(u32 *) jit->prg = op; \
82  jit->prg += 4; \
83 })
84 
85 #define EMIT4_DISP(op, disp) \
86 ({ \
87  unsigned int __disp = (disp) & 0xfff; \
88  EMIT4(op | __disp); \
89 })
90 
91 #define EMIT4_IMM(op, imm) \
92 ({ \
93  unsigned int __imm = (imm) & 0xffff; \
94  EMIT4(op | __imm); \
95 })
96 
97 #define EMIT4_PCREL(op, pcrel) \
98 ({ \
99  long __pcrel = ((pcrel) >> 1) & 0xffff; \
100  EMIT4(op | __pcrel); \
101 })
102 
103 #define EMIT6(op1, op2) \
104 ({ \
105  if (jit->prg + 6 <= jit->mid) { \
106  *(u32 *) jit->prg = op1; \
107  *(u16 *) (jit->prg + 4) = op2; \
108  } \
109  jit->prg += 6; \
110 })
111 
112 #define EMIT6_DISP(op1, op2, disp) \
113 ({ \
114  unsigned int __disp = (disp) & 0xfff; \
115  EMIT6(op1 | __disp, op2); \
116 })
117 
118 #define EMIT6_IMM(op, imm) \
119 ({ \
120  unsigned int __imm = (imm); \
121  EMIT6(op | (__imm >> 16), __imm & 0xffff); \
122 })
123 
124 #define EMIT_CONST(val) \
125 ({ \
126  unsigned int ret; \
127  ret = (unsigned int) (jit->lit - jit->base_ip); \
128  jit->seen |= SEEN_LITERAL; \
129  if (jit->lit + 4 <= jit->end) \
130  *(u32 *) jit->lit = val; \
131  jit->lit += 4; \
132  ret; \
133 })
134 
135 #define EMIT_FN_CONST(bit, fn) \
136 ({ \
137  unsigned int ret; \
138  ret = (unsigned int) (jit->lit - jit->base_ip); \
139  if (jit->seen & bit) { \
140  jit->seen |= SEEN_LITERAL; \
141  if (jit->lit + 8 <= jit->end) \
142  *(void **) jit->lit = fn; \
143  jit->lit += 8; \
144  } \
145  ret; \
146 })
147 
148 static void bpf_jit_prologue(struct bpf_jit *jit)
149 {
150  /* Save registers and create stack frame if necessary */
151  if (jit->seen & SEEN_DATAREF) {
152  /* stmg %r8,%r15,88(%r15) */
153  EMIT6(0xeb8ff058, 0x0024);
154  /* lgr %r14,%r15 */
155  EMIT4(0xb90400ef);
156  /* ahi %r15,<offset> */
157  EMIT4_IMM(0xa7fa0000, (jit->seen & SEEN_MEM) ? -112 : -80);
158  /* stg %r14,152(%r15) */
159  EMIT6(0xe3e0f098, 0x0024);
160  } else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
161  /* stmg %r12,%r13,120(%r15) */
162  EMIT6(0xebcdf078, 0x0024);
163  else if (jit->seen & SEEN_XREG)
164  /* stg %r12,120(%r15) */
165  EMIT6(0xe3c0f078, 0x0024);
166  else if (jit->seen & SEEN_LITERAL)
167  /* stg %r13,128(%r15) */
168  EMIT6(0xe3d0f080, 0x0024);
169 
170  /* Setup literal pool */
171  if (jit->seen & SEEN_LITERAL) {
172  /* basr %r13,0 */
173  EMIT2(0x0dd0);
174  jit->base_ip = jit->prg;
175  }
183 
184  /* Filter needs to access skb data */
185  if (jit->seen & SEEN_DATAREF) {
186  /* l %r11,<len>(%r2) */
187  EMIT4_DISP(0x58b02000, offsetof(struct sk_buff, len));
188  /* s %r11,<data_len>(%r2) */
189  EMIT4_DISP(0x5bb02000, offsetof(struct sk_buff, data_len));
190  /* lg %r10,<data>(%r2) */
191  EMIT6_DISP(0xe3a02000, 0x0004,
192  offsetof(struct sk_buff, data));
193  }
194 }
195 
196 static void bpf_jit_epilogue(struct bpf_jit *jit)
197 {
198  /* Return 0 */
199  if (jit->seen & SEEN_RET0) {
200  jit->ret0_ip = jit->prg;
201  /* lghi %r2,0 */
202  EMIT4(0xa7290000);
203  }
204  jit->exit_ip = jit->prg;
205  /* Restore registers */
206  if (jit->seen & SEEN_DATAREF)
207  /* lmg %r8,%r15,<offset>(%r15) */
208  EMIT6_DISP(0xeb8ff000, 0x0004,
209  (jit->seen & SEEN_MEM) ? 200 : 168);
210  else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
211  /* lmg %r12,%r13,120(%r15) */
212  EMIT6(0xebcdf078, 0x0004);
213  else if (jit->seen & SEEN_XREG)
214  /* lg %r12,120(%r15) */
215  EMIT6(0xe3c0f078, 0x0004);
216  else if (jit->seen & SEEN_LITERAL)
217  /* lg %r13,128(%r15) */
218  EMIT6(0xe3d0f080, 0x0004);
219  /* br %r14 */
220  EMIT2(0x07fe);
221 }
222 
223 /*
224  * make sure we dont leak kernel information to user
225  */
226 static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
227 {
228  /* Clear temporary memory if (seen & SEEN_MEM) */
229  if (jit->seen & SEEN_MEM)
230  /* xc 0(64,%r15),0(%r15) */
231  EMIT6(0xd73ff000, 0xf000);
232  /* Clear X if (seen & SEEN_XREG) */
233  if (jit->seen & SEEN_XREG)
234  /* lhi %r12,0 */
235  EMIT4(0xa7c80000);
236  /* Clear A if the first register does not set it. */
237  switch (filter[0].code) {
238  case BPF_S_LD_W_ABS:
239  case BPF_S_LD_H_ABS:
240  case BPF_S_LD_B_ABS:
241  case BPF_S_LD_W_LEN:
242  case BPF_S_LD_W_IND:
243  case BPF_S_LD_H_IND:
244  case BPF_S_LD_B_IND:
245  case BPF_S_LDX_B_MSH:
246  case BPF_S_LD_IMM:
247  case BPF_S_LD_MEM:
248  case BPF_S_MISC_TXA:
249  case BPF_S_ANC_PROTOCOL:
250  case BPF_S_ANC_PKTTYPE:
251  case BPF_S_ANC_IFINDEX:
252  case BPF_S_ANC_MARK:
253  case BPF_S_ANC_QUEUE:
254  case BPF_S_ANC_HATYPE:
255  case BPF_S_ANC_RXHASH:
256  case BPF_S_ANC_CPU:
257  case BPF_S_RET_K:
258  /* first instruction sets A register */
259  break;
260  default: /* A = 0 */
261  /* lhi %r5,0 */
262  EMIT4(0xa7580000);
263  }
264 }
265 
266 static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
267  unsigned int *addrs, int i, int last)
268 {
269  unsigned int K;
270  int offset;
271  unsigned int mask;
272 
273  K = filter->k;
274  switch (filter->code) {
275  case BPF_S_ALU_ADD_X: /* A += X */
276  jit->seen |= SEEN_XREG;
277  /* ar %r5,%r12 */
278  EMIT2(0x1a5c);
279  break;
280  case BPF_S_ALU_ADD_K: /* A += K */
281  if (!K)
282  break;
283  if (K <= 16383)
284  /* ahi %r5,<K> */
285  EMIT4_IMM(0xa75a0000, K);
286  else if (test_facility(21))
287  /* alfi %r5,<K> */
288  EMIT6_IMM(0xc25b0000, K);
289  else
290  /* a %r5,<d(K)>(%r13) */
291  EMIT4_DISP(0x5a50d000, EMIT_CONST(K));
292  break;
293  case BPF_S_ALU_SUB_X: /* A -= X */
294  jit->seen |= SEEN_XREG;
295  /* sr %r5,%r12 */
296  EMIT2(0x1b5c);
297  break;
298  case BPF_S_ALU_SUB_K: /* A -= K */
299  if (!K)
300  break;
301  if (K <= 16384)
302  /* ahi %r5,-K */
303  EMIT4_IMM(0xa75a0000, -K);
304  else if (test_facility(21))
305  /* alfi %r5,-K */
306  EMIT6_IMM(0xc25b0000, -K);
307  else
308  /* s %r5,<d(K)>(%r13) */
309  EMIT4_DISP(0x5b50d000, EMIT_CONST(K));
310  break;
311  case BPF_S_ALU_MUL_X: /* A *= X */
312  jit->seen |= SEEN_XREG;
313  /* msr %r5,%r12 */
314  EMIT4(0xb252005c);
315  break;
316  case BPF_S_ALU_MUL_K: /* A *= K */
317  if (K <= 16383)
318  /* mhi %r5,K */
319  EMIT4_IMM(0xa75c0000, K);
320  else if (test_facility(34))
321  /* msfi %r5,<K> */
322  EMIT6_IMM(0xc2510000, K);
323  else
324  /* ms %r5,<d(K)>(%r13) */
325  EMIT4_DISP(0x7150d000, EMIT_CONST(K));
326  break;
327  case BPF_S_ALU_DIV_X: /* A /= X */
328  jit->seen |= SEEN_XREG | SEEN_RET0;
329  /* ltr %r12,%r12 */
330  EMIT2(0x12cc);
331  /* jz <ret0> */
332  EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
333  /* lhi %r4,0 */
334  EMIT4(0xa7480000);
335  /* dr %r4,%r12 */
336  EMIT2(0x1d4c);
337  break;
338  case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K) */
339  /* m %r4,<d(K)>(%r13) */
340  EMIT4_DISP(0x5c40d000, EMIT_CONST(K));
341  /* lr %r5,%r4 */
342  EMIT2(0x1854);
343  break;
344  case BPF_S_ALU_AND_X: /* A &= X */
345  jit->seen |= SEEN_XREG;
346  /* nr %r5,%r12 */
347  EMIT2(0x145c);
348  break;
349  case BPF_S_ALU_AND_K: /* A &= K */
350  if (test_facility(21))
351  /* nilf %r5,<K> */
352  EMIT6_IMM(0xc05b0000, K);
353  else
354  /* n %r5,<d(K)>(%r13) */
355  EMIT4_DISP(0x5450d000, EMIT_CONST(K));
356  break;
357  case BPF_S_ALU_OR_X: /* A |= X */
358  jit->seen |= SEEN_XREG;
359  /* or %r5,%r12 */
360  EMIT2(0x165c);
361  break;
362  case BPF_S_ALU_OR_K: /* A |= K */
363  if (test_facility(21))
364  /* oilf %r5,<K> */
365  EMIT6_IMM(0xc05d0000, K);
366  else
367  /* o %r5,<d(K)>(%r13) */
368  EMIT4_DISP(0x5650d000, EMIT_CONST(K));
369  break;
370  case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
371  jit->seen |= SEEN_XREG;
372  /* xr %r5,%r12 */
373  EMIT2(0x175c);
374  break;
375  case BPF_S_ALU_LSH_X: /* A <<= X; */
376  jit->seen |= SEEN_XREG;
377  /* sll %r5,0(%r12) */
378  EMIT4(0x8950c000);
379  break;
380  case BPF_S_ALU_LSH_K: /* A <<= K */
381  if (K == 0)
382  break;
383  /* sll %r5,K */
384  EMIT4_DISP(0x89500000, K);
385  break;
386  case BPF_S_ALU_RSH_X: /* A >>= X; */
387  jit->seen |= SEEN_XREG;
388  /* srl %r5,0(%r12) */
389  EMIT4(0x8850c000);
390  break;
391  case BPF_S_ALU_RSH_K: /* A >>= K; */
392  if (K == 0)
393  break;
394  /* srl %r5,K */
395  EMIT4_DISP(0x88500000, K);
396  break;
397  case BPF_S_ALU_NEG: /* A = -A */
398  /* lnr %r5,%r5 */
399  EMIT2(0x1155);
400  break;
401  case BPF_S_JMP_JA: /* ip += K */
402  offset = addrs[i + K] + jit->start - jit->prg;
403  EMIT4_PCREL(0xa7f40000, offset);
404  break;
405  case BPF_S_JMP_JGT_K: /* ip += (A > K) ? jt : jf */
406  mask = 0x200000; /* jh */
407  goto kbranch;
408  case BPF_S_JMP_JGE_K: /* ip += (A >= K) ? jt : jf */
409  mask = 0xa00000; /* jhe */
410  goto kbranch;
411  case BPF_S_JMP_JEQ_K: /* ip += (A == K) ? jt : jf */
412  mask = 0x800000; /* je */
413 kbranch: /* Emit compare if the branch targets are different */
414  if (filter->jt != filter->jf) {
415  if (K <= 16383)
416  /* chi %r5,<K> */
417  EMIT4_IMM(0xa75e0000, K);
418  else if (test_facility(21))
419  /* clfi %r5,<K> */
420  EMIT6_IMM(0xc25f0000, K);
421  else
422  /* c %r5,<d(K)>(%r13) */
423  EMIT4_DISP(0x5950d000, EMIT_CONST(K));
424  }
425 branch: if (filter->jt == filter->jf) {
426  if (filter->jt == 0)
427  break;
428  /* j <jt> */
429  offset = addrs[i + filter->jt] + jit->start - jit->prg;
430  EMIT4_PCREL(0xa7f40000, offset);
431  break;
432  }
433  if (filter->jt != 0) {
434  /* brc <mask>,<jt> */
435  offset = addrs[i + filter->jt] + jit->start - jit->prg;
436  EMIT4_PCREL(0xa7040000 | mask, offset);
437  }
438  if (filter->jf != 0) {
439  /* brc <mask^15>,<jf> */
440  offset = addrs[i + filter->jf] + jit->start - jit->prg;
441  EMIT4_PCREL(0xa7040000 | (mask ^ 0xf00000), offset);
442  }
443  break;
444  case BPF_S_JMP_JSET_K: /* ip += (A & K) ? jt : jf */
445  mask = 0x700000; /* jnz */
446  /* Emit test if the branch targets are different */
447  if (filter->jt != filter->jf) {
448  if (K > 65535) {
449  /* lr %r4,%r5 */
450  EMIT2(0x1845);
451  /* n %r4,<d(K)>(%r13) */
452  EMIT4_DISP(0x5440d000, EMIT_CONST(K));
453  } else
454  /* tmll %r5,K */
455  EMIT4_IMM(0xa7510000, K);
456  }
457  goto branch;
458  case BPF_S_JMP_JGT_X: /* ip += (A > X) ? jt : jf */
459  mask = 0x200000; /* jh */
460  goto xbranch;
461  case BPF_S_JMP_JGE_X: /* ip += (A >= X) ? jt : jf */
462  mask = 0xa00000; /* jhe */
463  goto xbranch;
464  case BPF_S_JMP_JEQ_X: /* ip += (A == X) ? jt : jf */
465  mask = 0x800000; /* je */
466 xbranch: /* Emit compare if the branch targets are different */
467  if (filter->jt != filter->jf) {
468  jit->seen |= SEEN_XREG;
469  /* cr %r5,%r12 */
470  EMIT2(0x195c);
471  }
472  goto branch;
473  case BPF_S_JMP_JSET_X: /* ip += (A & X) ? jt : jf */
474  mask = 0x700000; /* jnz */
475  /* Emit test if the branch targets are different */
476  if (filter->jt != filter->jf) {
477  jit->seen |= SEEN_XREG;
478  /* lr %r4,%r5 */
479  EMIT2(0x1845);
480  /* nr %r4,%r12 */
481  EMIT2(0x144c);
482  }
483  goto branch;
484  case BPF_S_LD_W_ABS: /* A = *(u32 *) (skb->data+K) */
486  offset = jit->off_load_word;
487  goto load_abs;
488  case BPF_S_LD_H_ABS: /* A = *(u16 *) (skb->data+K) */
490  offset = jit->off_load_half;
491  goto load_abs;
492  case BPF_S_LD_B_ABS: /* A = *(u8 *) (skb->data+K) */
494  offset = jit->off_load_byte;
495 load_abs: if ((int) K < 0)
496  goto out;
497 call_fn: /* lg %r1,<d(function)>(%r13) */
498  EMIT6_DISP(0xe310d000, 0x0004, offset);
499  /* l %r3,<d(K)>(%r13) */
500  EMIT4_DISP(0x5830d000, EMIT_CONST(K));
501  /* basr %r8,%r1 */
502  EMIT2(0x0d81);
503  /* jnz <ret0> */
504  EMIT4_PCREL(0xa7740000, (jit->ret0_ip - jit->prg));
505  break;
506  case BPF_S_LD_W_IND: /* A = *(u32 *) (skb->data+K+X) */
508  offset = jit->off_load_iword;
509  goto call_fn;
510  case BPF_S_LD_H_IND: /* A = *(u16 *) (skb->data+K+X) */
512  offset = jit->off_load_ihalf;
513  goto call_fn;
514  case BPF_S_LD_B_IND: /* A = *(u8 *) (skb->data+K+X) */
516  offset = jit->off_load_ibyte;
517  goto call_fn;
518  case BPF_S_LDX_B_MSH:
519  /* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */
520  jit->seen |= SEEN_RET0;
521  if ((int) K < 0) {
522  /* j <ret0> */
523  EMIT4_PCREL(0xa7f40000, (jit->ret0_ip - jit->prg));
524  break;
525  }
526  jit->seen |= SEEN_DATAREF | SEEN_LOAD_BMSH;
527  offset = jit->off_load_bmsh;
528  goto call_fn;
529  case BPF_S_LD_W_LEN: /* A = skb->len; */
530  BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
531  /* l %r5,<d(len)>(%r2) */
532  EMIT4_DISP(0x58502000, offsetof(struct sk_buff, len));
533  break;
534  case BPF_S_LDX_W_LEN: /* X = skb->len; */
535  jit->seen |= SEEN_XREG;
536  /* l %r12,<d(len)>(%r2) */
537  EMIT4_DISP(0x58c02000, offsetof(struct sk_buff, len));
538  break;
539  case BPF_S_LD_IMM: /* A = K */
540  if (K <= 16383)
541  /* lhi %r5,K */
542  EMIT4_IMM(0xa7580000, K);
543  else if (test_facility(21))
544  /* llilf %r5,<K> */
545  EMIT6_IMM(0xc05f0000, K);
546  else
547  /* l %r5,<d(K)>(%r13) */
548  EMIT4_DISP(0x5850d000, EMIT_CONST(K));
549  break;
550  case BPF_S_LDX_IMM: /* X = K */
551  jit->seen |= SEEN_XREG;
552  if (K <= 16383)
553  /* lhi %r12,<K> */
554  EMIT4_IMM(0xa7c80000, K);
555  else if (test_facility(21))
556  /* llilf %r12,<K> */
557  EMIT6_IMM(0xc0cf0000, K);
558  else
559  /* l %r12,<d(K)>(%r13) */
560  EMIT4_DISP(0x58c0d000, EMIT_CONST(K));
561  break;
562  case BPF_S_LD_MEM: /* A = mem[K] */
563  jit->seen |= SEEN_MEM;
564  /* l %r5,<K>(%r15) */
565  EMIT4_DISP(0x5850f000,
566  (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
567  break;
568  case BPF_S_LDX_MEM: /* X = mem[K] */
569  jit->seen |= SEEN_XREG | SEEN_MEM;
570  /* l %r12,<K>(%r15) */
571  EMIT4_DISP(0x58c0f000,
572  (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
573  break;
574  case BPF_S_MISC_TAX: /* X = A */
575  jit->seen |= SEEN_XREG;
576  /* lr %r12,%r5 */
577  EMIT2(0x18c5);
578  break;
579  case BPF_S_MISC_TXA: /* A = X */
580  jit->seen |= SEEN_XREG;
581  /* lr %r5,%r12 */
582  EMIT2(0x185c);
583  break;
584  case BPF_S_RET_K:
585  if (K == 0) {
586  jit->seen |= SEEN_RET0;
587  if (last)
588  break;
589  /* j <ret0> */
590  EMIT4_PCREL(0xa7f40000, jit->ret0_ip - jit->prg);
591  } else {
592  if (K <= 16383)
593  /* lghi %r2,K */
594  EMIT4_IMM(0xa7290000, K);
595  else
596  /* llgf %r2,<K>(%r13) */
597  EMIT6_DISP(0xe320d000, 0x0016, EMIT_CONST(K));
598  /* j <exit> */
599  if (last && !(jit->seen & SEEN_RET0))
600  break;
601  EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
602  }
603  break;
604  case BPF_S_RET_A:
605  /* llgfr %r2,%r5 */
606  EMIT4(0xb9160025);
607  /* j <exit> */
608  EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
609  break;
610  case BPF_S_ST: /* mem[K] = A */
611  jit->seen |= SEEN_MEM;
612  /* st %r5,<K>(%r15) */
613  EMIT4_DISP(0x5050f000,
614  (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
615  break;
616  case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
617  jit->seen |= SEEN_XREG | SEEN_MEM;
618  /* st %r12,<K>(%r15) */
619  EMIT4_DISP(0x50c0f000,
620  (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
621  break;
622  case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
623  BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
624  /* lhi %r5,0 */
625  EMIT4(0xa7580000);
626  /* icm %r5,3,<d(protocol)>(%r2) */
627  EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, protocol));
628  break;
629  case BPF_S_ANC_IFINDEX: /* if (!skb->dev) return 0;
630  * A = skb->dev->ifindex */
631  BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
632  jit->seen |= SEEN_RET0;
633  /* lg %r1,<d(dev)>(%r2) */
634  EMIT6_DISP(0xe3102000, 0x0004, offsetof(struct sk_buff, dev));
635  /* ltgr %r1,%r1 */
636  EMIT4(0xb9020011);
637  /* jz <ret0> */
638  EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
639  /* l %r5,<d(ifindex)>(%r1) */
640  EMIT4_DISP(0x58501000, offsetof(struct net_device, ifindex));
641  break;
642  case BPF_S_ANC_MARK: /* A = skb->mark */
643  BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
644  /* l %r5,<d(mark)>(%r2) */
645  EMIT4_DISP(0x58502000, offsetof(struct sk_buff, mark));
646  break;
647  case BPF_S_ANC_QUEUE: /* A = skb->queue_mapping */
648  BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
649  /* lhi %r5,0 */
650  EMIT4(0xa7580000);
651  /* icm %r5,3,<d(queue_mapping)>(%r2) */
652  EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, queue_mapping));
653  break;
654  case BPF_S_ANC_HATYPE: /* if (!skb->dev) return 0;
655  * A = skb->dev->type */
656  BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
657  jit->seen |= SEEN_RET0;
658  /* lg %r1,<d(dev)>(%r2) */
659  EMIT6_DISP(0xe3102000, 0x0004, offsetof(struct sk_buff, dev));
660  /* ltgr %r1,%r1 */
661  EMIT4(0xb9020011);
662  /* jz <ret0> */
663  EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
664  /* lhi %r5,0 */
665  EMIT4(0xa7580000);
666  /* icm %r5,3,<d(type)>(%r1) */
667  EMIT4_DISP(0xbf531000, offsetof(struct net_device, type));
668  break;
669  case BPF_S_ANC_RXHASH: /* A = skb->rxhash */
670  BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
671  /* l %r5,<d(rxhash)>(%r2) */
672  EMIT4_DISP(0x58502000, offsetof(struct sk_buff, rxhash));
673  break;
674  case BPF_S_ANC_CPU: /* A = smp_processor_id() */
675 #ifdef CONFIG_SMP
676  /* l %r5,<d(cpu_nr)> */
677  EMIT4_DISP(0x58500000, offsetof(struct _lowcore, cpu_nr));
678 #else
679  /* lhi %r5,0 */
680  EMIT4(0xa7580000);
681 #endif
682  break;
683  default: /* too complex, give up */
684  goto out;
685  }
686  addrs[i] = jit->prg - jit->start;
687  return 0;
688 out:
689  return -1;
690 }
691 
693 {
694  unsigned long size, prg_len, lit_len;
695  struct bpf_jit jit, cjit;
696  unsigned int *addrs;
697  int pass, i;
698 
699  if (!bpf_jit_enable)
700  return;
701  addrs = kmalloc(fp->len * sizeof(*addrs), GFP_KERNEL);
702  if (addrs == NULL)
703  return;
704  memset(addrs, 0, fp->len * sizeof(*addrs));
705  memset(&jit, 0, sizeof(cjit));
706  memset(&cjit, 0, sizeof(cjit));
707 
708  for (pass = 0; pass < 10; pass++) {
709  jit.prg = jit.start;
710  jit.lit = jit.mid;
711 
712  bpf_jit_prologue(&jit);
713  bpf_jit_noleaks(&jit, fp->insns);
714  for (i = 0; i < fp->len; i++) {
715  if (bpf_jit_insn(&jit, fp->insns + i, addrs, i,
716  i == fp->len - 1))
717  goto out;
718  }
719  bpf_jit_epilogue(&jit);
720  if (jit.start) {
721  WARN_ON(jit.prg > cjit.prg || jit.lit > cjit.lit);
722  if (memcmp(&jit, &cjit, sizeof(jit)) == 0)
723  break;
724  } else if (jit.prg == cjit.prg && jit.lit == cjit.lit) {
725  prg_len = jit.prg - jit.start;
726  lit_len = jit.lit - jit.mid;
727  size = max_t(unsigned long, prg_len + lit_len,
728  sizeof(struct work_struct));
729  if (size >= BPF_SIZE_MAX)
730  goto out;
731  jit.start = module_alloc(size);
732  if (!jit.start)
733  goto out;
734  jit.prg = jit.mid = jit.start + prg_len;
735  jit.lit = jit.end = jit.start + prg_len + lit_len;
736  jit.base_ip += (unsigned long) jit.start;
737  jit.exit_ip += (unsigned long) jit.start;
738  jit.ret0_ip += (unsigned long) jit.start;
739  }
740  cjit = jit;
741  }
742  if (bpf_jit_enable > 1) {
743  pr_err("flen=%d proglen=%lu pass=%d image=%p\n",
744  fp->len, jit.end - jit.start, pass, jit.start);
745  if (jit.start) {
746  printk(KERN_ERR "JIT code:\n");
747  print_fn_code(jit.start, jit.mid - jit.start);
748  print_hex_dump(KERN_ERR, "JIT literals:\n",
749  DUMP_PREFIX_ADDRESS, 16, 1,
750  jit.mid, jit.end - jit.mid, false);
751  }
752  }
753  if (jit.start)
754  fp->bpf_func = (void *) jit.start;
755 out:
756  kfree(addrs);
757 }
758 
759 static void jit_free_defer(struct work_struct *arg)
760 {
761  module_free(NULL, arg);
762 }
763 
764 /* run from softirq, we must use a work_struct to call
765  * module_free() from process context
766  */
767 void bpf_jit_free(struct sk_filter *fp)
768 {
769  struct work_struct *work;
770 
771  if (fp->bpf_func == sk_run_filter)
772  return;
773  work = (struct work_struct *)fp->bpf_func;
774  INIT_WORK(work, jit_free_defer);
775  schedule_work(work);
776 }