Linux Kernel
3.7.1
arch/powerpc/net/bpf_jit.h
/* bpf_jit.h: BPF JIT compiler for PPC64
 *
 * Copyright 2011 Matt Evans <[email protected]>, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#ifndef _BPF_JIT_H
#define _BPF_JIT_H

#define BPF_PPC_STACK_LOCALS	32
#define BPF_PPC_STACK_BASIC	(48+64)
#define BPF_PPC_STACK_SAVE	(18*8)
#define BPF_PPC_STACKFRAME	(BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
				 BPF_PPC_STACK_SAVE)
#define BPF_PPC_SLOWPATH_FRAME	(48+64)
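For orientation, the values above fix the frame size by simple arithmetic: a basic frame of 48+64 = 112 bytes, 32 bytes of locals, and an 18*8 = 144-byte save area for r14-r31, so BPF_PPC_STACKFRAME works out to 288 bytes. A compile-time check along these lines (illustrative only, not part of the kernel source) would be:

	/* (48+64) basic + 32 locals + (18*8) register save area == 288 bytes */
	_Static_assert(BPF_PPC_STACKFRAME == 288, "unexpected BPF JIT stack frame size");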

/*
 * Generated code register usage:
 *
 * As normal PPC C ABI (e.g. r1=sp, r2=TOC), with:
 *
 * skb		r3	(Entry parameter)
 * A register	r4
 * X register	r5
 * addr param	r6
 * r7-r10	scratch
 * skb->data	r14
 * skb headlen	r15	(skb->len - skb->data_len)
 * m[0]		r16
 * m[...]	...
 * m[15]	r31
 */
#define r_skb		3
#define r_ret		3
#define r_A		4
#define r_X		5
#define r_addr		6
#define r_scratch1	7
#define r_D		14
#define r_HL		15
#define r_M		16
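The m[] scratch slots map linearly onto registers: slot n lives in r_M + n, i.e. r16 through r31. As a rough illustration of how an emitter built on this header could use that mapping (a hypothetical helper in the spirit of bpf_jit_comp.c, not a quote from it), a BPF "store A to mem[slot]" becomes a single register move:

	/* Sketch only: mem[slot] is pinned to a register, so ST is a plain move. */
	static void emit_store_mem(u32 *image, struct codegen_context *ctx, int slot)
	{
		ctx->seen |= SEEN_MEM | (1 << (slot & 0xf));	/* record which slot is live */
		PPC_MR(r_M + (slot & 0xf), r_A);		/* mr r(16+slot), r4 */
	}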

#ifndef __ASSEMBLY__

/*
 * Assembly helpers from arch/powerpc/net/bpf_jit.S:
 */
#define DECLARE_LOAD_FUNC(func)	\
	extern u8 func[], func##_negative_offset[], func##_positive_offset[]

DECLARE_LOAD_FUNC(sk_load_word);
DECLARE_LOAD_FUNC(sk_load_half);
DECLARE_LOAD_FUNC(sk_load_byte);
DECLARE_LOAD_FUNC(sk_load_byte_msh);

#define FUNCTION_DESCR_SIZE	24
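FUNCTION_DESCR_SIZE matches the size of a ppc64 (ELFv1) function descriptor: three doublewords holding the entry address, the TOC pointer and an environment pointer, 24 bytes in total. The JIT presumably reserves this much room ahead of the generated instructions so the image can be called like any other ppc64 function. A rough layout sketch (field names are illustrative; the kernel has its own descriptor type):

	/* Illustrative layout only, not the kernel's definition. */
	struct ppc64_func_descr {
		u64 entry;	/* address of the first generated instruction */
		u64 toc;	/* TOC base to load into r2 */
		u64 env;	/* environment pointer, unused by C code */
	};
	/* sizeof(struct ppc64_func_descr) == FUNCTION_DESCR_SIZE (24) */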

/*
 * 16-bit immediate helper macros: HA() is for use with sign-extending instrs
 * (e.g. LD, ADDI).  If the bottom 16 bits is "-ve", add another bit into the
 * top half to negate the effect (i.e. 0xffff + 1 = 0x(1)0000).
 */
#define IMM_H(i)		((uintptr_t)(i)>>16)
#define IMM_HA(i)		(((uintptr_t)(i)>>16) +			\
				 (((uintptr_t)(i) & 0x8000) >> 15))
#define IMM_L(i)		((uintptr_t)(i) & 0xffff)
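A worked example makes the HA adjustment concrete. Instructions such as addi and ld sign-extend their 16-bit displacement, so when bit 15 of the low half is set it effectively subtracts from the high part; IMM_HA pre-adds 1 to compensate. The snippet below is a standalone userspace demonstration of that arithmetic, not kernel code:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uintptr_t i = 0x12348765;
		/* the low 16 bits are sign-extended by the instruction: 0x8765 reads as -0x789b */
		int16_t lo = (int16_t)(i & 0xffff);
		/* high half plus one, because the low half is "negative" */
		uintptr_t ha = (i >> 16) + ((i & 0x8000) >> 15);	/* 0x1234 + 1 = 0x1235 */
		/* addis(ha) followed by the sign-extended low offset reassembles i */
		assert((ha << 16) + lo == i);
		return 0;
	}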

#define PLANT_INSTR(d, idx, instr)					\
	do { if (d) { (d)[idx] = instr; } idx++; } while (0)
#define EMIT(instr)		PLANT_INSTR(image, ctx->idx, instr)
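EMIT assumes the surrounding function has a u32 *image buffer and a struct codegen_context *ctx in scope. Because PLANT_INSTR only stores when the buffer pointer is non-NULL but always bumps the index, the same emitter code can be run once with image == NULL purely to count instructions, and again with a real buffer to fill it in. A minimal, hypothetical emitter built on these macros might look like:

	/* Hypothetical sketch, not taken from bpf_jit_comp.c: emit "return 0". */
	static void emit_ret_zero(u32 *image, struct codegen_context *ctx)
	{
		PPC_LI(r_ret, 0);	/* li r3, 0 -- result in the return register */
		PPC_BLR();		/* blr -- back to the caller */
	}
	/* With image == NULL this only advances ctx->idx (by 2 instructions). */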

#define PPC_NOP()		EMIT(PPC_INST_NOP)
#define PPC_BLR()		EMIT(PPC_INST_BLR)
#define PPC_BLRL()		EMIT(PPC_INST_BLRL)
#define PPC_MTLR(r)		EMIT(PPC_INST_MTLR | ___PPC_RT(r))
#define PPC_ADDI(d, a, i)	EMIT(PPC_INST_ADDI | ___PPC_RT(d) |	\
				     ___PPC_RA(a) | IMM_L(i))
#define PPC_MR(d, a)		PPC_OR(d, a, a)
#define PPC_LI(r, i)		PPC_ADDI(r, 0, i)
#define PPC_ADDIS(d, a, i)	EMIT(PPC_INST_ADDIS |			\
				     ___PPC_RS(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_LIS(r, i)		PPC_ADDIS(r, 0, i)
#define PPC_STD(r, base, i)	EMIT(PPC_INST_STD | ___PPC_RS(r) |	\
				     ___PPC_RA(base) | ((i) & 0xfffc))

#define PPC_LD(r, base, i)	EMIT(PPC_INST_LD | ___PPC_RT(r) |	\
				     ___PPC_RA(base) | IMM_L(i))
#define PPC_LWZ(r, base, i)	EMIT(PPC_INST_LWZ | ___PPC_RT(r) |	\
				     ___PPC_RA(base) | IMM_L(i))
#define PPC_LHZ(r, base, i)	EMIT(PPC_INST_LHZ | ___PPC_RT(r) |	\
				     ___PPC_RA(base) | IMM_L(i))
/* Convenience helpers for the above with 'far' offsets: */
#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i);	\
		else {	PPC_ADDIS(r, base, IMM_HA(i));				\
			PPC_LD(r, r, IMM_L(i)); } } while(0)

#define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LWZ(r, base, i);	\
		else {	PPC_ADDIS(r, base, IMM_HA(i));				\
			PPC_LWZ(r, r, IMM_L(i)); } } while(0)

#define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LHZ(r, base, i);	\
		else {	PPC_ADDIS(r, base, IMM_HA(i));				\
			PPC_LHZ(r, r, IMM_L(i)); } } while(0)
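These helpers let the emitter load structure fields without caring whether the offset fits in a signed 16-bit displacement: small offsets collapse to a single lwz/ld, large ones get an addis with IMM_HA first. A caller-side sketch (hypothetical helper name, assuming the usual <linux/skbuff.h> definitions are in scope):

	/* Sketch: pull skb->len into the A register; offsetof() is normally small,
	 * so this typically emits just one lwz. */
	static void emit_load_skb_len(u32 *image, struct codegen_context *ctx)
	{
		PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
	}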

#define PPC_CMPWI(a, i)		EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
#define PPC_CMPDI(a, i)		EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
#define PPC_CMPLWI(a, i)	EMIT(PPC_INST_CMPLWI | ___PPC_RA(a) | IMM_L(i))
#define PPC_CMPLW(a, b)		EMIT(PPC_INST_CMPLW | ___PPC_RA(a) | ___PPC_RB(b))

#define PPC_SUB(d, a, b)	EMIT(PPC_INST_SUB | ___PPC_RT(d) |	\
				     ___PPC_RB(a) | ___PPC_RA(b))
#define PPC_ADD(d, a, b)	EMIT(PPC_INST_ADD | ___PPC_RT(d) |	\
				     ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_MUL(d, a, b)	EMIT(PPC_INST_MULLW | ___PPC_RT(d) |	\
				     ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_MULHWU(d, a, b)	EMIT(PPC_INST_MULHWU | ___PPC_RT(d) |	\
				     ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_MULI(d, a, i)	EMIT(PPC_INST_MULLI | ___PPC_RT(d) |	\
				     ___PPC_RA(a) | IMM_L(i))
#define PPC_DIVWU(d, a, b)	EMIT(PPC_INST_DIVWU | ___PPC_RT(d) |	\
				     ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_AND(d, a, b)	EMIT(PPC_INST_AND | ___PPC_RA(d) |	\
				     ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_ANDI(d, a, i)	EMIT(PPC_INST_ANDI | ___PPC_RA(d) |	\
				     ___PPC_RS(a) | IMM_L(i))
#define PPC_AND_DOT(d, a, b)	EMIT(PPC_INST_ANDDOT | ___PPC_RA(d) |	\
				     ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_OR(d, a, b)		EMIT(PPC_INST_OR | ___PPC_RA(d) |	\
				     ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_ORI(d, a, i)	EMIT(PPC_INST_ORI | ___PPC_RA(d) |	\
				     ___PPC_RS(a) | IMM_L(i))
#define PPC_ORIS(d, a, i)	EMIT(PPC_INST_ORIS | ___PPC_RA(d) |	\
				     ___PPC_RS(a) | IMM_L(i))
#define PPC_SLW(d, a, s)	EMIT(PPC_INST_SLW | ___PPC_RA(d) |	\
				     ___PPC_RS(a) | ___PPC_RB(s))
#define PPC_SRW(d, a, s)	EMIT(PPC_INST_SRW | ___PPC_RA(d) |	\
				     ___PPC_RS(a) | ___PPC_RB(s))
/* slwi = rlwinm Rx, Ry, n, 0, 31-n */
#define PPC_SLWI(d, a, i)	EMIT(PPC_INST_RLWINM | ___PPC_RA(d) |	\
				     ___PPC_RS(a) | __PPC_SH(i) |	\
				     __PPC_MB(0) | __PPC_ME(31-(i)))
/* srwi = rlwinm Rx, Ry, 32-n, n, 31 */
#define PPC_SRWI(d, a, i)	EMIT(PPC_INST_RLWINM | ___PPC_RA(d) |	\
				     ___PPC_RS(a) | __PPC_SH(32-(i)) |	\
				     __PPC_MB(i) | __PPC_ME(31))
/* sldi = rldicr Rx, Ry, n, 63-n */
#define PPC_SLDI(d, a, i)	EMIT(PPC_INST_RLDICR | ___PPC_RA(d) |	\
				     ___PPC_RS(a) | __PPC_SH(i) |	\
				     __PPC_MB(63-(i)) | (((i) & 0x20) >> 4))
#define PPC_NEG(d, a)		EMIT(PPC_INST_NEG | ___PPC_RT(d) | ___PPC_RA(a))
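The immediate-shift macros encode shifts as rotate-and-mask, exactly as the comments above state; instantiating the formulas for a concrete count may help. For n = 3, slwi rX,rY,3 is rlwinm rX,rY,3,0,28 (rotate left 3, keep bits 0-28 in big-endian numbering), and srwi rX,rY,3 is rlwinm rX,rY,29,3,31. A tiny, standalone check of the equivalent C semantics (illustrative, not kernel code):

	#include <assert.h>
	#include <stdint.h>

	static uint32_t rotl32(uint32_t x, unsigned n)
	{
		return (x << n) | (x >> (32 - n));	/* n is 1..31 here */
	}

	int main(void)
	{
		uint32_t x = 0xdeadbeef;
		unsigned n = 3;
		/* slwi: rotate left n, then clear the low n bits (mask bits 0..31-n) */
		assert((rotl32(x, n) & ~((1u << n) - 1)) == (uint32_t)(x << n));
		/* srwi: rotate left 32-n, then keep only the low 32-n bits (mask bits n..31) */
		assert((rotl32(x, 32 - n) & ((1u << (32 - n)) - 1)) == x >> n);
		return 0;
	}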

/* Long jump; (unconditional 'branch') */
#define PPC_JMP(dest)		EMIT(PPC_INST_BRANCH |			\
				     (((dest) - (ctx->idx * 4)) & 0x03fffffc))
/* "cond" here covers BO:BI fields. */
#define PPC_BCC_SHORT(cond, dest)	EMIT(PPC_INST_BRANCH_COND |	\
					     (((cond) & 0x3ff) << 16) |	\
					     (((dest) - (ctx->idx * 4)) &	\
					      0xfffc))
#define PPC_LI32(d, i)		do { PPC_LI(d, IMM_L(i));		\
		if ((u32)(uintptr_t)(i) >= 32768) {			\
			PPC_ADDIS(d, d, IMM_HA(i));			\
		} } while(0)
#define PPC_LI64(d, i)		do {					\
		if (!((uintptr_t)(i) & 0xffffffff00000000ULL))		\
			PPC_LI32(d, i);					\
		else {							\
			PPC_LIS(d, ((uintptr_t)(i) >> 48));		\
			if ((uintptr_t)(i) & 0x0000ffff00000000ULL)	\
				PPC_ORI(d, d,				\
					((uintptr_t)(i) >> 32) & 0xffff); \
			PPC_SLDI(d, d, 32);				\
			if ((uintptr_t)(i) & 0x00000000ffff0000ULL)	\
				PPC_ORIS(d, d,				\
					 ((uintptr_t)(i) >> 16) & 0xffff); \
			if ((uintptr_t)(i) & 0x000000000000ffffULL)	\
				PPC_ORI(d, d, (uintptr_t)(i) & 0xffff);	\
		} } while (0)
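PPC_LI32 and PPC_LI64 synthesise constants from 16-bit immediates, emitting only the pieces that are non-zero. As a worked example (the constant and the use of r_scratch1 are arbitrary, purely for illustration):

	/* Hypothetical use inside an emitter function, with image/ctx in scope: */
	PPC_LI64(r_scratch1, 0xc000000001234567ULL);
	/*
	 * expands to four instructions:
	 *	lis   r7, 0xc000	# r7 = 0xffffffffc0000000 (sign-extended)
	 *	sldi  r7, r7, 32	# r7 = 0xc000000000000000 (bits 32-47 were 0, so no ori)
	 *	oris  r7, r7, 0x0123	# r7 = 0xc000000001230000
	 *	ori   r7, r7, 0x4567	# r7 = 0xc000000001234567
	 */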

static inline bool is_nearbranch(int offset)
{
	return (offset < 32768) && (offset >= -32768);
}

/*
 * The fly in the ointment of code size changing from pass to pass is
 * avoided by padding the short branch case with a NOP.  If code size differs
 * with different branch reaches we will have the issue of code moving from
 * one pass to the next and will need a few passes to converge on a stable
 * state.
 */
#define PPC_BCC(cond, dest)	do {					\
		if (is_nearbranch((dest) - (ctx->idx * 4))) {		\
			PPC_BCC_SHORT(cond, dest);			\
			PPC_NOP();					\
		} else {						\
			/* Flip the 'T or F' bit to invert comparison */ \
			PPC_BCC_SHORT(cond ^ COND_CMP_TRUE, (ctx->idx+2)*4); \
			PPC_JMP(dest);					\
		} } while(0)

/* To create a branch condition, select a bit of cr0... */
#define CR0_LT		0
#define CR0_GT		1
#define CR0_EQ		2
/* ...and modify BO[3] */
#define COND_CMP_TRUE	0x100
#define COND_CMP_FALSE	0x000
/* Together, they make all required comparisons: */
#define COND_GT		(CR0_GT | COND_CMP_TRUE)
#define COND_GE		(CR0_LT | COND_CMP_FALSE)
#define COND_EQ		(CR0_EQ | COND_CMP_TRUE)
#define COND_NE		(CR0_EQ | COND_CMP_FALSE)
#define COND_LT		(CR0_LT | COND_CMP_TRUE)
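Putting the pieces together: a condition is a CR0 bit plus a "branch if true/false" flag, and PPC_BCC always occupies two instruction slots so code size stays stable across passes, as the comment above PPC_BCC explains. A hypothetical guard that jumps to an exit point when A is zero could be emitted as:

	/* Sketch only; 'exit_addr' is a byte offset into the generated code. */
	PPC_CMPLWI(r_A, 0);			/* compare A with 0, result in cr0   */
	PPC_BCC(COND_EQ, exit_addr);		/* near target: "beq exit; nop"      */
						/* far target:  "bne +8; b exit"     */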

#define SEEN_DATAREF 0x10000 /* might call external helpers */
#define SEEN_XREG    0x20000 /* X reg is used */
#define SEEN_MEM     0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
			      * storage */
#define SEEN_MEM_MSK 0x0ffff

struct codegen_context {
	unsigned int seen;
	unsigned int idx;
	int pc_ret0; /* bpf index of first RET #0 instruction (if any) */
};
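The context ties the scheme together: seen accumulates SEEN_* flags so later passes can skip setup the filter never needs, idx counts emitted instructions, and pc_ret0 caches an early "return 0" target. A minimal two-pass driver sketch, assuming a build_body()-style emitter that writes through EMIT (function names are illustrative stand-ins, not the kernel's exact code, and the real compiler may iterate more than twice to let branch sizes converge):

	/* Hypothetical driver in the style of bpf_jit_compile(). */
	static u32 *jit_example(void)
	{
		struct codegen_context ctx = { .seen = 0, .idx = 0, .pc_ret0 = -1 };
		u32 *image;

		build_body(NULL, &ctx);		/* pass 1: image == NULL, only ctx.idx grows */
		image = alloc_image(ctx.idx * 4 + FUNCTION_DESCR_SIZE);
		ctx.idx = 0;			/* reset the counter for the real pass */
		build_body(image, &ctx);	/* pass 2: instructions are actually stored */
		return image;
	}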

#endif /* !__ASSEMBLY__ */

#endif /* _BPF_JIT_H */