/* arch/sparc/net/bpf_jit_comp.c */

#include <linux/moduleloader.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/cache.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>
#include <asm/ptrace.h>

#include "bpf_jit.h"

int bpf_jit_enable __read_mostly;

static inline bool is_simm13(unsigned int value)
{
	return value + 0x1000 < 0x2000;
}
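
/* The check above folds the signed-range test -4096 <= value <= 4095
 * into a single unsigned compare: adding 0x1000 maps [-0x1000, 0xfff]
 * onto [0, 0x1fff].  For example, (unsigned int)-4096 + 0x1000 wraps
 * to 0 (accepted), while 4096 + 0x1000 == 0x2000 (rejected).
 */
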
static void bpf_flush_icache(void *start_, void *end_)
{
#ifdef CONFIG_SPARC64
	/* Cheetah's I-cache is fully coherent. */
	if (tlb_type == spitfire) {
		unsigned long start = (unsigned long) start_;
		unsigned long end = (unsigned long) end_;

		start &= ~7UL;
		end = (end + 7UL) & ~7UL;
		while (start < end) {
			flushi(start);
			start += 32;
		}
	}
#endif
}

#define SEEN_DATAREF	1 /* might call external helpers */
#define SEEN_XREG	2 /* X register is used */
#define SEEN_MEM	4 /* use mem[] for temporary storage */

#define S13(X)		((X) & 0x1fff)
#define IMMED		0x00002000
#define RD(X)		((X) << 25)
#define RS1(X)		((X) << 14)
#define RS2(X)		((X))
#define OP(X)		((X) << 30)
#define OP2(X)		((X) << 22)
#define OP3(X)		((X) << 19)
#define COND(X)		((X) << 25)
#define F1(X)		OP(X)
#define F2(X, Y)	(OP(X) | OP2(Y))
#define F3(X, Y)	(OP(X) | OP3(Y))
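
/* These field macros drop each value at its architectural bit position:
 * op in bits 31:30, rd in 29:25, op3 in 24:19, rs1 in 18:14, the
 * immediate flag at bit 13 and rs2 in 4:0.  As a worked example,
 * "or %g0, %g0, %o0" (%o0 is register 8) assembles to:
 *
 *	F3(2, 0x02)		= 0x80100000
 *	| RS1(0) | RS2(0)	= 0x00000000
 *	| RD(8)			= 0x10000000
 *				  ----------
 *				  0x90100000
 */
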
#define CONDN		COND(0x0)
#define CONDE		COND(0x1)
#define CONDLE		COND(0x2)
#define CONDL		COND(0x3)
#define CONDLEU		COND(0x4)
#define CONDCS		COND(0x5)
#define CONDNEG		COND(0x6)
#define CONDVC		COND(0x7)
#define CONDA		COND(0x8)
#define CONDNE		COND(0x9)
#define CONDG		COND(0xa)
#define CONDGE		COND(0xb)
#define CONDGU		COND(0xc)
#define CONDCC		COND(0xd)
#define CONDPOS		COND(0xe)
#define CONDVS		COND(0xf)

#define CONDGEU		CONDCC
#define CONDLU		CONDCS

#define WDISP22(X)	(((X) >> 2) & 0x3fffff)

#define BA		(F2(0, 2) | CONDA)
#define BGU		(F2(0, 2) | CONDGU)
#define BLEU		(F2(0, 2) | CONDLEU)
#define BGEU		(F2(0, 2) | CONDGEU)
#define BLU		(F2(0, 2) | CONDLU)
#define BE		(F2(0, 2) | CONDE)
#define BNE		(F2(0, 2) | CONDNE)

#ifdef CONFIG_SPARC64
#define BE_PTR		(F2(0, 1) | CONDE | (2 << 20))
#else
#define BE_PTR		BE
#endif
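
/* On sparc64, pointer tests must branch on the 64-bit condition codes,
 * so BE_PTR uses the BPcc format (op2 = 1), where (2 << 20) sets the
 * cc1 bit to select %xcc rather than %icc.
 */
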
#define SETHI(K, REG)	\
	(F2(0, 0x4) | RD(REG) | (((K) >> 10) & 0x3fffff))
#define OR_LO(K, REG)	\
	(F3(2, 0x02) | IMMED | RS1(REG) | ((K) & 0x3ff) | RD(REG))
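
/* Together these build an arbitrary 32-bit constant in two
 * instructions: sethi fills bits 31:10 and clears the rest, then
 * "or" supplies the low 10 bits.  For K = 0xdeadbeef:
 *
 *	sethi	%hi(0xdeadbeef), reg	! reg = 0xdeadbc00
 *	or	reg, 0x2ef, reg		! reg = 0xdeadbeef
 */
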
#define ADD		F3(2, 0x00)
#define AND		F3(2, 0x01)
#define ANDCC		F3(2, 0x11)
#define OR		F3(2, 0x02)
#define XOR		F3(2, 0x03)
#define SUB		F3(2, 0x04)
#define SUBCC		F3(2, 0x14)
#define MUL		F3(2, 0x0a)	/* umul */
#define DIV		F3(2, 0x0e)	/* udiv */
#define SLL		F3(2, 0x25)
#define SRL		F3(2, 0x26)
#define JMPL		F3(2, 0x38)
#define CALL		F1(1)
#define BR		F2(0, 0x01)
#define RD_Y		F3(2, 0x28)
#define WR_Y		F3(2, 0x30)

#define LD32		F3(3, 0x00)
#define LD8		F3(3, 0x01)
#define LD16		F3(3, 0x02)
#define LD64		F3(3, 0x0b)
#define ST32		F3(3, 0x04)

#ifdef CONFIG_SPARC64
#define LDPTR		LD64
#define BASE_STACKFRAME	176
#else
#define LDPTR		LD32
#define BASE_STACKFRAME	96
#endif

#define LD32I		(LD32 | IMMED)
#define LD8I		(LD8 | IMMED)
#define LD16I		(LD16 | IMMED)
#define LD64I		(LD64 | IMMED)
#define LDPTRI		(LDPTR | IMMED)
#define ST32I		(ST32 | IMMED)

#define emit_nop()		\
do {				\
	*prog++ = SETHI(0, G0);	\
} while (0)

#define emit_neg()					\
do {	/* sub %g0, r_A, r_A */				\
	*prog++ = SUB | RS1(G0) | RS2(r_A) | RD(r_A);	\
} while (0)

#define emit_reg_move(FROM, TO)				\
do {	/* or %g0, FROM, TO */				\
	*prog++ = OR | RS1(G0) | RS2(FROM) | RD(TO);	\
} while (0)

#define emit_clear(REG)					\
do {	/* or %g0, %g0, REG */				\
	*prog++ = OR | RS1(G0) | RS2(G0) | RD(REG);	\
} while (0)

#define emit_set_const(K, REG)		\
do {	/* sethi %hi(K), REG */		\
	*prog++ = SETHI(K, REG);	\
	/* or REG, %lo(K), REG */	\
	*prog++ = OR_LO(K, REG);	\
} while (0)

/* Emit
 *
 *	OP	r_A, r_X, r_A
 */
#define emit_alu_X(OPCODE)					\
do {								\
	seen |= SEEN_XREG;					\
	*prog++ = OPCODE | RS1(r_A) | RS2(r_X) | RD(r_A);	\
} while (0)

/* Emit either:
 *
 *	OP	r_A, K, r_A
 *
 * or
 *
 *	sethi	%hi(K), r_TMP
 *	or	r_TMP, %lo(K), r_TMP
 *	OP	r_A, r_TMP, r_A
 *
 * depending upon whether K fits in a signed 13-bit immediate
 * instruction field.  Emit nothing if K is zero.
 */
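
/* For instance, "A += 16" fits in a simm13 field and emits the single
 * instruction "add r_A, 0x10, r_A", while "A += 0x12345" (74565 is
 * larger than 4095) emits the three-instruction sethi/or/add sequence
 * through r_TMP.
 */
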
#define emit_alu_K(OPCODE, K)					\
do {								\
	if (K || OPCODE == AND || OPCODE == MUL) {		\
		unsigned int _insn = OPCODE;			\
		_insn |= RS1(r_A) | RD(r_A);			\
		if (is_simm13(K)) {				\
			*prog++ = _insn | IMMED | S13(K);	\
		} else {					\
			emit_set_const(K, r_TMP);		\
			*prog++ = _insn | RS2(r_TMP);		\
		}						\
	}							\
} while (0)

#define emit_loadimm(K, DEST)						\
do {									\
	if (is_simm13(K)) {						\
		/* or %g0, K, DEST */					\
		*prog++ = OR | IMMED | RS1(G0) | S13(K) | RD(DEST);	\
	} else {							\
		emit_set_const(K, DEST);				\
	}								\
} while (0)

#define emit_loadptr(BASE, STRUCT, FIELD, DEST)				\
do {	unsigned int _off = offsetof(STRUCT, FIELD);			\
	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(void *));	\
	*prog++ = LDPTRI | RS1(BASE) | S13(_off) | RD(DEST);		\
} while (0)

#define emit_load32(BASE, STRUCT, FIELD, DEST)				\
do {	unsigned int _off = offsetof(STRUCT, FIELD);			\
	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u32));	\
	*prog++ = LD32I | RS1(BASE) | S13(_off) | RD(DEST);		\
} while (0)

#define emit_load16(BASE, STRUCT, FIELD, DEST)				\
do {	unsigned int _off = offsetof(STRUCT, FIELD);			\
	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u16));	\
	*prog++ = LD16I | RS1(BASE) | S13(_off) | RD(DEST);		\
} while (0)

#define __emit_load8(BASE, STRUCT, FIELD, DEST)				\
do {	unsigned int _off = offsetof(STRUCT, FIELD);			\
	*prog++ = LD8I | RS1(BASE) | S13(_off) | RD(DEST);		\
} while (0)

#define emit_load8(BASE, STRUCT, FIELD, DEST)				\
do {	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u8));	\
	__emit_load8(BASE, STRUCT, FIELD, DEST);			\
} while (0)

#ifdef CONFIG_SPARC64
#define BIAS (STACK_BIAS - 4)
#else
#define BIAS (-4)
#endif

#define emit_ldmem(OFF, DEST)						\
do {	*prog++ = LD32I | RS1(SP) | S13(BIAS - (OFF)) | RD(DEST);	\
} while (0)

#define emit_stmem(OFF, SRC)						\
do {	*prog++ = ST32I | RS1(SP) | S13(BIAS - (OFF)) | RD(SRC);	\
} while (0)

#ifdef CONFIG_SMP
#ifdef CONFIG_SPARC64
#define emit_load_cpu(REG)	\
	emit_load16(G6, struct thread_info, cpu, REG)
#else
#define emit_load_cpu(REG)	\
	emit_load32(G6, struct thread_info, cpu, REG)
#endif
#else
#define emit_load_cpu(REG)	emit_clear(REG)
#endif

#define emit_skb_loadptr(FIELD, DEST)	\
	emit_loadptr(r_SKB, struct sk_buff, FIELD, DEST)
#define emit_skb_load32(FIELD, DEST)	\
	emit_load32(r_SKB, struct sk_buff, FIELD, DEST)
#define emit_skb_load16(FIELD, DEST)	\
	emit_load16(r_SKB, struct sk_buff, FIELD, DEST)
#define __emit_skb_load8(FIELD, DEST)	\
	__emit_load8(r_SKB, struct sk_buff, FIELD, DEST)
#define emit_skb_load8(FIELD, DEST)	\
	emit_load8(r_SKB, struct sk_buff, FIELD, DEST)

#define emit_jmpl(BASE, IMM_OFF, LREG) \
	*prog++ = (JMPL | IMMED | RS1(BASE) | S13(IMM_OFF) | RD(LREG))

#define emit_call(FUNC)					\
do {	void *_here = image + addrs[i] - 8;		\
	unsigned int _off = (void *)(FUNC) - _here;	\
	*prog++ = CALL | (((_off) >> 2) & 0x3fffffff);	\
	emit_nop();					\
} while (0)

#define emit_branch(BR_OPC, DEST)			\
do {	unsigned int _here = addrs[i] - 8;		\
	*prog++ = BR_OPC | WDISP22((DEST) - _here);	\
} while (0)

#define emit_branch_off(BR_OPC, OFF)		\
do {	*prog++ = BR_OPC | WDISP22(OFF);	\
} while (0)

#define emit_jump(DEST)		emit_branch(BA, DEST)

#define emit_read_y(REG)	*prog++ = RD_Y | RD(REG)
#define emit_write_y(REG)	*prog++ = WR_Y | IMMED | RS1(REG) | S13(0)

#define emit_cmp(R1, R2) \
	*prog++ = (SUBCC | RS1(R1) | RS2(R2) | RD(G0))

#define emit_cmpi(R1, IMM) \
	*prog++ = (SUBCC | IMMED | RS1(R1) | S13(IMM) | RD(G0))

#define emit_btst(R1, R2) \
	*prog++ = (ANDCC | RS1(R1) | RS2(R2) | RD(G0))

#define emit_btsti(R1, IMM) \
	*prog++ = (ANDCC | IMMED | RS1(R1) | S13(IMM) | RD(G0))

#define emit_sub(R1, R2, R3) \
	*prog++ = (SUB | RS1(R1) | RS2(R2) | RD(R3))

#define emit_subi(R1, IMM, R3) \
	*prog++ = (SUB | IMMED | RS1(R1) | S13(IMM) | RD(R3))

#define emit_add(R1, R2, R3) \
	*prog++ = (ADD | RS1(R1) | RS2(R2) | RD(R3))

#define emit_addi(R1, IMM, R3) \
	*prog++ = (ADD | IMMED | RS1(R1) | S13(IMM) | RD(R3))

#define emit_and(R1, R2, R3) \
	*prog++ = (AND | RS1(R1) | RS2(R2) | RD(R3))

#define emit_andi(R1, IMM, R3) \
	*prog++ = (AND | IMMED | RS1(R1) | S13(IMM) | RD(R3))

#define emit_alloc_stack(SZ) \
	*prog++ = (SUB | IMMED | RS1(SP) | S13(SZ) | RD(SP))

#define emit_release_stack(SZ) \
	*prog++ = (ADD | IMMED | RS1(SP) | S13(SZ) | RD(SP))

/* A note about branch offset calculations.  The addrs[] array,
 * indexed by BPF instruction, records the address after all the
 * sparc instructions emitted for that BPF instruction.
 *
 * The most common case is to emit a branch at the end of such
 * a code sequence.  So this would be two instructions, the
 * branch and its delay slot.
 *
 * Therefore by default the branch emitters calculate the branch
 * offset field as:
 *
 *	destination - (addrs[i] - 8)
 *
 * This "addrs[i] - 8" is the address of the branch itself or
 * what "." would be in assembler notation.  The "8" part is
 * how we take into consideration the branch and its delay
 * slot mentioned above.
 *
 * Sometimes we need to emit a branch earlier in the code
 * sequence.  And in these situations we adjust "destination"
 * to accommodate this difference.  For example, if we needed
 * to emit a branch (and its delay slot) right before the
 * final instruction emitted for a BPF opcode, we'd use
 * "destination + 4" instead of just plain "destination" above.
 *
 * This is why you see all of these funny emit_branch() and
 * emit_jump() calls with adjusted offsets.
 */
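
/* A worked example: suppose the code for BPF instruction i ends at
 * addrs[i] = 0x40 with a branch to destination 0x20.  The branch
 * instruction itself sits at 0x40 - 8 = 0x38, so the emitted offset
 * is 0x20 - 0x38 = -0x18 bytes, which WDISP22() converts to a word
 * displacement of -6 instructions.
 */
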
void bpf_jit_compile(struct bpf_prog *fp)
{
	unsigned int cleanup_addr, proglen, oldproglen = 0;
	u32 temp[8], *prog, *func, seen = 0, pass;
	const struct sock_filter *filter = fp->insns;
	int i, flen = fp->len, pc_ret0 = -1;
	unsigned int *addrs;
	void *image;

	if (!bpf_jit_enable)
		return;

	addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/* Before the first pass, make a rough estimate of addrs[]:
	 * each BPF instruction is translated to less than 64 bytes.
	 */
	for (proglen = 0, i = 0; i < flen; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	cleanup_addr = proglen; /* epilogue address */
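	/* For a three-instruction filter, for example, the estimate is
	 * addrs[] = { 64, 128, 192 } with cleanup_addr = 192; later
	 * passes replace these with exact offsets.
	 */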
	image = NULL;
	for (pass = 0; pass < 10; pass++) {
		u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
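
		/* Nothing has been scanned on pass 0, so assume the worst
		 * (all SEEN_* flags set) and let later passes use the flags
		 * actually recorded while emitting code.
		 */
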
		/* no prologue/epilogue for trivial filters (RET something) */
		proglen = 0;
		prog = temp;

		/* Prologue */
		if (seen_or_pass0) {
			if (seen_or_pass0 & SEEN_MEM) {
				unsigned int sz = BASE_STACKFRAME;
				sz += BPF_MEMWORDS * sizeof(u32);
				emit_alloc_stack(sz);
			}

			/* Make sure we don't leak kernel memory. */
			if (seen_or_pass0 & SEEN_XREG)
				emit_clear(r_X);

			/* If this filter needs to access skb data,
			 * load %o4 and %o5 with:
			 *	%o4 = skb->len - skb->data_len
			 *	%o5 = skb->data
			 * And also back up %o7 into r_saved_O7 so we can
			 * invoke the stubs using 'call'.
			 */
			if (seen_or_pass0 & SEEN_DATAREF) {
				emit_load32(r_SKB, struct sk_buff, len, r_HEADLEN);
				emit_load32(r_SKB, struct sk_buff, data_len, r_TMP);
				emit_sub(r_HEADLEN, r_TMP, r_HEADLEN);
				emit_loadptr(r_SKB, struct sk_buff, data, r_SKB_DATA);
			}
		}
		emit_reg_move(O7, r_saved_O7);

		switch (filter[0].code) {
		case BPF_RET | BPF_K:
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			/* The first instruction sets the A register (or is
			 * a "RET 'constant'").
			 */
			break;
		default:
			/* Make sure we don't leak kernel information to the
			 * user.
			 */
			emit_clear(r_A); /* A = 0 */
		}

		for (i = 0; i < flen; i++) {
			unsigned int K = filter[i].k;
			unsigned int t_offset;
			unsigned int f_offset;
			u32 t_op, f_op;
			u16 code = bpf_anc_helper(&filter[i]);
			int ilen;

			switch (code) {
			case BPF_ALU | BPF_ADD | BPF_X:	/* A += X; */
				emit_alu_X(ADD);
				break;
			case BPF_ALU | BPF_ADD | BPF_K:	/* A += K; */
				emit_alu_K(ADD, K);
				break;
			case BPF_ALU | BPF_SUB | BPF_X:	/* A -= X; */
				emit_alu_X(SUB);
				break;
			case BPF_ALU | BPF_SUB | BPF_K:	/* A -= K */
				emit_alu_K(SUB, K);
				break;
			case BPF_ALU | BPF_AND | BPF_X:	/* A &= X */
				emit_alu_X(AND);
				break;
			case BPF_ALU | BPF_AND | BPF_K:	/* A &= K */
				emit_alu_K(AND, K);
				break;
			case BPF_ALU | BPF_OR | BPF_X:	/* A |= X */
				emit_alu_X(OR);
				break;
			case BPF_ALU | BPF_OR | BPF_K:	/* A |= K */
				emit_alu_K(OR, K);
				break;
			case BPF_ANC | SKF_AD_ALU_XOR_X: /* A ^= X; */
			case BPF_ALU | BPF_XOR | BPF_X:
				emit_alu_X(XOR);
				break;
			case BPF_ALU | BPF_XOR | BPF_K:	/* A ^= K */
				emit_alu_K(XOR, K);
				break;
			case BPF_ALU | BPF_LSH | BPF_X:	/* A <<= X */
				emit_alu_X(SLL);
				break;
			case BPF_ALU | BPF_LSH | BPF_K:	/* A <<= K */
				emit_alu_K(SLL, K);
				break;
			case BPF_ALU | BPF_RSH | BPF_X:	/* A >>= X */
				emit_alu_X(SRL);
				break;
			case BPF_ALU | BPF_RSH | BPF_K:	/* A >>= K */
				emit_alu_K(SRL, K);
				break;
			case BPF_ALU | BPF_MUL | BPF_X:	/* A *= X; */
				emit_alu_X(MUL);
				break;
			case BPF_ALU | BPF_MUL | BPF_K:	/* A *= K */
				emit_alu_K(MUL, K);
				break;
			case BPF_ALU | BPF_DIV | BPF_K:	/* A /= K with K != 0 */
				if (K == 1)
					break;
				emit_write_y(G0);
#ifdef CONFIG_SPARC32
				/* The Sparc v8 architecture requires
				 * three instructions between a %y
				 * register write and the first use.
				 */
				emit_nop();
				emit_nop();
				emit_nop();
#endif
				emit_alu_K(DIV, K);
				break;
			case BPF_ALU | BPF_DIV | BPF_X:	/* A /= X; */
				emit_cmpi(r_X, 0);
				if (pc_ret0 > 0) {
					t_offset = addrs[pc_ret0 - 1];
#ifdef CONFIG_SPARC32
					emit_branch(BE, t_offset + 20);
#else
					emit_branch(BE, t_offset + 8);
#endif
					emit_nop(); /* delay slot */
				} else {
					emit_branch_off(BNE, 16);
					emit_nop();
#ifdef CONFIG_SPARC32
					emit_jump(cleanup_addr + 20);
#else
					emit_jump(cleanup_addr + 8);
#endif
					emit_clear(r_A);
				}
				emit_write_y(G0);
#ifdef CONFIG_SPARC32
				/* The Sparc v8 architecture requires
				 * three instructions between a %y
				 * register write and the first use.
				 */
				emit_nop();
				emit_nop();
				emit_nop();
#endif
				emit_alu_X(DIV);
				break;
			case BPF_ALU | BPF_NEG:
				emit_neg();
				break;
			case BPF_RET | BPF_K:
				if (!K) {
					if (pc_ret0 == -1)
						pc_ret0 = i;
					emit_clear(r_A);
				} else {
					emit_loadimm(K, r_A);
				}
				/* Fallthrough */
			case BPF_RET | BPF_A:
				if (seen_or_pass0) {
					if (i != flen - 1) {
						emit_jump(cleanup_addr);
						emit_nop();
						break;
					}
					if (seen_or_pass0 & SEEN_MEM) {
						unsigned int sz = BASE_STACKFRAME;
						sz += BPF_MEMWORDS * sizeof(u32);
						emit_release_stack(sz);
					}
				}
				/* jmpl %r_saved_O7 + 8, %g0 */
				emit_jmpl(r_saved_O7, 8, G0);
				emit_reg_move(r_A, O0); /* delay slot */
				break;
			case BPF_MISC | BPF_TAX:
				seen |= SEEN_XREG;
				emit_reg_move(r_A, r_X);
				break;
			case BPF_MISC | BPF_TXA:
				seen |= SEEN_XREG;
				emit_reg_move(r_X, r_A);
				break;
			case BPF_ANC | SKF_AD_CPU:
				emit_load_cpu(r_A);
				break;
			case BPF_ANC | SKF_AD_PROTOCOL:
				emit_skb_load16(protocol, r_A);
				break;
			case BPF_ANC | SKF_AD_PKTTYPE:
				__emit_skb_load8(__pkt_type_offset, r_A);
				emit_andi(r_A, PKT_TYPE_MAX, r_A);
				emit_alu_K(SRL, 5);
				break;
			case BPF_ANC | SKF_AD_IFINDEX:
				emit_skb_loadptr(dev, r_A);
				emit_cmpi(r_A, 0);
				emit_branch(BE_PTR, cleanup_addr + 4);
				emit_nop();
				emit_load32(r_A, struct net_device, ifindex, r_A);
				break;
			case BPF_ANC | SKF_AD_MARK:
				emit_skb_load32(mark, r_A);
				break;
			case BPF_ANC | SKF_AD_QUEUE:
				emit_skb_load16(queue_mapping, r_A);
				break;
			case BPF_ANC | SKF_AD_HATYPE:
				emit_skb_loadptr(dev, r_A);
				emit_cmpi(r_A, 0);
				emit_branch(BE_PTR, cleanup_addr + 4);
				emit_nop();
				emit_load16(r_A, struct net_device, type, r_A);
				break;
			case BPF_ANC | SKF_AD_RXHASH:
				emit_skb_load32(hash, r_A);
				break;
			case BPF_ANC | SKF_AD_VLAN_TAG:
			case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
				emit_skb_load16(vlan_tci, r_A);
				if (code != (BPF_ANC | SKF_AD_VLAN_TAG)) {
					emit_alu_K(SRL, 12);
					emit_andi(r_A, 1, r_A);
				} else {
					emit_loadimm(~VLAN_TAG_PRESENT, r_TMP);
					emit_and(r_A, r_TMP, r_A);
				}
				break;
			case BPF_LD | BPF_W | BPF_LEN:
				emit_skb_load32(len, r_A);
				break;
			case BPF_LDX | BPF_W | BPF_LEN:
				emit_skb_load32(len, r_X);
				break;
			case BPF_LD | BPF_IMM:
				emit_loadimm(K, r_A);
				break;
			case BPF_LDX | BPF_IMM:
				emit_loadimm(K, r_X);
				break;
			case BPF_LD | BPF_MEM:
				seen |= SEEN_MEM;
				emit_ldmem(K * 4, r_A);
				break;
			case BPF_LDX | BPF_MEM:
				seen |= SEEN_MEM | SEEN_XREG;
				emit_ldmem(K * 4, r_X);
				break;
			case BPF_ST:
				seen |= SEEN_MEM;
				emit_stmem(K * 4, r_A);
				break;
			case BPF_STX:
				seen |= SEEN_MEM | SEEN_XREG;
				emit_stmem(K * 4, r_X);
				break;

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
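
/* CHOOSE_LOAD_FUNC picks the helper from the sign of the constant
 * offset K: non-negative offsets use the fast _positive_offset
 * variant, ancillary offsets in [SKF_LL_OFF, 0) use _negative_offset,
 * and anything below SKF_LL_OFF falls back to the generic helper,
 * which fails the load at run time.
 */
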
			case BPF_LD | BPF_W | BPF_ABS:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word);
common_load:			seen |= SEEN_DATAREF;
				emit_loadimm(K, r_OFF);
				emit_call(func);
				break;
			case BPF_LD | BPF_H | BPF_ABS:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half);
				goto common_load;
			case BPF_LD | BPF_B | BPF_ABS:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte);
				goto common_load;
			case BPF_LDX | BPF_B | BPF_MSH:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh);
				goto common_load;
			case BPF_LD | BPF_W | BPF_IND:
				func = bpf_jit_load_word;
common_load_ind:		seen |= SEEN_DATAREF | SEEN_XREG;
				if (K) {
					if (is_simm13(K)) {
						emit_addi(r_X, K, r_OFF);
					} else {
						emit_loadimm(K, r_TMP);
						emit_add(r_X, r_TMP, r_OFF);
					}
				} else {
					emit_reg_move(r_X, r_OFF);
				}
				emit_call(func);
				break;
			case BPF_LD | BPF_H | BPF_IND:
				func = bpf_jit_load_half;
				goto common_load_ind;
			case BPF_LD | BPF_B | BPF_IND:
				func = bpf_jit_load_byte;
				goto common_load_ind;
			case BPF_JMP | BPF_JA:
				emit_jump(addrs[i + K]);
				emit_nop();
				break;

#define COND_SEL(CODE, TOP, FOP)	\
	case CODE:			\
		t_op = TOP;		\
		f_op = FOP;		\
		goto cond_branch

			COND_SEL(BPF_JMP | BPF_JGT | BPF_K, BGU, BLEU);
			COND_SEL(BPF_JMP | BPF_JGE | BPF_K, BGEU, BLU);
			COND_SEL(BPF_JMP | BPF_JEQ | BPF_K, BE, BNE);
			COND_SEL(BPF_JMP | BPF_JSET | BPF_K, BNE, BE);
			COND_SEL(BPF_JMP | BPF_JGT | BPF_X, BGU, BLEU);
			COND_SEL(BPF_JMP | BPF_JGE | BPF_X, BGEU, BLU);
			COND_SEL(BPF_JMP | BPF_JEQ | BPF_X, BE, BNE);
			COND_SEL(BPF_JMP | BPF_JSET | BPF_X, BNE, BE);
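
			/* Each COND_SEL pairs a branch with its logical
			 * complement: t_op is used when branching to the
			 * true target, f_op when branching to the false
			 * target.  The unsigned variants (BGU, BGEU, BLU,
			 * BLEU) are used because BPF comparisons are
			 * unsigned.
			 */
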
cond_branch:			f_offset = addrs[i + filter[i].jf];
				t_offset = addrs[i + filter[i].jt];

				/* same targets, can avoid doing the test :) */
				if (filter[i].jt == filter[i].jf) {
					emit_jump(t_offset);
					emit_nop();
					break;
				}

				switch (code) {
				case BPF_JMP | BPF_JGT | BPF_X:
				case BPF_JMP | BPF_JGE | BPF_X:
				case BPF_JMP | BPF_JEQ | BPF_X:
					seen |= SEEN_XREG;
					emit_cmp(r_A, r_X);
					break;
				case BPF_JMP | BPF_JSET | BPF_X:
					seen |= SEEN_XREG;
					emit_btst(r_A, r_X);
					break;
				case BPF_JMP | BPF_JEQ | BPF_K:
				case BPF_JMP | BPF_JGT | BPF_K:
				case BPF_JMP | BPF_JGE | BPF_K:
					if (is_simm13(K)) {
						emit_cmpi(r_A, K);
					} else {
						emit_loadimm(K, r_TMP);
						emit_cmp(r_A, r_TMP);
					}
					break;
				case BPF_JMP | BPF_JSET | BPF_K:
					if (is_simm13(K)) {
						emit_btsti(r_A, K);
					} else {
						emit_loadimm(K, r_TMP);
						emit_btst(r_A, r_TMP);
					}
					break;
				}
				if (filter[i].jt != 0) {
					if (filter[i].jf)
						t_offset += 8;
					emit_branch(t_op, t_offset);
					emit_nop(); /* delay slot */
					if (filter[i].jf) {
						emit_jump(f_offset);
						emit_nop();
					}
					break;
				}
				emit_branch(f_op, f_offset);
				emit_nop(); /* delay slot */
				break;
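
				/* Note the "t_offset += 8" above: when both
				 * targets need a branch, the taken branch must
				 * also hop over the jump+nop pair emitted for
				 * the false target, per the offset discussion
				 * above bpf_jit_compile().
				 */
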
			default:
				/* hmm, too complex filter, give up with jit compiler */
				goto out;
			}
			ilen = (void *) prog - (void *) temp;
			if (image) {
				if (unlikely(proglen + ilen > oldproglen)) {
					pr_err("bpf_jit_compile fatal error\n");
					kfree(addrs);
					module_memfree(image);
					return;
				}
				memcpy(image + proglen, temp, ilen);
			}
			proglen += ilen;
			addrs[i] = proglen;
			prog = temp;
		}
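
		/* One pass is complete: addrs[] now holds the exact offsets
		 * produced this pass, which the next pass uses to resolve
		 * branch targets until the total length converges.
		 */
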
		/* The last bpf instruction is always a RET:
		 * use it to give the cleanup instruction(s) addr.
		 */
		cleanup_addr = proglen - 8; /* jmpl; mov r_A, %o0; */
		if (seen_or_pass0 & SEEN_MEM)
			cleanup_addr -= 4; /* add %sp, X, %sp; */

		if (image) {
			if (proglen != oldproglen)
				pr_err("bpf_jit_compile proglen=%u != oldproglen=%u\n",
				       proglen, oldproglen);
			break;
		}
		if (proglen == oldproglen) {
			image = module_alloc(proglen);
			if (!image)
				goto out;
		}
		oldproglen = proglen;
	}
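
	/* Convergence: once two consecutive passes produce the same
	 * length, the image is allocated and one final pass writes the
	 * instructions into it, after which the if (image) test above
	 * breaks out of the loop.
	 */
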
	if (bpf_jit_enable > 1)
		bpf_jit_dump(flen, proglen, pass + 1, image);

	if (image) {
		bpf_flush_icache(image, image + proglen);
		fp->bpf_func = (void *)image;
		fp->jited = true;
	}
out:
	kfree(addrs);
	return;
}

void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_memfree(fp->bpf_func);

	bpf_prog_unlock_free(fp);
}