/*
 * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Optimization module for tcpdump intermediate representation.
 */
static const char rcsid[] _U_ =
    "@(#) $Header: /tcpdump/master/libpcap/optimize.c,v 1.91 2008-01-02 04:16:46 guy Exp $ (LBL)";
#include <pcap-stdinc.h>

#ifdef HAVE_SYS_BITYPES_H
#include <sys/bitypes.h>
#endif
#include <sys/types.h>

#ifdef HAVE_OS_PROTO_H

#if defined(MSDOS) && !defined(__DJGPP__)
extern int _w32_ffs (int mask);
#endif

#if defined(WIN32) && defined (_MSC_VER)
/*
 * Represents a deleted instruction.
 */

/*
 * Register numbers for use-def values.
 * 0 through BPF_MEMWORDS-1 represent the corresponding scratch memory
 * location.  A_ATOM is the accumulator and X_ATOM is the index
 * register.
 */
#define A_ATOM BPF_MEMWORDS
#define X_ATOM (BPF_MEMWORDS+1)

/*
 * This define is used to represent *both* the accumulator and
 * x register in use-def computations.
 * Currently, the use-def code assumes only one definition per instruction.
 */
#define AX_ATOM N_ATOMS
/*
 * A flag to indicate that further optimization is needed.
 * Iterative passes are continued until a given pass yields no
 * branch movement.
 */
/*
 * A block is marked if and only if its mark equals the current mark.
 * Rather than traverse the code array, marking each item, 'cur_mark'
 * is incremented.  This automatically makes each element unmarked.
 */
#define isMarked(p) ((p)->mark == cur_mark)
#define unMarkAll() cur_mark += 1
#define Mark(p) ((p)->mark = cur_mark)
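/*
 * Illustrative sketch (not part of the original file): the marking scheme
 * above is an epoch counter.  Bumping 'cur_mark' unmarks every block in
 * constant time, because a block only counts as marked while its 'mark'
 * field equals the current epoch.  The names below are hypothetical.
 */
#if 0
struct demo_block { int mark; };
static int demo_cur_mark;

static void
demo_marks(void)
{
	struct demo_block b = { 0 };

	demo_cur_mark += 1;		/* unMarkAll(): nothing is marked now */
	b.mark = demo_cur_mark;		/* Mark(&b) */
	/* isMarked(&b) is true here ... */
	demo_cur_mark += 1;		/* ... and false again after this */
}
#endif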
static void opt_init(struct block *);
static void opt_cleanup(void);

static void intern_blocks(struct block *);

static void find_inedges(struct block *);

static void opt_dump(struct block *);

struct block **blocks;
/*
 * A bit vector set representation of the dominators.
 * We round up the set size to the next power of two.
 */
static int nodewords;
static int edgewords;
struct block **levels;
#define BITS_PER_WORD (8*sizeof(bpf_u_int32))

/*
 * True if 'a' is in uset {p}.
 */
#define SET_MEMBER(p, a) \
((p)[(unsigned)(a) / BITS_PER_WORD] & (1 << ((unsigned)(a) % BITS_PER_WORD)))

#define SET_INSERT(p, a) \
(p)[(unsigned)(a) / BITS_PER_WORD] |= (1 << ((unsigned)(a) % BITS_PER_WORD))

/*
 * Delete 'a' from uset p.
 */
#define SET_DELETE(p, a) \
(p)[(unsigned)(a) / BITS_PER_WORD] &= ~(1 << ((unsigned)(a) % BITS_PER_WORD))
#define SET_INTERSECT(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register int _n = n;\
	while (--_n >= 0) *_x++ &= *_y++;\
}

#define SET_SUBTRACT(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register int _n = n;\
	while (--_n >= 0) *_x++ &=~ *_y++;\
}

#define SET_UNION(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register int _n = n;\
	while (--_n >= 0) *_x++ |= *_y++;\
}
static uset all_dom_sets;
static uset all_closure_sets;
static uset all_edge_sets;
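/*
 * Illustrative sketch (not part of the original file): how the SET_* macros
 * above operate on a 'uset' sized in BITS_PER_WORD chunks.  The set size
 * and element numbers here are made up.
 */
#if 0
static void
uset_demo(void)
{
	bpf_u_int32 set[2] = { 0, 0 };	/* room for 2 * BITS_PER_WORD members */

	SET_INSERT(set, 3);
	SET_INSERT(set, 40);		/* lands in the second word */
	if (SET_MEMBER(set, 3))
		;			/* true: 3 was inserted */
	SET_DELETE(set, 3);
	if (!SET_MEMBER(set, 3))
		;			/* true again: 3 was removed */
}
#endif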
#define MAX(a,b) ((a)>(b)?(a):(b))
find_levels_r(struct block *b)

	find_levels_r(JT(b));
	find_levels_r(JF(b));
	level = MAX(JT(b)->level, JF(b)->level) + 1;

	b->link = levels[level];
/*
 * Level graph.  The levels go from 0 at the leaves to
 * N_LEVELS at the root.  The levels[] array points to the
 * first node of the level list, whose elements are linked
 * with the 'link' field of the struct block.
 */
find_levels(struct block *root)

	memset((char *)levels, 0, n_blocks * sizeof(*levels));
/*
 * Find dominator relationships.
 * Assumes graph has been leveled.
 */
find_dom(struct block *root)

	/*
	 * Initialize sets to contain all nodes.
	 */
	i = n_blocks * nodewords;

	/* Root starts off empty. */
	for (i = nodewords; --i >= 0;)

	/* root->level is the highest level number found. */
	for (i = root->level; i >= 0; --i) {
		for (b = levels[i]; b; b = b->link) {
			SET_INSERT(b->dom, b->id);
			SET_INTERSECT(JT(b)->dom, b->dom, nodewords);
			SET_INTERSECT(JF(b)->dom, b->dom, nodewords);
propedom(struct edge *ep)

	SET_INSERT(ep->edom, ep->id);
	SET_INTERSECT(ep->succ->et.edom, ep->edom, edgewords);
	SET_INTERSECT(ep->succ->ef.edom, ep->edom, edgewords);
/*
 * Compute edge dominators.
 * Assumes graph has been leveled and predecessors established.
 */
find_edom(struct block *root)

	for (i = n_edges * edgewords; --i >= 0; )

	/* root->level is the highest level number found. */
	memset(root->et.edom, 0, edgewords * sizeof(*(uset)0));
	memset(root->ef.edom, 0, edgewords * sizeof(*(uset)0));
	for (i = root->level; i >= 0; --i) {
		for (b = levels[i]; b != 0; b = b->link) {
/*
 * Find the backwards transitive closure of the flow graph.  These sets
 * are backwards in the sense that we find the set of nodes that reach
 * a given node, not the set of nodes that can be reached by a node.
 *
 * Assumes graph has been leveled.
 */
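/*
 * Hypothetical illustration (not from the original source): in a diamond
 * where block A branches to B and C, and both B and C fall into D, the
 * backwards closure computed here gives closure(D) = {A, B, C, D} and
 * closure(A) = {A}, since every node's closure contains the node itself
 * plus everything that can reach it.
 */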
find_closure(struct block *root)

	/*
	 * Initialize sets to contain no nodes.
	 */
	memset((char *)all_closure_sets, 0,
	    n_blocks * nodewords * sizeof(*all_closure_sets));

	/* root->level is the highest level number found. */
	for (i = root->level; i >= 0; --i) {
		for (b = levels[i]; b; b = b->link) {
			SET_INSERT(b->closure, b->id);
			SET_UNION(JT(b)->closure, b->closure, nodewords);
			SET_UNION(JF(b)->closure, b->closure, nodewords);
/*
 * Return the register number that is used by 's'.  If A and X are both
 * used, return AX_ATOM.  If no register is used, return -1.
 *
 * The implementation should probably change to an array access.
 */
atomuse(struct stmt *s)

	register int c = s->code;

	switch (BPF_CLASS(c)) {

		return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
			(BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;

		return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
			(BPF_MODE(c) == BPF_MEM) ? s->k : -1;

		if (BPF_SRC(c) == BPF_X)

		return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
/*
 * Return the register number that is defined by 's'.  We assume that
 * a single stmt cannot define more than one register.  If no register
 * is defined, return -1.
 *
 * The implementation should probably change to an array access.
 */
atomdef(struct stmt *s)

	switch (BPF_CLASS(s->code)) {

		return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
/*
 * Compute the sets of registers used, defined, and killed by 'b'.
 *
 * "Used" means that a statement in 'b' uses the register before any
 * statement in 'b' defines it, i.e. it uses the value left in
 * that register by a predecessor block of this block.
 * "Defined" means that a statement in 'b' defines it.
 * "Killed" means that a statement in 'b' defines it before any
 * statement in 'b' uses it, i.e. it kills the value left in that
 * register by a predecessor block of this block.
 */
compute_local_ud(struct block *b)

	atomset def = 0, use = 0, kill = 0;

	for (s = b->stmts; s; s = s->next) {
		if (s->s.code == NOP)

		atom = atomuse(&s->s);
		if (atom == AX_ATOM) {
			if (!ATOMELEM(def, X_ATOM))
				use |= ATOMMASK(X_ATOM);
			if (!ATOMELEM(def, A_ATOM))
				use |= ATOMMASK(A_ATOM);
		}
		else if (atom < N_ATOMS) {
			if (!ATOMELEM(def, atom))
				use |= ATOMMASK(atom);
		}

		atom = atomdef(&s->s);
		if (!ATOMELEM(use, atom))
			kill |= ATOMMASK(atom);
		def |= ATOMMASK(atom);

	if (BPF_CLASS(b->s.code) == BPF_JMP) {
		/*
		 * XXX - what about RET?
		 */
		atom = atomuse(&b->s);
		if (atom == AX_ATOM) {
			if (!ATOMELEM(def, X_ATOM))
				use |= ATOMMASK(X_ATOM);
			if (!ATOMELEM(def, A_ATOM))
				use |= ATOMMASK(A_ATOM);
		}
		else if (atom < N_ATOMS) {
			if (!ATOMELEM(def, atom))
				use |= ATOMMASK(atom);
		}
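/*
 * Illustrative sketch (not part of the original file): the use/def/kill
 * bookkeeping above, applied by hand to a two-statement block
 *
 *	ld  M[3]	; reads scratch slot 3, writes A
 *	st  M[4]	; reads A, writes scratch slot 4
 *
 * Slot 3 ends up in 'use' (read before any local definition), slot 4 in
 * 'kill' (written before any local use), and A in 'def' only, since the
 * store reads it after it has been written.  The atom numbers and the plain
 * unsigned bitset below are stand-ins for the real ones.
 */
#if 0
static void
local_ud_demo(void)
{
	unsigned int def = 0, use = 0, kill = 0;
	int a_atom = 16;			/* stand-in for A_ATOM */

	/* "ld M[3]": slot 3 is read while not yet in def -> used; A defined */
	if (!(def & (1 << 3)))
		use |= (1 << 3);
	def |= (1 << a_atom);

	/* "st M[4]": A is read but already in def, so it is not added to use;
	 * slot 4 is written while not yet in use -> killed */
	if (!(use & (1 << 4)))
		kill |= (1 << 4);
	def |= (1 << 4);
}
#endif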
/*
 * Assume graph is already leveled.
 */
find_ud(struct block *root)

	/*
	 * root->level is the highest level number found;
	 * count down from there.
	 */
	maxlevel = root->level;
	for (i = maxlevel; i >= 0; --i)
		for (p = levels[i]; p; p = p->link) {

	for (i = 1; i <= maxlevel; ++i) {
		for (p = levels[i]; p; p = p->link) {
			p->out_use |= JT(p)->in_use | JF(p)->in_use;
			p->in_use |= p->out_use &~ p->kill;
/*
 * These data structures are used in a Cocke and Schwartz style
 * value numbering scheme.  Since the flowgraph is acyclic,
 * exit values can be propagated from a node's predecessors
 * provided it is uniquely defined.
 */
	struct valnode *next;

static struct valnode *hashtbl[MODULUS];
/* Integer constants mapped with the load immediate opcode. */
#define K(i) F(BPF_LD|BPF_IMM|BPF_W, i, 0L)
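/*
 * Illustrative sketch (not part of the original file): the essence of the
 * value-numbering lookup that F() implements below.  Each distinct
 * (opcode, operand value number, operand value number) triple is interned
 * once and handed a small integer, so two computations of the same quantity
 * receive equal value numbers and can be recognized as redundant.  The
 * fixed-size table and linear scan here are simplifications of the real
 * hash chains.
 */
#if 0
#define DEMO_SLOTS 64

struct demo_valnode { int code, v0, v1, val, used; };
static struct demo_valnode demo_tbl[DEMO_SLOTS];
static int demo_next_val = 1;

static int
demo_F(int code, int v0, int v1)
{
	int i;

	for (i = 0; i < DEMO_SLOTS; i++) {
		struct demo_valnode *p = &demo_tbl[i];

		if (p->used && p->code == code && p->v0 == v0 && p->v1 == v1)
			return p->val;		/* seen before: same number */
		if (!p->used) {
			p->used = 1;		/* new triple: assign a number */
			p->code = code;
			p->v0 = v0;
			p->v1 = v1;
			p->val = demo_next_val++;
			return p->val;
		}
	}
	return 0;				/* table full (demo only) */
}
#endif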
struct vmapinfo *vmap;
struct valnode *vnode_base;
struct valnode *next_vnode;

	next_vnode = vnode_base;
	memset((char *)vmap, 0, maxval * sizeof(*vmap));
	memset((char *)hashtbl, 0, sizeof hashtbl);
/* Because we really don't have an IR, this stuff is a little messy. */
F(int code, int v0, int v1)

	hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);

	for (p = hashtbl[hash]; p; p = p->next)
		if (p->code == code && p->v0 == v0 && p->v1 == v1)

	if (BPF_MODE(code) == BPF_IMM &&
	    (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
		vmap[val].const_val = v0;
		vmap[val].is_const = 1;

	p->next = hashtbl[hash];
vstore(struct stmt *s, int *valp, int newval, int alter)

	if (alter && *valp == newval)
fold_op(struct stmt *s, int v0, int v1)

	a = vmap[v0].const_val;
	b = vmap[v1].const_val;

	switch (BPF_OP(s->code)) {

		bpf_error("division by zero");

	s->code = BPF_LD|BPF_IMM;
static inline struct slist *
this_op(struct slist *s)

	while (s != 0 && s->s.code == NOP)
opt_not(struct block *b)

	struct block *tmp = JT(b);
opt_peep(struct block *b)

	struct slist *next, *last;

	for (/*empty*/; /*empty*/; s = next) {

			break;	/* nothing left in the block */

		/*
		 * Find the next real instruction after that one
		 * (skipping nops).
		 */
		next = this_op(s->next);
			break;	/* no next instruction */

		/*
		 * st  M[k]	-->	st  M[k]
		 * ldx M[k]		tax
		 */
		if (s->s.code == BPF_ST &&
		    next->s.code == (BPF_LDX|BPF_MEM) &&
		    s->s.k == next->s.k) {
			next->s.code = BPF_MISC|BPF_TAX;
		if (s->s.code == (BPF_LD|BPF_IMM) &&
		    next->s.code == (BPF_MISC|BPF_TAX)) {
			s->s.code = BPF_LDX|BPF_IMM;
			next->s.code = BPF_MISC|BPF_TXA;
		/*
		 * This is an ugly special case, but it happens
		 * when you say tcp[k] or udp[k] where k is a constant.
		 */
		if (s->s.code == (BPF_LD|BPF_IMM)) {
			struct slist *add, *tax, *ild;

			/*
			 * Check that X isn't used on exit from this
			 * block (which the optimizer might cause).
			 * We know the code generator won't generate
			 * any local dependencies.
			 */
			if (ATOMELEM(b->out_use, X_ATOM))

			/*
			 * Check that the instruction following the ldi
			 * is an addx, or it's an ldxms with an addx
			 * following it (with 0 or more nops between the
			 * ldxms and addx).
			 */
			if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))

			add = this_op(next->next);
			if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))

			/*
			 * Check that a tax follows that (with 0 or more
			 * nops between them).
			 */
			tax = this_op(add->next);
			if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))

			/*
			 * Check that an ild follows that (with 0 or more
			 * nops between them).
			 */
			ild = this_op(tax->next);
			if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
			    BPF_MODE(ild->s.code) != BPF_IND)

			/*
			 * We want to turn this sequence:
			 *
			 * (005) ldxms   [14]		{next}  -- optional
			 * (008) ild     [x+0]		{ild}
			 *
			 * into this sequence:
			 *
			 * XXX We need to check that X is not
			 * subsequently used, because we want to change
			 * what'll be in it after this sequence.
			 *
			 * We know we can eliminate the accumulator
			 * modifications earlier in the sequence since
			 * it is defined by the last stmt of this sequence
			 * (i.e., the last statement of the sequence loads
			 * a value into the accumulator, so we can eliminate
			 * earlier operations on the accumulator).
			 */
	/*
	 * If the comparison at the end of a block is an equality
	 * comparison against a constant, and nobody uses the value
	 * we leave in the A register at the end of a block, and
	 * the operation preceding the comparison is an arithmetic
	 * operation, we can sometimes optimize it away.
	 */
	if (b->s.code == (BPF_JMP|BPF_JEQ|BPF_K) &&
	    !ATOMELEM(b->out_use, A_ATOM)) {
		/*
		 * We can optimize away certain subtractions of the
		 * X register.
		 */
		if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X)) {
			val = b->val[X_ATOM];
			if (vmap[val].is_const) {
				/*
				 * If we have a subtract to do a comparison,
				 * and the X register is a known constant,
				 * we can merge this value into the
				 * comparison:
				 */
				b->s.k += vmap[val].const_val;
			} else if (b->s.k == 0) {
				/*
				 * If the X register isn't a constant,
				 * and the comparison in the test is
				 * against 0, we can compare with the
				 * X register, instead:
				 */
				b->s.code = BPF_JMP|BPF_JEQ|BPF_X;
		/*
		 * Likewise, a constant subtract can be simplified:
		 *
		 *	jeq #y	->	jeq #(x+y)
		 */
		else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K)) {

		/*
		 * And, similarly, a constant AND can be simplified
		 * if we're testing against 0, i.e.:
		 */
		else if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
			b->s.code = BPF_JMP|BPF_K|BPF_JSET;
	/*
	 * jset #ffffffff ->	always
	 */
	if (b->s.code == (BPF_JMP|BPF_K|BPF_JSET)) {
		if (b->s.k == 0xffffffff)
	/*
	 * If we're comparing against the index register, and the index
	 * register is a known constant, we can just compare against that
	 * constant.
	 */
	val = b->val[X_ATOM];
	if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_X) {
		bpf_int32 v = vmap[val].const_val;
	/*
	 * If the accumulator is a known constant, we can compute the
	 * comparison result.
	 */
	val = b->val[A_ATOM];
	if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
		bpf_int32 v = vmap[val].const_val;
		switch (BPF_OP(b->s.code)) {

			v = (unsigned)v > b->s.k;

			v = (unsigned)v >= b->s.k;
/*
 * Compute the symbolic value of the expression in 's', and update
 * anything it defines in the value table 'val'.  If 'alter' is true,
 * do various optimizations.  This code would be cleaner if symbolic
 * evaluation and code transformations weren't folded together.
 */
opt_stmt(struct stmt *s, int val[], int alter)

	case BPF_LD|BPF_ABS|BPF_W:
	case BPF_LD|BPF_ABS|BPF_H:
	case BPF_LD|BPF_ABS|BPF_B:
		v = F(s->code, s->k, 0L);
		vstore(s, &val[A_ATOM], v, alter);

	case BPF_LD|BPF_IND|BPF_W:
	case BPF_LD|BPF_IND|BPF_H:
	case BPF_LD|BPF_IND|BPF_B:
		if (alter && vmap[v].is_const) {
			s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
			s->k += vmap[v].const_val;
			v = F(s->code, s->k, 0L);
		v = F(s->code, s->k, v);
		vstore(s, &val[A_ATOM], v, alter);

		v = F(s->code, 0L, 0L);
		vstore(s, &val[A_ATOM], v, alter);

		vstore(s, &val[A_ATOM], v, alter);

	case BPF_LDX|BPF_IMM:
		vstore(s, &val[X_ATOM], v, alter);

	case BPF_LDX|BPF_MSH|BPF_B:
		v = F(s->code, s->k, 0L);
		vstore(s, &val[X_ATOM], v, alter);

	case BPF_ALU|BPF_NEG:
		if (alter && vmap[val[A_ATOM]].is_const) {
			s->code = BPF_LD|BPF_IMM;
			s->k = -vmap[val[A_ATOM]].const_val;
			val[A_ATOM] = K(s->k);
		val[A_ATOM] = F(s->code, val[A_ATOM], 0L);
	case BPF_ALU|BPF_ADD|BPF_K:
	case BPF_ALU|BPF_SUB|BPF_K:
	case BPF_ALU|BPF_MUL|BPF_K:
	case BPF_ALU|BPF_DIV|BPF_K:
	case BPF_ALU|BPF_AND|BPF_K:
	case BPF_ALU|BPF_OR|BPF_K:
	case BPF_ALU|BPF_LSH|BPF_K:
	case BPF_ALU|BPF_RSH|BPF_K:
		op = BPF_OP(s->code);

			/* don't optimize away "sub #0"
			 * as it may be needed later to
			 * fixup the generated math code */
			    op == BPF_LSH || op == BPF_RSH ||
			if (op == BPF_MUL || op == BPF_AND) {
				s->code = BPF_LD|BPF_IMM;
				val[A_ATOM] = K(s->k);

			if (vmap[val[A_ATOM]].is_const) {
				fold_op(s, val[A_ATOM], K(s->k));
				val[A_ATOM] = K(s->k);
			val[A_ATOM] = F(s->code, val[A_ATOM], K(s->k));

	case BPF_ALU|BPF_ADD|BPF_X:
	case BPF_ALU|BPF_SUB|BPF_X:
	case BPF_ALU|BPF_MUL|BPF_X:
	case BPF_ALU|BPF_DIV|BPF_X:
	case BPF_ALU|BPF_AND|BPF_X:
	case BPF_ALU|BPF_OR|BPF_X:
	case BPF_ALU|BPF_LSH|BPF_X:
	case BPF_ALU|BPF_RSH|BPF_X:
		op = BPF_OP(s->code);
		if (alter && vmap[val[X_ATOM]].is_const) {
			if (vmap[val[A_ATOM]].is_const) {
				fold_op(s, val[A_ATOM], val[X_ATOM]);
				val[A_ATOM] = K(s->k);
			s->code = BPF_ALU|BPF_K|op;
			s->k = vmap[val[X_ATOM]].const_val;
				F(s->code, val[A_ATOM], K(s->k));
		/*
		 * Check if we're doing something to an accumulator
		 * that is 0, and simplify.  This may not seem like
		 * much of a simplification but it could open up further
		 * optimizations.
		 * XXX We could also check for mul by 1, etc.
		 */
		if (alter && vmap[val[A_ATOM]].is_const
		    && vmap[val[A_ATOM]].const_val == 0) {
			if (op == BPF_ADD || op == BPF_OR) {
				s->code = BPF_MISC|BPF_TXA;
				vstore(s, &val[A_ATOM], val[X_ATOM], alter);
			}
			else if (op == BPF_MUL || op == BPF_DIV ||
			    op == BPF_AND || op == BPF_LSH || op == BPF_RSH) {
				s->code = BPF_LD|BPF_IMM;
				vstore(s, &val[A_ATOM], K(s->k), alter);
			}
			else if (op == BPF_NEG) {
		val[A_ATOM] = F(s->code, val[A_ATOM], val[X_ATOM]);

	case BPF_MISC|BPF_TXA:
		vstore(s, &val[A_ATOM], val[X_ATOM], alter);

	case BPF_LD|BPF_MEM:
		if (alter && vmap[v].is_const) {
			s->code = BPF_LD|BPF_IMM;
			s->k = vmap[v].const_val;
		vstore(s, &val[A_ATOM], v, alter);

	case BPF_MISC|BPF_TAX:
		vstore(s, &val[X_ATOM], val[A_ATOM], alter);

	case BPF_LDX|BPF_MEM:
		if (alter && vmap[v].is_const) {
			s->code = BPF_LDX|BPF_IMM;
			s->k = vmap[v].const_val;
		vstore(s, &val[X_ATOM], v, alter);

		vstore(s, &val[s->k], val[A_ATOM], alter);

		vstore(s, &val[s->k], val[X_ATOM], alter);
deadstmt(register struct stmt *s, register struct stmt *last[])

	if (atom == AX_ATOM) {

	last[atom]->code = NOP;
opt_deadstores(register struct block *b)

	register struct slist *s;
	struct stmt *last[N_ATOMS];

	memset((char *)last, 0, sizeof last);

	for (s = b->stmts; s != 0; s = s->next)
		deadstmt(&s->s, last);
	deadstmt(&b->s, last);

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (last[atom] && !ATOMELEM(b->out_use, atom)) {
			last[atom]->code = NOP;
opt_blk(struct block *b, int do_stmts)

	bpf_int32 aval, xval;

	for (s = b->stmts; s && s->next; s = s->next)
		if (BPF_CLASS(s->s.code) == BPF_JMP) {

	/*
	 * Initialize the atom values.
	 */

		/*
		 * We have no predecessors, so everything is undefined
		 * upon entry to this block.
		 */
		memset((char *)b->val, 0, sizeof(b->val));

		/*
		 * Inherit values from our predecessors.
		 *
		 * First, get the values from the predecessor along the
		 * first edge leading to this node.
		 */
		memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
		/*
		 * Now look at all the other nodes leading to this node.
		 * If, for the predecessor along that edge, a register
		 * has a different value from the one we have (i.e.,
		 * control paths are merging, and the merging paths
		 * assign different values to that register), give the
		 * register the undefined value of 0.
		 */
		while ((p = p->next) != NULL) {
			for (i = 0; i < N_ATOMS; ++i)
				if (b->val[i] != p->pred->val[i])

	aval = b->val[A_ATOM];
	xval = b->val[X_ATOM];
	for (s = b->stmts; s; s = s->next)
		opt_stmt(&s->s, b->val, do_stmts);
	/*
	 * This is a special case: if we don't use anything from this
	 * block, and we load the accumulator or index register with a
	 * value that is already there, or if this block is a return,
	 * eliminate all the statements.
	 *
	 * XXX - what if it does a store?
	 *
	 * XXX - why does it matter whether we use anything from this
	 * block?  If the accumulator or index register doesn't change
	 * its value, isn't that OK even if we use that value?
	 *
	 * XXX - if we load the accumulator with a different value,
	 * and the block ends with a conditional branch, we obviously
	 * can't eliminate it, as the branch depends on that value.
	 * For the index register, the conditional branch only depends
	 * on the index register value if the test is against the index
	 * register value rather than a constant; if nothing uses the
	 * value we put into the index register, and we're not testing
	 * against the index register's value, and there aren't any
	 * other problems that would keep us from eliminating this
	 * block, can we eliminate it?
	 */
	    ((b->out_use == 0 && aval != 0 && b->val[A_ATOM] == aval &&
	      xval != 0 && b->val[X_ATOM] == xval) ||
	     BPF_CLASS(b->s.code) == BPF_RET)) {
		if (b->stmts != 0) {

	/*
	 * Set up values for branch optimizer.
	 */
	if (BPF_SRC(b->s.code) == BPF_K)
		b->oval = K(b->s.k);
	else
		b->oval = b->val[X_ATOM];
	b->et.code = b->s.code;
	b->ef.code = -b->s.code;
/*
 * Return true if any register that is used on exit from 'succ' has
 * an exit value that is different from the corresponding exit value
 * from 'b'.
 */
use_conflict(struct block *b, struct block *succ)

	atomset use = succ->out_use;

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (ATOMELEM(use, atom))
			if (b->val[atom] != succ->val[atom])
static struct block *
fold_edge(struct block *child, struct edge *ep)

	int aval0, aval1, oval0, oval1;
	int code = ep->code;

	if (child->s.code != code)

	aval0 = child->val[A_ATOM];
	oval0 = child->oval;
	aval1 = ep->pred->val[A_ATOM];
	oval1 = ep->pred->oval;

		/*
		 * The operands of the branch instructions are
		 * identical, so the result is true if a true
		 * branch was taken to get here, otherwise false.
		 */
		return sense ? JT(child) : JF(child);

	if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
		/*
		 * At this point, we only know the comparison if we
		 * came down the true branch, and it was an equality
		 * comparison with a constant.
		 *
		 * I.e., if we came down the true branch, and the branch
		 * was an equality comparison with a constant, we know the
		 * accumulator contains that constant.  If we came down
		 * the false branch, or the comparison wasn't with a
		 * constant, we don't know what was in the accumulator.
		 *
		 * We rely on the fact that distinct constants have distinct
		 * value numbers.
		 */
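		/*
		 * Hypothetical example (not from the original source): if the
		 * dominating edge is the true branch of "jeq #0x800", the
		 * accumulator is known to hold 0x800; a successor block that
		 * tests "jeq #0x86dd" against the same accumulator value
		 * number therefore must fail, so the edge can be redirected
		 * to that block's false branch.
		 */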
opt_j(struct edge *ep)

	register struct block *target;

	if (JT(ep->succ) == 0)

	if (JT(ep->succ) == JF(ep->succ)) {
		/*
		 * Common branch targets can be eliminated, provided
		 * there is no data dependency.
		 */
		if (!use_conflict(ep->pred, ep->succ->et.succ)) {
			ep->succ = JT(ep->succ);

	/*
	 * For each edge dominator that matches the successor of this
	 * edge, promote the edge successor to its grandchild.
	 *
	 * XXX We violate the set abstraction here in favor of a reasonably
	 * efficient loop.
	 */
	for (i = 0; i < edgewords; ++i) {
		register bpf_u_int32 x = ep->edom[i];

			k += i * BITS_PER_WORD;

			target = fold_edge(ep->succ, edges[k]);
			/*
			 * Check that there is no data dependency between
			 * nodes that will be violated if we move the edge.
			 */
			if (target != 0 && !use_conflict(ep->pred, target)) {
				if (JT(target) != 0)
					/*
					 * Start over unless we hit a leaf.
					 */
or_pullup(struct block *b)

	struct block **diffp, **samep;

	/*
	 * Make sure each predecessor loads the same value.
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])

	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);
	else
		diffp = &JF(b->in_edges->pred);

		if (JT(*diffp) != JT(b))

		if (!SET_MEMBER((*diffp)->dom, b->id))

		if ((*diffp)->val[A_ATOM] != val)

		diffp = &JF(*diffp);

	samep = &JF(*diffp);

		if (JT(*samep) != JT(b))

		if (!SET_MEMBER((*samep)->dom, b->id))

		if ((*samep)->val[A_ATOM] == val)

		/* XXX Need to check that there are no data dependencies
		   between dp0 and dp1.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JF(*samep);

	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])

	/* Pull up the node. */

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 */
	for (ep = b->in_edges; ep != 0; ep = ep->next) {
		if (JT(ep->pred) == b)
			JT(ep->pred) = pull;
		else
			JF(ep->pred) = pull;
and_pullup(struct block *b)

	struct block **diffp, **samep;

	/*
	 * Make sure each predecessor loads the same value.
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])

	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);
	else
		diffp = &JF(b->in_edges->pred);

		if (JF(*diffp) != JF(b))

		if (!SET_MEMBER((*diffp)->dom, b->id))

		if ((*diffp)->val[A_ATOM] != val)

		diffp = &JT(*diffp);

	samep = &JT(*diffp);

		if (JF(*samep) != JF(b))

		if (!SET_MEMBER((*samep)->dom, b->id))

		if ((*samep)->val[A_ATOM] == val)

		/* XXX Need to check that there are no data dependencies
		   between diffp and samep.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JT(*samep);

	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])

	/* Pull up the node. */

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 */
	for (ep = b->in_edges; ep != 0; ep = ep->next) {
		if (JT(ep->pred) == b)
			JT(ep->pred) = pull;
		else
			JF(ep->pred) = pull;
opt_blks(struct block *root, int do_stmts)

	maxlevel = root->level;

	for (i = maxlevel; i >= 0; --i)
		for (p = levels[i]; p; p = p->link)
			opt_blk(p, do_stmts);

		/*
		 * No point trying to move branches; it can't possibly
		 * make a difference at this point.
		 */

	for (i = 1; i <= maxlevel; ++i) {
		for (p = levels[i]; p; p = p->link) {

	for (i = 1; i <= maxlevel; ++i) {
		for (p = levels[i]; p; p = p->link) {
link_inedge(struct edge *parent, struct block *child)

	parent->next = child->in_edges;
	child->in_edges = parent;
find_inedges(struct block *root)

	for (i = 0; i < n_blocks; ++i)
		blocks[i]->in_edges = 0;

	/*
	 * Traverse the graph, adding each edge to the predecessor
	 * list of its successors.  Skip the leaves (i.e. level 0).
	 */
	for (i = root->level; i > 0; --i) {
		for (b = levels[i]; b != 0; b = b->link) {
			link_inedge(&b->et, JT(b));
			link_inedge(&b->ef, JF(b));
opt_root(struct block **b)

	struct slist *tmp, *s;

	while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))

	/*
	 * If the root node is a return, then there is no
	 * point executing any statements (since the bpf machine
	 * has no side effects).
	 */
	if (BPF_CLASS((*b)->s.code) == BPF_RET)
opt_loop(struct block *root, int do_stmts)

	printf("opt_loop(root, %d) begin\n", do_stmts);

		opt_blks(root, do_stmts);

	printf("opt_loop(root, %d) bottom, done=%d\n", do_stmts, done);
/*
 * Optimize the filter code in its dag representation.
 */
bpf_optimize(struct block **rootp)

	intern_blocks(root);

	printf("after intern_blocks()\n");

	printf("after opt_root()\n");
make_marks(struct block *p)

	if (BPF_CLASS(p->s.code) != BPF_RET) {

/*
 * Mark code array such that isMarked(i) is true
 * only for nodes that are alive.
 */
mark_code(struct block *p)
/*
 * True iff the two stmt lists load the same value from the packet into
 * the accumulator.
 */
eq_slist(struct slist *x, struct slist *y)

	while (x && x->s.code == NOP)
	while (y && y->s.code == NOP)

	if (x->s.code != y->s.code || x->s.k != y->s.k)
eq_blk(struct block *b0, struct block *b1)

	if (b0->s.code == b1->s.code &&
	    b0->s.k == b1->s.k &&
	    b0->et.succ == b1->et.succ &&
	    b0->ef.succ == b1->ef.succ)
		return eq_slist(b0->stmts, b1->stmts);
intern_blocks(struct block *root)

	int done1;	/* don't shadow global */

	for (i = 0; i < n_blocks; ++i)
		blocks[i]->link = 0;

	for (i = n_blocks - 1; --i >= 0; ) {
		if (!isMarked(blocks[i]))
		for (j = i + 1; j < n_blocks; ++j) {
			if (!isMarked(blocks[j]))
			if (eq_blk(blocks[i], blocks[j])) {
				blocks[i]->link = blocks[j]->link ?
					blocks[j]->link : blocks[j];

	for (i = 0; i < n_blocks; ++i) {
			JT(p) = JT(p)->link;
			JF(p) = JF(p)->link;
	free((void *)vnode_base);
	free((void *)edges);
	free((void *)space);
	free((void *)levels);
	free((void *)blocks);
/*
 * Return the number of stmts in 's'.
 */
slength(struct slist *s)

	for (; s; s = s->next)
		if (s->s.code != NOP)
/*
 * Return the number of nodes reachable by 'p'.
 * All nodes should be initially unmarked.
 */
count_blocks(struct block *p)

	if (p == 0 || isMarked(p))
	return count_blocks(JT(p)) + count_blocks(JF(p)) + 1;
/*
 * Do a depth first search on the flow graph, numbering the
 * basic blocks, and entering them into the 'blocks' array.
 */
number_blks_r(struct block *p)

	if (p == 0 || isMarked(p))

	number_blks_r(JT(p));
	number_blks_r(JF(p));
/*
 * Return the number of stmts in the flowgraph reachable by 'p'.
 * The nodes should be unmarked before calling.
 *
 * Note that "stmts" means "instructions", and that this includes:
 *
 *	side-effect statements in 'p' (slength(p->stmts));
 *
 *	statements in the true branch from 'p' (count_stmts(JT(p)));
 *
 *	statements in the false branch from 'p' (count_stmts(JF(p)));
 *
 *	the conditional jump itself (1);
 *
 *	an extra long jump if the true branch requires it (p->longjt);
 *
 *	an extra long jump if the false branch requires it (p->longjf).
 */
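/*
 * Hypothetical worked example (not from the original source): a node with
 * three side-effect statements, a conditional jump, no long jumps, and
 * branches leading to subtrees of 4 and 2 instructions contributes
 * 3 + (4 + 2) + 1 + 0 + 0 = 10 instructions to the total.
 */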
count_stmts(struct block *p)

	if (p == 0 || isMarked(p))
	n = count_stmts(JT(p)) + count_stmts(JF(p));
	return slength(p->stmts) + n + 1 + p->longjt + p->longjf;
/*
 * Allocate memory.  All allocation is done before optimization
 * is begun.  A linear bound on the size of all data structures is computed
 * from the total number of blocks and/or statements.
 */
opt_init(struct block *root)

	int i, n, max_stmts;

	/*
	 * First, count the blocks, so we can malloc an array to map
	 * block number to block.  Then, put the blocks into the array.
	 */
	n = count_blocks(root);
	blocks = (struct block **)calloc(n, sizeof(*blocks));
		bpf_error("malloc");
	number_blks_r(root);

	n_edges = 2 * n_blocks;
	edges = (struct edge **)calloc(n_edges, sizeof(*edges));
		bpf_error("malloc");

	/*
	 * The number of levels is bounded by the number of nodes.
	 */
	levels = (struct block **)calloc(n_blocks, sizeof(*levels));
		bpf_error("malloc");

	edgewords = n_edges / (8 * sizeof(bpf_u_int32)) + 1;
	nodewords = n_blocks / (8 * sizeof(bpf_u_int32)) + 1;

	space = (bpf_u_int32 *)malloc(2 * n_blocks * nodewords * sizeof(*space)
				 + n_edges * edgewords * sizeof(*space));
		bpf_error("malloc");
	for (i = 0; i < n; ++i) {

	all_closure_sets = p;
	for (i = 0; i < n; ++i) {
		blocks[i]->closure = p;

	for (i = 0; i < n; ++i) {
		register struct block *b = blocks[i];

		b->ef.id = n_blocks + i;
		edges[n_blocks + i] = &b->ef;

	for (i = 0; i < n; ++i)
		max_stmts += slength(blocks[i]->stmts) + 1;
	/*
	 * We allocate at most 3 value numbers per statement,
	 * so this is an upper bound on the number of valnodes
	 * we'll need.
	 */
	maxval = 3 * max_stmts;
	vmap = (struct vmapinfo *)calloc(maxval, sizeof(*vmap));
	vnode_base = (struct valnode *)calloc(maxval, sizeof(*vnode_base));
	if (vmap == NULL || vnode_base == NULL)
		bpf_error("malloc");
/*
 * Some pointers used to convert the basic block form of the code,
 * into the array form that BPF requires.  'fstart' will point to
 * the malloc'd array while 'ftail' is used during the recursive traversal.
 */
static struct bpf_insn *fstart;
static struct bpf_insn *ftail;
/*
 * Returns true if successful.  Returns false if a branch has
 * an offset that is too large.  If so, we have marked that
 * branch so that on a subsequent iteration, it will be treated
 * properly.
 */
convert_code_r(struct block *p)

	struct bpf_insn *dst;
	int extrajmps;		/* number of extra jumps inserted */
	struct slist **offset = NULL;

	if (p == 0 || isMarked(p))

	if (convert_code_r(JF(p)) == 0)
	if (convert_code_r(JT(p)) == 0)

	slen = slength(p->stmts);
	dst = ftail -= (slen + 1 + p->longjt + p->longjf);
		/* inflate length by any extra jumps */

	p->offset = dst - fstart;

	/* generate offset[] for convenience */
	offset = (struct slist **)calloc(slen, sizeof(struct slist *));
		bpf_error("not enough core");

	for (off = 0; off < slen && src; off++) {
		printf("off=%d src=%x\n", off, src);
	for (src = p->stmts; src; src = src->next) {
		if (src->s.code == NOP)

		dst->code = (u_short)src->s.code;

		/* fill block-local relative jump */
		if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) {
			if (src->s.jt || src->s.jf) {
				bpf_error("illegal jmp destination");

		if (off == slen - 2)	/*???*/

			const char *ljerr = "%s for block-local relative jump: off=%d";

			printf("code=%x off=%d %x %x\n", src->s.code,
				off, src->s.jt, src->s.jf);

			if (!src->s.jt || !src->s.jf) {
				bpf_error(ljerr, "no jmp destination", off);

			for (i = 0; i < slen; i++) {
				if (offset[i] == src->s.jt) {
					bpf_error(ljerr, "multiple matches", off);

					dst->jt = i - off - 1;

				if (offset[i] == src->s.jf) {
					bpf_error(ljerr, "multiple matches", off);

					dst->jf = i - off - 1;

				bpf_error(ljerr, "no destination found", off);

	bids[dst - fstart] = p->id + 1;
	dst->code = (u_short)p->s.code;

	off = JT(p)->offset - (p->offset + slen) - 1;
		/* offset too large for branch, must add a jump */
		if (p->longjt == 0) {
			/* mark this instruction and retry */

		/* branch if T to following jump */
		dst->jt = extrajmps;
		dst[extrajmps].code = BPF_JMP|BPF_JA;
		dst[extrajmps].k = off - extrajmps;

	off = JF(p)->offset - (p->offset + slen) - 1;
		/* offset too large for branch, must add a jump */
		if (p->longjf == 0) {
			/* mark this instruction and retry */

		/* branch if F to following jump */
		/* if two jumps are inserted, F goes to second one */
		dst->jf = extrajmps;
		dst[extrajmps].code = BPF_JMP|BPF_JA;
		dst[extrajmps].k = off - extrajmps;
/*
 * Convert flowgraph intermediate representation to the
 * BPF array representation.  Set *lenp to the number of instructions.
 *
 * This routine does *NOT* leak the memory pointed to by fp.  It *must
 * not* do free(fp) before returning fp; doing so would make no sense,
 * as the BPF array pointed to by the return value of icode_to_fcode()
 * must be valid - it's being returned for use in a bpf_program structure.
 *
 * If it appears that icode_to_fcode() is leaking, the problem is that
 * the program using pcap_compile() is failing to free the memory in
 * the BPF program when it's done - the leak is in the program, not in
 * the routine that happens to be allocating the memory.  (By analogy, if
 * a program calls fopen() without ever calling fclose() on the FILE *,
 * it will leak the FILE structure; the leak is not in fopen(), it's in
 * the program.)  Change the program to use pcap_freecode() when it's
 * done with the filter program.  See the pcap man page.
 */
icode_to_fcode(struct block *root, u_int *lenp)

	struct bpf_insn *fp;

	/*
	 * Loop doing convert_code_r() until no branches remain
	 * with too-large offsets.
	 */

		n = *lenp = count_stmts(root);

		fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
			bpf_error("malloc");
		memset((char *)fp, 0, sizeof(*fp) * n);

		if (convert_code_r(root))
/*
 * Make a copy of a BPF program and put it in the "fcode" member of
 * a "pcap_t".
 *
 * If we fail to allocate memory for the copy, fill in the "errbuf"
 * member of the "pcap_t" with an error message, and return -1;
 * otherwise, return 0.
 */
install_bpf_program(pcap_t *p, struct bpf_program *fp)

	/*
	 * Validate the program.
	 */
	if (!bpf_validate(fp->bf_insns, fp->bf_len)) {
		snprintf(p->errbuf, sizeof(p->errbuf),
		    "BPF program is not valid");

	/*
	 * Free up any already installed program.
	 */
	pcap_freecode(&p->fcode);

	prog_size = sizeof(*fp->bf_insns) * fp->bf_len;
	p->fcode.bf_len = fp->bf_len;
	p->fcode.bf_insns = (struct bpf_insn *)malloc(prog_size);
	if (p->fcode.bf_insns == NULL) {
		snprintf(p->errbuf, sizeof(p->errbuf),
		    "malloc: %s", pcap_strerror(errno));

	memcpy(p->fcode.bf_insns, fp->bf_insns, prog_size);
opt_dump(struct block *root)

	struct bpf_program f;

	memset(bids, 0, sizeof bids);
	f.bf_insns = icode_to_fcode(root, &f.bf_len);

	free((char *)f.bf_insns);