/*
 * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
/*
 * Optimization module for tcpdump intermediate representation.
 */
static const char rcsid[] _U_ =
    "@(#) $Header: /tcpdump/master/libpcap/optimize.c,v 1.90.2.1 2008/01/02 04:22:16 guy Exp $ (LBL)";
#ifdef HAVE_OS_PROTO_H
#include "os-proto.h"
#endif
#if defined(MSDOS) && !defined(__DJGPP__)
extern int _w32_ffs (int mask);
#endif
#if defined(WIN32) && defined (_MSC_VER)
int ffs(int mask);
#endif
/*
 * Represents a deleted instruction.
 */
#define NOP -1
/*
 * Register numbers for use-def values.
 * 0 through BPF_MEMWORDS-1 represent the corresponding scratch memory
 * location.  A_ATOM is the accumulator and X_ATOM is the index
 * register.
 */
#define A_ATOM BPF_MEMWORDS
#define X_ATOM (BPF_MEMWORDS+1)
/*
 * This define is used to represent *both* the accumulator and
 * x register in use-def computations.
 * Currently, the use-def code assumes only one definition per instruction.
 */
#define AX_ATOM N_ATOMS
/*
 * A flag to indicate that further optimization is needed.
 * Iterative passes are continued until a given pass yields no
 * branch movement.
 */
static int done;
89 * A block is marked if only if its mark equals the current mark.
90 * Rather than traverse the code array, marking each item, 'cur_mark' is
91 * incremented. This automatically makes each element unmarked.
94 #define isMarked(p) ((p)->mark == cur_mark)
95 #define unMarkAll() cur_mark += 1
96 #define Mark(p) ((p)->mark = cur_mark)
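
/*
 * Illustrative sketch (not in the original source): a depth-first walk
 * using the marking macros above.  unMarkAll() is O(1) because bumping
 * cur_mark invalidates every earlier Mark() at once; no pass over the
 * blocks array is needed.
 */
#if 0
static void
example_walk(struct block *b)
{
	if (b == 0 || isMarked(b))
		return;		/* null or already visited in this pass */
	Mark(b);
	example_walk(JT(b));	/* true branch */
	example_walk(JF(b));	/* false branch */
}
/* caller: unMarkAll(); example_walk(root); */
#endif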
static void opt_init(struct block *);
static void opt_cleanup(void);

static void make_marks(struct block *);
static void mark_code(struct block *);

static void intern_blocks(struct block *);

static int eq_slist(struct slist *, struct slist *);

static void find_levels_r(struct block *);

static void find_levels(struct block *);
static void find_dom(struct block *);
static void propedom(struct edge *);
static void find_edom(struct block *);
static void find_closure(struct block *);
static int atomuse(struct stmt *);
static int atomdef(struct stmt *);
static void compute_local_ud(struct block *);
static void find_ud(struct block *);
static void init_val(void);
static int F(int, int, int);
static inline void vstore(struct stmt *, int *, int, int);
static void opt_blk(struct block *, int);
static int use_conflict(struct block *, struct block *);
static void opt_j(struct edge *);
static void or_pullup(struct block *);
static void and_pullup(struct block *);
static void opt_blks(struct block *, int);
static inline void link_inedge(struct edge *, struct block *);
static void find_inedges(struct block *);
static void opt_root(struct block **);
static void opt_loop(struct block *, int);
static void fold_op(struct stmt *, int, int);
static inline struct slist *this_op(struct slist *);
static void opt_not(struct block *);
static void opt_peep(struct block *);
static void opt_stmt(struct stmt *, int[], int);
static void deadstmt(struct stmt *, struct stmt *[]);
static void opt_deadstores(struct block *);
static struct block *fold_edge(struct block *, struct edge *);
static inline int eq_blk(struct block *, struct block *);
static int slength(struct slist *);
static int count_blocks(struct block *);
static void number_blks_r(struct block *);
static int count_stmts(struct block *);
static int convert_code_r(struct block *);
static void opt_dump(struct block *);
static int n_blocks;
struct block **blocks;
static int n_edges;
struct edge **edges;
/*
 * A bit vector set representation of the dominators.
 * We round up the set size to the next power of two.
 */
static int nodewords;
static int edgewords;
struct block **levels;
bpf_u_int32 *space;
#define BITS_PER_WORD (8*sizeof(bpf_u_int32))
/*
 * True if 'a' is in uset {p}.
 */
#define SET_MEMBER(p, a) \
((p)[(unsigned)(a) / BITS_PER_WORD] & (1 << ((unsigned)(a) % BITS_PER_WORD)))

/*
 * Add 'a' to uset p.
 */
#define SET_INSERT(p, a) \
(p)[(unsigned)(a) / BITS_PER_WORD] |= (1 << ((unsigned)(a) % BITS_PER_WORD))

/*
 * Delete 'a' from uset p.
 */
#define SET_DELETE(p, a) \
(p)[(unsigned)(a) / BITS_PER_WORD] &= ~(1 << ((unsigned)(a) % BITS_PER_WORD))
/*
 * a := a intersect b
 */
#define SET_INTERSECT(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register int _n = n;\
	while (--_n >= 0) *_x++ &= *_y++;\
}

/*
 * a := a - b
 */
#define SET_SUBTRACT(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register int _n = n;\
	while (--_n >= 0) *_x++ &=~ *_y++;\
}

/*
 * a := a union b
 */
#define SET_UNION(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register int _n = n;\
	while (--_n >= 0) *_x++ |= *_y++;\
}
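
/*
 * Illustrative sketch (not in the original source): typical use of the
 * uset macros above.  One bit per block, so membership tests and
 * single-element updates are O(1) and whole-set operations cost
 * O(nodewords).
 */
#if 0
static void
example_usets(bpf_u_int32 *d1, bpf_u_int32 *d2, int id)
{
	SET_INSERT(d1, id);			/* d1 |= {id} */
	if (SET_MEMBER(d1, id))
		SET_INTERSECT(d1, d2, nodewords);	/* d1 &= d2 */
}
#endif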
static uset all_dom_sets;
static uset all_closure_sets;
static uset all_edge_sets;
#define MAX(a,b) ((a)>(b)?(a):(b))
static void
find_levels_r(b)
	struct block *b;
{
	int level;

	if (isMarked(b))
		return;
	Mark(b);
	b->link = 0;

	if (JT(b)) {
		find_levels_r(JT(b));
		find_levels_r(JF(b));
		level = MAX(JT(b)->level, JF(b)->level) + 1;
	} else
		level = 0;
	b->level = level;
	b->link = levels[level];
	levels[level] = b;
}
/*
 * Level graph.  The levels go from 0 at the leaves to
 * N_LEVELS at the root.  The levels[] array points to the
 * first node of the level list, whose elements are linked
 * with the 'link' field of the struct block.
 */
static void
find_levels(root)
	struct block *root;
{
	memset((char *)levels, 0, n_blocks * sizeof(*levels));
	unMarkAll();
	find_levels_r(root);
}
/*
 * Find dominator relationships.
 * Assumes graph has been leveled.
 */
static void
find_dom(root)
	struct block *root;
{
	int i;
	struct block *b;
	bpf_u_int32 *x;

	/*
	 * Initialize sets to contain all nodes.
	 */
	x = all_dom_sets;
	i = n_blocks * nodewords;
	while (--i >= 0)
		*x++ = ~0;
	/* Root starts off empty. */
	for (i = nodewords; --i >= 0;)
		root->dom[i] = 0;

	/* root->level is the highest level number found. */
	for (i = root->level; i >= 0; --i) {
		for (b = levels[i]; b; b = b->link) {
			SET_INSERT(b->dom, b->id);
			if (JT(b) == 0)
				continue;
			SET_INTERSECT(JT(b)->dom, b->dom, nodewords);
			SET_INTERSECT(JF(b)->dom, b->dom, nodewords);
		}
	}
}
static void
propedom(ep)
	struct edge *ep;
{
	SET_INSERT(ep->edom, ep->id);
	if (ep->succ) {
		SET_INTERSECT(ep->succ->et.edom, ep->edom, edgewords);
		SET_INTERSECT(ep->succ->ef.edom, ep->edom, edgewords);
	}
}
/*
 * Compute edge dominators.
 * Assumes graph has been leveled and predecessors established.
 */
static void
find_edom(root)
	struct block *root;
{
	int i;
	uset x;
	struct block *b;

	x = all_edge_sets;
	for (i = n_edges * edgewords; --i >= 0; )
		x[i] = ~0;

	/* root->level is the highest level number found. */
	memset(root->et.edom, 0, edgewords * sizeof(*(uset)0));
	memset(root->ef.edom, 0, edgewords * sizeof(*(uset)0));
	for (i = root->level; i >= 0; --i) {
		for (b = levels[i]; b != 0; b = b->link) {
			propedom(&b->et);
			propedom(&b->ef);
		}
	}
}
/*
 * Find the backwards transitive closure of the flow graph.  These sets
 * are backwards in the sense that we find the set of nodes that reach
 * a given node, not the set of nodes that can be reached by a node.
 *
 * Assumes graph has been leveled.
 */
static void
find_closure(root)
	struct block *root;
{
	int i;
	struct block *b;

	/*
	 * Initialize sets to contain no nodes.
	 */
	memset((char *)all_closure_sets, 0,
	      n_blocks * nodewords * sizeof(*all_closure_sets));

	/* root->level is the highest level number found. */
	for (i = root->level; i >= 0; --i) {
		for (b = levels[i]; b; b = b->link) {
			SET_INSERT(b->closure, b->id);
			if (JT(b) == 0)
				continue;
			SET_UNION(JT(b)->closure, b->closure, nodewords);
			SET_UNION(JF(b)->closure, b->closure, nodewords);
		}
	}
}
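
/*
 * Illustrative sketch (not in the original source): because the union
 * runs from the root down (highest level first), each node accumulates
 * every block on some path from the root to it.  For the graph
 *
 *	A -> B -> D
 *	A -> C -> D
 *
 * the pass gives closure(D) = {A, B, C, D}: the set of nodes that can
 * reach D, hence a "backwards" closure.
 */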
/*
 * Return the register number that is used by s.  If A and X are both
 * used, return AX_ATOM.  If no register is used, return -1.
 *
 * The implementation should probably change to an array access.
 */
static int
atomuse(s)
	struct stmt *s;
{
	register int c = s->code;

	if (c == NOP)
		return -1;

	switch (BPF_CLASS(c)) {

	case BPF_RET:
		return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
			(BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;

	case BPF_LD:
	case BPF_LDX:
		return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
			(BPF_MODE(c) == BPF_MEM) ? s->k : -1;

	case BPF_ALU:
	case BPF_JMP:
		if (BPF_SRC(c) == BPF_X)
			return AX_ATOM;
		return A_ATOM;

	case BPF_MISC:
		return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
	}
	return -1;
}
/*
 * Return the register number that is defined by 's'.  We assume that
 * a single stmt cannot define more than one register.  If no register
 * is defined, return -1.
 *
 * The implementation should probably change to an array access.
 */
static int
atomdef(s)
	struct stmt *s;
{
	if (s->code == NOP)
		return -1;

	switch (BPF_CLASS(s->code)) {

	case BPF_LD:
	case BPF_ALU:
		return A_ATOM;

	case BPF_LDX:
		return X_ATOM;

	case BPF_ST:
	case BPF_STX:
		return s->k;

	case BPF_MISC:
		return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
	}
	return -1;
}
/*
 * Compute the sets of registers used, defined, and killed by 'b'.
 *
 * "Used" means that a statement in 'b' uses the register before any
 * statement in 'b' defines it, i.e. it uses the value left in
 * that register by a predecessor block of this block.
 * "Defined" means that a statement in 'b' defines it.
 * "Killed" means that a statement in 'b' defines it before any
 * statement in 'b' uses it, i.e. it kills the value left in that
 * register by a predecessor block of this block.
 */
static void
compute_local_ud(b)
	struct block *b;
{
	struct slist *s;
	atomset def = 0, use = 0, kill = 0;
	int atom;

	for (s = b->stmts; s; s = s->next) {
		if (s->s.code == NOP)
			continue;
		atom = atomuse(&s->s);
		if (atom >= 0) {
			if (atom == AX_ATOM) {
				if (!ATOMELEM(def, X_ATOM))
					use |= ATOMMASK(X_ATOM);
				if (!ATOMELEM(def, A_ATOM))
					use |= ATOMMASK(A_ATOM);
			}
			else if (atom < N_ATOMS) {
				if (!ATOMELEM(def, atom))
					use |= ATOMMASK(atom);
			}
		}
		atom = atomdef(&s->s);
		if (atom >= 0) {
			if (!ATOMELEM(use, atom))
				kill |= ATOMMASK(atom);
			def |= ATOMMASK(atom);
		}
	}
	if (BPF_CLASS(b->s.code) == BPF_JMP) {
		/*
		 * XXX - what about RET?
		 */
		atom = atomuse(&b->s);
		if (atom >= 0) {
			if (atom == AX_ATOM) {
				if (!ATOMELEM(def, X_ATOM))
					use |= ATOMMASK(X_ATOM);
				if (!ATOMELEM(def, A_ATOM))
					use |= ATOMMASK(A_ATOM);
			}
			else if (atom < N_ATOMS) {
				if (!ATOMELEM(def, atom))
					use |= ATOMMASK(atom);
			}
		}
	}

	b->def = def;
	b->kill = kill;
	b->in_use = use;
}
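
/*
 * Illustrative worked example (not in the original source): for a
 * block whose statements are
 *
 *	tax		; uses A, defines X
 *	ld M[0]		; uses scratch slot 0, defines A
 *
 * the rules above give use = {A, 0}, def = {X, A}, kill = {X}:
 * X is killed (defined before any use), but A is not, because the
 * tax read A before the ld redefined it.
 */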
/*
 * Assume graph is already leveled.
 */
static void
find_ud(root)
	struct block *root;
{
	int i, maxlevel;
	struct block *p;

	/*
	 * root->level is the highest level number found;
	 * count down from there.
	 */
	maxlevel = root->level;
	for (i = maxlevel; i >= 0; --i)
		for (p = levels[i]; p; p = p->link) {
			compute_local_ud(p);
			p->out_use = 0;
		}

	for (i = 1; i <= maxlevel; ++i) {
		for (p = levels[i]; p; p = p->link) {
			p->out_use |= JT(p)->in_use | JF(p)->in_use;
			p->in_use |= p->out_use &~ p->kill;
		}
	}
}
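
/*
 * Illustrative sketch (not in the original source): the loop above is
 * the standard backwards liveness recurrence, evaluated leaf-to-root
 * in one pass because the flow graph is acyclic:
 *
 *	out_use(p) = in_use(JT(p)) | in_use(JF(p))
 *	in_use(p)  = use(p) | (out_use(p) & ~kill(p))
 */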
/*
 * These data structures are used in a Cocke and Schwartz style
 * value numbering scheme.  Since the flowgraph is acyclic,
 * exit values can be propagated from a node's predecessors
 * provided it is uniquely defined.
 */
struct valnode {
	int code;
	int v0, v1;
	int val;
	struct valnode *next;
};

#define MODULUS 213
static struct valnode *hashtbl[MODULUS];
static int curval;
static int maxval;

/* Integer constants mapped with the load immediate opcode. */
#define K(i) F(BPF_LD|BPF_IMM|BPF_W, i, 0L)
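
/*
 * Illustrative sketch (not in the original source): how F() and K()
 * give equal expressions equal value numbers.  Two "ld #5" statements
 * hash to the same valnode, so both get the same id:
 */
#if 0
static void
example_valnum(void)
{
	int v1 = K(5);		/* first "ld #5"  */
	int v2 = K(5);		/* second "ld #5" */
	/* here v1 == v2: the constant was interned in hashtbl[] by F() */
}
#endif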
struct vmapinfo {
	int is_const;
	bpf_int32 const_val;
};

struct vmapinfo *vmap;
struct valnode *vnode_base;
struct valnode *next_vnode;

static void
init_val()
{
	curval = 0;
	next_vnode = vnode_base;
	memset((char *)vmap, 0, maxval * sizeof(*vmap));
	memset((char *)hashtbl, 0, sizeof hashtbl);
}
/* Because we really don't have an IR, this stuff is a little messy. */
static int
F(code, v0, v1)
	int code;
	int v0, v1;
{
	u_int hash;
	int val;
	struct valnode *p;

	hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
	hash %= MODULUS;

	for (p = hashtbl[hash]; p; p = p->next)
		if (p->code == code && p->v0 == v0 && p->v1 == v1)
			return p->val;

	val = ++curval;
	if (BPF_MODE(code) == BPF_IMM &&
	    (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
		vmap[val].const_val = v0;
		vmap[val].is_const = 1;
	}
	p = next_vnode++;
	p->val = val;
	p->code = code;
	p->v0 = v0;
	p->v1 = v1;
	p->next = hashtbl[hash];
	hashtbl[hash] = p;

	return val;
}
static inline void
vstore(s, valp, newval, alter)
	struct stmt *s;
	int *valp;
	int newval;
	int alter;
{
	if (alter && *valp == newval)
		s->code = NOP;
	else
		*valp = newval;
}
static void
fold_op(s, v0, v1)
	struct stmt *s;
	int v0, v1;
{
	bpf_int32 a, b;

	a = vmap[v0].const_val;
	b = vmap[v1].const_val;

	switch (BPF_OP(s->code)) {

	case BPF_DIV:
		if (b == 0)
			bpf_error("division by zero");
		a /= b;
		break;
	}
	s->k = a;
	s->code = BPF_LD|BPF_IMM;
	done = 0;
}
static inline struct slist *
this_op(s)
	struct slist *s;
{
	while (s != 0 && s->s.code == NOP)
		s = s->next;
	return s;
}
static void
opt_not(b)
	struct block *b;
{
	struct block *tmp = JT(b);

	JT(b) = JF(b);
	JF(b) = tmp;
}
static void
opt_peep(b)
	struct block *b;
{
	struct slist *s;
	struct slist *next, *last;
	int val;

	s = b->stmts;
	if (s == 0)
		return;

	last = s;
	for (/*empty*/; /*empty*/; s = next) {
		/*
		 * Skip over nops.
		 */
		s = this_op(s);
		if (s == 0)
			break;	/* nothing left in the block */

		/*
		 * Find the next real instruction after that one
		 * (skipping nops).
		 */
		next = this_op(s->next);
		if (next == 0)
			break;	/* no next instruction */
		last = next;
		/*
		 * st  M[k]	-->	st  M[k]
		 * ldx M[k]		tax
		 */
		if (s->s.code == BPF_ST &&
		    next->s.code == (BPF_LDX|BPF_MEM) &&
		    s->s.k == next->s.k) {
			done = 0;
			next->s.code = BPF_MISC|BPF_TAX;
		}
		/*
		 * ld  #k	-->	ldx  #k
		 * tax			txa
		 */
		if (s->s.code == (BPF_LD|BPF_IMM) &&
		    next->s.code == (BPF_MISC|BPF_TAX)) {
			s->s.code = BPF_LDX|BPF_IMM;
			next->s.code = BPF_MISC|BPF_TXA;
			done = 0;
		}
		/*
		 * This is an ugly special case, but it happens
		 * when you say tcp[k] or udp[k] where k is a constant.
		 */
		if (s->s.code == (BPF_LD|BPF_IMM)) {
			struct slist *add, *tax, *ild;

			/*
			 * Check that X isn't used on exit from this
			 * block (which the optimizer might cause).
			 * We know the code generator won't generate
			 * any local dependencies.
			 */
			if (ATOMELEM(b->out_use, X_ATOM))
				continue;

			/*
			 * Check that the instruction following the ldi
			 * is an addx, or it's an ldxms with an addx
			 * following it (with 0 or more nops between the
			 * ldxms and addx).
			 */
			if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
				add = next;
			else
				add = this_op(next->next);
			if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
				continue;

			/*
			 * Check that a tax follows that (with 0 or more
			 * nops between them).
			 */
			tax = this_op(add->next);
			if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
				continue;

			/*
			 * Check that an ild follows that (with 0 or more
			 * nops between them).
			 */
			ild = this_op(tax->next);
			if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
			    BPF_MODE(ild->s.code) != BPF_IND)
				continue;
			/*
			 * We want to turn this sequence:
			 *
			 * (004) ldi     #0x2		{s}
			 * (005) ldxms   [14]		{next}  -- optional
			 * (006) addx			{add}
			 * (007) tax			{tax}
			 * (008) ild     [x+0]		{ild}
			 *
			 * into this sequence:
			 *
			 * (004) nop
			 * (005) ldxms   [14]
			 * (006) nop
			 * (007) nop
			 * (008) ld      [x+2]
			 *
			 * XXX We need to check that X is not
			 * subsequently used, because we want to change
			 * what'll be in it after this sequence.
			 *
			 * We know we can eliminate the accumulator
			 * modifications earlier in the sequence since
			 * it is defined by the last stmt of this sequence
			 * (i.e., the last statement of the sequence loads
			 * a value into the accumulator, so we can eliminate
			 * earlier operations on the accumulator).
			 */
			ild->s.k += s->s.k;
			s->s.code = NOP;
			add->s.code = NOP;
			tax->s.code = NOP;
			done = 0;
		}
	}
	/*
	 * If the comparison at the end of a block is an equality
	 * comparison against a constant, and nobody uses the value
	 * we leave in the A register at the end of a block, and
	 * the operation preceding the comparison is an arithmetic
	 * operation, we can sometimes optimize it away.
	 */
	if (b->s.code == (BPF_JMP|BPF_JEQ|BPF_K) &&
	    !ATOMELEM(b->out_use, A_ATOM)) {
		/*
		 * We can optimize away certain subtractions of the
		 * X register.
		 */
		if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X)) {
			val = b->val[X_ATOM];
			if (vmap[val].is_const) {
				/*
				 * If we have a subtract to do a comparison,
				 * and the X register is a known constant,
				 * we can merge this value into the
				 * comparison:
				 *
				 * sub x	->	nop
				 * jeq #y	->	jeq #(x+y)
				 */
				b->s.k += vmap[val].const_val;
				last->s.code = NOP;
				done = 0;
			} else if (b->s.k == 0) {
				/*
				 * If the X register isn't a constant,
				 * and the comparison in the test is
				 * against 0, we can compare with the
				 * X register, instead:
				 *
				 * sub x	->	nop
				 * jeq #0	->	jeq x
				 */
				last->s.code = NOP;
				b->s.code = BPF_JMP|BPF_JEQ|BPF_X;
				done = 0;
			}
		}
		/*
		 * Likewise, a constant subtract can be simplified:
		 *
		 * sub #x	->	nop
		 * jeq #y	->	jeq #(x+y)
		 */
		else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K)) {
			last->s.code = NOP;
			b->s.k += last->s.k;
			done = 0;
		}
		/*
		 * And, similarly, a constant AND can be simplified
		 * if we're testing against 0, i.e.:
		 *
		 * and #k	nop
		 * jeq #0  ->	jset #k
		 */
		else if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
		    b->s.k == 0) {
			b->s.k = last->s.k;
			b->s.code = BPF_JMP|BPF_K|BPF_JSET;
			last->s.code = NOP;
			done = 0;
			opt_not(b);
		}
	}
	/*
	 * jset #0        ->   never
	 * jset #ffffffff ->   always
	 */
	if (b->s.code == (BPF_JMP|BPF_K|BPF_JSET)) {
		if (b->s.k == 0)
			JT(b) = JF(b);
		if (b->s.k == 0xffffffff)
			JF(b) = JT(b);
	}
	/*
	 * If we're comparing against the index register, and the index
	 * register is a known constant, we can just compare against that
	 * constant.
	 */
	val = b->val[X_ATOM];
	if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_X) {
		bpf_int32 v = vmap[val].const_val;

		b->s.code &= ~BPF_X;
		b->s.k = v;
		done = 0;
	}
	/*
	 * If the accumulator is a known constant, we can compute the
	 * comparison result.
	 */
	val = b->val[A_ATOM];
	if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
		bpf_int32 v = vmap[val].const_val;

		switch (BPF_OP(b->s.code)) {

		case BPF_JEQ:
			v = v == b->s.k;
			break;

		case BPF_JGT:
			v = (unsigned)v > b->s.k;
			break;

		case BPF_JGE:
			v = (unsigned)v >= b->s.k;
			break;

		case BPF_JSET:
			v &= b->s.k;
			break;
		}
		if (JF(b) != JT(b))
			done = 0;
		if (v)
			JF(b) = JT(b);
		else
			JT(b) = JF(b);
	}
}
/*
 * Compute the symbolic value of the expression of 's', and update
 * anything it defines in the value table 'val'.  If 'alter' is true,
 * do various optimizations.  This code would be cleaner if symbolic
 * evaluation and code transformations weren't folded together.
 */
static void
opt_stmt(s, val, alter)
	struct stmt *s;
	int val[];
	int alter;
{
	int op;
	int v;

	switch (s->code) {
	case BPF_LD|BPF_ABS|BPF_W:
	case BPF_LD|BPF_ABS|BPF_H:
	case BPF_LD|BPF_ABS|BPF_B:
		v = F(s->code, s->k, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;
	case BPF_LD|BPF_IND|BPF_W:
	case BPF_LD|BPF_IND|BPF_H:
	case BPF_LD|BPF_IND|BPF_B:
		v = val[X_ATOM];
		if (alter && vmap[v].is_const) {
			s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
			s->k += vmap[v].const_val;
			v = F(s->code, s->k, 0L);
			done = 0;
		}
		else
			v = F(s->code, s->k, v);
		vstore(s, &val[A_ATOM], v, alter);
		break;
	case BPF_LD|BPF_LEN:
		v = F(s->code, 0L, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;
	case BPF_LD|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[A_ATOM], v, alter);
		break;
	case BPF_LDX|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[X_ATOM], v, alter);
		break;
	case BPF_LDX|BPF_MSH|BPF_B:
		v = F(s->code, s->k, 0L);
		vstore(s, &val[X_ATOM], v, alter);
		break;
	case BPF_ALU|BPF_NEG:
		if (alter && vmap[val[A_ATOM]].is_const) {
			s->code = BPF_LD|BPF_IMM;
			s->k = -vmap[val[A_ATOM]].const_val;
			val[A_ATOM] = K(s->k);
		}
		else
			val[A_ATOM] = F(s->code, val[A_ATOM], 0L);
		break;
	case BPF_ALU|BPF_ADD|BPF_K:
	case BPF_ALU|BPF_SUB|BPF_K:
	case BPF_ALU|BPF_MUL|BPF_K:
	case BPF_ALU|BPF_DIV|BPF_K:
	case BPF_ALU|BPF_AND|BPF_K:
	case BPF_ALU|BPF_OR|BPF_K:
	case BPF_ALU|BPF_LSH|BPF_K:
	case BPF_ALU|BPF_RSH|BPF_K:
		op = BPF_OP(s->code);
		if (alter) {
			if (s->k == 0) {
				/* don't optimize away "sub #0"
				 * as it may be needed later to
				 * fixup the generated math code */
				if (op == BPF_ADD ||
				    op == BPF_LSH || op == BPF_RSH ||
				    op == BPF_OR) {
					s->code = NOP;
					break;
				}
				if (op == BPF_MUL || op == BPF_AND) {
					s->code = BPF_LD|BPF_IMM;
					val[A_ATOM] = K(s->k);
					break;
				}
			}
			if (vmap[val[A_ATOM]].is_const) {
				fold_op(s, val[A_ATOM], K(s->k));
				val[A_ATOM] = K(s->k);
				break;
			}
		}
		val[A_ATOM] = F(s->code, val[A_ATOM], K(s->k));
		break;
	case BPF_ALU|BPF_ADD|BPF_X:
	case BPF_ALU|BPF_SUB|BPF_X:
	case BPF_ALU|BPF_MUL|BPF_X:
	case BPF_ALU|BPF_DIV|BPF_X:
	case BPF_ALU|BPF_AND|BPF_X:
	case BPF_ALU|BPF_OR|BPF_X:
	case BPF_ALU|BPF_LSH|BPF_X:
	case BPF_ALU|BPF_RSH|BPF_X:
		op = BPF_OP(s->code);
		if (alter && vmap[val[X_ATOM]].is_const) {
			if (vmap[val[A_ATOM]].is_const) {
				fold_op(s, val[A_ATOM], val[X_ATOM]);
				val[A_ATOM] = K(s->k);
			}
			else {
				s->code = BPF_ALU|BPF_K|op;
				s->k = vmap[val[X_ATOM]].const_val;
				done = 0;
				val[A_ATOM] =
					F(s->code, val[A_ATOM], K(s->k));
			}
			break;
		}
		/*
		 * Check if we're doing something to an accumulator
		 * that is 0, and simplify.  This may not seem like
		 * much of a simplification but it could open up further
		 * optimizations.
		 * XXX We could also check for mul by 1, etc.
		 */
		if (alter && vmap[val[A_ATOM]].is_const
		    && vmap[val[A_ATOM]].const_val == 0) {
			if (op == BPF_ADD || op == BPF_OR) {
				s->code = BPF_MISC|BPF_TXA;
				vstore(s, &val[A_ATOM], val[X_ATOM], alter);
				break;
			}
			else if (op == BPF_MUL || op == BPF_DIV ||
				 op == BPF_AND || op == BPF_LSH || op == BPF_RSH) {
				s->code = BPF_LD|BPF_IMM;
				s->k = 0;
				vstore(s, &val[A_ATOM], K(s->k), alter);
				break;
			}
			else if (op == BPF_NEG) {
				s->code = NOP;
				break;
			}
		}
		val[A_ATOM] = F(s->code, val[A_ATOM], val[X_ATOM]);
		break;
	case BPF_MISC|BPF_TXA:
		vstore(s, &val[A_ATOM], val[X_ATOM], alter);
		break;
	case BPF_LD|BPF_MEM:
		v = val[s->k];
		if (alter && vmap[v].is_const) {
			s->code = BPF_LD|BPF_IMM;
			s->k = vmap[v].const_val;
			done = 0;
		}
		vstore(s, &val[A_ATOM], v, alter);
		break;
	case BPF_MISC|BPF_TAX:
		vstore(s, &val[X_ATOM], val[A_ATOM], alter);
		break;
	case BPF_LDX|BPF_MEM:
		v = val[s->k];
		if (alter && vmap[v].is_const) {
			s->code = BPF_LDX|BPF_IMM;
			s->k = vmap[v].const_val;
			done = 0;
		}
		vstore(s, &val[X_ATOM], v, alter);
		break;
	case BPF_ST:
		vstore(s, &val[s->k], val[A_ATOM], alter);
		break;

	case BPF_STX:
		vstore(s, &val[s->k], val[X_ATOM], alter);
		break;
	}
}
static void
deadstmt(s, last)
	register struct stmt *s;
	register struct stmt *last[];
{
	register int atom;

	atom = atomuse(s);
	if (atom >= 0) {
		if (atom == AX_ATOM) {
			last[X_ATOM] = 0;
			last[A_ATOM] = 0;
		}
		else
			last[atom] = 0;
	}
	atom = atomdef(s);
	if (atom >= 0) {
		if (last[atom]) {
			done = 0;
			last[atom]->code = NOP;
		}
		last[atom] = s;
	}
}
static void
opt_deadstores(b)
	register struct block *b;
{
	register struct slist *s;
	register int atom;
	struct stmt *last[N_ATOMS];

	memset((char *)last, 0, sizeof last);

	for (s = b->stmts; s != 0; s = s->next)
		deadstmt(&s->s, last);
	deadstmt(&b->s, last);

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (last[atom] && !ATOMELEM(b->out_use, atom)) {
			last[atom]->code = NOP;
			done = 0;
		}
}
, do_stmts
)
1209 bpf_int32 aval
, xval
;
1212 for (s
= b
->stmts
; s
&& s
->next
; s
= s
->next
)
1213 if (BPF_CLASS(s
->s
.code
) == BPF_JMP
) {
1220 * Initialize the atom values.
1225 * We have no predecessors, so everything is undefined
1226 * upon entry to this block.
1228 memset((char *)b
->val
, 0, sizeof(b
->val
));
1231 * Inherit values from our predecessors.
1233 * First, get the values from the predecessor along the
1234 * first edge leading to this node.
1236 memcpy((char *)b
->val
, (char *)p
->pred
->val
, sizeof(b
->val
));
1238 * Now look at all the other nodes leading to this node.
1239 * If, for the predecessor along that edge, a register
1240 * has a different value from the one we have (i.e.,
1241 * control paths are merging, and the merging paths
1242 * assign different values to that register), give the
1243 * register the undefined value of 0.
1245 while ((p
= p
->next
) != NULL
) {
1246 for (i
= 0; i
< N_ATOMS
; ++i
)
1247 if (b
->val
[i
] != p
->pred
->val
[i
])
1251 aval
= b
->val
[A_ATOM
];
1252 xval
= b
->val
[X_ATOM
];
1253 for (s
= b
->stmts
; s
; s
= s
->next
)
1254 opt_stmt(&s
->s
, b
->val
, do_stmts
);
	/*
	 * This is a special case: if we don't use anything from this
	 * block, and we load the accumulator or index register with a
	 * value that is already there, or if this block is a return,
	 * eliminate all the statements.
	 *
	 * XXX - what if it does a store?
	 *
	 * XXX - why does it matter whether we use anything from this
	 * block?  If the accumulator or index register doesn't change
	 * its value, isn't that OK even if we use that value?
	 *
	 * XXX - if we load the accumulator with a different value,
	 * and the block ends with a conditional branch, we obviously
	 * can't eliminate it, as the branch depends on that value.
	 * For the index register, the conditional branch only depends
	 * on the index register value if the test is against the index
	 * register value rather than a constant; if nothing uses the
	 * value we put into the index register, and we're not testing
	 * against the index register's value, and there aren't any
	 * other problems that would keep us from eliminating this
	 * block, can we eliminate it?
	 */
	if (do_stmts &&
	    ((b->out_use == 0 && aval != 0 && b->val[A_ATOM] == aval &&
	      xval != 0 && b->val[X_ATOM] == xval) ||
	     BPF_CLASS(b->s.code) == BPF_RET)) {
		if (b->stmts != 0) {
			b->stmts = 0;
			done = 0;
		}
	}
	else {
		/*
		 * Set up values for branch optimizer.
		 */
		if (BPF_SRC(b->s.code) == BPF_K)
			b->oval = K(b->s.k);
		else
			b->oval = b->val[X_ATOM];
		b->et.code = b->s.code;
		b->ef.code = -b->s.code;
	}
}
/*
 * Return true if any register that is used on exit from 'succ', has
 * an exit value that is different from the corresponding exit value
 * from 'b'.
 */
static int
use_conflict(b, succ)
	struct block *b, *succ;
{
	int atom;
	atomset use = succ->out_use;

	if (use == 0)
		return 0;

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (ATOMELEM(use, atom))
			if (b->val[atom] != succ->val[atom])
				return 1;
	return 0;
}
static struct block *
fold_edge(child, ep)
	struct block *child;
	struct edge *ep;
{
	int sense;
	int aval0, aval1, oval0, oval1;
	int code = ep->code;

	if (code < 0) {
		code = -code;
		sense = 0;
	} else
		sense = 1;

	if (child->s.code != code)
		return 0;

	aval0 = child->val[A_ATOM];
	oval0 = child->oval;
	aval1 = ep->pred->val[A_ATOM];
	oval1 = ep->pred->oval;

	if (aval0 != aval1)
		return 0;

	if (oval0 == oval1)
		/*
		 * The operands of the branch instructions are
		 * identical, so the result is true if a true
		 * branch was taken to get here, otherwise false.
		 */
		return sense ? JT(child) : JF(child);

	if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
		/*
		 * At this point, we only know the comparison if we
		 * came down the true branch, and it was an equality
		 * comparison with a constant.
		 *
		 * I.e., if we came down the true branch, and the branch
		 * was an equality comparison with a constant, we know the
		 * accumulator contains that constant.  If we came down
		 * the false branch, or the comparison wasn't with a
		 * constant, we don't know what was in the accumulator.
		 *
		 * We rely on the fact that distinct constants have distinct
		 * value numbers.
		 */
		return JF(child);

	return 0;
}
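
/*
 * Illustrative sketch (not in the original source): fold_edge() in
 * action.  Given one "jeq #0x800" guarding another:
 *
 *	(A) jeq #0x800	jt B  jf C
 *	(B) jeq #0x800	jt D  jf E
 *
 * arriving at B along A's true edge means the test must succeed, so
 * the edge can be promoted straight to D.  And since distinct
 * constants have distinct value numbers, a child "jeq #0x86dd"
 * reached along a taken "jeq #0x800" must fail, so that edge can be
 * promoted to the child's false branch.
 */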
static void
opt_j(ep)
	struct edge *ep;
{
	register int i, k;
	register struct block *target;

	if (JT(ep->succ) == 0)
		return;

	if (JT(ep->succ) == JF(ep->succ)) {
		/*
		 * Common branch targets can be eliminated, provided
		 * there is no data dependency.
		 */
		if (!use_conflict(ep->pred, ep->succ->et.succ)) {
			done = 0;
			ep->succ = JT(ep->succ);
		}
	}
	/*
	 * For each edge dominator that matches the successor of this
	 * edge, promote the edge successor to its grandchild.
	 *
	 * XXX We violate the set abstraction here in favor of a reasonably
	 * efficient loop.
	 */
 top:
	for (i = 0; i < edgewords; ++i) {
		register bpf_u_int32 x = ep->edom[i];

		while (x != 0) {
			k = ffs(x) - 1;
			x &=~ (1 << k);
			k += i * BITS_PER_WORD;

			target = fold_edge(ep->succ, edges[k]);
			/*
			 * Check that there is no data dependency between
			 * nodes that will be violated if we move the edge.
			 */
			if (target != 0 && !use_conflict(ep->pred, target)) {
				done = 0;
				ep->succ = target;
				if (JT(target) != 0)
					/*
					 * Start over unless we hit a leaf.
					 */
					goto top;
				return;
			}
		}
	}
}
static void
or_pullup(b)
	struct block *b;
{
	int val, at_top;
	struct block *pull;
	struct block **diffp, **samep;
	struct edge *ep;

	ep = b->in_edges;
	if (ep == 0)
		return;

	/*
	 * Make sure each predecessor loads the same value.
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])
			return;

	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);
	else
		diffp = &JF(b->in_edges->pred);

	at_top = 1;
	while (1) {
		if (*diffp == 0)
			return;

		if (JT(*diffp) != JT(b))
			return;

		if (!SET_MEMBER((*diffp)->dom, b->id))
			return;

		if ((*diffp)->val[A_ATOM] != val)
			break;

		diffp = &JF(*diffp);
		at_top = 0;
	}
	samep = &JF(*diffp);
	while (1) {
		if (*samep == 0)
			return;

		if (JT(*samep) != JT(b))
			return;

		if (!SET_MEMBER((*samep)->dom, b->id))
			return;

		if ((*samep)->val[A_ATOM] == val)
			break;

		/* XXX Need to check that there are no data dependencies
		   between dp0 and dp1.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JF(*samep);
	}
#ifdef notdef
	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])
			return;
#endif
	/* Pull up the node. */
	pull = *samep;
	*samep = JF(pull);
	JF(pull) = *diffp;

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 * to worry about.
	 */
	if (at_top) {
		for (ep = b->in_edges; ep != 0; ep = ep->next) {
			if (JT(ep->pred) == b)
				JT(ep->pred) = pull;
			else
				JF(ep->pred) = pull;
		}
	}
	else
		*diffp = pull;

	done = 0;
}
static void
and_pullup(b)
	struct block *b;
{
	int val, at_top;
	struct block *pull;
	struct block **diffp, **samep;
	struct edge *ep;

	ep = b->in_edges;
	if (ep == 0)
		return;

	/*
	 * Make sure each predecessor loads the same value.
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])
			return;

	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);
	else
		diffp = &JF(b->in_edges->pred);

	at_top = 1;
	while (1) {
		if (*diffp == 0)
			return;

		if (JF(*diffp) != JF(b))
			return;

		if (!SET_MEMBER((*diffp)->dom, b->id))
			return;

		if ((*diffp)->val[A_ATOM] != val)
			break;

		diffp = &JT(*diffp);
		at_top = 0;
	}
	samep = &JT(*diffp);
	while (1) {
		if (*samep == 0)
			return;

		if (JF(*samep) != JF(b))
			return;

		if (!SET_MEMBER((*samep)->dom, b->id))
			return;

		if ((*samep)->val[A_ATOM] == val)
			break;

		/* XXX Need to check that there are no data dependencies
		   between diffp and samep.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JT(*samep);
	}
#ifdef notdef
	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])
			return;
#endif
	/* Pull up the node. */
	pull = *samep;
	*samep = JT(pull);
	JT(pull) = *diffp;

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 * to worry about.
	 */
	if (at_top) {
		for (ep = b->in_edges; ep != 0; ep = ep->next) {
			if (JT(ep->pred) == b)
				JT(ep->pred) = pull;
			else
				JF(ep->pred) = pull;
		}
	}
	else
		*diffp = pull;

	done = 0;
}
static void
opt_blks(root, do_stmts)
	struct block *root;
	int do_stmts;
{
	int i, maxlevel;
	struct block *p;

	init_val();
	maxlevel = root->level;

	find_inedges(root);
	for (i = maxlevel; i >= 0; --i)
		for (p = levels[i]; p; p = p->link)
			opt_blk(p, do_stmts);

	if (do_stmts)
		/*
		 * No point trying to move branches; it can't possibly
		 * make a difference at this point.
		 */
		return;

	for (i = 1; i <= maxlevel; ++i) {
		for (p = levels[i]; p; p = p->link) {
			opt_j(&p->et);
			opt_j(&p->ef);
		}
	}

	find_inedges(root);
	for (i = 1; i <= maxlevel; ++i) {
		for (p = levels[i]; p; p = p->link) {
			or_pullup(p);
			and_pullup(p);
		}
	}
}
static inline void
link_inedge(parent, child)
	struct edge *parent;
	struct block *child;
{
	parent->next = child->in_edges;
	child->in_edges = parent;
}
static void
find_inedges(root)
	struct block *root;
{
	int i;
	struct block *b;

	for (i = 0; i < n_blocks; ++i)
		blocks[i]->in_edges = 0;

	/*
	 * Traverse the graph, adding each edge to the predecessor
	 * list of its successors.  Skip the leaves (i.e. level 0).
	 */
	for (i = root->level; i > 0; --i) {
		for (b = levels[i]; b != 0; b = b->link) {
			link_inedge(&b->et, JT(b));
			link_inedge(&b->ef, JF(b));
		}
	}
}
static void
opt_root(b)
	struct block **b;
{
	struct slist *tmp, *s;

	while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
		*b = JT(*b);

	/*
	 * If the root node is a return, then there is no
	 * point executing any statements (since the bpf machine
	 * has no side effects).
	 */
	if (BPF_CLASS((*b)->s.code) == BPF_RET)
		(*b)->stmts = 0;
}
static void
opt_loop(root, do_stmts)
	struct block *root;
	int do_stmts;
{
#ifdef BDEBUG
	printf("opt_loop(root, %d) begin\n", do_stmts);
	opt_dump(root);
#endif
	do {
		done = 1;
		find_levels(root);
		find_dom(root);
		find_closure(root);
		find_ud(root);
		find_edom(root);
		opt_blks(root, do_stmts);
#ifdef BDEBUG
		printf("opt_loop(root, %d) bottom, done=%d\n", do_stmts, done);
		opt_dump(root);
#endif
	} while (!done);
}
/*
 * Optimize the filter code in its dag representation.
 */
void
bpf_optimize(rootp)
	struct block **rootp;
{
	struct block *root;

	root = *rootp;

	opt_init(root);
	opt_loop(root, 0);
	opt_loop(root, 1);
	intern_blocks(root);
#ifdef BDEBUG
	printf("after intern_blocks()\n");
	opt_dump(root);
#endif
	opt_root(rootp);
#ifdef BDEBUG
	printf("after opt_root()\n");
	opt_dump(root);
#endif
	opt_cleanup();
}
static void
make_marks(p)
	struct block *p;
{
	if (!isMarked(p)) {
		Mark(p);
		if (BPF_CLASS(p->s.code) != BPF_RET) {
			make_marks(JT(p));
			make_marks(JF(p));
		}
	}
}

/*
 * Mark code array such that isMarked(i) is true
 * only for nodes that are alive.
 */
static void
mark_code(p)
	struct block *p;
{
	cur_mark += 1;
	make_marks(p);
}
/*
 * True iff the two stmt lists load the same value from the packet into
 * the accumulator.
 */
static int
eq_slist(x, y)
	struct slist *x, *y;
{
	while (1) {
		while (x && x->s.code == NOP)
			x = x->next;
		while (y && y->s.code == NOP)
			y = y->next;
		if (x == 0)
			return y == 0;
		if (y == 0)
			return x == 0;
		if (x->s.code != y->s.code || x->s.k != y->s.k)
			return 0;
		x = x->next;
		y = y->next;
	}
}
static inline int
eq_blk(b0, b1)
	struct block *b0, *b1;
{
	if (b0->s.code == b1->s.code &&
	    b0->s.k == b1->s.k &&
	    b0->et.succ == b1->et.succ &&
	    b0->ef.succ == b1->ef.succ)
		return eq_slist(b0->stmts, b1->stmts);
	return 0;
}
static void
intern_blocks(root)
	struct block *root;
{
	struct block *p;
	int i, j;
	int done1; /* don't shadow global */
 top:
	done1 = 1;
	for (i = 0; i < n_blocks; ++i)
		blocks[i]->link = 0;

	mark_code(root);

	for (i = n_blocks - 1; --i >= 0; ) {
		if (!isMarked(blocks[i]))
			continue;
		for (j = i + 1; j < n_blocks; ++j) {
			if (!isMarked(blocks[j]))
				continue;
			if (eq_blk(blocks[i], blocks[j])) {
				blocks[i]->link = blocks[j]->link ?
					blocks[j]->link : blocks[j];
				break;
			}
		}
	}
	for (i = 0; i < n_blocks; ++i) {
		p = blocks[i];
		if (JT(p) == 0)
			continue;
		if (JT(p)->link) {
			done1 = 0;
			JT(p) = JT(p)->link;
		}
		if (JF(p)->link) {
			done1 = 0;
			JF(p) = JF(p)->link;
		}
	}
	if (!done1)
		goto top;
}
static void
opt_cleanup()
{
	free((void *)vnode_base);
	free((void *)vmap);
	free((void *)edges);
	free((void *)space);
	free((void *)levels);
	free((void *)blocks);
}
/*
 * Return the number of stmts in 's'.
 */
static int
slength(s)
	struct slist *s;
{
	int n = 0;

	for (; s; s = s->next)
		if (s->s.code != NOP)
			++n;
	return n;
}
/*
 * Return the number of nodes reachable by 'p'.
 * All nodes should be initially unmarked.
 */
static int
count_blocks(p)
	struct block *p;
{
	if (p == 0 || isMarked(p))
		return 0;
	Mark(p);
	return count_blocks(JT(p)) + count_blocks(JF(p)) + 1;
}
/*
 * Do a depth first search on the flow graph, numbering the
 * basic blocks, and entering them into the 'blocks' array.
 */
static void
number_blks_r(p)
	struct block *p;
{
	int n;

	if (p == 0 || isMarked(p))
		return;

	Mark(p);
	n = n_blocks++;
	p->id = n;
	blocks[n] = p;

	number_blks_r(JT(p));
	number_blks_r(JF(p));
}
/*
 * Return the number of stmts in the flowgraph reachable by 'p'.
 * The nodes should be unmarked before calling.
 *
 * Note that "stmts" means "instructions", and that this includes
 *
 *	side-effect statements in 'p' (slength(p->stmts));
 *
 *	statements in the true branch from 'p' (count_stmts(JT(p)));
 *
 *	statements in the false branch from 'p' (count_stmts(JF(p)));
 *
 *	the conditional jump itself (1);
 *
 *	an extra long jump if the true branch requires it (p->longjt);
 *
 *	an extra long jump if the false branch requires it (p->longjf).
 */
static int
count_stmts(p)
	struct block *p;
{
	int n;

	if (p == 0 || isMarked(p))
		return 0;
	Mark(p);
	n = count_stmts(JT(p)) + count_stmts(JF(p));
	return slength(p->stmts) + n + 1 + p->longjt + p->longjf;
}
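
/*
 * Illustrative sketch (not in the original source): the count is the
 * recurrence
 *
 *	count(p) = slength(p->stmts) + count(JT(p)) + count(JF(p))
 *		   + 1 + p->longjt + p->longjf
 *
 * with null or already-marked (shared) nodes contributing 0, so each
 * block is counted exactly once even in a dag.
 */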
/*
 * Allocate memory.  All allocation is done before optimization
 * is begun.  A linear bound on the size of all data structures is computed
 * from the total number of blocks and/or statements.
 */
static void
opt_init(root)
	struct block *root;
{
	bpf_u_int32 *p;
	int i, n, max_stmts;

	/*
	 * First, count the blocks, so we can malloc an array to map
	 * block number to block.  Then, put the blocks into the array.
	 */
	unMarkAll();
	n = count_blocks(root);
	blocks = (struct block **)calloc(n, sizeof(*blocks));
	if (blocks == NULL)
		bpf_error("malloc");
	unMarkAll();
	n_blocks = 0;
	number_blks_r(root);

	n_edges = 2 * n_blocks;
	edges = (struct edge **)calloc(n_edges, sizeof(*edges));
	if (edges == NULL)
		bpf_error("malloc");

	/*
	 * The number of levels is bounded by the number of nodes.
	 */
	levels = (struct block **)calloc(n_blocks, sizeof(*levels));
	if (levels == NULL)
		bpf_error("malloc");

	edgewords = n_edges / (8 * sizeof(bpf_u_int32)) + 1;
	nodewords = n_blocks / (8 * sizeof(bpf_u_int32)) + 1;

	space = (bpf_u_int32 *)malloc(2 * n_blocks * nodewords * sizeof(*space)
				 + n_edges * edgewords * sizeof(*space));
	if (space == NULL)
		bpf_error("malloc");
	p = space;
	all_dom_sets = p;
	for (i = 0; i < n; ++i) {
		blocks[i]->dom = p;
		p += nodewords;
	}
	all_closure_sets = p;
	for (i = 0; i < n; ++i) {
		blocks[i]->closure = p;
		p += nodewords;
	}
	all_edge_sets = p;
	for (i = 0; i < n; ++i) {
		register struct block *b = blocks[i];

		b->et.edom = p;
		p += edgewords;
		b->ef.edom = p;
		p += edgewords;
		b->et.id = i;
		edges[i] = &b->et;
		b->ef.id = n_blocks + i;
		edges[n_blocks + i] = &b->ef;
		b->et.pred = b;
		b->ef.pred = b;
	}
	max_stmts = 0;
	for (i = 0; i < n; ++i)
		max_stmts += slength(blocks[i]->stmts) + 1;
	/*
	 * We allocate at most 3 value numbers per statement,
	 * so this is an upper bound on the number of valnodes
	 * we'll need.
	 */
	maxval = 3 * max_stmts;
	vmap = (struct vmapinfo *)calloc(maxval, sizeof(*vmap));
	vnode_base = (struct valnode *)calloc(maxval, sizeof(*vnode_base));
	if (vmap == NULL || vnode_base == NULL)
		bpf_error("malloc");
}
/*
 * Some pointers used to convert the basic block form of the code,
 * into the array form that BPF requires.  'fstart' will point to
 * the malloc'd array while 'ftail' is used during the recursive traversal.
 */
static struct bpf_insn *fstart;
static struct bpf_insn *ftail;
/*
 * Returns true if successful.  Returns false if a branch has
 * an offset that is too large.  If so, we have marked that
 * branch so that on a subsequent iteration, it will be treated
 * properly.
 */
static int
convert_code_r(p)
	struct block *p;
{
	struct bpf_insn *dst;
	struct slist *src;
	int slen;
	u_int off;
	int extrajmps;		/* number of extra jumps inserted */
	struct slist **offset = NULL;

	if (p == 0 || isMarked(p))
		return (1);
	Mark(p);

	if (convert_code_r(JF(p)) == 0)
		return (0);
	if (convert_code_r(JT(p)) == 0)
		return (0);
	slen = slength(p->stmts);
	dst = ftail -= (slen + 1 + p->longjt + p->longjf);
		/* inflate length by any extra jumps */

	p->offset = dst - fstart;

	/* generate offset[] for convenience */
	if (slen) {
		offset = (struct slist **)calloc(slen, sizeof(struct slist *));
		if (!offset) {
			bpf_error("not enough core");
			/*NOTREACHED*/
		}
	}
	src = p->stmts;
	for (off = 0; off < slen && src; off++) {
#if 0
		printf("off=%d src=%x\n", off, src);
#endif
		offset[off] = src;
		src = src->next;
	}
	off = 0;
	for (src = p->stmts; src; src = src->next) {
		if (src->s.code == NOP)
			continue;
		dst->code = (u_short)src->s.code;
		dst->k = src->s.k;
		/* fill block-local relative jump */
		if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) {
#if 0
			if (src->s.jt || src->s.jf) {
				bpf_error("illegal jmp destination");
				/*NOTREACHED*/
			}
#endif
			goto filled;
		}
		if (off == slen - 2)	/*???*/
			goto filled;

	    {
		int i;
		int jt, jf;
		const char *ljerr = "%s for block-local relative jump: off=%d";

#if 0
		printf("code=%x off=%d %x %x\n", src->s.code,
			off, src->s.jt, src->s.jf);
#endif

		if (!src->s.jt || !src->s.jf) {
			bpf_error(ljerr, "no jmp destination", off);
			/*NOTREACHED*/
		}
		jt = jf = 0;
		for (i = 0; i < slen; i++) {
			if (offset[i] == src->s.jt) {
				if (jt) {
					bpf_error(ljerr, "multiple matches", off);
					/*NOTREACHED*/
				}

				dst->jt = i - off - 1;
				jt++;
			}
			if (offset[i] == src->s.jf) {
				if (jf) {
					bpf_error(ljerr, "multiple matches", off);
					/*NOTREACHED*/
				}
				dst->jf = i - off - 1;
				jf++;
			}
		}
		if (!jt || !jf) {
			bpf_error(ljerr, "no destination found", off);
			/*NOTREACHED*/
		}
	    }
filled:
		++dst;
		off++;
	}
	if (offset)
		free(offset);

#ifdef BDEBUG
	bids[dst - fstart] = p->id + 1;
#endif
	dst->code = (u_short)p->s.code;
	dst->k = p->s.k;
	if (JT(p)) {
		extrajmps = 0;
		off = JT(p)->offset - (p->offset + slen) - 1;
		if (off >= 256) {
			/* offset too large for branch, must add a jump */
			if (p->longjt == 0) {
				/* mark this instruction and retry */
				p->longjt++;
				return (0);
			}
			/* branch if T to following jump */
			dst->jt = extrajmps;
			extrajmps++;
			dst[extrajmps].code = BPF_JMP|BPF_JA;
			dst[extrajmps].k = off - extrajmps;
		}
		else
			dst->jt = off;
		off = JF(p)->offset - (p->offset + slen) - 1;
		if (off >= 256) {
			/* offset too large for branch, must add a jump */
			if (p->longjf == 0) {
				/* mark this instruction and retry */
				p->longjf++;
				return (0);
			}
			/* branch if F to following jump */
			/* if two jumps are inserted, F goes to second one */
			dst->jf = extrajmps;
			extrajmps++;
			dst[extrajmps].code = BPF_JMP|BPF_JA;
			dst[extrajmps].k = off - extrajmps;
		}
		else
			dst->jf = off;
	}
	return (1);
}
/*
 * Convert flowgraph intermediate representation to the
 * BPF array representation.  Set *lenp to the number of instructions.
 *
 * This routine does *NOT* leak the memory pointed to by fp.  It *must
 * not* do free(fp) before returning fp; doing so would make no sense,
 * as the BPF array pointed to by the return value of icode_to_fcode()
 * must be valid - it's being returned for use in a bpf_program structure.
 *
 * If it appears that icode_to_fcode() is leaking, the problem is that
 * the program using pcap_compile() is failing to free the memory in
 * the BPF program when it's done - the leak is in the program, not in
 * the routine that happens to be allocating the memory.  (By analogy, if
 * a program calls fopen() without ever calling fclose() on the FILE *,
 * it will leak the FILE structure; the leak is not in fopen(), it's in
 * the program.)  Change the program to use pcap_freecode() when it's
 * done with the filter program.  See the pcap man page.
 */
struct bpf_insn *
icode_to_fcode(root, lenp)
	struct block *root;
	int *lenp;
{
	int n;
	struct bpf_insn *fp;

	/*
	 * Loop doing convert_code_r() until no branches remain
	 * with too-large offsets.
	 */
	while (1) {
		unMarkAll();
		n = *lenp = count_stmts(root);

		fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
		if (fp == NULL)
			bpf_error("malloc");
		memset((char *)fp, 0, sizeof(*fp) * n);
		fstart = fp;
		ftail = fp + n;

		unMarkAll();
		if (convert_code_r(root))
			break;
		free(fp);
	}

	return fp;
}
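
/*
 * Illustrative sketch (not in the original source): the ownership rule
 * described above, from the caller's side.  'example_caller' is a
 * hypothetical name; pcap_freecode() is the documented way to release
 * the instruction array.
 */
#if 0
static void
example_caller(struct block *root)
{
	struct bpf_program prog;

	prog.bf_insns = icode_to_fcode(root, &prog.bf_len);
	/* ... install or dump the program ... */
	pcap_freecode(&prog);	/* caller frees; icode_to_fcode() does not */
}
#endif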
/*
 * Make a copy of a BPF program and put it in the "fcode" member of
 * a "pcap_t".
 *
 * If we fail to allocate memory for the copy, fill in the "errbuf"
 * member of the "pcap_t" with an error message, and return -1;
 * otherwise, return 0.
 */
int
install_bpf_program(pcap_t *p, struct bpf_program *fp)
{
	size_t prog_size;

	/*
	 * Validate the program.
	 */
	if (!bpf_validate(fp->bf_insns, fp->bf_len)) {
		snprintf(p->errbuf, sizeof(p->errbuf),
			"BPF program is not valid");
		return (-1);
	}

	/*
	 * Free up any already installed program.
	 */
	pcap_freecode(&p->fcode);

	prog_size = sizeof(*fp->bf_insns) * fp->bf_len;
	p->fcode.bf_len = fp->bf_len;
	p->fcode.bf_insns = (struct bpf_insn *)malloc(prog_size);
	if (p->fcode.bf_insns == NULL) {
		snprintf(p->errbuf, sizeof(p->errbuf),
			 "malloc: %s", pcap_strerror(errno));
		return (-1);
	}
	memcpy(p->fcode.bf_insns, fp->bf_insns, prog_size);
	return (0);
}
#ifdef BDEBUG
static void
opt_dump(root)
	struct block *root;
{
	struct bpf_program f;

	memset(bids, 0, sizeof bids);
	f.bf_insns = icode_to_fcode(root, &f.bf_len);
	bpf_dump(&f, 1);
	putchar('\n');
	free((char *)f.bf_insns);
}
#endif