net/core/filter.c
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Author:
 *	Jay Schulist <jschlst@samba.org>
 *
 * Based on the design of:
 *	- The Berkeley Packet Filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/reciprocal_div.h>
/* No hurry in this branch */
static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;
	return NULL;
}
static inline void *load_pointer(const struct sk_buff *skb, int k,
				 unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);
	return __load_pointer(skb, k, size);
}
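/*
 * Illustrative sketch (not part of the original source): classic BPF
 * addresses headers through the negative offset bases handled by
 * __load_pointer() above.  For example, an instruction loading the
 * first byte of the link-layer header would look like:
 *
 *	struct sock_filter insn = {
 *		.code = BPF_LD | BPF_B | BPF_ABS,
 *		.k    = SKF_LL_OFF + 0,
 *	};
 *
 * load_pointer() sees k < 0 and falls through to __load_pointer(),
 * which resolves SKF_LL_OFF against skb_mac_header(skb).
 */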
/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = SK_RUN_FILTER(filter, skb);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter);
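/*
 * Illustrative caller sketch (an assumption, not code from this file):
 * protocol receive paths typically invoke sk_filter() before queueing
 * a packet to a socket and drop it on error, e.g.
 *
 *	if (sk_filter(sk, skb)) {
 *		kfree_skb(skb);
 *		return NET_RX_DROP;
 *	}
 */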
/**
 *	sk_run_filter - run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@fentry: filter to apply
 *
 * Decode and apply filter instructions to the skb->data.
 * Return length to keep, 0 for none. @skb is the data we are
 * filtering, @fentry is the array of filter instructions.
 * Because all jumps are guaranteed to be before the last instruction,
 * and the last instruction is guaranteed to be a RET, we don't need to
 * check flen. (We used to pass the length of the filter to this function.)
 */
unsigned int sk_run_filter(const struct sk_buff *skb,
			   const struct sock_filter *fentry)
{
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	u32 tmp;
	int k;

	/*
	 * Process array of filter instructions.
	 */
	for (;; fentry++) {
#if defined(CONFIG_X86_32)
#define	K (fentry->k)
#else
		const u32 K = fentry->k;
#endif

		switch (fentry->code) {
		case BPF_S_ALU_ADD_X:
			A += X;
			continue;
		case BPF_S_ALU_ADD_K:
			A += K;
			continue;
		case BPF_S_ALU_SUB_X:
			A -= X;
			continue;
		case BPF_S_ALU_SUB_K:
			A -= K;
			continue;
		case BPF_S_ALU_MUL_X:
			A *= X;
			continue;
		case BPF_S_ALU_MUL_K:
			A *= K;
			continue;
		case BPF_S_ALU_DIV_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_S_ALU_DIV_K:
			A = reciprocal_divide(A, K);
			continue;
		case BPF_S_ALU_AND_X:
			A &= X;
			continue;
		case BPF_S_ALU_AND_K:
			A &= K;
			continue;
		case BPF_S_ALU_OR_X:
			A |= X;
			continue;
		case BPF_S_ALU_OR_K:
			A |= K;
			continue;
		case BPF_S_ALU_LSH_X:
			A <<= X;
			continue;
		case BPF_S_ALU_LSH_K:
			A <<= K;
			continue;
		case BPF_S_ALU_RSH_X:
			A >>= X;
			continue;
		case BPF_S_ALU_RSH_K:
			A >>= K;
			continue;
		case BPF_S_ALU_NEG:
			A = -A;
			continue;
		case BPF_S_JMP_JA:
			fentry += K;
			continue;
		case BPF_S_JMP_JGT_K:
			fentry += (A > K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_K:
			fentry += (A >= K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_K:
			fentry += (A == K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_K:
			fentry += (A & K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGT_X:
			fentry += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_X:
			fentry += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_X:
			fentry += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_X:
			fentry += (A & X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_LD_W_ABS:
			k = K;
load_w:
			ptr = load_pointer(skb, k, 4, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be32(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_H_ABS:
			k = K;
load_h:
			ptr = load_pointer(skb, k, 2, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be16(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_B_ABS:
			k = K;
load_b:
			ptr = load_pointer(skb, k, 1, &tmp);
			if (ptr != NULL) {
				A = *(u8 *)ptr;
				continue;
			}
			return 0;
		case BPF_S_LD_W_LEN:
			A = skb->len;
			continue;
		case BPF_S_LDX_W_LEN:
			X = skb->len;
			continue;
		case BPF_S_LD_W_IND:
			k = X + K;
			goto load_w;
		case BPF_S_LD_H_IND:
			k = X + K;
			goto load_h;
		case BPF_S_LD_B_IND:
			k = X + K;
			goto load_b;
		case BPF_S_LDX_B_MSH:
			ptr = load_pointer(skb, K, 1, &tmp);
			if (ptr != NULL) {
				X = (*(u8 *)ptr & 0xf) << 2;
				continue;
			}
			return 0;
		case BPF_S_LD_IMM:
			A = K;
			continue;
		case BPF_S_LDX_IMM:
			X = K;
			continue;
		case BPF_S_LD_MEM:
			A = mem[K];
			continue;
		case BPF_S_LDX_MEM:
			X = mem[K];
			continue;
		case BPF_S_MISC_TAX:
			X = A;
			continue;
		case BPF_S_MISC_TXA:
			A = X;
			continue;
		case BPF_S_RET_K:
			return K;
		case BPF_S_RET_A:
			return A;
		case BPF_S_ST:
			mem[K] = A;
			continue;
		case BPF_S_STX:
			mem[K] = X;
			continue;
		case BPF_S_ANC_PROTOCOL:
			A = ntohs(skb->protocol);
			continue;
		case BPF_S_ANC_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case BPF_S_ANC_IFINDEX:
			if (!skb->dev)
				return 0;
			A = skb->dev->ifindex;
			continue;
		case BPF_S_ANC_MARK:
			A = skb->mark;
			continue;
		case BPF_S_ANC_QUEUE:
			A = skb->queue_mapping;
			continue;
		case BPF_S_ANC_HATYPE:
			if (!skb->dev)
				return 0;
			A = skb->dev->type;
			continue;
		case BPF_S_ANC_RXHASH:
			A = skb->rxhash;
			continue;
		case BPF_S_ANC_CPU:
			A = raw_smp_processor_id();
			continue;
		case BPF_S_ANC_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			/* guard the unsigned subtraction below */
			if (skb->len < sizeof(struct nlattr))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		case BPF_S_ANC_NLATTR_NEST: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			/* guard the unsigned subtractions below */
			if (skb->len < sizeof(struct nlattr))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = (struct nlattr *)&skb->data[A];
			if (nla->nla_len > skb->len - A)
				return 0;

			nla = nla_find_nested(nla, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		default:
			WARN_ON(1);
			return 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sk_run_filter);
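/*
 * Worked example (illustrative, not part of the original file): a
 * four-instruction program that accepts IPv4 frames on Ethernet and
 * truncates them to 96 bytes, written in the userspace encoding that
 * sk_chk_filter() later translates into the BPF_S_* codes used above:
 *
 *	struct sock_filter prog[] = {
 *		{ BPF_LD  | BPF_H   | BPF_ABS, 0, 0, 12     },   ldh [12]
 *		{ BPF_JMP | BPF_JEQ | BPF_K,   0, 1, 0x0800 },   jeq #ETH_P_IP
 *		{ BPF_RET | BPF_K,             0, 0, 96     },   ret #96
 *		{ BPF_RET | BPF_K,             0, 0, 0      },   ret #0
 *	};
 *
 * sk_run_filter() returns 96 for IPv4 packets (keep, trimmed by
 * sk_filter()) and 0 otherwise (toss).
 */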
/*
 * Security:
 * A BPF program is able to use 16 cells of memory to store intermediate
 * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
 * As we don't want to clear the mem[] array for each packet going through
 * sk_run_filter(), we check that a filter loaded by a user never tries to
 * read a cell that was not previously written to, and we check all branches
 * to be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);
	masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;
	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_S_ST:
		case BPF_S_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_S_JMP_JA:
			/* a jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* a jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}
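/*
 * Illustrative rejection (an assumption, not from the original file):
 * check_load_and_stores() fails a program whose first instruction
 * reads scratch cell 0 before anything has stored to it:
 *
 *	{ BPF_LD  | BPF_MEM, 0, 0, 0 },   ld M[0] - never written
 *	{ BPF_RET | BPF_K,   0, 0, 0 },
 *
 * Bit 0 of memvalid is still clear when the load is scanned, so the
 * function returns -EINVAL and sk_chk_filter() rejects the filter.
 */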
/**
 *	sk_chk_filter - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int sk_chk_filter(struct sock_filter *filter, int flen)
{
	/*
	 * Valid instructions are initialized to non-0.
	 * Invalid instructions are initialized to 0.
	 */
	static const u8 codes[] = {
		[BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K,
		[BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X,
		[BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K,
		[BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X,
		[BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
		[BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
		[BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
		[BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
		[BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
		[BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
		[BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
		[BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
		[BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
		[BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
		[BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X,
		[BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG,
		[BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS,
		[BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS,
		[BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS,
		[BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN,
		[BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND,
		[BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND,
		[BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND,
		[BPF_LD|BPF_IMM]         = BPF_S_LD_IMM,
		[BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN,
		[BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH,
		[BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM,
		[BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX,
		[BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA,
		[BPF_RET|BPF_K]          = BPF_S_RET_K,
		[BPF_RET|BPF_A]          = BPF_S_RET_A,
		[BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K,
		[BPF_LD|BPF_MEM]         = BPF_S_LD_MEM,
		[BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM,
		[BPF_ST]                 = BPF_S_ST,
		[BPF_STX]                = BPF_S_STX,
		[BPF_JMP|BPF_JA]         = BPF_S_JMP_JA,
		[BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K,
		[BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X,
		[BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K,
		[BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X,
		[BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K,
		[BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X,
		[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
		[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
	};
	int pc;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;

		if (code >= ARRAY_SIZE(codes))
			return -EINVAL;
		code = codes[code];
		if (!code)
			return -EINVAL;
		/* Some instructions need special checks */
		switch (code) {
		case BPF_S_ALU_DIV_K:
			/* check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			ftest->k = reciprocal_value(ftest->k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
			/* check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_S_JMP_JA:
			/*
			 * Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* for conditionals both must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_S_LD_W_ABS:
		case BPF_S_LD_H_ABS:
		case BPF_S_LD_B_ABS:
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE:	\
				code = BPF_S_ANC_##CODE;	\
				break
			switch (ftest->k) {
			ANCILLARY(PROTOCOL);
			ANCILLARY(PKTTYPE);
			ANCILLARY(IFINDEX);
			ANCILLARY(NLATTR);
			ANCILLARY(NLATTR_NEST);
			ANCILLARY(MARK);
			ANCILLARY(QUEUE);
			ANCILLARY(HATYPE);
			ANCILLARY(RXHASH);
			ANCILLARY(CPU);
			}
		}
		ftest->code = code;
	}

	/* last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_S_RET_K:
	case BPF_S_RET_A:
		return check_load_and_stores(filter, flen);
	}
	return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
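/*
 * Worked note (illustrative): for BPF_S_ALU_DIV_K, sk_chk_filter()
 * replaces the user-supplied divisor with reciprocal_value(k), so a
 * program containing "div #7" stores roughly 2^32 / 7 in ftest->k and
 * sk_run_filter() evaluates A = reciprocal_divide(A, K), a 32x32->64
 * multiply plus shift, instead of issuing a hardware divide per packet.
 */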
/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	bpf_jit_free(fp);
	kfree(fp);
}
EXPORT_SYMBOL(sk_filter_release_rcu);
/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = sock_kmalloc(sk, fsize + sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, fsize + sizeof(*fp));
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;
	fp->bpf_func = sk_run_filter;

	err = sk_chk_filter(fp->insns, fp->len);
	if (err) {
		sk_filter_uncharge(sk, fp);
		return err;
	}

	bpf_jit_compile(fp);

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);
	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
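/*
 * Userspace sketch (an assumption, not part of this file): filters
 * normally reach sk_attach_filter() via setsockopt():
 *
 *	struct sock_fprog fprog = { .len = 4, .filter = prog };
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
 *		       &fprog, sizeof(fprog)) < 0)
 *		perror("SO_ATTACH_FILTER");
 */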
int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		rcu_assign_pointer(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
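/*
 * Userspace sketch (an assumption): detaching takes the same path in
 * reverse via setsockopt(); the option value is ignored but must be
 * supplied:
 *
 *	int dummy = 0;
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER,
 *		       &dummy, sizeof(dummy)) < 0)
 *		perror("SO_DETACH_FILTER");
 */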