net/core/filter.c
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Author:
 *     Jay Schulist <jschlst@samba.org>
 *
 * Based on the design of:
 *     - The Berkeley Packet Filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/reciprocal_div.h>
#include <linux/ratelimit.h>
enum {
	BPF_S_RET_K = 1,
	BPF_S_RET_A,
	BPF_S_ALU_ADD_K,
	BPF_S_ALU_ADD_X,
	BPF_S_ALU_SUB_K,
	BPF_S_ALU_SUB_X,
	BPF_S_ALU_MUL_K,
	BPF_S_ALU_MUL_X,
	BPF_S_ALU_DIV_X,
	BPF_S_ALU_AND_K,
	BPF_S_ALU_AND_X,
	BPF_S_ALU_OR_K,
	BPF_S_ALU_OR_X,
	BPF_S_ALU_LSH_K,
	BPF_S_ALU_LSH_X,
	BPF_S_ALU_RSH_K,
	BPF_S_ALU_RSH_X,
	BPF_S_ALU_NEG,
	BPF_S_LD_W_ABS,
	BPF_S_LD_H_ABS,
	BPF_S_LD_B_ABS,
	BPF_S_LD_W_LEN,
	BPF_S_LD_W_IND,
	BPF_S_LD_H_IND,
	BPF_S_LD_B_IND,
	BPF_S_LD_IMM,
	BPF_S_LDX_W_LEN,
	BPF_S_LDX_B_MSH,
	BPF_S_LDX_IMM,
	BPF_S_MISC_TAX,
	BPF_S_MISC_TXA,
	BPF_S_ALU_DIV_K,
	BPF_S_LD_MEM,
	BPF_S_LDX_MEM,
	BPF_S_ST,
	BPF_S_STX,
	BPF_S_JMP_JA,
	BPF_S_JMP_JEQ_K,
	BPF_S_JMP_JEQ_X,
	BPF_S_JMP_JGE_K,
	BPF_S_JMP_JGE_X,
	BPF_S_JMP_JGT_K,
	BPF_S_JMP_JGT_X,
	BPF_S_JMP_JSET_K,
	BPF_S_JMP_JSET_X,
	/* Ancillary data */
	BPF_S_ANC_PROTOCOL,
	BPF_S_ANC_PKTTYPE,
	BPF_S_ANC_IFINDEX,
	BPF_S_ANC_NLATTR,
	BPF_S_ANC_NLATTR_NEST,
	BPF_S_ANC_MARK,
	BPF_S_ANC_QUEUE,
	BPF_S_ANC_HATYPE,
	BPF_S_ANC_RXHASH,
	BPF_S_ANC_CPU,
};
/* No hurry in this branch */
static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;
	return NULL;
}
static inline void *load_pointer(const struct sk_buff *skb, int k,
				 unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);
	return __load_pointer(skb, k, size);
}
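
/*
 * Negative offsets let a filter address packet headers directly rather
 * than relative to skb->data: k values at or above SKF_NET_OFF are taken
 * relative to the network header (for example, SKF_NET_OFF + 9 reads the
 * protocol byte of an IPv4 header), and values at or above SKF_LL_OFF are
 * taken relative to the link-layer header.
 */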
/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then trim skb->data to the length returned by
 * sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is smaller
 * than pkt_len we keep the whole skb->data. This is the socket level
 * wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = sk_run_filter(skb, filter->insns);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter);
/**
 *	sk_run_filter - run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@fentry: filter to apply
 *
 * Decode and apply filter instructions to the skb->data.
 * Return the length to keep, 0 for none. @skb is the data we are
 * filtering, @fentry is the array of filter instructions.
 * Because all jumps are guaranteed to be before the last instruction,
 * and the last instruction is guaranteed to be a RET, we don't need to
 * check flen. (We used to pass the length of the filter to this function.)
 */
unsigned int sk_run_filter(const struct sk_buff *skb,
			   const struct sock_filter *fentry)
{
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	u32 tmp;
	int k;

	/*
	 * Process the array of filter instructions.
	 */
	for (;; fentry++) {
#if defined(CONFIG_X86_32)
#define	K (fentry->k)
#else
		const u32 K = fentry->k;
#endif
		switch (fentry->code) {
		case BPF_S_ALU_ADD_X:
			A += X;
			continue;
		case BPF_S_ALU_ADD_K:
			A += K;
			continue;
		case BPF_S_ALU_SUB_X:
			A -= X;
			continue;
		case BPF_S_ALU_SUB_K:
			A -= K;
			continue;
		case BPF_S_ALU_MUL_X:
			A *= X;
			continue;
		case BPF_S_ALU_MUL_K:
			A *= K;
			continue;
		case BPF_S_ALU_DIV_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_S_ALU_DIV_K:
			A = reciprocal_divide(A, K);
			continue;
		case BPF_S_ALU_AND_X:
			A &= X;
			continue;
		case BPF_S_ALU_AND_K:
			A &= K;
			continue;
		case BPF_S_ALU_OR_X:
			A |= X;
			continue;
		case BPF_S_ALU_OR_K:
			A |= K;
			continue;
		case BPF_S_ALU_LSH_X:
			A <<= X;
			continue;
		case BPF_S_ALU_LSH_K:
			A <<= K;
			continue;
		case BPF_S_ALU_RSH_X:
			A >>= X;
			continue;
		case BPF_S_ALU_RSH_K:
			A >>= K;
			continue;
		case BPF_S_ALU_NEG:
			A = -A;
			continue;
		case BPF_S_JMP_JA:
			fentry += K;
			continue;
		case BPF_S_JMP_JGT_K:
			fentry += (A > K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_K:
			fentry += (A >= K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_K:
			fentry += (A == K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_K:
			fentry += (A & K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGT_X:
			fentry += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_X:
			fentry += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_X:
			fentry += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_X:
			fentry += (A & X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_LD_W_ABS:
			k = K;
load_w:
			ptr = load_pointer(skb, k, 4, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be32(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_H_ABS:
			k = K;
load_h:
			ptr = load_pointer(skb, k, 2, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be16(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_B_ABS:
			k = K;
load_b:
			ptr = load_pointer(skb, k, 1, &tmp);
			if (ptr != NULL) {
				A = *(u8 *)ptr;
				continue;
			}
			return 0;
		case BPF_S_LD_W_LEN:
			A = skb->len;
			continue;
		case BPF_S_LDX_W_LEN:
			X = skb->len;
			continue;
		case BPF_S_LD_W_IND:
			k = X + K;
			goto load_w;
		case BPF_S_LD_H_IND:
			k = X + K;
			goto load_h;
		case BPF_S_LD_B_IND:
			k = X + K;
			goto load_b;
		case BPF_S_LDX_B_MSH:
			ptr = load_pointer(skb, K, 1, &tmp);
			if (ptr != NULL) {
				X = (*(u8 *)ptr & 0xf) << 2;
				continue;
			}
			return 0;
		case BPF_S_LD_IMM:
			A = K;
			continue;
		case BPF_S_LDX_IMM:
			X = K;
			continue;
		case BPF_S_LD_MEM:
			A = mem[K];
			continue;
		case BPF_S_LDX_MEM:
			X = mem[K];
			continue;
		case BPF_S_MISC_TAX:
			X = A;
			continue;
		case BPF_S_MISC_TXA:
			A = X;
			continue;
		case BPF_S_RET_K:
			return K;
		case BPF_S_RET_A:
			return A;
		case BPF_S_ST:
			mem[K] = A;
			continue;
		case BPF_S_STX:
			mem[K] = X;
			continue;
		case BPF_S_ANC_PROTOCOL:
			A = ntohs(skb->protocol);
			continue;
		case BPF_S_ANC_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case BPF_S_ANC_IFINDEX:
			if (!skb->dev)
				return 0;
			A = skb->dev->ifindex;
			continue;
		case BPF_S_ANC_MARK:
			A = skb->mark;
			continue;
		case BPF_S_ANC_QUEUE:
			A = skb->queue_mapping;
			continue;
		case BPF_S_ANC_HATYPE:
			if (!skb->dev)
				return 0;
			A = skb->dev->type;
			continue;
		case BPF_S_ANC_RXHASH:
			A = skb->rxhash;
			continue;
		case BPF_S_ANC_CPU:
			A = raw_smp_processor_id();
			continue;
		case BPF_S_ANC_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		case BPF_S_ANC_NLATTR_NEST: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = (struct nlattr *)&skb->data[A];
			/* the attribute must fit within the remaining data */
			if (nla->nla_len > skb->len - A)
				return 0;

			nla = nla_find_nested(nla, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		default:
			WARN_RATELIMIT(1, "Unknown code:%u jt:%u jf:%u k:%u\n",
				       fentry->code, fentry->jt,
				       fentry->jf, fentry->k);
			return 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sk_run_filter);
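
/*
 * A minimal illustrative program for the interpreter above (assuming the
 * packet starts with an Ethernet header, so offset 12 is the EtherType):
 *
 *	struct sock_filter ipv4_only[] = {
 *		BPF_STMT(BPF_LD  | BPF_H   | BPF_ABS, 12),		A = EtherType
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 1),	if A == ETH_P_IP fall through, else skip one
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),			accept up to 0xffff bytes
 *		BPF_STMT(BPF_RET | BPF_K, 0),				drop
 *	};
 *
 * After sk_chk_filter() rewrites the opcodes into BPF_S_* values, this runs
 * through the LD_H_ABS, JMP_JEQ_K and RET_K cases above.
 */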
/*
 * Security:
 * A BPF program is able to use 16 cells of memory to store intermediate
 * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
 * As we don't want to clear the mem[] array for each packet going through
 * sk_run_filter(), we check that a filter loaded by a user never tries to
 * read a cell it has not previously written, and we check all branches to
 * be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);
	masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;
	memset(masks, 0xff, flen * sizeof(*masks));
	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_S_ST:
		case BPF_S_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_S_JMP_JA:
			/* a jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* a jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}
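
/*
 * A short illustration of the check above, using hypothetical instruction
 * fragments built with the macros from <linux/filter.h>:
 *
 *	BPF_STMT(BPF_LD | BPF_MEM, 3)		rejected: mem[3] is read before
 *						any store marks it valid
 *
 *	BPF_STMT(BPF_ST, 3),
 *	BPF_STMT(BPF_LD | BPF_MEM, 3)		accepted: the ST sets bit 3 of
 *						memvalid, so the later load is
 *						known to read initialized data
 */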
/**
 *	sk_chk_filter - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int sk_chk_filter(struct sock_filter *filter, int flen)
{
	/*
	 * Valid instructions are initialized to non-0.
	 * Invalid instructions are initialized to 0.
	 */
	static const u8 codes[] = {
		[BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K,
		[BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X,
		[BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K,
		[BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X,
		[BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
		[BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
		[BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
		[BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
		[BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
		[BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
		[BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
		[BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
		[BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
		[BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
		[BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X,
		[BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG,
		[BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS,
		[BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS,
		[BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS,
		[BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN,
		[BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND,
		[BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND,
		[BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND,
		[BPF_LD|BPF_IMM]         = BPF_S_LD_IMM,
		[BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN,
		[BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH,
		[BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM,
		[BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX,
		[BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA,
		[BPF_RET|BPF_K]          = BPF_S_RET_K,
		[BPF_RET|BPF_A]          = BPF_S_RET_A,
		[BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K,
		[BPF_LD|BPF_MEM]         = BPF_S_LD_MEM,
		[BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM,
		[BPF_ST]                 = BPF_S_ST,
		[BPF_STX]                = BPF_S_STX,
		[BPF_JMP|BPF_JA]         = BPF_S_JMP_JA,
		[BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K,
		[BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X,
		[BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K,
		[BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X,
		[BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K,
		[BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X,
		[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
		[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
	};
	int pc;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;

		if (code >= ARRAY_SIZE(codes))
			return -EINVAL;
		code = codes[code];
		if (!code)
			return -EINVAL;
		/* Some instructions need special checks */
		switch (code) {
		case BPF_S_ALU_DIV_K:
			/* check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			ftest->k = reciprocal_value(ftest->k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
			/* check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_S_JMP_JA:
			/*
			 * Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* for conditionals both must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_S_LD_W_ABS:
		case BPF_S_LD_H_ABS:
		case BPF_S_LD_B_ABS:
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE:	\
				code = BPF_S_ANC_##CODE;	\
				break
			switch (ftest->k) {
			ANCILLARY(PROTOCOL);
			ANCILLARY(PKTTYPE);
			ANCILLARY(IFINDEX);
			ANCILLARY(NLATTR);
			ANCILLARY(NLATTR_NEST);
			ANCILLARY(MARK);
			ANCILLARY(QUEUE);
			ANCILLARY(HATYPE);
			ANCILLARY(RXHASH);
			ANCILLARY(CPU);
			}
		}
		ftest->code = code;
	}
	/* last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_S_RET_K:
	case BPF_S_RET_A:
		return check_load_and_stores(filter, flen);
	}
	return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	kfree(fp);
}
EXPORT_SYMBOL(sk_filter_release_rcu);
/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = sock_kmalloc(sk, fsize + sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, fsize + sizeof(*fp));
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;

	err = sk_chk_filter(fp->insns, fp->len);
	if (err) {
		sk_filter_uncharge(sk, fp);
		return err;
	}

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);
	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
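
/*
 * For reference, a userspace sketch of how this path is normally reached
 * (local names are illustrative only): a process builds a sock_fprog and
 * passes it through setsockopt(), which ends up calling sk_attach_filter().
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	accept every packet
 *	};
 *	struct sock_fprog prog = {
 *		.len	= 1,
 *		.filter	= insns,
 *	};
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 *
 * SO_DETACH_FILTER takes the sk_detach_filter() path below.
 */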
int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		rcu_assign_pointer(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);