[SECURITY] secmark: nul-terminate secdata
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / net / ipv4 / inet_diag.c
blob492858e6faf0140fee625d8f966093c57c141773
1 /*
2 * inet_diag.c Module for monitoring INET transport protocols sockets.
4 * Version: $Id: inet_diag.c,v 1.3 2002/02/01 22:01:04 davem Exp $
6 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/fcntl.h>
17 #include <linux/random.h>
18 #include <linux/cache.h>
19 #include <linux/init.h>
20 #include <linux/time.h>
22 #include <net/icmp.h>
23 #include <net/tcp.h>
24 #include <net/ipv6.h>
25 #include <net/inet_common.h>
26 #include <net/inet_connection_sock.h>
27 #include <net/inet_hashtables.h>
28 #include <net/inet_timewait_sock.h>
29 #include <net/inet6_hashtables.h>
31 #include <linux/inet.h>
32 #include <linux/stddef.h>
34 #include <linux/inet_diag.h>
/* Per-protocol diag handlers, indexed by netlink message type. */
static const struct inet_diag_handler **inet_diag_table;
38 struct inet_diag_entry {
39 u32 *saddr;
40 u32 *daddr;
41 u16 sport;
42 u16 dport;
43 u16 family;
44 u16 userlocks;
/* Kernel-side netlink socket for the INET_DIAG family. */
static struct sock *idiagnl;
/*
 * Reserve an attribute in @skb and return a pointer to its payload.
 * NOTE(review): __RTA_PUT jumps to the local rtattr_failure label on
 * overflow, so callers must provide that label — confirm against the
 * rtnetlink macro definitions.
 */
#define INET_DIAG_PUT(skb, attrtype, attrlen) \
	RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
52 static int inet_csk_diag_fill(struct sock *sk,
53 struct sk_buff *skb,
54 int ext, u32 pid, u32 seq, u16 nlmsg_flags,
55 const struct nlmsghdr *unlh)
57 const struct inet_sock *inet = inet_sk(sk);
58 const struct inet_connection_sock *icsk = inet_csk(sk);
59 struct inet_diag_msg *r;
60 struct nlmsghdr *nlh;
61 void *info = NULL;
62 struct inet_diag_meminfo *minfo = NULL;
63 unsigned char *b = skb->tail;
64 const struct inet_diag_handler *handler;
66 handler = inet_diag_table[unlh->nlmsg_type];
67 BUG_ON(handler == NULL);
69 nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
70 nlh->nlmsg_flags = nlmsg_flags;
72 r = NLMSG_DATA(nlh);
73 BUG_ON(sk->sk_state == TCP_TIME_WAIT);
75 if (ext & (1 << (INET_DIAG_MEMINFO - 1)))
76 minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo));
78 if (ext & (1 << (INET_DIAG_INFO - 1)))
79 info = INET_DIAG_PUT(skb, INET_DIAG_INFO,
80 handler->idiag_info_size);
82 if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
83 const size_t len = strlen(icsk->icsk_ca_ops->name);
85 strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
86 icsk->icsk_ca_ops->name);
89 r->idiag_family = sk->sk_family;
90 r->idiag_state = sk->sk_state;
91 r->idiag_timer = 0;
92 r->idiag_retrans = 0;
94 r->id.idiag_if = sk->sk_bound_dev_if;
95 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
96 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
98 r->id.idiag_sport = inet->sport;
99 r->id.idiag_dport = inet->dport;
100 r->id.idiag_src[0] = inet->rcv_saddr;
101 r->id.idiag_dst[0] = inet->daddr;
103 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
104 if (r->idiag_family == AF_INET6) {
105 struct ipv6_pinfo *np = inet6_sk(sk);
107 ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
108 &np->rcv_saddr);
109 ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
110 &np->daddr);
112 #endif
114 #define EXPIRES_IN_MS(tmo) ((tmo - jiffies) * 1000 + HZ - 1) / HZ
116 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
117 r->idiag_timer = 1;
118 r->idiag_retrans = icsk->icsk_retransmits;
119 r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
120 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
121 r->idiag_timer = 4;
122 r->idiag_retrans = icsk->icsk_probes_out;
123 r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
124 } else if (timer_pending(&sk->sk_timer)) {
125 r->idiag_timer = 2;
126 r->idiag_retrans = icsk->icsk_probes_out;
127 r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
128 } else {
129 r->idiag_timer = 0;
130 r->idiag_expires = 0;
132 #undef EXPIRES_IN_MS
134 r->idiag_uid = sock_i_uid(sk);
135 r->idiag_inode = sock_i_ino(sk);
137 if (minfo) {
138 minfo->idiag_rmem = atomic_read(&sk->sk_rmem_alloc);
139 minfo->idiag_wmem = sk->sk_wmem_queued;
140 minfo->idiag_fmem = sk->sk_forward_alloc;
141 minfo->idiag_tmem = atomic_read(&sk->sk_wmem_alloc);
144 handler->idiag_get_info(sk, r, info);
146 if (sk->sk_state < TCP_TIME_WAIT &&
147 icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
148 icsk->icsk_ca_ops->get_info(sk, ext, skb);
150 nlh->nlmsg_len = skb->tail - b;
151 return skb->len;
153 rtattr_failure:
154 nlmsg_failure:
155 skb_trim(skb, b - skb->data);
156 return -1;
159 static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
160 struct sk_buff *skb, int ext, u32 pid,
161 u32 seq, u16 nlmsg_flags,
162 const struct nlmsghdr *unlh)
164 long tmo;
165 struct inet_diag_msg *r;
166 const unsigned char *previous_tail = skb->tail;
167 struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq,
168 unlh->nlmsg_type, sizeof(*r));
170 r = NLMSG_DATA(nlh);
171 BUG_ON(tw->tw_state != TCP_TIME_WAIT);
173 nlh->nlmsg_flags = nlmsg_flags;
175 tmo = tw->tw_ttd - jiffies;
176 if (tmo < 0)
177 tmo = 0;
179 r->idiag_family = tw->tw_family;
180 r->idiag_state = tw->tw_state;
181 r->idiag_timer = 0;
182 r->idiag_retrans = 0;
183 r->id.idiag_if = tw->tw_bound_dev_if;
184 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
185 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
186 r->id.idiag_sport = tw->tw_sport;
187 r->id.idiag_dport = tw->tw_dport;
188 r->id.idiag_src[0] = tw->tw_rcv_saddr;
189 r->id.idiag_dst[0] = tw->tw_daddr;
190 r->idiag_state = tw->tw_substate;
191 r->idiag_timer = 3;
192 r->idiag_expires = (tmo * 1000 + HZ - 1) / HZ;
193 r->idiag_rqueue = 0;
194 r->idiag_wqueue = 0;
195 r->idiag_uid = 0;
196 r->idiag_inode = 0;
197 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
198 if (tw->tw_family == AF_INET6) {
199 const struct inet6_timewait_sock *tw6 =
200 inet6_twsk((struct sock *)tw);
202 ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
203 &tw6->tw_v6_rcv_saddr);
204 ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
205 &tw6->tw_v6_daddr);
207 #endif
208 nlh->nlmsg_len = skb->tail - previous_tail;
209 return skb->len;
210 nlmsg_failure:
211 skb_trim(skb, previous_tail - skb->data);
212 return -1;
215 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
216 int ext, u32 pid, u32 seq, u16 nlmsg_flags,
217 const struct nlmsghdr *unlh)
219 if (sk->sk_state == TCP_TIME_WAIT)
220 return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
221 skb, ext, pid, seq, nlmsg_flags,
222 unlh);
223 return inet_csk_diag_fill(sk, skb, ext, pid, seq, nlmsg_flags, unlh);
226 static int inet_diag_get_exact(struct sk_buff *in_skb,
227 const struct nlmsghdr *nlh)
229 int err;
230 struct sock *sk;
231 struct inet_diag_req *req = NLMSG_DATA(nlh);
232 struct sk_buff *rep;
233 struct inet_hashinfo *hashinfo;
234 const struct inet_diag_handler *handler;
236 handler = inet_diag_table[nlh->nlmsg_type];
237 BUG_ON(handler == NULL);
238 hashinfo = handler->idiag_hashinfo;
240 if (req->idiag_family == AF_INET) {
241 sk = inet_lookup(hashinfo, req->id.idiag_dst[0],
242 req->id.idiag_dport, req->id.idiag_src[0],
243 req->id.idiag_sport, req->id.idiag_if);
245 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
246 else if (req->idiag_family == AF_INET6) {
247 sk = inet6_lookup(hashinfo,
248 (struct in6_addr *)req->id.idiag_dst,
249 req->id.idiag_dport,
250 (struct in6_addr *)req->id.idiag_src,
251 req->id.idiag_sport,
252 req->id.idiag_if);
254 #endif
255 else {
256 return -EINVAL;
259 if (sk == NULL)
260 return -ENOENT;
262 err = -ESTALE;
263 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
264 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
265 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
266 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
267 goto out;
269 err = -ENOMEM;
270 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
271 sizeof(struct inet_diag_meminfo) +
272 handler->idiag_info_size + 64)),
273 GFP_KERNEL);
274 if (!rep)
275 goto out;
277 if (sk_diag_fill(sk, rep, req->idiag_ext,
278 NETLINK_CB(in_skb).pid,
279 nlh->nlmsg_seq, 0, nlh) <= 0)
280 BUG();
282 err = netlink_unicast(idiagnl, rep, NETLINK_CB(in_skb).pid,
283 MSG_DONTWAIT);
284 if (err > 0)
285 err = 0;
287 out:
288 if (sk) {
289 if (sk->sk_state == TCP_TIME_WAIT)
290 inet_twsk_put((struct inet_timewait_sock *)sk);
291 else
292 sock_put(sk);
294 return err;
297 static int bitstring_match(const u32 *a1, const u32 *a2, int bits)
299 int words = bits >> 5;
301 bits &= 0x1f;
303 if (words) {
304 if (memcmp(a1, a2, words << 2))
305 return 0;
307 if (bits) {
308 __u32 w1, w2;
309 __u32 mask;
311 w1 = a1[words];
312 w2 = a2[words];
314 mask = htonl((0xffffffff) << (32 - bits));
316 if ((w1 ^ w2) & mask)
317 return 0;
320 return 1;
324 static int inet_diag_bc_run(const void *bc, int len,
325 const struct inet_diag_entry *entry)
327 while (len > 0) {
328 int yes = 1;
329 const struct inet_diag_bc_op *op = bc;
331 switch (op->code) {
332 case INET_DIAG_BC_NOP:
333 break;
334 case INET_DIAG_BC_JMP:
335 yes = 0;
336 break;
337 case INET_DIAG_BC_S_GE:
338 yes = entry->sport >= op[1].no;
339 break;
340 case INET_DIAG_BC_S_LE:
341 yes = entry->dport <= op[1].no;
342 break;
343 case INET_DIAG_BC_D_GE:
344 yes = entry->dport >= op[1].no;
345 break;
346 case INET_DIAG_BC_D_LE:
347 yes = entry->dport <= op[1].no;
348 break;
349 case INET_DIAG_BC_AUTO:
350 yes = !(entry->userlocks & SOCK_BINDPORT_LOCK);
351 break;
352 case INET_DIAG_BC_S_COND:
353 case INET_DIAG_BC_D_COND: {
354 struct inet_diag_hostcond *cond;
355 u32 *addr;
357 cond = (struct inet_diag_hostcond *)(op + 1);
358 if (cond->port != -1 &&
359 cond->port != (op->code == INET_DIAG_BC_S_COND ?
360 entry->sport : entry->dport)) {
361 yes = 0;
362 break;
365 if (cond->prefix_len == 0)
366 break;
368 if (op->code == INET_DIAG_BC_S_COND)
369 addr = entry->saddr;
370 else
371 addr = entry->daddr;
373 if (bitstring_match(addr, cond->addr,
374 cond->prefix_len))
375 break;
376 if (entry->family == AF_INET6 &&
377 cond->family == AF_INET) {
378 if (addr[0] == 0 && addr[1] == 0 &&
379 addr[2] == htonl(0xffff) &&
380 bitstring_match(addr + 3, cond->addr,
381 cond->prefix_len))
382 break;
384 yes = 0;
385 break;
389 if (yes) {
390 len -= op->yes;
391 bc += op->yes;
392 } else {
393 len -= op->no;
394 bc += op->no;
397 return (len == 0);
400 static int valid_cc(const void *bc, int len, int cc)
402 while (len >= 0) {
403 const struct inet_diag_bc_op *op = bc;
405 if (cc > len)
406 return 0;
407 if (cc == len)
408 return 1;
409 if (op->yes < 4)
410 return 0;
411 len -= op->yes;
412 bc += op->yes;
414 return 0;
417 static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
419 const unsigned char *bc = bytecode;
420 int len = bytecode_len;
422 while (len > 0) {
423 struct inet_diag_bc_op *op = (struct inet_diag_bc_op *)bc;
425 //printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
426 switch (op->code) {
427 case INET_DIAG_BC_AUTO:
428 case INET_DIAG_BC_S_COND:
429 case INET_DIAG_BC_D_COND:
430 case INET_DIAG_BC_S_GE:
431 case INET_DIAG_BC_S_LE:
432 case INET_DIAG_BC_D_GE:
433 case INET_DIAG_BC_D_LE:
434 if (op->yes < 4 || op->yes > len + 4)
435 return -EINVAL;
436 case INET_DIAG_BC_JMP:
437 if (op->no < 4 || op->no > len + 4)
438 return -EINVAL;
439 if (op->no < len &&
440 !valid_cc(bytecode, bytecode_len, len - op->no))
441 return -EINVAL;
442 break;
443 case INET_DIAG_BC_NOP:
444 if (op->yes < 4 || op->yes > len + 4)
445 return -EINVAL;
446 break;
447 default:
448 return -EINVAL;
450 bc += op->yes;
451 len -= op->yes;
453 return len == 0 ? 0 : -EINVAL;
456 static int inet_csk_diag_dump(struct sock *sk,
457 struct sk_buff *skb,
458 struct netlink_callback *cb)
460 struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
462 if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
463 struct inet_diag_entry entry;
464 struct rtattr *bc = (struct rtattr *)(r + 1);
465 struct inet_sock *inet = inet_sk(sk);
467 entry.family = sk->sk_family;
468 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
469 if (entry.family == AF_INET6) {
470 struct ipv6_pinfo *np = inet6_sk(sk);
472 entry.saddr = np->rcv_saddr.s6_addr32;
473 entry.daddr = np->daddr.s6_addr32;
474 } else
475 #endif
477 entry.saddr = &inet->rcv_saddr;
478 entry.daddr = &inet->daddr;
480 entry.sport = inet->num;
481 entry.dport = ntohs(inet->dport);
482 entry.userlocks = sk->sk_userlocks;
484 if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
485 return 0;
488 return inet_csk_diag_fill(sk, skb, r->idiag_ext,
489 NETLINK_CB(cb->skb).pid,
490 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
493 static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
494 struct sk_buff *skb,
495 struct netlink_callback *cb)
497 struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
499 if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
500 struct inet_diag_entry entry;
501 struct rtattr *bc = (struct rtattr *)(r + 1);
503 entry.family = tw->tw_family;
504 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
505 if (tw->tw_family == AF_INET6) {
506 struct inet6_timewait_sock *tw6 =
507 inet6_twsk((struct sock *)tw);
508 entry.saddr = tw6->tw_v6_rcv_saddr.s6_addr32;
509 entry.daddr = tw6->tw_v6_daddr.s6_addr32;
510 } else
511 #endif
513 entry.saddr = &tw->tw_rcv_saddr;
514 entry.daddr = &tw->tw_daddr;
516 entry.sport = tw->tw_num;
517 entry.dport = ntohs(tw->tw_dport);
518 entry.userlocks = 0;
520 if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
521 return 0;
524 return inet_twsk_diag_fill(tw, skb, r->idiag_ext,
525 NETLINK_CB(cb->skb).pid,
526 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
529 static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
530 struct request_sock *req, u32 pid, u32 seq,
531 const struct nlmsghdr *unlh)
533 const struct inet_request_sock *ireq = inet_rsk(req);
534 struct inet_sock *inet = inet_sk(sk);
535 unsigned char *b = skb->tail;
536 struct inet_diag_msg *r;
537 struct nlmsghdr *nlh;
538 long tmo;
540 nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
541 nlh->nlmsg_flags = NLM_F_MULTI;
542 r = NLMSG_DATA(nlh);
544 r->idiag_family = sk->sk_family;
545 r->idiag_state = TCP_SYN_RECV;
546 r->idiag_timer = 1;
547 r->idiag_retrans = req->retrans;
549 r->id.idiag_if = sk->sk_bound_dev_if;
550 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
551 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
553 tmo = req->expires - jiffies;
554 if (tmo < 0)
555 tmo = 0;
557 r->id.idiag_sport = inet->sport;
558 r->id.idiag_dport = ireq->rmt_port;
559 r->id.idiag_src[0] = ireq->loc_addr;
560 r->id.idiag_dst[0] = ireq->rmt_addr;
561 r->idiag_expires = jiffies_to_msecs(tmo);
562 r->idiag_rqueue = 0;
563 r->idiag_wqueue = 0;
564 r->idiag_uid = sock_i_uid(sk);
565 r->idiag_inode = 0;
566 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
567 if (r->idiag_family == AF_INET6) {
568 ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
569 &inet6_rsk(req)->loc_addr);
570 ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
571 &inet6_rsk(req)->rmt_addr);
573 #endif
574 nlh->nlmsg_len = skb->tail - b;
576 return skb->len;
578 nlmsg_failure:
579 skb_trim(skb, b - skb->data);
580 return -1;
583 static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
584 struct netlink_callback *cb)
586 struct inet_diag_entry entry;
587 struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
588 struct inet_connection_sock *icsk = inet_csk(sk);
589 struct listen_sock *lopt;
590 struct rtattr *bc = NULL;
591 struct inet_sock *inet = inet_sk(sk);
592 int j, s_j;
593 int reqnum, s_reqnum;
594 int err = 0;
596 s_j = cb->args[3];
597 s_reqnum = cb->args[4];
599 if (s_j > 0)
600 s_j--;
602 entry.family = sk->sk_family;
604 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
606 lopt = icsk->icsk_accept_queue.listen_opt;
607 if (!lopt || !lopt->qlen)
608 goto out;
610 if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
611 bc = (struct rtattr *)(r + 1);
612 entry.sport = inet->num;
613 entry.userlocks = sk->sk_userlocks;
616 for (j = s_j; j < lopt->nr_table_entries; j++) {
617 struct request_sock *req, *head = lopt->syn_table[j];
619 reqnum = 0;
620 for (req = head; req; reqnum++, req = req->dl_next) {
621 struct inet_request_sock *ireq = inet_rsk(req);
623 if (reqnum < s_reqnum)
624 continue;
625 if (r->id.idiag_dport != ireq->rmt_port &&
626 r->id.idiag_dport)
627 continue;
629 if (bc) {
630 entry.saddr =
631 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
632 (entry.family == AF_INET6) ?
633 inet6_rsk(req)->loc_addr.s6_addr32 :
634 #endif
635 &ireq->loc_addr;
636 entry.daddr =
637 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
638 (entry.family == AF_INET6) ?
639 inet6_rsk(req)->rmt_addr.s6_addr32 :
640 #endif
641 &ireq->rmt_addr;
642 entry.dport = ntohs(ireq->rmt_port);
644 if (!inet_diag_bc_run(RTA_DATA(bc),
645 RTA_PAYLOAD(bc), &entry))
646 continue;
649 err = inet_diag_fill_req(skb, sk, req,
650 NETLINK_CB(cb->skb).pid,
651 cb->nlh->nlmsg_seq, cb->nlh);
652 if (err < 0) {
653 cb->args[3] = j + 1;
654 cb->args[4] = reqnum;
655 goto out;
659 s_reqnum = 0;
662 out:
663 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
665 return err;
668 static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
670 int i, num;
671 int s_i, s_num;
672 struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
673 const struct inet_diag_handler *handler;
674 struct inet_hashinfo *hashinfo;
676 handler = inet_diag_table[cb->nlh->nlmsg_type];
677 BUG_ON(handler == NULL);
678 hashinfo = handler->idiag_hashinfo;
680 s_i = cb->args[1];
681 s_num = num = cb->args[2];
683 if (cb->args[0] == 0) {
684 if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
685 goto skip_listen_ht;
687 inet_listen_lock(hashinfo);
688 for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
689 struct sock *sk;
690 struct hlist_node *node;
692 num = 0;
693 sk_for_each(sk, node, &hashinfo->listening_hash[i]) {
694 struct inet_sock *inet = inet_sk(sk);
696 if (num < s_num) {
697 num++;
698 continue;
701 if (r->id.idiag_sport != inet->sport &&
702 r->id.idiag_sport)
703 goto next_listen;
705 if (!(r->idiag_states & TCPF_LISTEN) ||
706 r->id.idiag_dport ||
707 cb->args[3] > 0)
708 goto syn_recv;
710 if (inet_csk_diag_dump(sk, skb, cb) < 0) {
711 inet_listen_unlock(hashinfo);
712 goto done;
715 syn_recv:
716 if (!(r->idiag_states & TCPF_SYN_RECV))
717 goto next_listen;
719 if (inet_diag_dump_reqs(skb, sk, cb) < 0) {
720 inet_listen_unlock(hashinfo);
721 goto done;
724 next_listen:
725 cb->args[3] = 0;
726 cb->args[4] = 0;
727 ++num;
730 s_num = 0;
731 cb->args[3] = 0;
732 cb->args[4] = 0;
734 inet_listen_unlock(hashinfo);
735 skip_listen_ht:
736 cb->args[0] = 1;
737 s_i = num = s_num = 0;
740 if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
741 return skb->len;
743 for (i = s_i; i < hashinfo->ehash_size; i++) {
744 struct inet_ehash_bucket *head = &hashinfo->ehash[i];
745 struct sock *sk;
746 struct hlist_node *node;
748 if (i > s_i)
749 s_num = 0;
751 read_lock_bh(&head->lock);
752 num = 0;
753 sk_for_each(sk, node, &head->chain) {
754 struct inet_sock *inet = inet_sk(sk);
756 if (num < s_num)
757 goto next_normal;
758 if (!(r->idiag_states & (1 << sk->sk_state)))
759 goto next_normal;
760 if (r->id.idiag_sport != inet->sport &&
761 r->id.idiag_sport)
762 goto next_normal;
763 if (r->id.idiag_dport != inet->dport &&
764 r->id.idiag_dport)
765 goto next_normal;
766 if (inet_csk_diag_dump(sk, skb, cb) < 0) {
767 read_unlock_bh(&head->lock);
768 goto done;
770 next_normal:
771 ++num;
774 if (r->idiag_states & TCPF_TIME_WAIT) {
775 struct inet_timewait_sock *tw;
777 inet_twsk_for_each(tw, node,
778 &hashinfo->ehash[i + hashinfo->ehash_size].chain) {
780 if (num < s_num)
781 goto next_dying;
782 if (r->id.idiag_sport != tw->tw_sport &&
783 r->id.idiag_sport)
784 goto next_dying;
785 if (r->id.idiag_dport != tw->tw_dport &&
786 r->id.idiag_dport)
787 goto next_dying;
788 if (inet_twsk_diag_dump(tw, skb, cb) < 0) {
789 read_unlock_bh(&head->lock);
790 goto done;
792 next_dying:
793 ++num;
796 read_unlock_bh(&head->lock);
799 done:
800 cb->args[1] = i;
801 cb->args[2] = num;
802 return skb->len;
805 static inline int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
807 if (!(nlh->nlmsg_flags&NLM_F_REQUEST))
808 return 0;
810 if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX)
811 goto err_inval;
813 if (inet_diag_table[nlh->nlmsg_type] == NULL)
814 return -ENOENT;
816 if (NLMSG_LENGTH(sizeof(struct inet_diag_req)) > skb->len)
817 goto err_inval;
819 if (nlh->nlmsg_flags&NLM_F_DUMP) {
820 if (nlh->nlmsg_len >
821 (4 + NLMSG_SPACE(sizeof(struct inet_diag_req)))) {
822 struct rtattr *rta = (void *)(NLMSG_DATA(nlh) +
823 sizeof(struct inet_diag_req));
824 if (rta->rta_type != INET_DIAG_REQ_BYTECODE ||
825 rta->rta_len < 8 ||
826 rta->rta_len >
827 (nlh->nlmsg_len -
828 NLMSG_SPACE(sizeof(struct inet_diag_req))))
829 goto err_inval;
830 if (inet_diag_bc_audit(RTA_DATA(rta), RTA_PAYLOAD(rta)))
831 goto err_inval;
833 return netlink_dump_start(idiagnl, skb, nlh,
834 inet_diag_dump, NULL);
835 } else
836 return inet_diag_get_exact(skb, nlh);
838 err_inval:
839 return -EINVAL;
843 static inline void inet_diag_rcv_skb(struct sk_buff *skb)
845 if (skb->len >= NLMSG_SPACE(0)) {
846 int err;
847 struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
849 if (nlh->nlmsg_len < sizeof(*nlh) ||
850 skb->len < nlh->nlmsg_len)
851 return;
852 err = inet_diag_rcv_msg(skb, nlh);
853 if (err || nlh->nlmsg_flags & NLM_F_ACK)
854 netlink_ack(skb, nlh, err);
858 static void inet_diag_rcv(struct sock *sk, int len)
860 struct sk_buff *skb;
861 unsigned int qlen = skb_queue_len(&sk->sk_receive_queue);
863 while (qlen-- && (skb = skb_dequeue(&sk->sk_receive_queue))) {
864 inet_diag_rcv_skb(skb);
865 kfree_skb(skb);
/* Serializes writers of inet_diag_table (register/unregister). */
static DEFINE_SPINLOCK(inet_diag_register_lock);
871 int inet_diag_register(const struct inet_diag_handler *h)
873 const __u16 type = h->idiag_type;
874 int err = -EINVAL;
876 if (type >= INET_DIAG_GETSOCK_MAX)
877 goto out;
879 spin_lock(&inet_diag_register_lock);
880 err = -EEXIST;
881 if (inet_diag_table[type] == NULL) {
882 inet_diag_table[type] = h;
883 err = 0;
885 spin_unlock(&inet_diag_register_lock);
886 out:
887 return err;
889 EXPORT_SYMBOL_GPL(inet_diag_register);
891 void inet_diag_unregister(const struct inet_diag_handler *h)
893 const __u16 type = h->idiag_type;
895 if (type >= INET_DIAG_GETSOCK_MAX)
896 return;
898 spin_lock(&inet_diag_register_lock);
899 inet_diag_table[type] = NULL;
900 spin_unlock(&inet_diag_register_lock);
902 synchronize_rcu();
904 EXPORT_SYMBOL_GPL(inet_diag_unregister);
906 static int __init inet_diag_init(void)
908 const int inet_diag_table_size = (INET_DIAG_GETSOCK_MAX *
909 sizeof(struct inet_diag_handler *));
910 int err = -ENOMEM;
912 inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
913 if (!inet_diag_table)
914 goto out;
916 idiagnl = netlink_kernel_create(NETLINK_INET_DIAG, 0, inet_diag_rcv,
917 THIS_MODULE);
918 if (idiagnl == NULL)
919 goto out_free_table;
920 err = 0;
921 out:
922 return err;
923 out_free_table:
924 kfree(inet_diag_table);
925 goto out;
928 static void __exit inet_diag_exit(void)
930 sock_release(idiagnl->sk_socket);
931 kfree(inet_diag_table);
934 module_init(inet_diag_init);
935 module_exit(inet_diag_exit);
936 MODULE_LICENSE("GPL");