/*
 * inet_diag.c	Module for monitoring INET transport protocols sockets.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/time.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/inet6_hashtables.h>
#include <net/netlink.h>

#include <linux/inet.h>
#include <linux/stddef.h>

#include <linux/inet_diag.h>
#include <linux/sock_diag.h>
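
/* Registered per-protocol handlers (TCP, DCCP, ...), indexed by IPPROTO_*
 * value and protected by inet_diag_table_mutex below.
 */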
static const struct inet_diag_handler **inet_diag_table;

struct inet_diag_entry {
	__be32 *saddr;
	__be32 *daddr;
	u16 sport;
	u16 dport;
	u16 family;
	u16 userlocks;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr saddr_storage;	/* for IPv4-mapped-IPv6 addresses */
	struct in6_addr daddr_storage;	/* for IPv4-mapped-IPv6 addresses */
#endif
};

static DEFINE_MUTEX(inet_diag_table_mutex);
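
/* Find the handler for @proto, requesting the protocol's diag module if it
 * is not yet registered. inet_diag_table_mutex is taken and left held in
 * both the success and the ERR_PTR(-ENOENT) case; callers release it via
 * inet_diag_unlock_handler().
 */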
static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
{
	if (!inet_diag_table[proto])
		request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
			       NETLINK_SOCK_DIAG, AF_INET, proto);

	mutex_lock(&inet_diag_table_mutex);
	if (!inet_diag_table[proto])
		return ERR_PTR(-ENOENT);

	return inet_diag_table[proto];
}

static inline void inet_diag_unlock_handler(
	const struct inet_diag_handler *handler)
{
	mutex_unlock(&inet_diag_table_mutex);
}
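
/* Fill one inet_diag reply message for a full socket: identity, state and
 * timers, plus whatever extensions (MEMINFO, INFO, CONG, ...) were requested
 * through req->idiag_ext.
 */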
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
		      struct sk_buff *skb, struct inet_diag_req_v2 *req,
		      struct user_namespace *user_ns,
		      u32 portid, u32 seq, u16 nlmsg_flags,
		      const struct nlmsghdr *unlh)
{
	const struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	struct nlattr *attr;
	void *info = NULL;
	const struct inet_diag_handler *handler;
	int ext = req->idiag_ext;

	handler = inet_diag_table[req->sdiag_protocol];
	BUG_ON(handler == NULL);

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(sk->sk_state == TCP_TIME_WAIT);

	r->idiag_family = sk->sk_family;
	r->idiag_state = sk->sk_state;
	r->idiag_timer = 0;
	r->idiag_retrans = 0;

	r->id.idiag_if = sk->sk_bound_dev_if;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);

	r->id.idiag_sport = inet->inet_sport;
	r->id.idiag_dport = inet->inet_dport;
	r->id.idiag_src[0] = inet->inet_rcv_saddr;
	r->id.idiag_dst[0] = inet->inet_daddr;

	if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto errout;

	/* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
	 * hence this needs to be included regardless of socket family.
	 */
	if (ext & (1 << (INET_DIAG_TOS - 1)))
		if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
			goto errout;

#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {

		*(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr;

		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
			if (nla_put_u8(skb, INET_DIAG_TCLASS,
				       inet6_sk(sk)->tclass) < 0)
				goto errout;
	}
#endif

	r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
	r->idiag_inode = sock_i_ino(sk);

	if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
		struct inet_diag_meminfo minfo = {
			.idiag_rmem = sk_rmem_alloc_get(sk),
			.idiag_wmem = sk->sk_wmem_queued,
			.idiag_fmem = sk->sk_forward_alloc,
			.idiag_tmem = sk_wmem_alloc_get(sk),
		};

		if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
			goto errout;
	}

	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
		if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
			goto errout;

	if (icsk == NULL) {
		handler->idiag_get_info(sk, r, NULL);
		goto out;
	}

#define EXPIRES_IN_MS(tmo)  DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		r->idiag_timer = 1;
		r->idiag_retrans = icsk->icsk_retransmits;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		r->idiag_timer = 4;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (timer_pending(&sk->sk_timer)) {
		r->idiag_timer = 2;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
	} else {
		r->idiag_timer = 0;
		r->idiag_expires = 0;
	}
#undef EXPIRES_IN_MS

	if (ext & (1 << (INET_DIAG_INFO - 1))) {
		attr = nla_reserve(skb, INET_DIAG_INFO,
				   sizeof(struct tcp_info));
		if (!attr)
			goto errout;

		info = nla_data(attr);
	}

	if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops)
		if (nla_put_string(skb, INET_DIAG_CONG,
				   icsk->icsk_ca_ops->name) < 0)
			goto errout;

	handler->idiag_get_info(sk, r, info);

	if (sk->sk_state < TCP_TIME_WAIT &&
	    icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
		icsk->icsk_ca_ops->get_info(sk, ext, skb);

out:
	return nlmsg_end(skb, nlh);

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);

static int inet_csk_diag_fill(struct sock *sk,
			      struct sk_buff *skb, struct inet_diag_req_v2 *req,
			      struct user_namespace *user_ns,
			      u32 portid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	return inet_sk_diag_fill(sk, inet_csk(sk),
			skb, req, user_ns, portid, seq, nlmsg_flags, unlh);
}
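
/* Like inet_csk_diag_fill(), but for a TIME_WAIT socket, which only carries
 * a subset of the fields (no uid, inode or queue information).
 */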
static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
			       struct sk_buff *skb, struct inet_diag_req_v2 *req,
			       u32 portid, u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh)
{
	s32 tmo;
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(tw->tw_state != TCP_TIME_WAIT);

	tmo = tw->tw_ttd - inet_tw_time_stamp();
	if (tmo < 0)
		tmo = 0;

	r->idiag_family = tw->tw_family;
	r->idiag_retrans = 0;
	r->id.idiag_if = tw->tw_bound_dev_if;
	sock_diag_save_cookie(tw, r->id.idiag_cookie);
	r->id.idiag_sport = tw->tw_sport;
	r->id.idiag_dport = tw->tw_dport;
	r->id.idiag_src[0] = tw->tw_rcv_saddr;
	r->id.idiag_dst[0] = tw->tw_daddr;
	r->idiag_state = tw->tw_substate;
	r->idiag_timer = 3;
	r->idiag_expires = jiffies_to_msecs(tmo);
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = 0;
	r->idiag_inode = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (tw->tw_family == AF_INET6) {
		*(struct in6_addr *)r->id.idiag_src = tw->tw_v6_rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = tw->tw_v6_daddr;
	}
#endif

	return nlmsg_end(skb, nlh);
}

static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			struct inet_diag_req_v2 *r,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u16 nlmsg_flags,
			const struct nlmsghdr *unlh)
{
	if (sk->sk_state == TCP_TIME_WAIT)
		return inet_twsk_diag_fill(inet_twsk(sk), skb, r, portid, seq,
					   nlmsg_flags, unlh);

	return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
				  nlmsg_flags, unlh);
}
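
/* Handle an exact-match request (no NLM_F_DUMP): look up the single socket
 * identified by req->id and unicast one reply back to the requester.
 */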
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
			    const struct nlmsghdr *nlh, struct inet_diag_req_v2 *req)
{
	int err;
	struct sock *sk;
	struct sk_buff *rep;
	struct net *net = sock_net(in_skb->sk);

	err = -EINVAL;
	if (req->sdiag_family == AF_INET) {
		sk = inet_lookup(net, hashinfo, req->id.idiag_dst[0],
				 req->id.idiag_dport, req->id.idiag_src[0],
				 req->id.idiag_sport, req->id.idiag_if);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6) {
		sk = inet6_lookup(net, hashinfo,
				  (struct in6_addr *)req->id.idiag_dst,
				  req->id.idiag_dport,
				  (struct in6_addr *)req->id.idiag_src,
				  req->id.idiag_sport,
				  req->id.idiag_if);
	}
#endif
	else {
		goto out_nosk;
	}

	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
	if (err)
		goto out;

	rep = nlmsg_new(sizeof(struct inet_diag_msg) +
			sizeof(struct inet_diag_meminfo) +
			sizeof(struct tcp_info) + 64, GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	err = sk_diag_fill(sk, rep, req,
			   sk_user_ns(NETLINK_CB(in_skb).sk),
			   NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, nlh);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		nlmsg_free(rep);
		goto out;
	}
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;

out:
	if (sk)
		sock_gen_put(sk);

out_nosk:
	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);

static int inet_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct inet_diag_req_v2 *req)
{
	const struct inet_diag_handler *handler;
	int err;

	handler = inet_diag_lock_handler(req->sdiag_protocol);
	if (IS_ERR(handler))
		err = PTR_ERR(handler);
	else
		err = handler->dump_one(in_skb, nlh, req);
	inet_diag_unlock_handler(handler);

	return err;
}
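
/* Compare the first @bits bits of two addresses given as arrays of __be32
 * words (4 words for IPv6, 1 for IPv4).
 */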
static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
{
	int words = bits >> 5;

	bits &= 0x1f;

	if (words) {
		if (memcmp(a1, a2, words << 2))
			return 0;
	}
	if (bits) {
		__be32 w1, w2;
		__be32 mask;

		w1 = a1[words];
		w2 = a2[words];

		mask = htonl((0xffffffff) << (32 - bits));

		if ((w1 ^ w2) & mask)
			return 0;
	}

	return 1;
}
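
/* Run the user-supplied filter bytecode against one socket's addresses and
 * ports. Each op either matches (advance by op->yes) or fails (advance by
 * op->no); reaching the end with len == 0 means the socket is accepted.
 */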
static int inet_diag_bc_run(const struct nlattr *_bc,
			    const struct inet_diag_entry *entry)
{
	const void *bc = nla_data(_bc);
	int len = nla_len(_bc);

	while (len > 0) {
		int yes = 1;
		const struct inet_diag_bc_op *op = bc;

		switch (op->code) {
		case INET_DIAG_BC_NOP:
			break;
		case INET_DIAG_BC_JMP:
			yes = 0;
			break;
		case INET_DIAG_BC_S_GE:
			yes = entry->sport >= op[1].no;
			break;
		case INET_DIAG_BC_S_LE:
			yes = entry->sport <= op[1].no;
			break;
		case INET_DIAG_BC_D_GE:
			yes = entry->dport >= op[1].no;
			break;
		case INET_DIAG_BC_D_LE:
			yes = entry->dport <= op[1].no;
			break;
		case INET_DIAG_BC_AUTO:
			yes = !(entry->userlocks & SOCK_BINDPORT_LOCK);
			break;
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND: {
			struct inet_diag_hostcond *cond;
			__be32 *addr;

			cond = (struct inet_diag_hostcond *)(op + 1);
			if (cond->port != -1 &&
			    cond->port != (op->code == INET_DIAG_BC_S_COND ?
					   entry->sport : entry->dport)) {
				yes = 0;
				break;
			}

			if (op->code == INET_DIAG_BC_S_COND)
				addr = entry->saddr;
			else
				addr = entry->daddr;

			if (cond->family != AF_UNSPEC &&
			    cond->family != entry->family) {
				if (entry->family == AF_INET6 &&
				    cond->family == AF_INET) {
					if (addr[0] == 0 && addr[1] == 0 &&
					    addr[2] == htonl(0xffff) &&
					    bitstring_match(addr + 3,
							    cond->addr,
							    cond->prefix_len))
						break;
				}
				yes = 0;
				break;
			}

			if (cond->prefix_len == 0)
				break;
			if (bitstring_match(addr, cond->addr,
					    cond->prefix_len))
				break;
			yes = 0;
			break;
		}
		}

		if (yes) {
			len -= op->yes;
			bc += op->yes;
		} else {
			len -= op->no;
			bc += op->no;
		}
	}
	return len == 0;
}

int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
{
	struct inet_diag_entry entry;
	struct inet_sock *inet = inet_sk(sk);

	if (bc == NULL)
		return 1;

	entry.family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
	if (entry.family == AF_INET6) {

		entry.saddr = sk->sk_v6_rcv_saddr.s6_addr32;
		entry.daddr = sk->sk_v6_daddr.s6_addr32;
	} else
#endif
	{
		entry.saddr = &inet->inet_rcv_saddr;
		entry.daddr = &inet->inet_daddr;
	}
	entry.sport = inet->inet_num;
	entry.dport = ntohs(inet->inet_dport);
	entry.userlocks = sk->sk_userlocks;

	return inet_diag_bc_run(bc, &entry);
}
EXPORT_SYMBOL_GPL(inet_diag_bc_sk);
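
/* Check that a jump target is valid: @cc is the number of bytes remaining
 * after the target, and must coincide with an op boundary reachable by
 * following op->yes offsets from the start of the program.
 */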
static int valid_cc(const void *bc, int len, int cc)
{
	while (len >= 0) {
		const struct inet_diag_bc_op *op = bc;

		if (cc > len)
			return 0;
		if (cc == len)
			return 1;
		if (op->yes < 4 || op->yes & 3)
			return 0;
		len -= op->yes;
		bc  += op->yes;
	}
	return 0;
}

/* Validate an inet_diag_hostcond. */
static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
			   int *min_len)
{
	int addr_len;
	struct inet_diag_hostcond *cond;

	/* Check hostcond space. */
	*min_len += sizeof(struct inet_diag_hostcond);
	if (len < *min_len)
		return false;
	cond = (struct inet_diag_hostcond *)(op + 1);

	/* Check address family and address length. */
	switch (cond->family) {
	case AF_UNSPEC:
		addr_len = 0;
		break;
	case AF_INET:
		addr_len = sizeof(struct in_addr);
		break;
	case AF_INET6:
		addr_len = sizeof(struct in6_addr);
		break;
	default:
		return false;
	}
	*min_len += addr_len;
	if (len < *min_len)
		return false;

	/* Check prefix length (in bits) vs address length (in bytes). */
	if (cond->prefix_len > 8 * addr_len)
		return false;

	return true;
}

/* Validate a port comparison operator. */
static inline bool valid_port_comparison(const struct inet_diag_bc_op *op,
					 int len, int *min_len)
{
	/* Port comparisons put the port in a follow-on inet_diag_bc_op. */
	*min_len += sizeof(struct inet_diag_bc_op);
	if (len < *min_len)
		return false;
	return true;
}
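
/* Audit filter bytecode received from userspace before running it: every
 * opcode must be known, its operands must fit in the remaining buffer, and
 * all jump offsets must stay inside the program and land on op boundaries.
 */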
static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
{
	const void *bc = bytecode;
	int len = bytecode_len;

	while (len > 0) {
		const struct inet_diag_bc_op *op = bc;
		int min_len = sizeof(struct inet_diag_bc_op);

//printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
		switch (op->code) {
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND:
			if (!valid_hostcond(bc, len, &min_len))
				return -EINVAL;
			break;
		case INET_DIAG_BC_S_GE:
		case INET_DIAG_BC_S_LE:
		case INET_DIAG_BC_D_GE:
		case INET_DIAG_BC_D_LE:
			if (!valid_port_comparison(bc, len, &min_len))
				return -EINVAL;
			break;
		case INET_DIAG_BC_AUTO:
		case INET_DIAG_BC_JMP:
		case INET_DIAG_BC_NOP:
			break;
		default:
			return -EINVAL;
		}

		if (op->code != INET_DIAG_BC_NOP) {
			if (op->no < min_len || op->no > len + 4 || op->no & 3)
				return -EINVAL;
			if (op->no < len &&
			    !valid_cc(bytecode, bytecode_len, len - op->no))
				return -EINVAL;
		}

		if (op->yes < min_len || op->yes > len + 4 || op->yes & 3)
			return -EINVAL;
		bc  += op->yes;
		len -= op->yes;
	}
	return len == 0 ? 0 : -EINVAL;
}

static int inet_csk_diag_dump(struct sock *sk,
			      struct sk_buff *skb,
			      struct netlink_callback *cb,
			      struct inet_diag_req_v2 *r,
			      const struct nlattr *bc)
{
	if (!inet_diag_bc_sk(bc, sk))
		return 0;

	return inet_csk_diag_fill(sk, skb, r,
				  sk_user_ns(NETLINK_CB(cb->skb).sk),
				  NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}

static int inet_twsk_diag_dump(struct sock *sk,
			       struct sk_buff *skb,
			       struct netlink_callback *cb,
			       struct inet_diag_req_v2 *r,
			       const struct nlattr *bc)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);

	if (bc != NULL) {
		struct inet_diag_entry entry;

		entry.family = tw->tw_family;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == AF_INET6) {
			entry.saddr = tw->tw_v6_rcv_saddr.s6_addr32;
			entry.daddr = tw->tw_v6_daddr.s6_addr32;
		} else
#endif
		{
			entry.saddr = &tw->tw_rcv_saddr;
			entry.daddr = &tw->tw_daddr;
		}
		entry.sport = tw->tw_num;
		entry.dport = ntohs(tw->tw_dport);
		entry.userlocks = 0;

		if (!inet_diag_bc_run(bc, &entry))
			return 0;
	}

	return inet_twsk_diag_fill(tw, skb, r,
				   NETLINK_CB(cb->skb).portid,
				   cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}

/* Get the IPv4, IPv6, or IPv4-mapped-IPv6 local and remote addresses
 * from a request_sock. For IPv4-mapped-IPv6 we must map IPv4 to IPv6.
 */
static inline void inet_diag_req_addrs(const struct sock *sk,
				       const struct request_sock *req,
				       struct inet_diag_entry *entry)
{
	struct inet_request_sock *ireq = inet_rsk(req);

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		if (req->rsk_ops->family == AF_INET6) {
			entry->saddr = ireq->ir_v6_loc_addr.s6_addr32;
			entry->daddr = ireq->ir_v6_rmt_addr.s6_addr32;
		} else if (req->rsk_ops->family == AF_INET) {
			ipv6_addr_set_v4mapped(ireq->ir_loc_addr,
					       &entry->saddr_storage);
			ipv6_addr_set_v4mapped(ireq->ir_rmt_addr,
					       &entry->daddr_storage);
			entry->saddr = entry->saddr_storage.s6_addr32;
			entry->daddr = entry->daddr_storage.s6_addr32;
		}
	} else
#endif
	{
		entry->saddr = &ireq->ir_loc_addr;
		entry->daddr = &ireq->ir_rmt_addr;
	}
}
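
/* Fill a reply for an embryonic (SYN_RECV) connection still sitting in the
 * listener's request queue.
 */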
static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
			      struct request_sock *req,
			      struct user_namespace *user_ns,
			      u32 portid, u32 seq,
			      const struct nlmsghdr *unlh)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->idiag_family = sk->sk_family;
	r->idiag_state = TCP_SYN_RECV;
	r->idiag_timer = 1;
	r->idiag_retrans = req->num_retrans;

	r->id.idiag_if = sk->sk_bound_dev_if;
	sock_diag_save_cookie(req, r->id.idiag_cookie);

	tmo = req->expires - jiffies;
	if (tmo < 0)
		tmo = 0;

	r->id.idiag_sport = inet->inet_sport;
	r->id.idiag_dport = ireq->ir_rmt_port;
	r->id.idiag_src[0] = ireq->ir_loc_addr;
	r->id.idiag_dst[0] = ireq->ir_rmt_addr;
	r->idiag_expires = jiffies_to_msecs(tmo);
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
	r->idiag_inode = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {
		struct inet_diag_entry entry;
		inet_diag_req_addrs(sk, req, &entry);
		memcpy(r->id.idiag_src, entry.saddr, sizeof(struct in6_addr));
		memcpy(r->id.idiag_dst, entry.daddr, sizeof(struct in6_addr));
	}
#endif

	return nlmsg_end(skb, nlh);
}
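
/* Walk the SYN table of a listening socket and dump every pending request
 * that passes the filter; cb->args[3] and cb->args[4] record the position
 * at which an interrupted dump resumes.
 */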
static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
			       struct netlink_callback *cb,
			       struct inet_diag_req_v2 *r,
			       const struct nlattr *bc)
{
	struct inet_diag_entry entry;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt;
	struct inet_sock *inet = inet_sk(sk);
	int j, s_j;
	int reqnum, s_reqnum;
	int err = 0;

	s_j = cb->args[3];
	s_reqnum = cb->args[4];

	if (s_j > 0)
		s_j--;

	entry.family = sk->sk_family;

	read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);

	lopt = icsk->icsk_accept_queue.listen_opt;
	if (!lopt || !lopt->qlen)
		goto out;

	if (bc != NULL) {
		entry.sport = inet->inet_num;
		entry.userlocks = sk->sk_userlocks;
	}

	for (j = s_j; j < lopt->nr_table_entries; j++) {
		struct request_sock *req, *head = lopt->syn_table[j];

		reqnum = 0;
		for (req = head; req; reqnum++, req = req->dl_next) {
			struct inet_request_sock *ireq = inet_rsk(req);

			if (reqnum < s_reqnum)
				continue;
			if (r->id.idiag_dport != ireq->ir_rmt_port &&
			    r->id.idiag_dport)
				continue;

			if (bc) {
				inet_diag_req_addrs(sk, req, &entry);
				entry.dport = ntohs(ireq->ir_rmt_port);

				if (!inet_diag_bc_run(bc, &entry))
					continue;
			}

			err = inet_diag_fill_req(skb, sk, req,
					sk_user_ns(NETLINK_CB(cb->skb).sk),
					NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, cb->nlh);
			if (err < 0) {
				cb->args[3] = j + 1;
				cb->args[4] = reqnum;
				goto out;
			}
		}

		s_reqnum = 0;
	}

out:
	read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);

	return err;
}
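
/* Dump all sockets of one protocol: first the listening hash (optionally
 * including their SYN_RECV requests), then the established/TIME_WAIT hash.
 * cb->args[] carries the iteration state across netlink dump callbacks.
 */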
void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
		struct netlink_callback *cb, struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	int i, num;
	int s_i, s_num;
	struct net *net = sock_net(skb->sk);

	s_i = cb->args[1];
	s_num = num = cb->args[2];

	if (cb->args[0] == 0) {
		if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
			goto skip_listen_ht;

		for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
			struct sock *sk;
			struct hlist_nulls_node *node;
			struct inet_listen_hashbucket *ilb;

			num = 0;
			ilb = &hashinfo->listening_hash[i];
			spin_lock_bh(&ilb->lock);
			sk_nulls_for_each(sk, node, &ilb->head) {
				struct inet_sock *inet = inet_sk(sk);

				if (!net_eq(sock_net(sk), net))
					continue;

				if (num < s_num) {
					num++;
					continue;
				}

				if (r->sdiag_family != AF_UNSPEC &&
				    sk->sk_family != r->sdiag_family)
					goto next_listen;

				if (r->id.idiag_sport != inet->inet_sport &&
				    r->id.idiag_sport)
					goto next_listen;

				if (!(r->idiag_states & TCPF_LISTEN) ||
				    r->id.idiag_dport ||
				    cb->args[3] > 0)
					goto syn_recv;

				if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
					spin_unlock_bh(&ilb->lock);
					goto done;
				}

syn_recv:
				if (!(r->idiag_states & TCPF_SYN_RECV))
					goto next_listen;

				if (inet_diag_dump_reqs(skb, sk, cb, r, bc) < 0) {
					spin_unlock_bh(&ilb->lock);
					goto done;
				}

next_listen:
				cb->args[3] = 0;
				cb->args[4] = 0;
				++num;
			}
			spin_unlock_bh(&ilb->lock);

			s_num = 0;
			cb->args[3] = 0;
			cb->args[4] = 0;
		}
skip_listen_ht:
		cb->args[0] = 1;
		s_i = num = s_num = 0;
	}

	if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
		goto out;

	for (i = s_i; i <= hashinfo->ehash_mask; i++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[i];
		spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
		struct sock *sk;
		struct hlist_nulls_node *node;

		num = 0;

		if (hlist_nulls_empty(&head->chain))
			continue;

		if (i > s_i)
			s_num = 0;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &head->chain) {
			int res;

			if (!net_eq(sock_net(sk), net))
				continue;
			if (num < s_num)
				goto next_normal;
			if (!(r->idiag_states & (1 << sk->sk_state)))
				goto next_normal;
			if (r->sdiag_family != AF_UNSPEC &&
			    sk->sk_family != r->sdiag_family)
				goto next_normal;
			if (r->id.idiag_sport != htons(sk->sk_num) &&
			    r->id.idiag_sport)
				goto next_normal;
			if (r->id.idiag_dport != sk->sk_dport &&
			    r->id.idiag_dport)
				goto next_normal;
			if (sk->sk_state == TCP_TIME_WAIT)
				res = inet_twsk_diag_dump(sk, skb, cb, r, bc);
			else
				res = inet_csk_diag_dump(sk, skb, cb, r, bc);
			if (res < 0) {
				spin_unlock_bh(lock);
				goto done;
			}
next_normal:
			++num;
		}

		spin_unlock_bh(lock);
	}

done:
	cb->args[1] = i;
	cb->args[2] = num;
out:
	;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);

static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
		struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	const struct inet_diag_handler *handler;
	int err = 0;

	handler = inet_diag_lock_handler(r->sdiag_protocol);
	if (!IS_ERR(handler))
		handler->dump(skb, cb, r, bc);
	else
		err = PTR_ERR(handler);
	inet_diag_unlock_handler(handler);

	return err ? : skb->len;
}

static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *bc = NULL;
	int hdrlen = sizeof(struct inet_diag_req_v2);

	if (nlmsg_attrlen(cb->nlh, hdrlen))
		bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);

	return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh), bc);
}

static inline int inet_diag_type2proto(int type)
{
	switch (type) {
	case TCPDIAG_GETSOCK:
		return IPPROTO_TCP;
	case DCCPDIAG_GETSOCK:
		return IPPROTO_DCCP;
	default:
		return 0;
	}
}
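
/* Legacy TCPDIAG_GETSOCK/DCCPDIAG_GETSOCK interface: translate the old
 * inet_diag_req into an inet_diag_req_v2 and reuse the common dump path.
 */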
static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct inet_diag_req *rc = nlmsg_data(cb->nlh);
	struct inet_diag_req_v2 req;
	struct nlattr *bc = NULL;
	int hdrlen = sizeof(struct inet_diag_req);

	req.sdiag_family = AF_UNSPEC; /* compatibility */
	req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
	req.idiag_ext = rc->idiag_ext;
	req.idiag_states = rc->idiag_states;
	req.id = rc->id;

	if (nlmsg_attrlen(cb->nlh, hdrlen))
		bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);

	return __inet_diag_dump(skb, cb, &req, bc);
}

static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
				      const struct nlmsghdr *nlh)
{
	struct inet_diag_req *rc = nlmsg_data(nlh);
	struct inet_diag_req_v2 req;

	req.sdiag_family = rc->idiag_family;
	req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
	req.idiag_ext = rc->idiag_ext;
	req.idiag_states = rc->idiag_states;
	req.id = rc->id;

	return inet_diag_get_exact(in_skb, nlh, &req);
}

static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int hdrlen = sizeof(struct inet_diag_req);
	struct net *net = sock_net(skb->sk);

	if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
	    nlmsg_len(nlh) < hdrlen)
		return -EINVAL;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		if (nlmsg_attrlen(nlh, hdrlen)) {
			struct nlattr *attr;

			attr = nlmsg_find_attr(nlh, hdrlen,
					       INET_DIAG_REQ_BYTECODE);
			if (attr == NULL ||
			    nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
			    inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
				return -EINVAL;
		}
		{
			struct netlink_dump_control c = {
				.dump = inet_diag_dump_compat,
			};
			return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
		}
	}

	return inet_diag_get_exact_compat(skb, nlh);
}

static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct inet_diag_req_v2);
	struct net *net = sock_net(skb->sk);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		if (nlmsg_attrlen(h, hdrlen)) {
			struct nlattr *attr;

			attr = nlmsg_find_attr(h, hdrlen,
					       INET_DIAG_REQ_BYTECODE);
			if (attr == NULL ||
			    nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
			    inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
				return -EINVAL;
		}
		{
			struct netlink_dump_control c = {
				.dump = inet_diag_dump,
			};
			return netlink_dump_start(net->diag_nlsk, skb, h, &c);
		}
	}

	return inet_diag_get_exact(skb, h, nlmsg_data(h));
}

static const struct sock_diag_handler inet_diag_handler = {
	.family = AF_INET,
	.dump = inet_diag_handler_dump,
};

static const struct sock_diag_handler inet6_diag_handler = {
	.family = AF_INET6,
	.dump = inet_diag_handler_dump,
};
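
/* Called by protocol diag modules (e.g. tcp_diag or dccp_diag) to register
 * their handler for a given IPPROTO_* value.
 */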
int inet_diag_register(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;
	int err = -EINVAL;

	if (type >= IPPROTO_MAX)
		goto out;

	mutex_lock(&inet_diag_table_mutex);
	err = -EEXIST;
	if (inet_diag_table[type] == NULL) {
		inet_diag_table[type] = h;
		err = 0;
	}
	mutex_unlock(&inet_diag_table_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_register);

void inet_diag_unregister(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;

	if (type >= IPPROTO_MAX)
		return;

	mutex_lock(&inet_diag_table_mutex);
	inet_diag_table[type] = NULL;
	mutex_unlock(&inet_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(inet_diag_unregister);

static int __init inet_diag_init(void)
{
	const int inet_diag_table_size = (IPPROTO_MAX *
					  sizeof(struct inet_diag_handler *));
	int err = -ENOMEM;

	inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
	if (!inet_diag_table)
		goto out;

	err = sock_diag_register(&inet_diag_handler);
	if (err)
		goto out_free_nl;

	err = sock_diag_register(&inet6_diag_handler);
	if (err)
		goto out_free_inet;

	sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
out:
	return err;

out_free_inet:
	sock_diag_unregister(&inet_diag_handler);
out_free_nl:
	kfree(inet_diag_table);
	goto out;
}

static void __exit inet_diag_exit(void)
{
	sock_diag_unregister(&inet6_diag_handler);
	sock_diag_unregister(&inet_diag_handler);
	sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
	kfree(inet_diag_table);
}

module_init(inet_diag_init);
module_exit(inet_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2 /* AF_INET */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10 /* AF_INET6 */);