#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

int sysctl_tcp_nometrics_save __read_mostly;
struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10;		/* Recurring Fast Open SYN losses */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};
struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	struct inetpeer_addr		tcpm_addr;
	unsigned long			tcpm_stamp;
	u32				tcpm_ts;
	u32				tcpm_ts_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};
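/* One tcp_metrics_block caches everything we keep per destination: the
 * saved route metrics (RTT, RTTVAR, ssthresh, cwnd, reordering), the
 * last timestamp seen (for PAWS), and the TCP Fast Open state.  Blocks
 * hang off a per-netns hash table in RCU-protected chains keyed by the
 * peer address; a set bit in tcpm_lock marks a metric that was locked
 * on the route and must not be overwritten.
 */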
static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}
static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}
static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
				  enum tcp_metric_index idx)
{
	return msecs_to_jiffies(tm->tcpm_vals[idx]);
}
static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}
static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
				 enum tcp_metric_index idx,
				 u32 val)
{
	tm->tcpm_vals[idx] = jiffies_to_msecs(val);
}
static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	const struct in6_addr *a6, *b6;

	if (a->family != b->family)
		return false;
	if (a->family == AF_INET)
		return a->addr.a4 == b->addr.a4;

	a6 = (const struct in6_addr *) &a->addr.a6[0];
	b6 = (const struct in6_addr *) &b->addr.a6[0];

	return ipv6_addr_equal(a6, b6);
}
struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static DEFINE_SPINLOCK(tcp_metrics_lock);
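/* Locking: lookups run under rcu_read_lock() and walk the chains via
 * rcu_dereference(); all chain modifications are serialized by
 * tcp_metrics_lock.  The Fast Open fields are additionally guarded by
 * fastopen_seqlock (defined further down) so readers see a consistent
 * mss/cookie/syn_loss triple.
 */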
static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 val;

	tm->tcpm_stamp = jiffies;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	tm->tcpm_ts = 0;
	tm->tcpm_ts_stamp = 0;
	if (fastopen_clear) {
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.cookie.len = 0;
	}
}
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *addr,
					  unsigned int hash,
					  bool reclaim)
{
	struct tcp_metrics_block *tm;
	struct net *net;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);
	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
		for (tm = rcu_dereference(oldest->tcpm_next); tm;
		     tm = rcu_dereference(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	tm->tcpm_addr = *addr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}
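/* Note the reclaim path above: rather than letting a hash chain grow
 * without bound, a lookup that walks more than TCP_METRICS_RECLAIM_DEPTH
 * entries without a match reports TCP_METRICS_RECLAIM_PTR, and tcpm_new()
 * then recycles the least recently stamped block in the chain in place
 * instead of allocating a new one.
 */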
#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)
static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst, false);
}
#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}
static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, addr))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}
static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;

	addr.family = req->rsk_ops->family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = inet_rsk(req)->rmt_addr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		*(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
		hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr);
		break;
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}
static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
	struct inet6_timewait_sock *tw6;
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;

	addr.family = tw->tw_family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = tw->tw_daddr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		tw6 = inet6_twsk((struct sock *)tw);
		*(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
		hash = ipv6_addr_hash(&tw6->tw_v6_daddr);
		break;
	default:
		return NULL;
	}

	net = twsk_net(tw);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr))
			break;
	}
	return tm;
}
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;
	bool reclaim;

	addr.family = sk->sk_family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = inet_sk(sk)->inet_daddr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		*(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
		hash = ipv6_addr_hash(&inet6_sk(sk)->daddr);
		break;
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&addr, net, hash);
	reclaim = false;
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (!tm && create)
		tm = tcpm_new(dst, &addr, hash, reclaim);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}
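/* Socket-side lookup.  Callers pass create = true when they intend to
 * store something, so a missing entry is allocated (or reclaimed from an
 * overlong chain); create = false keeps the lookup read-only.  An entry
 * not updated for TCP_METRICS_TIMEOUT (one hour) is refreshed from the
 * route metrics before being handed back.
 */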
/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	if (sysctl_tcp_nometrics_save || !dst)
		return;

	if (dst->flags & DST_HOST)
		dst_confirm(dst);

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time.  Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt;

	/* If newly calculated rtt larger than stored one, store new
	 * one. Otherwise, use EWMA. Remember, rtt overestimation is
	 * always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt;
		else
			rtt -= (m >> 3);
		tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev)
			m = tp->mdev;

		var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (tp->snd_cwnd > tp->snd_ssthresh &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is non-sense,
		 * ssthresh may be also invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}
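/* Worked example of the RTT merge above (values in ms): with a cached
 * RTT of 200 and a final srtt of 120, m = 80, so the cache decays to
 * 200 - 80/8 = 190; if instead srtt rises to 260, m = -60 <= 0 and the
 * cache jumps straight to 260.  Overestimating the next connection's
 * initial RTT only costs a slightly conservative RTO, while
 * underestimating it risks spurious retransmissions.
 */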
/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val;

	if (dst == NULL)
		goto reset;

	dst_confirm(dst);

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS. Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tcp_disable_early_retrans(tp);
		tp->reordering = val;
	}

	val = tcp_metric_get(tm, TCP_METRIC_RTT);
	if (val == 0 || tp->srtt == 0) {
		rcu_read_unlock();
		goto reset;
	}
	/* Initial rtt is determined from SYN,SYN-ACK.
	 * The segment is small and rtt may appear much
	 * less than real one. Use per-dst memory
	 * to make it more realistic.
	 *
	 * A bit of theory. RTT is the time that passes after a "normal"
	 * sized packet is sent until it is ACKed. In normal circumstances
	 * sending small packets forces the peer to delay ACKs and the
	 * calculation is correct too. The algorithm is adaptive and,
	 * provided we follow specs, it NEVER underestimates RTT. BUT! If
	 * the peer tries clever tricks, sort of "quick acks" for long
	 * enough to drive the measured RTT down to a low value, and then
	 * abruptly starts to delay ACKs again, expect trouble.
	 */
	val = msecs_to_jiffies(val);
	if (val > tp->srtt) {
		tp->srtt = val;
		tp->rtt_seq = tp->snd_nxt;
	}
	val = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
	if (val > tp->mdev) {
		tp->mdev = val;
		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
	}
	rcu_read_unlock();

	tcp_set_rto(sk);
reset:
	if (tp->srtt == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS. This is most likely due to retransmission,
		 * including spurious one. Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted. In light of RFC6298 more aggressive 1sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}
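/* tcp_peer_is_proven() below has two modes: with paws_check set, the
 * cached timestamp is used to reject a request whose ts_recent lags what
 * we recently saw from this peer (a PAWS violation); without it, a peer
 * counts as "proven" only when we hold both an RTT sample and a
 * timestamp for it.  SYN-handling callers are expected to use the latter
 * to prefer destinations known to be alive when the backlog fills up.
 */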
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (paws_check) {
		if (tm &&
		    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
		    (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
			ret = false;
		else
			ret = true;
	} else {
		if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
			ret = true;
		else
			ret = false;
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_sock *tp = tcp_sk(sk);

		if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
			tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
			tp->rx_opt.ts_recent = tm->tcpm_ts;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);
/* VJ's idea. Save last timestamp seen from this destination and hold
 * it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	bool ret = false;

	if (dst) {
		struct tcp_metrics_block *tm;

		rcu_read_lock();
		tm = tcp_get_metrics(sk, dst, true);
		if (tm) {
			struct tcp_sock *tp = tcp_sk(sk);

			if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
			    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
			     tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
				tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
				tm->tcpm_ts = tp->rx_opt.ts_recent;
			}
			ret = true;
		}
		rcu_read_unlock();
	}

	return ret;
}
bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	bool ret = false;

	rcu_read_lock();
	tm = __tcp_get_metrics_tw(tw);
	if (tm) {
		const struct tcp_timewait_sock *tcptw;
		struct sock *sk = (struct sock *) tw;

		tcptw = tcp_twsk(sk);
		if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
		     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			tm->tcpm_ts	  = tcptw->tw_ts_recent;
		}
		ret = true;
	}
	rcu_read_unlock();

	return ret;
}
static DEFINE_SEQLOCK(fastopen_seqlock);
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie,
			    int *syn_loss, unsigned long *last_syn_loss)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			*syn_loss = tfom->syn_loss;
			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		tfom->mss = mss;
		if (cookie->len > 0)
			tfom->cookie = *cookie;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}
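/* The seqlock pairing above is deliberately asymmetric: the read side,
 * tcp_fastopen_cache_get(), runs on every outgoing Fast Open attempt and
 * only retries (via read_seqretry) in the rare case that a writer raced
 * with it, while the write side pays for exclusion only when a SYN
 * outcome actually updates the cached mss/cookie/loss state.
 */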
static struct genl_family tcp_metrics_nl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.netnsok	= true,
};
static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* Following attributes are not received for GET/DEL,
	 * we keep them for reference
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};
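/* Userspace reaches this cache through the generic netlink family
 * declared above; for example, iproute2's "ip tcp_metrics show" and
 * "ip tcp_metrics delete" map onto TCP_METRICS_CMD_GET and
 * TCP_METRICS_CMD_DEL below.
 */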
/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_addr.family) {
	case AF_INET:
		if (nla_put_be32(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				tm->tcpm_addr.addr.a4) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put(msg, TCP_METRICS_ATTR_ADDR_IPV6, 16,
			    tm->tcpm_addr.addr.a6) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - tm->tcpm_stamp) < 0)
		goto nla_put_failure;
	if (tm->tcpm_ts_stamp) {
		if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
				(s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
			goto nla_put_failure;
		if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
				tm->tcpm_ts) < 0)
			goto nla_put_failure;
	}

	{
		int n = 0;

		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX + 1; i++) {
			if (!tm->tcpm_vals[i])
				continue;
			if (nla_put_u32(msg, i + 1, tm->tcpm_vals[i]) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				jiffies - tfom->last_syn_loss) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}
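/* Dump state is carried in cb->args: args[0] is the next hash row and
 * args[1] the position within that row's chain, so a dump interrupted
 * by a full skb resumes where it left off on the next netlink read.
 */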
static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	struct nlattr *a;

	a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV4];
	if (a) {
		addr->family = AF_INET;
		addr->addr.a4 = nla_get_be32(a);
		*hash = (__force unsigned int) addr->addr.a4;
		return 0;
	}
	a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV6];
	if (a) {
		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		addr->family = AF_INET6;
		memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6));
		*hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6);
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}
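/* parse_nl_addr() returns 0 when an address attribute was parsed, a
 * negative errno on a malformed attribute, and 1 when no address was
 * supplied but the caller marked it optional; the DEL command uses that
 * last case to mean "flush everything".
 */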
static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;

	ret = parse_nl_addr(info, &addr, &hash, 0);
	if (ret < 0)
		return ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}
#define deref_locked_genl(p)	\
	rcu_dereference_protected(p, lockdep_genl_is_held() && \
				     lockdep_is_held(&tcp_metrics_lock))

#define deref_genl(p)	rcu_dereference_protected(p, lockdep_genl_is_held())
static int tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		spin_lock_bh(&tcp_metrics_lock);
		tm = deref_locked_genl(hb->chain);
		if (tm)
			hb->chain = NULL;
		spin_unlock_bh(&tcp_metrics_lock);
		while (tm) {
			struct tcp_metrics_block *next;

			next = deref_genl(tm->tcpm_next);
			kfree_rcu(tm, rcu_head);
			tm = next;
		}
	}
	return 0;
}
static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;

	ret = parse_nl_addr(info, &addr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return tcp_metrics_flush_all(net);

	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
	hb = net->ipv4.tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked_genl(*pp); tm;
	     pp = &tm->tcpm_next, tm = deref_locked_genl(*pp)) {
		if (addr_same(&tm->tcpm_addr, &addr)) {
			*pp = tm->tcpm_next;
			break;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!tm)
		return -ESRCH;
	kfree_rcu(tm, rcu_head);
	return 0;
}
static struct genl_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.doit = tcp_metrics_nl_cmd_del,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};
static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);
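/* Example: booting with "tcpmhash_entries=16384" on the kernel command
 * line sizes each namespace's metrics table at 16384 buckets instead of
 * the RAM-based default chosen in tcp_net_metrics_init() below.
 */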
static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;

	net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!net->ipv4.tcp_metrics_hash)
		net->ipv4.tcp_metrics_hash = vzalloc(size);

	if (!net->ipv4.tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}
static void __net_exit tcp_net_metrics_exit(struct net *net)
{
	unsigned int i;

	for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) {
		struct tcp_metrics_block *tm, *next;

		tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
		while (tm) {
			next = rcu_dereference_protected(tm->tcpm_next, 1);
			kfree(tm);
			tm = next;
		}
	}
	if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash))
		vfree(net->ipv4.tcp_metrics_hash);
	else
		kfree(net->ipv4.tcp_metrics_hash);
}
static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init	=	tcp_net_metrics_init,
	.exit	=	tcp_net_metrics_exit,
};
void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		goto cleanup;
	ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
					    tcp_metrics_nl_ops,
					    ARRAY_SIZE(tcp_metrics_nl_ops));
	if (ret < 0)
		goto cleanup_subsys;
	return;

cleanup_subsys:
	unregister_pernet_subsys(&tcp_net_metrics_ops);

cleanup:
	return;
}