net/ipv4/tcp_metrics.c
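
/* TCP metrics cache: remembers per-destination TCP state (RTT/RTTVAR,
 * ssthresh, cwnd, reordering, TIME-WAIT timestamps and TCP Fast Open
 * cookies) in a per-namespace, RCU-protected hash table keyed by peer
 * address.  Entries are seeded from route metrics, updated when a
 * connection closes cleanly (tcp_update_metrics), consulted when a new
 * one starts (tcp_init_metrics), and exported to userspace through a
 * generic netlink family.
 */
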
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

int sysctl_tcp_nometrics_save __read_mostly;

struct tcp_fastopen_metrics {
        u16     mss;
        u16     syn_loss:10;            /* Recurring Fast Open SYN losses */
        unsigned long   last_syn_loss;  /* Last Fast Open SYN loss */
        struct  tcp_fastopen_cookie     cookie;
};

struct tcp_metrics_block {
        struct tcp_metrics_block __rcu  *tcpm_next;
        struct inetpeer_addr            tcpm_addr;
        unsigned long                   tcpm_stamp;
        u32                             tcpm_ts;        /* ts_recent cached from the peer */
        u32                             tcpm_ts_stamp;  /* get_seconds() when tcpm_ts was recorded */
        u32                             tcpm_lock;      /* bitmap of TCP_METRIC_* slots frozen by route "lock" flags */
        u32                             tcpm_vals[TCP_METRIC_MAX + 1];
        struct tcp_fastopen_metrics     tcpm_fastopen;

        struct rcu_head                 rcu_head;
};

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
                              enum tcp_metric_index idx)
{
        return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
                          enum tcp_metric_index idx)
{
        return tm->tcpm_vals[idx];
}

static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
                                  enum tcp_metric_index idx)
{
        return msecs_to_jiffies(tm->tcpm_vals[idx]);
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
                           enum tcp_metric_index idx,
                           u32 val)
{
        tm->tcpm_vals[idx] = val;
}

static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
                                 enum tcp_metric_index idx,
                                 u32 val)
{
        tm->tcpm_vals[idx] = jiffies_to_msecs(val);
}

static bool addr_same(const struct inetpeer_addr *a,
                      const struct inetpeer_addr *b)
{
        const struct in6_addr *a6, *b6;

        if (a->family != b->family)
                return false;
        if (a->family == AF_INET)
                return a->addr.a4 == b->addr.a4;

        a6 = (const struct in6_addr *) &a->addr.a6[0];
        b6 = (const struct in6_addr *) &b->addr.a6[0];

        return ipv6_addr_equal(a6, b6);
}

struct tcpm_hash_bucket {
        struct tcp_metrics_block __rcu  *chain;
};

static DEFINE_SPINLOCK(tcp_metrics_lock);

/* Refill a cache entry from its route: copy the raw metrics, note which
 * of them the route has locked, and clear the timestamp and Fast Open
 * state.
 */
static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
        u32 val;

        tm->tcpm_stamp = jiffies;

        val = 0;
        if (dst_metric_locked(dst, RTAX_RTT))
                val |= 1 << TCP_METRIC_RTT;
        if (dst_metric_locked(dst, RTAX_RTTVAR))
                val |= 1 << TCP_METRIC_RTTVAR;
        if (dst_metric_locked(dst, RTAX_SSTHRESH))
                val |= 1 << TCP_METRIC_SSTHRESH;
        if (dst_metric_locked(dst, RTAX_CWND))
                val |= 1 << TCP_METRIC_CWND;
        if (dst_metric_locked(dst, RTAX_REORDERING))
                val |= 1 << TCP_METRIC_REORDERING;
        tm->tcpm_lock = val;

        tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
        tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
        tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
        tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
        tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
        tm->tcpm_ts = 0;
        tm->tcpm_ts_stamp = 0;
        tm->tcpm_fastopen.mss = 0;
        tm->tcpm_fastopen.syn_loss = 0;
        tm->tcpm_fastopen.cookie.len = 0;
}

static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
                                          struct inetpeer_addr *addr,
                                          unsigned int hash,
                                          bool reclaim)
{
        struct tcp_metrics_block *tm;
        struct net *net;

        spin_lock_bh(&tcp_metrics_lock);
        net = dev_net(dst->dev);
        if (unlikely(reclaim)) {
                struct tcp_metrics_block *oldest;

                oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
                for (tm = rcu_dereference(oldest->tcpm_next); tm;
                     tm = rcu_dereference(tm->tcpm_next)) {
                        if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
                                oldest = tm;
                }
                tm = oldest;
        } else {
                tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
                if (!tm)
                        goto out_unlock;
        }
        tm->tcpm_addr = *addr;

        tcpm_suck_dst(tm, dst);

        if (likely(!reclaim)) {
                tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
                rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
        }

out_unlock:
        spin_unlock_bh(&tcp_metrics_lock);
        return tm;
}

#define TCP_METRICS_TIMEOUT             (60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
        if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
                tcpm_suck_dst(tm, dst);
}

#define TCP_METRICS_RECLAIM_DEPTH       5
#define TCP_METRICS_RECLAIM_PTR         (struct tcp_metrics_block *) 0x1UL

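/* A lookup result is encoded in the returned pointer: a real block on a
 * hit, NULL on a miss in a chain still short enough to grow, or the
 * non-dereferenceable sentinel TCP_METRICS_RECLAIM_PTR on a miss in a
 * chain deeper than TCP_METRICS_RECLAIM_DEPTH, which tells tcpm_new()
 * to recycle the oldest entry in the bucket instead of allocating a
 * new block.
 */
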
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
        if (tm)
                return tm;
        if (depth > TCP_METRICS_RECLAIM_DEPTH)
                return TCP_METRICS_RECLAIM_PTR;
        return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
                                                   struct net *net, unsigned int hash)
{
        struct tcp_metrics_block *tm;
        int depth = 0;

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, addr))
                        break;
                depth++;
        }
        return tcp_get_encode(tm, depth);
}

static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
                                                       struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net;

        addr.family = req->rsk_ops->family;
        switch (addr.family) {
        case AF_INET:
                addr.addr.a4 = inet_rsk(req)->rmt_addr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
        case AF_INET6:
                *(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
                hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr);
                break;
        default:
                return NULL;
        }

        net = dev_net(dst->dev);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, &addr))
                        break;
        }
        tcpm_check_stamp(tm, dst);
        return tm;
}

static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
        struct inet6_timewait_sock *tw6;
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net;

        addr.family = tw->tw_family;
        switch (addr.family) {
        case AF_INET:
                addr.addr.a4 = tw->tw_daddr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
        case AF_INET6:
                tw6 = inet6_twsk((struct sock *)tw);
                *(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
                hash = ipv6_addr_hash(&tw6->tw_v6_daddr);
                break;
        default:
                return NULL;
        }

        net = twsk_net(tw);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, &addr))
                        break;
        }
        return tm;
}

static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
                                                 struct dst_entry *dst,
                                                 bool create)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net;
        bool reclaim;

        addr.family = sk->sk_family;
        switch (addr.family) {
        case AF_INET:
                addr.addr.a4 = inet_sk(sk)->inet_daddr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
        case AF_INET6:
                *(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
                hash = ipv6_addr_hash(&inet6_sk(sk)->daddr);
                break;
        default:
                return NULL;
        }

        net = dev_net(dst->dev);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        tm = __tcp_get_metrics(&addr, net, hash);
        reclaim = false;
        if (tm == TCP_METRICS_RECLAIM_PTR) {
                reclaim = true;
                tm = NULL;
        }
        if (!tm && create)
                tm = tcpm_new(dst, &addr, hash, reclaim);
        else
                tcpm_check_stamp(tm, dst);

        return tm;
}

/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_metrics_block *tm;
        unsigned long rtt;
        u32 val;
        int m;

        if (sysctl_tcp_nometrics_save || !dst)
                return;

        if (dst->flags & DST_HOST)
                dst_confirm(dst);

        rcu_read_lock();
        if (icsk->icsk_backoff || !tp->srtt) {
                /* This session failed to estimate rtt.  Why?
                 * Probably no packets returned in time.  Reset our
                 * results.
                 */
                tm = tcp_get_metrics(sk, dst, false);
                if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
                        tcp_metric_set(tm, TCP_METRIC_RTT, 0);
                goto out_unlock;
        } else
                tm = tcp_get_metrics(sk, dst, true);

        if (!tm)
                goto out_unlock;

        rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
        m = rtt - tp->srtt;

        /* If the newly calculated rtt is larger than the stored one,
         * store the new one.  Otherwise, use EWMA.  Remember, rtt
         * overestimation is always better than underestimation.
         */
        if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
                if (m <= 0)
                        rtt = tp->srtt;
                else
                        rtt -= (m >> 3);
                tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
        }
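
        /* Worked example on the code's own variables (leaving aside
         * tcp's internal srtt scaling): with a cached rtt of 200 jiffies
         * and tp->srtt of 120, m = 80, so the stored value becomes
         * 200 - (80 >> 3) = 190.  The cache creeps toward smaller
         * samples in 1/8 steps but adopts a larger sample immediately.
         */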

        if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
                unsigned long var;

                if (m < 0)
                        m = -m;

                /* Scale deviation to rttvar fixed point */
                m >>= 1;
                if (m < tp->mdev)
                        m = tp->mdev;

                var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
                if (m >= var)
                        var = m;
                else
                        var -= (var - m) >> 2;

                tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
        }

        if (tcp_in_initial_slowstart(tp)) {
                /* Slow start still did not finish. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && (tp->snd_cwnd >> 1) > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tp->snd_cwnd >> 1);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        if (tp->snd_cwnd > val)
                                tcp_metric_set(tm, TCP_METRIC_CWND,
                                               tp->snd_cwnd);
                }
        } else if (tp->snd_cwnd > tp->snd_ssthresh &&
                   icsk->icsk_ca_state == TCP_CA_Open) {
                /* Cong. avoidance phase, cwnd is reliable. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
                        tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
                }
        } else {
                /* Else slow start did not finish, cwnd is nonsense,
                 * and ssthresh may also be invalid.
                 */
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND,
                                       (val + tp->snd_ssthresh) >> 1);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && tp->snd_ssthresh > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tp->snd_ssthresh);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
                        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
                        if (val < tp->reordering &&
                            tp->reordering != sysctl_tcp_reordering)
                                tcp_metric_set(tm, TCP_METRIC_REORDERING,
                                               tp->reordering);
                }
        }
        tm->tcpm_stamp = jiffies;
out_unlock:
        rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_metrics_block *tm;
        u32 val;

        if (dst == NULL)
                goto reset;

        dst_confirm(dst);

        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (!tm) {
                rcu_read_unlock();
                goto reset;
        }

        if (tcp_metric_locked(tm, TCP_METRIC_CWND))
                tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
        if (val) {
                tp->snd_ssthresh = val;
                if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
                        tp->snd_ssthresh = tp->snd_cwnd_clamp;
        } else {
                /* ssthresh may have been reduced unnecessarily during
                 * 3WHS.  Restore it back to its initial default.
                 */
                tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        }
        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
        if (val && tp->reordering != val) {
                tcp_disable_fack(tp);
                tcp_disable_early_retrans(tp);
                tp->reordering = val;
        }

        val = tcp_metric_get(tm, TCP_METRIC_RTT);
        if (val == 0 || tp->srtt == 0) {
                rcu_read_unlock();
                goto reset;
        }
        /* Initial rtt is determined from SYN,SYN-ACK.
         * The segment is small and rtt may appear much
         * less than real one.  Use per-dst memory
         * to make it more realistic.
         *
         * A bit of theory.  RTT is the time that passes after a
         * "normal" sized packet is sent until it is ACKed.  In normal
         * circumstances sending small packets forces the peer to delay
         * ACKs, so the calculation stays correct.  The algorithm is
         * adaptive and, provided we follow the specs, it NEVER
         * underestimates RTT.  BUT! if the peer plays clever tricks,
         * sending "quick acks" long enough to drive the RTT estimate
         * down, and then abruptly stops doing so and starts delaying
         * ACKs, expect trouble.
         */
        val = msecs_to_jiffies(val);
        if (val > tp->srtt) {
                tp->srtt = val;
                tp->rtt_seq = tp->snd_nxt;
        }
        val = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
        if (val > tp->mdev) {
                tp->mdev = val;
                tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
        }
        rcu_read_unlock();

        tcp_set_rto(sk);
reset:
        if (tp->srtt == 0) {
                /* RFC6298 5.7: We've failed to get a valid RTT sample
                 * from the 3WHS.  This is most likely due to
                 * retransmission, including a spurious one.  Reset the
                 * RTO back to 3 secs from the more aggressive 1 sec to
                 * avoid more spurious retransmission.
                 */
                tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
                inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
        }
        /* Cut cwnd down to 1 per RFC5681 if a SYN or SYN-ACK has been
         * retransmitted.  In light of RFC6298's more aggressive 1 sec
         * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
         * retransmission has occurred.
         */
        if (tp->total_retrans > 1)
                tp->snd_cwnd = 1;
        else
                tp->snd_cwnd = tcp_init_cwnd(tp, dst);
        tp->snd_cwnd_stamp = tcp_time_stamp;
}
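
/* Two uses, selected by @paws_check: with paws_check true this performs
 * a PAWS sanity check against the timestamp cached for the peer (fail
 * if the peer's timestamp went backwards by more than TCP_PAWS_WINDOW
 * while the cached stamp is younger than TCP_PAWS_MSL seconds); with
 * paws_check false it merely reports whether we hold both an RTT sample
 * and a timestamp for the peer, i.e. whether the peer has proven itself
 * in an earlier connection.
 */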
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
{
        struct tcp_metrics_block *tm;
        bool ret;

        if (!dst)
                return false;

        rcu_read_lock();
        tm = __tcp_get_metrics_req(req, dst);
        if (paws_check) {
                if (tm &&
                    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
                    (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
                        ret = false;
                else
                        ret = true;
        } else {
                if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
                        ret = true;
                else
                        ret = false;
        }
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);

void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (tm) {
                struct tcp_sock *tp = tcp_sk(sk);

                if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
                        tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
                        tp->rx_opt.ts_recent = tm->tcpm_ts;
                }
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);

/* VJ's idea.  Save the last timestamp seen from this destination and
 * hold it at least for the normal timewait interval, to use for
 * duplicate segment detection in subsequent connections before they
 * enter synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        bool ret = false;

        if (dst) {
                struct tcp_metrics_block *tm;

                rcu_read_lock();
                tm = tcp_get_metrics(sk, dst, true);
                if (tm) {
                        struct tcp_sock *tp = tcp_sk(sk);

                        if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
                            ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
                             tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
                                tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
                                tm->tcpm_ts = tp->rx_opt.ts_recent;
                        }
                        ret = true;
                }
                rcu_read_unlock();
        }
        return ret;
}

bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
        struct tcp_metrics_block *tm;
        bool ret = false;

        rcu_read_lock();
        tm = __tcp_get_metrics_tw(tw);
        if (tm) {
                const struct tcp_timewait_sock *tcptw;
                struct sock *sk = (struct sock *) tw;

                tcptw = tcp_twsk(sk);
                if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
                    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
                     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
                        tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
                        tm->tcpm_ts = tcptw->tw_ts_recent;
                }
                ret = true;
        }
        rcu_read_unlock();

        return ret;
}

static DEFINE_SEQLOCK(fastopen_seqlock);

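/* The Fast Open fields span several words and cannot be read or updated
 * atomically as one unit, so they are protected by a seqlock: readers
 * in tcp_fastopen_cache_get() simply retry the cheap copy if a writer
 * raced with them, while the rare writers serialize among themselves on
 * the write side of the lock.
 */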
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
                            struct tcp_fastopen_cookie *cookie,
                            int *syn_loss, unsigned long *last_syn_loss)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
                unsigned int seq;

                do {
                        seq = read_seqbegin(&fastopen_seqlock);
                        if (tfom->mss)
                                *mss = tfom->mss;
                        *cookie = tfom->cookie;
                        *syn_loss = tfom->syn_loss;
                        *last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
                } while (read_seqretry(&fastopen_seqlock, seq));
        }
        rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
                            struct tcp_fastopen_cookie *cookie, bool syn_lost)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, __sk_dst_get(sk), true);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

                write_seqlock_bh(&fastopen_seqlock);
                tfom->mss = mss;
                if (cookie->len > 0)
                        tfom->cookie = *cookie;
                if (syn_lost) {
                        ++tfom->syn_loss;
                        tfom->last_syn_loss = jiffies;
                } else
                        tfom->syn_loss = 0;
                write_sequnlock_bh(&fastopen_seqlock);
        }
        rcu_read_unlock();
}

static struct genl_family tcp_metrics_nl_family = {
        .id             = GENL_ID_GENERATE,
        .hdrsize        = 0,
        .name           = TCP_METRICS_GENL_NAME,
        .version        = TCP_METRICS_GENL_VERSION,
        .maxattr        = TCP_METRICS_ATTR_MAX,
        .netnsok        = true,
};

static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
        [TCP_METRICS_ATTR_ADDR_IPV4]    = { .type = NLA_U32, },
        [TCP_METRICS_ATTR_ADDR_IPV6]    = { .type = NLA_BINARY,
                                            .len = sizeof(struct in6_addr), },
        /* The following attributes are not received for GET/DEL;
         * we keep them for reference.
         */
#if 0
        [TCP_METRICS_ATTR_AGE]          = { .type = NLA_MSECS, },
        [TCP_METRICS_ATTR_TW_TSVAL]     = { .type = NLA_U32, },
        [TCP_METRICS_ATTR_TW_TS_STAMP]  = { .type = NLA_S32, },
        [TCP_METRICS_ATTR_VALS]         = { .type = NLA_NESTED, },
        [TCP_METRICS_ATTR_FOPEN_MSS]    = { .type = NLA_U16, },
        [TCP_METRICS_ATTR_FOPEN_SYN_DROPS]      = { .type = NLA_U16, },
        [TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]    = { .type = NLA_MSECS, },
        [TCP_METRICS_ATTR_FOPEN_COOKIE] = { .type = NLA_BINARY,
                                            .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
                                 struct tcp_metrics_block *tm)
{
        struct nlattr *nest;
        int i;

        switch (tm->tcpm_addr.family) {
        case AF_INET:
                if (nla_put_be32(msg, TCP_METRICS_ATTR_ADDR_IPV4,
                                tm->tcpm_addr.addr.a4) < 0)
                        goto nla_put_failure;
                break;
        case AF_INET6:
                if (nla_put(msg, TCP_METRICS_ATTR_ADDR_IPV6, 16,
                            tm->tcpm_addr.addr.a6) < 0)
                        goto nla_put_failure;
                break;
        default:
                return -EAFNOSUPPORT;
        }

        if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
                          jiffies - tm->tcpm_stamp) < 0)
                goto nla_put_failure;
        if (tm->tcpm_ts_stamp) {
                if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
                                (s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
                        goto nla_put_failure;
                if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
                                tm->tcpm_ts) < 0)
                        goto nla_put_failure;
        }

        {
                int n = 0;

                nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
                if (!nest)
                        goto nla_put_failure;
                for (i = 0; i < TCP_METRIC_MAX + 1; i++) {
                        if (!tm->tcpm_vals[i])
                                continue;
                        if (nla_put_u32(msg, i + 1, tm->tcpm_vals[i]) < 0)
                                goto nla_put_failure;
                        n++;
                }
                if (n)
                        nla_nest_end(msg, nest);
                else
                        nla_nest_cancel(msg, nest);
        }

        {
                struct tcp_fastopen_metrics tfom_copy[1], *tfom;
                unsigned int seq;

                do {
                        seq = read_seqbegin(&fastopen_seqlock);
                        tfom_copy[0] = tm->tcpm_fastopen;
                } while (read_seqretry(&fastopen_seqlock, seq));

                tfom = tfom_copy;
                if (tfom->mss &&
                    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
                                tfom->mss) < 0)
                        goto nla_put_failure;
                if (tfom->syn_loss &&
                    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
                                tfom->syn_loss) < 0 ||
                     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
                                jiffies - tfom->last_syn_loss) < 0))
                        goto nla_put_failure;
                if (tfom->cookie.len > 0 &&
                    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
                            tfom->cookie.len, tfom->cookie.val) < 0)
                        goto nla_put_failure;
        }
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
                                 struct netlink_callback *cb,
                                 struct tcp_metrics_block *tm)
{
        void *hdr;

        hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &tcp_metrics_nl_family, NLM_F_MULTI,
                          TCP_METRICS_CMD_GET);
        if (!hdr)
                return -EMSGSIZE;

        if (tcp_metrics_fill_info(skb, tm) < 0)
                goto nla_put_failure;

        return genlmsg_end(skb, hdr);

nla_put_failure:
        genlmsg_cancel(skb, hdr);
        return -EMSGSIZE;
}

static int tcp_metrics_nl_dump(struct sk_buff *skb,
                               struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
        unsigned int row, s_row = cb->args[0];
        int s_col = cb->args[1], col = s_col;

        for (row = s_row; row < max_rows; row++, s_col = 0) {
                struct tcp_metrics_block *tm;
                struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash + row;

                rcu_read_lock();
                for (col = 0, tm = rcu_dereference(hb->chain); tm;
                     tm = rcu_dereference(tm->tcpm_next), col++) {
                        if (col < s_col)
                                continue;
                        if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
                                rcu_read_unlock();
                                goto done;
                        }
                }
                rcu_read_unlock();
        }

done:
        cb->args[0] = row;
        cb->args[1] = col;
        return skb->len;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
                         unsigned int *hash, int optional)
{
        struct nlattr *a;

        a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV4];
        if (a) {
                addr->family = AF_INET;
                addr->addr.a4 = nla_get_be32(a);
                *hash = (__force unsigned int) addr->addr.a4;
                return 0;
        }
        a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV6];
        if (a) {
                if (nla_len(a) != sizeof(struct in6_addr))
                        return -EINVAL;
                addr->family = AF_INET6;
                memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6));
                *hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6);
                return 0;
        }
        return optional ? 1 : -EAFNOSUPPORT;
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct sk_buff *msg;
        struct net *net = genl_info_net(info);
        void *reply;
        int ret;

        ret = parse_nl_addr(info, &addr, &hash, 0);
        if (ret < 0)
                return ret;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
                                  info->genlhdr->cmd);
        if (!reply)
                goto nla_put_failure;

        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
        ret = -ESRCH;
        rcu_read_lock();
        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, &addr)) {
                        ret = tcp_metrics_fill_info(msg, tm);
                        break;
                }
        }
        rcu_read_unlock();
        if (ret < 0)
                goto out_free;

        genlmsg_end(msg, reply);
        return genlmsg_reply(msg, info);

nla_put_failure:
        ret = -EMSGSIZE;

out_free:
        nlmsg_free(msg);
        return ret;
}

#define deref_locked_genl(p)    \
        rcu_dereference_protected(p, lockdep_genl_is_held() && \
                                     lockdep_is_held(&tcp_metrics_lock))

#define deref_genl(p)   rcu_dereference_protected(p, lockdep_genl_is_held())

static int tcp_metrics_flush_all(struct net *net)
{
        unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
        struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash;
        struct tcp_metrics_block *tm;
        unsigned int row;

        for (row = 0; row < max_rows; row++, hb++) {
                spin_lock_bh(&tcp_metrics_lock);
                tm = deref_locked_genl(hb->chain);
                if (tm)
                        hb->chain = NULL;
                spin_unlock_bh(&tcp_metrics_lock);
                while (tm) {
                        struct tcp_metrics_block *next;

                        next = deref_genl(tm->tcpm_next);
                        kfree_rcu(tm, rcu_head);
                        tm = next;
                }
        }
        return 0;
}

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct tcpm_hash_bucket *hb;
        struct tcp_metrics_block *tm;
        struct tcp_metrics_block __rcu **pp;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net = genl_info_net(info);
        int ret;

        ret = parse_nl_addr(info, &addr, &hash, 1);
        if (ret < 0)
                return ret;
        if (ret > 0)
                return tcp_metrics_flush_all(net);

        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
        hb = net->ipv4.tcp_metrics_hash + hash;
        pp = &hb->chain;
        spin_lock_bh(&tcp_metrics_lock);
        for (tm = deref_locked_genl(*pp); tm;
             pp = &tm->tcpm_next, tm = deref_locked_genl(*pp)) {
                if (addr_same(&tm->tcpm_addr, &addr)) {
                        *pp = tm->tcpm_next;
                        break;
                }
        }
        spin_unlock_bh(&tcp_metrics_lock);
        if (!tm)
                return -ESRCH;
        kfree_rcu(tm, rcu_head);
        return 0;
}

static struct genl_ops tcp_metrics_nl_ops[] = {
        {
                .cmd = TCP_METRICS_CMD_GET,
                .doit = tcp_metrics_nl_cmd_get,
                .dumpit = tcp_metrics_nl_dump,
                .policy = tcp_metrics_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = TCP_METRICS_CMD_DEL,
                .doit = tcp_metrics_nl_cmd_del,
                .policy = tcp_metrics_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
};

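/* From userspace the cache is reachable over this generic netlink
 * family; with an iproute2 build that includes tcp_metrics support,
 * commands along the lines of "ip tcp_metrics show" and
 * "ip tcp_metrics flush" map onto TCP_METRICS_CMD_GET dumps and
 * TCP_METRICS_CMD_DEL above (exact syntax depends on the iproute2
 * version).
 */
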
static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
        ssize_t ret;

        if (!str)
                return 0;

        ret = kstrtouint(str, 0, &tcpmhash_entries);
        if (ret)
                return 0;

        return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

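/* Example: booting with "tcpmhash_entries=4096" on the kernel command
 * line sizes every namespace's metrics hash at 4096 slots instead of
 * the RAM-based default chosen in tcp_net_metrics_init() below.
 */
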
static int __net_init tcp_net_metrics_init(struct net *net)
{
        size_t size;
        unsigned int slots;

        slots = tcpmhash_entries;
        if (!slots) {
                if (totalram_pages >= 128 * 1024)
                        slots = 16 * 1024;
                else
                        slots = 8 * 1024;
        }

        net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
        size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;

        net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
        if (!net->ipv4.tcp_metrics_hash)
                net->ipv4.tcp_metrics_hash = vzalloc(size);

        if (!net->ipv4.tcp_metrics_hash)
                return -ENOMEM;

        return 0;
}

static void __net_exit tcp_net_metrics_exit(struct net *net)
{
        unsigned int i;

        for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log); i++) {
                struct tcp_metrics_block *tm, *next;

                tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
                while (tm) {
                        next = rcu_dereference_protected(tm->tcpm_next, 1);
                        kfree(tm);
                        tm = next;
                }
        }
        if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash))
                vfree(net->ipv4.tcp_metrics_hash);
        else
                kfree(net->ipv4.tcp_metrics_hash);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
        .init = tcp_net_metrics_init,
        .exit = tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
        int ret;

        ret = register_pernet_subsys(&tcp_net_metrics_ops);
        if (ret < 0)
                goto cleanup;
        ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
                                            tcp_metrics_nl_ops,
                                            ARRAY_SIZE(tcp_metrics_nl_ops));
        if (ret < 0)
                goto cleanup_subsys;
        return;

cleanup_subsys:
        unregister_pernet_subsys(&tcp_net_metrics_ops);

cleanup:
        return;
}