#include <linux/tcp.h>
#include <net/tcp.h>

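/* Controlled by the net.ipv4.tcp_recovery sysctl; RACK loss detection
 * (TCP_RACK_LOSS_DETECTION) is enabled by default.
 */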
int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOSS_DETECTION;

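/* Mark @skb lost. If it is a retransmission that is now lost again, clear
 * its SACKED_RETRANS bit, undo the retrans_out accounting and count the
 * event in the TCPLostRetransmit MIB.
 */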
static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        tcp_skb_mark_lost_uncond_verify(tp, skb);
        if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
                /* Account for retransmits that are lost again */
                TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
                tp->retrans_out -= tcp_skb_pcount(skb);
                NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
                              tcp_skb_pcount(skb));
        }
}

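/* A packet is considered "sent after" another if its last (re)transmission
 * timestamp is newer; equal timestamps are disambiguated by the higher end
 * sequence number.
 */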
static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
        return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK,
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and is therefore robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb, *n;
        u32 reo_wnd;

        *reo_timeout = 0;
        /* To be more reordering resilient, allow min_rtt/4 settling delay
         * (lower-bounded to 1000uS). We use min_rtt instead of the smoothed
         * RTT because reordering is often a path property and less related
         * to queuing or delayed ACKs.
         */
        reo_wnd = 1000;
        if ((tp->rack.reord || !tp->lost_out) && tcp_min_rtt(tp) != ~0U)
                reo_wnd = max(tcp_min_rtt(tp) >> 2, reo_wnd);

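        /* tsorted_sent_queue is ordered by last (re)transmission time, oldest
         * first, so the walk can stop at the first skb that was not sent
         * before the most recently delivered packet.
         */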
        list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
                                 tcp_tsorted_anchor) {
                struct tcp_skb_cb *scb = TCP_SKB_CB(skb);

                if (tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
                                        tp->rack.end_seq, scb->end_seq)) {
                        /* Step 3 in draft-cheng-tcpm-rack-00.txt:
                         * A packet is lost if its elapsed time is beyond
                         * the recent RTT plus the reordering window.
                         */
                        u32 elapsed = tcp_stamp_us_delta(tp->tcp_mstamp,
                                                         skb->skb_mstamp);
                        s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed;

                        if (remaining < 0) {
                                tcp_rack_mark_skb_lost(sk, skb);
                                list_del_init(&skb->tcp_tsorted_anchor);
                                continue;
                        }

                        /* Skip ones marked lost but not yet retransmitted */
                        if ((scb->sacked & TCPCB_LOST) &&
                            !(scb->sacked & TCPCB_SACKED_RETRANS))
                                continue;

                        /* Record maximum wait time (+1 to avoid 0) */
                        *reo_timeout = max_t(u32, *reo_timeout, 1 + remaining);
                } else {
                        break;
                }
        }
}

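/* Run RACK loss detection once the RACK state has advanced. Packets that are
 * still within the reordering window are re-checked later via the reordering
 * timer (ICSK_TIME_REO_TIMEOUT).
 */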
void tcp_rack_mark_lost(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 timeout;

        if (!tp->rack.advanced)
                return;

        /* Reset the advanced flag to avoid unnecessary queue scanning */
        tp->rack.advanced = 0;
        tcp_rack_detect_loss(sk, &timeout);
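        /* A non-zero timeout means some packets are still within the
         * reordering window: arm the reordering timer for the longest
         * remaining wait, converted to jiffies plus TCP_TIMEOUT_MIN so the
         * timer is always at least the minimum timeout.
         */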
        if (timeout) {
                timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
                                          timeout, inet_csk(sk)->icsk_rto);
        }
}

/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
                      u64 xmit_time)
{
        u32 rtt_us;

        if (tp->rack.mstamp &&
            !tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
                                 end_seq, tp->rack.end_seq))
                return;

        rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
        if (sacked & TCPCB_RETRANS) {
                /* If the sacked packet was retransmitted, it's ambiguous
                 * whether the retransmission or the original (or the prior
                 * retransmission) was sacked.
                 *
                 * If the original is lost, there is no ambiguity. Otherwise
                 * we assume the original can be delayed up to aRTT + min_rtt.
                 * The aRTT term is bounded by the fast recovery or timeout,
                 * so it's at least one RTT (i.e., retransmission is at least
                 * an RTT later).
                 */
                if (rtt_us < tcp_min_rtt(tp))
                        return;
        }
        tp->rack.rtt_us = rtt_us;
        tp->rack.mstamp = xmit_time;
        tp->rack.end_seq = end_seq;
        tp->rack.advanced = 1;
}

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 timeout, prior_inflight;

        prior_inflight = tcp_packets_in_flight(tp);
        tcp_rack_detect_loss(sk, &timeout);
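        /* If tcp_rack_detect_loss() marked new packets lost, the in-flight
         * count has dropped: enter recovery (unless already in it) and
         * retransmit the newly marked packets.
         */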
        if (prior_inflight != tcp_packets_in_flight(tp)) {
                if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
                        tcp_enter_recovery(sk, false);
                        if (!inet_csk(sk)->icsk_ca_ops->cong_control)
                                tcp_cwnd_reduction(sk, 1, 0);
                }
                tcp_xmit_retransmit_queue(sk);
        }
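        /* The reordering timer shares the xmit timer slot; unless the
         * retransmissions above already re-armed an RTO, arm one now.
         */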
        if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
                tcp_rearm_rto(sk);
}