/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

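/* Derive the initial sequence number and the timestamp offset for an
 * incoming SYN from the packet's address/port four-tuple, so they are
 * unpredictable to off-path attackers yet stable for a given connection.
 */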
static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
	return secure_tcp_seq(ip_hdr(skb)->daddr,
			      ip_hdr(skb)->saddr,
			      tcp_hdr(skb)->dest,
			      tcp_hdr(skb)->source);
}

static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}

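/* Decide whether a TIME-WAIT socket may be reused for a new outgoing
 * connection to the same peer.  With tcp_tw_reuse enabled, PAWS timestamps
 * let us pick a write_seq that cannot be confused with the old connection.
 */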
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct inet_timewait_sock *tw = inet_twsk(sktw);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);
	int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;

	if (reuse == 2) {
		/* Still does not detect *everything* that goes through
		 * lo, since we require a loopback src or dst address
		 * or direct binding to 'lo' interface.
		 */
		bool loopback = false;
		if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
			loopback = true;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == AF_INET6) {
			if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
			    (ipv6_addr_v4mapped(&tw->tw_v6_daddr) &&
			     (tw->tw_v6_daddr.s6_addr[12] == 127)) ||
			    ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
			    (ipv6_addr_v4mapped(&tw->tw_v6_rcv_saddr) &&
			     (tw->tw_v6_rcv_saddr.s6_addr[12] == 127)))
				loopback = true;
		} else
#endif
		{
			if (ipv4_is_loopback(tw->tw_daddr) ||
			    ipv4_is_loopback(tw->tw_rcv_saddr))
				loopback = true;
		}
		if (!loopback)
			reuse = 0;
	}

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (reuse && time_after32(ktime_get_seconds(),
					    tcptw->tw_ts_recent_stamp)))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

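/* Run before tcp_v4_connect(): gives attached BPF cgroup programs a chance
 * to inspect or rewrite the destination address of a connect() call.
 */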
static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v4_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
}

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);
	rt = NULL;

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcp_seq(inet->inet_saddr,
						       inet->inet_daddr,
						       inet->inet_sport,
						       usin->sin_port);
		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
						 inet->inet_saddr,
						 inet->inet_daddr);
	}

	inet->inet_id = tp->write_seq ^ jiffies;

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto failure;

	err = tcp_connect(sk);

	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;
	mtu = tcp_sk(sk)->mtu_info;
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

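/* Propagate an ICMP redirect to the destination cache entry of this socket. */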
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}


/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	u32 seq, snd_una;
	s32 remaining;
	u32 delta_us;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb), 0);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq,
				   type == ICMP_PARAMETERPROB ||
				   type == ICMP_TIME_EXCEEDED ||
				   (type == ICMP_DEST_UNREACH &&
				    (code == ICMP_NET_UNREACH ||
				     code == ICMP_HOST_UNREACH)));

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576 bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_rtx_queue_head(sk);
		BUG_ON(!skb);

		tcp_mstamp_refresh(tp);
		delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
		remaining = icsk->icsk_rto -
			    usecs_to_jiffies(delta_us);

		if (remaining > 0) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 * --ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

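/* Prepare the TCP checksum for transmission: store the pseudo-header sum in
 * th->check and point csum_start/csum_offset at it, so hardware (or the
 * software fallback) can finish the checksum later.
 */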
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;
	struct sock *ctl_sk;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not losing security here:
		 * Incoming packet is checked with md5 hash of the found key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb),
					     tcp_v4_sdif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto out;


		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;

	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				     key, ip_hdr(skb)->saddr,
				     ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk) {
		arg.bound_dev_if = sk->sk_bound_dev_if;
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
	}

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
	if (sk)
		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_mark : sk->sk_mark;
	ip_send_unicast_reply(ctl_sk,
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	ctl_sk->sk_mark = 0;
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
	local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

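/* Build the ACK entirely on the stack and transmit it through this CPU's
 * control socket (net->ipv4.tcp_sk), so no full socket is required.
 */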
static void tcp_v4_send_ack(const struct sock *sk,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct net *net = sock_net(sk);
	struct ip_reply_arg arg;
	struct sock *ctl_sk;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
	if (sk)
		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_mark : sk->sk_mark;
	ip_send_unicast_reply(ctl_sk,
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	ctl_sk->sk_mark = 0;
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}

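/* Answer a segment that arrived for a TIME-WAIT socket with the appropriate
 * ACK, carrying the timestamps remembered in the timewait bucket.
 */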
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sk, skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

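/* ACK on behalf of a request socket: used for SYN-RECV at a listener and
 * for TCP Fast Open children that have not been accepted yet.
 */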
static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
					     tcp_sk(sk)->snd_nxt;

	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v4_send_ack(sk, skb, seq,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq_opt_deref(ireq));
		err = net_xmit_eval(err);
	}

	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	const struct tcp_md5sig_info *md5sig;
	__be32 mask;
	struct tcp_md5sig_key *best_match = NULL;
	bool match;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;

	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;

		if (family == AF_INET) {
			mask = inet_make_mask(key->prefixlen);
			match = (key->addr.a4.s_addr & mask) ==
				(addr->a4.s_addr & mask);
#if IS_ENABLED(CONFIG_IPV6)
		} else if (family == AF_INET6) {
			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
						  key->prefixlen);
#endif
		} else {
			match = false;
		}

		if (match && (!best_match ||
			      key->prefixlen > best_match->prefixlen))
			best_match = key;
	}
	return best_match;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

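/* Like tcp_md5_do_lookup(), but requires an exact address and prefix-length
 * match; used when keys are added or deleted.
 */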
static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
						      const union tcp_md5_addr *addr,
						      int family, u8 prefixlen)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size) &&
		    key->prefixlen == prefixlen)
			return key;
	}
	return NULL;
}

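/* Find the MD5 key to use for an established peer, keyed by the socket's
 * destination address.
 */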
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
		   gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   lockdep_sock_is_held(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	key->prefixlen = prefixlen;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
		   u8 prefixlen)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

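/* Release every MD5 key attached to the socket (socket destruction path). */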
static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

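/* setsockopt(TCP_MD5SIG / TCP_MD5SIG_EXT): validate the user-supplied key
 * and add it to, or delete it from, this socket's key list.
 */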
Ivan Delalande8917a772017-06-15 18:07:07 -07001119static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
1120 char __user *optval, int optlen)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001121{
1122 struct tcp_md5sig cmd;
1123 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
Ivan Delalande8917a772017-06-15 18:07:07 -07001124 u8 prefixlen = 32;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001125
1126 if (optlen < sizeof(cmd))
1127 return -EINVAL;
1128
Arnaldo Carvalho de Melo71742592006-11-17 10:57:30 -02001129 if (copy_from_user(&cmd, optval, sizeof(cmd)))
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001130 return -EFAULT;
1131
1132 if (sin->sin_family != AF_INET)
1133 return -EINVAL;
1134
Ivan Delalande8917a772017-06-15 18:07:07 -07001135 if (optname == TCP_MD5SIG_EXT &&
1136 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
1137 prefixlen = cmd.tcpm_prefixlen;
1138 if (prefixlen > 32)
1139 return -EINVAL;
1140 }
1141
Dmitry Popov64a124e2014-08-03 22:45:19 +04001142 if (!cmd.tcpm_keylen)
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001143 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
Ivan Delalande8917a772017-06-15 18:07:07 -07001144 AF_INET, prefixlen);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001145
1146 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1147 return -EINVAL;
1148
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001149 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
Ivan Delalande8917a772017-06-15 18:07:07 -07001150 AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001151 GFP_KERNEL);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001152}
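
/* Illustrative only: a minimal userspace sketch of the option handled above.
 * The struct layout is the UAPI one from <linux/tcp.h>; "fd" is assumed to be
 * an already-created TCP socket, and the peer address and key are made-up
 * values (needs <sys/socket.h>, <netinet/in.h>, <netinet/tcp.h>, <arpa/inet.h>):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *peer = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	peer->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &peer->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5)) < 0)
 *		perror("TCP_MD5SIG");
 *
 * Passing tcpm_keylen == 0 deletes the key for that peer, matching the
 * tcp_md5_do_del() branch above.
 */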
1153
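/* Hash the RFC 2385 pseudo-header: source and destination addresses,
 * protocol and segment length, followed by a copy of the TCP header with
 * its checksum field zeroed.
 */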
Eric Dumazet19689e32016-06-27 18:51:53 +02001154static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1155 __be32 daddr, __be32 saddr,
1156 const struct tcphdr *th, int nbytes)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001157{
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001158 struct tcp4_pseudohdr *bp;
Adam Langley49a72df2008-07-19 00:01:42 -07001159 struct scatterlist sg;
Eric Dumazet19689e32016-06-27 18:51:53 +02001160 struct tcphdr *_th;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001161
Eric Dumazet19689e32016-06-27 18:51:53 +02001162 bp = hp->scratch;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001163 bp->saddr = saddr;
1164 bp->daddr = daddr;
1165 bp->pad = 0;
YOSHIFUJI Hideaki076fb722008-04-17 12:48:12 +09001166 bp->protocol = IPPROTO_TCP;
Adam Langley49a72df2008-07-19 00:01:42 -07001167 bp->len = cpu_to_be16(nbytes);
David S. Millerc7da57a2007-10-26 00:41:21 -07001168
Eric Dumazet19689e32016-06-27 18:51:53 +02001169 _th = (struct tcphdr *)(bp + 1);
1170 memcpy(_th, th, sizeof(*th));
1171 _th->check = 0;
1172
1173 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1174 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1175 sizeof(*bp) + sizeof(*th));
Herbert Xucf80e0e2016-01-24 21:20:23 +08001176 return crypto_ahash_update(hp->md5_req);
Adam Langley49a72df2008-07-19 00:01:42 -07001177}
1178
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001179static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
Eric Dumazet318cf7a2011-10-24 02:46:04 -04001180 __be32 daddr, __be32 saddr, const struct tcphdr *th)
Adam Langley49a72df2008-07-19 00:01:42 -07001181{
1182 struct tcp_md5sig_pool *hp;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001183 struct ahash_request *req;
Adam Langley49a72df2008-07-19 00:01:42 -07001184
1185 hp = tcp_get_md5sig_pool();
1186 if (!hp)
1187 goto clear_hash_noput;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001188 req = hp->md5_req;
Adam Langley49a72df2008-07-19 00:01:42 -07001189
Herbert Xucf80e0e2016-01-24 21:20:23 +08001190 if (crypto_ahash_init(req))
Adam Langley49a72df2008-07-19 00:01:42 -07001191 goto clear_hash;
Eric Dumazet19689e32016-06-27 18:51:53 +02001192 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
Adam Langley49a72df2008-07-19 00:01:42 -07001193 goto clear_hash;
1194 if (tcp_md5_hash_key(hp, key))
1195 goto clear_hash;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001196 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1197 if (crypto_ahash_final(req))
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001198 goto clear_hash;
1199
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001200 tcp_put_md5sig_pool();
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001201 return 0;
Adam Langley49a72df2008-07-19 00:01:42 -07001202
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001203clear_hash:
1204 tcp_put_md5sig_pool();
1205clear_hash_noput:
1206 memset(md5_hash, 0, 16);
Adam Langley49a72df2008-07-19 00:01:42 -07001207 return 1;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001208}
1209
Eric Dumazet39f8e582015-03-24 15:58:55 -07001210int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1211 const struct sock *sk,
Eric Dumazet318cf7a2011-10-24 02:46:04 -04001212 const struct sk_buff *skb)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001213{
Adam Langley49a72df2008-07-19 00:01:42 -07001214 struct tcp_md5sig_pool *hp;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001215 struct ahash_request *req;
Eric Dumazet318cf7a2011-10-24 02:46:04 -04001216 const struct tcphdr *th = tcp_hdr(skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001217 __be32 saddr, daddr;
1218
Eric Dumazet39f8e582015-03-24 15:58:55 -07001219 if (sk) { /* valid for establish/request sockets */
1220 saddr = sk->sk_rcv_saddr;
1221 daddr = sk->sk_daddr;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001222 } else {
Adam Langley49a72df2008-07-19 00:01:42 -07001223 const struct iphdr *iph = ip_hdr(skb);
1224 saddr = iph->saddr;
1225 daddr = iph->daddr;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001226 }
Adam Langley49a72df2008-07-19 00:01:42 -07001227
1228 hp = tcp_get_md5sig_pool();
1229 if (!hp)
1230 goto clear_hash_noput;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001231 req = hp->md5_req;
Adam Langley49a72df2008-07-19 00:01:42 -07001232
Herbert Xucf80e0e2016-01-24 21:20:23 +08001233 if (crypto_ahash_init(req))
Adam Langley49a72df2008-07-19 00:01:42 -07001234 goto clear_hash;
1235
Eric Dumazet19689e32016-06-27 18:51:53 +02001236 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
Adam Langley49a72df2008-07-19 00:01:42 -07001237 goto clear_hash;
1238 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1239 goto clear_hash;
1240 if (tcp_md5_hash_key(hp, key))
1241 goto clear_hash;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001242 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1243 if (crypto_ahash_final(req))
Adam Langley49a72df2008-07-19 00:01:42 -07001244 goto clear_hash;
1245
1246 tcp_put_md5sig_pool();
1247 return 0;
1248
1249clear_hash:
1250 tcp_put_md5sig_pool();
1251clear_hash_noput:
1252 memset(md5_hash, 0, 16);
1253 return 1;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001254}
Adam Langley49a72df2008-07-19 00:01:42 -07001255EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001256
Eric Dumazetba8e2752015-10-02 11:43:28 -07001257#endif
1258
Eric Dumazetff74e232015-03-24 15:58:54 -07001259/* Called with rcu_read_lock() */
Eric Dumazetba8e2752015-10-02 11:43:28 -07001260static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
Eric Dumazetff74e232015-03-24 15:58:54 -07001261 const struct sk_buff *skb)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001262{
Eric Dumazetba8e2752015-10-02 11:43:28 -07001263#ifdef CONFIG_TCP_MD5SIG
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001264 /*
1265 * This gets called for each TCP segment that arrives
1266 * so we want to be efficient.
1267 * We have 3 drop cases:
1268 * o No MD5 hash and one expected.
1269 * o MD5 hash and we're not expecting one.
 1270	 * o MD5 hash and it's wrong.
1271 */
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001272 const __u8 *hash_location = NULL;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001273 struct tcp_md5sig_key *hash_expected;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001274 const struct iphdr *iph = ip_hdr(skb);
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001275 const struct tcphdr *th = tcp_hdr(skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001276 int genhash;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001277 unsigned char newhash[16];
1278
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001279 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1280 AF_INET);
YOSHIFUJI Hideaki7d5d5522008-04-17 12:29:53 +09001281 hash_location = tcp_parse_md5sig_option(th);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001282
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001283 /* We've parsed the options - do we have a hash? */
1284 if (!hash_expected && !hash_location)
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001285 return false;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001286
1287 if (hash_expected && !hash_location) {
Eric Dumazetc10d9312016-04-29 14:16:47 -07001288 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001289 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001290 }
1291
1292 if (!hash_expected && hash_location) {
Eric Dumazetc10d9312016-04-29 14:16:47 -07001293 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001294 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001295 }
1296
1297 /* Okay, so this is hash_expected and hash_location -
1298 * so we need to calculate the checksum.
1299 */
Adam Langley49a72df2008-07-19 00:01:42 -07001300 genhash = tcp_v4_md5_hash_skb(newhash,
1301 hash_expected,
Eric Dumazet39f8e582015-03-24 15:58:55 -07001302 NULL, skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001303
1304 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
Eric Dumazet72145a62016-08-24 09:01:23 -07001305 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
Joe Perchese87cc472012-05-13 21:56:26 +00001306 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1307 &iph->saddr, ntohs(th->source),
1308 &iph->daddr, ntohs(th->dest),
1309 genhash ? " tcp_v4_calc_md5_hash failed"
1310 : "");
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001311 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001312 }
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001313 return false;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001314#endif
Eric Dumazetba8e2752015-10-02 11:43:28 -07001315 return false;
1316}
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001317
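/* request_sock init hook: record the addresses from the incoming SYN and
 * stash any IP options for use by the eventual child socket.
 */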
Eric Dumazetb40cf182015-09-25 07:39:08 -07001318static void tcp_v4_init_req(struct request_sock *req,
1319 const struct sock *sk_listener,
Octavian Purdila16bea702014-06-25 17:09:53 +03001320 struct sk_buff *skb)
1321{
1322 struct inet_request_sock *ireq = inet_rsk(req);
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001323 struct net *net = sock_net(sk_listener);
Octavian Purdila16bea702014-06-25 17:09:53 +03001324
Eric Dumazet08d2cc3b2015-03-18 14:05:38 -07001325 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1326 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001327 RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
Octavian Purdila16bea702014-06-25 17:09:53 +03001328}
1329
Eric Dumazetf9646292015-09-29 07:42:50 -07001330static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1331 struct flowi *fl,
Soheil Hassas Yeganeh4396e462017-03-15 16:30:46 -04001332 const struct request_sock *req)
Octavian Purdilad94e0412014-06-25 17:09:55 +03001333{
Soheil Hassas Yeganeh4396e462017-03-15 16:30:46 -04001334 return inet_csk_route_req(sk, &fl->u.ip4, req);
Octavian Purdilad94e0412014-06-25 17:09:55 +03001335}
1336
Eric Dumazet72a3eff2006-11-16 02:30:37 -08001337struct request_sock_ops tcp_request_sock_ops __read_mostly = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338 .family = PF_INET,
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001339 .obj_size = sizeof(struct tcp_request_sock),
Octavian Purdila5db92c92014-06-25 17:09:59 +03001340 .rtx_syn_ack = tcp_rtx_synack,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07001341 .send_ack = tcp_v4_reqsk_send_ack,
1342 .destructor = tcp_v4_reqsk_destructor,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343 .send_reset = tcp_v4_send_reset,
stephen hemminger688d1942014-08-29 23:32:05 -07001344 .syn_ack_timeout = tcp_syn_ack_timeout,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345};
1346
Stephen Hemmingerb2e4b3de2009-09-01 19:25:03 +00001347static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
Octavian Purdila2aec4a22014-06-25 17:10:00 +03001348 .mss_clamp = TCP_MSS_DEFAULT,
Octavian Purdila16bea702014-06-25 17:09:53 +03001349#ifdef CONFIG_TCP_MD5SIG
Eric Dumazetfd3a1542015-03-24 15:58:56 -07001350 .req_md5_lookup = tcp_v4_md5_lookup,
John Dykstrae3afe7b2009-07-16 05:04:51 +00001351 .calc_md5_hash = tcp_v4_md5_hash_skb,
Andrew Mortonb6332e62006-11-30 19:16:28 -08001352#endif
Octavian Purdila16bea702014-06-25 17:09:53 +03001353 .init_req = tcp_v4_init_req,
Octavian Purdilafb7b37a2014-06-25 17:09:54 +03001354#ifdef CONFIG_SYN_COOKIES
1355 .cookie_init_seq = cookie_v4_init_sequence,
1356#endif
Octavian Purdilad94e0412014-06-25 17:09:55 +03001357 .route_req = tcp_v4_route_req,
Eric Dumazet84b114b2017-05-05 06:56:54 -07001358 .init_seq = tcp_v4_init_seq,
1359 .init_ts_off = tcp_v4_init_ts_off,
Octavian Purdilad6274bd2014-06-25 17:09:58 +03001360 .send_synack = tcp_v4_send_synack,
Octavian Purdila16bea702014-06-25 17:09:53 +03001361};
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001362
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1364{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365	/* Never answer SYNs sent to broadcast or multicast */
Eric Dumazet511c3f92009-06-02 05:14:27 +00001366 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367 goto drop;
1368
Octavian Purdila1fb6f152014-06-25 17:10:02 +03001369 return tcp_conn_request(&tcp_request_sock_ops,
1370 &tcp_request_sock_ipv4_ops, sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372drop:
Eric Dumazet9caad862016-04-01 08:52:20 -07001373 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374 return 0;
1375}
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001376EXPORT_SYMBOL(tcp_v4_conn_request);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377
1378
1379/*
 1380 * The three-way handshake has completed - we got a valid ACK -
1381 * now create the new socket.
1382 */
Eric Dumazet0c271712015-09-29 07:42:48 -07001383struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07001384 struct request_sock *req,
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001385 struct dst_entry *dst,
1386 struct request_sock *req_unhash,
1387 bool *own_req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388{
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001389 struct inet_request_sock *ireq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390 struct inet_sock *newinet;
1391 struct tcp_sock *newtp;
1392 struct sock *newsk;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001393#ifdef CONFIG_TCP_MD5SIG
1394 struct tcp_md5sig_key *key;
1395#endif
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001396 struct ip_options_rcu *inet_opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397
1398 if (sk_acceptq_is_full(sk))
1399 goto exit_overflow;
1400
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401 newsk = tcp_create_openreq_child(sk, req, skb);
1402 if (!newsk)
Balazs Scheidler093d2822010-10-21 13:06:43 +02001403 goto exit_nonewsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404
Herbert Xubcd76112006-06-30 13:36:35 -07001405 newsk->sk_gso_type = SKB_GSO_TCPV4;
Neal Cardwellfae6ef82012-08-19 03:30:38 +00001406 inet_sk_rx_dst_set(newsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407
1408 newtp = tcp_sk(newsk);
1409 newinet = inet_sk(newsk);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001410 ireq = inet_rsk(req);
Eric Dumazetd1e559d2015-03-18 14:05:35 -07001411 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1412 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
David Ahern6dd9a142015-12-16 13:20:44 -08001413 newsk->sk_bound_dev_if = ireq->ir_iif;
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001414 newinet->inet_saddr = ireq->ir_loc_addr;
1415 inet_opt = rcu_dereference(ireq->ireq_opt);
1416 RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001417 newinet->mc_index = inet_iif(skb);
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001418 newinet->mc_ttl = ip_hdr(skb)->ttl;
Jiri Benc4c507d22012-02-09 09:35:49 +00001419 newinet->rcv_tos = ip_hdr(skb)->tos;
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001420 inet_csk(newsk)->icsk_ext_hdr_len = 0;
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001421 if (inet_opt)
1422 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
Eric Dumazetc720c7e82009-10-15 06:30:45 +00001423 newinet->inet_id = newtp->write_seq ^ jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424
Eric Dumazetdfd25ff2012-03-10 09:20:21 +00001425 if (!dst) {
1426 dst = inet_csk_route_child_sock(sk, newsk, req);
1427 if (!dst)
1428 goto put_and_exit;
1429 } else {
1430 /* syncookie case : see end of cookie_v4_check() */
1431 }
David S. Miller0e734412011-05-08 15:28:03 -07001432 sk_setup_caps(newsk, dst);
1433
Daniel Borkmann81164412015-01-05 23:57:48 +01001434 tcp_ca_openreq_child(newsk, dst);
1435
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 tcp_sync_mss(newsk, dst_mtu(dst));
Eric Dumazet3541f9e2017-02-02 08:04:56 -08001437 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
Tom Quetchenbachf5fff5d2008-09-21 00:21:51 -07001438
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439 tcp_initialize_rcv_mss(newsk);
1440
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001441#ifdef CONFIG_TCP_MD5SIG
1442 /* Copy over the MD5 key from the original socket */
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001443 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1444 AF_INET);
Ian Morris00db4122015-04-03 09:17:27 +01001445 if (key) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001446 /*
1447 * We're using one, so create a matching key
1448 * on the newsk structure. If we fail to get
1449 * memory, then we end up not copying the key
1450 * across. Shucks.
1451 */
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001452 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
Ivan Delalande67973182017-06-15 18:07:06 -07001453 AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
Eric Dumazeta4654192010-05-16 00:36:33 -07001454 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001455 }
1456#endif
1457
David S. Miller0e734412011-05-08 15:28:03 -07001458 if (__inet_inherit_port(sk, newsk) < 0)
1459 goto put_and_exit;
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001460 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001461 if (likely(*own_req)) {
Eric Dumazet49a496c2015-11-05 12:50:19 -08001462 tcp_move_syn(newtp, req);
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001463 ireq->ireq_opt = NULL;
1464 } else {
1465 newinet->inet_opt = NULL;
1466 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467 return newsk;
1468
1469exit_overflow:
Eric Dumazetc10d9312016-04-29 14:16:47 -07001470 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001471exit_nonewsk:
1472 dst_release(dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473exit:
Eric Dumazet9caad862016-04-01 08:52:20 -07001474 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475 return NULL;
David S. Miller0e734412011-05-08 15:28:03 -07001476put_and_exit:
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001477 newinet->inet_opt = NULL;
Christoph Paasche337e242012-12-14 04:07:58 +00001478 inet_csk_prepare_forced_close(newsk);
1479 tcp_done(newsk);
David S. Miller0e734412011-05-08 15:28:03 -07001480 goto exit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481}
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001482EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483
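/* On a listener, a bare ACK (no SYN) may be the third packet of a syncookie
 * handshake; cookie_v4_check() validates the cookie and creates the child
 * socket when it is genuine. With CONFIG_SYN_COOKIES off the skb is passed
 * through unchanged.
 */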
Eric Dumazet079096f2015-10-02 11:43:32 -07001484static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486#ifdef CONFIG_SYN_COOKIES
Eric Dumazet079096f2015-10-02 11:43:32 -07001487 const struct tcphdr *th = tcp_hdr(skb);
1488
Florian Westphalaf9b4732010-06-03 00:43:44 +00001489 if (!th->syn)
Cong Wang461b74c2014-10-15 14:33:22 -07001490 sk = cookie_v4_check(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491#endif
1492 return sk;
1493}
1494
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495/* The socket must have its spinlock held when we get
Eric Dumazete994b2f2015-10-02 11:43:39 -07001496 * here, unless it is a TCP_LISTEN socket.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497 *
1498 * We have a potential double-lock case here, so even when
1499 * doing backlog processing we use the BH locking scheme.
1500 * This is because we cannot sleep with the original spinlock
1501 * held.
1502 */
1503int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1504{
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001505 struct sock *rsk;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001506
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
Eric Dumazet404e0a82012-07-29 23:20:37 +00001508 struct dst_entry *dst = sk->sk_rx_dst;
1509
Tom Herbertbdeab992011-08-14 19:45:55 +00001510 sock_rps_save_rxhash(sk, skb);
Eric Dumazet3d973792014-11-11 05:54:27 -08001511 sk_mark_napi_id(sk, skb);
Eric Dumazet404e0a82012-07-29 23:20:37 +00001512 if (dst) {
Eric Dumazet505fbcf2012-07-27 06:23:40 +00001513 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
Ian Morris51456b22015-04-03 09:17:26 +01001514 !dst->ops->check(dst, 0)) {
David S. Miller92101b32012-07-23 16:29:00 -07001515 dst_release(dst);
1516 sk->sk_rx_dst = NULL;
1517 }
1518 }
Yafang Shao3d97d882018-05-29 23:27:31 +08001519 tcp_rcv_established(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520 return 0;
1521 }
1522
Eric Dumazet12e25e12015-06-03 23:49:21 -07001523 if (tcp_checksum_complete(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524 goto csum_err;
1525
1526 if (sk->sk_state == TCP_LISTEN) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001527 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1528
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 if (!nsk)
1530 goto discard;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531 if (nsk != sk) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001532 if (tcp_child_process(sk, nsk, skb)) {
1533 rsk = nsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 goto reset;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001535 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536 return 0;
1537 }
Eric Dumazetca551582010-06-03 09:03:58 +00001538 } else
Tom Herbertbdeab992011-08-14 19:45:55 +00001539 sock_rps_save_rxhash(sk, skb);
Eric Dumazetca551582010-06-03 09:03:58 +00001540
Eric Dumazet72ab4a82015-09-29 07:42:41 -07001541 if (tcp_rcv_state_process(sk, skb)) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001542 rsk = sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543 goto reset;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001544 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545 return 0;
1546
1547reset:
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001548 tcp_v4_send_reset(rsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549discard:
1550 kfree_skb(skb);
1551 /* Be careful here. If this function gets more complicated and
1552 * gcc suffers from register pressure on the x86, sk (in %ebx)
1553 * might be destroyed here. This current version compiles correctly,
1554 * but you have been warned.
1555 */
1556 return 0;
1557
1558csum_err:
Eric Dumazetc10d9312016-04-29 14:16:47 -07001559 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1560 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561 goto discard;
1562}
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001563EXPORT_SYMBOL(tcp_v4_do_rcv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564
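/* Early demux: run from the IP receive path before routing. If an
 * established socket is found, attach it to the skb and reuse its cached
 * input route, avoiding a second socket lookup and a full route lookup.
 */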
Paolo Abeni74874492017-09-28 15:51:36 +02001565int tcp_v4_early_demux(struct sk_buff *skb)
David S. Miller41063e92012-06-19 21:22:05 -07001566{
David S. Miller41063e92012-06-19 21:22:05 -07001567 const struct iphdr *iph;
1568 const struct tcphdr *th;
1569 struct sock *sk;
David S. Miller41063e92012-06-19 21:22:05 -07001570
David S. Miller41063e92012-06-19 21:22:05 -07001571 if (skb->pkt_type != PACKET_HOST)
Paolo Abeni74874492017-09-28 15:51:36 +02001572 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001573
Eric Dumazet45f00f92012-10-22 21:42:47 +00001574 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
Paolo Abeni74874492017-09-28 15:51:36 +02001575 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001576
1577 iph = ip_hdr(skb);
Eric Dumazet45f00f92012-10-22 21:42:47 +00001578 th = tcp_hdr(skb);
David S. Miller41063e92012-06-19 21:22:05 -07001579
1580 if (th->doff < sizeof(struct tcphdr) / 4)
Paolo Abeni74874492017-09-28 15:51:36 +02001581 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001582
Eric Dumazet45f00f92012-10-22 21:42:47 +00001583 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
David S. Miller41063e92012-06-19 21:22:05 -07001584 iph->saddr, th->source,
Vijay Subramanian7011d082012-06-23 17:38:10 +00001585 iph->daddr, ntohs(th->dest),
David Ahern3fa6f612017-08-07 08:44:17 -07001586 skb->skb_iif, inet_sdif(skb));
David S. Miller41063e92012-06-19 21:22:05 -07001587 if (sk) {
1588 skb->sk = sk;
1589 skb->destructor = sock_edemux;
Eric Dumazetf7e4eb02015-03-15 21:12:13 -07001590 if (sk_fullsock(sk)) {
Michal Kubečekd0c294c2015-03-23 15:14:00 +01001591 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
Eric Dumazet505fbcf2012-07-27 06:23:40 +00001592
David S. Miller41063e92012-06-19 21:22:05 -07001593 if (dst)
1594 dst = dst_check(dst, 0);
David S. Miller92101b32012-07-23 16:29:00 -07001595 if (dst &&
Eric Dumazet505fbcf2012-07-27 06:23:40 +00001596 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
David S. Miller92101b32012-07-23 16:29:00 -07001597 skb_dst_set_noref(skb, dst);
David S. Miller41063e92012-06-19 21:22:05 -07001598 }
1599 }
Paolo Abeni74874492017-09-28 15:51:36 +02001600 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001601}
1602
Eric Dumazetc9c33212016-08-27 07:37:54 -07001603bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1604{
1605 u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
1606
1607 /* Only socket owner can try to collapse/prune rx queues
1608 * to reduce memory overhead, so add a little headroom here.
 1609	 * Only a few socket backlogs are likely to be non-empty at the same time.
1610 */
1611 limit += 64*1024;
1612
1613 /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1614 * we can fix skb->truesize to its real value to avoid future drops.
1615 * This is valid because skb is not yet charged to the socket.
1616 * It has been noticed pure SACK packets were sometimes dropped
1617 * (if cooked by drivers without copybreak feature).
1618 */
Eric Dumazet60b1af32017-01-24 14:57:36 -08001619 skb_condense(skb);
Eric Dumazetc9c33212016-08-27 07:37:54 -07001620
1621 if (unlikely(sk_add_backlog(sk, skb, limit))) {
1622 bh_unlock_sock(sk);
1623 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1624 return true;
1625 }
1626 return false;
1627}
1628EXPORT_SYMBOL(tcp_add_backlog);
1629
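/* Run the socket filter with the TCP header protected from trimming; if the
 * filter shortens the payload, shrink end_seq so sequence accounting still
 * matches the data that is left.
 */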
Eric Dumazetac6e7802016-11-10 13:12:35 -08001630int tcp_filter(struct sock *sk, struct sk_buff *skb)
1631{
1632 struct tcphdr *th = (struct tcphdr *)skb->data;
1633 unsigned int eaten = skb->len;
1634 int err;
1635
1636 err = sk_filter_trim_cap(sk, skb, th->doff * 4);
1637 if (!err) {
1638 eaten -= skb->len;
1639 TCP_SKB_CB(skb)->end_seq -= eaten;
1640 }
1641 return err;
1642}
1643EXPORT_SYMBOL(tcp_filter);
1644
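/* Undo tcp_v4_fill_cb(): move the IP control block back to its original
 * location so the skb can go through another lookup or be processed by a
 * different socket.
 */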
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001645static void tcp_v4_restore_cb(struct sk_buff *skb)
1646{
1647 memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
1648 sizeof(struct inet_skb_parm));
1649}
1650
1651static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
1652 const struct tcphdr *th)
1653{
 1654	 * This is tricky: we move IPCB to its correct location inside TCP_SKB_CB().
 1655	 * barrier() makes sure the compiler won't play fool^Waliasing games.
1656 */
1657 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1658 sizeof(struct inet_skb_parm));
1659 barrier();
1660
1661 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1662 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1663 skb->len - th->doff * 4);
1664 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1665 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1666 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1667 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1668 TCP_SKB_CB(skb)->sacked = 0;
1669 TCP_SKB_CB(skb)->has_rxtstamp =
1670 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1671}
1672
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673/*
1674 * From tcp_input.c
1675 */
1676
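/* Main receive path for IPv4 TCP segments: validate the header and checksum,
 * look the segment up against request, established, listening and TIME_WAIT
 * sockets, run the MD5 and xfrm policy checks, then either process it
 * directly under the socket lock or queue it to the owner's backlog.
 */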
1677int tcp_v4_rcv(struct sk_buff *skb)
1678{
Eric Dumazet3b24d852016-04-01 08:52:17 -07001679 struct net *net = dev_net(skb->dev);
David Ahern3fa6f612017-08-07 08:44:17 -07001680 int sdif = inet_sdif(skb);
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001681 const struct iphdr *iph;
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001682 const struct tcphdr *th;
Eric Dumazet3b24d852016-04-01 08:52:17 -07001683 bool refcounted;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684 struct sock *sk;
1685 int ret;
1686
1687 if (skb->pkt_type != PACKET_HOST)
1688 goto discard_it;
1689
1690 /* Count it even if it's bad */
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001691 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692
1693 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1694 goto discard_it;
1695
Eric Dumazetea1627c2016-05-13 09:16:40 -07001696 th = (const struct tcphdr *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697
Eric Dumazetea1627c2016-05-13 09:16:40 -07001698 if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699 goto bad_packet;
1700 if (!pskb_may_pull(skb, th->doff * 4))
1701 goto discard_it;
1702
1703 /* An explanation is required here, I think.
1704 * Packet length and doff are validated by header prediction,
Stephen Hemmingercaa20d9a2005-11-10 17:13:47 -08001705	 * provided the case of th->doff==0 is eliminated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706 * So, we defer the checks. */
Tom Herberted70fcf2014-05-02 16:29:38 -07001707
1708 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001709 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710
Eric Dumazetea1627c2016-05-13 09:16:40 -07001711 th = (const struct tcphdr *)skb->data;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001712 iph = ip_hdr(skb);
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001713lookup:
Craig Galleka5836362016-02-10 11:50:38 -05001714 sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
David Ahern3fa6f612017-08-07 08:44:17 -07001715 th->dest, sdif, &refcounted);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716 if (!sk)
1717 goto no_tcp_socket;
1718
Eric Dumazetbb134d52010-03-09 05:55:56 +00001719process:
1720 if (sk->sk_state == TCP_TIME_WAIT)
1721 goto do_time_wait;
1722
Eric Dumazet079096f2015-10-02 11:43:32 -07001723 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1724 struct request_sock *req = inet_reqsk(sk);
Eric Dumazete0f97592018-02-13 06:14:12 -08001725 bool req_stolen = false;
Eric Dumazet77166822016-02-18 05:39:18 -08001726 struct sock *nsk;
Eric Dumazet079096f2015-10-02 11:43:32 -07001727
1728 sk = req->rsk_listener;
Eric Dumazet72923552016-02-11 22:50:29 -08001729 if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
Eric Dumazete65c3322016-08-24 08:50:24 -07001730 sk_drops_add(sk, skb);
Eric Dumazet72923552016-02-11 22:50:29 -08001731 reqsk_put(req);
1732 goto discard_it;
1733 }
Frank van der Linden4fd44a92018-06-12 23:09:37 +00001734 if (tcp_checksum_complete(skb)) {
1735 reqsk_put(req);
1736 goto csum_error;
1737 }
Eric Dumazet77166822016-02-18 05:39:18 -08001738 if (unlikely(sk->sk_state != TCP_LISTEN)) {
Eric Dumazetf03f2e12015-10-14 11:16:27 -07001739 inet_csk_reqsk_queue_drop_and_put(sk, req);
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001740 goto lookup;
1741 }
Eric Dumazet3b24d852016-04-01 08:52:17 -07001742 /* We own a reference on the listener, increase it again
1743 * as we might lose it too soon.
1744 */
Eric Dumazet77166822016-02-18 05:39:18 -08001745 sock_hold(sk);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001746 refcounted = true;
Eric Dumazet1f3b3592017-09-08 12:44:47 -07001747 nsk = NULL;
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001748 if (!tcp_filter(sk, skb)) {
1749 th = (const struct tcphdr *)skb->data;
1750 iph = ip_hdr(skb);
1751 tcp_v4_fill_cb(skb, iph, th);
Eric Dumazete0f97592018-02-13 06:14:12 -08001752 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001753 }
Eric Dumazet079096f2015-10-02 11:43:32 -07001754 if (!nsk) {
1755 reqsk_put(req);
Eric Dumazete0f97592018-02-13 06:14:12 -08001756 if (req_stolen) {
1757 /* Another cpu got exclusive access to req
1758 * and created a full blown socket.
1759 * Try to feed this packet to this socket
1760 * instead of discarding it.
1761 */
1762 tcp_v4_restore_cb(skb);
1763 sock_put(sk);
1764 goto lookup;
1765 }
Eric Dumazet77166822016-02-18 05:39:18 -08001766 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001767 }
1768 if (nsk == sk) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001769 reqsk_put(req);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001770 tcp_v4_restore_cb(skb);
Eric Dumazet079096f2015-10-02 11:43:32 -07001771 } else if (tcp_child_process(sk, nsk, skb)) {
1772 tcp_v4_send_reset(nsk, skb);
Eric Dumazet77166822016-02-18 05:39:18 -08001773 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001774 } else {
Eric Dumazet77166822016-02-18 05:39:18 -08001775 sock_put(sk);
Eric Dumazet079096f2015-10-02 11:43:32 -07001776 return 0;
1777 }
1778 }
Eric Dumazet6cce09f2010-03-07 23:21:57 +00001779 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001780 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
Stephen Hemmingerd218d112010-01-11 16:28:01 -08001781 goto discard_and_relse;
Eric Dumazet6cce09f2010-03-07 23:21:57 +00001782 }
Stephen Hemmingerd218d112010-01-11 16:28:01 -08001783
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1785 goto discard_and_relse;
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001786
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001787 if (tcp_v4_inbound_md5_hash(sk, skb))
1788 goto discard_and_relse;
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001789
Patrick McHardyb59c2702006-01-06 23:06:10 -08001790 nf_reset(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791
Eric Dumazetac6e7802016-11-10 13:12:35 -08001792 if (tcp_filter(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 goto discard_and_relse;
Eric Dumazetac6e7802016-11-10 13:12:35 -08001794 th = (const struct tcphdr *)skb->data;
1795 iph = ip_hdr(skb);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001796 tcp_v4_fill_cb(skb, iph, th);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797
1798 skb->dev = NULL;
1799
Eric Dumazete994b2f2015-10-02 11:43:39 -07001800 if (sk->sk_state == TCP_LISTEN) {
1801 ret = tcp_v4_do_rcv(sk, skb);
1802 goto put_and_return;
1803 }
1804
1805 sk_incoming_cpu_update(sk);
1806
Ingo Molnarc6366182006-07-03 00:25:13 -07001807 bh_lock_sock_nested(sk);
Martin KaFai Laua44d6ea2016-03-14 10:52:15 -07001808 tcp_segs_in(tcp_sk(sk), skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809 ret = 0;
1810 if (!sock_owned_by_user(sk)) {
Florian Westphale7942d02017-07-30 03:57:18 +02001811 ret = tcp_v4_do_rcv(sk, skb);
Eric Dumazetc9c33212016-08-27 07:37:54 -07001812 } else if (tcp_add_backlog(sk, skb)) {
Zhu Yi6b03a532010-03-04 18:01:41 +00001813 goto discard_and_relse;
1814 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 bh_unlock_sock(sk);
1816
Eric Dumazete994b2f2015-10-02 11:43:39 -07001817put_and_return:
Eric Dumazet3b24d852016-04-01 08:52:17 -07001818 if (refcounted)
1819 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820
1821 return ret;
1822
1823no_tcp_socket:
1824 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1825 goto discard_it;
1826
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001827 tcp_v4_fill_cb(skb, iph, th);
1828
Eric Dumazet12e25e12015-06-03 23:49:21 -07001829 if (tcp_checksum_complete(skb)) {
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001830csum_error:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001831 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832bad_packet:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001833 __TCP_INC_STATS(net, TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834 } else {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001835 tcp_v4_send_reset(NULL, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836 }
1837
1838discard_it:
1839 /* Discard frame. */
1840 kfree_skb(skb);
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001841 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842
1843discard_and_relse:
Eric Dumazet532182c2016-04-01 08:52:19 -07001844 sk_drops_add(sk, skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001845 if (refcounted)
1846 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847 goto discard_it;
1848
1849do_time_wait:
1850 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001851 inet_twsk_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852 goto discard_it;
1853 }
1854
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001855 tcp_v4_fill_cb(skb, iph, th);
1856
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001857 if (tcp_checksum_complete(skb)) {
1858 inet_twsk_put(inet_twsk(sk));
1859 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860 }
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001861 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862 case TCP_TW_SYN: {
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001863 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
Craig Galleka5836362016-02-10 11:50:38 -05001864 &tcp_hashinfo, skb,
1865 __tcp_hdrlen(th),
Tom Herbertda5e36302013-01-22 09:50:24 +00001866 iph->saddr, th->source,
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001867 iph->daddr, th->dest,
David Ahern3fa6f612017-08-07 08:44:17 -07001868 inet_iif(skb),
1869 sdif);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870 if (sk2) {
Eric Dumazetdbe7faa2015-07-08 14:28:30 -07001871 inet_twsk_deschedule_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872 sk = sk2;
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001873 tcp_v4_restore_cb(skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001874 refcounted = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875 goto process;
1876 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 }
Gustavo A. R. Silvafcfd6df2017-10-16 15:48:55 -05001878 /* to ACK */
1879 /* fall through */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880 case TCP_TW_ACK:
1881 tcp_v4_timewait_ack(sk, skb);
1882 break;
1883 case TCP_TW_RST:
Florian Westphal271c3b92015-12-21 21:29:26 +01001884 tcp_v4_send_reset(sk, skb);
1885 inet_twsk_deschedule_put(inet_twsk(sk));
1886 goto discard_it;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887 case TCP_TW_SUCCESS:;
1888 }
1889 goto discard_it;
1890}
1891
David S. Millerccb7c412010-12-01 18:09:13 -08001892static struct timewait_sock_ops tcp_timewait_sock_ops = {
1893 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1894 .twsk_unique = tcp_twsk_unique,
1895 .twsk_destructor= tcp_twsk_destructor,
David S. Millerccb7c412010-12-01 18:09:13 -08001896};
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897
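/* Cache the incoming route on the socket (taking a reference) so later
 * segments can skip the routing lookup on the early-demux fast path.
 */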
Eric Dumazet63d02d12012-08-09 14:11:00 +00001898void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
Eric Dumazet5d299f32012-08-06 05:09:33 +00001899{
1900 struct dst_entry *dst = skb_dst(skb);
1901
Eric Dumazet5037e9e2015-12-14 14:08:53 -08001902 if (dst && dst_hold_safe(dst)) {
Eric Dumazetca777ef2014-09-08 08:06:07 -07001903 sk->sk_rx_dst = dst;
1904 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1905 }
Eric Dumazet5d299f32012-08-06 05:09:33 +00001906}
Eric Dumazet63d02d12012-08-09 14:11:00 +00001907EXPORT_SYMBOL(inet_sk_rx_dst_set);
Eric Dumazet5d299f32012-08-06 05:09:33 +00001908
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001909const struct inet_connection_sock_af_ops ipv4_specific = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001910 .queue_xmit = ip_queue_xmit,
1911 .send_check = tcp_v4_send_check,
1912 .rebuild_header = inet_sk_rebuild_header,
Eric Dumazet5d299f32012-08-06 05:09:33 +00001913 .sk_rx_dst_set = inet_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001914 .conn_request = tcp_v4_conn_request,
1915 .syn_recv_sock = tcp_v4_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001916 .net_header_len = sizeof(struct iphdr),
1917 .setsockopt = ip_setsockopt,
1918 .getsockopt = ip_getsockopt,
1919 .addr2sockaddr = inet_csk_addr2sockaddr,
1920 .sockaddr_len = sizeof(struct sockaddr_in),
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001921#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001922 .compat_setsockopt = compat_ip_setsockopt,
1923 .compat_getsockopt = compat_ip_getsockopt,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001924#endif
Neal Cardwell4fab9072014-08-14 12:40:05 -04001925 .mtu_reduced = tcp_v4_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926};
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001927EXPORT_SYMBOL(ipv4_specific);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001929#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3de2009-09-01 19:25:03 +00001930static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001931 .md5_lookup = tcp_v4_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001932 .calc_md5_hash = tcp_v4_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001933 .md5_parse = tcp_v4_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001934};
Andrew Mortonb6332e62006-11-30 19:16:28 -08001935#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001936
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937/* NOTE: A lot of things are set to zero explicitly by the call to
 1938 * sk_alloc(), so they need not be done here.
1939 */
1940static int tcp_v4_init_sock(struct sock *sk)
1941{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001942 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943
Neal Cardwell900f65d2012-04-19 09:55:21 +00001944 tcp_init_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001946 icsk->icsk_af_ops = &ipv4_specific;
Neal Cardwell900f65d2012-04-19 09:55:21 +00001947
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001948#ifdef CONFIG_TCP_MD5SIG
David S. Millerac807fa2012-04-23 03:21:58 -04001949 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001950#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 return 0;
1953}
1954
Brian Haley7d06b2e2008-06-14 17:04:49 -07001955void tcp_v4_destroy_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956{
1957 struct tcp_sock *tp = tcp_sk(sk);
1958
Song Liue1a4aa52017-10-23 09:20:26 -07001959 trace_tcp_destroy_sock(sk);
1960
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 tcp_clear_xmit_timers(sk);
1962
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001963 tcp_cleanup_congestion_control(sk);
Stephen Hemminger317a76f2005-06-23 12:19:55 -07001964
Dave Watson734942c2017-06-14 11:37:14 -07001965 tcp_cleanup_ulp(sk);
1966
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 /* Cleanup up the write buffer. */
David S. Millerfe067e82007-03-07 12:12:44 -08001968 tcp_write_queue_purge(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969
Wei Wangcf1ef3f2017-04-20 14:45:46 -07001970 /* Check if we want to disable active TFO */
1971 tcp_fastopen_active_disable_ofo_check(sk);
1972
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973 /* Cleans up our, hopefully empty, out_of_order_queue. */
Yaogong Wang9f5afea2016-09-07 14:49:28 -07001974 skb_rbtree_purge(&tp->out_of_order_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001976#ifdef CONFIG_TCP_MD5SIG
1977 /* Clean up the MD5 key list, if any */
1978 if (tp->md5sig_info) {
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001979 tcp_clear_md5_list(sk);
Mat Martineaufb7df5e2017-12-21 10:29:10 -08001980 kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001981 tp->md5sig_info = NULL;
1982 }
1983#endif
1984
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985 /* Clean up a referenced TCP bind bucket. */
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001986 if (inet_csk(sk)->icsk_bind_hash)
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08001987 inet_put_port(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988
Ian Morris00db4122015-04-03 09:17:27 +01001989 BUG_ON(tp->fastopen_rsk);
William Allen Simpson435cf552009-12-02 18:17:05 +00001990
Yuchung Chengcf60af02012-07-19 06:43:09 +00001991 /* If socket is aborted during connect operation */
1992 tcp_free_fastopen_req(tp);
Yuchung Cheng1fba70e2017-10-18 11:22:51 -07001993 tcp_fastopen_destroy_cipher(sk);
Eric Dumazetcd8ae852015-05-03 21:34:46 -07001994 tcp_saved_syn_free(tp);
Yuchung Chengcf60af02012-07-19 06:43:09 +00001995
Glauber Costa180d8cd2011-12-11 21:47:02 +00001996 sk_sockets_allocated_dec(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998EXPORT_SYMBOL(tcp_v4_destroy_sock);
1999
2000#ifdef CONFIG_PROC_FS
2001/* Proc filesystem TCP sock list dumping. */
2002
Tom Herberta8b690f2010-06-07 00:43:42 -07002003/*
2004 * Get next listener socket follow cur. If cur is NULL, get first socket
2005 * starting from bucket given in st->bucket; when st->bucket is zero the
2006 * very first socket in the hash table is returned.
2007 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008static void *listening_get_next(struct seq_file *seq, void *cur)
2009{
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002010 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
Jianjun Kong5799de02008-11-03 02:49:10 -08002011 struct tcp_iter_state *st = seq->private;
Denis V. Luneva4146b12008-04-13 22:11:14 -07002012 struct net *net = seq_file_net(seq);
Eric Dumazet3b24d852016-04-01 08:52:17 -07002013 struct inet_listen_hashbucket *ilb;
Eric Dumazet3b24d852016-04-01 08:52:17 -07002014 struct sock *sk = cur;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015
2016 if (!sk) {
Eric Dumazet3b24d852016-04-01 08:52:17 -07002017get_head:
Tom Herberta8b690f2010-06-07 00:43:42 -07002018 ilb = &tcp_hashinfo.listening_hash[st->bucket];
Eric Dumazet9652dc22016-10-19 21:24:58 -07002019 spin_lock(&ilb->lock);
Eric Dumazet3b24d852016-04-01 08:52:17 -07002020 sk = sk_head(&ilb->head);
Tom Herberta8b690f2010-06-07 00:43:42 -07002021 st->offset = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022 goto get_sk;
2023 }
Eric Dumazet5caea4e2008-11-20 00:40:07 -08002024 ilb = &tcp_hashinfo.listening_hash[st->bucket];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025 ++st->num;
Tom Herberta8b690f2010-06-07 00:43:42 -07002026 ++st->offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027
Eric Dumazet3b24d852016-04-01 08:52:17 -07002028 sk = sk_next(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029get_sk:
Eric Dumazet3b24d852016-04-01 08:52:17 -07002030 sk_for_each_from(sk) {
Pavel Emelyanov8475ef92010-11-22 03:26:12 +00002031 if (!net_eq(sock_net(sk), net))
2032 continue;
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002033 if (sk->sk_family == afinfo->family)
Eric Dumazet3b24d852016-04-01 08:52:17 -07002034 return sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035 }
Eric Dumazet9652dc22016-10-19 21:24:58 -07002036 spin_unlock(&ilb->lock);
Tom Herberta8b690f2010-06-07 00:43:42 -07002037 st->offset = 0;
Eric Dumazet3b24d852016-04-01 08:52:17 -07002038 if (++st->bucket < INET_LHTABLE_SIZE)
2039 goto get_head;
2040 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041}
2042
2043static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2044{
Tom Herberta8b690f2010-06-07 00:43:42 -07002045 struct tcp_iter_state *st = seq->private;
2046 void *rc;
2047
2048 st->bucket = 0;
2049 st->offset = 0;
2050 rc = listening_get_next(seq, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051
2052 while (rc && *pos) {
2053 rc = listening_get_next(seq, rc);
2054 --*pos;
2055 }
2056 return rc;
2057}
2058
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002059static inline bool empty_bucket(const struct tcp_iter_state *st)
Andi Kleen6eac5602008-08-28 01:08:02 -07002060{
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002061 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
Andi Kleen6eac5602008-08-28 01:08:02 -07002062}
2063
Tom Herberta8b690f2010-06-07 00:43:42 -07002064/*
2065 * Get first established socket starting from bucket given in st->bucket.
2066 * If st->bucket is zero, the very first socket in the hash is returned.
2067 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068static void *established_get_first(struct seq_file *seq)
2069{
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002070 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
Jianjun Kong5799de02008-11-03 02:49:10 -08002071 struct tcp_iter_state *st = seq->private;
Denis V. Luneva4146b12008-04-13 22:11:14 -07002072 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073 void *rc = NULL;
2074
Tom Herberta8b690f2010-06-07 00:43:42 -07002075 st->offset = 0;
2076 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077 struct sock *sk;
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002078 struct hlist_nulls_node *node;
Eric Dumazet9db66bd2008-11-20 20:39:09 -08002079 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080
Andi Kleen6eac5602008-08-28 01:08:02 -07002081 /* Lockless fast path for the common case of empty buckets */
2082 if (empty_bucket(st))
2083 continue;
2084
Eric Dumazet9db66bd2008-11-20 20:39:09 -08002085 spin_lock_bh(lock);
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002086 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002087 if (sk->sk_family != afinfo->family ||
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09002088 !net_eq(sock_net(sk), net)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089 continue;
2090 }
2091 rc = sk;
2092 goto out;
2093 }
Eric Dumazet9db66bd2008-11-20 20:39:09 -08002094 spin_unlock_bh(lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095 }
2096out:
2097 return rc;
2098}
2099
2100static void *established_get_next(struct seq_file *seq, void *cur)
2101{
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002102 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103 struct sock *sk = cur;
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002104 struct hlist_nulls_node *node;
Jianjun Kong5799de02008-11-03 02:49:10 -08002105 struct tcp_iter_state *st = seq->private;
Denis V. Luneva4146b12008-04-13 22:11:14 -07002106 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107
2108 ++st->num;
Tom Herberta8b690f2010-06-07 00:43:42 -07002109 ++st->offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002111 sk = sk_nulls_next(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002113 sk_nulls_for_each_from(sk, node) {
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002114 if (sk->sk_family == afinfo->family &&
2115 net_eq(sock_net(sk), net))
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002116 return sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117 }
2118
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002119 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2120 ++st->bucket;
2121 return established_get_first(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122}
2123
2124static void *established_get_idx(struct seq_file *seq, loff_t pos)
2125{
Tom Herberta8b690f2010-06-07 00:43:42 -07002126 struct tcp_iter_state *st = seq->private;
2127 void *rc;
2128
2129 st->bucket = 0;
2130 rc = established_get_first(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131
2132 while (rc && pos) {
2133 rc = established_get_next(seq, rc);
2134 --pos;
Arnaldo Carvalho de Melo71742592006-11-17 10:57:30 -02002135 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136 return rc;
2137}
2138
2139static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2140{
2141 void *rc;
Jianjun Kong5799de02008-11-03 02:49:10 -08002142 struct tcp_iter_state *st = seq->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 st->state = TCP_SEQ_STATE_LISTENING;
2145 rc = listening_get_idx(seq, &pos);
2146
2147 if (!rc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148 st->state = TCP_SEQ_STATE_ESTABLISHED;
2149 rc = established_get_idx(seq, pos);
2150 }
2151
2152 return rc;
2153}
2154
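/* Resume the /proc iteration at the bucket and in-bucket offset remembered
 * from the previous read() chunk, instead of rescanning from the start.
 */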
Tom Herberta8b690f2010-06-07 00:43:42 -07002155static void *tcp_seek_last_pos(struct seq_file *seq)
2156{
2157 struct tcp_iter_state *st = seq->private;
2158 int offset = st->offset;
2159 int orig_num = st->num;
2160 void *rc = NULL;
2161
2162 switch (st->state) {
Tom Herberta8b690f2010-06-07 00:43:42 -07002163 case TCP_SEQ_STATE_LISTENING:
2164 if (st->bucket >= INET_LHTABLE_SIZE)
2165 break;
2166 st->state = TCP_SEQ_STATE_LISTENING;
2167 rc = listening_get_next(seq, NULL);
2168 while (offset-- && rc)
2169 rc = listening_get_next(seq, rc);
2170 if (rc)
2171 break;
2172 st->bucket = 0;
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002173 st->state = TCP_SEQ_STATE_ESTABLISHED;
Tom Herberta8b690f2010-06-07 00:43:42 -07002174 /* Fallthrough */
2175 case TCP_SEQ_STATE_ESTABLISHED:
Tom Herberta8b690f2010-06-07 00:43:42 -07002176 if (st->bucket > tcp_hashinfo.ehash_mask)
2177 break;
2178 rc = established_get_first(seq);
2179 while (offset-- && rc)
2180 rc = established_get_next(seq, rc);
2181 }
2182
2183 st->num = orig_num;
2184
2185 return rc;
2186}
2187
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002188void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189{
Jianjun Kong5799de02008-11-03 02:49:10 -08002190 struct tcp_iter_state *st = seq->private;
Tom Herberta8b690f2010-06-07 00:43:42 -07002191 void *rc;
2192
2193 if (*pos && *pos == st->last_pos) {
2194 rc = tcp_seek_last_pos(seq);
2195 if (rc)
2196 goto out;
2197 }
2198
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199 st->state = TCP_SEQ_STATE_LISTENING;
2200 st->num = 0;
Tom Herberta8b690f2010-06-07 00:43:42 -07002201 st->bucket = 0;
2202 st->offset = 0;
2203 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2204
2205out:
2206 st->last_pos = *pos;
2207 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208}
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002209EXPORT_SYMBOL(tcp_seq_start);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210
void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
EXPORT_SYMBOL(tcp_seq_next);

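/* seq_file ->stop() callback: drop the bucket lock taken by the iterator. */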
void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
EXPORT_SYMBOL(tcp_seq_stop);

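/* Format one SYN_RECV request socket as a /proc/net/tcp line. */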
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}

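/* Format one full socket as a /proc/net/tcp line. */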
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		refcount_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

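/* Format one TIME_WAIT socket as a /proc/net/tcp line. */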
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		refcount_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

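/* ->show() callback: dispatch on socket state to the matching formatter. */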
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}

static const struct seq_operations tcp4_seq_ops = {
	.show		= tcp4_seq_show,
	.start		= tcp_seq_start,
	.next		= tcp_seq_next,
	.stop		= tcp_seq_stop,
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.family		= AF_INET,
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
			sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
		return -ENOMEM;
	return 0;
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	remove_proc_entry("tcp", net->proc_net);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.pre_connect		= tcp_v4_pre_connect,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);

static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	module_put(net->ipv4.tcp_congestion_control->owner);

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}

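/* Per-netns initialization: create the per-cpu control sockets and set
 * this namespace's TCP sysctl defaults.
 */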
static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu, cnt;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
	net->ipv4.sysctl_tcp_tw_reuse = 2;

	cnt = tcp_hashinfo.ehash_mask + 1;
	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
	net->ipv4.sysctl_tcp_sack = 1;
	net->ipv4.sysctl_tcp_window_scaling = 1;
	net->ipv4.sysctl_tcp_timestamps = 1;
	net->ipv4.sysctl_tcp_early_retrans = 3;
	net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
	net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior. */
	net->ipv4.sysctl_tcp_retrans_collapse = 1;
	net->ipv4.sysctl_tcp_max_reordering = 300;
	net->ipv4.sysctl_tcp_dsack = 1;
	net->ipv4.sysctl_tcp_app_win = 31;
	net->ipv4.sysctl_tcp_adv_win_scale = 1;
	net->ipv4.sysctl_tcp_frto = 2;
	net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
	/* This limits the percentage of the congestion window which we
	 * will allow a single TSO frame to consume.  Building TSO frames
	 * which are too large can cause TCP streams to be bursty.
	 */
	net->ipv4.sysctl_tcp_tso_win_divisor = 3;
	/* Default TSQ limit of four TSO segments */
	net->ipv4.sysctl_tcp_limit_output_bytes = 262144;
	/* rfc5961 challenge ack rate limiting */
	net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
	net->ipv4.sysctl_tcp_min_tso_segs = 2;
	net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
	net->ipv4.sysctl_tcp_autocorking = 1;
	net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
	net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
	net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
	if (net != &init_net) {
		memcpy(net->ipv4.sysctl_tcp_rmem,
		       init_net.ipv4.sysctl_tcp_rmem,
		       sizeof(init_net.ipv4.sysctl_tcp_rmem));
		memcpy(net->ipv4.sysctl_tcp_wmem,
		       init_net.ipv4.sysctl_tcp_wmem,
		       sizeof(init_net.ipv4.sysctl_tcp_wmem));
	}
	net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
	net->ipv4.sysctl_tcp_comp_sack_nr = 44;
	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
	atomic_set(&net->ipv4.tfo_active_disable_times, 0);

	/* Reno is always built in */
	if (!net_eq(net, &init_net) &&
	    try_module_get(init_net.ipv4.tcp_congestion_control->owner))
		net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
	else
		net->ipv4.tcp_congestion_control = &tcp_reno;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}

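/* Batched per-netns teardown: purge TIME_WAIT sockets and release the
 * TCP fastopen contexts of the namespaces going away.
 */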
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	inet_twsk_purge(&tcp_hashinfo, AF_INET);

	list_for_each_entry(net, net_exit_list, exit_list)
		tcp_fastopen_ctx_destroy(net);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	   = tcp_sk_init,
	.exit	   = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}