/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

/* Helper returning the inet6 address from a given tcp socket.
 * It can be used in TCP stack instead of inet6_sk(sk).
 * This avoids a dereference and allow compiler optimizations.
 */
static struct ipv6_pinfo *tcp_inet6_sk(const struct sock *sk)
{
	struct tcp6_sock *tcp6 = container_of(tcp_sk(sk), struct tcp6_sock, tcp);

	return &tcp6->inet6;
}

static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		tcp_inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

static u32 tcp_v6_init_seq(const struct sk_buff *skb)
{
	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
				ipv6_hdr(skb)->saddr.s6_addr32,
				tcp_hdr(skb)->dest,
				tcp_hdr(skb)->source);
}

static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
				   ipv6_hdr(skb)->saddr.s6_addr32);
}

static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v6_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
}

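/* Connect an IPv6 TCP socket: validate the destination address (falling
 * back to tcp_v4_connect() for v4-mapped destinations), pick a route and
 * source address, then start the handshake via tcp_connect().
 */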
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
							 sk->sk_v6_daddr.s6_addr32,
							 inet->inet_sport,
							 inet->inet_dport);
		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
						   np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

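/* ICMPv6 error handler: look up the socket the error refers to and react
 * accordingly (PMTU update for PKT_TOOBIG, dst redirect, or reporting the
 * error to the owning socket), mirroring tcp_v4_err() on the IPv4 side.
 */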
static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex, inet6_sdif(skb));

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return -ENOENT;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return 0;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		tcp_req_err(sk, seq, fatal);
		return 0;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = tcp_inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}


	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return 0;
}


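/* Build and transmit a SYN-ACK for a request socket, routing it with the
 * request's flow information and any saved IPv6 tx options.
 */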
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}


static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 prefixlen;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
					prefixlen > 32))
			return -EINVAL;
	} else {
		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
	}

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET, prefixlen);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6, prefixlen);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, prefixlen, cmd.tcpm_key,
				      cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, prefixlen, cmd.tcpm_key,
			      cmd.tcpm_keylen, GFP_KERNEL);
}

static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif

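/* Verify the TCP MD5 option on an incoming segment against the key (if any)
 * configured for the peer address; returns true when the segment must be
 * dropped. Always returns false when CONFIG_TCP_MD5SIG is disabled.
 */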
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}

static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_seq,
	.init_ts_off	=	tcp_v6_init_ts_off,
	.send_synack	=	tcp_v6_send_synack,
};

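/* Common helper for stateless replies (RST and ACK): build a bare TCP
 * header with optional timestamp and MD5 options, swap the addresses from
 * the incoming skb and send the reply via the per-netns control socket.
 */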
Eric Dumazeta00e7442015-09-29 07:42:39 -0700805static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
Eric Dumazet0f85fea2014-12-09 09:56:08 -0800806 u32 ack, u32 win, u32 tsval, u32 tsecr,
807 int oif, struct tcp_md5sig_key *key, int rst,
Hannes Frederic Sowa5119bd12016-06-11 20:41:38 +0200808 u8 tclass, __be32 label)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700809{
Eric Dumazetcf533ea2011-10-21 05:22:42 -0400810 const struct tcphdr *th = tcp_hdr(skb);
811 struct tcphdr *t1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700812 struct sk_buff *buff;
David S. Miller4c9483b2011-03-12 16:22:43 -0500813 struct flowi6 fl6;
Eric Dumazet0f85fea2014-12-09 09:56:08 -0800814 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
Daniel Lezcanoe5047992008-03-07 11:16:26 -0800815 struct sock *ctl_sk = net->ipv6.tcp_sk;
YOSHIFUJI Hideaki9cb5734e2008-01-12 02:16:03 -0800816 unsigned int tot_len = sizeof(struct tcphdr);
Eric Dumazetadf30902009-06-02 05:19:30 +0000817 struct dst_entry *dst;
Al Viroe69a4adc2006-11-14 20:56:00 -0800818 __be32 *topt;
Jon Maxwell00483692018-05-10 16:53:51 +1000819 __u32 mark = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700820
Andrey Vaginee684b62013-02-11 05:50:19 +0000821 if (tsecr)
YOSHIFUJI Hideaki4244f8a2006-10-10 19:40:50 -0700822 tot_len += TCPOLEN_TSTAMP_ALIGNED;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800823#ifdef CONFIG_TCP_MD5SIG
824 if (key)
825 tot_len += TCPOLEN_MD5SIG_ALIGNED;
826#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700827
828 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
829 GFP_ATOMIC);
Ian Morris63159f22015-03-29 14:00:04 +0100830 if (!buff)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700831 return;
832
833 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
834
Johannes Bergd58ff352017-06-16 14:29:23 +0200835 t1 = skb_push(buff, tot_len);
Herbert Xu6651ffc2010-04-21 00:47:15 -0700836 skb_reset_transport_header(buff);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700837
838 /* Swap the send and the receive. */
839 memset(t1, 0, sizeof(*t1));
840 t1->dest = th->source;
841 t1->source = th->dest;
Ilpo Järvinen77c676d2008-10-09 14:41:38 -0700842 t1->doff = tot_len / 4;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700843 t1->seq = htonl(seq);
844 t1->ack_seq = htonl(ack);
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700845 t1->ack = !rst || !th->ack;
846 t1->rst = rst;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700847 t1->window = htons(win);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800848
Al Viroe69a4adc2006-11-14 20:56:00 -0800849 topt = (__be32 *)(t1 + 1);
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +0900850
Andrey Vaginee684b62013-02-11 05:50:19 +0000851 if (tsecr) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800852 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
853 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
Andrey Vaginee684b62013-02-11 05:50:19 +0000854 *topt++ = htonl(tsval);
855 *topt++ = htonl(tsecr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700856 }
857
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800858#ifdef CONFIG_TCP_MD5SIG
859 if (key) {
860 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
861 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
Adam Langley49a72df2008-07-19 00:01:42 -0700862 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
Adam Langley90b7e112008-07-31 20:49:48 -0700863 &ipv6_hdr(skb)->saddr,
864 &ipv6_hdr(skb)->daddr, t1);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800865 }
866#endif
867
David S. Miller4c9483b2011-03-12 16:22:43 -0500868 memset(&fl6, 0, sizeof(fl6));
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +0000869 fl6.daddr = ipv6_hdr(skb)->saddr;
870 fl6.saddr = ipv6_hdr(skb)->daddr;
Florent Fourcot1d13a962014-01-16 17:21:22 +0100871 fl6.flowlabel = label;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700872
David S. Millere5700af2010-04-21 14:59:20 -0700873 buff->ip_summed = CHECKSUM_PARTIAL;
874 buff->csum = 0;
875
David S. Miller4c9483b2011-03-12 16:22:43 -0500876 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700877
David S. Miller4c9483b2011-03-12 16:22:43 -0500878 fl6.flowi6_proto = IPPROTO_TCP;
Lorenzo Colittia36dbdb2014-04-11 13:19:12 +0900879 if (rt6_need_strict(&fl6.daddr) && !oif)
Eric Dumazet870c3152014-10-17 09:17:20 -0700880 fl6.flowi6_oif = tcp_v6_iif(skb);
David Ahern9b6c14d2016-11-09 09:07:26 -0800881 else {
882 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
883 oif = skb->skb_iif;
884
885 fl6.flowi6_oif = oif;
886 }
David Ahern1d2f7b22016-05-04 21:26:08 -0700887
Jon Maxwell00483692018-05-10 16:53:51 +1000888 if (sk)
889 mark = (sk->sk_state == TCP_TIME_WAIT) ?
890 inet_twsk(sk)->tw_mark : sk->sk_mark;
891 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
David S. Miller1958b852011-03-12 16:36:19 -0500892 fl6.fl6_dport = t1->dest;
893 fl6.fl6_sport = t1->source;
Lorenzo Colittie2d118a2016-11-04 02:23:43 +0900894 fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
David S. Miller4c9483b2011-03-12 16:22:43 -0500895 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700896
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700897 /* Pass a socket to ip6_dst_lookup either it is for RST
898 * Underlying function will use this to retrieve the network
899 * namespace
900 */
Steffen Klassert0e0d44a2013-08-28 08:04:14 +0200901 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
David S. Miller68d0c6d2011-03-01 13:19:07 -0800902 if (!IS_ERR(dst)) {
903 skb_dst_set(buff, dst);
Pablo Neira92e55f42017-01-26 22:56:21 +0100904 ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
Eric Dumazetc10d9312016-04-29 14:16:47 -0700905 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
David S. Miller68d0c6d2011-03-01 13:19:07 -0800906 if (rst)
Eric Dumazetc10d9312016-04-29 14:16:47 -0700907 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
David S. Miller68d0c6d2011-03-01 13:19:07 -0800908 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700909 }
910
911 kfree_skb(buff);
912}
913
Eric Dumazeta00e7442015-09-29 07:42:39 -0700914static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700915{
Eric Dumazetcf533ea2011-10-21 05:22:42 -0400916 const struct tcphdr *th = tcp_hdr(skb);
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700917 u32 seq = 0, ack_seq = 0;
Guo-Fu Tsengfa3e5b42008-10-09 21:11:56 -0700918 struct tcp_md5sig_key *key = NULL;
Shawn Lu658ddaa2012-01-31 22:35:48 +0000919#ifdef CONFIG_TCP_MD5SIG
920 const __u8 *hash_location = NULL;
921 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
922 unsigned char newhash[16];
923 int genhash;
924 struct sock *sk1 = NULL;
925#endif
Song Liuc24b14c2017-10-23 09:20:24 -0700926 int oif = 0;
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700927
928 if (th->rst)
929 return;
930
Eric Dumazetc3658e82014-11-25 07:40:04 -0800931 /* If sk not NULL, it means we did a successful lookup and incoming
932 * route had to be correct. prequeue might have dropped our dst.
933 */
934 if (!sk && !ipv6_unicast_destination(skb))
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700935 return;
936
937#ifdef CONFIG_TCP_MD5SIG
Eric Dumazet3b24d852016-04-01 08:52:17 -0700938 rcu_read_lock();
Shawn Lu658ddaa2012-01-31 22:35:48 +0000939 hash_location = tcp_parse_md5sig_option(th);
Florian Westphal271c3b92015-12-21 21:29:26 +0100940 if (sk && sk_fullsock(sk)) {
Florian Westphale46787f2015-12-21 21:29:25 +0100941 key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
942 } else if (hash_location) {
Shawn Lu658ddaa2012-01-31 22:35:48 +0000943 /*
944 * active side is lost. Try to find listening socket through
945 * source port, and then find md5 key through listening socket.
946 * we are not loose security here:
947 * Incoming packet is checked with md5 hash with finding key,
948 * no RST generated if md5 hash doesn't match.
949 */
950 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
Craig Galleka5836362016-02-10 11:50:38 -0500951 &tcp_hashinfo, NULL, 0,
952 &ipv6h->saddr,
Tom Herbert5ba24952013-01-22 09:50:39 +0000953 th->source, &ipv6h->daddr,
David Ahern24b711e2018-07-19 12:41:18 -0700954 ntohs(th->source),
955 tcp_v6_iif_l3_slave(skb),
David Ahern4297a0e2017-08-07 08:44:21 -0700956 tcp_v6_sdif(skb));
Shawn Lu658ddaa2012-01-31 22:35:48 +0000957 if (!sk1)
Eric Dumazet3b24d852016-04-01 08:52:17 -0700958 goto out;
Shawn Lu658ddaa2012-01-31 22:35:48 +0000959
Shawn Lu658ddaa2012-01-31 22:35:48 +0000960 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
961 if (!key)
Eric Dumazet3b24d852016-04-01 08:52:17 -0700962 goto out;
Shawn Lu658ddaa2012-01-31 22:35:48 +0000963
Eric Dumazet39f8e582015-03-24 15:58:55 -0700964 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
Shawn Lu658ddaa2012-01-31 22:35:48 +0000965 if (genhash || memcmp(hash_location, newhash, 16) != 0)
Eric Dumazet3b24d852016-04-01 08:52:17 -0700966 goto out;
Shawn Lu658ddaa2012-01-31 22:35:48 +0000967 }
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700968#endif
969
970 if (th->ack)
971 seq = ntohl(th->ack_seq);
972 else
973 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
974 (th->doff << 2);
975
Song Liuc24b14c2017-10-23 09:20:24 -0700976 if (sk) {
977 oif = sk->sk_bound_dev_if;
Song Liu5c487bb2018-02-06 20:50:23 -0800978 if (sk_fullsock(sk))
979 trace_tcp_send_reset(sk, skb);
Song Liuc24b14c2017-10-23 09:20:24 -0700980 }
981
Eric Dumazet0f85fea2014-12-09 09:56:08 -0800982 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
Shawn Lu658ddaa2012-01-31 22:35:48 +0000983
984#ifdef CONFIG_TCP_MD5SIG
Eric Dumazet3b24d852016-04-01 08:52:17 -0700985out:
986 rcu_read_unlock();
Shawn Lu658ddaa2012-01-31 22:35:48 +0000987#endif
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700988}
989
Eric Dumazeta00e7442015-09-29 07:42:39 -0700990static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
Eric Dumazet0f85fea2014-12-09 09:56:08 -0800991 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
Florent Fourcot1d13a962014-01-16 17:21:22 +0100992 struct tcp_md5sig_key *key, u8 tclass,
Hannes Frederic Sowa5119bd12016-06-11 20:41:38 +0200993 __be32 label)
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700994{
Eric Dumazet0f85fea2014-12-09 09:56:08 -0800995 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
996 tclass, label);
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700997}
998
Linus Torvalds1da177e2005-04-16 15:20:36 -0700999static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1000{
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07001001 struct inet_timewait_sock *tw = inet_twsk(sk);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001002 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001003
Eric Dumazet0f85fea2014-12-09 09:56:08 -08001004 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07001005 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
Eric Dumazet9a568de2017-05-16 14:00:14 -07001006 tcp_time_stamp_raw() + tcptw->tw_ts_offset,
Wang Yufen9c76a112014-03-29 09:27:31 +08001007 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
Florent Fourcot21858cd2015-05-16 00:24:59 +02001008 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001009
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07001010 inet_twsk_put(tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001011}
1012
Eric Dumazeta00e7442015-09-29 07:42:39 -07001013static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
Gui Jianfeng6edafaa2008-08-06 23:50:04 -07001014 struct request_sock *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001015{
Daniel Lee3a19ce02014-05-11 20:22:13 -07001016 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
1017 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
1018 */
Eric Dumazet20a2b492016-08-22 11:31:10 -07001019 /* RFC 7323 2.3
1020 * The window field (SEG.WND) of every outgoing segment, with the
1021 * exception of <SYN> segments, MUST be right-shifted by
1022 * Rcv.Wind.Shift bits:
1023 */
Eric Dumazet0f85fea2014-12-09 09:56:08 -08001024 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
Daniel Lee3a19ce02014-05-11 20:22:13 -07001025 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
Eric Dumazet20a2b492016-08-22 11:31:10 -07001026 tcp_rsk(req)->rcv_nxt,
1027 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
Eric Dumazet9a568de2017-05-16 14:00:14 -07001028 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
Florian Westphal95a22ca2016-12-01 11:32:06 +01001029 req->ts_recent, sk->sk_bound_dev_if,
Christoph Paasch30791ac2017-12-11 00:05:46 -08001030 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
Florent Fourcot1d13a962014-01-16 17:21:22 +01001031 0, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001032}
1033
1034
Eric Dumazet079096f2015-10-02 11:43:32 -07001035static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001036{
Glenn Griffinc6aefaf2008-02-07 21:49:26 -08001037#ifdef CONFIG_SYN_COOKIES
Eric Dumazet079096f2015-10-02 11:43:32 -07001038 const struct tcphdr *th = tcp_hdr(skb);
1039
Florian Westphalaf9b4732010-06-03 00:43:44 +00001040 if (!th->syn)
Glenn Griffinc6aefaf2008-02-07 21:49:26 -08001041 sk = cookie_v6_check(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001042#endif
1043 return sk;
1044}
1045
Linus Torvalds1da177e2005-04-16 15:20:36 -07001046static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1047{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001048 if (skb->protocol == htons(ETH_P_IP))
1049 return tcp_v4_conn_request(sk, skb);
1050
1051 if (!ipv6_unicast_destination(skb))
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001052 goto drop;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001053
Octavian Purdila1fb6f152014-06-25 17:10:02 +03001054 return tcp_conn_request(&tcp6_request_sock_ops,
1055 &tcp_request_sock_ipv6_ops, sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001056
Linus Torvalds1da177e2005-04-16 15:20:36 -07001057drop:
Eric Dumazet9caad862016-04-01 08:52:20 -07001058 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001059 return 0; /* don't send reset */
1060}
1061
Eric Dumazetebf6c9c2017-02-05 20:23:22 -08001062static void tcp_v6_restore_cb(struct sk_buff *skb)
1063{
1064 /* We need to move header back to the beginning if xfrm6_policy_check()
1065 * and tcp_v6_fill_cb() are going to be called again.
1066 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1067 */
1068 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1069 sizeof(struct inet6_skb_parm));
1070}
1071
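/* Create the child socket for an incoming connection request.
 * The v4-mapped case delegates to tcp_v4_syn_recv_sock() and then swaps
 * in the ipv6_mapped operations; the native IPv6 case routes the flow,
 * creates the child, copies the listener's IPv6 state and options, and
 * hashes the new socket.
 */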
Eric Dumazet0c271712015-09-29 07:42:48 -07001072static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
Weilong Chen4c99aa42013-12-19 18:44:34 +08001073 struct request_sock *req,
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001074 struct dst_entry *dst,
1075 struct request_sock *req_unhash,
1076 bool *own_req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001077{
Eric Dumazet634fb9792013-10-09 15:21:29 -07001078 struct inet_request_sock *ireq;
Eric Dumazet0c271712015-09-29 07:42:48 -07001079 struct ipv6_pinfo *newnp;
Eric Dumazet93a77c12019-03-19 07:01:08 -07001080 const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001081 struct ipv6_txoptions *opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001082 struct inet_sock *newinet;
1083 struct tcp_sock *newtp;
1084 struct sock *newsk;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001085#ifdef CONFIG_TCP_MD5SIG
1086 struct tcp_md5sig_key *key;
1087#endif
Neal Cardwell3840a062012-06-28 12:34:19 +00001088 struct flowi6 fl6;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001089
1090 if (skb->protocol == htons(ETH_P_IP)) {
1091 /*
1092 * v6 mapped
1093 */
1094
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001095 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1096 req_unhash, own_req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001097
Ian Morris63159f22015-03-29 14:00:04 +01001098 if (!newsk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001099 return NULL;
1100
Eric Dumazet93a77c12019-03-19 07:01:08 -07001101 inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001102
1103 newinet = inet_sk(newsk);
Eric Dumazet93a77c12019-03-19 07:01:08 -07001104 newnp = tcp_inet6_sk(newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001105 newtp = tcp_sk(newsk);
1106
1107 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1108
Eric Dumazetd1e559d2015-03-18 14:05:35 -07001109 newnp->saddr = newsk->sk_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001110
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001111 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001112 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001113#ifdef CONFIG_TCP_MD5SIG
1114 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1115#endif
1116
WANG Cong83eadda2017-05-09 16:59:54 -07001117 newnp->ipv6_mc_list = NULL;
Yan, Zheng676a1182011-09-25 02:21:30 +00001118 newnp->ipv6_ac_list = NULL;
1119 newnp->ipv6_fl_list = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001120 newnp->pktoptions = NULL;
1121 newnp->opt = NULL;
Eric Dumazet870c3152014-10-17 09:17:20 -07001122 newnp->mcast_oif = tcp_v6_iif(skb);
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001123 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
Florent Fourcot1397ed32013-12-08 15:46:57 +01001124 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001125 if (np->repflow)
1126 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001127
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07001128 /*
1129 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1130 * here, tcp_create_openreq_child now does this for us, see the comment in
1131 * that function for the gory details. -acme
Linus Torvalds1da177e2005-04-16 15:20:36 -07001132 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133
1134	/* This is a tricky place. Until this moment the IPv4 tcp code
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001135	   worked with the IPv6 icsk.icsk_af_ops.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001136 Sync it now.
1137 */
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001138 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001139
1140 return newsk;
1141 }
1142
Eric Dumazet634fb9792013-10-09 15:21:29 -07001143 ireq = inet_rsk(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001144
1145 if (sk_acceptq_is_full(sk))
1146 goto out_overflow;
1147
David S. Miller493f3772010-12-02 12:14:29 -08001148 if (!dst) {
Eric Dumazetf76b33c2015-09-29 07:42:42 -07001149 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
David S. Miller493f3772010-12-02 12:14:29 -08001150 if (!dst)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001151 goto out;
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001152 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153
1154 newsk = tcp_create_openreq_child(sk, req, skb);
Ian Morris63159f22015-03-29 14:00:04 +01001155 if (!newsk)
Balazs Scheidler093d2822010-10-21 13:06:43 +02001156 goto out_nonewsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001157
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07001158 /*
1159 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1160 * count here, tcp_create_openreq_child now does this for us, see the
1161 * comment in that function for the gory details. -acme
1162 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001163
Stephen Hemminger59eed272006-08-25 15:55:43 -07001164 newsk->sk_gso_type = SKB_GSO_TCPV6;
Eric Dumazet6bd4f352015-12-02 21:53:57 -08001165 ip6_dst_store(newsk, dst, NULL, NULL);
Neal Cardwellfae6ef82012-08-19 03:30:38 +00001166 inet6_sk_rx_dst_set(newsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001167
Eric Dumazet93a77c12019-03-19 07:01:08 -07001168 inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169
1170 newtp = tcp_sk(newsk);
1171 newinet = inet_sk(newsk);
Eric Dumazet93a77c12019-03-19 07:01:08 -07001172 newnp = tcp_inet6_sk(newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173
1174 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1175
Eric Dumazet634fb9792013-10-09 15:21:29 -07001176 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1177 newnp->saddr = ireq->ir_v6_loc_addr;
1178 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1179 newsk->sk_bound_dev_if = ireq->ir_iif;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001180
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001181 /* Now IPv6 options...
Linus Torvalds1da177e2005-04-16 15:20:36 -07001182
1183 First: no IPv4 options.
1184 */
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001185 newinet->inet_opt = NULL;
WANG Cong83eadda2017-05-09 16:59:54 -07001186 newnp->ipv6_mc_list = NULL;
Yan, Zheng676a1182011-09-25 02:21:30 +00001187 newnp->ipv6_ac_list = NULL;
Masayuki Nakagawad35690b2007-03-16 16:14:03 -07001188 newnp->ipv6_fl_list = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001189
1190 /* Clone RX bits */
1191 newnp->rxopt.all = np->rxopt.all;
1192
Linus Torvalds1da177e2005-04-16 15:20:36 -07001193 newnp->pktoptions = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194 newnp->opt = NULL;
Eric Dumazet870c3152014-10-17 09:17:20 -07001195 newnp->mcast_oif = tcp_v6_iif(skb);
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001196 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
Florent Fourcot1397ed32013-12-08 15:46:57 +01001197 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001198 if (np->repflow)
1199 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200
1201 /* Clone native IPv6 options from listening socket (if any)
1202
1203	   Yes, keeping a reference count would be much more clever,
1204	   but it would require one more thing here: reattaching the
1205	   optmem charge to newsk.
1206 */
Huw Davies56ac42b2016-06-27 15:05:28 -04001207 opt = ireq->ipv6_opt;
1208 if (!opt)
1209 opt = rcu_dereference(np->opt);
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001210 if (opt) {
1211 opt = ipv6_dup_options(newsk, opt);
1212 RCU_INIT_POINTER(newnp->opt, opt);
1213 }
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001214 inet_csk(newsk)->icsk_ext_hdr_len = 0;
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001215 if (opt)
1216 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1217 opt->opt_flen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218
Daniel Borkmann81164412015-01-05 23:57:48 +01001219 tcp_ca_openreq_child(newsk, dst);
1220
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221 tcp_sync_mss(newsk, dst_mtu(dst));
Eric Dumazet3541f9e2017-02-02 08:04:56 -08001222 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
Neal Cardwelld135c522012-04-22 09:45:47 +00001223
Linus Torvalds1da177e2005-04-16 15:20:36 -07001224 tcp_initialize_rcv_mss(newsk);
1225
Eric Dumazetc720c7e82009-10-15 06:30:45 +00001226 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1227 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001229#ifdef CONFIG_TCP_MD5SIG
1230 /* Copy over the MD5 key from the original socket */
Wang Yufen4aa956d2014-03-29 09:27:29 +08001231 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
Ian Morris53b24b82015-03-29 14:00:05 +01001232 if (key) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001233 /* We're using one, so create a matching key
1234 * on the newsk structure. If we fail to get
1235 * memory, then we end up not copying the key
1236 * across. Shucks.
1237 */
Eric Dumazetefe42082013-10-03 15:42:29 -07001238 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
Ivan Delalande67973182017-06-15 18:07:06 -07001239 AF_INET6, 128, key->key, key->keylen,
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001240 sk_gfp_mask(sk, GFP_ATOMIC));
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001241 }
1242#endif
1243
Balazs Scheidler093d2822010-10-21 13:06:43 +02001244 if (__inet_inherit_port(sk, newsk) < 0) {
Christoph Paasche337e242012-12-14 04:07:58 +00001245 inet_csk_prepare_forced_close(newsk);
1246 tcp_done(newsk);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001247 goto out;
1248 }
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001249 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001250 if (*own_req) {
Eric Dumazet49a496c2015-11-05 12:50:19 -08001251 tcp_move_syn(newtp, req);
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001252
1253 /* Clone pktoptions received with SYN, if we own the req */
1254 if (ireq->pktopts) {
1255 newnp->pktoptions = skb_clone(ireq->pktopts,
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001256 sk_gfp_mask(sk, GFP_ATOMIC));
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001257 consume_skb(ireq->pktopts);
1258 ireq->pktopts = NULL;
Eric Dumazetebf6c9c2017-02-05 20:23:22 -08001259 if (newnp->pktoptions) {
1260 tcp_v6_restore_cb(newnp->pktoptions);
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001261 skb_set_owner_r(newnp->pktoptions, newsk);
Eric Dumazetebf6c9c2017-02-05 20:23:22 -08001262 }
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001263 }
Eric Dumazetce105002015-10-30 09:46:12 -07001264 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265
1266 return newsk;
1267
1268out_overflow:
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001269 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001270out_nonewsk:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271 dst_release(dst);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001272out:
Eric Dumazet9caad862016-04-01 08:52:20 -07001273 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001274 return NULL;
1275}
1276
Linus Torvalds1da177e2005-04-16 15:20:36 -07001277/* The socket must have its spinlock held when we get
Eric Dumazete994b2f2015-10-02 11:43:39 -07001278 * here, unless it is a TCP_LISTEN socket.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001279 *
1280 * We have a potential double-lock case here, so even when
1281 * doing backlog processing we use the BH locking scheme.
1282 * This is because we cannot sleep with the original spinlock
1283 * held.
1284 */
1285static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1286{
Eric Dumazet93a77c12019-03-19 07:01:08 -07001287 struct ipv6_pinfo *np = tcp_inet6_sk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001288 struct sk_buff *opt_skb = NULL;
Eric Dumazet93a77c12019-03-19 07:01:08 -07001289 struct tcp_sock *tp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290
1291	/* Imagine: the socket is IPv6 and an IPv4 packet arrives,
1292	   goes to the IPv4 receive handler and is backlogged.
1293	   From the backlog it always goes here. Kerboom...
1294	   Fortunately, tcp_rcv_established and rcv_established
1295	   handle them correctly, but that is not the case with
1296 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1297 */
1298
1299 if (skb->protocol == htons(ETH_P_IP))
1300 return tcp_v4_do_rcv(sk, skb);
1301
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302 /*
1303 * socket locking is here for SMP purposes as backlog rcv
1304 * is currently called with bh processing disabled.
1305 */
1306
1307 /* Do Stevens' IPV6_PKTOPTIONS.
1308
1309	   Yes, guys, this is the only place in our code where we
1310	   can handle it without affecting IPv4.
1311	   The rest of the code is protocol independent,
1312	   and I do not like the idea of uglifying IPv4.
1313
1314	   Actually, the whole idea behind IPV6_PKTOPTIONS
1315	   does not look very well thought out. For now we latch
1316	   the options received in the last packet enqueued
1317	   by tcp. Feel free to propose a better solution.
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001318 --ANK (980728)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001319 */
1320 if (np->rxopt.all)
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001321 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001322
1323 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
Eric Dumazet5d299f32012-08-06 05:09:33 +00001324 struct dst_entry *dst = sk->sk_rx_dst;
1325
Tom Herbertbdeab992011-08-14 19:45:55 +00001326 sock_rps_save_rxhash(sk, skb);
Eric Dumazet3d973792014-11-11 05:54:27 -08001327 sk_mark_napi_id(sk, skb);
Eric Dumazet5d299f32012-08-06 05:09:33 +00001328 if (dst) {
1329 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1330 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1331 dst_release(dst);
1332 sk->sk_rx_dst = NULL;
1333 }
1334 }
1335
Yafang Shao3d97d882018-05-29 23:27:31 +08001336 tcp_rcv_established(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337 if (opt_skb)
1338 goto ipv6_pktoptions;
1339 return 0;
1340 }
1341
Eric Dumazet12e25e12015-06-03 23:49:21 -07001342 if (tcp_checksum_complete(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343 goto csum_err;
1344
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001345 if (sk->sk_state == TCP_LISTEN) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001346 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1347
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 if (!nsk)
1349 goto discard;
1350
Weilong Chen4c99aa42013-12-19 18:44:34 +08001351 if (nsk != sk) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352 if (tcp_child_process(sk, nsk, skb))
1353 goto reset;
1354 if (opt_skb)
1355 __kfree_skb(opt_skb);
1356 return 0;
1357 }
Neil Horman47482f132011-04-06 13:07:09 -07001358 } else
Tom Herbertbdeab992011-08-14 19:45:55 +00001359 sock_rps_save_rxhash(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360
Eric Dumazet72ab4a82015-09-29 07:42:41 -07001361 if (tcp_rcv_state_process(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362 goto reset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363 if (opt_skb)
1364 goto ipv6_pktoptions;
1365 return 0;
1366
1367reset:
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001368 tcp_v6_send_reset(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369discard:
1370 if (opt_skb)
1371 __kfree_skb(opt_skb);
1372 kfree_skb(skb);
1373 return 0;
1374csum_err:
Eric Dumazetc10d9312016-04-29 14:16:47 -07001375 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1376 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377 goto discard;
1378
1379
1380ipv6_pktoptions:
1381	/* What is going on here? We latch the received options only if:
1382
1383	   1. the skb was enqueued by tcp.
1384	   2. the skb was added to the tail of the read queue, not out of order.
1385	   3. the socket is not in a passive state.
1386	   4. finally, it really contains options that the user wants to receive.
1387 */
1388 tp = tcp_sk(sk);
1389 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1390 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
YOSHIFUJI Hideaki333fad52005-09-08 09:59:17 +09001391 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
Eric Dumazet870c3152014-10-17 09:17:20 -07001392 np->mcast_oif = tcp_v6_iif(opt_skb);
YOSHIFUJI Hideaki333fad52005-09-08 09:59:17 +09001393 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001394 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
Florent Fourcot82e9f102013-12-08 15:46:59 +01001395 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
Florent Fourcot1397ed32013-12-08 15:46:57 +01001396 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001397 if (np->repflow)
1398 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
Eric Dumazeta2247722014-09-27 09:50:56 -07001399 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400 skb_set_owner_r(opt_skb, sk);
Eric Dumazet8ce48622016-10-12 19:01:45 +02001401 tcp_v6_restore_cb(opt_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402 opt_skb = xchg(&np->pktoptions, opt_skb);
1403 } else {
1404 __kfree_skb(opt_skb);
1405 opt_skb = xchg(&np->pktoptions, NULL);
1406 }
1407 }
1408
Wei Yongjun800d55f2009-02-23 21:45:33 +00001409 kfree_skb(opt_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410 return 0;
1411}
1412
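/* Populate TCP_SKB_CB() from the TCP and IPv6 headers before the skb is
 * handed to the TCP state machine; must run after xfrm6_policy_check().
 */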
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001413static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1414 const struct tcphdr *th)
1415{
1416	/* This is tricky: we move IP6CB to its correct location inside
1417 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1418 * _decode_session6() uses IP6CB().
1419 * barrier() makes sure compiler won't play aliasing games.
1420 */
1421 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1422 sizeof(struct inet6_skb_parm));
1423 barrier();
1424
1425 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1426 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1427 skb->len - th->doff*4);
1428 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1429 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1430 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1431 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1432 TCP_SKB_CB(skb)->sacked = 0;
Mike Maloney98aaa912017-08-22 17:08:48 -04001433 TCP_SKB_CB(skb)->has_rxtstamp =
1434 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001435}
1436
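/* Main receive entry point for TCP over IPv6, invoked from the inet6
 * protocol handler for every TCP segment addressed to this host.
 */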
Herbert Xue5bbef22007-10-15 12:50:28 -07001437static int tcp_v6_rcv(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438{
Eric Dumazet8b27dae2019-03-22 08:56:40 -07001439 struct sk_buff *skb_to_free;
David Ahern4297a0e2017-08-07 08:44:21 -07001440 int sdif = inet6_sdif(skb);
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001441 const struct tcphdr *th;
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001442 const struct ipv6hdr *hdr;
Eric Dumazet3b24d852016-04-01 08:52:17 -07001443 bool refcounted;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444 struct sock *sk;
1445 int ret;
Pavel Emelyanova86b1e32008-07-16 20:20:58 -07001446 struct net *net = dev_net(skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447
1448 if (skb->pkt_type != PACKET_HOST)
1449 goto discard_it;
1450
1451 /*
1452 * Count it even if it's bad.
1453 */
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001454 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455
1456 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1457 goto discard_it;
1458
Eric Dumazetea1627c2016-05-13 09:16:40 -07001459 th = (const struct tcphdr *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460
Eric Dumazetea1627c2016-05-13 09:16:40 -07001461 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462 goto bad_packet;
1463 if (!pskb_may_pull(skb, th->doff*4))
1464 goto discard_it;
1465
Tom Herberte4f45b72014-05-02 16:29:51 -07001466 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001467 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468
Eric Dumazetea1627c2016-05-13 09:16:40 -07001469 th = (const struct tcphdr *)skb->data;
Stephen Hemmingere802af92010-04-22 15:24:53 -07001470 hdr = ipv6_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001472lookup:
Craig Galleka5836362016-02-10 11:50:38 -05001473 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
David Ahern4297a0e2017-08-07 08:44:21 -07001474 th->source, th->dest, inet6_iif(skb), sdif,
Eric Dumazet3b24d852016-04-01 08:52:17 -07001475 &refcounted);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476 if (!sk)
1477 goto no_tcp_socket;
1478
1479process:
1480 if (sk->sk_state == TCP_TIME_WAIT)
1481 goto do_time_wait;
1482
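	/* The lookup hit a request socket: validate the segment against the
	 * listener, then let tcp_check_req() complete the handshake, or fall
	 * back to a fresh lookup if another CPU stole the request.
	 */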
Eric Dumazet079096f2015-10-02 11:43:32 -07001483 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1484 struct request_sock *req = inet_reqsk(sk);
Eric Dumazete0f97592018-02-13 06:14:12 -08001485 bool req_stolen = false;
Eric Dumazet77166822016-02-18 05:39:18 -08001486 struct sock *nsk;
Eric Dumazet079096f2015-10-02 11:43:32 -07001487
1488 sk = req->rsk_listener;
Eric Dumazet079096f2015-10-02 11:43:32 -07001489 if (tcp_v6_inbound_md5_hash(sk, skb)) {
Eric Dumazete65c3322016-08-24 08:50:24 -07001490 sk_drops_add(sk, skb);
Eric Dumazet079096f2015-10-02 11:43:32 -07001491 reqsk_put(req);
1492 goto discard_it;
1493 }
Frank van der Linden4fd44a92018-06-12 23:09:37 +00001494 if (tcp_checksum_complete(skb)) {
1495 reqsk_put(req);
1496 goto csum_error;
1497 }
Eric Dumazet77166822016-02-18 05:39:18 -08001498 if (unlikely(sk->sk_state != TCP_LISTEN)) {
Eric Dumazetf03f2e12015-10-14 11:16:27 -07001499 inet_csk_reqsk_queue_drop_and_put(sk, req);
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001500 goto lookup;
1501 }
Eric Dumazet77166822016-02-18 05:39:18 -08001502 sock_hold(sk);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001503 refcounted = true;
Eric Dumazet1f3b3592017-09-08 12:44:47 -07001504 nsk = NULL;
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001505 if (!tcp_filter(sk, skb)) {
1506 th = (const struct tcphdr *)skb->data;
1507 hdr = ipv6_hdr(skb);
1508 tcp_v6_fill_cb(skb, hdr, th);
Eric Dumazete0f97592018-02-13 06:14:12 -08001509 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001510 }
Eric Dumazet079096f2015-10-02 11:43:32 -07001511 if (!nsk) {
1512 reqsk_put(req);
Eric Dumazete0f97592018-02-13 06:14:12 -08001513 if (req_stolen) {
1514 /* Another cpu got exclusive access to req
1515 * and created a full blown socket.
1516 * Try to feed this packet to this socket
1517 * instead of discarding it.
1518 */
1519 tcp_v6_restore_cb(skb);
1520 sock_put(sk);
1521 goto lookup;
1522 }
Eric Dumazet77166822016-02-18 05:39:18 -08001523 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001524 }
1525 if (nsk == sk) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001526 reqsk_put(req);
1527 tcp_v6_restore_cb(skb);
1528 } else if (tcp_child_process(sk, nsk, skb)) {
1529 tcp_v6_send_reset(nsk, skb);
Eric Dumazet77166822016-02-18 05:39:18 -08001530 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001531 } else {
Eric Dumazet77166822016-02-18 05:39:18 -08001532 sock_put(sk);
Eric Dumazet079096f2015-10-02 11:43:32 -07001533 return 0;
1534 }
1535 }
Eric Dumazet93a77c12019-03-19 07:01:08 -07001536 if (hdr->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001537 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
Stephen Hemmingere802af92010-04-22 15:24:53 -07001538 goto discard_and_relse;
1539 }
1540
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1542 goto discard_and_relse;
1543
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001544 if (tcp_v6_inbound_md5_hash(sk, skb))
1545 goto discard_and_relse;
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001546
Eric Dumazetac6e7802016-11-10 13:12:35 -08001547 if (tcp_filter(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548 goto discard_and_relse;
Eric Dumazetac6e7802016-11-10 13:12:35 -08001549 th = (const struct tcphdr *)skb->data;
1550 hdr = ipv6_hdr(skb);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001551 tcp_v6_fill_cb(skb, hdr, th);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552
1553 skb->dev = NULL;
1554
Eric Dumazete994b2f2015-10-02 11:43:39 -07001555 if (sk->sk_state == TCP_LISTEN) {
1556 ret = tcp_v6_do_rcv(sk, skb);
1557 goto put_and_return;
1558 }
1559
1560 sk_incoming_cpu_update(sk);
1561
Fabio Olive Leite293b9c42006-09-25 22:28:47 -07001562 bh_lock_sock_nested(sk);
Martin KaFai Laua44d6ea2016-03-14 10:52:15 -07001563 tcp_segs_in(tcp_sk(sk), skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564 ret = 0;
1565 if (!sock_owned_by_user(sk)) {
Eric Dumazet8b27dae2019-03-22 08:56:40 -07001566 skb_to_free = sk->sk_rx_skb_cache;
1567 sk->sk_rx_skb_cache = NULL;
Florian Westphale7942d02017-07-30 03:57:18 +02001568 ret = tcp_v6_do_rcv(sk, skb);
Eric Dumazet8b27dae2019-03-22 08:56:40 -07001569 } else {
1570 if (tcp_add_backlog(sk, skb))
1571 goto discard_and_relse;
1572 skb_to_free = NULL;
Zhu Yi6b03a532010-03-04 18:01:41 +00001573 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 bh_unlock_sock(sk);
Eric Dumazet8b27dae2019-03-22 08:56:40 -07001575 if (skb_to_free)
1576 __kfree_skb(skb_to_free);
Eric Dumazete994b2f2015-10-02 11:43:39 -07001577put_and_return:
Eric Dumazet3b24d852016-04-01 08:52:17 -07001578 if (refcounted)
1579 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580 return ret ? -1 : 0;
1581
1582no_tcp_socket:
1583 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1584 goto discard_it;
1585
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001586 tcp_v6_fill_cb(skb, hdr, th);
1587
Eric Dumazet12e25e12015-06-03 23:49:21 -07001588 if (tcp_checksum_complete(skb)) {
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001589csum_error:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001590 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591bad_packet:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001592 __TCP_INC_STATS(net, TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593 } else {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001594 tcp_v6_send_reset(NULL, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 }
1596
1597discard_it:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598 kfree_skb(skb);
1599 return 0;
1600
1601discard_and_relse:
Eric Dumazet532182c2016-04-01 08:52:19 -07001602 sk_drops_add(sk, skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001603 if (refcounted)
1604 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605 goto discard_it;
1606
1607do_time_wait:
1608 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001609 inet_twsk_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610 goto discard_it;
1611 }
1612
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001613 tcp_v6_fill_cb(skb, hdr, th);
1614
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001615 if (tcp_checksum_complete(skb)) {
1616 inet_twsk_put(inet_twsk(sk));
1617 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618 }
1619
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001620 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001621 case TCP_TW_SYN:
1622 {
1623 struct sock *sk2;
1624
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001625 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
Craig Galleka5836362016-02-10 11:50:38 -05001626 skb, __tcp_hdrlen(th),
Tom Herbert5ba24952013-01-22 09:50:39 +00001627 &ipv6_hdr(skb)->saddr, th->source,
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001628 &ipv6_hdr(skb)->daddr,
David Ahern24b711e2018-07-19 12:41:18 -07001629 ntohs(th->dest),
1630 tcp_v6_iif_l3_slave(skb),
David Ahern4297a0e2017-08-07 08:44:21 -07001631 sdif);
Ian Morris53b24b82015-03-29 14:00:05 +01001632 if (sk2) {
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -07001633 struct inet_timewait_sock *tw = inet_twsk(sk);
Eric Dumazetdbe7faa2015-07-08 14:28:30 -07001634 inet_twsk_deschedule_put(tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635 sk = sk2;
Alexey Kodanev4ad19de2015-03-27 12:24:22 +03001636 tcp_v6_restore_cb(skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001637 refcounted = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638 goto process;
1639 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640 }
Gustavo A. R. Silva275757e62017-10-16 16:36:52 -05001641 /* to ACK */
1642 /* fall through */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643 case TCP_TW_ACK:
1644 tcp_v6_timewait_ack(sk, skb);
1645 break;
1646 case TCP_TW_RST:
Florian Westphal271c3b92015-12-21 21:29:26 +01001647 tcp_v6_send_reset(sk, skb);
1648 inet_twsk_deschedule_put(inet_twsk(sk));
1649 goto discard_it;
Wang Yufen4aa956d2014-03-29 09:27:29 +08001650 case TCP_TW_SUCCESS:
1651 ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652 }
1653 goto discard_it;
1654}
1655
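/* Early demux: look up an established socket from the headers alone and,
 * on a hit, attach the socket and its cached dst to the skb so that the
 * later full lookup and route lookup can be skipped.
 */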
Eric Dumazetc7109982012-07-26 12:18:11 +00001656static void tcp_v6_early_demux(struct sk_buff *skb)
1657{
1658 const struct ipv6hdr *hdr;
1659 const struct tcphdr *th;
1660 struct sock *sk;
1661
1662 if (skb->pkt_type != PACKET_HOST)
1663 return;
1664
1665 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1666 return;
1667
1668 hdr = ipv6_hdr(skb);
1669 th = tcp_hdr(skb);
1670
1671 if (th->doff < sizeof(struct tcphdr) / 4)
1672 return;
1673
Eric Dumazet870c3152014-10-17 09:17:20 -07001674	/* Note: We use inet6_iif() here, not tcp_v6_iif() */
Eric Dumazetc7109982012-07-26 12:18:11 +00001675 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1676 &hdr->saddr, th->source,
1677 &hdr->daddr, ntohs(th->dest),
David Ahern4297a0e2017-08-07 08:44:21 -07001678 inet6_iif(skb), inet6_sdif(skb));
Eric Dumazetc7109982012-07-26 12:18:11 +00001679 if (sk) {
1680 skb->sk = sk;
1681 skb->destructor = sock_edemux;
Eric Dumazetf7e4eb02015-03-15 21:12:13 -07001682 if (sk_fullsock(sk)) {
Michal Kubečekd0c294c2015-03-23 15:14:00 +01001683 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
Neal Cardwellf3f12132012-10-22 21:41:48 +00001684
Eric Dumazetc7109982012-07-26 12:18:11 +00001685 if (dst)
Eric Dumazet93a77c12019-03-19 07:01:08 -07001686 dst = dst_check(dst, tcp_inet6_sk(sk)->rx_dst_cookie);
Eric Dumazetc7109982012-07-26 12:18:11 +00001687 if (dst &&
Neal Cardwellf3f12132012-10-22 21:41:48 +00001688 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
Eric Dumazetc7109982012-07-26 12:18:11 +00001689 skb_dst_set_noref(skb, dst);
1690 }
1691 }
1692}
1693
David S. Millerccb7c412010-12-01 18:09:13 -08001694static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1695 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1696 .twsk_unique = tcp_twsk_unique,
Wang Yufen4aa956d2014-03-29 09:27:29 +08001697 .twsk_destructor = tcp_twsk_destructor,
David S. Millerccb7c412010-12-01 18:09:13 -08001698};
1699
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001700static const struct inet_connection_sock_af_ops ipv6_specific = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001701 .queue_xmit = inet6_csk_xmit,
1702 .send_check = tcp_v6_send_check,
1703 .rebuild_header = inet6_sk_rebuild_header,
Eric Dumazet5d299f32012-08-06 05:09:33 +00001704 .sk_rx_dst_set = inet6_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001705 .conn_request = tcp_v6_conn_request,
1706 .syn_recv_sock = tcp_v6_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001707 .net_header_len = sizeof(struct ipv6hdr),
Eric Dumazet67469602012-04-24 07:37:38 +00001708 .net_frag_header_len = sizeof(struct frag_hdr),
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001709 .setsockopt = ipv6_setsockopt,
1710 .getsockopt = ipv6_getsockopt,
1711 .addr2sockaddr = inet6_csk_addr2sockaddr,
1712 .sockaddr_len = sizeof(struct sockaddr_in6),
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001713#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001714 .compat_setsockopt = compat_ipv6_setsockopt,
1715 .compat_getsockopt = compat_ipv6_getsockopt,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001716#endif
Neal Cardwell4fab9072014-08-14 12:40:05 -04001717 .mtu_reduced = tcp_v6_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718};
1719
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001720#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3de2009-09-01 19:25:03 +00001721static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001722 .md5_lookup = tcp_v6_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001723 .calc_md5_hash = tcp_v6_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001724 .md5_parse = tcp_v6_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001725};
David S. Millera9286302006-11-14 19:53:22 -08001726#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001727
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728/*
1729 * TCP over IPv4 via INET6 API
1730 */
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001731static const struct inet_connection_sock_af_ops ipv6_mapped = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001732 .queue_xmit = ip_queue_xmit,
1733 .send_check = tcp_v4_send_check,
1734 .rebuild_header = inet_sk_rebuild_header,
Eric Dumazet63d02d12012-08-09 14:11:00 +00001735 .sk_rx_dst_set = inet_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001736 .conn_request = tcp_v6_conn_request,
1737 .syn_recv_sock = tcp_v6_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001738 .net_header_len = sizeof(struct iphdr),
1739 .setsockopt = ipv6_setsockopt,
1740 .getsockopt = ipv6_getsockopt,
1741 .addr2sockaddr = inet6_csk_addr2sockaddr,
1742 .sockaddr_len = sizeof(struct sockaddr_in6),
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001743#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001744 .compat_setsockopt = compat_ipv6_setsockopt,
1745 .compat_getsockopt = compat_ipv6_getsockopt,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001746#endif
Neal Cardwell4fab9072014-08-14 12:40:05 -04001747 .mtu_reduced = tcp_v4_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748};
1749
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001750#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3de2009-09-01 19:25:03 +00001751static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001752 .md5_lookup = tcp_v4_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001753 .calc_md5_hash = tcp_v4_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001754 .md5_parse = tcp_v6_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001755};
David S. Millera9286302006-11-14 19:53:22 -08001756#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001757
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758/* NOTE: A lot of things are set to zero explicitly by the call to
1759 * sk_alloc(), so they need not be done here.
1760 */
1761static int tcp_v6_init_sock(struct sock *sk)
1762{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001763 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764
Neal Cardwell900f65d2012-04-19 09:55:21 +00001765 tcp_init_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001767 icsk->icsk_af_ops = &ipv6_specific;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001769#ifdef CONFIG_TCP_MD5SIG
David S. Millerac807fa2012-04-23 03:21:58 -04001770 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001771#endif
1772
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 return 0;
1774}
1775
Brian Haley7d06b2e2008-06-14 17:04:49 -07001776static void tcp_v6_destroy_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778 tcp_v4_destroy_sock(sk);
Brian Haley7d06b2e2008-06-14 17:04:49 -07001779 inet6_destroy_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780}
1781
YOSHIFUJI Hideaki952a10b2007-04-21 20:13:44 +09001782#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783/* Proc filesystem TCPv6 sock list dumping. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001784static void get_openreq6(struct seq_file *seq,
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001785 const struct request_sock *req, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786{
Eric Dumazetfa76ce732015-03-19 19:04:20 -07001787 long ttd = req->rsk_timer.expires - jiffies;
Eric Dumazet634fb9792013-10-09 15:21:29 -07001788 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1789 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790
1791 if (ttd < 0)
1792 ttd = 0;
1793
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 seq_printf(seq,
1795 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Francesco Fuscod14c5ab2013-08-15 13:42:14 +02001796 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797 i,
1798 src->s6_addr32[0], src->s6_addr32[1],
1799 src->s6_addr32[2], src->s6_addr32[3],
Eric Dumazetb44084c2013-10-10 00:04:37 -07001800 inet_rsk(req)->ir_num,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 dest->s6_addr32[0], dest->s6_addr32[1],
1802 dest->s6_addr32[2], dest->s6_addr32[3],
Eric Dumazet634fb9792013-10-09 15:21:29 -07001803 ntohs(inet_rsk(req)->ir_rmt_port),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 TCP_SYN_RECV,
Weilong Chen4c99aa42013-12-19 18:44:34 +08001805 0, 0, /* could print option size, but that is af dependent. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001806 1, /* timers active (only the expire timer) */
1807 jiffies_to_clock_t(ttd),
Eric Dumazete6c022a2012-10-27 23:16:46 +00001808 req->num_timeout,
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001809 from_kuid_munged(seq_user_ns(seq),
1810 sock_i_uid(req->rsk_listener)),
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001811 0, /* non standard timer */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 0, /* open_requests have no inode */
1813 0, req);
1814}
1815
1816static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1817{
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001818 const struct in6_addr *dest, *src;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 __u16 destp, srcp;
1820 int timer_active;
1821 unsigned long timer_expires;
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001822 const struct inet_sock *inet = inet_sk(sp);
1823 const struct tcp_sock *tp = tcp_sk(sp);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001824 const struct inet_connection_sock *icsk = inet_csk(sp);
Eric Dumazet0536fcc2015-09-29 07:42:52 -07001825 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001826 int rx_queue;
1827 int state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828
Eric Dumazetefe42082013-10-03 15:42:29 -07001829 dest = &sp->sk_v6_daddr;
1830 src = &sp->sk_v6_rcv_saddr;
Eric Dumazetc720c7e82009-10-15 06:30:45 +00001831 destp = ntohs(inet->inet_dport);
1832 srcp = ntohs(inet->inet_sport);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001833
Yuchung Chengce3cf4e2016-06-06 15:07:18 -07001834 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
Yuchung Cheng57dde7f2017-01-12 22:11:33 -08001835 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
Yuchung Chengce3cf4e2016-06-06 15:07:18 -07001836 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837 timer_active = 1;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001838 timer_expires = icsk->icsk_timeout;
1839 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840 timer_active = 4;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001841 timer_expires = icsk->icsk_timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842 } else if (timer_pending(&sp->sk_timer)) {
1843 timer_active = 2;
1844 timer_expires = sp->sk_timer.expires;
1845 } else {
1846 timer_active = 0;
1847 timer_expires = jiffies;
1848 }
1849
Yafang Shao986ffdf2017-12-20 11:12:52 +08001850 state = inet_sk_state_load(sp);
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001851 if (state == TCP_LISTEN)
1852 rx_queue = sp->sk_ack_backlog;
1853 else
1854 /* Because we don't lock the socket,
1855 * we might find a transient negative value.
1856 */
1857 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1858
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859 seq_printf(seq,
1860 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Francesco Fuscod14c5ab2013-08-15 13:42:14 +02001861 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862 i,
1863 src->s6_addr32[0], src->s6_addr32[1],
1864 src->s6_addr32[2], src->s6_addr32[3], srcp,
1865 dest->s6_addr32[0], dest->s6_addr32[1],
1866 dest->s6_addr32[2], dest->s6_addr32[3], destp,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001867 state,
1868 tp->write_seq - tp->snd_una,
1869 rx_queue,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870 timer_active,
Eric Dumazeta399a802012-08-08 21:13:53 +00001871 jiffies_delta_to_clock_t(timer_expires - jiffies),
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001872 icsk->icsk_retransmits,
Eric W. Biedermana7cb5a42012-05-24 01:10:10 -06001873 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001874 icsk->icsk_probes_out,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875 sock_i_ino(sp),
Reshetova, Elena41c6d652017-06-30 13:08:01 +03001876 refcount_read(&sp->sk_refcnt), sp,
Stephen Hemminger7be87352008-06-27 20:00:19 -07001877 jiffies_to_clock_t(icsk->icsk_rto),
1878 jiffies_to_clock_t(icsk->icsk_ack.ato),
Wei Wang31954cd2019-01-25 10:53:19 -08001879 (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
Ilpo Järvinen0b6a05c2009-09-15 01:30:10 -07001880 tp->snd_cwnd,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001881 state == TCP_LISTEN ?
Eric Dumazet0536fcc2015-09-29 07:42:52 -07001882 fastopenq->max_qlen :
Yuchung Cheng0a672f742014-05-11 20:22:12 -07001883 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 );
1885}
1886
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001887static void get_timewait6_sock(struct seq_file *seq,
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07001888 struct inet_timewait_sock *tw, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889{
Eric Dumazet789f5582015-04-12 18:51:09 -07001890 long delta = tw->tw_timer.expires - jiffies;
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001891 const struct in6_addr *dest, *src;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892 __u16 destp, srcp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893
Eric Dumazetefe42082013-10-03 15:42:29 -07001894 dest = &tw->tw_v6_daddr;
1895 src = &tw->tw_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896 destp = ntohs(tw->tw_dport);
1897 srcp = ntohs(tw->tw_sport);
1898
1899 seq_printf(seq,
1900 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Dan Rosenberg71338aa2011-05-23 12:17:35 +00001901 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902 i,
1903 src->s6_addr32[0], src->s6_addr32[1],
1904 src->s6_addr32[2], src->s6_addr32[3], srcp,
1905 dest->s6_addr32[0], dest->s6_addr32[1],
1906 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1907 tw->tw_substate, 0, 0,
Eric Dumazeta399a802012-08-08 21:13:53 +00001908 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
Reshetova, Elena41c6d652017-06-30 13:08:01 +03001909 refcount_read(&tw->tw_refcnt), tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910}
1911
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912static int tcp6_seq_show(struct seq_file *seq, void *v)
1913{
1914 struct tcp_iter_state *st;
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07001915 struct sock *sk = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916
1917 if (v == SEQ_START_TOKEN) {
1918 seq_puts(seq,
1919 " sl "
1920 "local_address "
1921 "remote_address "
1922 "st tx_queue rx_queue tr tm->when retrnsmt"
1923 " uid timeout inode\n");
1924 goto out;
1925 }
1926 st = seq->private;
1927
Eric Dumazet079096f2015-10-02 11:43:32 -07001928 if (sk->sk_state == TCP_TIME_WAIT)
1929 get_timewait6_sock(seq, v, st->num);
1930 else if (sk->sk_state == TCP_NEW_SYN_RECV)
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001931 get_openreq6(seq, v, st->num);
Eric Dumazet079096f2015-10-02 11:43:32 -07001932 else
1933 get_tcp6_sock(seq, v, st->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934out:
1935 return 0;
1936}
1937
Christoph Hellwig37d849b2018-04-11 09:31:28 +02001938static const struct seq_operations tcp6_seq_ops = {
1939 .show = tcp6_seq_show,
1940 .start = tcp_seq_start,
1941 .next = tcp_seq_next,
1942 .stop = tcp_seq_stop,
1943};
1944
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945static struct tcp_seq_afinfo tcp6_seq_afinfo = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946 .family = AF_INET6,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947};
1948
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001949int __net_init tcp6_proc_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950{
Christoph Hellwigc3506372018-04-10 19:42:55 +02001951 if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
1952 sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
Christoph Hellwig37d849b2018-04-11 09:31:28 +02001953 return -ENOMEM;
1954 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955}
1956
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001957void tcp6_proc_exit(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958{
Christoph Hellwig37d849b2018-04-11 09:31:28 +02001959 remove_proc_entry("tcp6", net->proc_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960}
1961#endif
1962
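/* Socket-level protocol operations registered for AF_INET6 SOCK_STREAM sockets. */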
1963struct proto tcpv6_prot = {
1964 .name = "TCPv6",
1965 .owner = THIS_MODULE,
1966 .close = tcp_close,
Andrey Ignatovd74bad42018-03-30 15:08:05 -07001967 .pre_connect = tcp_v6_pre_connect,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968 .connect = tcp_v6_connect,
1969 .disconnect = tcp_disconnect,
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001970 .accept = inet_csk_accept,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971 .ioctl = tcp_ioctl,
1972 .init = tcp_v6_init_sock,
1973 .destroy = tcp_v6_destroy_sock,
1974 .shutdown = tcp_shutdown,
1975 .setsockopt = tcp_setsockopt,
1976 .getsockopt = tcp_getsockopt,
Ursula Braun4b9d07a2017-01-09 16:55:12 +01001977 .keepalive = tcp_set_keepalive,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 .recvmsg = tcp_recvmsg,
Changli Gao7ba42912010-07-10 20:41:55 +00001979 .sendmsg = tcp_sendmsg,
1980 .sendpage = tcp_sendpage,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981 .backlog_rcv = tcp_v6_do_rcv,
Eric Dumazet46d3cea2012-07-11 05:50:31 +00001982 .release_cb = tcp_release_cb,
Craig Gallek496611d2016-02-10 11:50:36 -05001983 .hash = inet6_hash,
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08001984 .unhash = inet_unhash,
1985 .get_port = inet_csk_get_port,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 .enter_memory_pressure = tcp_enter_memory_pressure,
Eric Dumazet06044752017-06-07 13:29:12 -07001987 .leave_memory_pressure = tcp_leave_memory_pressure,
Eric Dumazetc9bee3b72013-07-22 20:27:07 -07001988 .stream_memory_free = tcp_stream_memory_free,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989 .sockets_allocated = &tcp_sockets_allocated,
1990 .memory_allocated = &tcp_memory_allocated,
1991 .memory_pressure = &tcp_memory_pressure,
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07001992 .orphan_count = &tcp_orphan_count,
Eric W. Biedermana4fe34b2013-10-19 16:25:36 -07001993 .sysctl_mem = sysctl_tcp_mem,
Eric Dumazet356d1832017-11-07 00:29:28 -08001994 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
1995 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996 .max_header = MAX_TCP_HEADER,
1997 .obj_size = sizeof(struct tcp6_sock),
Paul E. McKenney5f0d5a32017-01-18 02:53:44 -08001998 .slab_flags = SLAB_TYPESAFE_BY_RCU,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08001999 .twsk_prot = &tcp6_timewait_sock_ops,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07002000 .rsk_prot = &tcp6_request_sock_ops,
Pavel Emelyanov39d8cda2008-03-22 16:50:58 -07002001 .h.hashinfo = &tcp_hashinfo,
Changli Gao7ba42912010-07-10 20:41:55 +00002002 .no_autobind = true,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002003#ifdef CONFIG_COMPAT
2004 .compat_setsockopt = compat_tcp_setsockopt,
2005 .compat_getsockopt = compat_tcp_getsockopt,
2006#endif
Lorenzo Colittic1e64e22015-12-16 12:30:05 +09002007 .diag_destroy = tcp_abort,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008};
2009
David Aherna8e3bb32017-08-28 15:14:20 -07002010/* thinking of making this const? Don't.
2011 * early_demux can change based on sysctl.
2012 */
Julia Lawall39294c32017-08-01 18:27:28 +02002013static struct inet6_protocol tcpv6_protocol = {
Eric Dumazetc7109982012-07-26 12:18:11 +00002014 .early_demux = tcp_v6_early_demux,
subashab@codeaurora.orgdddb64b2017-03-23 13:34:16 -06002015 .early_demux_handler = tcp_v6_early_demux,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016 .handler = tcp_v6_rcv,
2017 .err_handler = tcp_v6_err,
2018 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2019};
2020
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021static struct inet_protosw tcpv6_protosw = {
2022 .type = SOCK_STREAM,
2023 .protocol = IPPROTO_TCP,
2024 .prot = &tcpv6_prot,
2025 .ops = &inet6_stream_ops,
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08002026 .flags = INET_PROTOSW_PERMANENT |
2027 INET_PROTOSW_ICSK,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028};
2029
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002030static int __net_init tcpv6_net_init(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002031{
Denis V. Lunev56772422008-04-03 14:28:30 -07002032 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2033 SOCK_RAW, IPPROTO_TCP, net);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002034}
2035
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002036static void __net_exit tcpv6_net_exit(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002037{
Denis V. Lunev56772422008-04-03 14:28:30 -07002038 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00002039}
2040
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002041static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00002042{
Haishuang Yan1946e672016-12-28 17:52:32 +08002043 inet_twsk_purge(&tcp_hashinfo, AF_INET6);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002044}
2045
2046static struct pernet_operations tcpv6_net_ops = {
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00002047 .init = tcpv6_net_init,
2048 .exit = tcpv6_net_exit,
2049 .exit_batch = tcpv6_net_exit_batch,
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002050};
2051
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002052int __init tcpv6_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053{
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002054 int ret;
David Woodhouseae0f7d52006-01-11 15:53:04 -08002055
Vlad Yasevich33362882012-11-15 08:49:15 +00002056 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2057 if (ret)
Vlad Yasevichc6b641a2012-11-15 08:49:22 +00002058 goto out;
Vlad Yasevich33362882012-11-15 08:49:15 +00002059
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002060 /* register inet6 protocol */
2061 ret = inet6_register_protosw(&tcpv6_protosw);
2062 if (ret)
2063 goto out_tcpv6_protocol;
2064
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002065 ret = register_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002066 if (ret)
2067 goto out_tcpv6_protosw;
2068out:
2069 return ret;
2070
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002071out_tcpv6_protosw:
2072 inet6_unregister_protosw(&tcpv6_protosw);
Vlad Yasevich33362882012-11-15 08:49:15 +00002073out_tcpv6_protocol:
2074 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002075 goto out;
2076}
2077
Daniel Lezcano09f77092007-12-13 05:34:58 -08002078void tcpv6_exit(void)
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002079{
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002080 unregister_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002081 inet6_unregister_protosw(&tcpv6_protosw);
2082 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083}