// SPDX-License-Identifier: GPL-2.0
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c , see ip_input.c for history.
 *		David S. Miller	:	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy	:	LRU queue of frag heads for evictor.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>
#include <net/l3mdev.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */
static const char ip_frag_cache_name[] = "ip4-frags";

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct inet_frag_queue q;

	u8 ecn; /* RFC3168 support */
	u16 max_df_size; /* largest frag with DF set seen */
	int iif;
	unsigned int rid;
	struct inet_peer *peer;
};

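/* Each queue accumulates the ECN codepoints seen on its fragments as a
 * 4-bit mask: ip4_frag_ecn() maps a TOS byte to one bit per codepoint
 * (Not-ECT, ECT(1), ECT(0), CE), and reassembly later folds the mask
 * back into a single codepoint via ip_frag_ecn_table[].
 */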
static u8 ip4_frag_ecn(u8 tos)
{
	return 1 << (tos & INET_ECN_MASK);
}

static struct inet_frags ip4_frags;

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
			 struct sk_buff *prev_tail, struct net_device *dev);

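/* Constructor, invoked by inet_frag_find() when a new queue is created:
 * copy in the lookup key and, if the ipfrag_max_dist sysctl is non-zero,
 * grab an inetpeer entry so fragment reordering distance can be tracked
 * per remote host.
 */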
static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
{
	struct ipq *qp = container_of(q, struct ipq, q);
	struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
					       frags);
	struct net *net = container_of(ipv4, struct net, ipv4);

	const struct frag_v4_compare_key *key = a;

	q->key.v4 = *key;
	qp->ecn = 0;
	qp->peer = q->net->max_dist ?
		inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) :
		NULL;
}

static void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
}

/* Destruction primitives. */

static void ipq_put(struct ipq *ipq)
{
	inet_frag_put(&ipq->q);
}

/* Kill ipq entry. It is not destroyed immediately,
 * because caller (and someone more) holds reference count.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q);
}

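/* Callers like AF_PACKET taps and conntrack defragment packets that are
 * merely passing through this host; only a true end host should answer a
 * reassembly timeout with ICMP, so those "user" ranges are skipped.
 */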
static bool frag_expire_skip_icmp(u32 user)
{
	return user == IP_DEFRAG_AF_PACKET ||
	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
					 __IP_DEFRAG_CONNTRACK_IN_END) ||
	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_BRIDGE_IN,
					 __IP_DEFRAG_CONNTRACK_BRIDGE_IN);
}

/*
 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
 */
static void ip_expire(struct timer_list *t)
{
	struct inet_frag_queue *frag = from_timer(frag, t, timer);
	const struct iphdr *iph;
	struct sk_buff *head = NULL;
	struct net *net;
	struct ipq *qp;
	int err;

	qp = container_of(frag, struct ipq, q);
	net = container_of(qp->q.net, struct net, ipv4.frags);

	rcu_read_lock();
	spin_lock(&qp->q.lock);

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto out;

	ipq_kill(qp);
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	__IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);

	if (!(qp->q.flags & INET_FRAG_FIRST_IN))
		goto out;

	/* sk_buff::dev and sk_buff::rbnode are unionized. So we
	 * pull the head out of the tree in order to be able to
	 * deal with head->dev.
	 */
	head = inet_frag_pull_head(&qp->q);
	if (!head)
		goto out;
	head->dev = dev_get_by_index_rcu(net, qp->iif);
	if (!head->dev)
		goto out;

	/* skb has no dst, perform route lookup again */
	iph = ip_hdr(head);
	err = ip_route_input_noref(head, iph->daddr, iph->saddr,
				   iph->tos, head->dev);
	if (err)
		goto out;

	/* Only an end host needs to send an ICMP
	 * "Fragment Reassembly Timeout" message, per RFC792.
	 */
	if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
	    (skb_rtable(head)->rt_type != RTN_LOCAL))
		goto out;

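	/* Release the queue lock before icmp_send(): building the ICMP
	 * error does not need it, and head is exclusively ours now that
	 * inet_frag_pull_head() unlinked it from the tree.
	 */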
	spin_unlock(&qp->q.lock);
	icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
	goto out_rcu_unlock;

out:
	spin_unlock(&qp->q.lock);
out_rcu_unlock:
	rcu_read_unlock();
	kfree_skb(head);
	ipq_put(qp);
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create new one, if nothing is found.
 */
static struct ipq *ip_find(struct net *net, struct iphdr *iph,
			   u32 user, int vif)
{
	struct frag_v4_compare_key key = {
		.saddr = iph->saddr,
		.daddr = iph->daddr,
		.user = user,
		.vif = vif,
		.id = iph->id,
		.protocol = iph->protocol,
	};
	struct inet_frag_queue *q;

	q = inet_frag_find(&net->ipv4.frags, &key);
	if (!q)
		return NULL;

	return container_of(q, struct ipq, q);
}

/* Is the fragment too far ahead to be part of ipq? */
static int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = qp->q.net->max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments_tail && (end - start) > max;

	if (rc) {
		struct net *net;

		net = container_of(qp->q.net, struct net, ipv4.frags);
		__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}

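/* Flush and reset a queue that ip_frag_too_far() flagged: purge the whole
 * fragment rbtree, give the memory back to the per-netns accounting and
 * start over. If the expire timer could not be re-armed the queue is
 * already dying, so just bail out with -ETIMEDOUT.
 */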
static int ip_frag_reinit(struct ipq *qp)
{
	unsigned int sum_truesize = 0;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
		refcount_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments);
	sub_frag_mem_limit(qp->q.net, sum_truesize);

	qp->q.flags = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.rb_fragments = RB_ROOT;
	qp->q.fragments_tail = NULL;
	qp->q.last_run_head = NULL;
	qp->iif = 0;
	qp->ecn = 0;

	return 0;
}

/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	int ihl, end, flags, offset;
	struct sk_buff *prev_tail;
	struct net_device *dev;
	unsigned int fragsize;
	int err = -ENOENT;
	u8 ecn;

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - skb_network_offset(skb) - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto discard_qp;
		qp->q.flags |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end & 7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.flags & INET_FRAG_LAST_IN)
				goto discard_qp;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto discard_qp;

	err = -ENOMEM;
	if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
		goto discard_qp;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto discard_qp;

	/* Note : skb->rbnode and skb->dev share the same location. */
	dev = skb->dev;
	/* Makes sure compiler wont do silly aliasing games */
	barrier();

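	/* Fragments are kept in an rbtree of non-overlapping runs keyed
	 * by offset. inet_frag_queue_insert() rejects exact duplicates
	 * (IPFRAG_DUP) and anything overlapping already-queued data
	 * (IPFRAG_OVERLAP); an overlap kills the whole queue below, the
	 * hardening adopted with the 2018 rbtree rework.
	 */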
	prev_tail = qp->q.fragments_tail;
	err = inet_frag_queue_insert(&qp->q, skb, offset, end);
	if (err)
		goto insert_error;

	if (dev)
		qp->iif = dev->ifindex;

	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	qp->ecn |= ecn;
	add_frag_mem_limit(qp->q.net, skb->truesize);
	if (offset == 0)
		qp->q.flags |= INET_FRAG_FIRST_IN;

	fragsize = skb->len + ihl;

	if (fragsize > qp->q.max_size)
		qp->q.max_size = fragsize;

	if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
	    fragsize > qp->max_df_size)
		qp->max_df_size = fragsize;

	if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = ip_frag_reasm(qp, skb, prev_tail, dev);
		skb->_skb_refdst = orefdst;
		if (err)
			inet_frag_kill(&qp->q);
		return err;
	}

	skb_dst_drop(skb);
	return -EINPROGRESS;

insert_error:
	if (err == IPFRAG_DUP) {
		kfree_skb(skb);
		return -EINVAL;
	}
	err = -EINVAL;
	__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
discard_qp:
	inet_frag_kill(&qp->q);
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
err:
	kfree_skb(skb);
	return err;
}

/* Build a new IP datagram from all its fragments. */
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
			 struct sk_buff *prev_tail, struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	void *reasm_data;
	int len, err;
	u8 ecn;

	ipq_kill(qp);

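	/* Fold the accumulated per-fragment ECN bits back into a single
	 * codepoint. A table value of 0xff flags an invalid mix (e.g. CE
	 * together with Not-ECT) and the whole datagram is dropped, in
	 * line with the RFC 3168/6040 treatment of such combinations.
	 */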
	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}

	/* Make the one we just received the head. */
	reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail);
	if (!reasm_data)
		goto out_nomem;

	len = ip_hdrlen(skb) + qp->q.len;
	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	inet_frag_reasm_finish(&qp->q, skb, reasm_data);

	skb->dev = dev;
	IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);

	iph = ip_hdr(skb);
	iph->tot_len = htons(len);
	iph->tos |= ecn;

	/* When we set IP_DF on a refragmented skb we must also force a
	 * call to ip_fragment to avoid forwarding a DF-skb of size s while
	 * original sender only sent fragments of size f (where f < s).
	 *
	 * We only set DF/IPSKB_FRAG_PMTU if such DF fragment was the largest
	 * frag seen to avoid sending tiny DF-fragments in case skb was built
	 * from one very small df-fragment and one large non-df frag.
	 */
	if (qp->max_df_size == qp->q.max_size) {
		IPCB(skb)->flags |= IPSKB_FRAG_PMTU;
		iph->frag_off = htons(IP_DF);
	} else {
		iph->frag_off = 0;
	}

	ip_send_check(iph);

	__IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
	qp->q.rb_fragments = RB_ROOT;
	qp->q.fragments_tail = NULL;
	qp->q.last_run_head = NULL;
	return 0;

out_nomem:
	net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
out_fail:
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	return err;
}

/* Process an incoming IP datagram fragment. */
int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
	int vif = l3mdev_master_ifindex_rcu(dev);
	struct ipq *qp;

	__IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
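	/* A fragment may sit on the queue long after its originating
	 * socket is gone; orphan the skb now so queued truesize is not
	 * charged against that socket's send buffer.
	 */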
	skb_orphan(skb);

	/* Lookup (or create) queue header */
	qp = ip_find(net, ip_hdr(skb), user, vif);
	if (qp) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp);
		return ret;
	}

	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}
EXPORT_SYMBOL(ip_defrag);

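/* Cheap pre-check used by callers such as packet sockets: copy just the
 * IP header out of (possibly shared) data, sanity-check it, and only if
 * the packet really is a fragment take a private copy and feed it to
 * ip_defrag(). Non-fragments are passed back untouched.
 */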
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	struct iphdr iph;
	int netoff;
	u32 len;

	if (skb->protocol != htons(ETH_P_IP))
		return skb;

	netoff = skb_network_offset(skb);

	if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
		return skb;

	if (iph.ihl < 5 || iph.version != 4)
		return skb;

	len = ntohs(iph.tot_len);
	if (skb->len < netoff + len || len < (iph.ihl * 4))
		return skb;

	if (ip_is_fragment(&iph)) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (skb) {
			if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) {
				kfree_skb(skb);
				return NULL;
			}
			if (pskb_trim_rcsum(skb, netoff + len)) {
				kfree_skb(skb);
				return NULL;
			}
			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
			if (ip_defrag(net, skb, user))
				return NULL;
			skb_clear_hash(skb);
		}
	}
	return skb;
}
EXPORT_SYMBOL(ip_check_defrag);

#ifdef CONFIG_SYSCTL
static int dist_min;

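/* Per-netns sysctls under net.ipv4: the two memory thresholds clamp each
 * other through extra1/extra2 so ipfrag_low_thresh can never be raised
 * above ipfrag_high_thresh (and vice versa).
 */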
static struct ctl_table ip4_frags_ns_ctl_table[] = {
	{
		.procname	= "ipfrag_high_thresh",
		.data		= &init_net.ipv4.frags.high_thresh,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &init_net.ipv4.frags.low_thresh
	},
	{
		.procname	= "ipfrag_low_thresh",
		.data		= &init_net.ipv4.frags.low_thresh,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra2		= &init_net.ipv4.frags.high_thresh
	},
	{
		.procname	= "ipfrag_time",
		.data		= &init_net.ipv4.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ipfrag_max_dist",
		.data		= &init_net.ipv4.frags.max_dist,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &dist_min,
	},
	{ }
};

/* secret interval has been deprecated */
static int ip4_frags_secret_interval_unused;
static struct ctl_table ip4_frags_ctl_table[] = {
	{
		.procname	= "ipfrag_secret_interval",
		.data		= &ip4_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip4_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
		if (!table)
			goto err_alloc;

		table[0].data = &net->ipv4.frags.high_thresh;
		table[0].extra1 = &net->ipv4.frags.low_thresh;
		table[1].data = &net->ipv4.frags.low_thresh;
		table[1].extra2 = &net->ipv4.frags.high_thresh;
		table[2].data = &net->ipv4.frags.timeout;
		table[3].data = &net->ipv4.frags.max_dist;
	}

	hdr = register_net_sysctl(net, "net/ipv4", table);
	if (!hdr)
		goto err_reg;

	net->ipv4.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv4.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.frags_hdr);
	kfree(table);
}

static void __init ip4_frags_ctl_register(void)
{
	register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
}
#else
static int ip4_frags_ns_ctl_register(struct net *net)
{
	return 0;
}

static void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static void __init ip4_frags_ctl_register(void)
{
}
#endif

static int __net_init ipv4_frags_init_net(struct net *net)
{
	int res;

	/* Fragment cache limits.
	 *
	 * The fragment memory accounting code, (tries to) account for
	 * the real memory usage, by measuring both the size of frag
	 * queue struct (inet_frag_queue (ipv4:ipq/ipv6:frag_queue))
	 * and the SKB's truesize.
	 *
	 * A 64K fragment consumes 129736 bytes (44*2944)+200
	 * (1500 truesize == 2944, sizeof(struct ipq) == 200)
	 *
	 * We will commit 4MB at one time. Should we cross that limit
	 * we will prune down to 3MB, making room for approx 8 big 64K
	 * fragments 8x128k.
	 */
	net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
	net->ipv4.frags.low_thresh  = 3 * 1024 * 1024;
	/*
	 * Important NOTE! Fragment queue must be destroyed before MSL expires.
	 * RFC791 is wrong proposing to prolongate timer each fragment arrival
	 * by TTL.
	 */
	net->ipv4.frags.timeout = IP_FRAG_TIME;

	net->ipv4.frags.max_dist = 64;
	net->ipv4.frags.f = &ip4_frags;

	res = inet_frags_init_net(&net->ipv4.frags);
	if (res < 0)
		return res;
	res = ip4_frags_ns_ctl_register(net);
	if (res < 0)
		inet_frags_exit_net(&net->ipv4.frags);
	return res;
}

static void __net_exit ipv4_frags_exit_net(struct net *net)
{
	ip4_frags_ns_ctl_unregister(net);
	inet_frags_exit_net(&net->ipv4.frags);
}

static struct pernet_operations ip4_frags_ops = {
	.init = ipv4_frags_init_net,
	.exit = ipv4_frags_exit_net,
};

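/* All IPv4 fragment queues live in one rhashtable per namespace, keyed by
 * {saddr, daddr, user, vif, id, protocol}. ip4_key_hashfn() hashes a bare
 * lookup key, ip4_obj_hashfn() hashes the key embedded in a stored queue,
 * and ip4_obj_cmpfn() resolves hash collisions by full key comparison.
 */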
static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed)
{
	return jhash2(data,
		      sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
}

static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct inet_frag_queue *fq = data;

	return jhash2((const u32 *)&fq->key.v4,
		      sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
}

static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct frag_v4_compare_key *key = arg->key;
	const struct inet_frag_queue *fq = ptr;

	return !!memcmp(&fq->key, key, sizeof(*key));
}

static const struct rhashtable_params ip4_rhash_params = {
	.head_offset		= offsetof(struct inet_frag_queue, node),
	.key_offset		= offsetof(struct inet_frag_queue, key),
	.key_len		= sizeof(struct frag_v4_compare_key),
	.hashfn			= ip4_key_hashfn,
	.obj_hashfn		= ip4_obj_hashfn,
	.obj_cmpfn		= ip4_obj_cmpfn,
	.automatic_shrinking	= true,
};

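/* Boot-time setup: plug the IPv4 callbacks into the generic inet_frags
 * machinery, register the init-ns-only sysctl table (the deprecated
 * ipfrag_secret_interval knob) and the per-namespace init/exit hooks.
 */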
void __init ipfrag_init(void)
{
	ip4_frags.constructor = ip4_frag_init;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.qsize = sizeof(struct ipq);
	ip4_frags.frag_expire = ip_expire;
	ip4_frags.frags_cache_name = ip_frag_cache_name;
	ip4_frags.rhash_params = ip4_rhash_params;
	if (inet_frags_init(&ip4_frags))
		panic("IP: failed to allocate ip4_frags cache\n");
	ip4_frags_ctl_register();
	register_pernet_subsys(&ip4_frags_ops);
}