#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>

#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
{
	unsigned int len;

	len = extralen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

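/*
 * Helpers that locate the individual pieces (extra/seqhi area, IV, AEAD
 * request and scatterlist) inside the temporary buffer laid out by
 * esp_alloc_tmp() above.
 */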
static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + extralen,
			 crypto_aead_alignmask(aead) + 1) : tmp + extralen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

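/* Drop the page references held by the source scatterlist when the
 * encryption was done out of place (req->src != req->dst).  The first
 * entry maps skb->data and is skipped.
 */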
static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	struct esp_output_extra *extra = esp_tmp_extra(tmp);
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(*extra);

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}

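/* Completion callback for asynchronous encryption: release the temporary
 * buffer and hand the skb back to the xfrm output path.
 */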
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	void *tmp;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);
	xfrm_output_resume(skb, err);
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_extra(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	void *tmp = ESP_SKB_CB(skb)->tmp;
	struct esp_output_extra *extra = esp_tmp_extra(tmp);

	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
				sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb,
					       struct xfrm_state *x,
					       struct ip_esp_hdr *esph,
					       struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}

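/* Write the ESP trailer: optional TFC padding, the self-describing pad
 * bytes 1, 2, 3, ..., then the pad length and the next header value
 * (see RFC 4303).
 */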
static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
{
	/* Fill padding... */
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = proto;
}

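/* Build the UDP header used for NAT traversal (RFC 3948).  For the
 * non-IKE variant, two zero words are inserted between the UDP header
 * and the ESP header.
 */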
static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	int encap_type;
	struct udphdr *uh;
	__be32 *udpdata32;
	__be16 sport, dport;
	struct xfrm_encap_tmpl *encap = x->encap;
	struct ip_esp_hdr *esph = esp->esph;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	encap_type = encap->encap_type;
	spin_unlock_bh(&x->lock);

	uh = (struct udphdr *)esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(skb->len + esp->tailen
			- skb_transport_offset(skb));
	uh->check = 0;

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
		esph = (struct ip_esp_hdr *)(uh + 1);
		break;
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		udpdata32 = (__be32 *)(uh + 1);
		udpdata32[0] = udpdata32[1] = 0;
		esph = (struct ip_esp_hdr *)(udpdata32 + 2);
		break;
	}

	*skb_mac_header(skb) = IPPROTO_UDP;
	esp->esph = esph;
}

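/* Make room for the ESP trailer.  If the skb is not cloned and has spare
 * tail room or a free fragment slot, the trailer is written into a page
 * fragment taken from x->xfrag to avoid copying; otherwise the data is
 * made writable with skb_cow_data().  Returns the number of fragments
 * needed for the scatterlist or a negative error.
 */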
int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	u8 *vaddr;
	int nfrags;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	/* this is non-NULL only with UDP Encapsulation */
	if (x->encap)
		esp_output_udp_encap(x, skb, esp);

	if (!skb_cloned(skb)) {
		if (tailen <= skb_availroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			vaddr = kmap_atomic(page);

			tail = vaddr + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			kunmap_atomic(vaddr);

			spin_unlock_bh(&x->lock);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;
			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk)
				atomic_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp_output_head);

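/* Perform the actual encryption: allocate the temporary buffer, build the
 * source (and, for out-of-place operation, destination) scatterlists,
 * derive the IV from the sequence number and issue the AEAD request.
 */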
int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_request *req;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_extra(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	skb_to_sgvec(skb, sg,
		     (unsigned char *)esph - skb->data,
		     assoclen + ivlen + esp->clen + alen);

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		skb_to_sgvec(skb, dsg,
			     (unsigned char *)esph - skb->data,
			     assoclen + ivlen + esp->clen + alen);
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -EBUSY:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);
	kfree(tmp);

error:
	return err;
}
EXPORT_SYMBOL_GPL(esp_output_tail);

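/* xfrm_type output handler for IPv4 ESP: compute padding and trailer
 * sizes, fill in the SPI and sequence number, and hand the packet to
 * esp_output_head()/esp_output_tail().
 */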
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp_output_tail(x, skb, &esp);
}

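/* Post-decryption processing: validate the trailer, handle NAT-T
 * address/port changes, trim padding and ICV, and return the inner
 * protocol number (or a negative error).
 */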
int esp_input_done2(struct sk_buff *skb, int err)
{
	const struct iphdr *iph;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
	int ihl;
	u8 nexthdr[2];
	int padlen;

	if (!xo || (xo && !(xo->flags & CRYPTO_DONE)))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	err = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen)
		goto out;

	/* ... check padding bits here. Silly. :-) */

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    uh->source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, uh->source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	err = nexthdr[1];

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp_input_done2);

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)skb->data;

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = (void *)skb_push(skb, 4);
		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}

/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(*esph) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	void *tmp;
	u8 *iv;
	struct scatterlist *sg;
	int err = -EINVAL;

	if (!pskb_may_pull(skb, sizeof(*esph) + ivlen))
		goto out;

	if (elen <= 0)
		goto out;

	assoclen = sizeof(*esph);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;

	nfrags = err;

skip_cow:
	err = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, 0, skb->len);

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	err = esp_input_done2(skb, err);

out:
	return err;
}

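/* Largest payload that still fits into @mtu once the ESP header, IV,
 * padding and ICV are accounted for, rounded down to the cipher block
 * size.
 */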
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
	struct crypto_aead *aead = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	unsigned int net_adj;

	switch (x->props.mode) {
	case XFRM_MODE_TRANSPORT:
	case XFRM_MODE_BEET:
		net_adj = sizeof(struct iphdr);
		break;
	case XFRM_MODE_TUNNEL:
		net_adj = 0;
		break;
	default:
		BUG();
	}

	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
		 net_adj) & ~(blksize - 1)) + net_adj - 2;
}

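/* ICMP error handler: update the path MTU or redirect information for
 * the state identified by the packet's destination address and SPI.
 */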
static int esp4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
	struct xfrm_state *x;

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
	else
		ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
	xfrm_state_put(x);

	return 0;
}

static void esp_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

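/* Set up a combined-mode (AEAD) transform named "<geniv>(<alg>)" and
 * program its key and ICV length from the xfrm state.
 */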
static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;
	u32 mask = 0;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	if (x->xso.offload_handle)
		mask |= CRYPTO_ALG_ASYNC;

	aead = crypto_alloc_aead(aead_name, 0, mask);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}

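/* Set up an authenc/authencesn transform from separate authentication and
 * encryption algorithms, packing both keys into the crypto_authenc key
 * format: an rtattr carrying the encryption key length, followed by the
 * authentication key and the encryption key.
 */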
static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;
	u32 mask = 0;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	if (x->xso.offload_handle)
		mask |= CRYPTO_ALG_ASYNC;

	aead = crypto_alloc_aead(authenc_name, 0, mask);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %hu\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

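/* Initialise an ESP state: create the AEAD transform and derive
 * props.header_len and props.trailer_len, including any tunnel, BEET or
 * UDP encapsulation overhead.
 */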
static int esp_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp4_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp_type =
{
	.description	= "ESP4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp_init_state,
	.destructor	= esp_destroy,
	.get_mtu	= esp4_get_mtu,
	.input		= esp_input,
	.output		= esp_output,
};

static struct xfrm4_protocol esp4_protocol = {
	.handler	= xfrm4_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= esp4_rcv_cb,
	.err_handler	= esp4_err,
	.priority	= 0,
};

static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit esp4_fini(void)
{
	if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);