/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <crypto/aead.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/tls.h>

/* device_offload_lock is used to synchronize tls_dev_add
 * against NETDEV_DOWN notifications.
 */
static DECLARE_RWSEM(device_offload_lock);

static void tls_device_gc_task(struct work_struct *work);

static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
static LIST_HEAD(tls_device_gc_list);
static LIST_HEAD(tls_device_list);
static DEFINE_SPINLOCK(tls_device_lock);

static void tls_device_free_ctx(struct tls_context *ctx)
{
	if (ctx->tx_conf == TLS_HW) {
		kfree(tls_offload_ctx_tx(ctx));
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
	}

	if (ctx->rx_conf == TLS_HW)
		kfree(tls_offload_ctx_rx(ctx));

	kfree(ctx);
}

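/* Walk the list of contexts queued for destruction: tell the device to
 * stop TX offload for each one, drop the netdev reference and free the
 * context itself.
 */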
static void tls_device_gc_task(struct work_struct *work)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(gc_list);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_splice_init(&tls_device_gc_list, &gc_list);
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
		struct net_device *netdev = ctx->netdev;

		if (netdev && ctx->tx_conf == TLS_HW) {
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
			dev_put(netdev);
			ctx->netdev = NULL;
		}

		list_del(&ctx->list);
		tls_device_free_ctx(ctx);
	}
}

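/* Move the context onto the destruction list and kick the garbage
 * collection work. Called once the last reference on the context has
 * been dropped.
 */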
static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&tls_device_lock, flags);
	list_move_tail(&ctx->list, &tls_device_gc_list);

	/* schedule_work inside the spinlock
	 * to make sure tls_device_down waits for that work.
	 */
	schedule_work(&tls_device_gc_work);

	spin_unlock_irqrestore(&tls_device_lock, flags);
}

/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);
	struct net_device *netdev = NULL;

	if (likely(dst)) {
		netdev = dst->dev;
		dev_hold(netdev);
	}

	dst_release(dst);

	return netdev;
}

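/* Release the page references held by a record's frags and free it */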
static void destroy_record(struct tls_record_info *record)
{
	int nr_frags = record->num_frags;
	skb_frag_t *frag;

	while (nr_frags-- > 0) {
		frag = &record->frags[nr_frags];
		__skb_frag_unref(frag);
	}
	kfree(record);
}

static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
{
	struct tls_record_info *info, *temp;

	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
		list_del(&info->list);
		destroy_record(info);
	}

	offload_ctx->retransmit_hint = NULL;
}

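/* Invoked via the clean_acked_data hook when TCP data is acked: free
 * every record that the peer has fully acknowledged, since it can no
 * longer be needed for retransmission.
 */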
static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_record_info *info, *temp;
	struct tls_offload_context_tx *ctx;
	u64 deleted_records = 0;
	unsigned long flags;

	if (!tls_ctx)
		return;

	ctx = tls_offload_ctx_tx(tls_ctx);

	spin_lock_irqsave(&ctx->lock, flags);
	info = ctx->retransmit_hint;
	if (info && !before(acked_seq, info->end_seq)) {
		ctx->retransmit_hint = NULL;
		list_del(&info->list);
		destroy_record(info);
		deleted_records++;
	}

	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
		if (before(acked_seq, info->end_seq))
			break;
		list_del(&info->list);

		destroy_record(info);
		deleted_records++;
	}

	ctx->unacked_record_sn += deleted_records;
	spin_unlock_irqrestore(&ctx->lock, flags);
}

/* At this point, there should be no references on this
 * socket and no in-flight SKBs associated with this
 * socket, so it is safe to free all the resources.
 */
static void tls_device_sk_destruct(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);

	tls_ctx->sk_destruct(sk);

	if (tls_ctx->tx_conf == TLS_HW) {
		if (ctx->open_record)
			destroy_record(ctx->open_record);
		delete_all_records(ctx);
		crypto_free_aead(ctx->aead_send);
		clean_acked_data_disable(inet_csk(sk));
	}

	if (refcount_dec_and_test(&tls_ctx->refcount))
		tls_device_queue_ctx_destruction(tls_ctx);
}

void tls_device_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_free_partial_record(sk, tls_ctx);
}

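/* Append data from pfrag to the record's frag array. If the new data
 * is contiguous with the last frag, simply grow it; otherwise start a
 * new frag and take a page reference.
 */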
static void tls_append_frag(struct tls_record_info *record,
			    struct page_frag *pfrag,
			    int size)
{
	skb_frag_t *frag;

	frag = &record->frags[record->num_frags - 1];
	if (frag->page.p == pfrag->page &&
	    frag->page_offset + frag->size == pfrag->offset) {
		frag->size += size;
	} else {
		++frag;
		frag->page.p = pfrag->page;
		frag->page_offset = pfrag->offset;
		frag->size = size;
		++record->num_frags;
		get_page(pfrag->page);
	}

	pfrag->offset += size;
	record->len += size;
}

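/* Close the currently open record: fill in the TLS header and a dummy
 * tag (the device computes the real one), queue the record on the
 * offload context's record list, and hand its pages to the TCP layer
 * for transmission.
 */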
static int tls_push_record(struct sock *sk,
			   struct tls_context *ctx,
			   struct tls_offload_context_tx *offload_ctx,
			   struct tls_record_info *record,
			   struct page_frag *pfrag,
			   int flags,
			   unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	struct tcp_sock *tp = tcp_sk(sk);
	struct page_frag dummy_tag_frag;
	skb_frag_t *frag;
	int i;

	/* fill prepend */
	frag = &record->frags[0];
	tls_fill_prepend(ctx,
			 skb_frag_address(frag),
			 record->len - prot->prepend_size,
			 record_type,
			 ctx->crypto_send.info.version);

	/* HW doesn't care about the data in the tag, because it fills it. */
	dummy_tag_frag.page = skb_frag_page(frag);
	dummy_tag_frag.offset = 0;

	tls_append_frag(record, &dummy_tag_frag, prot->tag_size);
	record->end_seq = tp->write_seq + record->len;
	spin_lock_irq(&offload_ctx->lock);
	list_add_tail(&record->list, &offload_ctx->records_list);
	spin_unlock_irq(&offload_ctx->lock);
	offload_ctx->open_record = NULL;
	tls_advance_record_sn(sk, &ctx->tx, ctx->crypto_send.info.version);

	for (i = 0; i < record->num_frags; i++) {
		frag = &record->frags[i];
		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
			    frag->size, frag->page_offset);
		sk_mem_charge(sk, frag->size);
		get_page(skb_frag_page(frag));
	}
	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);

	/* all ready, send */
	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}

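/* Allocate a new TX record and reserve room for the TLS header
 * (prepend) in its first frag.
 */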
static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
				 struct page_frag *pfrag,
				 size_t prepend_size)
{
	struct tls_record_info *record;
	skb_frag_t *frag;

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record)
		return -ENOMEM;

	frag = &record->frags[0];
	__skb_frag_set_page(frag, pfrag->page);
	frag->page_offset = pfrag->offset;
	skb_frag_size_set(frag, prepend_size);

	get_page(pfrag->page);
	pfrag->offset += prepend_size;

	record->num_frags = 1;
	record->len = prepend_size;
	offload_ctx->open_record = record;
	return 0;
}

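/* Make sure there is an open record and that the socket's page frag
 * has room for more payload, opening a new record and refilling the
 * frag as needed.
 */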
static int tls_do_allocation(struct sock *sk,
			     struct tls_offload_context_tx *offload_ctx,
			     struct page_frag *pfrag,
			     size_t prepend_size)
{
	int ret;

	if (!offload_ctx->open_record) {
		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
						   sk->sk_allocation))) {
			sk->sk_prot->enter_memory_pressure(sk);
			sk_stream_moderate_sndbuf(sk);
			return -ENOMEM;
		}

		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
		if (ret)
			return ret;

		if (pfrag->size > pfrag->offset)
			return 0;
	}

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;

	return 0;
}

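/* Main TX path for device offload: copy data from the iterator into
 * page frags appended to the open record, and push a record out
 * whenever it fills up, the data runs out, or the frag array is
 * exhausted. Returns the number of bytes consumed if any data was
 * queued, otherwise an error code.
 */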
static int tls_push_data(struct sock *sk,
			 struct iov_iter *msg_iter,
			 size_t size, int flags,
			 unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
	int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
	struct tls_record_info *record = ctx->open_record;
	struct page_frag *pfrag;
	size_t orig_size = size;
	u32 max_open_record_len;
	int copy, rc = 0;
	bool done = false;
	long timeo;

	if (flags &
	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	if (sk->sk_err)
		return -sk->sk_err;

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if (tls_is_partially_sent_record(tls_ctx)) {
		rc = tls_push_partial_record(sk, tls_ctx, flags);
		if (rc < 0)
			return rc;
	}

	pfrag = sk_page_frag(sk);

	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
	 * we need to leave room for an authentication tag.
	 */
	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
			      prot->prepend_size;
	do {
		rc = tls_do_allocation(sk, ctx, pfrag,
				       prot->prepend_size);
		if (rc) {
			rc = sk_stream_wait_memory(sk, &timeo);
			if (!rc)
				continue;

			record = ctx->open_record;
			if (!record)
				break;
handle_error:
			if (record_type != TLS_RECORD_TYPE_DATA) {
				/* avoid sending a partial record
				 * with type != application_data
				 */
				size = orig_size;
				destroy_record(record);
				ctx->open_record = NULL;
			} else if (record->len > prot->prepend_size) {
				goto last_record;
			}

			break;
		}

		record = ctx->open_record;
		copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
		copy = min_t(size_t, copy, (max_open_record_len - record->len));

		if (copy_from_iter_nocache(page_address(pfrag->page) +
					   pfrag->offset,
					   copy, msg_iter) != copy) {
			rc = -EFAULT;
			goto handle_error;
		}
		tls_append_frag(record, pfrag, copy);

		size -= copy;
		if (!size) {
last_record:
			tls_push_record_flags = flags;
			if (more) {
				tls_ctx->pending_open_record_frags =
						!!record->num_frags;
				break;
			}

			done = true;
		}

		if (done || record->len >= max_open_record_len ||
		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
			rc = tls_push_record(sk,
					     tls_ctx,
					     ctx,
					     record,
					     pfrag,
					     tls_push_record_flags,
					     record_type);
			if (rc < 0)
				break;
		}
	} while (!done);

	if (orig_size - size > 0)
		rc = orig_size - size;

	return rc;
}

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int rc;

	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		rc = tls_proccess_cmsg(sk, msg, &record_type);
		if (rc)
			goto out;
	}

	rc = tls_push_data(sk, &msg->msg_iter, size,
			   msg->msg_flags, record_type);

out:
	release_sock(sk);
	return rc;
}

int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags)
{
	struct iov_iter msg_iter;
	char *kaddr = kmap(page);
	struct kvec iov;
	int rc;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);

	if (flags & MSG_OOB) {
		rc = -ENOTSUPP;
		goto out;
	}

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
	rc = tls_push_data(sk, &msg_iter, size,
			   flags, TLS_RECORD_TYPE_DATA);
	kunmap(page);

out:
	release_sock(sk);
	return rc;
}

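/* Find the record that contains TCP sequence number @seq and return
 * its record sequence number through @p_record_sn. Used by drivers on
 * retransmission; retransmit_hint caches the last hit so that the
 * common in-order case avoids walking the list from the start.
 */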
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn)
{
	u64 record_sn = context->hint_record_sn;
	struct tls_record_info *info;

	info = context->retransmit_hint;
	if (!info ||
	    before(seq, info->end_seq - info->len)) {
		/* if retransmit_hint is irrelevant start
		 * from the beginning of the list
		 */
		info = list_first_entry(&context->records_list,
					struct tls_record_info, list);
		record_sn = context->unacked_record_sn;
	}

	list_for_each_entry_from(info, &context->records_list, list) {
		if (before(seq, info->end_seq)) {
			if (!context->retransmit_hint ||
			    after(info->end_seq,
				  context->retransmit_hint->end_seq)) {
				context->hint_record_sn = record_sn;
				context->retransmit_hint = info;
			}
			*p_record_sn = record_sn;
			return info;
		}
		record_sn++;
	}

	return NULL;
}
EXPORT_SYMBOL(tls_get_record);

static int tls_device_push_pending_record(struct sock *sk, int flags)
{
	struct iov_iter msg_iter;

	iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0);
	return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
}

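/* Called when the socket regains write space: retry pushing a
 * partially sent record. May run in atomic context, hence the
 * temporary switch to GFP_ATOMIC allocations.
 */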
void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
{
	int rc = 0;

	if (!sk->sk_write_pending && tls_is_partially_sent_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;

		sk->sk_allocation = GFP_ATOMIC;
		rc = tls_push_partial_record(sk, ctx,
					     MSG_DONTWAIT | MSG_NOSIGNAL);
		sk->sk_allocation = sk_allocation;
	}
}

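/* If the device has requested an RX resync and the current record
 * starts at the requested TCP sequence number, hand the record
 * sequence number back to the driver so it can resume decryption.
 */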
void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct net_device *netdev = tls_ctx->netdev;
	struct tls_offload_context_rx *rx_ctx;
	u32 is_req_pending;
	s64 resync_req;
	u32 req_seq;

	if (tls_ctx->rx_conf != TLS_HW)
		return;

	rx_ctx = tls_offload_ctx_rx(tls_ctx);
	resync_req = atomic64_read(&rx_ctx->resync_req);
	req_seq = (resync_req >> 32) - ((u32)TLS_HEADER_SIZE - 1);
	is_req_pending = resync_req;

	if (unlikely(is_req_pending) && req_seq == seq &&
	    atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
		netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk,
						      seq + TLS_HEADER_SIZE - 1,
						      rcd_sn);
}

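/* The device decrypted only part of this record. Run the SW "decrypt"
 * over the whole skb into a bounce buffer: because AES-GCM is a
 * stream cipher, this turns the already-plaintext segments back into
 * ciphertext, which we then copy over the segments the device had
 * decrypted. Afterwards the record is entirely ciphertext and the
 * regular SW RX path can decrypt it. The -EBADMSG from the mixed
 * input is expected and ignored.
 */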
static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int err = 0, offset = rxm->offset, copy, nsg;
	struct sk_buff *skb_iter, *unused;
	struct scatterlist sg[1];
	char *orig_buf, *buf;

	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE +
			   TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation);
	if (!orig_buf)
		return -ENOMEM;
	buf = orig_buf;

	nsg = skb_cow_data(skb, 0, &unused);
	if (unlikely(nsg < 0)) {
		err = nsg;
		goto free_buf;
	}

	sg_init_table(sg, 1);
	sg_set_buf(&sg[0], buf,
		   rxm->full_len + TLS_HEADER_SIZE +
		   TLS_CIPHER_AES_GCM_128_IV_SIZE);
	skb_copy_bits(skb, offset, buf,
		      TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);

	/* We are interested only in the decrypted data, not the auth tag */
	err = decrypt_skb(sk, skb, sg);
	if (err != -EBADMSG)
		goto free_buf;
	else
		err = 0;

	copy = min_t(int, skb_pagelen(skb) - offset,
		     rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);

	if (skb->decrypted)
		skb_store_bits(skb, offset, buf, copy);

	offset += copy;
	buf += copy;

	skb_walk_frags(skb, skb_iter) {
		copy = min_t(int, skb_iter->len,
			     rxm->full_len - offset + rxm->offset -
			     TLS_CIPHER_AES_GCM_128_TAG_SIZE);

		if (skb_iter->decrypted)
			skb_store_bits(skb_iter, offset, buf, copy);

		offset += copy;
		buf += copy;
	}

free_buf:
	kfree(orig_buf);
	return err;
}

int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
	int is_decrypted = skb->decrypted;
	int is_encrypted = !is_decrypted;
	struct sk_buff *skb_iter;

	/* Skip if it is already decrypted */
	if (ctx->sw.decrypted)
		return 0;

	/* Check if all the data is decrypted already */
	skb_walk_frags(skb, skb_iter) {
		is_decrypted &= skb_iter->decrypted;
		is_encrypted &= !skb_iter->decrypted;
	}

	ctx->sw.decrypted |= is_decrypted;

	/* Return immediately if the record is either entirely plaintext or
	 * entirely ciphertext. Otherwise, re-encrypt the partially decrypted
	 * record.
	 */
	return (is_encrypted || is_decrypted) ? 0 :
		tls_device_reencrypt(sk, skb);
}

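/* Take a reference on the netdev, register the context on the global
 * device list and install tls_device_sk_destruct as the socket's
 * destructor, saving the old one so it can be chained.
 */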
static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
			      struct net_device *netdev)
{
	if (sk->sk_destruct != tls_device_sk_destruct) {
		refcount_set(&ctx->refcount, 1);
		dev_hold(netdev);
		ctx->netdev = netdev;
		spin_lock_irq(&tls_device_lock);
		list_add_tail(&ctx->list, &tls_device_list);
		spin_unlock_irq(&tls_device_lock);

		ctx->sk_destruct = sk->sk_destruct;
		sk->sk_destruct = tls_device_sk_destruct;
	}
}

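/* Set up TLS TX offload for @sk: allocate the TX offload context, set
 * up the crypto state from userspace's crypto_info, insert a
 * zero-length start marker record, and ask the netdev to install the
 * key via tls_dev_add().
 */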
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_record_info *start_marker_record;
	struct tls_offload_context_tx *offload_ctx;
	struct tls_crypto_info *crypto_info;
	struct net_device *netdev;
	char *iv, *rec_seq;
	struct sk_buff *skb;
	int rc = -EINVAL;
	__be64 rcd_sn;

	if (!ctx)
		goto out;

	if (ctx->priv_ctx_tx) {
		rc = -EEXIST;
		goto out;
	}

	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
	if (!start_marker_record) {
		rc = -ENOMEM;
		goto out;
	}

	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
	if (!offload_ctx) {
		rc = -ENOMEM;
		goto free_marker_record;
	}

	crypto_info = &ctx->crypto_send.info;
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		break;
	default:
		rc = -EINVAL;
		goto free_offload_ctx;
	}

	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = tag_size;
	prot->overhead_size = prot->prepend_size + prot->tag_size;
	prot->iv_size = iv_size;
	ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			     GFP_KERNEL);
	if (!ctx->tx.iv) {
		rc = -ENOMEM;
		goto free_offload_ctx;
	}

	memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);

	prot->rec_seq_size = rec_seq_size;
	ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!ctx->tx.rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
	if (rc)
		goto free_rec_seq;

	/* start at rec_seq - 1 to account for the start marker record */
	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;

	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
	start_marker_record->len = 0;
	start_marker_record->num_frags = 0;

	INIT_LIST_HEAD(&offload_ctx->records_list);
	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
	spin_lock_init(&offload_ctx->lock);
	sg_init_table(offload_ctx->sg_tx_data,
		      ARRAY_SIZE(offload_ctx->sg_tx_data));

	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
	ctx->push_pending_record = tls_device_push_pending_record;

	/* TLS offload is greatly simplified if we don't send
	 * SKBs where only part of the payload needs to be encrypted.
	 * So mark the last skb in the write queue as end of record.
	 */
	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	/* We support starting offload on multiple sockets
	 * concurrently, so we only need a read lock here.
	 * This lock must precede get_netdev_for_sock to prevent races between
	 * NETDEV_DOWN and setsockopt.
	 */
	down_read(&device_offload_lock);
	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		rc = -EINVAL;
		goto release_lock;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
		rc = -ENOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down.
	 * We don't want to offload new flows after the NETDEV_DOWN event.
	 */
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_netdev;
	}

	ctx->priv_ctx_tx = offload_ctx;
	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
					     &ctx->crypto_send.info,
					     tcp_sk(sk)->write_seq);
	if (rc)
		goto release_netdev;

	tls_device_attach(ctx, sk, netdev);

	/* following this assignment tls_is_sk_tx_device_offloaded
	 * will return true and the context might be accessed
	 * by the netdev's xmit function.
	 */
	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
	dev_put(netdev);
	up_read(&device_offload_lock);
	goto out;

release_netdev:
	dev_put(netdev);
release_lock:
	up_read(&device_offload_lock);
	clean_acked_data_disable(inet_csk(sk));
	crypto_free_aead(offload_ctx->aead_send);
free_rec_seq:
	kfree(ctx->tx.rec_seq);
free_iv:
	kfree(ctx->tx.iv);
free_offload_ctx:
	kfree(offload_ctx);
	ctx->priv_ctx_tx = NULL;
free_marker_record:
	kfree(start_marker_record);
out:
	return rc;
}

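/* Set up TLS RX offload for @sk: allocate the RX offload context,
 * initialize the SW RX path (used as a fallback for records the
 * device could not decrypt), and ask the netdev to install the
 * receive key via tls_dev_add().
 */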
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	struct tls_offload_context_rx *context;
	struct net_device *netdev;
	int rc = 0;

	/* We support starting offload on multiple sockets
	 * concurrently, so we only need a read lock here.
	 * This lock must precede get_netdev_for_sock to prevent races between
	 * NETDEV_DOWN and setsockopt.
	 */
	down_read(&device_offload_lock);
	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		rc = -EINVAL;
		goto release_lock;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
		rc = -ENOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down.
	 * We don't want to offload new flows after the NETDEV_DOWN event.
	 */
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_netdev;
	}

	context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
	if (!context) {
		rc = -ENOMEM;
		goto release_netdev;
	}

	ctx->priv_ctx_rx = context;
	rc = tls_set_sw_offload(sk, ctx, 0);
	if (rc)
		goto release_ctx;

	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
					     &ctx->crypto_recv.info,
					     tcp_sk(sk)->copied_seq);
	if (rc)
		goto free_sw_resources;

	tls_device_attach(ctx, sk, netdev);
	goto release_netdev;

free_sw_resources:
	up_read(&device_offload_lock);
	tls_sw_free_resources_rx(sk);
	down_read(&device_offload_lock);
release_ctx:
	ctx->priv_ctx_rx = NULL;
release_netdev:
	dev_put(netdev);
release_lock:
	up_read(&device_offload_lock);
	return rc;
}

void tls_device_offload_cleanup_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct net_device *netdev;

	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (!netdev)
		goto out;

	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
		pr_err_ratelimited("%s: device is missing NETIF_F_HW_TLS_RX cap\n",
				   __func__);
		goto out;
	}

	netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
					TLS_OFFLOAD_CTX_DIR_RX);

	if (tls_ctx->tx_conf != TLS_HW) {
		dev_put(netdev);
		tls_ctx->netdev = NULL;
	}
out:
	up_read(&device_offload_lock);
	tls_sw_release_resources_rx(sk);
}

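/* NETDEV_DOWN handler: tear down offload state for every context bound
 * to @netdev. New offload attempts are blocked by taking the write
 * side of device_offload_lock.
 */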
static int tls_device_down(struct net_device *netdev)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	/* Request a write lock to block new offload attempts */
	down_write(&device_offload_lock);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
		if (ctx->netdev != netdev ||
		    !refcount_inc_not_zero(&ctx->refcount))
			continue;

		list_move(&ctx->list, &list);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &list, list) {
		if (ctx->tx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
		if (ctx->rx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_RX);
		ctx->netdev = NULL;
		dev_put(netdev);
		list_del_init(&ctx->list);

		if (refcount_dec_and_test(&ctx->refcount))
			tls_device_free_ctx(ctx);
	}

	up_write(&device_offload_lock);

	flush_work(&tls_device_gc_work);

	return NOTIFY_DONE;
}

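/* netdevice notifier: validate tlsdev_ops on register/feature change
 * and tear down offload state on NETDEV_DOWN.
 */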
static int tls_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_FEAT_CHANGE:
		if ((dev->features & NETIF_F_HW_TLS_RX) &&
		    !dev->tlsdev_ops->tls_dev_resync_rx)
			return NOTIFY_BAD;

		if (dev->tlsdev_ops &&
		    dev->tlsdev_ops->tls_dev_add &&
		    dev->tlsdev_ops->tls_dev_del)
			return NOTIFY_DONE;
		else
			return NOTIFY_BAD;
	case NETDEV_DOWN:
		return tls_device_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block tls_dev_notifier = {
	.notifier_call = tls_dev_event,
};

void __init tls_device_init(void)
{
	register_netdevice_notifier(&tls_dev_notifier);
}

void __exit tls_device_cleanup(void)
{
	unregister_netdevice_notifier(&tls_dev_notifier);
	flush_work(&tls_device_gc_work);
}