/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

/* SHA initial context values */
static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
	cpu_to_be32(SHA1_H4), 0, 0, 0,
};

static const __be32 ccp_sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};
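
/* Each table spans the full SHA context size (CCP_SHA_CTXSIZE, eight
 * 32-bit words); SHA-1 defines only five state words, so its table is
 * explicitly zero-padded to fill the remaining entries.
 */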

static u32 ccp_alloc_ksb(struct ccp_device *ccp, unsigned int count)
{
	int start;

	for (;;) {
		mutex_lock(&ccp->ksb_mutex);

		start = (u32)bitmap_find_next_zero_area(ccp->ksb,
							ccp->ksb_count,
							ccp->ksb_start,
							count, 0);
		if (start <= ccp->ksb_count) {
			bitmap_set(ccp->ksb, start, count);

			mutex_unlock(&ccp->ksb_mutex);
			break;
		}

		ccp->ksb_avail = 0;

		mutex_unlock(&ccp->ksb_mutex);

		/* Wait for KSB entries to become available */
		if (wait_event_interruptible(ccp->ksb_queue, ccp->ksb_avail))
			return 0;
	}

	return KSB_START + start;
}

static void ccp_free_ksb(struct ccp_device *ccp, unsigned int start,
			 unsigned int count)
{
	if (!start)
		return;

	mutex_lock(&ccp->ksb_mutex);

	bitmap_clear(ccp->ksb, start - KSB_START, count);

	ccp->ksb_avail = 1;

	mutex_unlock(&ccp->ksb_mutex);

	wake_up_interruptible_all(&ccp->ksb_queue);
}
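
/* Typical pairing of the two helpers above (see ccp_run_rsa_cmd() below):
 * a caller reserves a contiguous run of KSB entries, uses the returned
 * KSB_START-biased index for its operation, then releases the run. A
 * return of 0 from ccp_alloc_ksb() means the wait was interrupted and
 * nothing was allocated, which is why ccp_free_ksb() treats a start of 0
 * as a no-op.
 */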

static u32 ccp_gen_jobid(struct ccp_device *ccp)
{
	return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
}

static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
	if (wa->dma_count)
		dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);

	wa->dma_count = 0;
}

static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
				struct scatterlist *sg, u64 len,
				enum dma_data_direction dma_dir)
{
	memset(wa, 0, sizeof(*wa));

	wa->sg = sg;
	if (!sg)
		return 0;

	wa->nents = sg_nents_for_len(sg, len);
	if (wa->nents < 0)
		return wa->nents;

	wa->bytes_left = len;
	wa->sg_used = 0;

	if (len == 0)
		return 0;

	if (dma_dir == DMA_NONE)
		return 0;

	wa->dma_sg = sg;
	wa->dma_dev = dev;
	wa->dma_dir = dma_dir;
	wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
	if (!wa->dma_count)
		return -ENOMEM;

	return 0;
}

static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
	unsigned int nbytes = min_t(u64, len, wa->bytes_left);

	if (!wa->sg)
		return;

	wa->sg_used += nbytes;
	wa->bytes_left -= nbytes;
	if (wa->sg_used == wa->sg->length) {
		wa->sg = sg_next(wa->sg);
		wa->sg_used = 0;
	}
}

static void ccp_dm_free(struct ccp_dm_workarea *wa)
{
	if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
		if (wa->address)
			dma_pool_free(wa->dma_pool, wa->address,
				      wa->dma.address);
	} else {
		if (wa->dma.address)
			dma_unmap_single(wa->dev, wa->dma.address, wa->length,
					 wa->dma.dir);
		kfree(wa->address);
	}

	wa->address = NULL;
	wa->dma.address = 0;
}

static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
				struct ccp_cmd_queue *cmd_q,
				unsigned int len,
				enum dma_data_direction dir)
{
	memset(wa, 0, sizeof(*wa));

	if (!len)
		return 0;

	wa->dev = cmd_q->ccp->dev;
	wa->length = len;

	if (len <= CCP_DMAPOOL_MAX_SIZE) {
		wa->dma_pool = cmd_q->dma_pool;

		wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL,
					     &wa->dma.address);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.length = CCP_DMAPOOL_MAX_SIZE;

		memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
	} else {
		wa->address = kzalloc(len, GFP_KERNEL);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.address = dma_map_single(wa->dev, wa->address, len,
						 dir);
		if (!wa->dma.address)
			return -ENOMEM;

		wa->dma.length = len;
	}
	wa->dma.dir = dir;

	return 0;
}
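
/* Small work areas (len <= CCP_DMAPOOL_MAX_SIZE) come from the command
 * queue's pre-mapped DMA pool and are zeroed to the full pool block size;
 * larger ones fall back to kzalloc() plus a streaming dma_map_single()
 * mapping. ccp_dm_free() above unwinds whichever path was taken.
 */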

static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 0);
}

static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 1);
}

static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
				   struct scatterlist *sg,
				   unsigned int len, unsigned int se_len,
				   bool sign_extend)
{
	unsigned int nbytes, sg_offset, dm_offset, ksb_len, i;
	u8 buffer[CCP_REVERSE_BUF_SIZE];

	if (WARN_ON(se_len > sizeof(buffer)))
		return -EINVAL;

	sg_offset = len;
	dm_offset = 0;
	nbytes = len;
	while (nbytes) {
		ksb_len = min_t(unsigned int, nbytes, se_len);
		sg_offset -= ksb_len;

		scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 0);
		for (i = 0; i < ksb_len; i++)
			wa->address[dm_offset + i] = buffer[ksb_len - i - 1];

		dm_offset += ksb_len;
		nbytes -= ksb_len;

		if ((ksb_len != se_len) && sign_extend) {
			/* Must sign-extend to nearest sign-extend length */
			if (wa->address[dm_offset - 1] & 0x80)
				memset(wa->address + dm_offset, 0xff,
				       se_len - ksb_len);
		}
	}

	return 0;
}
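
/* Worked example of the reversal above: with len == 6, se_len == 4 and a
 * big-endian source 01 02 03 04 05 06, the first pass copies the last
 * four source bytes (03 04 05 06) reversed to offset 0, the second pass
 * copies the remaining two (01 02) reversed after them, yielding the
 * little-endian sequence 06 05 04 03 02 01. Sign extension, when
 * requested, pads a short final chunk with 0xff only if the value's most
 * significant byte has its top bit set.
 */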

static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
				    struct scatterlist *sg,
				    unsigned int len)
{
	unsigned int nbytes, sg_offset, dm_offset, ksb_len, i;
	u8 buffer[CCP_REVERSE_BUF_SIZE];

	sg_offset = 0;
	dm_offset = len;
	nbytes = len;
	while (nbytes) {
		ksb_len = min_t(unsigned int, nbytes, sizeof(buffer));
		dm_offset -= ksb_len;

		for (i = 0; i < ksb_len; i++)
			buffer[ksb_len - i - 1] = wa->address[dm_offset + i];
		scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 1);

		sg_offset += ksb_len;
		nbytes -= ksb_len;
	}
}

static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
{
	ccp_dm_free(&data->dm_wa);
	ccp_sg_free(&data->sg_wa);
}

static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
			 struct scatterlist *sg, u64 sg_len,
			 unsigned int dm_len,
			 enum dma_data_direction dir)
{
	int ret;

	memset(data, 0, sizeof(*data));

	ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
				   dir);
	if (ret)
		goto e_err;

	ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
	if (ret)
		goto e_err;

	return 0;

e_err:
	ccp_free_data(data, cmd_q);

	return ret;
}

static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
{
	struct ccp_sg_workarea *sg_wa = &data->sg_wa;
	struct ccp_dm_workarea *dm_wa = &data->dm_wa;
	unsigned int buf_count, nbytes;

	/* Clear the buffer if setting it */
	if (!from)
		memset(dm_wa->address, 0, dm_wa->length);

	if (!sg_wa->sg)
		return 0;

	/* Perform the copy operation
	 * nbytes will always be <= UINT_MAX because dm_wa->length is
	 * an unsigned int
	 */
	nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
	scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
				 nbytes, from);

	/* Update the structures and generate the count */
	buf_count = 0;
	while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
		nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
			     dm_wa->length - buf_count);
		nbytes = min_t(u64, sg_wa->bytes_left, nbytes);

		buf_count += nbytes;
		ccp_update_sg_workarea(sg_wa, nbytes);
	}

	return buf_count;
}
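
/* ccp_queue_buf() moves data between the caller's scatterlist and the
 * bounce buffer in one direction per call: from == 0 fills the
 * (pre-zeroed) bounce buffer from the scatterlist, from == 1 drains it
 * back out. The two wrappers below name those directions.
 */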

static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 0);
}

static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 1);
}

static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op, unsigned int block_size,
			     bool blocksize_op)
{
	unsigned int sg_src_len, sg_dst_len, op_len;

	/* The CCP can only DMA from/to one address each per operation. This
	 * requires that we find the smallest DMA area between the source
	 * and destination. The resulting len values will always be <= UINT_MAX
	 * because the dma length is an unsigned int.
	 */
	sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);

	if (dst) {
		sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
		op_len = min(sg_src_len, sg_dst_len);
	} else {
		op_len = sg_src_len;
	}

	/* The data operation length will be at least block_size in length
	 * or the smaller of available sg room remaining for the source or
	 * the destination
	 */
	op_len = max(op_len, block_size);

	/* Unless we have to buffer data, there's no reason to wait */
	op->soc = 0;

	if (sg_src_len < block_size) {
		/* Not enough data in the sg element, so it
		 * needs to be buffered into a blocksize chunk
		 */
		int cp_len = ccp_fill_queue_buf(src);

		op->soc = 1;
		op->src.u.dma.address = src->dm_wa.dma.address;
		op->src.u.dma.offset = 0;
		op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
	} else {
		/* Enough data in the sg element, but we need to
		 * adjust for any previously copied data
		 */
		op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
		op->src.u.dma.offset = src->sg_wa.sg_used;
		op->src.u.dma.length = op_len & ~(block_size - 1);

		ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
	}

	if (dst) {
		if (sg_dst_len < block_size) {
			/* Not enough room in the sg element or we're on the
			 * last piece of data (when using padding), so the
			 * output needs to be buffered into a blocksize chunk
			 */
			op->soc = 1;
			op->dst.u.dma.address = dst->dm_wa.dma.address;
			op->dst.u.dma.offset = 0;
			op->dst.u.dma.length = op->src.u.dma.length;
		} else {
			/* Enough room in the sg element, but we need to
			 * adjust for any previously used area
			 */
			op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
			op->dst.u.dma.offset = dst->sg_wa.sg_used;
			op->dst.u.dma.length = op->src.u.dma.length;
		}
	}
}
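
/* Concrete case for the buffering path above: with a 16-byte
 * AES_BLOCK_SIZE, a source sg element holding only 5 unconsumed bytes
 * cannot be handed to the engine directly, so those bytes are staged
 * through the (zero-filled) bounce buffer and, for a blocksize_op,
 * presented as a full 16-byte DMA length. op->soc is set so the
 * operation is waited on and the bounce buffer can be safely reused on
 * the next pass.
 */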

static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op)
{
	op->init = 0;

	if (dst) {
		if (op->dst.u.dma.address == dst->dm_wa.dma.address)
			ccp_empty_queue_buf(dst);
		else
			ccp_update_sg_workarea(&dst->sg_wa,
					       op->dst.u.dma.length);
	}
}

static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q,
				struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
				u32 byte_swap, bool from)
{
	struct ccp_op op;

	memset(&op, 0, sizeof(op));

	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.eom = 1;

	if (from) {
		op.soc = 1;
		op.src.type = CCP_MEMTYPE_KSB;
		op.src.u.ksb = ksb;
		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = wa->dma.address;
		op.dst.u.dma.length = wa->length;
	} else {
		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = wa->dma.address;
		op.src.u.dma.length = wa->length;
		op.dst.type = CCP_MEMTYPE_KSB;
		op.dst.u.ksb = ksb;
	}

	op.u.passthru.byte_swap = byte_swap;

	return cmd_q->ccp->vdata->perform->passthru(&op);
}

static int ccp_copy_to_ksb(struct ccp_cmd_queue *cmd_q,
			   struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
			   u32 byte_swap)
{
	return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, false);
}

static int ccp_copy_from_ksb(struct ccp_cmd_queue *cmd_q,
			     struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
			     u32 byte_swap)
{
	return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, true);
}
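
/* KSB transfers are implemented as passthru operations. Callers pair
 * these helpers with a byte-swap selector: CCP_PASSTHRU_BYTESWAP_256BIT
 * converts a 32-byte big-endian value (AES key, IV, SHA state) to the
 * little-endian layout the engine expects, while
 * CCP_PASSTHRU_BYTESWAP_NOOP moves data that is already little endian,
 * such as the XTS tweak, the passthru mask, or the software-reversed
 * RSA exponent.
 */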

static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int dm_offset;
	int ret;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (aes->src_len & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (aes->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!aes->key || !aes->iv || !aes->src)
		return -EINVAL;

	if (aes->cmac_final) {
		if (aes->cmac_key_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->cmac_key)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);
	op.ksb_key = cmd_q->ksb_key;
	op.ksb_ctx = cmd_q->ksb_ctx;
	op.init = 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) KSB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_KSB_BYTES - aes->key_len;
	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
			      CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) KSB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
			      CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Send data to the CCP AES engine */
	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE, DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
		if (aes->cmac_final && !src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Push the K1/K2 key to the CCP now */
			ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid,
						op.ksb_ctx,
						CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}

			ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
					aes->cmac_key_len);
			ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
					      CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_src;
		}

		ccp_process_data(&src, NULL, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
				CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_src;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	bool in_place = false;
	int ret;

	if (aes->mode == CCP_AES_MODE_CMAC)
		return ccp_run_aes_cmac_cmd(cmd_q, cmd);

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (((aes->mode == CCP_AES_MODE_ECB) ||
	     (aes->mode == CCP_AES_MODE_CBC) ||
	     (aes->mode == CCP_AES_MODE_CFB)) &&
	    (aes->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!aes->key || !aes->src || !aes->dst)
		return -EINVAL;

	if (aes->mode != CCP_AES_MODE_ECB) {
		if (aes->iv_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->iv)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);
	op.ksb_key = cmd_q->ksb_key;
	op.ksb_ctx = cmd_q->ksb_ctx;
	op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) KSB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_KSB_BYTES - aes->key_len;
	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
			      CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) KSB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Load the AES context - convert to LE */
		dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
		ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
		ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
				      CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(aes->src) == sg_virt(aes->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
				    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the AES context in ECB
			 * mode we have to wait for the operation to complete
			 * on the last piece of data
			 */
			if (aes->mode == CCP_AES_MODE_ECB)
				op.soc = 1;
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Retrieve the AES context - convert from LE to BE using
		 * 32-byte (256-bit) byteswapping
		 */
		ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
					CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need AES_BLOCK_SIZE bytes */
		dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
		ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
			       struct ccp_cmd *cmd)
{
	struct ccp_xts_aes_engine *xts = &cmd->u.xts;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int unit_size, dm_offset;
	bool in_place = false;
	int ret;

	switch (xts->unit_size) {
	case CCP_XTS_AES_UNIT_SIZE_16:
		unit_size = 16;
		break;
	case CCP_XTS_AES_UNIT_SIZE_512:
		unit_size = 512;
		break;
	case CCP_XTS_AES_UNIT_SIZE_1024:
		unit_size = 1024;
		break;
	case CCP_XTS_AES_UNIT_SIZE_2048:
		unit_size = 2048;
		break;
	case CCP_XTS_AES_UNIT_SIZE_4096:
		unit_size = 4096;
		break;

	default:
		return -EINVAL;
	}

	if (xts->key_len != AES_KEYSIZE_128)
		return -EINVAL;

	if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (xts->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!xts->key || !xts->iv || !xts->src || !xts->dst)
		return -EINVAL;

	BUILD_BUG_ON(CCP_XTS_AES_KEY_KSB_COUNT != 1);
	BUILD_BUG_ON(CCP_XTS_AES_CTX_KSB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);
	op.ksb_key = cmd_q->ksb_key;
	op.ksb_ctx = cmd_q->ksb_ctx;
	op.init = 1;
	op.u.xts.action = xts->action;
	op.u.xts.unit_size = xts->unit_size;

	/* All supported key sizes fit in a single (32-byte) KSB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_XTS_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_KSB_BYTES - AES_KEYSIZE_128;
	ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
	ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
	ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
			      CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) KSB entry and
	 * for XTS is already in little endian format so no byte swapping
	 * is needed.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_XTS_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
	ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
			      CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(xts->src) == sg_virt(xts->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
			    unit_size,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
				    unit_size, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, unit_size, true);
		if (!src.sg_wa.bytes_left)
			op.eom = 1;

		ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
				CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_sha_engine *sha = &cmd->u.sha;
	struct ccp_dm_workarea ctx;
	struct ccp_data src;
	struct ccp_op op;
	int ret;

	if (sha->ctx_len != CCP_SHA_CTXSIZE)
		return -EINVAL;

	if (!sha->ctx)
		return -EINVAL;

	if (!sha->final && (sha->src_len & (CCP_SHA_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!sha->src_len) {
		const u8 *sha_zero;

		/* Not final, just return */
		if (!sha->final)
			return 0;

		/* CCP can't do a zero length sha operation so the caller
		 * must buffer the data.
		 */
		if (sha->msg_bits)
			return -EINVAL;

		/* The CCP cannot perform zero-length sha operations so the
		 * caller is required to buffer data for the final operation.
		 * However, a sha operation for a message with a total length
		 * of zero is valid so known values are required to supply
		 * the result.
		 */
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
			sha_zero = sha1_zero_message_hash;
			break;
		case CCP_SHA_TYPE_224:
			sha_zero = sha224_zero_message_hash;
			break;
		case CCP_SHA_TYPE_256:
			sha_zero = sha256_zero_message_hash;
			break;
		default:
			return -EINVAL;
		}

		scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
					 sha->ctx_len, 1);

		return 0;
	}

	if (!sha->src)
		return -EINVAL;

	BUILD_BUG_ON(CCP_SHA_KSB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);
	op.ksb_ctx = cmd_q->ksb_ctx;
	op.u.sha.type = sha->type;
	op.u.sha.msg_bits = sha->msg_bits;

	/* The SHA context fits in a single (32-byte) KSB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_SHA_KSB_COUNT * CCP_KSB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		return ret;

	if (sha->first) {
		const __be32 *init;

		switch (sha->type) {
		case CCP_SHA_TYPE_1:
			init = ccp_sha1_init;
			break;
		case CCP_SHA_TYPE_224:
			init = ccp_sha224_init;
			break;
		case CCP_SHA_TYPE_256:
			init = ccp_sha256_init;
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
		memcpy(ctx.address, init, CCP_SHA_CTXSIZE);
	} else {
		ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
	}

	ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
			      CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Send data to the CCP SHA engine */
	ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
			    CCP_SHA_BLOCKSIZE, DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, NULL, &op, CCP_SHA_BLOCKSIZE, false);
		if (sha->final && !src.sg_wa.bytes_left)
			op.eom = 1;

		ret = cmd_q->ccp->vdata->perform->sha(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_data;
		}

		ccp_process_data(&src, NULL, &op);
	}

	/* Retrieve the SHA context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
				CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_data;
	}

	ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);

	if (sha->final && sha->opad) {
		/* HMAC operation, recursively perform final SHA */
		struct ccp_cmd hmac_cmd;
		struct scatterlist sg;
		u64 block_size, digest_size;
		u8 *hmac_buf;

		switch (sha->type) {
		case CCP_SHA_TYPE_1:
			block_size = SHA1_BLOCK_SIZE;
			digest_size = SHA1_DIGEST_SIZE;
			break;
		case CCP_SHA_TYPE_224:
			block_size = SHA224_BLOCK_SIZE;
			digest_size = SHA224_DIGEST_SIZE;
			break;
		case CCP_SHA_TYPE_256:
			block_size = SHA256_BLOCK_SIZE;
			digest_size = SHA256_DIGEST_SIZE;
			break;
		default:
			ret = -EINVAL;
			goto e_data;
		}

		if (sha->opad_len != block_size) {
			ret = -EINVAL;
			goto e_data;
		}

		hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
		if (!hmac_buf) {
			ret = -ENOMEM;
			goto e_data;
		}
		sg_init_one(&sg, hmac_buf, block_size + digest_size);

		scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
		memcpy(hmac_buf + block_size, ctx.address, digest_size);

		memset(&hmac_cmd, 0, sizeof(hmac_cmd));
		hmac_cmd.engine = CCP_ENGINE_SHA;
		hmac_cmd.u.sha.type = sha->type;
		hmac_cmd.u.sha.ctx = sha->ctx;
		hmac_cmd.u.sha.ctx_len = sha->ctx_len;
		hmac_cmd.u.sha.src = &sg;
		hmac_cmd.u.sha.src_len = block_size + digest_size;
		hmac_cmd.u.sha.opad = NULL;
		hmac_cmd.u.sha.opad_len = 0;
		hmac_cmd.u.sha.first = 1;
		hmac_cmd.u.sha.final = 1;
		hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;

		ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
		if (ret)
			cmd->engine_error = hmac_cmd.engine_error;

		kfree(hmac_buf);
	}

e_data:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

	return ret;
}
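
/* The recursive HMAC finalization above hashes a buffer laid out as
 * [ opad (block_size bytes) | inner digest (digest_size bytes) ],
 * i.e. H(K ^ opad || H(K ^ ipad || message)) with the inner hash
 * already computed; msg_bits is the bit length of that concatenation.
 */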

static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_rsa_engine *rsa = &cmd->u.rsa;
	struct ccp_dm_workarea exp, src;
	struct ccp_data dst;
	struct ccp_op op;
	unsigned int ksb_count, i_len, o_len;
	int ret;

	if (rsa->key_size > CCP_RSA_MAX_WIDTH)
		return -EINVAL;

	if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
		return -EINVAL;

	/* The RSA modulus must precede the message being acted upon, so
	 * it must be copied to a DMA area where the message and the
	 * modulus can be concatenated. Therefore the input buffer
	 * length required is twice the output buffer length (which
	 * must be a multiple of 256-bits).
	 */
	o_len = ((rsa->key_size + 255) / 256) * 32;
	i_len = o_len * 2;

	ksb_count = o_len / CCP_KSB_BYTES;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);
	op.ksb_key = ccp_alloc_ksb(cmd_q->ccp, ksb_count);
	if (!op.ksb_key)
		return -EIO;

	/* The RSA exponent may span multiple (32-byte) KSB entries and must
	 * be in little endian format. Reverse copy each 32-byte chunk
	 * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
	 * and each byte within that chunk and do not perform any byte swap
	 * operations on the passthru operation.
	 */
	ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
	if (ret)
		goto e_ksb;

	ret = ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len,
				      CCP_KSB_BYTES, false);
	if (ret)
		goto e_exp;
	ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key,
			      CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_exp;
	}

	/* Concatenate the modulus and the message. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
	if (ret)
		goto e_exp;

	ret = ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len,
				      CCP_KSB_BYTES, false);
	if (ret)
		goto e_src;
	src.address += o_len;	/* Adjust the address for the copy operation */
	ret = ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len,
				      CCP_KSB_BYTES, false);
	if (ret)
		goto e_src;
	src.address -= o_len;	/* Reset the address to original value */

	/* Prepare the output area for the operation */
	ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len,
			    o_len, DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = i_len;
	op.dst.u.dma.address = dst.dm_wa.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = o_len;

	op.u.rsa.mod_size = rsa->key_size;
	op.u.rsa.input_len = i_len;

	ret = cmd_q->ccp->vdata->perform->rsa(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ccp_reverse_get_dm_area(&dst.dm_wa, rsa->dst, rsa->mod_len);

e_dst:
	ccp_free_data(&dst, cmd_q);

e_src:
	ccp_dm_free(&src);

e_exp:
	ccp_dm_free(&exp);

e_ksb:
	ccp_free_ksb(cmd_q->ccp, op.ksb_key, ksb_count);

	return ret;
}
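
/* Buffer sizing in the function above, worked for a 2048-bit key:
 * o_len = ((2048 + 255) / 256) * 32 = 256 bytes of output,
 * i_len = 512 bytes for the concatenated [ modulus | message ] input,
 * and ksb_count = 256 / 32 = 8 KSB entries for the exponent.
 */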

static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_passthru_engine *pt = &cmd->u.passthru;
	struct ccp_dm_workarea mask;
	struct ccp_data src, dst;
	struct ccp_op op;
	bool in_place = false;
	unsigned int i;
	int ret;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src || !pt->dst)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.ksb_key = cmd_q->ksb_key;

		ret = ccp_init_dm_workarea(&mask, cmd_q,
					   CCP_PASSTHRU_KSB_COUNT *
					   CCP_KSB_BYTES,
					   DMA_TO_DEVICE);
		if (ret)
			return ret;

		ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
		ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key,
				      CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_mask;
		}
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(pt->src) == sg_virt(pt->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
			    CCP_PASSTHRU_MASKSIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_mask;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
				    CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP Passthru engine
	 * Because the CCP engine works on a single source and destination
	 * dma address at a time, each entry in the source scatterlist
	 * (after the dma_map_sg call) must be less than or equal to the
	 * (remaining) length in the destination scatterlist entry and the
	 * length must be a multiple of CCP_PASSTHRU_BLOCKSIZE
	 */
	dst.sg_wa.sg_used = 0;
	for (i = 1; i <= src.sg_wa.dma_count; i++) {
		if (!dst.sg_wa.sg ||
		    (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
			ret = -EINVAL;
			goto e_dst;
		}

		if (i == src.sg_wa.dma_count) {
			op.eom = 1;
			op.soc = 1;
		}

		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
		op.src.u.dma.offset = 0;
		op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);

		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
		op.dst.u.dma.offset = dst.sg_wa.sg_used;
		op.dst.u.dma.length = op.src.u.dma.length;

		ret = cmd_q->ccp->vdata->perform->passthru(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		dst.sg_wa.sg_used += src.sg_wa.sg->length;
		if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
			dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
			dst.sg_wa.sg_used = 0;
		}
		src.sg_wa.sg = sg_next(src.sg_wa.sg);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_mask:
	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
		ccp_dm_free(&mask);

	return ret;
}

static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
				      struct ccp_cmd *cmd)
{
	struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
	struct ccp_dm_workarea mask;
	struct ccp_op op;
	int ret;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src_dma || !pt->dst_dma)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.ksb_key = cmd_q->ksb_key;

		mask.length = pt->mask_len;
		mask.dma.address = pt->mask;
		mask.dma.length = pt->mask_len;

		ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key,
				      CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			return ret;
		}
	}

	/* Send data to the CCP Passthru engine */
	op.eom = 1;
	op.soc = 1;

	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = pt->src_dma;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = pt->src_len;

	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = pt->dst_dma;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = pt->src_len;

	ret = cmd_q->ccp->vdata->perform->passthru(&op);
	if (ret)
		cmd->engine_error = cmd_q->cmd_error;

	return ret;
}

static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.mm.operand_1 ||
	    (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
		if (!ecc->u.mm.operand_2 ||
		    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.mm.result ||
	    (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first operand */
	ret = ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1,
				      ecc->u.mm.operand_1_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
		/* Copy the second operand */
		ret = ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2,
					      ecc->u.mm.operand_2_len,
					      CCP_ECC_OPERAND_SIZE, false);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the ECC result */
	ccp_reverse_get_dm_area(&dst, ecc->u.mm.result, CCP_ECC_MODULUS_BYTES);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}
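
/* Source buffer layout assembled above, one CCP_ECC_OPERAND_SIZE slot
 * per value, each reversed into little endian:
 *
 *	[ modulus | operand_1 | operand_2 (absent for MINV) ]
 *
 * The point-math variant below follows the same scheme with X, Y and an
 * implicit Z == 1 slot for each point.
 */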

static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.pm.point_1.x ||
	    (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.point_1.y ||
	    (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		if (!ecc->u.pm.point_2.x ||
		    (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
		    !ecc->u.pm.point_2.y ||
		    (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;
	} else {
		if (!ecc->u.pm.domain_a ||
		    (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
			if (!ecc->u.pm.scalar ||
			    (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
				return -EINVAL;
	}

	if (!ecc->u.pm.result.x ||
	    (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.result.y ||
	    (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first point X and Y coordinate */
	ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x,
				      ecc->u.pm.point_1.x_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;
	ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y,
				      ecc->u.pm.point_1.y_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Set the first point Z coordinate to 1 */
	*src.address = 0x01;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		/* Copy the second point X and Y coordinate */
		ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x,
					      ecc->u.pm.point_2.x_len,
					      CCP_ECC_OPERAND_SIZE, false);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
		ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y,
					      ecc->u.pm.point_2.y_len,
					      CCP_ECC_OPERAND_SIZE, false);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		/* Set the second point Z coordinate to 1 */
		*src.address = 0x01;
		src.address += CCP_ECC_OPERAND_SIZE;
	} else {
		/* Copy the Domain "a" parameter */
		ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a,
					      ecc->u.pm.domain_a_len,
					      CCP_ECC_OPERAND_SIZE, false);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
			/* Copy the scalar value */
			ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar,
						      ecc->u.pm.scalar_len,
						      CCP_ECC_OPERAND_SIZE,
						      false);
			if (ret)
				goto e_src;
			src.address += CCP_ECC_OPERAND_SIZE;
		}
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the workarea address since it is updated as we walk through
	 * to copy the point math result
	 */
	save = dst.address;

	/* Save the ECC result X and Y coordinates */
	ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.x,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;
	ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.y,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;

	/* Restore the workarea address */
	dst.address = save;

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}

static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;

	ecc->ecc_result = 0;

	if (!ecc->mod ||
	    (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	switch (ecc->function) {
	case CCP_ECC_FUNCTION_MMUL_384BIT:
	case CCP_ECC_FUNCTION_MADD_384BIT:
	case CCP_ECC_FUNCTION_MINV_384BIT:
		return ccp_run_ecc_mm_cmd(cmd_q, cmd);

	case CCP_ECC_FUNCTION_PADD_384BIT:
	case CCP_ECC_FUNCTION_PMUL_384BIT:
	case CCP_ECC_FUNCTION_PDBL_384BIT:
		return ccp_run_ecc_pm_cmd(cmd_q, cmd);

	default:
		return -EINVAL;
	}
}

int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	int ret;

	cmd->engine_error = 0;
	cmd_q->cmd_error = 0;
	cmd_q->int_rcvd = 0;
	cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));

	switch (cmd->engine) {
	case CCP_ENGINE_AES:
		ret = ccp_run_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_XTS_AES_128:
		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_SHA:
		ret = ccp_run_sha_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_RSA:
		ret = ccp_run_rsa_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_PASSTHRU:
		if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
			ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
		else
			ret = ccp_run_passthru_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_ECC:
		ret = ccp_run_ecc_cmd(cmd_q, cmd);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
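
/* Usage sketch (illustrative only; callers normally build a struct ccp_cmd
 * and submit it through ccp_enqueue_cmd(), which eventually lands here):
 *
 *	struct ccp_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.engine = CCP_ENGINE_SHA;
 *	cmd.u.sha.type = CCP_SHA_TYPE_256;
 *	cmd.u.sha.ctx = ctx_sg;
 *	cmd.u.sha.ctx_len = CCP_SHA_CTXSIZE;
 *	cmd.u.sha.src = src_sg;
 *	cmd.u.sha.src_len = src_len;
 *	cmd.u.sha.first = 1;
 *	cmd.u.sha.final = 1;
 *	cmd.u.sha.msg_bits = (u64)src_len << 3;
 *
 * ctx_sg, src_sg and src_len stand in for caller-provided scatterlists
 * and length; src_len need not be a CCP_SHA_BLOCKSIZE multiple here
 * because final is set.
 */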