/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

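/*
 * Reserve a contiguous run of 'count' entries in the device's key
 * storage block (KSB) bitmap.  If no run is free, sleep until another
 * queue releases entries; a return value of 0 means the wait was
 * interrupted by a signal and nothing was allocated.
 */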
static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
	int start;
	struct ccp_device *ccp = cmd_q->ccp;

	for (;;) {
		mutex_lock(&ccp->sb_mutex);

		start = (u32)bitmap_find_next_zero_area(ccp->sb,
							ccp->sb_count,
							ccp->sb_start,
							count, 0);
		if (start <= ccp->sb_count) {
			bitmap_set(ccp->sb, start, count);

			mutex_unlock(&ccp->sb_mutex);
			break;
		}

		ccp->sb_avail = 0;

		mutex_unlock(&ccp->sb_mutex);

		/* Wait for KSB entries to become available */
		if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
			return 0;
	}

	return KSB_START + start;
}

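/*
 * Return a previously allocated KSB region to the pool and wake any
 * queues blocked waiting for free entries.  A 'start' of 0 means the
 * allocation never succeeded, so there is nothing to free.
 */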
static void ccp_free_ksb(struct ccp_cmd_queue *cmd_q, unsigned int start,
			 unsigned int count)
{
	struct ccp_device *ccp = cmd_q->ccp;

	if (!start)
		return;

	mutex_lock(&ccp->sb_mutex);

	bitmap_clear(ccp->sb, start - KSB_START, count);

	ccp->sb_avail = 1;

	mutex_unlock(&ccp->sb_mutex);

	wake_up_interruptible_all(&ccp->sb_queue);
}

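/* Query the number of free command slots from the queue status register */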
static unsigned int ccp_get_free_slots(struct ccp_cmd_queue *cmd_q)
{
	return CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
}

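/*
 * Submit a prepared command to the device: write CMD_REQ1 through
 * CMD_REQx, then write CMD_REQ0 to kick off the job.  If an interrupt
 * on completion was requested, sleep until the interrupt handler
 * signals completion; on error, delete all jobs with this job id from
 * the queue and return -EIO (or the signal result).
 */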
static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
{
	struct ccp_cmd_queue *cmd_q = op->cmd_q;
	struct ccp_device *ccp = cmd_q->ccp;
	void __iomem *cr_addr;
	u32 cr0, cmd;
	unsigned int i;
	int ret = 0;

	/* We could read a status register to see how many free slots
	 * are actually available, but reading that register resets it
	 * and you could lose some error information.
	 */
	cmd_q->free_slots--;

	cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
	      | (op->jobid << REQ0_JOBID_SHIFT)
	      | REQ0_WAIT_FOR_WRITE;

	if (op->soc)
		cr0 |= REQ0_STOP_ON_COMPLETE
		       | REQ0_INT_ON_COMPLETE;

	if (op->ioc || !cmd_q->free_slots)
		cr0 |= REQ0_INT_ON_COMPLETE;

	/* Start at CMD_REQ1 */
	cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;

	mutex_lock(&ccp->req_mutex);

	/* Write CMD_REQ1 through CMD_REQx first */
	for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR)
		iowrite32(*(cr + i), cr_addr);

	/* Tell the CCP to start */
	wmb();
	iowrite32(cr0, ccp->io_regs + CMD_REQ0);

	mutex_unlock(&ccp->req_mutex);

	if (cr0 & REQ0_INT_ON_COMPLETE) {
		/* Wait for the job to complete */
		ret = wait_event_interruptible(cmd_q->int_queue,
					       cmd_q->int_rcvd);
		if (ret || cmd_q->cmd_error) {
			/* On error delete all related jobs from the queue */
			cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
			      | op->jobid;

			iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);

			if (!ret)
				ret = -EIO;
		} else if (op->soc) {
			/* Delete just the head job from the queue on SoC */
			cmd = DEL_Q_ACTIVE
			      | (cmd_q->id << DEL_Q_ID_SHIFT)
			      | op->jobid;

			iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
		}

		cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status);

		cmd_q->int_rcvd = 0;
	}

	return ret;
}

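/* Build the REQ1-REQ6 register values for an AES operation and submit it */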
static int ccp_perform_aes(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT)
		| (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
		| (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
		| (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
		| (op->sb_key << REQ1_KEY_KSB_SHIFT);
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	if (op->u.aes.mode == CCP_AES_MODE_CFB)
		cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT);

	if (op->eom)
		cr[0] |= REQ1_EOM;

	if (op->init)
		cr[0] |= REQ1_INIT;

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

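/* Build the REQ1-REQ6 register values for an XTS-AES (128-bit) operation */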
static int ccp_perform_xts_aes(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
		| (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
		| (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
		| (op->sb_key << REQ1_KEY_KSB_SHIFT);
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	if (op->eom)
		cr[0] |= REQ1_EOM;

	if (op->init)
		cr[0] |= REQ1_INIT;

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

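/*
 * Build the REQ1-REQ6 register values for a SHA operation and submit
 * it.  On the final block (EOM) the message length in bits is passed
 * in REQ5/REQ6 so the engine can apply the padding.
 */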
static int ccp_perform_sha(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT)
		| (op->u.sha.type << REQ1_SHA_TYPE_SHIFT)
		| REQ1_INIT;
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);

	if (op->eom) {
		cr[0] |= REQ1_EOM;
		cr[4] = lower_32_bits(op->u.sha.msg_bits);
		cr[5] = upper_32_bits(op->u.sha.msg_bits);
	} else {
		cr[4] = 0;
		cr[5] = 0;
	}

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

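/* Build the REQ1-REQ6 register values for an RSA operation and submit it */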
static int ccp_perform_rsa(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
		| (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
		| (op->sb_key << REQ1_KEY_KSB_SHIFT)
		| REQ1_EOM;
	cr[1] = op->u.rsa.input_len - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

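/*
 * Build the REQ1-REQ6 register values for a passthrough (copy)
 * operation.  Source and destination may each be system memory or a
 * storage block entry, so the memory type and address fields are
 * chosen accordingly.
 */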
static int ccp_perform_passthru(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT)
		| (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT)
		| (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT);

	if (op->src.type == CCP_MEMTYPE_SYSTEM)
		cr[1] = op->src.u.dma.length - 1;
	else
		cr[1] = op->dst.u.dma.length - 1;

	if (op->src.type == CCP_MEMTYPE_SYSTEM) {
		cr[2] = ccp_addr_lo(&op->src.u.dma);
		cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
			| ccp_addr_hi(&op->src.u.dma);

		if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
			cr[3] |= (op->sb_key << REQ4_KSB_SHIFT);
	} else {
		cr[2] = op->src.u.sb * CCP_SB_BYTES;
		cr[3] = (CCP_MEMTYPE_SB << REQ4_MEMTYPE_SHIFT);
	}

	if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
		cr[4] = ccp_addr_lo(&op->dst.u.dma);
		cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
			| ccp_addr_hi(&op->dst.u.dma);
	} else {
		cr[4] = op->dst.u.sb * CCP_SB_BYTES;
		cr[5] = (CCP_MEMTYPE_SB << REQ6_MEMTYPE_SHIFT);
	}

	if (op->eom)
		cr[0] |= REQ1_EOM;

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

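/*
 * Build the REQ1-REQ6 register values for an ECC operation, requesting
 * affine coordinate conversion, and submit it.
 */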
static int ccp_perform_ecc(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = REQ1_ECC_AFFINE_CONVERT
		| (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT)
		| (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT)
		| REQ1_EOM;
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

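/*
 * One-time device initialization: discover the available command
 * queues, reserve per-queue KSB regions and DMA pools, hook up the
 * IRQ, start a kthread per queue, and register the hwrng and
 * dmaengine interfaces.
 */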
static int ccp_init(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct ccp_cmd_queue *cmd_q;
	struct dma_pool *dma_pool;
	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
	unsigned int qmr, qim, i;
	int ret;

	/* Find available queues */
	qim = 0;
	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
	for (i = 0; i < MAX_HW_QUEUES; i++) {
		if (!(qmr & (1 << i)))
			continue;

		/* Allocate a dma pool for this queue */
		snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
			 ccp->name, i);
		dma_pool = dma_pool_create(dma_pool_name, dev,
					   CCP_DMAPOOL_MAX_SIZE,
					   CCP_DMAPOOL_ALIGN, 0);
		if (!dma_pool) {
			dev_err(dev, "unable to allocate dma pool\n");
			ret = -ENOMEM;
			goto e_pool;
		}

		cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
		ccp->cmd_q_count++;

		cmd_q->ccp = ccp;
		cmd_q->id = i;
		cmd_q->dma_pool = dma_pool;

		/* Reserve 2 KSB regions for the queue */
		cmd_q->sb_key = KSB_START + ccp->sb_start++;
		cmd_q->sb_ctx = KSB_START + ccp->sb_start++;
		ccp->sb_count -= 2;

		/* Preset some register values and masks that are queue
		 * number dependent
		 */
		cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
				    (CMD_Q_STATUS_INCR * i);
		cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
					(CMD_Q_STATUS_INCR * i);
		cmd_q->int_ok = 1 << (i * 2);
		cmd_q->int_err = 1 << ((i * 2) + 1);

		cmd_q->free_slots = ccp_get_free_slots(cmd_q);

		init_waitqueue_head(&cmd_q->int_queue);

		/* Build queue interrupt mask (two interrupts per queue) */
		qim |= cmd_q->int_ok | cmd_q->int_err;

#ifdef CONFIG_ARM64
		/* For arm64 set the recommended queue cache settings */
		iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
			  (CMD_Q_CACHE_INC * i));
#endif

		dev_dbg(dev, "queue #%u available\n", i);
	}
	if (ccp->cmd_q_count == 0) {
		dev_notice(dev, "no command queues available\n");
		ret = -EIO;
		goto e_pool;
	}
	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);

	/* Disable and clear interrupts until ready */
	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

	/* Request an irq */
	ret = ccp->get_irq(ccp);
	if (ret) {
		dev_err(dev, "unable to allocate an IRQ\n");
		goto e_pool;
	}

	/* Initialize the queues used to wait for KSB space and suspend */
	init_waitqueue_head(&ccp->sb_queue);
	init_waitqueue_head(&ccp->suspend_queue);

	/* Create a kthread for each queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct task_struct *kthread;

		cmd_q = &ccp->cmd_q[i];

		kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
					 "%s-q%u", ccp->name, cmd_q->id);
		if (IS_ERR(kthread)) {
			dev_err(dev, "error creating queue thread (%ld)\n",
				PTR_ERR(kthread));
			ret = PTR_ERR(kthread);
			goto e_kthread;
		}

		cmd_q->kthread = kthread;
		wake_up_process(kthread);
	}

	/* Register the RNG */
	ccp->hwrng.name = ccp->rngname;
	ccp->hwrng.read = ccp_trng_read;
	ret = hwrng_register(&ccp->hwrng);
	if (ret) {
		dev_err(dev, "error registering hwrng (%d)\n", ret);
		goto e_kthread;
	}

	/* Register the DMA engine support */
	ret = ccp_dmaengine_register(ccp);
	if (ret)
		goto e_hwrng;

	ccp_add_device(ccp);

	/* Enable interrupts */
	iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);

	return 0;

e_hwrng:
	hwrng_unregister(&ccp->hwrng);

e_kthread:
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	ccp->free_irq(ccp);

e_pool:
	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	return ret;
}

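/*
 * Tear down the device in roughly the reverse order of ccp_init():
 * remove it from the unit list, quiesce interrupts, unregister the
 * dmaengine and hwrng interfaces, stop the queue kthreads, and fail
 * any commands still queued or backlogged with -ENODEV.
 */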
static void ccp_destroy(struct ccp_device *ccp)
{
	struct ccp_cmd_queue *cmd_q;
	struct ccp_cmd *cmd;
	unsigned int qim, i;

	/* Remove this device from the list of available units first */
	ccp_del_device(ccp);

	/* Build queue interrupt mask (two interrupt masks per queue) */
	qim = 0;
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];
		qim |= cmd_q->int_ok | cmd_q->int_err;
	}

	/* Disable and clear interrupts */
	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

	/* Unregister the DMA engine */
	ccp_dmaengine_unregister(ccp);

	/* Unregister the RNG */
	hwrng_unregister(&ccp->hwrng);

	/* Stop the queue kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	ccp->free_irq(ccp);

	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	/* Flush the cmd and backlog queue */
	while (!list_empty(&ccp->cmd)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
	while (!list_empty(&ccp->backlog)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
}

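/*
 * Interrupt handler: for each queue that raised an interrupt, latch
 * the status registers, record the first error seen, acknowledge the
 * interrupt, and wake the waiter in ccp_do_cmd().
 */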
static irqreturn_t ccp_irq_handler(int irq, void *data)
{
	struct device *dev = data;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	struct ccp_cmd_queue *cmd_q;
	u32 q_int, status;
	unsigned int i;

	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);

	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
		if (q_int) {
			cmd_q->int_status = status;
			cmd_q->q_status = ioread32(cmd_q->reg_status);
			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

			/* On error, only save the first error value */
			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

			cmd_q->int_rcvd = 1;

			/* Acknowledge the interrupt and wake the kthread */
			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
			wake_up_interruptible(&cmd_q->int_queue);
		}
	}

	return IRQ_HANDLED;
}

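/* Version 3 device operations, dispatched through struct ccp_vdata */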
static const struct ccp_actions ccp3_actions = {
	.aes = ccp_perform_aes,
	.xts_aes = ccp_perform_xts_aes,
	.sha = ccp_perform_sha,
	.rsa = ccp_perform_rsa,
	.passthru = ccp_perform_passthru,
	.ecc = ccp_perform_ecc,
	.sballoc = ccp_alloc_ksb,
	.sbfree = ccp_free_ksb,
	.init = ccp_init,
	.destroy = ccp_destroy,
	.get_free_slots = ccp_get_free_slots,
	.irqhandler = ccp_irq_handler,
};

struct ccp_vdata ccpv3 = {
	.version = CCP_VERSION(3, 0),
	.perform = &ccp3_actions,
	.bar = 2,
	.offset = 0x20000,
};