// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
#endif
#include <linux/ccp.h>

#include "ccp-dev.h"

struct ccp_tasklet_data {
	struct completion completion;
	struct ccp_cmd *cmd;
};

/* Human-readable error strings */
static char *ccp_error_codes[] = {
	"",
	"ERR 01: ILLEGAL_ENGINE",
	"ERR 02: ILLEGAL_KEY_ID",
	"ERR 03: ILLEGAL_FUNCTION_TYPE",
	"ERR 04: ILLEGAL_FUNCTION_MODE",
	"ERR 05: ILLEGAL_FUNCTION_ENCRYPT",
	"ERR 06: ILLEGAL_FUNCTION_SIZE",
	"ERR 07: Zlib_MISSING_INIT_EOM",
	"ERR 08: ILLEGAL_FUNCTION_RSVD",
	"ERR 09: ILLEGAL_BUFFER_LENGTH",
	"ERR 10: VLSB_FAULT",
	"ERR 11: ILLEGAL_MEM_ADDR",
	"ERR 12: ILLEGAL_MEM_SEL",
	"ERR 13: ILLEGAL_CONTEXT_ID",
	"ERR 14: ILLEGAL_KEY_ADDR",
	"ERR 15: 0xF Reserved",
	"ERR 16: Zlib_ILLEGAL_MULTI_QUEUE",
	"ERR 17: Zlib_ILLEGAL_JOBID_CHANGE",
	"ERR 18: CMD_TIMEOUT",
	"ERR 19: IDMA0_AXI_SLVERR",
	"ERR 20: IDMA0_AXI_DECERR",
	"ERR 21: 0x15 Reserved",
	"ERR 22: IDMA1_AXI_SLAVE_FAULT",
	"ERR 23: IDMA1_AXI_DECERR",
	"ERR 24: 0x18 Reserved",
	"ERR 25: ZLIBVHB_AXI_SLVERR",
	"ERR 26: ZLIBVHB_AXI_DECERR",
	"ERR 27: 0x1B Reserved",
	"ERR 28: ZLIB_UNEXPECTED_EOM",
	"ERR 29: ZLIB_EXTRA_DATA",
	"ERR 30: ZLIB_BTYPE",
	"ERR 31: ZLIB_UNDEFINED_SYMBOL",
	"ERR 32: ZLIB_UNDEFINED_DISTANCE_S",
	"ERR 33: ZLIB_CODE_LENGTH_SYMBOL",
	"ERR 34: ZLIB_VHB_ILLEGAL_FETCH",
	"ERR 35: ZLIB_UNCOMPRESSED_LEN",
	"ERR 36: ZLIB_LIMIT_REACHED",
	"ERR 37: ZLIB_CHECKSUM_MISMATCH0",
	"ERR 38: ODMA0_AXI_SLVERR",
	"ERR 39: ODMA0_AXI_DECERR",
	"ERR 40: 0x28 Reserved",
	"ERR 41: ODMA1_AXI_SLVERR",
	"ERR 42: ODMA1_AXI_DECERR",
	"ERR 43: LSB_PARITY_ERR",
};

void ccp_log_error(struct ccp_device *d, int e)
{
	/* Guard against error codes beyond the table */
	if (e < 0 || e >= ARRAY_SIZE(ccp_error_codes)) {
		dev_err(d->dev, "CCP error: unknown error code (0x%x)\n", e);
		return;
	}

	dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e);
}

/* List of CCPs, CCP count, read-write access lock, and access functions
 *
 * Lock structure: get ccp_unit_lock for reading whenever we need to
 * examine the CCP list. While holding it for reading we can acquire
 * the RR lock to update the round-robin next-CCP pointer. The unit lock
 * must be acquired before the RR lock.
 *
 * If the unit lock is acquired for writing, we have total control over
 * the list, so there's no value in getting the RR lock.
 */
static DEFINE_RWLOCK(ccp_unit_lock);
static LIST_HEAD(ccp_units);

/* Round-robin counter */
static DEFINE_SPINLOCK(ccp_rr_lock);
static struct ccp_device *ccp_rr;

/**
 * ccp_add_device - add a CCP device to the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Put this CCP on the unit list, which makes it available
 * for use.
 */
void ccp_add_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	list_add_tail(&ccp->entry, &ccp_units);
	if (!ccp_rr)
		/* We already have the list lock (we're first) so this
		 * pointer can't change on us. Set its initial value.
		 */
		ccp_rr = ccp;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}

/**
 * ccp_del_device - remove a CCP device from the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Remove this unit from the list of devices. If the next device
 * up for use is this one, adjust the pointer. If this is the last
 * device, NULL the pointer.
 */
void ccp_del_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	if (ccp_rr == ccp) {
		/* ccp_unit_lock is read/write; any read access
		 * will be suspended while we make changes to the
		 * list and RR pointer.
		 */
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
						  entry);
		else
			ccp_rr = list_next_entry(ccp_rr, entry);
	}
	list_del(&ccp->entry);
	if (list_empty(&ccp_units))
		ccp_rr = NULL;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}

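/*
 * Example (illustrative sketch, not part of the driver): the expected
 * lifecycle around the two helpers above, as seen from a hypothetical
 * init/teardown path. A unit only participates in round-robin command
 * dispatch between these two calls.
 *
 *	ccp_add_device(ccp);	// unit becomes visible to ccp_get_device()
 *	...
 *	ccp_del_device(ccp);	// unit leaves the rotation
 */
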
int ccp_register_rng(struct ccp_device *ccp)
{
	int ret = 0;

	dev_dbg(ccp->dev, "Registering RNG...\n");
	/* Register an RNG */
	ccp->hwrng.name = ccp->rngname;
	ccp->hwrng.read = ccp_trng_read;
	ret = hwrng_register(&ccp->hwrng);
	if (ret)
		dev_err(ccp->dev, "error registering hwrng (%d)\n", ret);

	return ret;
}

void ccp_unregister_rng(struct ccp_device *ccp)
{
	if (ccp->hwrng.name)
		hwrng_unregister(&ccp->hwrng);
}

static struct ccp_device *ccp_get_device(void)
{
	unsigned long flags;
	struct ccp_device *dp = NULL;

	/* We round-robin through the unit list.
	 * The (ccp_rr) pointer refers to the next unit to use.
	 */
	read_lock_irqsave(&ccp_unit_lock, flags);
	if (!list_empty(&ccp_units)) {
		spin_lock(&ccp_rr_lock);
		dp = ccp_rr;
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
						  entry);
		else
			ccp_rr = list_next_entry(ccp_rr, entry);
		spin_unlock(&ccp_rr_lock);
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return dp;
}

/**
 * ccp_present - check if a CCP device is present
 *
 * Returns zero if a CCP device is present, -ENODEV otherwise.
 */
int ccp_present(void)
{
	unsigned long flags;
	int ret;

	read_lock_irqsave(&ccp_unit_lock, flags);
	ret = list_empty(&ccp_units);
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return ret ? -ENODEV : 0;
}
EXPORT_SYMBOL_GPL(ccp_present);

/**
 * ccp_version - get the version of the CCP device
 *
 * Returns the version from the first unit on the list,
 * or zero if no CCP device is present.
 */
unsigned int ccp_version(void)
{
	struct ccp_device *dp;
	unsigned long flags;
	int ret = 0;

	read_lock_irqsave(&ccp_unit_lock, flags);
	if (!list_empty(&ccp_units)) {
		dp = list_first_entry(&ccp_units, struct ccp_device, entry);
		ret = dp->vdata->version;
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_version);

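/*
 * Example (illustrative sketch, not part of the driver): a consumer such
 * as a crypto algorithm module can use the two exported helpers above to
 * decide whether to register against the CCP. The init function below is
 * hypothetical.
 *
 *	static int __init my_ccp_client_init(void)
 *	{
 *		if (ccp_present() != 0)
 *			return -ENODEV;	// no CCP unit enumerated
 *
 *		pr_info("found CCP, version %u\n", ccp_version());
 *		return 0;
 *	}
 */
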
/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue, the cmd is
 * backlogged only if the CCP_CMD_MAY_BACKLOG flag is set, in which
 * case -EBUSY is returned; otherwise -ENOSPC is returned.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS, or
 *   the return code is -EBUSY and the CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
	struct ccp_device *ccp;
	unsigned long flags;
	unsigned int i;
	int ret;

	/* Some commands might need to be sent to a specific device */
	ccp = cmd->ccp ? cmd->ccp : ccp_get_device();

	if (!ccp)
		return -ENODEV;

	/* Caller must supply a callback routine */
	if (!cmd->callback)
		return -EINVAL;

	cmd->ccp = ccp;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	i = ccp->cmd_q_count;

	if (ccp->cmd_count >= MAX_CMD_QLEN) {
		if (cmd->flags & CCP_CMD_MAY_BACKLOG) {
			ret = -EBUSY;
			list_add_tail(&cmd->entry, &ccp->backlog);
		} else {
			ret = -ENOSPC;
		}
	} else {
		ret = -EINPROGRESS;
		ccp->cmd_count++;
		list_add_tail(&cmd->entry, &ccp->cmd);

		/* Find an idle queue */
		if (!ccp->suspending) {
			for (i = 0; i < ccp->cmd_q_count; i++) {
				if (ccp->cmd_q[i].active)
					continue;

				break;
			}
		}
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);

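/*
 * Example (illustrative sketch, not part of the driver): how a caller
 * typically drives ccp_enqueue_cmd(). The helper names below are
 * hypothetical; a real user also fills in cmd->engine and the matching
 * engine member of cmd->u before submitting.
 *
 *	static void my_cmd_done(void *data, int err)
 *	{
 *		// err == -EINPROGRESS: cmd has just left the backlog
 *		// any other value: final result of the operation
 *		if (err != -EINPROGRESS)
 *			complete((struct completion *)data);
 *	}
 *
 *	static int my_submit(struct ccp_cmd *cmd, struct completion *done)
 *	{
 *		int ret;
 *
 *		cmd->flags = CCP_CMD_MAY_BACKLOG;
 *		cmd->callback = my_cmd_done;
 *		cmd->data = done;
 *
 *		ret = ccp_enqueue_cmd(cmd);
 *		if (ret != -EINPROGRESS && ret != -EBUSY)
 *			return ret;	// -ENODEV, -EINVAL or -ENOSPC
 *
 *		wait_for_completion(done);
 *		return 0;
 *	}
 */
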
static void ccp_do_cmd_backlog(struct work_struct *work)
{
	struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
	struct ccp_device *ccp = cmd->ccp;
	unsigned long flags;
	unsigned int i;

	cmd->callback(cmd->data, -EINPROGRESS);

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->cmd_count++;
	list_add_tail(&cmd->entry, &ccp->cmd);

	/* Find an idle queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		if (ccp->cmd_q[i].active)
			continue;

		break;
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);
}

static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
	struct ccp_device *ccp = cmd_q->ccp;
	struct ccp_cmd *cmd = NULL;
	struct ccp_cmd *backlog = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	cmd_q->active = 0;

	if (ccp->suspending) {
		cmd_q->suspended = 1;

		spin_unlock_irqrestore(&ccp->cmd_lock, flags);
		wake_up_interruptible(&ccp->suspend_queue);

		return NULL;
	}

	if (ccp->cmd_count) {
		cmd_q->active = 1;

		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);

		ccp->cmd_count--;
	}

	if (!list_empty(&ccp->backlog)) {
		backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
					   entry);
		list_del(&backlog->entry);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	if (backlog) {
		INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
		schedule_work(&backlog->work);
	}

	return cmd;
}

static void ccp_do_cmd_complete(unsigned long data)
{
	struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
	struct ccp_cmd *cmd = tdata->cmd;

	cmd->callback(cmd->data, cmd->ret);

	complete(&tdata->completion);
}

/**
 * ccp_cmd_queue_thread - create a kernel thread to manage a CCP queue
 *
 * @data: thread-specific data
 */
int ccp_cmd_queue_thread(void *data)
{
	struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
	struct ccp_cmd *cmd;
	struct ccp_tasklet_data tdata;
	struct tasklet_struct tasklet;

	tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();

		set_current_state(TASK_INTERRUPTIBLE);

		cmd = ccp_dequeue_cmd(cmd_q);
		if (!cmd)
			continue;

		__set_current_state(TASK_RUNNING);

		/* Execute the command */
		cmd->ret = ccp_run_cmd(cmd_q, cmd);

		/* Schedule the completion callback */
		tdata.cmd = cmd;
		init_completion(&tdata.completion);
		tasklet_schedule(&tasklet);
		wait_for_completion(&tdata.completion);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}

/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @sp: sp_device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct sp_device *sp)
{
	struct device *dev = sp->dev;
	struct ccp_device *ccp;

	ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
	if (!ccp)
		return NULL;

	ccp->dev = dev;
	ccp->sp = sp;
	ccp->axcache = sp->axcache;

	INIT_LIST_HEAD(&ccp->cmd);
	INIT_LIST_HEAD(&ccp->backlog);

	spin_lock_init(&ccp->cmd_lock);
	mutex_init(&ccp->req_mutex);
	mutex_init(&ccp->sb_mutex);
	ccp->sb_count = KSB_COUNT;
	ccp->sb_start = 0;

	/* Initialize the wait queues */
	init_waitqueue_head(&ccp->sb_queue);
	init_waitqueue_head(&ccp->suspend_queue);

	snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", sp->ord);
	snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", sp->ord);

	return ccp;
}

int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
	u32 trng_value;
	int len = min_t(int, sizeof(trng_value), max);

	/* Locking is provided by the caller so we can update device
	 * hwrng-related fields safely
	 */
	trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
	if (!trng_value) {
		/* Zero is returned if no data is available or if a
		 * bad-entropy error is present. Assume an error if
		 * we exceed TRNG_RETRIES reads of zero.
		 */
		if (ccp->hwrng_retries++ > TRNG_RETRIES)
			return -EIO;

		return 0;
	}

	/* Reset the counter and save the rng value */
	ccp->hwrng_retries = 0;
	memcpy(data, &trng_value, len);

	return len;
}

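/*
 * Example (illustrative sketch, not part of the driver): the hwrng core
 * is the intended caller of ccp_trng_read(), via the read hook installed
 * by ccp_register_rng(). The hypothetical loop below shows the contract:
 * 0 means "no entropy available yet", a negative value is a hard error,
 * and a positive value is the number of bytes copied.
 *
 *	u32 val;
 *	int n;
 *
 *	do {
 *		n = ccp->hwrng.read(&ccp->hwrng, &val, sizeof(val), false);
 *	} while (n == 0);	// becomes -EIO after TRNG_RETRIES zero reads
 *
 *	if (n > 0)
 *		pr_info("got %d random bytes\n", n);
 */
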
#ifdef CONFIG_PM
bool ccp_queues_suspended(struct ccp_device *ccp)
{
	unsigned int suspended = 0;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].suspended)
			suspended++;

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return ccp->cmd_q_count == suspended;
}

int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
{
	struct ccp_device *ccp = sp->ccp_data;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 1;

	/* Wake all the queue kthreads to prepare for suspend */
	for (i = 0; i < ccp->cmd_q_count; i++)
		wake_up_process(ccp->cmd_q[i].kthread);

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* Wait for all queue kthreads to say they're done */
	while (!ccp_queues_suspended(ccp))
		wait_event_interruptible(ccp->suspend_queue,
					 ccp_queues_suspended(ccp));

	return 0;
}

int ccp_dev_resume(struct sp_device *sp)
{
	struct ccp_device *ccp = sp->ccp_data;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 0;

	/* Wake up all the kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		ccp->cmd_q[i].suspended = 0;
		wake_up_process(ccp->cmd_q[i].kthread);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return 0;
}
#endif

int ccp_dev_init(struct sp_device *sp)
{
	struct device *dev = sp->dev;
	struct ccp_device *ccp;
	int ret;

	ret = -ENOMEM;
	ccp = ccp_alloc_struct(sp);
	if (!ccp)
		goto e_err;
	sp->ccp_data = ccp;

	ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata;
	if (!ccp->vdata || !ccp->vdata->version) {
		ret = -ENODEV;
		dev_err(dev, "missing driver data\n");
		goto e_err;
	}

	ccp->use_tasklet = sp->use_tasklet;

	ccp->io_regs = sp->io_map + ccp->vdata->offset;
	if (ccp->vdata->setup)
		ccp->vdata->setup(ccp);

	ret = ccp->vdata->perform->init(ccp);
	if (ret)
		goto e_err;

	dev_notice(dev, "ccp enabled\n");

	return 0;

e_err:
	sp->ccp_data = NULL;

	dev_notice(dev, "ccp initialization failed\n");

	return ret;
}

void ccp_dev_destroy(struct sp_device *sp)
{
	struct ccp_device *ccp = sp->ccp_data;

	if (!ccp)
		return;

	ccp->vdata->perform->destroy(ccp);
}