// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

/**
 * DOC: Interrupt management for the V3D engine
 *
 * When we take a bin, render, or TFU done interrupt, we need to
 * signal the fence for that job so that the scheduler can queue up
 * the next one and unblock any waiters.
 *
 * When we take the binner out of memory interrupt, we need to
 * allocate some new memory and pass it to the binner so that the
 * current job can make progress.
 */

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM |	\
			     V3D_INT_FLDONE |	\
			     V3D_INT_FRDONE |	\
			     V3D_INT_GMPV))

#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV |	\
			    V3D_HUB_INT_MMU_PTI |	\
			    V3D_HUB_INT_MMU_CAP |	\
			    V3D_HUB_INT_TFUC))

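/* Forward declaration: v3d_irq() chains to the hub handler on parts
 * where the hub and core share a single interrupt line.
 */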
static irqreturn_t
v3d_hub_irq(int irq, void *arg);

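/* Called in response to the binner's out-of-memory interrupt:
 * allocates a new 256KB chunk of binner overflow memory and points
 * the binner pool registers at it so the current job can make
 * progress.
 */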
Eric Anholt57692c92018-04-30 11:10:58 -070033static void
34v3d_overflow_mem_work(struct work_struct *work)
35{
36 struct v3d_dev *v3d =
37 container_of(work, struct v3d_dev, overflow_mem_work);
38 struct drm_device *dev = &v3d->drm;
39 struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
Eric Anholt40609d42019-03-14 09:34:51 -070040 struct drm_gem_object *obj;
Eric Anholt57692c92018-04-30 11:10:58 -070041 unsigned long irqflags;
42
43 if (IS_ERR(bo)) {
44 DRM_ERROR("Couldn't allocate binner overflow mem\n");
45 return;
46 }
Eric Anholt40609d42019-03-14 09:34:51 -070047 obj = &bo->base.base;
Eric Anholt57692c92018-04-30 11:10:58 -070048
	/* We lost a race, and our work task came in after the bin job
	 * completed and exited.  This can happen because the HW
	 * signals OOM before it's fully OOM, so the binner might just
	 * barely complete.
	 *
	 * If we lose the race and our work task comes in after a new
	 * bin job got scheduled, that's fine.  We'll just give them
	 * some binner pool anyway.
	 */
	spin_lock_irqsave(&v3d->job_lock, irqflags);
	if (!v3d->bin_job) {
		spin_unlock_irqrestore(&v3d->job_lock, irqflags);
		goto out;
	}

	drm_gem_object_get(obj);
	list_add_tail(&bo->unref_head, &v3d->bin_job->unref_list);
	spin_unlock_irqrestore(&v3d->job_lock, irqflags);

	V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
	V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size);

out:
	drm_gem_object_put_unlocked(obj);
}

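/* Core IRQ handler: signals the job-done fences for the binner and
 * renderer, schedules overflow memory allocation on binner OOM, and
 * reports GMP violations.
 */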
static irqreturn_t
v3d_irq(int irq, void *arg)
{
	struct v3d_dev *v3d = arg;
	u32 intsts;
	irqreturn_t status = IRQ_NONE;

	intsts = V3D_CORE_READ(0, V3D_CTL_INT_STS);

	/* Acknowledge the interrupts we're handling here. */
	V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, intsts);

	if (intsts & V3D_INT_OUTOMEM) {
		/* Note that the OOM status is edge signaled, so the
		 * interrupt won't happen again until we actually
		 * add more memory.  Also, as of V3D 4.1, FLDONE won't
		 * be reported until any OOM state has been cleared.
		 */
		schedule_work(&v3d->overflow_mem_work);
		status = IRQ_HANDLED;
	}

	if (intsts & V3D_INT_FLDONE) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->bin_job->bin.irq_fence);

		trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
		dma_fence_signal(&fence->base);
		status = IRQ_HANDLED;
	}

	if (intsts & V3D_INT_FRDONE) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->render_job->render.irq_fence);

		trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
		dma_fence_signal(&fence->base);
		status = IRQ_HANDLED;
	}

	/* We shouldn't be triggering these if we have GMP in
	 * always-allowed mode.
	 */
	if (intsts & V3D_INT_GMPV)
		dev_err(v3d->dev, "GMP violation\n");

	/* V3D 4.2 wires the hub and core IRQs together, so if we
	 * didn't see the common one, check the hub for MMU IRQs.
	 */
	if (v3d->single_irq_line && status == IRQ_NONE)
		return v3d_hub_irq(irq, arg);

	return status;
}

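/* Hub IRQ handler: signals the TFU job-done fence and reports MMU
 * violations.
 */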
static irqreturn_t
v3d_hub_irq(int irq, void *arg)
{
	struct v3d_dev *v3d = arg;
	u32 intsts;
	irqreturn_t status = IRQ_NONE;

	intsts = V3D_READ(V3D_HUB_INT_STS);

	/* Acknowledge the interrupts we're handling here. */
	V3D_WRITE(V3D_HUB_INT_CLR, intsts);

	if (intsts & V3D_HUB_INT_TFUC) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->tfu_job->irq_fence);

		trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
		dma_fence_signal(&fence->base);
		status = IRQ_HANDLED;
	}

	if (intsts & (V3D_HUB_INT_MMU_WRV |
		      V3D_HUB_INT_MMU_PTI |
		      V3D_HUB_INT_MMU_CAP)) {
		u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
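		/* The register reports the violating address shifted
		 * right by 8 bits, so shift it back up to recover the
		 * byte address.
		 */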
		u64 vio_addr = (u64)V3D_READ(V3D_MMU_VIO_ADDR) << 8;

		dev_err(v3d->dev, "MMU error from client %d at 0x%08llx%s%s%s\n",
			axi_id, (long long)vio_addr,
			((intsts & V3D_HUB_INT_MMU_WRV) ?
			 ", write violation" : ""),
			((intsts & V3D_HUB_INT_MMU_PTI) ?
			 ", pte invalid" : ""),
			((intsts & V3D_HUB_INT_MMU_CAP) ?
			 ", cap exceeded" : ""));
		status = IRQ_HANDLED;
	}

	return status;
}

int
v3d_irq_init(struct v3d_dev *v3d)
{
	int irq1, ret, core;

	INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);

	/* Clear any pending interrupts someone might have left around
	 * for us.
	 */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);

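	/* Some parts expose separate hub and core interrupt lines,
	 * while others (V3D 4.2, for instance) wire them together so
	 * everything arrives on a single line.  Probe for a second IRQ
	 * and fall back to single-line handling if it's absent.
	 */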
	irq1 = platform_get_irq(v3d->pdev, 1);
	if (irq1 == -EPROBE_DEFER)
		return irq1;
	if (irq1 > 0) {
		ret = devm_request_irq(v3d->dev, irq1,
				       v3d_irq, IRQF_SHARED,
				       "v3d_core0", v3d);
		if (ret)
			goto fail;
		ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
				       v3d_hub_irq, IRQF_SHARED,
				       "v3d_hub", v3d);
		if (ret)
			goto fail;
	} else {
		v3d->single_irq_line = true;

		ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
				       v3d_irq, IRQF_SHARED,
				       "v3d", v3d);
		if (ret)
			goto fail;
	}

	v3d_irq_enable(v3d);
	return 0;

fail:
	if (ret != -EPROBE_DEFER)
		dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
	return ret;
}

void
v3d_irq_enable(struct v3d_dev *v3d)
{
	int core;

	/* Enable our set of interrupts, masking out any others. */
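	/* Writing a bit to INT_MSK_SET masks (disables) that
	 * interrupt; writing it to INT_MSK_CLR unmasks it.
	 */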
	for (core = 0; core < v3d->cores; core++) {
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS);
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS);
	}

	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS);
	V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS);
}

void
v3d_irq_disable(struct v3d_dev *v3d)
{
	int core;

	/* Disable all interrupts. */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0);
	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0);

	/* Clear any pending interrupts we might have left. */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);

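	/* With the OOM interrupt masked, make sure any queued overflow
	 * work has finished or been cancelled.
	 */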
	cancel_work_sync(&v3d->overflow_mem_work);
}

/** Reinitializes interrupt registers when a GPU reset is performed. */
void v3d_irq_reset(struct v3d_dev *v3d)
{
	v3d_irq_enable(v3d);
}
255}