// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
 * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
 */
#include <linux/module.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/reset.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>

#include <drm/drm_fourcc.h>

#include <video/imx-ipu-v3.h>
#include "ipu-prv.h"

static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset)
{
	return readl(ipu->cm_reg + offset);
}

static inline void ipu_cm_write(struct ipu_soc *ipu, u32 value, unsigned offset)
{
	writel(value, ipu->cm_reg + offset);
}

int ipu_get_num(struct ipu_soc *ipu)
{
	return ipu->id;
}
EXPORT_SYMBOL_GPL(ipu_get_num);

void ipu_srm_dp_update(struct ipu_soc *ipu, bool sync)
{
	u32 val;

	val = ipu_cm_read(ipu, IPU_SRM_PRI2);
	val &= ~DP_S_SRM_MODE_MASK;
	val |= sync ? DP_S_SRM_MODE_NEXT_FRAME :
		      DP_S_SRM_MODE_NOW;
	ipu_cm_write(ipu, val, IPU_SRM_PRI2);
}
EXPORT_SYMBOL_GPL(ipu_srm_dp_update);

enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
{
	switch (drm_fourcc) {
	case DRM_FORMAT_ARGB1555:
	case DRM_FORMAT_ABGR1555:
	case DRM_FORMAT_RGBA5551:
	case DRM_FORMAT_BGRA5551:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_ARGB4444:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_RGBX8888:
	case DRM_FORMAT_BGRX8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_RGBA8888:
	case DRM_FORMAT_BGRA8888:
	case DRM_FORMAT_RGB565_A8:
	case DRM_FORMAT_BGR565_A8:
	case DRM_FORMAT_RGB888_A8:
	case DRM_FORMAT_BGR888_A8:
	case DRM_FORMAT_RGBX8888_A8:
	case DRM_FORMAT_BGRX8888_A8:
		return IPUV3_COLORSPACE_RGB;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YUV420:
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YUV422:
	case DRM_FORMAT_YVU422:
	case DRM_FORMAT_YUV444:
	case DRM_FORMAT_YVU444:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
	case DRM_FORMAT_NV16:
	case DRM_FORMAT_NV61:
		return IPUV3_COLORSPACE_YUV;
	default:
		return IPUV3_COLORSPACE_UNKNOWN;
	}
}
EXPORT_SYMBOL_GPL(ipu_drm_fourcc_to_colorspace);

enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
{
	switch (pixelformat) {
	case V4L2_PIX_FMT_YUV420:
	case V4L2_PIX_FMT_YVU420:
	case V4L2_PIX_FMT_YUV422P:
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		return IPUV3_COLORSPACE_YUV;
	case V4L2_PIX_FMT_XRGB32:
	case V4L2_PIX_FMT_XBGR32:
	case V4L2_PIX_FMT_RGB32:
	case V4L2_PIX_FMT_BGR32:
	case V4L2_PIX_FMT_RGB24:
	case V4L2_PIX_FMT_BGR24:
	case V4L2_PIX_FMT_RGB565:
		return IPUV3_COLORSPACE_RGB;
	default:
		return IPUV3_COLORSPACE_UNKNOWN;
	}
}
EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace);

bool ipu_pixelformat_is_planar(u32 pixelformat)
{
	switch (pixelformat) {
	case V4L2_PIX_FMT_YUV420:
	case V4L2_PIX_FMT_YVU420:
	case V4L2_PIX_FMT_YUV422P:
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(ipu_pixelformat_is_planar);

enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code)
{
	switch (mbus_code & 0xf000) {
	case 0x1000:
		return IPUV3_COLORSPACE_RGB;
	case 0x2000:
		return IPUV3_COLORSPACE_YUV;
	default:
		return IPUV3_COLORSPACE_UNKNOWN;
	}
}
EXPORT_SYMBOL_GPL(ipu_mbus_code_to_colorspace);

int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat)
{
	switch (pixelformat) {
	case V4L2_PIX_FMT_YUV420:
	case V4L2_PIX_FMT_YVU420:
	case V4L2_PIX_FMT_YUV422P:
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		/*
		 * for the planar YUV formats, the stride passed to
		 * cpmem must be the stride in bytes of the Y plane.
		 * And all the planar YUV formats have an 8-bit
		 * Y component.
		 */
		return (8 * pixel_stride) >> 3;
	case V4L2_PIX_FMT_RGB565:
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_UYVY:
		return (16 * pixel_stride) >> 3;
	case V4L2_PIX_FMT_BGR24:
	case V4L2_PIX_FMT_RGB24:
		return (24 * pixel_stride) >> 3;
	case V4L2_PIX_FMT_BGR32:
	case V4L2_PIX_FMT_RGB32:
	case V4L2_PIX_FMT_XBGR32:
	case V4L2_PIX_FMT_XRGB32:
		return (32 * pixel_stride) >> 3;
	default:
		break;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(ipu_stride_to_bytes);

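/*
 * Worked example (illustrative only, not part of the original code):
 * for a 1280-pixel-wide line, ipu_stride_to_bytes(1280,
 * V4L2_PIX_FMT_RGB565) returns (16 * 1280) >> 3 = 2560 bytes, while
 * V4L2_PIX_FMT_YUV420 returns (8 * 1280) >> 3 = 1280 bytes, i.e. the
 * stride of the 8-bit Y plane alone.
 */
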
int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
			    bool hflip, bool vflip)
{
	u32 r90, vf, hf;

	switch (degrees) {
	case 0:
		vf = hf = r90 = 0;
		break;
	case 90:
		vf = hf = 0;
		r90 = 1;
		break;
	case 180:
		vf = hf = 1;
		r90 = 0;
		break;
	case 270:
		vf = hf = r90 = 1;
		break;
	default:
		return -EINVAL;
	}

	hf ^= (u32)hflip;
	vf ^= (u32)vflip;

	*mode = (enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf);
	return 0;
}
EXPORT_SYMBOL_GPL(ipu_degrees_to_rot_mode);

int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode,
			    bool hflip, bool vflip)
{
	u32 r90, vf, hf;

	r90 = ((u32)mode >> 2) & 0x1;
	hf = ((u32)mode >> 1) & 0x1;
	vf = ((u32)mode >> 0) & 0x1;
	hf ^= (u32)hflip;
	vf ^= (u32)vflip;

	switch ((enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf)) {
	case IPU_ROTATE_NONE:
		*degrees = 0;
		break;
	case IPU_ROTATE_90_RIGHT:
		*degrees = 90;
		break;
	case IPU_ROTATE_180:
		*degrees = 180;
		break;
	case IPU_ROTATE_90_LEFT:
		*degrees = 270;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_rot_mode_to_degrees);

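/*
 * Worked example (illustrative only, not part of the original code):
 * the rotate mode packs its bits as (rot-90 << 2) | (hflip << 1) | vflip,
 * so requesting 180 degrees combined with an extra hflip cancels the
 * horizontal flip and leaves only the vertical one:
 *
 *	enum ipu_rotate_mode mode;
 *	int ret = ipu_degrees_to_rot_mode(&mode, 180, true, false);
 *	// ret == 0, mode == 0x1 (vertical flip only)
 */
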
struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num)
{
	struct ipuv3_channel *channel;

	dev_dbg(ipu->dev, "%s %d\n", __func__, num);

	if (num > 63)
		return ERR_PTR(-ENODEV);

	mutex_lock(&ipu->channel_lock);

	list_for_each_entry(channel, &ipu->channels, list) {
		if (channel->num == num) {
			channel = ERR_PTR(-EBUSY);
			goto out;
		}
	}

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel) {
		channel = ERR_PTR(-ENOMEM);
		goto out;
	}

	channel->num = num;
	channel->ipu = ipu;
	list_add(&channel->list, &ipu->channels);

out:
	mutex_unlock(&ipu->channel_lock);

	return channel;
}
EXPORT_SYMBOL_GPL(ipu_idmac_get);

void ipu_idmac_put(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;

	dev_dbg(ipu->dev, "%s %d\n", __func__, channel->num);

	mutex_lock(&ipu->channel_lock);

	list_del(&channel->list);
	kfree(channel);

	mutex_unlock(&ipu->channel_lock);
}
EXPORT_SYMBOL_GPL(ipu_idmac_put);

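/*
 * Typical usage sketch (illustrative only, error handling trimmed):
 * a client claims an IDMAC channel by number, uses it, and releases
 * it again:
 *
 *	struct ipuv3_channel *ch;
 *
 *	ch = ipu_idmac_get(ipu, IPUV3_CHANNEL_MEM_BG_SYNC);
 *	if (IS_ERR(ch))
 *		return PTR_ERR(ch);
 *	ipu_idmac_enable_channel(ch);
 *	...
 *	ipu_idmac_disable_channel(ch);
 *	ipu_idmac_put(ch);
 */
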
#define idma_mask(ch)			(1 << ((ch) & 0x1f))

/*
 * This is an undocumented feature: writing a one to a channel's bit in
 * IPU_CHA_CUR_BUF or IPU_CHA_TRIPLE_CUR_BUF resets the channel's
 * internal current buffer pointer so that transfers start from buffer
 * 0 on the next channel enable (that's the theory anyway; the i.MX6 TRM
 * only says these are read-only registers). This operation is required
 * for channel linking to work correctly; for instance, video capture
 * pipelines that carry out image rotations will fail after the first
 * streaming run unless this function is called for each channel before
 * re-enabling the channels.
 */
static void __ipu_idmac_reset_current_buffer(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned int chno = channel->num;

	ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_CUR_BUF(chno));
}

void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
		bool doublebuffer)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&ipu->lock, flags);

	reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
	if (doublebuffer)
		reg |= idma_mask(channel->num);
	else
		reg &= ~idma_mask(channel->num);
	ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(channel->num));

	__ipu_idmac_reset_current_buffer(channel);

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_set_double_buffer);

static const struct {
	int chnum;
	u32 reg;
	int shift;
} idmac_lock_en_info[] = {
	{ .chnum = 5, .reg = IDMAC_CH_LOCK_EN_1, .shift = 0, },
	{ .chnum = 11, .reg = IDMAC_CH_LOCK_EN_1, .shift = 2, },
	{ .chnum = 12, .reg = IDMAC_CH_LOCK_EN_1, .shift = 4, },
	{ .chnum = 14, .reg = IDMAC_CH_LOCK_EN_1, .shift = 6, },
	{ .chnum = 15, .reg = IDMAC_CH_LOCK_EN_1, .shift = 8, },
	{ .chnum = 20, .reg = IDMAC_CH_LOCK_EN_1, .shift = 10, },
	{ .chnum = 21, .reg = IDMAC_CH_LOCK_EN_1, .shift = 12, },
	{ .chnum = 22, .reg = IDMAC_CH_LOCK_EN_1, .shift = 14, },
	{ .chnum = 23, .reg = IDMAC_CH_LOCK_EN_1, .shift = 16, },
	{ .chnum = 27, .reg = IDMAC_CH_LOCK_EN_1, .shift = 18, },
	{ .chnum = 28, .reg = IDMAC_CH_LOCK_EN_1, .shift = 20, },
	{ .chnum = 45, .reg = IDMAC_CH_LOCK_EN_2, .shift = 0, },
	{ .chnum = 46, .reg = IDMAC_CH_LOCK_EN_2, .shift = 2, },
	{ .chnum = 47, .reg = IDMAC_CH_LOCK_EN_2, .shift = 4, },
	{ .chnum = 48, .reg = IDMAC_CH_LOCK_EN_2, .shift = 6, },
	{ .chnum = 49, .reg = IDMAC_CH_LOCK_EN_2, .shift = 8, },
	{ .chnum = 50, .reg = IDMAC_CH_LOCK_EN_2, .shift = 10, },
};

int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned long flags;
	u32 bursts, regval;
	int i;

	switch (num_bursts) {
	case 0:
	case 1:
		bursts = 0x00; /* locking disabled */
		break;
	case 2:
		bursts = 0x01;
		break;
	case 4:
		bursts = 0x02;
		break;
	case 8:
		bursts = 0x03;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * IPUv3EX / i.MX51 has a different register layout, and on IPUv3M /
	 * i.MX53 channel arbitration locking doesn't seem to work properly.
	 * Allow enabling the lock feature on IPUv3H / i.MX6 only.
	 */
	if (bursts && ipu->ipu_type != IPUV3H)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) {
		if (channel->num == idmac_lock_en_info[i].chnum)
			break;
	}
	if (i >= ARRAY_SIZE(idmac_lock_en_info))
		return -EINVAL;

	spin_lock_irqsave(&ipu->lock, flags);

	regval = ipu_idmac_read(ipu, idmac_lock_en_info[i].reg);
	regval &= ~(0x03 << idmac_lock_en_info[i].shift);
	regval |= (bursts << idmac_lock_en_info[i].shift);
	ipu_idmac_write(ipu, regval, idmac_lock_en_info[i].reg);

	spin_unlock_irqrestore(&ipu->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_lock_enable);

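/*
 * Usage sketch (illustrative only, not part of the original code):
 * a caller asks for 8-burst locking on a channel listed in
 * idmac_lock_en_info above; on anything other than IPUv3H the request
 * is rejected with -EINVAL:
 *
 *	ret = ipu_idmac_lock_enable(channel, 8);
 */
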
int ipu_module_enable(struct ipu_soc *ipu, u32 mask)
{
	unsigned long lock_flags;
	u32 val;

	spin_lock_irqsave(&ipu->lock, lock_flags);

	val = ipu_cm_read(ipu, IPU_DISP_GEN);

	if (mask & IPU_CONF_DI0_EN)
		val |= IPU_DI0_COUNTER_RELEASE;
	if (mask & IPU_CONF_DI1_EN)
		val |= IPU_DI1_COUNTER_RELEASE;

	ipu_cm_write(ipu, val, IPU_DISP_GEN);

	val = ipu_cm_read(ipu, IPU_CONF);
	val |= mask;
	ipu_cm_write(ipu, val, IPU_CONF);

	spin_unlock_irqrestore(&ipu->lock, lock_flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_module_enable);

int ipu_module_disable(struct ipu_soc *ipu, u32 mask)
{
	unsigned long lock_flags;
	u32 val;

	spin_lock_irqsave(&ipu->lock, lock_flags);

	val = ipu_cm_read(ipu, IPU_CONF);
	val &= ~mask;
	ipu_cm_write(ipu, val, IPU_CONF);

	val = ipu_cm_read(ipu, IPU_DISP_GEN);

	if (mask & IPU_CONF_DI0_EN)
		val &= ~IPU_DI0_COUNTER_RELEASE;
	if (mask & IPU_CONF_DI1_EN)
		val &= ~IPU_DI1_COUNTER_RELEASE;

	ipu_cm_write(ipu, val, IPU_DISP_GEN);

	spin_unlock_irqrestore(&ipu->lock, lock_flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_module_disable);

int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned int chno = channel->num;

	return (ipu_cm_read(ipu, IPU_CHA_CUR_BUF(chno)) & idma_mask(chno)) ? 1 : 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_get_current_buffer);

bool ipu_idmac_buffer_is_ready(struct ipuv3_channel *channel, u32 buf_num)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned long flags;
	u32 reg = 0;

	spin_lock_irqsave(&ipu->lock, flags);
	switch (buf_num) {
	case 0:
		reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num));
		break;
	case 1:
		reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num));
		break;
	case 2:
		reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(channel->num));
		break;
	}
	spin_unlock_irqrestore(&ipu->lock, flags);

	return ((reg & idma_mask(channel->num)) != 0);
}
EXPORT_SYMBOL_GPL(ipu_idmac_buffer_is_ready);

void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned int chno = channel->num;
	unsigned long flags;

	spin_lock_irqsave(&ipu->lock, flags);

	/* Mark buffer as ready. */
	if (buf_num == 0)
		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
	else
		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_select_buffer);

void ipu_idmac_clear_buffer(struct ipuv3_channel *channel, u32 buf_num)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned int chno = channel->num;
	unsigned long flags;

	spin_lock_irqsave(&ipu->lock, flags);

	ipu_cm_write(ipu, 0xF0300000, IPU_GPR); /* write one to clear */
	switch (buf_num) {
	case 0:
		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
		break;
	case 1:
		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));
		break;
	case 2:
		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF2_RDY(chno));
		break;
	default:
		break;
	}
	ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_clear_buffer);

int ipu_idmac_enable_channel(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&ipu->lock, flags);

	val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
	val |= idma_mask(channel->num);
	ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));

	spin_unlock_irqrestore(&ipu->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel);

bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno)
{
	return (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(chno)) & idma_mask(chno));
}
EXPORT_SYMBOL_GPL(ipu_idmac_channel_busy);

int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(ms);
	while (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(channel->num)) &
			idma_mask(channel->num)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		cpu_relax();
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_wait_busy);

int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&ipu->lock, flags);

	/* Disable DMA channel(s) */
	val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
	val &= ~idma_mask(channel->num);
	ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));

	__ipu_idmac_reset_current_buffer(channel);

	/* Set channel buffers NOT to be ready */
	ipu_cm_write(ipu, 0xf0000000, IPU_GPR); /* write one to clear */

	if (ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num)) &
	    idma_mask(channel->num)) {
		ipu_cm_write(ipu, idma_mask(channel->num),
			     IPU_CHA_BUF0_RDY(channel->num));
	}

	if (ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num)) &
	    idma_mask(channel->num)) {
		ipu_cm_write(ipu, idma_mask(channel->num),
			     IPU_CHA_BUF1_RDY(channel->num));
	}

	ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */

	/* Reset the double buffer */
	val = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
	val &= ~idma_mask(channel->num);
	ipu_cm_write(ipu, val, IPU_CHA_DB_MODE_SEL(channel->num));

	spin_unlock_irqrestore(&ipu->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_disable_channel);

/*
 * The imx6 rev. D TRM says that enabling the WM feature will increase
 * a channel's priority. Refer to Table 36-8 Calculated priority value.
 * The sub-module that is the sink or source for the channel must enable
 * its watermark signal for this to take effect (SMFC_WM for instance).
 */
void ipu_idmac_enable_watermark(struct ipuv3_channel *channel, bool enable)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&ipu->lock, flags);

	val = ipu_idmac_read(ipu, IDMAC_WM_EN(channel->num));
	if (enable)
		val |= 1 << (channel->num % 32);
	else
		val &= ~(1 << (channel->num % 32));
	ipu_idmac_write(ipu, val, IDMAC_WM_EN(channel->num));

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_enable_watermark);

static int ipu_memory_reset(struct ipu_soc *ipu)
{
	unsigned long timeout;

	ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);

	timeout = jiffies + msecs_to_jiffies(1000);
	while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
		if (time_after(jiffies, timeout))
			return -ETIME;
		cpu_relax();
	}

	return 0;
}

/*
 * Set the source mux for the given CSI. Selects either parallel or
 * MIPI CSI2 sources.
 */
void ipu_set_csi_src_mux(struct ipu_soc *ipu, int csi_id, bool mipi_csi2)
{
	unsigned long flags;
	u32 val, mask;

	mask = (csi_id == 1) ? IPU_CONF_CSI1_DATA_SOURCE :
			       IPU_CONF_CSI0_DATA_SOURCE;

	spin_lock_irqsave(&ipu->lock, flags);

	val = ipu_cm_read(ipu, IPU_CONF);
	if (mipi_csi2)
		val |= mask;
	else
		val &= ~mask;
	ipu_cm_write(ipu, val, IPU_CONF);

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_set_csi_src_mux);

/*
 * Set the source mux for the IC. Selects either CSI[01] or the VDI.
 */
void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&ipu->lock, flags);

	val = ipu_cm_read(ipu, IPU_CONF);
	if (vdi)
		val |= IPU_CONF_IC_INPUT;
	else
		val &= ~IPU_CONF_IC_INPUT;

	if (csi_id == 1)
		val |= IPU_CONF_CSI_SEL;
	else
		val &= ~IPU_CONF_CSI_SEL;

	ipu_cm_write(ipu, val, IPU_CONF);

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_set_ic_src_mux);


/* Frame Synchronization Unit Channel Linking */

struct fsu_link_reg_info {
	int chno;
	u32 reg;
	u32 mask;
	u32 val;
};

struct fsu_link_info {
	struct fsu_link_reg_info src;
	struct fsu_link_reg_info sink;
};

static const struct fsu_link_info fsu_link_info[] = {
	{
		.src = { IPUV3_CHANNEL_IC_PRP_ENC_MEM, IPU_FS_PROC_FLOW2,
			 FS_PRP_ENC_DEST_SEL_MASK, FS_PRP_ENC_DEST_SEL_IRT_ENC },
		.sink = { IPUV3_CHANNEL_MEM_ROT_ENC, IPU_FS_PROC_FLOW1,
			  FS_PRPENC_ROT_SRC_SEL_MASK, FS_PRPENC_ROT_SRC_SEL_ENC },
	}, {
		.src = { IPUV3_CHANNEL_IC_PRP_VF_MEM, IPU_FS_PROC_FLOW2,
			 FS_PRPVF_DEST_SEL_MASK, FS_PRPVF_DEST_SEL_IRT_VF },
		.sink = { IPUV3_CHANNEL_MEM_ROT_VF, IPU_FS_PROC_FLOW1,
			  FS_PRPVF_ROT_SRC_SEL_MASK, FS_PRPVF_ROT_SRC_SEL_VF },
	}, {
		.src = { IPUV3_CHANNEL_IC_PP_MEM, IPU_FS_PROC_FLOW2,
			 FS_PP_DEST_SEL_MASK, FS_PP_DEST_SEL_IRT_PP },
		.sink = { IPUV3_CHANNEL_MEM_ROT_PP, IPU_FS_PROC_FLOW1,
			  FS_PP_ROT_SRC_SEL_MASK, FS_PP_ROT_SRC_SEL_PP },
	}, {
		.src = { IPUV3_CHANNEL_CSI_DIRECT, 0 },
		.sink = { IPUV3_CHANNEL_CSI_VDI_PREV, IPU_FS_PROC_FLOW1,
			  FS_VDI_SRC_SEL_MASK, FS_VDI_SRC_SEL_CSI_DIRECT },
	},
};

static const struct fsu_link_info *find_fsu_link_info(int src, int sink)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fsu_link_info); i++) {
		if (src == fsu_link_info[i].src.chno &&
		    sink == fsu_link_info[i].sink.chno)
			return &fsu_link_info[i];
	}

	return NULL;
}

/*
 * Links a source channel to a sink channel in the FSU.
 */
int ipu_fsu_link(struct ipu_soc *ipu, int src_ch, int sink_ch)
{
	const struct fsu_link_info *link;
	u32 src_reg, sink_reg;
	unsigned long flags;

	link = find_fsu_link_info(src_ch, sink_ch);
	if (!link)
		return -EINVAL;

	spin_lock_irqsave(&ipu->lock, flags);

	if (link->src.mask) {
		src_reg = ipu_cm_read(ipu, link->src.reg);
		src_reg &= ~link->src.mask;
		src_reg |= link->src.val;
		ipu_cm_write(ipu, src_reg, link->src.reg);
	}

	if (link->sink.mask) {
		sink_reg = ipu_cm_read(ipu, link->sink.reg);
		sink_reg &= ~link->sink.mask;
		sink_reg |= link->sink.val;
		ipu_cm_write(ipu, sink_reg, link->sink.reg);
	}

	spin_unlock_irqrestore(&ipu->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(ipu_fsu_link);

/*
 * Unlinks source and sink channels in the FSU.
 */
int ipu_fsu_unlink(struct ipu_soc *ipu, int src_ch, int sink_ch)
{
	const struct fsu_link_info *link;
	u32 src_reg, sink_reg;
	unsigned long flags;

	link = find_fsu_link_info(src_ch, sink_ch);
	if (!link)
		return -EINVAL;

	spin_lock_irqsave(&ipu->lock, flags);

	if (link->src.mask) {
		src_reg = ipu_cm_read(ipu, link->src.reg);
		src_reg &= ~link->src.mask;
		ipu_cm_write(ipu, src_reg, link->src.reg);
	}

	if (link->sink.mask) {
		sink_reg = ipu_cm_read(ipu, link->sink.reg);
		sink_reg &= ~link->sink.mask;
		ipu_cm_write(ipu, sink_reg, link->sink.reg);
	}

	spin_unlock_irqrestore(&ipu->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(ipu_fsu_unlink);

/* Link IDMAC channels in the FSU */
int ipu_idmac_link(struct ipuv3_channel *src, struct ipuv3_channel *sink)
{
	return ipu_fsu_link(src->ipu, src->num, sink->num);
}
EXPORT_SYMBOL_GPL(ipu_idmac_link);

/* Unlink IDMAC channels in the FSU */
int ipu_idmac_unlink(struct ipuv3_channel *src, struct ipuv3_channel *sink)
{
	return ipu_fsu_unlink(src->ipu, src->num, sink->num);
}
EXPORT_SYMBOL_GPL(ipu_idmac_unlink);

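/*
 * Usage sketch (illustrative only, not part of the original code):
 * a rotating capture pipeline links the IC viewfinder output channel
 * to the rotator input channel before streaming and unlinks the pair
 * again when it stops:
 *
 *	ipu_fsu_link(ipu, IPUV3_CHANNEL_IC_PRP_VF_MEM,
 *		     IPUV3_CHANNEL_MEM_ROT_VF);
 *	...
 *	ipu_fsu_unlink(ipu, IPUV3_CHANNEL_IC_PRP_VF_MEM,
 *		       IPUV3_CHANNEL_MEM_ROT_VF);
 */
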
struct ipu_devtype {
	const char *name;
	unsigned long cm_ofs;
	unsigned long cpmem_ofs;
	unsigned long srm_ofs;
	unsigned long tpm_ofs;
	unsigned long csi0_ofs;
	unsigned long csi1_ofs;
	unsigned long ic_ofs;
	unsigned long disp0_ofs;
	unsigned long disp1_ofs;
	unsigned long dc_tmpl_ofs;
	unsigned long vdi_ofs;
	enum ipuv3_type type;
};

static struct ipu_devtype ipu_type_imx51 = {
	.name = "IPUv3EX",
	.cm_ofs = 0x1e000000,
	.cpmem_ofs = 0x1f000000,
	.srm_ofs = 0x1f040000,
	.tpm_ofs = 0x1f060000,
	.csi0_ofs = 0x1e030000,
	.csi1_ofs = 0x1e038000,
	.ic_ofs = 0x1e020000,
	.disp0_ofs = 0x1e040000,
	.disp1_ofs = 0x1e048000,
	.dc_tmpl_ofs = 0x1f080000,
	.vdi_ofs = 0x1e068000,
	.type = IPUV3EX,
};

static struct ipu_devtype ipu_type_imx53 = {
	.name = "IPUv3M",
	.cm_ofs = 0x06000000,
	.cpmem_ofs = 0x07000000,
	.srm_ofs = 0x07040000,
	.tpm_ofs = 0x07060000,
	.csi0_ofs = 0x06030000,
	.csi1_ofs = 0x06038000,
	.ic_ofs = 0x06020000,
	.disp0_ofs = 0x06040000,
	.disp1_ofs = 0x06048000,
	.dc_tmpl_ofs = 0x07080000,
	.vdi_ofs = 0x06068000,
	.type = IPUV3M,
};

static struct ipu_devtype ipu_type_imx6q = {
	.name = "IPUv3H",
	.cm_ofs = 0x00200000,
	.cpmem_ofs = 0x00300000,
	.srm_ofs = 0x00340000,
	.tpm_ofs = 0x00360000,
	.csi0_ofs = 0x00230000,
	.csi1_ofs = 0x00238000,
	.ic_ofs = 0x00220000,
	.disp0_ofs = 0x00240000,
	.disp1_ofs = 0x00248000,
	.dc_tmpl_ofs = 0x00380000,
	.vdi_ofs = 0x00268000,
	.type = IPUV3H,
};

static const struct of_device_id imx_ipu_dt_ids[] = {
	{ .compatible = "fsl,imx51-ipu", .data = &ipu_type_imx51, },
	{ .compatible = "fsl,imx53-ipu", .data = &ipu_type_imx53, },
	{ .compatible = "fsl,imx6q-ipu", .data = &ipu_type_imx6q, },
	{ .compatible = "fsl,imx6qp-ipu", .data = &ipu_type_imx6q, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ipu_dt_ids);

static int ipu_submodules_init(struct ipu_soc *ipu,
		struct platform_device *pdev, unsigned long ipu_base,
		struct clk *ipu_clk)
{
	char *unit;
	int ret;
	struct device *dev = &pdev->dev;
	const struct ipu_devtype *devtype = ipu->devtype;

	ret = ipu_cpmem_init(ipu, dev, ipu_base + devtype->cpmem_ofs);
	if (ret) {
		unit = "cpmem";
		goto err_cpmem;
	}

	ret = ipu_csi_init(ipu, dev, 0, ipu_base + devtype->csi0_ofs,
			   IPU_CONF_CSI0_EN, ipu_clk);
	if (ret) {
		unit = "csi0";
		goto err_csi_0;
	}

	ret = ipu_csi_init(ipu, dev, 1, ipu_base + devtype->csi1_ofs,
			   IPU_CONF_CSI1_EN, ipu_clk);
	if (ret) {
		unit = "csi1";
		goto err_csi_1;
	}

	ret = ipu_ic_init(ipu, dev,
			  ipu_base + devtype->ic_ofs,
			  ipu_base + devtype->tpm_ofs);
	if (ret) {
		unit = "ic";
		goto err_ic;
	}

	ret = ipu_vdi_init(ipu, dev, ipu_base + devtype->vdi_ofs,
			   IPU_CONF_VDI_EN | IPU_CONF_ISP_EN |
			   IPU_CONF_IC_INPUT);
	if (ret) {
		unit = "vdi";
		goto err_vdi;
	}

	ret = ipu_image_convert_init(ipu, dev);
	if (ret) {
		unit = "image_convert";
		goto err_image_convert;
	}

	ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
			  IPU_CONF_DI0_EN, ipu_clk);
	if (ret) {
		unit = "di0";
		goto err_di_0;
	}

	ret = ipu_di_init(ipu, dev, 1, ipu_base + devtype->disp1_ofs,
			  IPU_CONF_DI1_EN, ipu_clk);
	if (ret) {
		unit = "di1";
		goto err_di_1;
	}

	ret = ipu_dc_init(ipu, dev, ipu_base + devtype->cm_ofs +
			  IPU_CM_DC_REG_OFS, ipu_base + devtype->dc_tmpl_ofs);
	if (ret) {
		unit = "dc_template";
		goto err_dc;
	}

	ret = ipu_dmfc_init(ipu, dev, ipu_base +
			    devtype->cm_ofs + IPU_CM_DMFC_REG_OFS, ipu_clk);
	if (ret) {
		unit = "dmfc";
		goto err_dmfc;
	}

	ret = ipu_dp_init(ipu, dev, ipu_base + devtype->srm_ofs);
	if (ret) {
		unit = "dp";
		goto err_dp;
	}

	ret = ipu_smfc_init(ipu, dev, ipu_base +
			    devtype->cm_ofs + IPU_CM_SMFC_REG_OFS);
	if (ret) {
		unit = "smfc";
		goto err_smfc;
	}

	return 0;

err_smfc:
	ipu_dp_exit(ipu);
err_dp:
	ipu_dmfc_exit(ipu);
err_dmfc:
	ipu_dc_exit(ipu);
err_dc:
	ipu_di_exit(ipu, 1);
err_di_1:
	ipu_di_exit(ipu, 0);
err_di_0:
	ipu_image_convert_exit(ipu);
err_image_convert:
	ipu_vdi_exit(ipu);
err_vdi:
	ipu_ic_exit(ipu);
err_ic:
	ipu_csi_exit(ipu, 1);
err_csi_1:
	ipu_csi_exit(ipu, 0);
err_csi_0:
	ipu_cpmem_exit(ipu);
err_cpmem:
	dev_err(&pdev->dev, "init %s failed with %d\n", unit, ret);
	return ret;
}

static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
{
	unsigned long status;
	int i, bit, irq;

	for (i = 0; i < num_regs; i++) {

		status = ipu_cm_read(ipu, IPU_INT_STAT(regs[i]));
		status &= ipu_cm_read(ipu, IPU_INT_CTRL(regs[i]));

		for_each_set_bit(bit, &status, 32) {
			irq = irq_linear_revmap(ipu->domain,
						regs[i] * 32 + bit);
			if (irq)
				generic_handle_irq(irq);
		}
	}
}

static void ipu_irq_handler(struct irq_desc *desc)
{
	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	static const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};

	chained_irq_enter(chip, desc);

	ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));

	chained_irq_exit(chip, desc);
}

static void ipu_err_irq_handler(struct irq_desc *desc)
{
	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	static const int int_reg[] = { 4, 5, 8, 9};

	chained_irq_enter(chip, desc);

	ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));

	chained_irq_exit(chip, desc);
}

int ipu_map_irq(struct ipu_soc *ipu, int irq)
{
	int virq;

	virq = irq_linear_revmap(ipu->domain, irq);
	if (!virq)
		virq = irq_create_mapping(ipu->domain, irq);

	return virq;
}
EXPORT_SYMBOL_GPL(ipu_map_irq);

int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
		enum ipu_channel_irq irq_type)
{
	return ipu_map_irq(ipu, irq_type + channel->num);
}
EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq);

static void ipu_submodules_exit(struct ipu_soc *ipu)
{
	ipu_smfc_exit(ipu);
	ipu_dp_exit(ipu);
	ipu_dmfc_exit(ipu);
	ipu_dc_exit(ipu);
	ipu_di_exit(ipu, 1);
	ipu_di_exit(ipu, 0);
	ipu_image_convert_exit(ipu);
	ipu_vdi_exit(ipu);
	ipu_ic_exit(ipu);
	ipu_csi_exit(ipu, 1);
	ipu_csi_exit(ipu, 0);
	ipu_cpmem_exit(ipu);
}

static int platform_remove_devices_fn(struct device *dev, void *unused)
{
	struct platform_device *pdev = to_platform_device(dev);

	platform_device_unregister(pdev);

	return 0;
}

static void platform_device_unregister_children(struct platform_device *pdev)
{
	device_for_each_child(&pdev->dev, NULL, platform_remove_devices_fn);
}

struct ipu_platform_reg {
	struct ipu_client_platformdata pdata;
	const char *name;
};

/* These must be in the order of the corresponding device tree port nodes */
static struct ipu_platform_reg client_reg[] = {
	{
		.pdata = {
			.csi = 0,
			.dma[0] = IPUV3_CHANNEL_CSI0,
			.dma[1] = -EINVAL,
		},
		.name = "imx-ipuv3-csi",
	}, {
		.pdata = {
			.csi = 1,
			.dma[0] = IPUV3_CHANNEL_CSI1,
			.dma[1] = -EINVAL,
		},
		.name = "imx-ipuv3-csi",
	}, {
		.pdata = {
			.di = 0,
			.dc = 5,
			.dp = IPU_DP_FLOW_SYNC_BG,
			.dma[0] = IPUV3_CHANNEL_MEM_BG_SYNC,
			.dma[1] = IPUV3_CHANNEL_MEM_FG_SYNC,
		},
		.name = "imx-ipuv3-crtc",
	}, {
		.pdata = {
			.di = 1,
			.dc = 1,
			.dp = -EINVAL,
			.dma[0] = IPUV3_CHANNEL_MEM_DC_SYNC,
			.dma[1] = -EINVAL,
		},
		.name = "imx-ipuv3-crtc",
	},
};

static DEFINE_MUTEX(ipu_client_id_mutex);
static int ipu_client_id;

static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
{
	struct device *dev = ipu->dev;
	unsigned i;
	int id, ret;

	mutex_lock(&ipu_client_id_mutex);
	id = ipu_client_id;
	ipu_client_id += ARRAY_SIZE(client_reg);
	mutex_unlock(&ipu_client_id_mutex);

	for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
		struct ipu_platform_reg *reg = &client_reg[i];
		struct platform_device *pdev;
		struct device_node *of_node;

		/* Associate subdevice with the corresponding port node */
		of_node = of_graph_get_port_by_id(dev->of_node, i);
		if (!of_node) {
			dev_info(dev,
				 "no port@%d node in %pOF, not using %s%d\n",
				 i, dev->of_node,
				 (i / 2) ? "DI" : "CSI", i % 2);
			continue;
		}

		pdev = platform_device_alloc(reg->name, id++);
		if (!pdev) {
			ret = -ENOMEM;
			goto err_register;
		}

		pdev->dev.parent = dev;

		reg->pdata.of_node = of_node;
		ret = platform_device_add_data(pdev, &reg->pdata,
					       sizeof(reg->pdata));
		if (!ret)
			ret = platform_device_add(pdev);
		if (ret) {
			platform_device_put(pdev);
			goto err_register;
		}
	}

	return 0;

err_register:
	platform_device_unregister_children(to_platform_device(dev));

	return ret;
}


static int ipu_irq_init(struct ipu_soc *ipu)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;
	unsigned long unused[IPU_NUM_IRQS / 32] = {
		0x400100d0, 0xffe000fd,
		0x400100d0, 0xffe000fd,
		0x400100d0, 0xffe000fd,
		0x4077ffff, 0xffe7e1fd,
		0x23fffffe, 0x8880fff0,
		0xf98fe7d0, 0xfff81fff,
		0x400100d0, 0xffe000fd,
		0x00000000,
	};
	int ret, i;

	ipu->domain = irq_domain_add_linear(ipu->dev->of_node, IPU_NUM_IRQS,
					    &irq_generic_chip_ops, ipu);
	if (!ipu->domain) {
		dev_err(ipu->dev, "failed to add irq domain\n");
		return -ENODEV;
	}

	ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU",
					     handle_level_irq, 0, 0, 0);
	if (ret < 0) {
		dev_err(ipu->dev, "failed to alloc generic irq chips\n");
		irq_domain_remove(ipu->domain);
		return ret;
	}

	/* Mask and clear all interrupts */
	for (i = 0; i < IPU_NUM_IRQS; i += 32) {
		ipu_cm_write(ipu, 0, IPU_INT_CTRL(i / 32));
		ipu_cm_write(ipu, ~unused[i / 32], IPU_INT_STAT(i / 32));
	}

	for (i = 0; i < IPU_NUM_IRQS; i += 32) {
		gc = irq_get_domain_generic_chip(ipu->domain, i);
		gc->reg_base = ipu->cm_reg;
		gc->unused = unused[i / 32];
		ct = gc->chip_types;
		ct->chip.irq_ack = irq_gc_ack_set_bit;
		ct->chip.irq_mask = irq_gc_mask_clr_bit;
		ct->chip.irq_unmask = irq_gc_mask_set_bit;
		ct->regs.ack = IPU_INT_STAT(i / 32);
		ct->regs.mask = IPU_INT_CTRL(i / 32);
	}

	irq_set_chained_handler_and_data(ipu->irq_sync, ipu_irq_handler, ipu);
	irq_set_chained_handler_and_data(ipu->irq_err, ipu_err_irq_handler,
					 ipu);

	return 0;
}

static void ipu_irq_exit(struct ipu_soc *ipu)
{
	int i, irq;

	irq_set_chained_handler_and_data(ipu->irq_err, NULL, NULL);
	irq_set_chained_handler_and_data(ipu->irq_sync, NULL, NULL);

	/* TODO: remove irq_domain_generic_chips */

	for (i = 0; i < IPU_NUM_IRQS; i++) {
		irq = irq_linear_revmap(ipu->domain, i);
		if (irq)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(ipu->domain);
}

void ipu_dump(struct ipu_soc *ipu)
{
	int i;

	dev_dbg(ipu->dev, "IPU_CONF = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_CONF));
	dev_dbg(ipu->dev, "IDMAC_CONF = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_CONF));
	dev_dbg(ipu->dev, "IDMAC_CHA_EN1 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_CHA_EN(0)));
	dev_dbg(ipu->dev, "IDMAC_CHA_EN2 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_CHA_EN(32)));
	dev_dbg(ipu->dev, "IDMAC_CHA_PRI1 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_CHA_PRI(0)));
	dev_dbg(ipu->dev, "IDMAC_CHA_PRI2 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_CHA_PRI(32)));
	dev_dbg(ipu->dev, "IDMAC_BAND_EN1 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_BAND_EN(0)));
	dev_dbg(ipu->dev, "IDMAC_BAND_EN2 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_BAND_EN(32)));
	dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL0 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(0)));
	dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL1 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(32)));
	dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW1 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_FS_PROC_FLOW1));
	dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW2 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_FS_PROC_FLOW2));
	dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW3 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_FS_PROC_FLOW3));
	dev_dbg(ipu->dev, "IPU_FS_DISP_FLOW1 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_FS_DISP_FLOW1));
	for (i = 0; i < 15; i++)
		dev_dbg(ipu->dev, "IPU_INT_CTRL(%d) = \t%08X\n", i,
			ipu_cm_read(ipu, IPU_INT_CTRL(i)));
}
EXPORT_SYMBOL_GPL(ipu_dump);

static int ipu_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ipu_soc *ipu;
	struct resource *res;
	unsigned long ipu_base;
	int ret, irq_sync, irq_err;
	const struct ipu_devtype *devtype;

	devtype = of_device_get_match_data(&pdev->dev);
	if (!devtype)
		return -EINVAL;

	irq_sync = platform_get_irq(pdev, 0);
	irq_err = platform_get_irq(pdev, 1);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	dev_dbg(&pdev->dev, "irq_sync: %d irq_err: %d\n",
		irq_sync, irq_err);

	if (!res || irq_sync < 0 || irq_err < 0)
		return -ENODEV;

	ipu_base = res->start;

	ipu = devm_kzalloc(&pdev->dev, sizeof(*ipu), GFP_KERNEL);
	if (!ipu)
		return -ENODEV;

	ipu->id = of_alias_get_id(np, "ipu");
	if (ipu->id < 0)
		ipu->id = 0;

	if (of_device_is_compatible(np, "fsl,imx6qp-ipu") &&
	    IS_ENABLED(CONFIG_DRM)) {
		ipu->prg_priv = ipu_prg_lookup_by_phandle(&pdev->dev,
							  "fsl,prg", ipu->id);
		if (!ipu->prg_priv)
			return -EPROBE_DEFER;
	}

	ipu->devtype = devtype;
	ipu->ipu_type = devtype->type;

	spin_lock_init(&ipu->lock);
	mutex_init(&ipu->channel_lock);
	INIT_LIST_HEAD(&ipu->channels);

	dev_dbg(&pdev->dev, "cm_reg: 0x%08lx\n",
		ipu_base + devtype->cm_ofs);
	dev_dbg(&pdev->dev, "idmac: 0x%08lx\n",
		ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS);
	dev_dbg(&pdev->dev, "cpmem: 0x%08lx\n",
		ipu_base + devtype->cpmem_ofs);
	dev_dbg(&pdev->dev, "csi0: 0x%08lx\n",
		ipu_base + devtype->csi0_ofs);
	dev_dbg(&pdev->dev, "csi1: 0x%08lx\n",
		ipu_base + devtype->csi1_ofs);
	dev_dbg(&pdev->dev, "ic: 0x%08lx\n",
		ipu_base + devtype->ic_ofs);
	dev_dbg(&pdev->dev, "disp0: 0x%08lx\n",
		ipu_base + devtype->disp0_ofs);
	dev_dbg(&pdev->dev, "disp1: 0x%08lx\n",
		ipu_base + devtype->disp1_ofs);
	dev_dbg(&pdev->dev, "srm: 0x%08lx\n",
		ipu_base + devtype->srm_ofs);
	dev_dbg(&pdev->dev, "tpm: 0x%08lx\n",
		ipu_base + devtype->tpm_ofs);
	dev_dbg(&pdev->dev, "dc: 0x%08lx\n",
		ipu_base + devtype->cm_ofs + IPU_CM_DC_REG_OFS);
	dev_dbg(&pdev->dev, "ic: 0x%08lx\n",
		ipu_base + devtype->cm_ofs + IPU_CM_IC_REG_OFS);
	dev_dbg(&pdev->dev, "dmfc: 0x%08lx\n",
		ipu_base + devtype->cm_ofs + IPU_CM_DMFC_REG_OFS);
	dev_dbg(&pdev->dev, "vdi: 0x%08lx\n",
		ipu_base + devtype->vdi_ofs);

	ipu->cm_reg = devm_ioremap(&pdev->dev,
			ipu_base + devtype->cm_ofs, PAGE_SIZE);
	ipu->idmac_reg = devm_ioremap(&pdev->dev,
			ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS,
			PAGE_SIZE);

	if (!ipu->cm_reg || !ipu->idmac_reg)
		return -ENOMEM;

	ipu->clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(ipu->clk)) {
		ret = PTR_ERR(ipu->clk);
		dev_err(&pdev->dev, "clk_get failed with %d", ret);
		return ret;
	}

	platform_set_drvdata(pdev, ipu);

	ret = clk_prepare_enable(ipu->clk);
	if (ret) {
		dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}

	ipu->dev = &pdev->dev;
	ipu->irq_sync = irq_sync;
	ipu->irq_err = irq_err;

	ret = device_reset(&pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to reset: %d\n", ret);
		goto out_failed_reset;
	}
	ret = ipu_memory_reset(ipu);
	if (ret)
		goto out_failed_reset;

	ret = ipu_irq_init(ipu);
	if (ret)
		goto out_failed_irq;

	/* Set MCU_T to divide MCU access window into 2 */
	ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
		     IPU_DISP_GEN);

	ret = ipu_submodules_init(ipu, pdev, ipu_base, ipu->clk);
	if (ret)
		goto failed_submodules_init;

	ret = ipu_add_client_devices(ipu, ipu_base);
	if (ret) {
		dev_err(&pdev->dev, "adding client devices failed with %d\n",
			ret);
		goto failed_add_clients;
	}

	dev_info(&pdev->dev, "%s probed\n", devtype->name);

	return 0;

failed_add_clients:
	ipu_submodules_exit(ipu);
failed_submodules_init:
	ipu_irq_exit(ipu);
out_failed_irq:
out_failed_reset:
	clk_disable_unprepare(ipu->clk);
	return ret;
}

static int ipu_remove(struct platform_device *pdev)
{
	struct ipu_soc *ipu = platform_get_drvdata(pdev);

	platform_device_unregister_children(pdev);
	ipu_submodules_exit(ipu);
	ipu_irq_exit(ipu);

	clk_disable_unprepare(ipu->clk);

	return 0;
}

static struct platform_driver imx_ipu_driver = {
	.driver = {
		.name = "imx-ipuv3",
		.of_match_table = imx_ipu_dt_ids,
	},
	.probe = ipu_probe,
	.remove = ipu_remove,
};

static struct platform_driver * const drivers[] = {
#if IS_ENABLED(CONFIG_DRM)
	&ipu_pre_drv,
	&ipu_prg_drv,
#endif
	&imx_ipu_driver,
};

static int __init imx_ipu_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
module_init(imx_ipu_init);

static void __exit imx_ipu_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(imx_ipu_exit);

MODULE_ALIAS("platform:imx-ipuv3");
MODULE_DESCRIPTION("i.MX IPU v3 driver");
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_LICENSE("GPL");