/*
 * Copyright (C) 2012 Texas Instruments
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "drm_flip_work.h"
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic_helper.h>

#include "tilcdc_drv.h"
#include "tilcdc_regs.h"

#define TILCDC_VBLANK_SAFETY_THRESHOLD_US 1000

struct tilcdc_crtc {
	struct drm_crtc base;

	struct drm_plane primary;
	const struct tilcdc_panel_info *info;
	struct drm_pending_vblank_event *event;
	bool enabled;
	wait_queue_head_t frame_done_wq;
	bool frame_done;
	spinlock_t irq_lock;

	ktime_t last_vblank;

	struct drm_framebuffer *curr_fb;
	struct drm_framebuffer *next_fb;

	/* for deferred fb unrefs: */
	struct drm_flip_work unref_work;

	/* Only set if an external encoder is connected */
	bool simulate_vesa_sync;

	int sync_lost_count;
	bool frame_intact;
};
#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)

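/* flip_work callback: drop a deferred framebuffer reference. */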
static void unref_worker(struct drm_flip_work *work, void *val)
{
	struct tilcdc_crtc *tilcdc_crtc =
		container_of(work, struct tilcdc_crtc, unref_work);
	struct drm_device *dev = tilcdc_crtc->base.dev;

	mutex_lock(&dev->mode_config.mutex);
	drm_framebuffer_unreference(val);
	mutex_unlock(&dev->mode_config.mutex);
}

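/*
 * Program the DMA base and ceiling registers for the given framebuffer and
 * queue the previously scanned-out framebuffer for a deferred unref.
 */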
static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_gem_cma_object *gem;
	unsigned int depth, bpp;
	dma_addr_t start, end;

	drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
	gem = drm_fb_cma_get_gem_obj(fb, 0);

	start = gem->paddr + fb->offsets[0] +
		crtc->y * fb->pitches[0] +
		crtc->x * bpp / 8;

	end = start + (crtc->mode.vdisplay * fb->pitches[0]);

	tilcdc_write(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, start);
	tilcdc_write(dev, LCDC_DMA_FB_CEILING_ADDR_0_REG, end);

	if (tilcdc_crtc->curr_fb)
		drm_flip_work_queue(&tilcdc_crtc->unref_work,
			tilcdc_crtc->curr_fb);

	tilcdc_crtc->curr_fb = fb;
}

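/* Enable the LCDC interrupts used by this driver (rev 1 and rev 2 use different registers). */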
static void tilcdc_crtc_enable_irqs(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = dev->dev_private;

	tilcdc_clear_irqstatus(dev, 0xffffffff);

	if (priv->rev == 1) {
		tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
			LCDC_V1_UNDERFLOW_INT_ENA);
	} else {
		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG,
			LCDC_V2_UNDERFLOW_INT_ENA |
			LCDC_V2_END_OF_FRAME0_INT_ENA |
			LCDC_FRAME_DONE | LCDC_SYNC_LOST);
	}
}

static void tilcdc_crtc_disable_irqs(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = dev->dev_private;

	/* disable irqs that we might have enabled: */
	if (priv->rev == 1) {
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
			LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
		tilcdc_clear(dev, LCDC_DMA_CTRL_REG,
			LCDC_V1_END_OF_FRAME_INT_ENA);
	} else {
		tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
			LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
			LCDC_V2_END_OF_FRAME0_INT_ENA |
			LCDC_FRAME_DONE | LCDC_SYNC_LOST);
	}
}

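/* Soft reset of the LCDC core; only done on rev 2. */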
static void reset(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;

	if (priv->rev != 2)
		return;

	tilcdc_set(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
	usleep_range(250, 1000);
	tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
}

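/* Power up the LCDC, enable raster output and turn vblank handling on. */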
static void tilcdc_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);

	if (tilcdc_crtc->enabled)
		return;

	pm_runtime_get_sync(dev->dev);

	reset(crtc);

	tilcdc_crtc_enable_irqs(dev);

	tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_PALETTE_LOAD_MODE(DATA_ONLY));
	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);

	drm_crtc_vblank_on(crtc);

	tilcdc_crtc->enabled = true;
}

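/*
 * Stop raster output, wait for the last frame to finish (rev 2), release
 * any pending framebuffers and power the LCDC back down.
 */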
void tilcdc_crtc_disable(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;

	if (!tilcdc_crtc->enabled)
		return;

	tilcdc_crtc->frame_done = false;
	tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);

	/*
	 * If necessary, wait for the framedone irq, which will still come
	 * before putting things to sleep.
	 */
	if (priv->rev == 2) {
		int ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
				tilcdc_crtc->frame_done,
				msecs_to_jiffies(500));
		if (ret == 0)
			dev_err(dev->dev, "%s: timeout waiting for framedone\n",
				__func__);
	}

	drm_crtc_vblank_off(crtc);

	tilcdc_crtc_disable_irqs(dev);

	pm_runtime_put_sync(dev->dev);

	if (tilcdc_crtc->next_fb) {
		drm_flip_work_queue(&tilcdc_crtc->unref_work,
				    tilcdc_crtc->next_fb);
		tilcdc_crtc->next_fb = NULL;
	}

	if (tilcdc_crtc->curr_fb) {
		drm_flip_work_queue(&tilcdc_crtc->unref_work,
				    tilcdc_crtc->curr_fb);
		tilcdc_crtc->curr_fb = NULL;
	}

	drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
	tilcdc_crtc->last_vblank = ktime_set(0, 0);

	tilcdc_crtc->enabled = false;
}

static bool tilcdc_crtc_is_on(struct drm_crtc *crtc)
{
	return crtc->state && crtc->state->enable && crtc->state->active;
}

static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);

	tilcdc_crtc_disable(crtc);

	of_node_put(crtc->port);
	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
}

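/*
 * Queue a page flip: take a reference on the new framebuffer and either
 * program it immediately or defer it to the next end-of-frame interrupt.
 */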
int tilcdc_crtc_page_flip(struct drm_crtc *crtc,
		struct drm_framebuffer *fb,
		struct drm_pending_vblank_event *event,
		uint32_t page_flip_flags)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	if (tilcdc_crtc->event) {
		dev_err(dev->dev, "already pending page flip!\n");
		return -EBUSY;
	}

	drm_framebuffer_reference(fb);

	crtc->primary->fb = fb;

	pm_runtime_get_sync(dev->dev);

	spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);

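	/*
	 * If the next vblank is expected within the safety threshold,
	 * leave the flip to the end-of-frame interrupt handler (via
	 * next_fb) instead of updating the scanout address here.
	 */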
	if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) {
		ktime_t next_vblank;
		s64 tdiff;

		next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
			1000000 / crtc->hwmode.vrefresh);

		tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));

		if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
			tilcdc_crtc->next_fb = fb;
	}

	if (tilcdc_crtc->next_fb != fb)
		set_scanout(crtc, fb);

	tilcdc_crtc->event = event;

	spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);

	pm_runtime_put_sync(dev->dev);

	return 0;
}

static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);

	if (!tilcdc_crtc->simulate_vesa_sync)
		return true;

	/*
	 * tilcdc does not generate VESA-compliant sync but aligns
	 * VS on the second edge of HS instead of the first edge.
	 * We use adjusted_mode to fix up the sync by aligning both
	 * rising edges and adding the HSKEW offset.
	 */
	adjusted_mode->hskew = mode->hsync_end - mode->hsync_start;
	adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;

	if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
		adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
		adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC;
	} else {
		adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC;
		adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC;
	}

	return true;
}

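/*
 * Program the LCDC for the adjusted mode: DMA burst/FIFO settings, raster
 * timings, display type and sync polarities, then set the initial scanout
 * buffer and update the pixel clock.
 */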
static void tilcdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	const struct tilcdc_panel_info *info = tilcdc_crtc->info;
	uint32_t reg, hbp, hfp, hsw, vbp, vfp, vsw;
	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
	struct drm_framebuffer *fb = crtc->primary->state->fb;

	if (WARN_ON(!info))
		return;

	if (WARN_ON(!fb))
		return;

	pm_runtime_get_sync(dev->dev);

	/* Configure the Burst Size and FIFO threshold of DMA: */
	reg = tilcdc_read(dev, LCDC_DMA_CTRL_REG) & ~0x00000770;
	switch (info->dma_burst_sz) {
	case 1:
		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_1);
		break;
	case 2:
		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_2);
		break;
	case 4:
		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_4);
		break;
	case 8:
		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_8);
		break;
	case 16:
		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_16);
		break;
	default:
		dev_err(dev->dev, "invalid burst size\n");
		return;
	}
	reg |= (info->fifo_th << 8);
	tilcdc_write(dev, LCDC_DMA_CTRL_REG, reg);

	/* Configure timings: */
	hbp = mode->htotal - mode->hsync_end;
	hfp = mode->hsync_start - mode->hdisplay;
	hsw = mode->hsync_end - mode->hsync_start;
	vbp = mode->vtotal - mode->vsync_end;
	vfp = mode->vsync_start - mode->vdisplay;
	vsw = mode->vsync_end - mode->vsync_start;

	DBG("%dx%d, hbp=%u, hfp=%u, hsw=%u, vbp=%u, vfp=%u, vsw=%u",
	    mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw);

	/* Set AC Bias Period and Number of Transitions per Interrupt: */
	reg = tilcdc_read(dev, LCDC_RASTER_TIMING_2_REG) & ~0x000fff00;
	reg |= LCDC_AC_BIAS_FREQUENCY(info->ac_bias) |
		LCDC_AC_BIAS_TRANSITIONS_PER_INT(info->ac_bias_intrpt);

	/*
	 * subtract one from hfp, hbp, hsw because the hardware uses
	 * a value of 0 as 1
	 */
	if (priv->rev == 2) {
		/* clear bits we're going to set */
		reg &= ~0x78000033;
		reg |= ((hfp-1) & 0x300) >> 8;
		reg |= ((hbp-1) & 0x300) >> 4;
		reg |= ((hsw-1) & 0x3c0) << 21;
	}
	tilcdc_write(dev, LCDC_RASTER_TIMING_2_REG, reg);

	reg = (((mode->hdisplay >> 4) - 1) << 4) |
		(((hbp-1) & 0xff) << 24) |
		(((hfp-1) & 0xff) << 16) |
		(((hsw-1) & 0x3f) << 10);
	if (priv->rev == 2)
		reg |= (((mode->hdisplay >> 4) - 1) & 0x40) >> 3;
	tilcdc_write(dev, LCDC_RASTER_TIMING_0_REG, reg);

	reg = ((mode->vdisplay - 1) & 0x3ff) |
		((vbp & 0xff) << 24) |
		((vfp & 0xff) << 16) |
		(((vsw-1) & 0x3f) << 10);
	tilcdc_write(dev, LCDC_RASTER_TIMING_1_REG, reg);

	/*
	 * be sure to set Bit 10 for the V2 LCDC controller,
	 * otherwise it is limited to 1024 pixels width, stopping
	 * 1920x1080 from being supported.
	 */
	if (priv->rev == 2) {
		if ((mode->vdisplay - 1) & 0x400) {
			tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG,
				LCDC_LPP_B10);
		} else {
			tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG,
				LCDC_LPP_B10);
		}
	}

	/* Configure display type: */
	reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG) &
		~(LCDC_TFT_MODE | LCDC_MONO_8BIT_MODE | LCDC_MONOCHROME_MODE |
		  LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK |
		  0x000ff000 /* Palette Loading Delay bits */);
	reg |= LCDC_TFT_MODE; /* no monochrome/passive support */
	if (info->tft_alt_mode)
		reg |= LCDC_TFT_ALT_ENABLE;
	if (priv->rev == 2) {
		unsigned int depth, bpp;

		drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
		switch (bpp) {
		case 16:
			break;
		case 32:
			reg |= LCDC_V2_TFT_24BPP_UNPACK;
			/* fallthrough */
		case 24:
			reg |= LCDC_V2_TFT_24BPP_MODE;
			break;
		default:
			dev_err(dev->dev, "invalid pixel format\n");
			return;
		}
	}
	reg |= info->fdd << 12; /* FIFO DMA request delay, bits 19:12 */
	tilcdc_write(dev, LCDC_RASTER_CTRL_REG, reg);

	if (info->invert_pxl_clk)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);

	if (info->sync_ctrl)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);

	if (info->sync_edge)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);

	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);

	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);

	if (info->raster_order)
		tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
	else
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);

	drm_framebuffer_reference(fb);

	set_scanout(crtc, fb);

	tilcdc_crtc_update_clk(crtc);

	pm_runtime_put_sync(dev->dev);

	crtc->hwmode = crtc->state->adjusted_mode;
}

static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
				    struct drm_crtc_state *state)
{
	struct drm_display_mode *mode = &state->mode;
	int ret;

	/* If we are not active we don't care */
	if (!state->active)
		return 0;

	if (state->state->planes[0].ptr != crtc->primary ||
	    state->state->planes[0].state == NULL ||
	    state->state->planes[0].state->crtc != crtc) {
		dev_dbg(crtc->dev->dev, "CRTC primary plane must be present");
		return -EINVAL;
	}

	ret = tilcdc_crtc_mode_valid(crtc, mode);
	if (ret) {
		dev_dbg(crtc->dev->dev, "Mode \"%s\" not valid", mode->name);
		return -EINVAL;
	}

	return 0;
}

static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
	.destroy        = tilcdc_crtc_destroy,
	.set_config     = drm_atomic_helper_set_config,
	.page_flip      = drm_atomic_helper_page_flip,
	.reset          = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};

static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
	.mode_fixup     = tilcdc_crtc_mode_fixup,
	.enable         = tilcdc_crtc_enable,
	.disable        = tilcdc_crtc_disable,
	.atomic_check   = tilcdc_crtc_atomic_check,
	.mode_set_nofb  = tilcdc_crtc_mode_set_nofb,
};

int tilcdc_crtc_max_width(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	int max_width = 0;

	if (priv->rev == 1)
		max_width = 1024;
	else if (priv->rev == 2)
		max_width = 2048;

	return max_width;
}

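/*
 * Check a mode against the LCDC limits: width/height, porch and sync-width
 * register ranges, plus the pixel clock, width and bandwidth caps from DT.
 */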
int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
	struct tilcdc_drm_private *priv = crtc->dev->dev_private;
	unsigned int bandwidth;
	uint32_t hbp, hfp, hsw, vbp, vfp, vsw;

	/*
	 * check to see if the width is within the range that
	 * the LCD Controller physically supports
	 */
	if (mode->hdisplay > tilcdc_crtc_max_width(crtc))
		return MODE_VIRTUAL_X;

	/* width must be multiple of 16 */
	if (mode->hdisplay & 0xf)
		return MODE_VIRTUAL_X;

	if (mode->vdisplay > 2048)
		return MODE_VIRTUAL_Y;

	DBG("Processing mode %dx%d@%d with pixel clock %d",
	    mode->hdisplay, mode->vdisplay,
	    drm_mode_vrefresh(mode), mode->clock);

	hbp = mode->htotal - mode->hsync_end;
	hfp = mode->hsync_start - mode->hdisplay;
	hsw = mode->hsync_end - mode->hsync_start;
	vbp = mode->vtotal - mode->vsync_end;
	vfp = mode->vsync_start - mode->vdisplay;
	vsw = mode->vsync_end - mode->vsync_start;

	if ((hbp-1) & ~0x3ff) {
		DBG("Pruning mode: Horizontal Back Porch out of range");
		return MODE_HBLANK_WIDE;
	}

	if ((hfp-1) & ~0x3ff) {
		DBG("Pruning mode: Horizontal Front Porch out of range");
		return MODE_HBLANK_WIDE;
	}

	if ((hsw-1) & ~0x3ff) {
		DBG("Pruning mode: Horizontal Sync Width out of range");
		return MODE_HSYNC_WIDE;
	}

	if (vbp & ~0xff) {
		DBG("Pruning mode: Vertical Back Porch out of range");
		return MODE_VBLANK_WIDE;
	}

	if (vfp & ~0xff) {
		DBG("Pruning mode: Vertical Front Porch out of range");
		return MODE_VBLANK_WIDE;
	}

	if ((vsw-1) & ~0x3f) {
		DBG("Pruning mode: Vertical Sync Width out of range");
		return MODE_VSYNC_WIDE;
	}

	/*
	 * some devices have a maximum allowed pixel clock
	 * configured from the DT
	 */
	if (mode->clock > priv->max_pixelclock) {
		DBG("Pruning mode: pixel clock too high");
		return MODE_CLOCK_HIGH;
	}

	/*
	 * some devices further limit the max horizontal resolution
	 * configured from the DT
	 */
	if (mode->hdisplay > priv->max_width)
		return MODE_BAD_WIDTH;

	/* filter out modes that would require too much memory bandwidth: */
	bandwidth = mode->hdisplay * mode->vdisplay *
		drm_mode_vrefresh(mode);
	if (bandwidth > priv->max_bandwidth) {
		DBG("Pruning mode: exceeds defined bandwidth limit");
		return MODE_BAD;
	}

	return MODE_OK;
}

void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
		const struct tilcdc_panel_info *info)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);

	tilcdc_crtc->info = info;
}

void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
					bool simulate_vesa_sync)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);

	tilcdc_crtc->simulate_vesa_sync = simulate_vesa_sync;
}

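/*
 * Reprogram the LCD pixel clock for the current mode. The CRTC is disabled
 * while the functional clock rate and divisor are changed, then re-enabled
 * if its state says it should be on.
 */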
void tilcdc_crtc_update_clk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	unsigned long lcd_clk;
	const unsigned clkdiv = 2; /* using a fixed divider of 2 */
	int ret;

	pm_runtime_get_sync(dev->dev);

	tilcdc_crtc_disable(crtc);

	/* mode.clock is in KHz, set_rate wants parameter in Hz */
	ret = clk_set_rate(priv->clk, crtc->mode.clock * 1000 * clkdiv);
	if (ret < 0) {
		dev_err(dev->dev, "failed to set display clock rate to: %d\n",
			crtc->mode.clock);
		goto out;
	}

	lcd_clk = clk_get_rate(priv->clk);

	DBG("lcd_clk=%lu, mode clock=%d, div=%u",
	    lcd_clk, crtc->mode.clock, clkdiv);

	/* Configure the LCD clock divisor. */
	tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(clkdiv) |
			LCDC_RASTER_MODE);

	if (priv->rev == 2)
		tilcdc_set(dev, LCDC_CLK_ENABLE_REG,
			LCDC_V2_DMA_CLK_EN | LCDC_V2_LIDD_CLK_EN |
			LCDC_V2_CORE_CLK_EN);

	if (tilcdc_crtc_is_on(crtc))
		tilcdc_crtc_enable(crtc);

out:
	pm_runtime_put_sync(dev->dev);
}

#define SYNC_LOST_COUNT_LIMIT 50

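/*
 * Main LCDC interrupt handler: handles end-of-frame (vblank and deferred
 * page flips), FIFO underflows, frame-done and sync-lost interrupts.
 */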
irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	uint32_t stat;

	stat = tilcdc_read_irqstatus(dev);
	tilcdc_clear_irqstatus(dev, stat);

	if (stat & LCDC_END_OF_FRAME0) {
		unsigned long flags;
		bool skip_event = false;
		ktime_t now;

		now = ktime_get();

		drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);

		spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);

		tilcdc_crtc->last_vblank = now;

		if (tilcdc_crtc->next_fb) {
			set_scanout(crtc, tilcdc_crtc->next_fb);
			tilcdc_crtc->next_fb = NULL;
			skip_event = true;
		}

		spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);

		drm_crtc_handle_vblank(crtc);

		if (!skip_event) {
			struct drm_pending_vblank_event *event;

			spin_lock_irqsave(&dev->event_lock, flags);

			event = tilcdc_crtc->event;
			tilcdc_crtc->event = NULL;
			if (event)
				drm_crtc_send_vblank_event(crtc, event);

			spin_unlock_irqrestore(&dev->event_lock, flags);
		}

		if (tilcdc_crtc->frame_intact)
			tilcdc_crtc->sync_lost_count = 0;
		else
			tilcdc_crtc->frame_intact = true;
	}

	if (stat & LCDC_FIFO_UNDERFLOW)
		dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underflow",
				    __func__, stat);

	/* For revision 2 only */
	if (priv->rev == 2) {
		if (stat & LCDC_FRAME_DONE) {
			tilcdc_crtc->frame_done = true;
			wake_up(&tilcdc_crtc->frame_done_wq);
		}

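		/*
		 * Count consecutive sync-lost interrupts and mask the
		 * interrupt once the limit is hit; the counter is reset
		 * from the end-of-frame path after an intact frame.
		 */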
		if (stat & LCDC_SYNC_LOST) {
			dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
					    __func__, stat);
			tilcdc_crtc->frame_intact = false;
			if (tilcdc_crtc->sync_lost_count++ >
			    SYNC_LOST_COUNT_LIMIT) {
				dev_err(dev->dev, "%s(0x%08x): Sync lost flood detected, disabling the interrupt", __func__, stat);
				tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
					     LCDC_SYNC_LOST);
			}
		}

		/* Indicate to LCDC that the interrupt service routine has
		 * completed, see 13.3.6.1.6 in AM335x TRM.
		 */
		tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0);
	}

	return IRQ_HANDLED;
}

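/*
 * Allocate and initialize the CRTC along with its primary plane, the
 * deferred-unref flip work and, for componentized devices, the OF port node.
 */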
struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct tilcdc_crtc *tilcdc_crtc;
	struct drm_crtc *crtc;
	int ret;

	tilcdc_crtc = devm_kzalloc(dev->dev, sizeof(*tilcdc_crtc), GFP_KERNEL);
	if (!tilcdc_crtc) {
		dev_err(dev->dev, "allocation failed\n");
		return NULL;
	}

	crtc = &tilcdc_crtc->base;

	ret = tilcdc_plane_init(dev, &tilcdc_crtc->primary);
	if (ret < 0)
		goto fail;

	init_waitqueue_head(&tilcdc_crtc->frame_done_wq);

	drm_flip_work_init(&tilcdc_crtc->unref_work,
			"unref", unref_worker);

	spin_lock_init(&tilcdc_crtc->irq_lock);

	ret = drm_crtc_init_with_planes(dev, crtc,
					&tilcdc_crtc->primary,
					NULL,
					&tilcdc_crtc_funcs,
					"tilcdc crtc");
	if (ret < 0)
		goto fail;

	drm_crtc_helper_add(crtc, &tilcdc_crtc_helper_funcs);

	if (priv->is_componentized) {
		struct device_node *ports =
			of_get_child_by_name(dev->dev->of_node, "ports");

		if (ports) {
			crtc->port = of_get_child_by_name(ports, "port");
			of_node_put(ports);
		} else {
			crtc->port =
				of_get_child_by_name(dev->dev->of_node, "port");
		}
		if (!crtc->port) { /* This should never happen */
			dev_err(dev->dev, "Port node not found in %s\n",
				dev->dev->of_node->full_name);
			goto fail;
		}
	}

	return crtc;

fail:
	tilcdc_crtc_destroy(crtc);
	return NULL;
}