/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/shmem_fs.h>
#include <linux/spinlock.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA		0x01000000	/* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
	 * is set and the paddr is valid. Also if the buffer is remapped in
	 * TILER and paddr_cnt > 0, then paddr is valid. But if you are using
	 * the physical address and OMAP_BO_DMA is not set, then you should
	 * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
	 * not removed from under your feet.
	 *
	 * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA capable
	 * buffer is requested, but doesn't mean that it is. Use the
	 * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
	 * physical address.
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations. The layout in memory is dictated by
	 * the SGX firmware, which uses this information to stall the command
	 * stream if a surface is not ready yet.
	 *
	 * Note that when buffer is used by SGX, the sync-object needs to be
	 * allocated from a special heap of sync-objects. This way many sync
	 * objects can be packed in a page, and not waste GPU virtual address
	 * space. Because of this we have to have an omap_gem_set_sync_object()
	 * API to allow replacement of the syncobj after it has (potentially)
	 * already been allocated. A bit ugly but I haven't thought of a
	 * better alternative.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};
static struct {
	struct usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
} *usergart;

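/*
 * Illustrative example of the geometry (not used by the code): if
 * tiler_align() rounds a one-page-wide region up to h = 64 rows, then
 * height = 64, height_shift = 6, and one usergart region re-maps 64 pages
 * of the faulting object at a time (see omap_gem_init() below for how the
 * fields are filled in):
 *
 *	usergart[fmt].height       = h;         e.g. 64
 *	usergart[fmt].height_shift = ilog2(h);  e.g. 6
 *	usergart[fmt].stride_pfn   = tiler_stride(fmt, 0) >> PAGE_SHIFT;
 *
 * The actual numbers depend on the TILER container geometry.
 */
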
/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

/* GEM objects can either be allocated from contiguous memory (in which
 * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). But non
 * contiguous buffers can be remapped in TILER/DMM if they need to be
 * contiguous... but we don't do this all the time to reduce pressure
 * on TILER/DMM space when we know at allocation time that the buffer
 * will need to be scanned out.
 */
static inline bool is_shmem(struct drm_gem_object *obj)
{
	return obj->filp != NULL;
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int n = usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride > than PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		if (!usergart)
			return;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct usergart_entry *entry = &usergart[fmt].entry[i];
			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (is_shmem(obj) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

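/*
 * Illustrative sketch (not part of this driver): userspace normally passes
 * the value returned here (obtained via the dumb-buffer MAP ioctl or a
 * driver-specific ioctl) straight to mmap() as the file offset:
 *
 *	uint64_t offset;        from DRM_IOCTL_MODE_MAP_DUMB
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 drm_fd, offset);
 *
 * The offset is a fake, per-object token handed out by drm_vma_manager,
 * not a physical address.
 */
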
/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!). But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* get tiled size, returns -EINVAL if not tiled buffer */
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->flags & OMAP_BO_TILED) {
		*w = omap_obj->width;
		*h = omap_obj->height;
		return 0;
	}
	return -EINVAL;
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill 4kb wide CPU page. If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = usergart[fmt].height;
	const int n_shift = usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to next multiple of PAGE_SIZE.. this needs to be taken
	 * into account in some of the math, so figure out virtual stride
	 * in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &usergart[fmt].entry[usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
		pfn += usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;

	return 0;
}

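/*
 * Worked example of the fault_2d() slot math (illustrative only): with a
 * slot height n = 64 (n_shift = 6) and a buffer narrower than one page
 * (m = 1), a fault at pgoff = 70 rounds down to base_pgoff = 64, so the
 * usergart entry maps object pages 64..127 starting from the user address
 * that corresponds to page 64. For wider buffers (m > 1) the same rounding
 * happens per slot-row, and "off = pgoff % m" picks which 4kb-wide column
 * of that row gets mapped.
 */
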
/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this. But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);


fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER. But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = align_pitch(0, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

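/*
 * Illustrative userspace sketch (assumed typical usage, not part of this
 * file): a KMS client reaches this entry point through the generic
 * dumb-buffer ioctl, roughly:
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	-- create.handle, create.pitch and create.size are now filled in --
 *
 * The handle is then turned into an mmap offset with
 * DRM_IOCTL_MODE_MAP_DUMB, handled by omap_gem_dumb_map_offset() below.
 */
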
/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: memory to return the fake mmap offset in
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position. This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif

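/*
 * Illustrative use of omap_gem_roll() (an assumption about the omap_fbdev
 * caller, not code in this file): console scrolling can be implemented by
 * rolling the pinned buffer by whole pages instead of copying pixels, e.g.
 *
 *	omap_gem_roll(bo, (yoffset * pitch) >> PAGE_SHIFT);
 *
 * The roll value is a page offset into the object and takes effect when the
 * object is (re)pinned into DMM/TILER.
 */
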
/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/**
 * shmem buffers that are mapped cached can simulate coherency by using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	return is_shmem(obj) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				dirty = true;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}

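/*
 * Illustrative pairing of the two sync helpers (assumed caller behaviour,
 * not mandated by this file): for a cached, shmem-backed buffer that the
 * CPU writes and the hardware then reads:
 *
 *	omap_gem_cpu_sync(obj, pgoff);             before CPU touches a page
 *	... CPU access through the cached mapping ...
 *	omap_gem_dma_sync(obj, DMA_TO_DEVICE);     before handing it to hw
 *
 * For OMAP_BO_WC and OMAP_BO_UNCACHED buffers both calls are no-ops, since
 * is_cached_coherent() returns false for them.
 */
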
/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (remap && is_shmem(obj) && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (omap_obj->flags & OMAP_BO_DMA) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
void omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->paddr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}

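/*
 * Illustrative pin/unpin sequence (an assumption about callers such as the
 * plane/framebuffer code, not code in this file):
 *
 *	dma_addr_t paddr;
 *	int ret = omap_gem_get_paddr(obj, &paddr, true);
 *	if (ret)
 *		return ret;
 *	... program DSS/DMA with paddr, wait for the hardware to finish ...
 *	omap_gem_put_paddr(obj);
 *
 * With remap == true a non-contiguous shmem object is pinned into DMM/TILER;
 * paddr_cnt keeps the mapping alive across nested users.
 */
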
/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out). And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr(). Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way. If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev. This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, &omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %d", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, "   ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* -----------------------------------------------------------------------------
 * Buffer Synchronization
 */

static DEFINE_SPINLOCK(sync_lock);

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)


static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* it is a bit lame to handle updates in this sort of polling way, but
 * in case of PVR, the GPU can directly update read/write complete
 * values, and not really tell us which ones it updated.. this also
 * means that sync_lock is not quite sufficient. So we'll need to
 * do something a bit better when it comes time to add support for
 * separate 2d hw..
 */
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}

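/*
 * Illustrative pairing of the op_start/op_finish API (assumed producer and
 * consumer behaviour, not code in this file): a writer brackets its hw job
 * with
 *
 *	omap_gem_op_start(obj, OMAP_GEM_WRITE);
 *	... submit the job that writes the buffer ...
 *	omap_gem_op_finish(obj, OMAP_GEM_WRITE);
 *
 * and a reader that needs the result on the CPU calls
 * omap_gem_op_sync(obj, OMAP_GEM_READ), which blocks until the writes that
 * were pending at that point have completed.
 */
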
static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);
		kfree(waiter);
	}
	return ret;
}

/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked.. fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_update().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}

/* special API so PVR can update the buffer to use a sync-object allocated
 * from its sync-obj heap. Only used for a newly allocated (from PVR's
 * perspective) sync-object, so we overwrite the new syncobj w/ values
 * from the already allocated syncobj (if there is one)
 */
int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
		/* clearing a previously set syncobj */
		syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
				  GFP_ATOMIC);
		if (!syncobj) {
			ret = -ENOMEM;
			goto unlock;
		}
		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		/* replacing an existing syncobj */
		if (omap_obj->sync) {
			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
			kfree(omap_obj->sync);
		}
		omap_obj->flags |= OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	}

unlock:
	spin_unlock(&sync_lock);
	return ret;
}

/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

/* don't call directly.. called from GEM core when it is time to actually
 * free the object..
 */
void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	drm_gem_free_mmap_offset(obj);

	/* this means the object is still pinned.. which really should
	 * not happen. I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	/* don't free externally allocated backing memory */
	if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
		if (omap_obj->pages)
			omap_gem_detach_pages(obj);

		if (!is_shmem(obj)) {
			dma_free_writecombine(dev->dev, obj->size,
					omap_obj->vaddr, omap_obj->paddr);
		} else if (omap_obj->vaddr) {
			vunmap(omap_obj->vaddr);
		}
	}

	/* don't free externally allocated syncobj */
	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
		kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(obj);
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj = NULL;
	struct address_space *mapping;
	size_t size;
	int ret;

	if (flags & OMAP_BO_TILED) {
		if (!usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			goto fail;
		}

		/* tiled buffers are always shmem paged backed.. when they are
		 * scanned out, they are remapped into DMM/TILER
		 */
		flags &= ~OMAP_BO_SCANOUT;

		/* currently don't allow cached buffers.. there is some caching
		 * stuff that needs to be handled better
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();

		/* align dimensions to slot boundaries... */
		tiler_align(gem2fmt(flags),
				&gsize.tiled.width, &gsize.tiled.height);

		/* ...and calculate size based on aligned dimensions */
		size = tiler_size(gem2fmt(flags),
				gsize.tiled.width, gsize.tiled.height);
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;

	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/* attempt to allocate contiguous memory if we don't
		 * have DMM for remapping discontiguous buffers
		 */
		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
				&omap_obj->paddr, GFP_KERNEL);
		if (!omap_obj->vaddr) {
			kfree(omap_obj);

			return NULL;
		}

		flags |= OMAP_BO_DMA;
	}

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	}

	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;

		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	return obj;

fail:
	if (obj)
		omap_gem_free_object(obj);

	return NULL;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		drm_gem_object_release(obj);
		kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

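/*
 * Illustrative in-kernel construction (mirrors what omap_gem_dumb_create()
 * does; the exact flags are the caller's choice):
 *
 *	union omap_gem_size gsize = { .bytes = PAGE_ALIGN(len) };
 *	struct drm_gem_object *obj =
 *		omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
 *
 * omap_gem_new_handle() above wraps the same call and additionally creates
 * the userspace handle via drm_gem_handle_create().
 */
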
/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct usergart_entry *entry = &usergart[i].entry[j];
			struct tiler_block *block =
					tiler_reserve_2d(fmts[i], w, h,
							PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
					&entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(usergart);
}