/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/* remove these once drm core helpers are merged */
struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed);
int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);

/*
 * GEM buffer object implementation.
 */

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA		0x01000000	/* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
	 * is set and the paddr is valid.  Also if the buffer is remapped in
	 * TILER and paddr_cnt > 0, then paddr is valid.  But if you are using
	 * the physical address and OMAP_BO_DMA is not set, then you should
	 * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
	 * not removed from under your feet.
	 *
	 * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA
	 * capable buffer is requested, but doesn't mean that it is.  Use the
	 * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
	 * physical address.
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated.  Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.  The layout in memory is dictated by
	 * the SGX firmware, which uses this information to stall the command
	 * stream if a surface is not ready yet.
	 *
	 * Note that when a buffer is used by SGX, the sync-object needs to be
	 * allocated from a special heap of sync-objects.  This way many sync
	 * objects can be packed in a page, and not waste GPU virtual address
	 * space.  Because of this we have to have an omap_gem_set_sync_object()
	 * API to allow replacement of the syncobj after it has (potentially)
	 * already been allocated.  A bit ugly but I haven't thought of a
	 * better alternative.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

static int get_pages(struct drm_gem_object *obj, struct page ***pages);
static uint64_t mmap_offset(struct drm_gem_object *obj);

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
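/*
 * Illustrative example (not part of the original source): if the slot
 * height for a given tiler format is 64 rows, then usergart[fmt].height
 * is 64 and height_shift is 6, and each reserved usergart region maps
 * one page-aligned, PAGE_SIZE-wide, 64-row window of a tiled buffer at
 * a time.  With NUM_USERGART_ENTRIES == 2, at most two such windows per
 * format are resident before older ones are evicted round-robin.
 */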
#define NUM_USERGART_ENTRIES 2
struct usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};
static struct {
	struct usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
} *usergart;

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int n = usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride > PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		if (!usergart)
			return;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct usergart_entry *entry = &usergart[fmt].entry[i];
			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* GEM objects can either be allocated from contiguous memory (in which
 * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  But non
 * contiguous buffers can be remapped in TILER/DMM if they need to be
 * contiguous... but we don't do this all the time to reduce pressure
 * on TILER/DMM space when we know at allocation time that the buffer
 * will need to be scanned out.
 */
static inline bool is_shmem(struct drm_gem_object *obj)
{
	return obj->filp != NULL;
}

/**
 * shmem buffers that are mapped cached can simulate coherency by using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	return is_shmem(obj) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}
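
/*
 * Illustrative note (not in the original source): for OMAP_BO_CACHED
 * shmem buffers there is no permanently held DMA mapping per page.
 * omap_gem_dma_sync() below maps (and thereby cleans) any page whose
 * addrs[i] is still 0 before DMA, and omap_gem_cpu_sync() unmaps a page
 * again when the CPU faults it back in, so addrs[i] effectively doubles
 * as a "clean for DMA" marker.
 */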
static DEFINE_SPINLOCK(sync_lock);

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

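/*
 * Illustrative example (not in the original source), assuming 4KiB
 * pages: because the virtual stride is rounded up to the next page, a
 * tiled buffer whose rows occupy 6000 bytes is mmap'd as if each row
 * took 8192 bytes, even though only the valid picture part is backed
 * by pages.
 */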
/* get tiled size, returns -EINVAL if not tiled buffer */
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->flags & OMAP_BO_TILED) {
		*w = omap_obj->width;
		*h = omap_obj->height;
		return 0;
	}
	return -EINVAL;
}

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page.  If the
	 * slot height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = usergart[fmt].height;
	const int n_shift = usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out virtual
	 * stride in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &usergart[fmt].entry[usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages.  Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr,
				__pfn_to_pfn_t(pfn, PFN_DEV));
		pfn += usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;

	return 0;
}

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area.  GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM.  In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout
		 * buffers on hardware without DMM/TILER.  But these are
		 * allocated write-combine.
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file  = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @drm_file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space.  Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = align_pitch(0, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

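/*
 * Illustrative example (not in the original source), assuming
 * align_pitch() adds no extra alignment beyond bytes-per-pixel: a
 * 1920x1080 dumb buffer at 32 bpp gets pitch = 1920 * 4 = 7680 bytes
 * and size = PAGE_ALIGN(7680 * 1080) = 8294400 bytes.
 */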
/**
 * omap_gem_dumb_map - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory.  We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

/* Set scrolling position.  This allows us to implement fast scrolling
 * for the console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				dirty = true;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}

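/*
 * Illustrative usage sketch (not in the original source): a caller
 * doing DMA to/from a cached shmem buffer would typically pair these as
 *
 *	omap_gem_get_pages(obj, &pages, true);
 *	omap_gem_dma_sync(obj, DMA_TO_DEVICE);	// clean pages for DMA
 *	... perform DMA ...
 *	omap_gem_put_pages(obj);
 *
 * with subsequent CPU faults calling omap_gem_cpu_sync() per page.
 */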
/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (remap && is_shmem(obj) && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (omap_obj->flags & OMAP_BO_DMA) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
void omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->paddr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}

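/*
 * Illustrative usage sketch (not in the original source): a scanout
 * path pins a buffer for the duration of display and drops the pin
 * afterwards:
 *
 *	dma_addr_t paddr;
 *	ret = omap_gem_get_paddr(obj, &paddr, true);	// pins, maps in TILER if needed
 *	if (!ret) {
 *		... program the display hw with paddr ...
 *		omap_gem_put_paddr(obj);	// may unpin/unmap from TILER
 *	}
 */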
/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (is_shmem(obj) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out).  And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr().  Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, &omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, "   ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* Buffer Synchronization:
 */

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved, which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)

static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* it is a bit lame to handle updates in this sort of polling way, but
 * in case of PVR, the GPU can directly update read/write complete
 * values, and not really tell us which ones it updated.. this also
 * means that sync_lock is not quite sufficient.  So we'll need to
 * do something a bit better when it comes time to add support for
 * separate 2d hw..
 */
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}

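/*
 * Illustrative usage sketch (not in the original source): a driver
 * handing a buffer to hw for writing, with the CPU waiting for the
 * result, would look roughly like
 *
 *	omap_gem_op_start(obj, OMAP_GEM_WRITE);	// write_pending++
 *	... kick hw job ...
 *	omap_gem_op_finish(obj, OMAP_GEM_WRITE);	// write_complete++, wakes waiters
 *
 * while a CPU reader blocks in omap_gem_op_sync(obj, OMAP_GEM_READ)
 * until the writes that were pending at call time have completed.
 */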
static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);
		kfree(waiter);
	}
	return ret;
}

/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked.. fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_update().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}

/* special API so PVR can update the buffer to use a sync-object allocated
 * from its sync-obj heap.  Only used for a newly allocated (from PVR's
 * perspective) sync-object, so we overwrite the new syncobj w/ values
 * from the already allocated syncobj (if there is one)
 */
int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
		/* clearing a previously set syncobj */
		syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
				  GFP_ATOMIC);
		if (!syncobj) {
			ret = -ENOMEM;
			goto unlock;
		}
		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		/* replacing an existing syncobj */
		if (omap_obj->sync) {
			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
			kfree(omap_obj->sync);
		}
		omap_obj->flags |= OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	}

unlock:
	spin_unlock(&sync_lock);
	return ret;
}

/* don't call directly.. called from GEM core when it is time to actually
 * free the object..
 */
void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	drm_gem_free_mmap_offset(obj);

	/* this means the object is still pinned.. which really should
	 * not happen.  I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	/* don't free externally allocated backing memory */
	if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
		if (omap_obj->pages)
			omap_gem_detach_pages(obj);

		if (!is_shmem(obj)) {
			dma_free_writecombine(dev->dev, obj->size,
					omap_obj->vaddr, omap_obj->paddr);
		} else if (omap_obj->vaddr) {
			vunmap(omap_obj->vaddr);
		}
	}

	/* don't free externally allocated syncobj */
	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
		kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		drm_gem_object_release(obj);
		kfree(obj);  /* TODO isn't there a dtor to call? just copying i915 */
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj = NULL;
	struct address_space *mapping;
	size_t size;
	int ret;

	if (flags & OMAP_BO_TILED) {
		if (!usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			goto fail;
		}

		/* tiled buffers are always shmem paged backed.. when they are
		 * scanned out, they are remapped into DMM/TILER
		 */
		flags &= ~OMAP_BO_SCANOUT;

		/* currently don't allow cached buffers.. there is some caching
		 * stuff that needs to be handled better
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();

		/* align dimensions to slot boundaries... */
		tiler_align(gem2fmt(flags),
				&gsize.tiled.width, &gsize.tiled.height);

		/* ...and calculate size based on aligned dimensions */
		size = tiler_size(gem2fmt(flags),
				gsize.tiled.width, gsize.tiled.height);
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;

	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/* attempt to allocate contiguous memory if we don't
		 * have DMM for remapping discontiguous buffers
		 */
		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
				&omap_obj->paddr, GFP_KERNEL);
		if (!omap_obj->vaddr) {
			kfree(omap_obj);

			return NULL;
		}

		flags |= OMAP_BO_DMA;
	}

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	}

	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;

		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	return obj;

fail:
	if (obj)
		omap_gem_free_object(obj);

	return NULL;
}

/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct usergart_entry *entry = &usergart[i].entry[j];
			struct tiler_block *block =
					tiler_reserve_2d(fmts[i], w, h,
							PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
					&entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(usergart);
}