/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * dma_addr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when dma_addr_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through
	 * dma_addr_cnt) the DMA address must be accessed through omap_gem_pin()
	 * to ensure that the mapping won't disappear unexpectedly. References
	 * must be released with omap_gem_unpin().
	 */
	dma_addr_t dma_addr;

	/**
	 * # of users of dma_addr
	 */
	uint32_t dma_addr_cnt;

	/**
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *dma_addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t dma_addr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};

/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

static bool is_contiguous(struct omap_gem_object *omap_obj)
{
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
		return true;

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
		return true;

	return false;
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride is larger than PAGE_SIZE, this is a sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_TO_DEVICE);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_TO_DEVICE);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->dma_addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned int npages = obj->size >> PAGE_SHIFT;
	unsigned int i;

	for (i = 0; i < npages; i++) {
		if (omap_obj->dma_addrs[i])
			dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
				       PAGE_SIZE, DMA_TO_DEVICE);
	}

	kfree(omap_obj->dma_addrs);
	omap_obj->dma_addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!). But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

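/*
 * Worked example (illustrative, not from the original source): for a
 * hypothetical TILFMT_16BIT buffer of 600x480 pixels each row occupies
 * 1200 bytes, but the virtual stride is rounded up to the next 4kb
 * boundary, so the mmap size computed above would be roughly 4096 * 480
 * bytes rather than 1200 * 480. The exact value comes from tiler_vsize().
 */
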
/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync_page(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!is_contiguous(omap_obj));
		pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	unsigned long vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill 4kb wide CPU page. If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out the virtual
	 * stride in pages
	 */
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->dma_addr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				 % NUM_USERGART_ENTRIES;

	return 0;
}

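/*
 * Worked example for the slot math above (numbers are illustrative
 * assumptions, the real values come from omap_gem_init()): for TILFMT_8BIT
 * the usergart slot height n might be 64 rows. A buffer 2048 bytes wide
 * gives m = DIV_ROUND_UP(2048 << 0, PAGE_SIZE) = 1, so one fault maps
 * n = 64 pages, one per slot row, each stride_pfn apart in the TILER
 * aperture. An 8192-byte-wide buffer gives m = 2, so only part of each
 * slot row can be mapped per fault, which is what the m > 1 branch above
 * accounts for.
 */
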
/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this. But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER. But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: fake mmap offset to return to userspace
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

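/*
 * Userspace usage sketch (not part of this driver): a client typically
 * creates a dumb buffer, asks for its fake mmap offset and then mmap()s
 * it through the DRM device fd. Roughly, with error handling omitted:
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 640, .height = 480, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 */
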
#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position. This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/*
 * shmem buffers that are mapped cached are not coherent.
 *
 * We keep track of dirty pages using page faulting to perform cache management.
 * When a page is mapped to the CPU in read/write mode the device can't access
 * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
 * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
 * unmapped from the CPU.
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj))
		return;

	if (omap_obj->dma_addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
				PAGE_SIZE, DMA_TO_DEVICE);
		omap_obj->dma_addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int i, npages = obj->size >> PAGE_SHIFT;
	struct page **pages = omap_obj->pages;
	bool dirty = false;

	if (is_cached_coherent(obj))
		return;

	for (i = 0; i < npages; i++) {
		if (!omap_obj->dma_addrs[i]) {
			dma_addr_t addr;

			addr = dma_map_page(dev->dev, pages[i], 0,
					    PAGE_SIZE, dir);
			if (dma_mapping_error(dev->dev, addr)) {
				dev_warn(dev->dev, "%s: failed to map page\n",
					 __func__);
				break;
			}

			dirty = true;
			omap_obj->dma_addrs[i] = addr;
		}
	}

	if (dirty) {
		unmap_mapping_range(obj->filp->f_mapping, 0,
				    omap_gem_mmap_size(obj), 1);
	}
}

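/*
 * Usage sketch (illustrative): a caller streaming a CPU-cached buffer to
 * the display would map the whole object for DMA before kicking scanout:
 *
 *	omap_gem_dma_sync_buffer(obj, DMA_TO_DEVICE);
 *
 * The unmap_mapping_range() above tears down the userspace mappings, so
 * the next CPU access faults and the fault path hands the touched page
 * back to the CPU via omap_gem_cpu_sync_page().
 */
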
/**
 * omap_gem_pin() - Pin a GEM object in memory
 * @obj: the GEM object
 * @dma_addr: the DMA address
 *
 * Pin the given GEM object in memory and fill the dma_addr pointer with the
 * object's DMA address. If the buffer is not physically contiguous it will be
 * remapped through the TILER to provide a contiguous view.
 *
 * Pins are reference-counted, calling this function multiple times is allowed
 * as long as the corresponding omap_gem_unpin() calls are balanced.
 *
 * Return 0 on success or a negative error code otherwise.
 */
int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (!is_contiguous(omap_obj) && priv->has_dmm) {
		if (omap_obj->dma_addr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->dma_addr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got dma address: %pad", &omap_obj->dma_addr);
		}

		omap_obj->dma_addr_cnt++;

		*dma_addr = omap_obj->dma_addr;
	} else if (is_contiguous(omap_obj)) {
		*dma_addr = omap_obj->dma_addr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/**
 * omap_gem_unpin() - Unpin a GEM object from memory
 * @obj: the GEM object
 *
 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
 * reference-counted, the actual unpin will only be performed when the number
 * of calls to this function matches the number of calls to omap_gem_pin().
 */
void omap_gem_unpin(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->dma_addr_cnt > 0) {
		omap_obj->dma_addr_cnt--;
		if (omap_obj->dma_addr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->dma_addr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}

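/*
 * Pin/unpin usage sketch (illustrative; fb_obj is a hypothetical pointer
 * to a framebuffer's GEM object): hardware access is bracketed by a
 * balanced pair of calls:
 *
 *	dma_addr_t dma_addr;
 *	int ret;
 *
 *	ret = omap_gem_pin(fb_obj, &dma_addr);
 *	if (ret)
 *		return ret;
 *	... program the scanout engine with dma_addr ...
 *	omap_gem_unpin(fb_obj);
 */
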
/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *dma_addr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out). And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_pin(). Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_unpin() should be changed to work in the
 * same way. If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev. This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, kref_read(&obj->refcount),
			off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	/* this means the object is still pinned.. which really should
	 * not happen. I think..
	 */
	WARN_ON(omap_obj->dma_addr_cnt > 0);

	if (omap_obj->pages) {
		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
			kfree(omap_obj->pages);
		else
			omap_gem_detach_pages(obj);
	}

	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
			    omap_obj->dma_addr);
	} else if (omap_obj->vaddr) {
		vunmap(omap_obj->vaddr);
	} else if (obj->import_attach) {
		drm_prime_gem_destroy(obj, omap_obj->sgt);
	}

	drm_gem_object_release(obj);

	kfree(omap_obj);
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	/* Validate the flags and compute the memory and cache flags. */
	if (flags & OMAP_BO_TILED) {
		if (!priv->usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			return NULL;
		}

		/*
		 * Tiled buffers are always shmem paged backed. When they are
		 * scanned out, they are remapped into DMM/TILER.
		 */
		flags &= ~OMAP_BO_SCANOUT;
		flags |= OMAP_BO_MEM_SHMEM;

		/*
		 * Currently don't allow cached buffers. There is some caching
		 * stuff that needs to be handled better.
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();
	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/*
		 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
		 * tiled. However, to lower the pressure on memory allocation,
		 * use contiguous memory only if no TILER is available.
		 */
		flags |= OMAP_BO_MEM_DMA_API;
	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
		/*
		 * All other buffers not backed by dma_buf are shmem-backed.
		 */
		flags |= OMAP_BO_MEM_SHMEM;
	}

	/* Allocate and initialize the OMAP GEM object. */
	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;
	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		/*
		 * For tiled buffers align dimensions to slot boundaries and
		 * calculate size based on aligned dimensions.
		 */
		tiler_align(gem2fmt(flags), &gsize.tiled.width,
			    &gsize.tiled.height);

		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
				  gsize.tiled.height);

		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	/* Initialize the GEM object. */
	if (!(flags & OMAP_BO_MEM_SHMEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_free;

		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	/* Allocate memory if needed. */
	if (flags & OMAP_BO_MEM_DMA_API) {
		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
					       &omap_obj->dma_addr,
					       GFP_KERNEL);
		if (!omap_obj->vaddr)
			goto err_release;
	}

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	return obj;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(omap_obj);
	return NULL;
}

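/*
 * Allocation sketch (illustrative): a kernel-side user wanting a
 * write-combined scanout buffer of one page could do the following and
 * check for a NULL return on failure:
 *
 *	union omap_gem_size gsize = { .bytes = PAGE_SIZE };
 *	struct drm_gem_object *obj;
 *
 *	obj = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
 *	if (!obj)
 *		return -ENOMEM;
 */
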
struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
		struct sg_table *sgt)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	union omap_gem_size gsize;

	/* Without a DMM only physically contiguous buffers can be supported. */
	if (sgt->orig_nents != 1 && !priv->has_dmm)
		return ERR_PTR(-EINVAL);

	mutex_lock(&dev->struct_mutex);

	gsize.bytes = PAGE_ALIGN(size);
	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
	if (!obj) {
		obj = ERR_PTR(-ENOMEM);
		goto done;
	}

	omap_obj = to_omap_bo(obj);
	omap_obj->sgt = sgt;

	if (sgt->orig_nents == 1) {
		omap_obj->dma_addr = sg_dma_address(sgt->sgl);
	} else {
		/* Create pages list from sgt */
		struct sg_page_iter iter;
		struct page **pages;
		unsigned int npages;
		unsigned int i = 0;

		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}

		omap_obj->pages = pages;

		for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
			pages[i++] = sg_page_iter_page(&iter);
			if (i > npages)
				break;
		}

		if (WARN_ON(i != npages)) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}
	}

done:
	mutex_unlock(&dev->struct_mutex);
	return obj;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->dma_addr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
					&entry->dma_addr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}