/*
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	u32 flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	u16 width, height;

	/** roll applied when mapping to DMM */
	u32 roll;

	/**
	 * dma_addr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when dma_addr_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through
	 * dma_addr_cnt) the DMA address must be accessed through omap_gem_pin()
	 * to ensure that the mapping won't disappear unexpectedly. References
	 * must be released with omap_gem_unpin().
	 */
	dma_addr_t dma_addr;

	/**
	 * # of users of dma_addr
	 */
	u32 dma_addr_cnt;

	/**
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *dma_addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t dma_addr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};

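/*
 * Worked example of the usergart geometry set up in omap_gem_init()
 * (illustrative numbers only; the real ones come from tiler_align() and
 * tiler_stride()): assuming PAGE_SIZE = 4096 and TILFMT_8BIT (i = 0), if
 * tiler_align() rounds the region height up to h = 64 rows, then
 * height = 64, height_shift = ilog2(64) = 6, and
 * slot_shift = ilog2((4096 / 64) >> 0) = 6, i.e. each usergart entry maps
 * a 4 KiB wide, 64 row window of a tiled buffer.
 */
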
/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
static u64 mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

static bool is_contiguous(struct omap_gem_object *omap_obj)
{
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
		return true;

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
		return true;

	return false;
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride is larger than PAGE_SIZE then it is a sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_TO_DEVICE);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_TO_DEVICE);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->dma_addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned int npages = obj->size >> PAGE_SHIFT;
	unsigned int i;

	for (i = 0; i < npages; i++) {
		if (omap_obj->dma_addrs[i])
			dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
				       PAGE_SIZE, DMA_TO_DEVICE);
	}

	kfree(omap_obj->dma_addrs);
	omap_obj->dma_addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
u32 omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	u64 offset;

	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}
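
/*
 * Illustrative example of the adjustment above (numbers are approximate;
 * the exact result comes from tiler_vsize()): a 1280x720 16-bit tiled
 * buffer has rows of 1280 * 2 = 2560 bytes, but each row occupies a full
 * 4 KiB of virtual stride once rounded up, so the mmap size is on the
 * order of 720 * 4096 bytes instead of 720 * 2560.
 */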

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync_page(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!is_contiguous(omap_obj));
		pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	unsigned long vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill 4kb wide CPU page.  If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to next multiple of PAGE_SIZE.. this needs to be taken
	 * into account in some of the math, so figure out virtual stride
	 * in pages
	 */
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

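	/*
	 * Worked example of the n/m math (illustrative, assuming
	 * PAGE_SIZE = 4096): for a TILFMT_16BIT buffer 4096 pixels wide,
	 * the row stride is 4096 << 1 = 8192 bytes, so m = 2 virtual pages
	 * per slot row; with a usergart slot height of 64, n = 64 and
	 * n_shift = 6.
	 */
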
	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->dma_addr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				 % NUM_USERGART_ENTRIES;

	return 0;
}

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);


fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER. But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file  = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}
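
/*
 * Example of the dumb-buffer size math above (illustrative values): for a
 * 1920x1080 XRGB8888 request, bpp = 32, so
 * pitch = DIV_ROUND_UP(1920 * 32, 8) = 7680 bytes and
 * size = PAGE_ALIGN(7680 * 1080) = 8294400 bytes (already page aligned),
 * allocated as a write-combined scanout buffer.
 */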

/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: return location for the fake mmap offset
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u32 npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;

		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;

		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/*
 * shmem buffers that are mapped cached are not coherent.
 *
 * We keep track of dirty pages using page faulting to perform cache management.
 * When a page is mapped to the CPU in read/write mode the device can't access
 * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
 * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
 * unmapped from the CPU.
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
}

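/*
 * A sketch of the flow described above (illustrative, not extra driver
 * code): omap_gem_dma_sync_buffer() dma_map_page()s every page that has
 * dma_addrs[i] == 0 (flushing the CPU caches), records the DMA address,
 * and unmaps the pages from userspace; the next CPU access then faults,
 * and omap_gem_cpu_sync_page() dma_unmap_page()s that page and resets
 * dma_addrs[i] to 0, handing it back to the CPU.
 */
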
/* Sync the buffer for CPU access.. note pages should already be
 * attached, i.e. omap_gem_get_pages()
 */
void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj))
		return;

	if (omap_obj->dma_addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
				PAGE_SIZE, DMA_TO_DEVICE);
		omap_obj->dma_addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int i, npages = obj->size >> PAGE_SHIFT;
	struct page **pages = omap_obj->pages;
	bool dirty = false;

	if (is_cached_coherent(obj))
		return;

	for (i = 0; i < npages; i++) {
		if (!omap_obj->dma_addrs[i]) {
			dma_addr_t addr;

			addr = dma_map_page(dev->dev, pages[i], 0,
					PAGE_SIZE, dir);
			if (dma_mapping_error(dev->dev, addr)) {
				dev_warn(dev->dev, "%s: failed to map page\n",
					__func__);
				break;
			}

			dirty = true;
			omap_obj->dma_addrs[i] = addr;
		}
	}

	if (dirty) {
		unmap_mapping_range(obj->filp->f_mapping, 0,
				    omap_gem_mmap_size(obj), 1);
	}
}

/**
 * omap_gem_pin() - Pin a GEM object in memory
 * @obj: the GEM object
 * @dma_addr: the DMA address
 *
 * Pin the given GEM object in memory and fill the dma_addr pointer with the
 * object's DMA address. If the buffer is not physically contiguous it will be
 * remapped through the TILER to provide a contiguous view.
 *
 * Pins are reference-counted, calling this function multiple times is allowed
 * as long as the corresponding omap_gem_unpin() calls are balanced.
 *
 * Return 0 on success or a negative error code otherwise.
 */
int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (!is_contiguous(omap_obj) && priv->has_dmm) {
		if (omap_obj->dma_addr_cnt == 0) {
			struct page **pages;
			u32 npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->dma_addr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got dma address: %pad", &omap_obj->dma_addr);
		}

		omap_obj->dma_addr_cnt++;

		*dma_addr = omap_obj->dma_addr;
	} else if (is_contiguous(omap_obj)) {
		*dma_addr = omap_obj->dma_addr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
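
/*
 * A minimal usage sketch for the pin/unpin API above (illustrative only,
 * error handling abbreviated):
 *
 *	dma_addr_t dma_addr;
 *	int ret;
 *
 *	ret = omap_gem_pin(obj, &dma_addr);
 *	if (ret)
 *		return ret;
 *	... program dma_addr into the display/DMA hardware ...
 *	omap_gem_unpin(obj);	(balances the pin when done)
 */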

/**
 * omap_gem_unpin() - Unpin a GEM object from memory
 * @obj: the GEM object
 *
 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
 * reference-counted, the actual unpin will only be performed when the number
 * of calls to this function matches the number of calls to omap_gem_pin().
 */
void omap_gem_unpin(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->dma_addr_cnt > 0) {
		omap_obj->dma_addr_cnt--;
		if (omap_obj->dma_addr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->dma_addr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
		int x, int y, dma_addr_t *dma_addr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out).  And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_pin().  Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_unpin() should be changed to work in the
 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;

	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);

		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);

		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			u32 npages = obj->size >> PAGE_SHIFT;

			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev->dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u64 off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, kref_read(&obj->refcount),
			off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	/* this means the object is still pinned.. which really should
	 * not happen.  I think..
	 */
	WARN_ON(omap_obj->dma_addr_cnt > 0);

	if (omap_obj->pages) {
		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
			kfree(omap_obj->pages);
		else
			omap_gem_detach_pages(obj);
	}

	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
			    omap_obj->dma_addr);
	} else if (omap_obj->vaddr) {
		vunmap(omap_obj->vaddr);
	} else if (obj->import_attach) {
		drm_prime_gem_destroy(obj, omap_obj->sgt);
	}

	drm_gem_object_release(obj);

	kfree(omap_obj);
}

1118/* GEM buffer object constructor */
1119struct drm_gem_object *omap_gem_new(struct drm_device *dev,
Laurent Pinchartdfe9cfc2018-02-11 15:07:33 +02001120 union omap_gem_size gsize, u32 flags)
Rob Clarkcd5351f2011-11-12 12:09:40 -06001121{
Rob Clarka6a91822011-12-09 23:26:08 -06001122 struct omap_drm_private *priv = dev->dev_private;
Rob Clarkcd5351f2011-11-12 12:09:40 -06001123 struct omap_gem_object *omap_obj;
Laurent Pinchart92b4b442015-12-14 22:39:41 +02001124 struct drm_gem_object *obj;
David Herrmannab5a60c2014-05-25 12:45:39 +02001125 struct address_space *mapping;
Rob Clarkcd5351f2011-11-12 12:09:40 -06001126 size_t size;
1127 int ret;
1128
Laurent Pinchart9cba3b92015-12-14 22:39:43 +02001129 /* Validate the flags and compute the memory and cache flags. */
Rob Clarkcd5351f2011-11-12 12:09:40 -06001130 if (flags & OMAP_BO_TILED) {
Laurent Pinchartf4302742015-12-14 22:39:34 +02001131 if (!priv->usergart) {
Rob Clarkf7f9f452011-12-05 19:19:22 -06001132 dev_err(dev->dev, "Tiled buffers require DMM\n");
Laurent Pinchart92b4b442015-12-14 22:39:41 +02001133 return NULL;
Rob Clarkf7f9f452011-12-05 19:19:22 -06001134 }
Rob Clarkcd5351f2011-11-12 12:09:40 -06001135
Laurent Pinchart9cba3b92015-12-14 22:39:43 +02001136 /*
1137 * Tiled buffers are always shmem paged backed. When they are
1138 * scanned out, they are remapped into DMM/TILER.
Rob Clarkf7f9f452011-12-05 19:19:22 -06001139 */
1140 flags &= ~OMAP_BO_SCANOUT;
Laurent Pinchart9cba3b92015-12-14 22:39:43 +02001141 flags |= OMAP_BO_MEM_SHMEM;
Rob Clarkf7f9f452011-12-05 19:19:22 -06001142
Laurent Pinchart9cba3b92015-12-14 22:39:43 +02001143 /*
1144 * Currently don't allow cached buffers. There is some caching
1145 * stuff that needs to be handled better.
Rob Clarkf7f9f452011-12-05 19:19:22 -06001146 */
Tomi Valkeinen7cb0d6c2014-09-25 19:24:29 +00001147 flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
1148 flags |= tiler_get_cpu_cache_flags();
Laurent Pinchart9cba3b92015-12-14 22:39:43 +02001149 } else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
1150 /*
Laurent Pinchartb22e6692015-12-14 22:39:44 +02001151 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
1152 * tiled. However, to lower the pressure on memory allocation,
1153 * use contiguous memory only if no TILER is available.
Laurent Pinchart9cba3b92015-12-14 22:39:43 +02001154 */
1155 flags |= OMAP_BO_MEM_DMA_API;
Tomi Valkeinen3f50eff2016-01-27 10:58:43 +02001156 } else if (!(flags & OMAP_BO_MEM_DMABUF)) {
Laurent Pinchart9cba3b92015-12-14 22:39:43 +02001157 /*
Tomi Valkeinen3f50eff2016-01-27 10:58:43 +02001158 * All other buffers not backed by dma_buf are shmem-backed.
Laurent Pinchart9cba3b92015-12-14 22:39:43 +02001159 */
1160 flags |= OMAP_BO_MEM_SHMEM;
Rob Clarkf7f9f452011-12-05 19:19:22 -06001161 }
Rob Clarkcd5351f2011-11-12 12:09:40 -06001162
Laurent Pinchart9cba3b92015-12-14 22:39:43 +02001163 /* Allocate the initialize the OMAP GEM object. */
Rob Clarkcd5351f2011-11-12 12:09:40 -06001164 omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
Joe Perches78110bb2013-02-11 09:41:29 -08001165 if (!omap_obj)
Tomi Valkeinena903e3b2015-03-17 15:31:11 +02001166 return NULL;
Rob Clarkf6b60362012-03-05 10:48:36 -06001167
Rob Clarkcd5351f2011-11-12 12:09:40 -06001168 obj = &omap_obj->base;
Laurent Pinchart9cba3b92015-12-14 22:39:43 +02001169 omap_obj->flags = flags;
Rob Clarkcd5351f2011-11-12 12:09:40 -06001170
Laurent Pinchart9cba3b92015-12-14 22:39:43 +02001171 if (flags & OMAP_BO_TILED) {
1172 /*
1173 * For tiled buffers align dimensions to slot boundaries and
1174 * calculate size based on aligned dimensions.
Rob Clarka6a91822011-12-09 23:26:08 -06001175 */
Laurent Pinchart9cba3b92015-12-14 22:39:43 +02001176 tiler_align(gem2fmt(flags), &gsize.tiled.width,
1177 &gsize.tiled.height);
YAMANE Toshiakiae053032012-11-14 19:33:17 +09001178
Laurent Pinchart9cba3b92015-12-14 22:39:43 +02001179 size = tiler_size(gem2fmt(flags), gsize.tiled.width,
1180 gsize.tiled.height);
Tomi Valkeinena903e3b2015-03-17 15:31:11 +02001181
Laurent Pinchart9cba3b92015-12-14 22:39:43 +02001182 omap_obj->width = gsize.tiled.width;
1183 omap_obj->height = gsize.tiled.height;
1184 } else {
1185 size = PAGE_ALIGN(gsize.bytes);
Rob Clarkcd5351f2011-11-12 12:09:40 -06001186 }
1187
Laurent Pinchartc2eb77f2016-03-02 12:51:19 +02001188 /* Initialize the GEM object. */
1189 if (!(flags & OMAP_BO_MEM_SHMEM)) {
1190 drm_gem_private_object_init(dev, obj, size);
1191 } else {
1192 ret = drm_gem_object_init(dev, obj, size);
1193 if (ret)
1194 goto err_free;
1195
Al Viro93c76a32015-12-04 23:45:44 -05001196 mapping = obj->filp->f_mapping;
Laurent Pinchartc2eb77f2016-03-02 12:51:19 +02001197 mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
1198 }
Tomi Valkeinena903e3b2015-03-17 15:31:11 +02001199
Laurent Pinchart9cba3b92015-12-14 22:39:43 +02001200 /* Allocate memory if needed. */
1201 if (flags & OMAP_BO_MEM_DMA_API) {
Linus Torvalds266c73b2016-03-21 13:48:00 -07001202 omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
Laurent Pinchart16869082017-04-21 00:33:51 +03001203 &omap_obj->dma_addr,
Linus Torvalds266c73b2016-03-21 13:48:00 -07001204 GFP_KERNEL);
Laurent Pinchart9cba3b92015-12-14 22:39:43 +02001205 if (!omap_obj->vaddr)
Laurent Pinchartc2eb77f2016-03-02 12:51:19 +02001206 goto err_release;
Rob Clarkcd5351f2011-11-12 12:09:40 -06001207 }
1208
1209 spin_lock(&priv->list_lock);
1210 list_add(&omap_obj->mm_list, &priv->obj_list);
Rob Clarkf7f9f452011-12-05 19:19:22 -06001211 spin_unlock(&priv->list_lock);
1212
Rob Clarkcd5351f2011-11-12 12:09:40 -06001213 return obj;
1214
Laurent Pinchartc2eb77f2016-03-02 12:51:19 +02001215err_release:
1216 drm_gem_object_release(obj);
1217err_free:
1218 kfree(omap_obj);
Rob Clarkcd5351f2011-11-12 12:09:40 -06001219 return NULL;
1220}
Rob Clarkf7f9f452011-12-05 19:19:22 -06001221
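/*
 * Allocation sketch (illustrative only): a driver-internal caller wanting
 * a write-combined scanout buffer could do:
 *
 *	union omap_gem_size gsize = { .bytes = PAGE_ALIGN(len) };
 *	struct drm_gem_object *obj;
 *
 *	obj = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
 *	if (!obj)
 *		return -ENOMEM;
 *
 * For 2d tiled buffers, gsize.tiled.width/height would be filled in and
 * one of the OMAP_BO_TILED_* format flags passed instead.
 */
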
struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
		struct sg_table *sgt)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	union omap_gem_size gsize;

	/* Without a DMM only physically contiguous buffers can be supported. */
	if (sgt->orig_nents != 1 && !priv->has_dmm)
		return ERR_PTR(-EINVAL);

	mutex_lock(&dev->struct_mutex);

	gsize.bytes = PAGE_ALIGN(size);
	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
	if (!obj) {
		obj = ERR_PTR(-ENOMEM);
		goto done;
	}

	omap_obj = to_omap_bo(obj);
	omap_obj->sgt = sgt;

	if (sgt->orig_nents == 1) {
		omap_obj->dma_addr = sg_dma_address(sgt->sgl);
	} else {
		/* Create pages list from sgt */
		struct sg_page_iter iter;
		struct page **pages;
		unsigned int npages;
		unsigned int i = 0;

		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}

		omap_obj->pages = pages;

		for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
			pages[i++] = sg_page_iter_page(&iter);
			if (i > npages)
				break;
		}

		if (WARN_ON(i != npages)) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}
	}

done:
	mutex_unlock(&dev->struct_mutex);
	return obj;
}
1282
Laurent Pinchart7ef93b02015-12-14 22:39:33 +02001283/* convenience method to construct a GEM buffer object, and userspace handle */
1284int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
Laurent Pinchartdfe9cfc2018-02-11 15:07:33 +02001285 union omap_gem_size gsize, u32 flags, u32 *handle)
Laurent Pinchart7ef93b02015-12-14 22:39:33 +02001286{
1287 struct drm_gem_object *obj;
1288 int ret;
1289
1290 obj = omap_gem_new(dev, gsize, flags);
1291 if (!obj)
1292 return -ENOMEM;
1293
1294 ret = drm_gem_handle_create(file, obj, handle);
1295 if (ret) {
Laurent Pinchart74128a22015-12-14 22:39:39 +02001296 omap_gem_free_object(obj);
Laurent Pinchart7ef93b02015-12-14 22:39:33 +02001297 return ret;
1298 }
1299
1300 /* drop reference from allocate - handle holds it now */
1301 drm_gem_object_unreference_unlocked(obj);
1302
1303 return 0;
1304}
1305
/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
		TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		u16 h = 1, w = PAGE_SIZE >> i;

		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
					"reserve failed: %d, %d, %ld\n",
					i, j, PTR_ERR(block));
				return;
			}
			entry->dma_addr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
				&entry->dma_addr,
				usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}