// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 * Author: Rob Clark <rob.clark@linaro.org>
 */

#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	u32 flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	u16 width, height;

	/** roll applied when mapping to DMM */
	u32 roll;

	/** protects dma_addr_cnt, block, pages, dma_addrs and vaddr */
	struct mutex lock;

	/**
	 * dma_addr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when dma_addr_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through
	 * dma_addr_cnt) the DMA address must be accessed through omap_gem_pin()
	 * to ensure that the mapping won't disappear unexpectedly. References
	 * must be released with omap_gem_unpin().
	 */
	dma_addr_t dma_addr;

	/**
	 * # of users of dma_addr
	 */
	u32 dma_addr_cnt;

	/**
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *dma_addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t dma_addr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};
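
/*
 * The entries above form a small round-robin cache of page-aligned
 * mappings: 'last' indexes the entry to reuse next, and a fault that
 * needs a fresh entry first evicts whatever buffer currently occupies
 * it (see omap_gem_fault_2d() and omap_gem_evict_entry() below).
 */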

/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
{
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
		return true;

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
		return true;

	return false;
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void omap_gem_evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = omap_gem_mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride > PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void omap_gem_evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				omap_gem_evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/*
 * Ensure backing pages are allocated. Must be called with the omap_obj.lock
 * held.
 */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	lockdep_assert_held(&omap_obj->lock);

	/*
	 * If not using shmem (in which case backing pages don't need to be
	 * allocated) or if pages are already allocated we're done.
	 */
	if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_TO_DEVICE);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_TO_DEVICE);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->dma_addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* Release backing pages. Must be called with the omap_obj.lock held. */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned int npages = obj->size >> PAGE_SHIFT;
	unsigned int i;

	lockdep_assert_held(&omap_obj->lock);

	for (i = 0; i < npages; i++) {
		if (omap_obj->dma_addrs[i])
			dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
				       PAGE_SIZE, DMA_TO_DEVICE);
	}

	kfree(omap_obj->dma_addrs);
	omap_obj->dma_addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
u32 omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!). But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync_page(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!omap_gem_is_contiguous(omap_obj));
		pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	return vmf_insert_mixed(vma, vmf->address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	unsigned long vaddr;
	int i, err, slots;
	vm_fault_t ret = VM_FAULT_NOPAGE;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page. If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out virtual
	 * stride in pages
	 */
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
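
	/*
	 * Worked example with illustrative numbers (not derived from real
	 * hardware tables): if n = 64 (n_shift = 6) and the buffer is two
	 * pages wide (m = 2), a fault at pgoff = 300 rounds down to
	 * base_pgoff = round_down(300, 2 << 6) = 256, i.e. the start of the
	 * slot row containing the faulting page.
	 */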

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		omap_gem_evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (err) {
		ret = vmf_error(err);
		dev_err(obj->dev->dev, "failed to pin: %d\n", err);
		return ret;
	}

	pfn = entry->dma_addr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		ret = vmf_insert_mixed(vma,
			vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
		if (ret & VM_FAULT_ERROR)
			break;
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				 % NUM_USERGART_ENTRIES;

	return ret;
}

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
vm_fault_t omap_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int err;
	vm_fault_t ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&omap_obj->lock);

	/* if a shmem backed object, make sure we have pages attached now */
	err = omap_gem_attach_pages(obj);
	if (err) {
		ret = vmf_error(err);
		goto fail;
	}

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this. But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = omap_gem_fault_2d(obj, vma, vmf);
	else
		ret = omap_gem_fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&omap_obj->lock);
	return ret;
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER. But these are allocated write-
		 * combine.
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->size = PAGE_ALIGN(args->pitch * args->height);
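
	/*
	 * For example (illustrative numbers only): a 1920x1080 request at
	 * 32 bpp yields pitch = 1920 * 32 / 8 = 7680 bytes and
	 * size = PAGE_ALIGN(7680 * 1080) = 8294400 bytes (2025 4k pages).
	 */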

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

/**
 * omap_gem_dumb_map - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position. This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u32 npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&omap_obj->lock);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		ret = omap_gem_attach_pages(obj);
		if (ret)
			goto fail;

		ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
				roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&omap_obj->lock);

	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/*
 * shmem buffers that are mapped cached are not coherent.
 *
 * We keep track of dirty pages using page faulting to perform cache management.
 * When a page is mapped to the CPU in read/write mode the device can't access
 * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
 * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
 * unmapped from the CPU.
 */
static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, i.e. omap_gem_get_pages()
 */
void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_gem_is_cached_coherent(obj))
		return;

	if (omap_obj->dma_addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
				PAGE_SIZE, DMA_TO_DEVICE);
		omap_obj->dma_addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int i, npages = obj->size >> PAGE_SHIFT;
	struct page **pages = omap_obj->pages;
	bool dirty = false;

	if (omap_gem_is_cached_coherent(obj))
		return;

	for (i = 0; i < npages; i++) {
		if (!omap_obj->dma_addrs[i]) {
			dma_addr_t addr;

			addr = dma_map_page(dev->dev, pages[i], 0,
					    PAGE_SIZE, dir);
			if (dma_mapping_error(dev->dev, addr)) {
				dev_warn(dev->dev, "%s: failed to map page\n",
					__func__);
				break;
			}

			dirty = true;
			omap_obj->dma_addrs[i] = addr;
		}
	}

	if (dirty) {
		unmap_mapping_range(obj->filp->f_mapping, 0,
				    omap_gem_mmap_size(obj), 1);
	}
}
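
/*
 * Sketch of the intended flow (illustrative, not a verbatim caller from
 * this driver): map the whole buffer for DMA before device access; a
 * later CPU write fault unmaps the touched page again, so the next sync
 * re-cleans only that page.
 *
 *	omap_gem_dma_sync_buffer(obj, DMA_TO_DEVICE);
 *	... device (e.g. DSS) accesses the buffer ...
 *	... CPU write fault -> omap_gem_cpu_sync_page(obj, pgoff) ...
 */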

/**
 * omap_gem_pin() - Pin a GEM object in memory
 * @obj: the GEM object
 * @dma_addr: the DMA address
 *
 * Pin the given GEM object in memory and fill the dma_addr pointer with the
 * object's DMA address. If the buffer is not physically contiguous it will be
 * remapped through the TILER to provide a contiguous view.
 *
 * Pins are reference-counted, calling this function multiple times is allowed
 * as long as the corresponding omap_gem_unpin() calls are balanced.
 *
 * Return 0 on success or a negative error code otherwise.
 */
int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&omap_obj->lock);

	if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) {
		if (omap_obj->dma_addr_cnt == 0) {
			u32 npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = omap_gem_attach_pages(obj);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->dma_addr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got dma address: %pad", &omap_obj->dma_addr);
		}

		omap_obj->dma_addr_cnt++;

		*dma_addr = omap_obj->dma_addr;
	} else if (omap_gem_is_contiguous(omap_obj)) {
		*dma_addr = omap_obj->dma_addr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&omap_obj->lock);

	return ret;
}

/**
 * omap_gem_unpin() - Unpin a GEM object from memory
 * @obj: the GEM object
 *
 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
 * reference-counted, the actual unpin will only be performed when the number
 * of calls to this function matches the number of calls to omap_gem_pin().
 */
void omap_gem_unpin(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&omap_obj->lock);

	if (omap_obj->dma_addr_cnt > 0) {
		omap_obj->dma_addr_cnt--;
		if (omap_obj->dma_addr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->dma_addr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&omap_obj->lock);
}
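
/*
 * Typical usage, as a sketch (the caller code is illustrative, not taken
 * from this file):
 *
 *	dma_addr_t dma_addr;
 *	int ret = omap_gem_pin(obj, &dma_addr);
 *	if (ret)
 *		return ret;
 *	... program the hardware with dma_addr ...
 *	omap_gem_unpin(obj);
 */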

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
		int x, int y, dma_addr_t *dma_addr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&omap_obj->lock);

	if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}

	mutex_unlock(&omap_obj->lock);

	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);

	return ret;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out). And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_pin(). Note though that mutex is not
Rob Clark6ad11bc2012-04-10 13:19:55 -0500906 * aquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_unpin() should be changed to work in the
 * same way. If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&omap_obj->lock);

	if (remap) {
		ret = omap_gem_attach_pages(obj);
		if (ret)
			goto unlock;
	}

	if (!omap_obj->pages) {
		ret = -ENOMEM;
		goto unlock;
	}

	*pages = omap_obj->pages;

unlock:
	mutex_unlock(&omap_obj->lock);

	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}
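
/*
 * Example pairing, as a sketch (illustrative caller, not taken from this
 * file): with remap == true the pages are attached on demand and remain
 * valid until omap_gem_put_pages().
 *
 *	struct page **pages;
 *	int ret = omap_gem_get_pages(obj, &pages, true);
 *	if (ret)
 *		return ret;
 *	... operate on the individual pages ...
 *	omap_gem_put_pages(obj);
 */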

#ifdef CONFIG_DRM_FBDEV_EMULATION
/*
 * Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	void *vaddr;
	int ret;

	mutex_lock(&omap_obj->lock);

	if (!omap_obj->vaddr) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			vaddr = ERR_PTR(ret);
			goto unlock;
		}

		omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}

	vaddr = omap_obj->vaddr;

unlock:
	mutex_unlock(&omap_obj->lock);
	return vaddr;
}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	mutex_lock(&priv->list_lock);
	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			u32 npages = obj->size >> PAGE_SHIFT;

			WARN_ON(!omap_obj->pages); /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev->dev, "could not repin: %d\n", ret);
				goto done;
			}
		}
	}

done:
	mutex_unlock(&priv->list_lock);
	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u64 off;

	off = drm_vma_node_start(&obj->vma_node);

	mutex_lock(&omap_obj->lock);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, kref_read(&obj->refcount),
			off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	mutex_unlock(&omap_obj->lock);

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	omap_gem_evict(obj);

	mutex_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	mutex_unlock(&priv->list_lock);

	/*
	 * We own the sole reference to the object at this point, but to keep
	 * lockdep happy, we must still take omap_obj->lock to call
	 * omap_gem_detach_pages(). This should hardly make any difference as
	 * there can't be any lock contention.
	 */
	mutex_lock(&omap_obj->lock);

	/* The object should not be pinned. */
	WARN_ON(omap_obj->dma_addr_cnt > 0);

	if (omap_obj->pages) {
		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
			kfree(omap_obj->pages);
		else
			omap_gem_detach_pages(obj);
	}

	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
			    omap_obj->dma_addr);
	} else if (omap_obj->vaddr) {
		vunmap(omap_obj->vaddr);
	} else if (obj->import_attach) {
		drm_prime_gem_destroy(obj, omap_obj->sgt);
	}

	mutex_unlock(&omap_obj->lock);

	drm_gem_object_release(obj);

	mutex_destroy(&omap_obj->lock);

	kfree(omap_obj);
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, u32 flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	/* Validate the flags and compute the memory and cache flags. */
	if (flags & OMAP_BO_TILED) {
		if (!priv->usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			return NULL;
		}

		/*
		 * Tiled buffers are always shmem paged backed. When they are
		 * scanned out, they are remapped into DMM/TILER.
		 */
		flags &= ~OMAP_BO_SCANOUT;
		flags |= OMAP_BO_MEM_SHMEM;

		/*
		 * Currently don't allow cached buffers. There is some caching
		 * stuff that needs to be handled better.
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();
	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/*
		 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
		 * tiled. However, to lower the pressure on memory allocation,
		 * use contiguous memory only if no TILER is available.
		 */
		flags |= OMAP_BO_MEM_DMA_API;
	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
		/*
		 * All other buffers not backed by dma_buf are shmem-backed.
		 */
		flags |= OMAP_BO_MEM_SHMEM;
	}

	/* Allocate and initialize the OMAP GEM object. */
Rob Clarkcd5351f2011-11-12 12:09:40 -06001167 omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
Joe Perches78110bb2013-02-11 09:41:29 -08001168 if (!omap_obj)
Tomi Valkeinena903e3b2015-03-17 15:31:11 +02001169 return NULL;
Rob Clarkf6b60362012-03-05 10:48:36 -06001170
Rob Clarkcd5351f2011-11-12 12:09:40 -06001171 obj = &omap_obj->base;
Laurent Pinchart9cba3b92015-12-14 22:39:43 +02001172 omap_obj->flags = flags;
Laurent Pinchart3cbd0c52018-05-26 19:54:33 +03001173 mutex_init(&omap_obj->lock);
Rob Clarkcd5351f2011-11-12 12:09:40 -06001174
Laurent Pinchart9cba3b92015-12-14 22:39:43 +02001175 if (flags & OMAP_BO_TILED) {
1176 /*
1177 * For tiled buffers align dimensions to slot boundaries and
1178 * calculate size based on aligned dimensions.
Rob Clarka6a91822011-12-09 23:26:08 -06001179 */
Laurent Pinchart9cba3b92015-12-14 22:39:43 +02001180 tiler_align(gem2fmt(flags), &gsize.tiled.width,
1181 &gsize.tiled.height);
YAMANE Toshiakiae053032012-11-14 19:33:17 +09001182
Laurent Pinchart9cba3b92015-12-14 22:39:43 +02001183 size = tiler_size(gem2fmt(flags), gsize.tiled.width,
1184 gsize.tiled.height);
Tomi Valkeinena903e3b2015-03-17 15:31:11 +02001185
Laurent Pinchart9cba3b92015-12-14 22:39:43 +02001186 omap_obj->width = gsize.tiled.width;
1187 omap_obj->height = gsize.tiled.height;
1188 } else {
1189 size = PAGE_ALIGN(gsize.bytes);
Rob Clarkcd5351f2011-11-12 12:09:40 -06001190 }

	/* Initialize the GEM object. */
	if (!(flags & OMAP_BO_MEM_SHMEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_free;

		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	/* Allocate memory if needed. */
	if (flags & OMAP_BO_MEM_DMA_API) {
		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
					       &omap_obj->dma_addr,
					       GFP_KERNEL);
		if (!omap_obj->vaddr)
			goto err_release;
	}
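
	/*
	 * Note that shmem-backed objects don't get their pages allocated
	 * here; pages are attached lazily, when the object is first pinned
	 * or faulted in through mmap.
	 */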

	mutex_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	mutex_unlock(&priv->list_lock);

	return obj;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(omap_obj);
	return NULL;
}
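
/*
 * Usage sketch (illustrative only, values hypothetical): allocating a
 * write-combined 2D-tiled 16-bit buffer from driver code. For tiled
 * buffers the gsize union carries pixel dimensions; for linear buffers
 * it carries a byte count.
 *
 *	union omap_gem_size gsize = {
 *		.tiled = { .width = 1920, .height = 1080 },
 *	};
 *	struct drm_gem_object *obj;
 *
 *	obj = omap_gem_new(dev, gsize, OMAP_BO_TILED_16 | OMAP_BO_WC);
 *	if (!obj)
 *		return -ENOMEM;
 */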

struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
					   struct sg_table *sgt)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	union omap_gem_size gsize;

	/* Without a DMM, only physically contiguous buffers can be supported. */
	if (sgt->orig_nents != 1 && !priv->has_dmm)
		return ERR_PTR(-EINVAL);

	gsize.bytes = PAGE_ALIGN(size);
	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	omap_obj = to_omap_bo(obj);

	mutex_lock(&omap_obj->lock);

	omap_obj->sgt = sgt;

	if (sgt->orig_nents == 1) {
		omap_obj->dma_addr = sg_dma_address(sgt->sgl);
	} else {
		/* Create the pages list from the sgt. */
		struct sg_page_iter iter;
		struct page **pages;
		unsigned int npages;
		unsigned int i = 0;

		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}

		omap_obj->pages = pages;
		for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
			/* Guard against an sgt spanning more pages than
			 * expected; checking before the store avoids writing
			 * one entry past the end of the pages array.
			 */
			if (i >= npages)
				break;
			pages[i++] = sg_page_iter_page(&iter);
		}

		if (WARN_ON(i != npages)) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}
	}

done:
	mutex_unlock(&omap_obj->lock);
	return obj;
}
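
/*
 * Note: this constructor is reached when importing a dma-buf through
 * PRIME. The sg_table is stored as-is rather than copied, so the
 * importing side must keep the mapping valid for the lifetime of the
 * GEM object.
 */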

/* Convenience method to construct a GEM buffer object and a userspace handle. */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* Drop the reference from allocation - the handle holds it now. */
	drm_gem_object_put_unlocked(obj);

	return 0;
}
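
/*
 * Sketch (illustrative, not the actual ioctl handler): an
 * OMAP_GEM_NEW-style ioctl would typically wrap the helper above,
 * assuming the uapi struct drm_omap_gem_new layout.
 *
 *	static int ioctl_gem_new(struct drm_device *dev, void *data,
 *				 struct drm_file *file_priv)
 *	{
 *		struct drm_omap_gem_new *args = data;
 *
 *		return omap_gem_new_handle(dev, file_priv, args->size,
 *					   args->flags, &args->handle);
 *	}
 */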

/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/*
 * If DMM is available, set up the usergart: pre-reserved TILER regions
 * used to service userspace mmap faults on tiled buffers.
 */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM is only supported on OMAP4 and later, so this isn't fatal. */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(ARRAY_SIZE(fmts), sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;
Rob Clarkf7f9f452011-12-05 19:19:22 -06001332
1333 /* reserve 4k aligned/wide regions for userspace mappings: */
1334 for (i = 0; i < ARRAY_SIZE(fmts); i++) {
Laurent Pinchartdfe9cfc2018-02-11 15:07:33 +02001335 u16 h = 1, w = PAGE_SIZE >> i;
1336
Rob Clarkf7f9f452011-12-05 19:19:22 -06001337 tiler_align(fmts[i], &w, &h);
1338 /* note: since each region is 1 4kb page wide, and minimum
1339 * number of rows, the height ends up being the same as the
1340 * # of pages in the region
1341 */
1342 usergart[i].height = h;
1343 usergart[i].height_shift = ilog2(h);
Rob Clark3c810c62012-08-15 15:18:01 -05001344 usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
Rob Clarkf7f9f452011-12-05 19:19:22 -06001345 usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
1346 for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
Laurent Pinchartf4302742015-12-14 22:39:34 +02001347 struct omap_drm_usergart_entry *entry;
1348 struct tiler_block *block;
1349
1350 entry = &usergart[i].entry[j];
1351 block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
Rob Clarkf7f9f452011-12-05 19:19:22 -06001352 if (IS_ERR(block)) {
1353 dev_err(dev->dev,
1354 "reserve failed: %d, %d, %ld\n",
1355 i, j, PTR_ERR(block));
1356 return;
1357 }
Laurent Pinchart16869082017-04-21 00:33:51 +03001358 entry->dma_addr = tiler_ssptr(block);
Rob Clarkf7f9f452011-12-05 19:19:22 -06001359 entry->block = block;
1360
Laurent Pinchart16869082017-04-21 00:33:51 +03001361 DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
1362 &entry->dma_addr,
Rob Clarkf7f9f452011-12-05 19:19:22 -06001363 usergart[i].stride_pfn << PAGE_SHIFT);
1364 }
1365 }
Rob Clarka6a91822011-12-09 23:26:08 -06001366
Laurent Pinchartf4302742015-12-14 22:39:34 +02001367 priv->usergart = usergart;
Rob Clarka6a91822011-12-09 23:26:08 -06001368 priv->has_dmm = true;
Rob Clarkf7f9f452011-12-05 19:19:22 -06001369}
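
/*
 * For reference (derived from the loop above): with 4kb pages, w starts
 * at 4096, 2048 and 1024 pixels for the 8-, 16- and 32-bit formats
 * respectively, i.e. one page worth of pixels each, before tiler_align()
 * rounds the dimensions up to the format's slot boundaries.
 */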

void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}