// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Russell King
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"

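/*
 * Fault handler for mmap'ed GEM objects backed by physically contiguous
 * memory: translate the faulting address into an offset from the object's
 * physical base and insert that PFN into the faulting VMA.
 */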
static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;

	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}

const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}

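/*
 * Release an armada GEM object, returning its backing store to wherever
 * it came from: the page allocator for small page-backed objects, the
 * linear drm_mm pool for display memory, or the dma-buf attachment for
 * imported buffers.
 */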
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = obj->dev->dev_private;

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment(dobj->obj.import_attach,
						 dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}

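/*
 * Provide physical backing for an object: small allocations (typically
 * cursors) come from the page allocator, while larger ones are carved
 * out of the driver's linear memory pool and zeroed through a temporary
 * write-combining mapping.
 */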
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = dev->dev_private;
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If this is a small allocation (typically a cursor, which will
	 * be 32x64 or 64x32 ARGB pixels), try to get it from the system.
	 * Framebuffers will never be this small (our minimum framebuffer
	 * size is larger than this anyway).  Such objects are only
	 * accessed by the CPU, so they need no special handling here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * builds in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct-mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (e.g. as it would be on ARM).  This means virt_to_phys() on the
	 * returned virtual address may be invalid, depending on the
	 * architecture implementation.
	 *
	 * The device address may also not be a physical address; there
	 * may be some kind of remapping between the device and system
	 * RAM, which makes the device address equally unsafe to reuse
	 * as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node_generic(&priv->linear, node,
						 size, align, 0, 0);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
		obj->mapped = true;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}

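/*
 * Return a kernel virtual address for the object, creating a
 * write-combining mapping for linear objects on first use.
 */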
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}

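/*
 * Allocate a GEM object with no shmem backing; the caller is expected
 * to attach linear or imported memory to it later.
 */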
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	drm_gem_private_object_init(dev, &obj->obj, size);

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}

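/* Allocate a shmem-backed GEM object whose pages may come from highmem. */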
static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	mapping = obj->obj.filp->f_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
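/*
 * Dumb buffers are allocated for scanout-style use, so back them with
 * linear memory rather than shmem.
 */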
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_put_unlocked(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_put_unlocked(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}

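/*
 * Copy user data into a kernel-mapped object; if the object has an
 * update hook registered, call it once the copy succeeds.
 */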
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(ptr, args->size))
		return -EFAULT;

	ret = fault_in_pages_readable(ptr, args->size);
	if (ret)
		return ret;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Prime support */
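/*
 * Build a scatter/gather table for an exported object: shmem objects are
 * pinned page by page and DMA-mapped, while page-backed and linear
 * objects are described by a single contiguous entry.
 */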
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i, num;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = dobj->obj.filp->f_mapping;

		for_each_sg(sgt->sgl, sg, count, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				num = i;
				goto release;
			}

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
			num = sgt->nents;
			goto release;
		}
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sg(sgt->sgl, sg, num, i)
		put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}

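/* Undo armada_gem_prime_map_dma_buf(): unmap, unpin and free the table. */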
static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	if (dobj->obj.filp) {
		struct scatterlist *sg;

		for_each_sg(sgt->sgl, sg, sgt->nents, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}

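/* CPU access (kmap/mmap) of exported dma-bufs is not implemented. */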
static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
	return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.map		= armada_gem_dmabuf_no_kmap,
	.unmap		= armada_gem_dmabuf_no_kunmap,
	.mmap		= armada_gem_dmabuf_mmap,
};

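/* Export a GEM object as a dma-buf using this driver's dma_buf_ops. */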
struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
	int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(dev, &exp_info);
}

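/*
 * Import a dma-buf: self-imports simply take another reference on the
 * underlying GEM object, while foreign buffers get a private object with
 * a dma-buf attachment that is mapped later via armada_gem_map_import().
 */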
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;

		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}

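/*
 * Map an imported dma-buf for DMA; the buffer must be physically
 * contiguous and at least as large as the GEM object.
 */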
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					   DMA_TO_DEVICE);
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}
	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	dobj->mapped = true;
	return 0;
}