#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN		(1UL << 8)

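/*
 * Illustrative use (not from this file): the attributes form a plain
 * bitmask, so several DMA_ATTR_* flags may be ORed together and passed
 * to any of the *_attrs interfaces declared below; 'dev', 'buf' and
 * 'len' here are hypothetical driver state.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
 *				      DMA_ATTR_WEAK_ORDERING |
 *				      DMA_ATTR_NO_WARN);
 */
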
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
			void *vaddr, dma_addr_t dma_handle,
			unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t,
			unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};

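/*
 * A minimal sketch of how a platform might populate this table for a
 * trivial 1:1 ("direct") mapping; the foo_* names are hypothetical and
 * real implementations fill in far more hooks (dma_noop_ops below is
 * the in-tree example of this pattern):
 *
 *	static dma_addr_t foo_map_page(struct device *dev, struct page *page,
 *				       unsigned long offset, size_t size,
 *				       enum dma_data_direction dir,
 *				       unsigned long attrs)
 *	{
 *		return page_to_phys(page) + offset;
 *	}
 *
 *	const struct dma_map_ops foo_dma_ops = {
 *		.map_page	= foo_map_page,
 *	};
 */
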
extern const struct dma_map_ops dma_noop_ops;
extern const struct dma_map_ops dma_virt_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL

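/*
 * For reference: DMA_BIT_MASK(32) evaluates to 0xffffffffULL, and the
 * (n) == 64 special case avoids the undefined behaviour of shifting a
 * 64-bit value by 64 bits.
 */
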
static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, size_t size, int *ret);
#else
#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
#define dma_release_from_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else
/*
 * Define the DMA API to allow compilation but not linking of
 * DMA-dependent code. Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern const struct dma_map_ops bad_dma_ops;
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

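/*
 * Illustrative streaming-mapping sketch ('dev', 'buf' and 'len' are
 * hypothetical driver state); every mapping must be checked with
 * dma_mapping_error(), defined later in this file:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... hand 'handle' to the hardware and wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
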
/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

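/*
 * Scatter/gather sketch (illustrative; 'sgt' is a hypothetical,
 * already-populated struct sg_table). The device is programmed with the
 * count *returned* by dma_map_sg(), which may be smaller than nents if
 * entries were merged, while dma_unmap_sg() takes the original nents:
 *
 *	int count = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
 *	if (count == 0)
 *		return -EIO;
 *	... program 'count' hardware descriptors from the list ...
 *	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
 */
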
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    size_t offset, size_t size,
					    enum dma_data_direction dir,
					    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t addr, size_t size,
					enum dma_data_direction dir,
					unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}

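/*
 * Illustrative use of dma_map_resource(): mapping a slave device's MMIO
 * FIFO so a DMA engine can access it ('dma_dev' and 'fifo_phys' are
 * hypothetical):
 *
 *	dma_addr_t fifo_dma;
 *
 *	fifo_dma = dma_map_resource(dma_dev, fifo_phys, 4, DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dma_dev, fifo_dma))
 *		return -EIO;
 *	... use 'fifo_dma' as the transfer's destination address ...
 *	dma_unmap_resource(dma_dev, fifo_dma, 4, DMA_TO_DEVICE, 0);
 */
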
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

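/*
 * Ownership sketch for a long-lived streaming mapping (illustrative;
 * 'handle' came from an earlier dma_map_single()): the CPU may only
 * touch the buffer between the two sync calls, after which ownership
 * returns to the device:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the received data ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */
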
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags,
				  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

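/*
 * Illustrative sketch of a driver mmap file operation built on
 * dma_mmap_coherent(); 'struct foo_priv' and its fields are made up:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->handle, priv->size);
 *	}
 */
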
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size,
		      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			       DMA_ATTR_NON_CONSISTENT);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_attrs(dev, size, cpu_addr, dma_handle,
		       DMA_ATTR_NON_CONSISTENT);
}

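/*
 * Typical coherent allocation at probe time (illustrative; the 'ring'
 * structure and its lifetime are hypothetical):
 *
 *	ring->desc = dma_alloc_coherent(dev, ring_size, &ring->dma,
 *					GFP_KERNEL);
 *	if (!ring->desc)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, ring_size, ring->desc, ring->dma);
 */
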
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (get_dma_ops(dev)->mapping_error)
		return get_dma_ops(dev)->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;
#else
	return 0;
#endif
}

#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#endif

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately setup.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

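/*
 * Common probe-time pattern (illustrative): try the widest mask the
 * hardware supports, then fall back to 32 bits:
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */
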
extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

/* flags for the coherent memory api */
#define	DMA_MEMORY_MAP			0x01
#define DMA_MEMORY_IO			0x02
#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
#define DMA_MEMORY_EXCLUSIVE		0x08

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_handle);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, gfp_t gfp)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

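/*
 * Illustrative managed allocation: the buffer is released automatically
 * when the driver detaches, so no explicit free is needed on the error
 * or remove paths ('buf' and 'handle' are hypothetical):
 *
 *	buf = dmam_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 */
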
static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			       DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif

#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif
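
/*
 * Illustrative use of the unmap-state helpers above: the fields compile
 * away entirely when neither CONFIG_NEED_DMA_MAP_STATE nor
 * CONFIG_DMA_API_DEBUG is set ('struct foo_buf' and 'buf' are made up):
 *
 *	struct foo_buf {
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(buf, mapping, handle);
 *	dma_unmap_len_set(buf, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(buf, mapping),
 *			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
 */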

#endif