#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER          (1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is, reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING          (1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE          (1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT         (1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING      (1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC          (1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS       (1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES     (1UL << 7)

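/*
 * Illustrative sketch (not part of this header): DMA attributes form a
 * bitmask, so several of them can be OR'ed together and handed to the
 * *_attrs variants of the allocation/mapping helpers declared below.
 * The device pointer, size and gfp flags here are hypothetical.
 *
 *      unsigned long attrs = DMA_ATTR_WRITE_COMBINE |
 *                            DMA_ATTR_NO_KERNEL_MAPPING;
 *      void *buf = dma_alloc_attrs(dev, SZ_64K, &dma_handle,
 *                                  GFP_KERNEL, attrs);
 *      ...
 *      dma_free_attrs(dev, SZ_64K, buf, dma_handle, attrs);
 */
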
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target.  A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
        void* (*alloc)(struct device *dev, size_t size,
                        dma_addr_t *dma_handle, gfp_t gfp,
                        unsigned long attrs);
        void (*free)(struct device *dev, size_t size,
                        void *vaddr, dma_addr_t dma_handle,
                        unsigned long attrs);
        int (*mmap)(struct device *, struct vm_area_struct *,
                        void *, dma_addr_t, size_t,
                        unsigned long attrs);

        int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
                        dma_addr_t, size_t, unsigned long attrs);

        dma_addr_t (*map_page)(struct device *dev, struct page *page,
                        unsigned long offset, size_t size,
                        enum dma_data_direction dir,
                        unsigned long attrs);
        void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction dir,
                        unsigned long attrs);
        /*
         * map_sg returns 0 on error and a value > 0 on success.
         * It should never return a value < 0.
         */
        int (*map_sg)(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir,
                        unsigned long attrs);
        void (*unmap_sg)(struct device *dev,
                        struct scatterlist *sg, int nents,
                        enum dma_data_direction dir,
                        unsigned long attrs);
        dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
                        size_t size, enum dma_data_direction dir,
                        unsigned long attrs);
        void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction dir,
                        unsigned long attrs);
        void (*sync_single_for_cpu)(struct device *dev,
                        dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction dir);
        void (*sync_single_for_device)(struct device *dev,
                        dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction dir);
        void (*sync_sg_for_cpu)(struct device *dev,
                        struct scatterlist *sg, int nents,
                        enum dma_data_direction dir);
        void (*sync_sg_for_device)(struct device *dev,
                        struct scatterlist *sg, int nents,
                        enum dma_data_direction dir);
        int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
        int (*dma_supported)(struct device *dev, u64 mask);
        int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
        u64 (*get_required_mask)(struct device *dev);
#endif
        int is_phys;
};

extern struct dma_map_ops dma_noop_ops;

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE   0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
        return ((dma_direction == DMA_BIDIRECTIONAL) ||
                (dma_direction == DMA_TO_DEVICE) ||
                (dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
        return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
                        dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
                        void *cpu_addr, size_t size, int *ret);
#else
#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
#define dma_release_from_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
/*
 * Define the DMA API to allow compilation but not linking of
 * DMA-dependent code.  Code that depends on the DMA-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern struct dma_map_ops bad_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
        return &bad_dma_ops;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
                                              size_t size,
                                              enum dma_data_direction dir,
                                              unsigned long attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        kmemcheck_mark_initialized(ptr, size);
        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(dev, virt_to_page(ptr),
                             offset_in_page(ptr), size,
                             dir, attrs);
        debug_dma_map_page(dev, virt_to_page(ptr),
                           offset_in_page(ptr), size,
                           dir, addr, true);
        return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
                                          size_t size,
                                          enum dma_data_direction dir,
                                          unsigned long attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, attrs);
        debug_dma_unmap_page(dev, addr, size, dir, true);
}

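/*
 * Illustrative sketch (not part of this header): a typical streaming
 * mapping of a driver-owned buffer for a single transfer.  The device
 * pointer, buffer and length are hypothetical; dma_map_single() is the
 * attr-less shorthand defined further down, and dma_mapping_error() is
 * also defined later in this header.
 *
 *      dma_addr_t handle;
 *
 *      handle = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, 0);
 *      if (dma_mapping_error(dev, handle))
 *              return -ENOMEM;
 *      ... program the device with 'handle', wait for completion ...
 *      dma_unmap_single_attrs(dev, handle, len, DMA_TO_DEVICE, 0);
 */
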
/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                                   int nents, enum dma_data_direction dir,
                                   unsigned long attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        int i, ents;
        struct scatterlist *s;

        for_each_sg(sg, s, nents, i)
                kmemcheck_mark_initialized(sg_virt(s), s->length);
        BUG_ON(!valid_dma_direction(dir));
        ents = ops->map_sg(dev, sg, nents, dir, attrs);
        BUG_ON(ents < 0);
        debug_dma_map_sg(dev, sg, nents, ents, dir);

        return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
                                      int nents, enum dma_data_direction dir,
                                      unsigned long attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        debug_dma_unmap_sg(dev, sg, nents, dir);
        if (ops->unmap_sg)
                ops->unmap_sg(dev, sg, nents, dir, attrs);
}

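/*
 * Illustrative sketch (not part of this header): mapping a scatterlist
 * and walking the DMA segments produced by the arch/IOMMU code.  The
 * scatterlist 'sgl' with 'nents' entries and program_hw_segment() are
 * hypothetical.  Note that the returned count may be smaller than
 * 'nents' because entries can be merged, and that unmapping must use
 * the original 'nents'.
 *
 *      struct scatterlist *s;
 *      int i, count;
 *
 *      count = dma_map_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, 0);
 *      if (count == 0)
 *              return -ENOMEM;
 *      for_each_sg(sgl, s, count, i)
 *              program_hw_segment(sg_dma_address(s), sg_dma_len(s));
 *      ...
 *      dma_unmap_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, 0);
 */
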
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        kmemcheck_mark_initialized(page_address(page) + offset, size);
        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(dev, page, offset, size, dir, 0);
        debug_dma_map_page(dev, page, offset, size, dir, addr, false);

        return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, 0);
        debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline dma_addr_t dma_map_resource(struct device *dev,
                                          phys_addr_t phys_addr,
                                          size_t size,
                                          enum dma_data_direction dir,
                                          unsigned long attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        BUG_ON(!valid_dma_direction(dir));

        /* Don't allow RAM to be mapped */
        BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

        addr = phys_addr;
        if (ops->map_resource)
                addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

        debug_dma_map_resource(dev, phys_addr, size, dir, addr);

        return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
                                      size_t size, enum dma_data_direction dir,
                                      unsigned long attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_resource)
                ops->unmap_resource(dev, addr, size, dir, attrs);
        debug_dma_unmap_resource(dev, addr, size, dir);
}

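/*
 * Illustrative sketch (not part of this header): dma_map_resource() is
 * meant for physical ranges that are not RAM, e.g. a peripheral's FIFO
 * register that a DMA engine should read from or write to.  The
 * resource address 'phys' and its 'len' are hypothetical.
 *
 *      dma_addr_t fifo;
 *
 *      fifo = dma_map_resource(dev, phys, len, DMA_TO_DEVICE, 0);
 *      if (dma_mapping_error(dev, fifo))
 *              return -ENOMEM;
 *      ... hand 'fifo' to the DMA engine as the destination address ...
 *      dma_unmap_resource(dev, fifo, len, DMA_TO_DEVICE, 0);
 */
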
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
                                           size_t size,
                                           enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(dev, addr, size, dir);
        debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                                              dma_addr_t addr, size_t size,
                                              enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(dev, addr, size, dir);
        debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
                                                 dma_addr_t addr,
                                                 unsigned long offset,
                                                 size_t size,
                                                 enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(dev, addr + offset, size, dir);
        debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                                                    dma_addr_t addr,
                                                    unsigned long offset,
                                                    size_t size,
                                                    enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(dev, addr + offset, size, dir);
        debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

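/*
 * Illustrative sketch (not part of this header): when the CPU needs to
 * look at a buffer that stays mapped across several device transfers,
 * ownership is bounced back and forth with the sync helpers.  The
 * mapping 'handle' and 'len' are hypothetical.
 *
 *      dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *      ... the CPU may now safely read the buffer ...
 *      dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *      ... the device may now DMA into the buffer again ...
 */
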
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                    int nelems, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                       int nelems, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                           void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
                        unsigned long vm_flags,
                        pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
                        unsigned long vm_flags, pgprot_t prot,
                        const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @handle: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
               dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!ops);
        if (ops->mmap)
                return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
        return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

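/*
 * Illustrative sketch (not part of this header): exporting a coherent
 * buffer to user space from a driver's ->mmap() file operation.  The
 * 'foo_priv' structure holding the device, buffer, handle and size is
 * hypothetical.
 *
 *      static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct foo_priv *priv = file->private_data;
 *
 *              return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *                                       priv->dma_handle, priv->size);
 *      }
 */
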
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
                       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
                      dma_addr_t dma_addr, size_t size,
                      unsigned long attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!ops);
        if (ops->get_sgtable)
                return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
                                        attrs);
        return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag) (true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
                                    dma_addr_t *dma_handle, gfp_t flag,
                                    unsigned long attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        void *cpu_addr;

        BUG_ON(!ops);

        if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
                return cpu_addr;

        if (!arch_dma_alloc_attrs(&dev, &flag))
                return NULL;
        if (!ops->alloc)
                return NULL;

        cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
        return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
                                  void *cpu_addr, dma_addr_t dma_handle,
                                  unsigned long attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!ops);
        WARN_ON(irqs_disabled());

        if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
                return;

        if (!ops->free || !cpu_addr)
                return;

        debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
        ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag)
{
        return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *cpu_addr, dma_addr_t dma_handle)
{
        return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

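/*
 * Illustrative sketch (not part of this header): a typical coherent
 * (consistent) allocation for a long-lived descriptor ring.  The
 * device pointer and the RING_BYTES size are hypothetical.
 *
 *      dma_addr_t ring_dma;
 *      void *ring;
 *
 *      ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */
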
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
                                          dma_addr_t *dma_handle, gfp_t gfp)
{
        return dma_alloc_attrs(dev, size, dma_handle, gfp,
                               DMA_ATTR_NON_CONSISTENT);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
                                        void *cpu_addr, dma_addr_t dma_handle)
{
        dma_free_attrs(dev, size, cpu_addr, dma_handle,
                       DMA_ATTR_NON_CONSISTENT);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        debug_dma_mapping_error(dev, dma_addr);

        if (get_dma_ops(dev)->mapping_error)
                return get_dma_ops(dev)->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
        return dma_addr == DMA_ERROR_CODE;
#else
        return 0;
#endif
}

#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        if (!ops)
                return 0;
        if (!ops->dma_supported)
                return 1;
        return ops->dma_supported(dev, mask);
}
#endif

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        if (ops->set_dma_mask)
                return ops->set_dma_mask(dev, mask);

        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;
        *dev->dma_mask = mask;
        return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
        if (dev && dev->dma_mask && *dev->dma_mask)
                return *dev->dma_mask;
        return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        if (!dma_supported(dev, mask))
                return -EIO;
        dev->coherent_dma_mask = mask;
        return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
        int rc = dma_set_mask(dev, mask);

        if (rc == 0)
                dma_set_coherent_mask(dev, mask);
        return rc;
}

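/*
 * Illustrative sketch (not part of this header): probe-time DMA mask
 * setup for a device that prefers 64-bit addressing but can fall back
 * to 32 bits.  The device pointer comes from the hypothetical probe
 * context.
 *
 *      if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *          dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *              return -EIO;
 */
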
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask set up appropriately.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
        dev->dma_mask = &dev->coherent_dma_mask;
        return dma_set_mask_and_coherent(dev, mask);
}

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
                                      u64 size, const struct iommu_ops *iommu,
                                      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
        if (dev->dma_parms && dev->dma_parms->max_segment_size)
                return dev->dma_parms->max_segment_size;
        return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
                                                unsigned int size)
{
        if (dev->dma_parms) {
                dev->dma_parms->max_segment_size = size;
                return 0;
        }
        return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
        if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
                return dev->dma_parms->segment_boundary_mask;
        return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
        if (dev->dma_parms) {
                dev->dma_parms->segment_boundary_mask = mask;
                return 0;
        }
        return -EIO;
}

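/*
 * Illustrative sketch (not part of this header): a host controller
 * driver constraining how the core may merge scatterlist segments for
 * its devices.  The values and the 'host->dma_parms' backing store are
 * hypothetical; dev->dma_parms must point at valid storage before
 * these setters can succeed.
 *
 *      dev->dma_parms = &host->dma_parms;
 *      dma_set_max_seg_size(dev, SZ_64K);
 *      dma_set_seg_boundary(dev, SZ_4K - 1);
 */
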
#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
        return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_handle, gfp_t flag)
{
        void *ret = dma_alloc_coherent(dev, size, dma_handle,
                                       flag | __GFP_ZERO);
        return ret;
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
        return ARCH_DMA_MINALIGN;
#endif
        return 1;
}
#endif


/* flags for the coherent memory api */
#define DMA_MEMORY_MAP                  0x01
#define DMA_MEMORY_IO                   0x02
#define DMA_MEMORY_INCLUDES_CHILDREN    0x04
#define DMA_MEMORY_EXCLUSIVE            0x08

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                            dma_addr_t device_addr, size_t size, int flags)
{
        return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
                                  dma_addr_t device_addr, size_t size)
{
        return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

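/*
 * Illustrative sketch (not part of this header): a driver handing a
 * chunk of device-local memory to the coherent allocator so that
 * dma_alloc_coherent() for this device is satisfied from it.  The
 * physical/bus base addresses and the size are hypothetical, and the
 * call returns 0 on failure.
 *
 *      if (!dma_declare_coherent_memory(dev, phys_base, bus_base, SZ_1M,
 *                                       DMA_MEMORY_MAP |
 *                                       DMA_MEMORY_EXCLUSIVE))
 *              return -ENOMEM;
 *      ...
 *      dma_release_declared_memory(dev);
 */
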
/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
                               dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
                                    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
                                  dma_addr_t dma_handle);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
                                        phys_addr_t phys_addr,
                                        dma_addr_t device_addr, size_t size,
                                        int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
                                phys_addr_t phys_addr, dma_addr_t device_addr,
                                size_t size, int flags)
{
        return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

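/*
 * Illustrative sketch (not part of this header): the dmam_* variants
 * are device-managed (devres) versions of the allocators above; the
 * allocation is released automatically when the driver detaches, so a
 * probe() path needs no matching free on its error or remove paths.
 * The RING_BYTES size is hypothetical.
 *
 *      ring = dmam_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ... no dmam_free_coherent() needed in remove() ...
 */
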
static inline void *dma_alloc_wc(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t gfp)
{
        return dma_alloc_attrs(dev, size, dma_addr, gfp,
                               DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
                               void *cpu_addr, dma_addr_t dma_addr)
{
        return dma_free_attrs(dev, size, cpu_addr, dma_addr,
                              DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
                              struct vm_area_struct *vma,
                              void *cpu_addr, dma_addr_t dma_addr,
                              size_t size)
{
        return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
                              DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)            ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)          (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)            (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   do { } while (0)
#endif
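
/*
 * Illustrative sketch (not part of this header): the DEFINE_DMA_UNMAP_*
 * macros let a driver keep the address/length needed for unmapping in
 * its own per-buffer state without wasting space on configurations that
 * do not need it.  The structure and field names are hypothetical.
 *
 *      struct foo_tx_buf {
 *              struct sk_buff *skb;
 *              DEFINE_DMA_UNMAP_ADDR(mapping);
 *              DEFINE_DMA_UNMAP_LEN(len);
 *      };
 *
 *      dma_unmap_addr_set(buf, mapping, handle);
 *      dma_unmap_len_set(buf, len, size);
 *      ...
 *      dma_unmap_single(dev, dma_unmap_addr(buf, mapping),
 *                       dma_unmap_len(buf, len), DMA_TO_DEVICE);
 */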

#endif