/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is, reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT	(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)

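/*
 * Example (illustrative sketch, not part of this header): attributes are
 * OR'ed together and passed in the 'attrs' argument of the *_attrs
 * variants below, e.g. a hypothetical driver allocating a buffer that
 * needs no kernel mapping and should not warn on allocation failure:
 *
 *	void *cpu_addr = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
 *					 DMA_ATTR_NO_KERNEL_MAPPING |
 *					 DMA_ATTR_NO_WARN);
 */
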
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp,
				unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle,
			      unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
			  void *, dma_addr_t, size_t,
			  unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			   enum dma_data_direction direction);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};

extern const struct dma_map_ops dma_noop_ops;
extern const struct dma_map_ops dma_virt_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
				dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
				  size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(ssize_t size,
						   dma_addr_t *dma_handle)
{
	return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
						void *cpu_addr, size_t size,
						int *ret)
{
	return 0;
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else
/*
 * Define the DMA API to allow compilation but not linking of
 * DMA-dependent code. Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern const struct dma_map_ops bad_dma_ops;
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

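/*
 * Example (illustrative sketch, not part of this header): map_sg may
 * coalesce entries, so the returned count - not the original nents - is
 * what the device must be programmed with, while the original nents is
 * used when unmapping. 'sgt' is an assumed, already-populated sg_table:
 *
 *	int count = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -EIO;
 *	...
 *	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 */
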
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    size_t offset, size_t size,
					    enum dma_data_direction dir,
					    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t addr, size_t size,
					enum dma_data_direction dir,
					unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

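/*
 * Example (illustrative sketch, not part of this header): CPU access to a
 * buffer that stays mapped must be bracketed with the sync calls above;
 * 'handle' and 'len' are assumed to come from an earlier
 * dma_map_single(..., DMA_FROM_DEVICE):
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU reads the received data ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */
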
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @handle: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

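/*
 * Example (illustrative sketch, not part of this header): a driver's mmap
 * file operation exporting a previously allocated coherent buffer to user
 * space; 'foo_priv' with its 'dev', 'cpu_addr', 'dma_handle' and 'size'
 * members is an assumed driver structure:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->dma_handle, priv->size);
 *	}
 */
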
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size,
		      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

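/*
 * Example (illustrative, not part of this header): allocating a small
 * descriptor ring that both CPU and device access concurrently, then
 * freeing it on teardown; 'ring' and RING_SIZE are assumed names:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
 */
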
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_mapping_error(dev, dma_addr);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
	return 0;
}

static inline void dma_check_mask(struct device *dev, u64 mask)
{
	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}

static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

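/*
 * Example (illustrative, not part of this header): the common probe-time
 * pattern of asking for a wide mask first and falling back to 32 bits:
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */
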
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

/* flags for the coherent memory api */
#define DMA_MEMORY_EXCLUSIVE		0x01

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return -ENOSYS;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
int dma_configure(struct device *dev);
void dma_deconfigure(struct device *dev);
#else
static inline int dma_configure(struct device *dev)
{
	return 0;
}

static inline void dma_deconfigure(struct device *dev) {}
#endif

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_attrs(struct device *dev, size_t size,
			      dma_addr_t *dma_handle, gfp_t gfp,
			      unsigned long attrs);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

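/*
 * Example (illustrative, not part of this header): a managed allocation
 * made in probe() is released automatically on driver detach, so no
 * explicit dma_free_coherent() is needed on the error or remove paths.
 * 'pdev', 'buf', 'buf_dma' and BUF_SIZE are assumed driver-local names:
 *
 *	buf = dmam_alloc_coherent(&pdev->dev, BUF_SIZE, &buf_dma, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 */
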
static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			       DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif

#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)           (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)             (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
#endif

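/*
 * Example (illustrative sketch, not part of this header): the
 * DEFINE_DMA_UNMAP_* macros let a driver keep unmap bookkeeping that
 * compiles away when the architecture does not need it; 'foo_tx_desc',
 * 'desc', 'handle' and 'size' are assumed names:
 *
 *	struct foo_tx_desc {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(desc, mapping, handle);
 *	dma_unmap_len_set(desc, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(desc, mapping),
 *			 dma_unmap_len(desc, len), DMA_TO_DEVICE);
 */
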
#endif /* _LINUX_DMA_MAPPING_H */