/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is, reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)

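/*
 * Illustrative sketch (not part of the API itself) of how the attributes
 * above are used: they form a bitmask passed to the *_attrs variants of the
 * allocation and mapping calls. The device pointer and size are hypothetical.
 *
 *	void *cpu_addr;
 *	dma_addr_t dma;
 *	unsigned long attrs = DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN;
 *
 *	cpu_addr = dma_alloc_attrs(dev, SZ_4K, &dma, GFP_KERNEL, attrs);
 *	...
 *	dma_free_attrs(dev, SZ_4K, cpu_addr, dma, attrs);
 */
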
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target.  A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
			void *vaddr, dma_addr_t dma_handle,
			unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t,
			unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir,
			unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
};

extern const struct dma_map_ops dma_direct_ops;
extern const struct dma_map_ops dma_virt_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
		size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(ssize_t size,
		dma_addr_t *dma_handle)
{
	return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret)
{
	return 0;
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else
/*
 * Define the DMA API to allow compilation of DMA-dependent code.
 * Code that depends on the dma-mapping API needs to set 'depends on HAS_DMA'
 * in its Kconfig, unless it already depends on <something> || COMPILE_TEST,
 * where <something> guarantees the availability of the dma-mapping API.
 */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_map_single(dev, ptr, size);
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

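/*
 * Illustrative sketch of scatter-gather mapping for a device-to-memory
 * transfer. 'sgt' is assumed to be a previously populated struct sg_table,
 * 'dev' a hypothetical device pointer, and program_hw_descriptor() a
 * stand-in for driver-specific hardware setup.
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgt->sgl, sg, count, i)
 *		program_hw_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
 */
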
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    size_t offset, size_t size,
					    enum dma_data_direction dir,
					    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t addr, size_t size,
					enum dma_data_direction dir,
					unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}

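/*
 * Illustrative sketch of dma_map_resource(): a slave DMA controller is given
 * the bus address of a peripheral's MMIO FIFO rather than of RAM. The
 * 'fifo_phys' physical address and 'dev' pointer below are hypothetical.
 *
 *	dma_addr_t fifo_dma;
 *
 *	fifo_dma = dma_map_resource(dev, fifo_phys, 4, DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dev, fifo_dma))
 *		return -ENXIO;
 *	...
 *	dma_unmap_resource(dev, fifo_dma, 4, DMA_TO_DEVICE, 0);
 */
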
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)

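/*
 * Illustrative sketch of the streaming wrappers above: map a kernel buffer
 * for a device write, hand the bus address to the hardware, then sync and
 * unmap before the CPU reads the data. 'dev', 'buf', 'len' and
 * start_transfer() are hypothetical; error handling is abbreviated.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	start_transfer(dev, dma, len);
 *	...
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	(CPU may now look at the buffer contents)
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 */
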
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);

void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot);
bool dma_in_atomic_pool(void *start, size_t size);
void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
bool dma_free_from_pool(void *start, size_t size);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

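/*
 * Illustrative sketch of dma_mmap_coherent() from a driver's ->mmap() file
 * operation, exporting a previously allocated coherent buffer to user space.
 * 'foo_mmap' and 'struct foo_state' (holding dev/cpu_addr/dma_addr/size) are
 * hypothetical names for the example.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_state *st = file->private_data;
 *
 *		return dma_mmap_coherent(st->dev, vma, st->cpu_addr,
 *					 st->dma_addr, st->size);
 *	}
 */
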
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		dma_addr_t dma_addr, size_t size, unsigned long attrs);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size,
		      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
				      attrs);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);
	WARN_ON_ONCE(dev && !dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (!arch_dma_alloc_attrs(&dev))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

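/*
 * Illustrative sketch of the coherent allocation helpers: a driver allocates
 * a descriptor ring at probe time and frees it on remove. 'ring', 'ring_dma'
 * and RING_BYTES are hypothetical names for the example.
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_zalloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */
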
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_mapping_error(dev, dma_addr);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
	return 0;
}

static inline void dma_check_mask(struct device *dev, u64 mask)
{
	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}

static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same as, or smaller than, the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

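/*
 * Illustrative sketch of mask configuration at probe time, falling back to a
 * 32-bit mask when 64-bit addressing is not supported; &pdev->dev stands in
 * for the hypothetical device being probed.
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */
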
extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}

/* flags for the coherent memory api */
#define DMA_MEMORY_EXCLUSIVE		0x01

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return -ENOSYS;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

/*
 * Managed DMA API
 */
#ifdef CONFIG_HAS_DMA
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
#else /* !CONFIG_HAS_DMA */
static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t gfp)
{ return NULL; }
static inline void dmam_free_coherent(struct device *dev, size_t size,
				      void *vaddr, dma_addr_t dma_handle) { }
#endif /* !CONFIG_HAS_DMA */

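/*
 * Illustrative sketch of the managed ("dmam_") variant: the allocation is
 * tied to the device's lifetime, so no explicit free is needed in the error
 * and remove paths. 'priv', 'pdev' and DESC_BYTES are hypothetical.
 *
 *	priv->desc = dmam_alloc_coherent(&pdev->dev, DESC_BYTES,
 *					 &priv->desc_dma, GFP_KERNEL);
 *	if (!priv->desc)
 *		return -ENOMEM;
 */
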
extern void *dmam_alloc_attrs(struct device *dev, size_t size,
			      dma_addr_t *dma_handle, gfp_t gfp,
			      unsigned long attrs);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, gfp_t gfp)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif

#endif