/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_DIRECT_H
#define _LINUX_DMA_DIRECT_H 1

#include <linux/dma-mapping.h>
#include <linux/mem_encrypt.h>

#define DIRECT_MAPPING_ERROR		(~(dma_addr_t)0)

#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#else
static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	dma_addr_t dev_addr = (dma_addr_t)paddr;

	return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}

static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
	phys_addr_t paddr = (phys_addr_t)dev_addr;

	return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}
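
/*
 * A worked example of the generic offset translation above, assuming a
 * hypothetical platform where the bus sees RAM 2 GiB below its CPU
 * physical address: with PAGE_SHIFT == 12 and dev->dma_pfn_offset ==
 * 0x80000, a CPU physical address of 0x1_0000_0000 translates to the
 * bus address 0x1_0000_0000 - (0x80000 << 12) == 0x8000_0000, and
 * __dma_to_phys() performs the inverse addition.
 */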

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;

	return addr + size - 1 <=
		min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
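
/*
 * A minimal caller-side sketch, not part of this header ("dev", "paddr"
 * and "size" are illustrative names only): a direct-mapping backend
 * would translate the address and then reject buffers the device cannot
 * reach, e.g.
 *
 *	dma_addr_t dma_addr = phys_to_dma(dev, paddr);
 *
 *	if (!dma_capable(dev, dma_addr, size))
 *		return DIRECT_MAPPING_ERROR;
 */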

/*
 * If memory encryption is supported, phys_to_dma will set the memory encryption
 * bit in the DMA address, and dma_to_phys will clear it. The raw __phys_to_dma
 * and __dma_to_phys versions should only be used on non-encrypted memory for
 * special occasions like DMA coherent buffers.
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return __sme_set(__phys_to_dma(dev, paddr));
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return __sme_clr(__dma_to_phys(dev, daddr));
}
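
/*
 * Round-trip sketch (illustrative, assuming a platform like AMD SME
 * where the encryption bit lives in sme_me_mask): phys_to_dma() yields
 * the bus address with the encryption bit OR-ed in via __sme_set(), and
 * dma_to_phys() masks it back out via __sme_clr(), so
 *
 *	dma_to_phys(dev, phys_to_dma(dev, paddr)) == paddr
 *
 * holds for any address representable under the device's dma_pfn_offset.
 */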

#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
void dma_mark_clean(void *addr, size_t size);
#else
static inline void dma_mark_clean(void *addr, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */

u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);
int dma_direct_supported(struct device *dev, u64 mask);
int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr);
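
/*
 * A minimal usage sketch for the declarations above (not part of this
 * header; "dev", "page", and "len" are hypothetical), showing the
 * map-then-check error-handling pattern:
 *
 *	dma_addr_t handle = dma_direct_map_page(dev, page, 0, len,
 *						DMA_TO_DEVICE, 0);
 *	if (dma_direct_mapping_error(dev, handle))
 *		return -ENOMEM;
 */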
#endif /* _LINUX_DMA_DIRECT_H */