/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_DIRECT_H
#define _LINUX_DMA_DIRECT_H 1

#include <linux/dma-mapping.h>
#include <linux/mem_encrypt.h>

#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#else
/*
 * Default translation between CPU physical addresses and device/bus
 * addresses for architectures that do not provide their own
 * <asm/dma-direct.h>: the device sees system RAM shifted by a fixed
 * per-device PFN offset.
 */
static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	dma_addr_t dev_addr = (dma_addr_t)paddr;

	return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}

static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
	phys_addr_t paddr = (phys_addr_t)dev_addr;

	return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}
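
/*
 * Worked example with hypothetical values: on a 4K-page system
 * (PAGE_SHIFT == 12), a device with dma_pfn_offset == 0x80000 subtracts
 * 0x80000 << 12 == 0x80000000 from every physical address, so
 * __phys_to_dma() maps paddr 0x80001000 to bus address 0x1000, and
 * __dma_to_phys() adds the offset back to recover 0x80001000.
 */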

/*
 * Check whether the device can reach the bus address range
 * [addr, addr + size - 1], honoring the device's DMA mask and, when one
 * is set, the bus DMA mask limit.
 */
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;

	return addr + size - 1 <=
		min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
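
/*
 * Illustrative sketch (hypothetical caller, not taken from this header):
 * the helpers above compose to check that a buffer is reachable by the
 * device before it is programmed into hardware:
 *
 *	dma_addr_t addr = phys_to_dma(dev, virt_to_phys(buf));
 *
 *	if (!dma_capable(dev, addr, size))
 *		return -EIO;
 */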

/*
 * If memory encryption is supported, phys_to_dma will set the memory encryption
 * bit in the DMA address, and dma_to_phys will clear it.  The raw __phys_to_dma
 * and __dma_to_phys versions should only be used on non-encrypted memory for
 * special occasions like DMA coherent buffers.
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return __sme_set(__phys_to_dma(dev, paddr));
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return __sme_clr(__dma_to_phys(dev, daddr));
}
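
/*
 * Sketch for illustration only (the helper name is hypothetical, not part
 * of this header): on memory the device may access, the two translations
 * are inverses, which a self-check could assert as:
 */
static inline bool __dma_direct_addr_round_trips(struct device *dev,
		phys_addr_t paddr)
{
	return dma_to_phys(dev, phys_to_dma(dev, paddr)) == paddr;
}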

u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page);
int dma_direct_supported(struct device *dev, u64 mask);
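
/*
 * Usage sketch (hypothetical caller, shown for illustration only): the
 * alloc and free entry points are symmetric, so a direct-mapped coherent
 * buffer is obtained and released like this:
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_direct_alloc(dev, size, &dma_handle, GFP_KERNEL, 0);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	dma_direct_free(dev, size, cpu_addr, dma_handle, 0);
 */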
#endif /* _LINUX_DMA_DIRECT_H */