// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * busses using the iommu infrastructure
 */

#include <linux/dma-direct.h>
#include <linux/pci.h>
#include <asm/iommu.h>

/*
 * Generic iommu implementation
 */

/*
 * The coherent mask may be smaller than the real mask, check if we can
 * really use a direct window.
 */
static inline bool dma_iommu_alloc_bypass(struct device *dev)
{
	return dev->archdata.iommu_bypass && !iommu_fixed_is_weak &&
		dma_direct_supported(dev, dev->coherent_dma_mask);
}

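/*
 * Streaming mappings may bypass the IOMMU when a bypass window exists and
 * either the platform's fixed mapping is not weakly ordered or the caller
 * explicitly accepted weak ordering via DMA_ATTR_WEAK_ORDERING.
 */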
static inline bool dma_iommu_map_bypass(struct device *dev,
		unsigned long attrs)
{
	return dev->archdata.iommu_bypass &&
		(!iommu_fixed_is_weak || (attrs & DMA_ATTR_WEAK_ORDERING));
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag,
				      unsigned long attrs)
{
	if (dma_iommu_alloc_bypass(dev))
		return dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
				    dma_handle, dev->coherent_dma_mask, flag,
				    dev_to_node(dev));
}

static void dma_iommu_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle,
				    unsigned long attrs)
{
	if (dma_iommu_alloc_bypass(dev))
		dma_direct_free(dev, size, vaddr, dma_handle, attrs);
	else
		iommu_free_coherent(get_iommu_table_base(dev), size, vaddr,
				    dma_handle);
}

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	if (dma_iommu_map_bypass(dev, attrs))
		return dma_direct_map_page(dev, page, offset, size, direction,
					   attrs);
	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
			      size, device_to_mask(dev), direction, attrs);
}

static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				 size_t size, enum dma_data_direction direction,
				 unsigned long attrs)
{
	if (!dma_iommu_map_bypass(dev, attrs))
		iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size,
				 direction, attrs);
}

static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	if (dma_iommu_map_bypass(dev, attrs))
		return dma_direct_map_sg(dev, sglist, nelems, direction, attrs);
	return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
				device_to_mask(dev), direction, attrs);
}

static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
			       int nelems, enum dma_data_direction direction,
			       unsigned long attrs)
{
	if (!dma_iommu_map_bypass(dev, attrs))
		ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
				   direction, attrs);
}

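/*
 * Ask the PCI host bridge (PHB) whether this device can use a direct DMA
 * window that bypasses the IOMMU for the given mask.
 */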
static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_controller *phb = pci_bus_to_host(pdev->bus);

	return phb->controller_ops.iommu_bypass_supported &&
		phb->controller_ops.iommu_bypass_supported(pdev, mask);
}

/* We support DMA to/from any memory page via the iommu */
int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	if (!tbl) {
		dev_info(dev, "Warning: IOMMU dma not supported: mask 0x%08llx"
			", table unavailable\n", mask);
		return 0;
	}

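	/*
	 * Prefer the direct (bypass) window when the platform reports that
	 * the device can address all of memory with this mask.
	 */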
	if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
		dev->archdata.iommu_bypass = true;
		dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
		return 1;
	}

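	/* Reject masks that cannot even reach the start of the DMA window. */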
	if (tbl->it_offset > (mask >> tbl->it_page_shift)) {
		dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
		dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
				mask, tbl->it_offset << tbl->it_page_shift);
		return 0;
	}

	dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
	dev->archdata.iommu_bypass = false;
	return 1;
}

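/*
 * Report the DMA mask the device needs for everything this path can map:
 * the direct-mapping mask when bypass is possible, otherwise a mask
 * covering the IOMMU table's DMA window.
 */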
u64 dma_iommu_get_required_mask(struct device *dev)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);
	u64 mask;

	if (!tbl)
		return 0;

	if (dev_is_pci(dev)) {
		u64 bypass_mask = dma_direct_get_required_mask(dev);

		if (dma_iommu_bypass_supported(dev, bypass_mask))
			return bypass_mask;
	}

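	/*
	 * Build an all-ones mask wide enough to cover the end of the
	 * table's window (it_offset + it_size).
	 */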
	mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) - 1);
	mask += mask - 1;

	return mask;
}

const struct dma_map_ops dma_iommu_ops = {
	.alloc			= dma_iommu_alloc_coherent,
	.free			= dma_iommu_free_coherent,
	.map_sg			= dma_iommu_map_sg,
	.unmap_sg		= dma_iommu_unmap_sg,
	.dma_supported		= dma_iommu_dma_supported,
	.map_page		= dma_iommu_map_page,
	.unmap_page		= dma_iommu_unmap_page,
	.get_required_mask	= dma_iommu_get_required_mask,
};
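
/*
 * Illustrative usage (a sketch, not code from this file): platform setup
 * code typically attaches an iommu_table to the device and then selects
 * these ops, e.g.
 *
 *	set_iommu_table_base(&pdev->dev, tbl);
 *	set_dma_ops(&pdev->dev, &dma_iommu_ops);
 *
 * after which the generic DMA API (dma_map_page(), dma_map_sg(), ...) on
 * that device goes through the TCE-based paths above, unless
 * dma_iommu_dma_supported() has enabled the direct bypass window.
 */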