// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-direct.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static bool disable_dac_quirk __read_mostly;

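/*
 * Default DMA mapping operations for the platform; an IOMMU implementation
 * detected at boot may install its own ops here.
 */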
const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole physical memory. This
 * is useful if a user wants to use an IOMMU only for KVM device assignment
 * to guests and not for driver DMA translation.
 * Pass-through can also be enabled by default with
 * CONFIG_IOMMU_DEFAULT_PASSTHROUGH and disabled again with iommu=nopt at
 * boot time.
 */
#ifdef CONFIG_IOMMU_DEFAULT_PASSTHROUGH
int iommu_pass_through __read_mostly = 1;
#else
int iommu_pass_through __read_mostly;
#endif

extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

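/*
 * Walk the compile-time table of IOMMU detection routines: run each detect()
 * hook, do early initialization for the implementations that report a hit,
 * and stop at the first one flagged IOMMU_FINISH_IF_DETECTED.
 */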
void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}

/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			pr_warn("forcesac option ignored.\n");
		if (!strncmp(p, "allowdac", 8))
			pr_warn("allowdac option ignored.\n");
		if (!strncmp(p, "nodac", 5))
			pr_warn("nodac option ignored.\n");
		if (!strncmp(p, "usedac", 6)) {
			disable_dac_quirk = true;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;
		if (!strncmp(p, "nopt", 4))
			iommu_pass_through = 0;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

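/*
 * Late IOMMU setup: invoke the platform's iommu_init() hook selected via
 * x86_init, then run late_init() for every implementation that was marked
 * IOMMU_DETECTED in pci_iommu_alloc().
 */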
static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;

	x86_init.iommu.iommu_init();

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

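/* Limit each device behind the bridge to 32-bit DMA, i.e. no dual address cycles. */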
static int via_no_dac_cb(struct pci_dev *pdev, void *data)
{
	pdev->dev.bus_dma_mask = DMA_BIT_MASK(32);
	return 0;
}

static void via_no_dac(struct pci_dev *dev)
{
	if (!disable_dac_quirk) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		pci_walk_bus(dev->subordinate, via_no_dac_cb, NULL);
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
			      PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif