// SPDX-License-Identifier: GPL-2.0-only
/*
 * Microblaze support for cache consistent memory.
 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2010 PetaLogix
 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
 *
 * Based on PowerPC version derived from arch/arm/mm/consistent.c
 * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 * Copyright (C) 2000 Russell King
 */

#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/dma-noncoherent.h>

#include <asm/pgalloc.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <linux/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cpuinfo.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_MMU
/* I have to use dcache values because I can't rely on the RAM size. */
# define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
#endif

/*
 * Consistent memory allocators. Used for DMA devices that want to share
 * uncached memory with the processor core.
 * My crufty no-MMU approach is simple: on the HW platform we can optionally
 * mirror the DDR up above the processor-cacheable region, so memory accessed
 * through this mirror region is not cached. It's allocated from the same
 * pool as normal memory, but the handle we return is shifted up into the
 * uncached region. This will no doubt cause big problems if memory allocated
 * here is not also freed properly. -- JW
 */
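/*
 * A worked example of the shadow arithmetic (the numbers are hypothetical,
 * not from any particular board): with DDR based at 0x80000000 and a
 * 256 MB cacheable window, UNCACHED_SHADOW_MASK comes out to 0x10000000,
 * so a buffer at 0x80001000 is handed back as 0x90001000 -- the same DDR
 * cells, reached through the uncached alias.
 */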
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long order, vaddr;
	void *ret;
	unsigned int i, err = 0;
	struct page *page, *end;

#ifdef CONFIG_MMU
	phys_addr_t pa;
	struct vm_struct *area;
	unsigned long va;
#endif

	if (in_interrupt())
		BUG();

	/* Only allocate page-sized areas. */
	size = PAGE_ALIGN(size);
	order = get_order(size);

	vaddr = __get_free_pages(gfp | __GFP_ZERO, order);
	if (!vaddr)
		return NULL;

	/*
	 * We need to ensure that there are no cachelines in use, or worse
	 * dirty, in this area.
	 */
	flush_dcache_range(virt_to_phys((void *)vaddr),
					virt_to_phys((void *)vaddr) + size);

#ifndef CONFIG_MMU
	ret = (void *)vaddr;
	/*
	 * Here's the magic!  Note that if the uncached shadow is not
	 * implemented, it's up to the calling code to also test that
	 * condition and make other arrangements, such as manually
	 * flushing the cache.
	 */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
	ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
# endif
	if ((unsigned int)ret > cpuinfo.dcache_base &&
				(unsigned int)ret < cpuinfo.dcache_high)
		pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");

	/* The dma_handle is the same as the physical (shadowed) address. */
	*dma_handle = (dma_addr_t)ret;
#else
	/* Allocate some common virtual space to map the new pages. */
	area = get_vm_area(size, VM_ALLOC);
	if (!area) {
		free_pages(vaddr, order);
		return NULL;
	}
	va = (unsigned long) area->addr;
	ret = (void *)va;

	/* This gives us the real physical address of the first page. */
	*dma_handle = pa = __virt_to_phys(vaddr);
#endif

	/*
	 * Free the wasted pages. We skip the first page since we know
	 * that it will have count = 1 and won't require freeing. We also
	 * mark the pages in use as reserved so that remap_page_range()
	 * works.
	 */
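	/*
	 * A worked example with hypothetical numbers: for size = 3 * PAGE_SIZE,
	 * get_order() returns 2, so __get_free_pages() handed us four pages.
	 * split_page() turns that order-2 block into four independent order-0
	 * pages; the first loop below reserves (and, with an MMU, maps) the
	 * three pages in use, and the second loop frees the fourth.
	 */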
	page = virt_to_page(vaddr);
	end = page + (1 << order);

	split_page(page, order);

	for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
#ifdef CONFIG_MMU
		/* MS: This is the whole magic - use cache-inhibited pages. */
		err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
#endif

		SetPageReserved(page);
		page++;
	}

	/* Free the otherwise unused pages. */
	while (page < end) {
		__free_page(page);
		page++;
	}

	if (err) {
		free_pages(vaddr, order);
		return NULL;
	}

	return ret;
}

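/*
 * Example usage (an illustrative sketch, not part of this file's API):
 * drivers do not call arch_dma_alloc()/arch_dma_free() directly; they go
 * through the generic DMA API, which routes here on Microblaze. The
 * device pointer "dev" below is hypothetical.
 *
 *	void *buf;
 *	dma_addr_t dma;
 *
 *	buf = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	(hand dma to the device; CPU and device now share buf)
 *	dma_free_coherent(dev, SZ_4K, buf, dma);
 */
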
#ifdef CONFIG_MMU
static pte_t *consistent_virt_to_pte(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	return pte_offset_kernel(pmd_offset(pgd_offset_k(addr), addr), addr);
}

long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr,
		dma_addr_t dma_addr)
{
	pte_t *ptep = consistent_virt_to_pte(vaddr);

	if (pte_none(*ptep) || !pte_present(*ptep))
		return 0;

	return pte_pfn(*ptep);
}
#endif

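/*
 * A simplified sketch of how the generic dma-mapping code might use the
 * hook above when mmap()ing a coherent buffer to user space (the vma and
 * error handling here are hypothetical):
 *
 *	long pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
 *
 *	err = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
 *			      vma->vm_end - vma->vm_start,
 *			      vma->vm_page_prot);
 */
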
/*
 * Free page(s) as defined by the above mapping.
 */
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	struct page *page;

	if (in_interrupt())
		BUG();

	size = PAGE_ALIGN(size);

#ifndef CONFIG_MMU
	/* Clear the shadow-mask bits in the address and free as usual. */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
	vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
# endif
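	/*
	 * Continuing the hypothetical numbers from the allocator comment
	 * above: 0x90001000 & ~0x10000000 gives back 0x80001000, the
	 * cacheable kernel address the pages were originally allocated at.
	 */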
	page = virt_to_page(vaddr);

	do {
		__free_reserved_page(page);
		page++;
	} while (size -= PAGE_SIZE);
#else
	do {
		pte_t *ptep = consistent_virt_to_pte(vaddr);
		unsigned long pfn;

		if (!pte_none(*ptep) && pte_present(*ptep)) {
			pfn = pte_pfn(*ptep);
			pte_clear(&init_mm, (unsigned int)vaddr, ptep);
			if (pfn_valid(pfn)) {
				page = pfn_to_page(pfn);
				__free_reserved_page(page);
			}
		}
		vaddr += PAGE_SIZE;
	} while (size -= PAGE_SIZE);

	/* Flush the TLB to drop the now-stale uncached mappings. */
	flush_tlb_all();
#endif
}