/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/radix-tree.h>
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

#ifndef arch_memremap_wb
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
	return (__force void *)ioremap_cache(offset, size);
}
#endif

#ifndef arch_memremap_can_ram_remap
static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
					unsigned long flags)
{
	return true;
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size,
			   unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	/* In the simple case just return the existing linear address */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
	    arch_memremap_can_ram_remap(offset, size, flags))
		return __va(offset);

	return NULL; /* fallback to arch_memremap_wb */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
 *		  MEMREMAP_ENC, MEMREMAP_DEC
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable. In the case of multiple flags, the different
 * mapping types will be attempted in the order listed below until one of
 * them succeeds.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture. This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM,
 * memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility. Attempts to
 * map System RAM with this mapping type will fail.
 *
 * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
 * be coalesced together (e.g. in the CPU's write buffers), but is otherwise
 * uncached. Attempts to map System RAM with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size,
				       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
	void *addr = NULL;

	if (!flags)
		return NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map. Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in System RAM.
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size, flags);
		if (!addr)
			addr = arch_memremap_wb(offset, size);
	}

	/*
	 * If we don't have a mapping yet and other request flags are
	 * present then we will be attempting to establish a new virtual
	 * address mapping. Enforce that this mapping is not aliasing
	 * System RAM.
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT))
		addr = ioremap_wt(offset, size);

	if (!addr && (flags & MEMREMAP_WC))
		addr = ioremap_wc(offset, size);

	return addr;
}
EXPORT_SYMBOL(memremap);

void memunmap(void *addr)
{
	if (is_vmalloc_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
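
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * one way a caller might use memremap()/memunmap(), requesting a cacheable
 * mapping and falling back to write-combine when MEMREMAP_WB cannot be
 * satisfied. The function name and the idea of reading a u32 header are
 * hypothetical.
 */
static u32 __maybe_unused example_memremap_read_header(resource_size_t start,
		size_t size)
{
	void *addr = memremap(start, size, MEMREMAP_WB | MEMREMAP_WC);
	u32 val;

	if (!addr)
		return 0;
	/* plain loads/stores are legal here, no readl()/writel() needed */
	val = *(u32 *) addr;
	memunmap(addr);
	return val;
}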

static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
EXPORT_SYMBOL(devm_memremap);

void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);
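
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a hypothetical probe path using the device-managed variant, so the
 * mapping is torn down automatically when the device is unbound. The
 * function name and the assumption that @res comes from the device's
 * firmware description are invented for illustration.
 */
static int __maybe_unused example_devm_probe_map(struct device *dev,
		struct resource *res)
{
	void *base = devm_memremap(dev, res->start, resource_size(res),
			MEMREMAP_WB);

	if (IS_ERR(base))
		return PTR_ERR(base);
	/* base remains valid until driver detach; no explicit unmap needed */
	return 0;
}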

#ifdef CONFIG_ZONE_DEVICE
static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

struct page_map {
	struct resource res;
	struct percpu_ref *ref;
	struct dev_pagemap pgmap;
	struct vmem_altmap altmap;
};

static unsigned long order_at(struct resource *res, unsigned long pgoff)
{
	unsigned long phys_pgoff = PHYS_PFN(res->start) + pgoff;
	unsigned long nr_pages, mask;

	nr_pages = PHYS_PFN(resource_size(res));
	if (nr_pages == pgoff)
		return ULONG_MAX;

	/*
	 * What is the largest aligned power-of-2 range available from
	 * this resource pgoff to the end of the resource range,
	 * considering the alignment of the current pgoff?
	 */
	mask = phys_pgoff | rounddown_pow_of_two(nr_pages - pgoff);
	if (!mask)
		return ULONG_MAX;

	return find_first_bit(&mask, BITS_PER_LONG);
}

#define foreach_order_pgoff(res, order, pgoff) \
	for (pgoff = 0, order = order_at((res), pgoff); order < ULONG_MAX; \
			pgoff += 1UL << order, order = order_at((res), pgoff))

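/*
 * Worked example (editor's note; the pfn values are hypothetical): for a
 * resource whose first pfn is 0x10000 and which spans 24 pages,
 * foreach_order_pgoff() visits pgoff 0 with order 4 (a 16-page slot) and
 * then pgoff 16 with order 3 (an 8-page slot); at pgoff 24 order_at()
 * returns ULONG_MAX and the walk ends. Each slot is power-of-2 sized and
 * naturally aligned, matching the multi-order radix entries inserted by
 * devm_memremap_pages() below.
 */
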
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
int device_private_entry_fault(struct vm_area_struct *vma,
		       unsigned long addr,
		       swp_entry_t entry,
		       unsigned int flags,
		       pmd_t *pmdp)
{
	struct page *page = device_private_entry_to_page(entry);

	/*
	 * The page_fault() callback must migrate the page back to system
	 * memory so that the CPU can access it. This might fail for various
	 * reasons (device issue, device was unsafely unplugged, ...). When
	 * such error conditions happen, the callback must return
	 * VM_FAULT_SIGBUS.
	 *
	 * Note that because memory cgroup charges are accounted to the device
	 * memory, this should never fail because of memory restrictions (but
	 * allocation of a regular system page might still fail because we are
	 * out of memory).
	 *
	 * There is a more in-depth description of what that callback can and
	 * cannot do, in include/linux/memremap.h.
	 */
	return page->pgmap->page_fault(vma, addr, page, flags, pmdp);
}
EXPORT_SYMBOL(device_private_entry_fault);
#endif /* CONFIG_DEVICE_PRIVATE */

static void pgmap_radix_release(struct resource *res)
{
	unsigned long pgoff, order;

	mutex_lock(&pgmap_lock);
	foreach_order_pgoff(res, order, pgoff)
		radix_tree_delete(&pgmap_radix, PHYS_PFN(res->start) + pgoff);
	mutex_unlock(&pgmap_lock);

	synchronize_rcu();
}

static unsigned long pfn_first(struct page_map *page_map)
{
	struct dev_pagemap *pgmap = &page_map->pgmap;
	const struct resource *res = &page_map->res;
	struct vmem_altmap *altmap = pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (altmap)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct page_map *page_map)
{
	const struct resource *res = &page_map->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)

static void devm_memremap_pages_release(struct device *dev, void *data)
{
	struct page_map *page_map = data;
	struct resource *res = &page_map->res;
	resource_size_t align_start, align_size;
	struct dev_pagemap *pgmap = &page_map->pgmap;
	unsigned long pfn;

	for_each_device_pfn(pfn, page_map)
		put_page(pfn_to_page(pfn));

	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);

	mem_hotplug_begin();
	arch_remove_memory(align_start, align_size);
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_radix_release(res);
	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
			"%s: failed to free all reserved pages\n", __func__);
}

/* assumes rcu_read_lock() held at entry */
struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
	struct page_map *page_map;

	WARN_ON_ONCE(!rcu_read_lock_held());

	page_map = radix_tree_lookup(&pgmap_radix, PHYS_PFN(phys));
	return page_map ? &page_map->pgmap : NULL;
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
 * @ref: a live per-cpu reference count
 * @altmap: optional descriptor for allocating the memmap from @res
 *
 * Notes:
 * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
 *    (or devm release event). The expected order of events is that @ref has
 *    been through percpu_ref_kill() before devm_memremap_pages_release(). The
 *    wait for the completion of all references being dropped and
 *    percpu_ref_exit() must occur after devm_memremap_pages_release().
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap)
{
	resource_size_t align_start, align_size, align_end;
	unsigned long pfn, pgoff, order;
	pgprot_t pgprot = PAGE_KERNEL;
	struct dev_pagemap *pgmap;
	struct page_map *page_map;
	int error, nid, is_ram;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (!ref)
		return ERR_PTR(-EINVAL);

	page_map = devres_alloc_node(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
	if (!page_map)
		return ERR_PTR(-ENOMEM);
	pgmap = &page_map->pgmap;

	memcpy(&page_map->res, res, sizeof(*res));

	pgmap->dev = dev;
	if (altmap) {
		memcpy(&page_map->altmap, altmap, sizeof(*altmap));
		pgmap->altmap = &page_map->altmap;
	}
	pgmap->ref = ref;
	pgmap->res = &page_map->res;
	pgmap->type = MEMORY_DEVICE_HOST;
	pgmap->page_fault = NULL;
	pgmap->page_free = NULL;
	pgmap->data = NULL;

	mutex_lock(&pgmap_lock);
	error = 0;
	align_end = align_start + align_size - 1;

	foreach_order_pgoff(res, order, pgoff) {
		struct dev_pagemap *dup;

		rcu_read_lock();
		dup = find_dev_pagemap(res->start + PFN_PHYS(pgoff));
		rcu_read_unlock();
		if (dup) {
			dev_err(dev, "%s: %pr collides with mapping for %s\n",
					__func__, res, dev_name(dup->dev));
			error = -EBUSY;
			break;
		}
		error = __radix_tree_insert(&pgmap_radix,
				PHYS_PFN(res->start) + pgoff, order, page_map);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();
	error = arch_add_memory(nid, align_start, align_size, false);
	if (!error)
		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
					align_start >> PAGE_SHIFT,
					align_size >> PAGE_SHIFT);
	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, page_map) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer. It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list. Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
		page->pgmap = pgmap;
		percpu_ref_get(ref);
	}
	devres_add(dev, page_map);
	return __va(res->start);

 err_add_memory:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
 err_radix:
	pgmap_radix_release(res);
	devres_free(page_map);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);
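
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a minimal caller, assuming @ref was already made live with
 * percpu_ref_init() and that the driver arranges the percpu_ref_kill() /
 * drain / percpu_ref_exit() ordering described in note 1/ of the
 * kernel-doc above. All names here are hypothetical.
 */
static __maybe_unused void *example_map_device_pages(struct device *dev,
		struct resource *res, struct percpu_ref *ref)
{
	void *base = devm_memremap_pages(dev, res, ref, NULL);

	if (IS_ERR(base))
		return base;

	/*
	 * Every pfn in the section-aligned range now has a struct page whose
	 * ->pgmap points back at this mapping, so the memory can be handed
	 * to paths that require struct page (e.g. DAX, DMA).
	 */
	return base;
}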

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
	/*
	 * 'memmap_start' is the virtual address for the first "struct
	 * page" in this range of the vmemmap array. In the case of
	 * CONFIG_SPARSEMEM_VMEMMAP a page_to_pfn conversion is simple
	 * pointer arithmetic, so we can perform this to_vmem_altmap()
	 * conversion without concern for the initialization state of
	 * the struct page fields.
	 */
	struct page *page = (struct page *) memmap_start;
	struct dev_pagemap *pgmap;

	/*
	 * Unconditionally retrieve a dev_pagemap associated with the
	 * given physical address, this is only for use in the
	 * arch_{add|remove}_memory() for setting up and tearing down
	 * the memmap.
	 */
	rcu_read_lock();
	pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page)));
	rcu_read_unlock();

	return pgmap ? pgmap->altmap : NULL;
}
#endif /* CONFIG_ZONE_DEVICE */