#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

#define INIT_MEMBLOCK_REGIONS	128
#define INIT_PHYSMEM_REGIONS	4

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
 */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};

/**
 * struct memblock_region - represents a memory region
 * @base: physical address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 * @physmem: all physical memory
 */
struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
};

extern struct memblock memblock;
extern int memblock_debug;

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
#endif

#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					int nid, enum memblock_flags flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
enum memblock_flags choose_memblock_flags(void);

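/*
 * Example (illustrative sketch, not part of this header's API): a typical
 * early-boot sequence registers usable RAM and then carves out a range the
 * allocator must never hand out. The base/size values are made up for the
 * example.
 *
 *	memblock_add(0x80000000, 0x40000000);		// 1 GiB of RAM
 *	memblock_reserve(0x80000000, 0x100000);		// keep 1 MiB for firmware
 *	if (memblock_is_region_reserved(0x80000000, 0x100000))
 *		pr_info("firmware region reserved\n");
 */
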
unsigned long memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);

/* Low level functions */
int memblock_add_range(struct memblock_type *type,
		       phys_addr_t base, phys_addr_t size,
		       int nid, enum memblock_flags flags);

void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);

void __memblock_free_late(phys_addr_t base, phys_addr_t size);

/**
 * for_each_mem_range - iterate through memblock areas from type_a that are
 * not included in type_b. Or just type_a, if type_b is %NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))

/**
 * for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a that are not included in type_b. Or just type_a, if type_b is %NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		 __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				      p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))

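/*
 * Example (illustrative, assumes memblock is already initialized): walk
 * every reserved range and print it, much like memblock_dump_all() does
 * for the reserved type.
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		pr_info("reserved: %pa..%pa\n", &start, &end);
 */
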
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

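/*
 * Example (illustrative sketch; the iterator above only exists when
 * CONFIG_HAVE_MEMBLOCK_NODE_MAP is set): count the pages registered for
 * one node. The node id 0 is arbitrary here.
 *
 *	unsigned long start_pfn, end_pfn, nr_pages = 0;
 *	int i;
 *
 *	for_each_mem_pfn_range(i, 0, &start_pfn, &end_pfn, NULL)
 *		nr_pages += end_pfn - start_pfn;
 */
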
/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)

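/*
 * Example (illustrative, not taken from kernel code): sum up all memory
 * that is known to memblock and not reserved, across all nodes and with
 * no attribute filtering.
 *
 *	phys_addr_t start, end, free = 0;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		free += end - start;
 */
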
/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
			       nid, flags, p_start, p_end, p_nid)

static inline void memblock_set_region_flags(struct memblock_region *r,
					     enum memblock_flags flags)
{
	r->flags |= flags;
}

static inline void memblock_clear_region_flags(struct memblock_region *r,
					       enum memblock_flags flags)
{
	r->flags &= ~flags;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

phys_addr_t memblock_phys_alloc(phys_addr_t size, phys_addr_t align);

void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid_nopanic(phys_addr_t size, phys_addr_t align,
				     phys_addr_t min_addr, phys_addr_t max_addr,
				     int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
			     phys_addr_t min_addr, phys_addr_t max_addr,
			     int nid);

static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

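/*
 * Example (illustrative sketch): allocating a zeroed, cache-line aligned
 * table before the buddy allocator is up. The element count is made up;
 * the panicking variant is used here, so no error check is needed, while
 * code that can degrade gracefully would call memblock_alloc_nopanic().
 *
 *	struct my_entry *table;		// hypothetical structure
 *
 *	table = memblock_alloc(128 * sizeof(*table), SMP_CACHE_BYTES);
 */
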
static inline void * __init memblock_alloc_raw(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
					  MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_from(phys_addr_t size,
						phys_addr_t align,
						phys_addr_t min_addr)
{
	return memblock_alloc_try_nid(size, align, min_addr,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_nopanic(phys_addr_t size,
						   phys_addr_t align)
{
	return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
					      MEMBLOCK_ALLOC_ACCESSIBLE,
					      NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_low(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_low_nopanic(phys_addr_t size,
							phys_addr_t align)
{
	return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
					      ARCH_LOW_ADDRESS_LIMIT,
					      NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_from_nopanic(phys_addr_t size,
							 phys_addr_t align,
							 phys_addr_t min_addr)
{
	return memblock_alloc_try_nid_nopanic(size, align, min_addr,
					      MEMBLOCK_ALLOC_ACCESSIBLE,
					      NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_node(phys_addr_t size,
						phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

static inline void * __init memblock_alloc_node_nopanic(phys_addr_t size,
							 int nid)
{
	return memblock_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES,
					      MEMBLOCK_LOW_LIMIT,
					      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

static inline void __init memblock_free_early(phys_addr_t base,
					      phys_addr_t size)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_early_nid(phys_addr_t base,
						  phys_addr_t size, int nid)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	__memblock_free_late(base, size);
}

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up. If this returns true,
 * memblock will allocate memory in the bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}

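/*
 * Example (illustrative): an architecture that wants a window of early
 * allocations to land near the kernel image can flip the direction and
 * restore it afterwards; setup_stuff_early() is a hypothetical caller.
 *
 *	memblock_set_bottom_up(true);
 *	setup_stuff_early();
 *	memblock_set_bottom_up(false);
 */
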
phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					enum memblock_flags flags);
phys_addr_t memblock_alloc_base_nid(phys_addr_t size,
				    phys_addr_t align, phys_addr_t max_addr,
				    int nid, enum memblock_flags flags);
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				  phys_addr_t max_addr);
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                              limiting allocations to what is currently
 *                              accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}

#define for_each_memblock(memblock_type, region)			\
	for (region = memblock.memblock_type.regions;			\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
	     region++)

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

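/*
 * Example (illustrative): for_each_memblock() takes the unquoted field
 * name of the global memblock instance, so "memory" below expands to
 * memblock.memory.
 *
 *	struct memblock_region *region;
 *
 *	for_each_memblock(memory, region)
 *		pr_info("memory: base=%pa size=%pa nid=%d\n",
 *			&region->base, &region->size,
 *			memblock_get_region_node(region));
 */
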
extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit);

#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
					 * shift passed via *_hash_shift */
#define HASH_ZERO	0x00000004	/* Zero allocated hash table */

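/*
 * Example (illustrative sketch, loosely modelled on how boot-time hash
 * tables such as the dentry cache are sized): ask for an auto-sized
 * (numentries == 0), zeroed table during early boot; "my_hash_shift" is
 * hypothetical.
 *
 *	static unsigned int my_hash_shift __initdata;
 *
 *	void *table = alloc_large_system_hash("my-table", sizeof(void *),
 *					      0, 14, HASH_EARLY | HASH_ZERO,
 *					      &my_hash_shift, NULL, 0, 0);
 */
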
/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */