#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
 */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};
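
/*
 * Illustrative sketch, not part of this header: architectures normally
 * attach these attributes to ranges they have already registered, using the
 * memblock_mark_*() helpers declared further down.  The addresses and sizes
 * here are made up for the example.
 *
 *	memblock_mark_nomap(0x80000000ULL, SZ_1M);	// keep a firmware range out of the linear map
 *	memblock_mark_mirror(0x100000000ULL, SZ_1M);	// range backed by mirrored memory
 */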

/**
 * struct memblock_region - represents a memory region
 * @base: physical address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 * @physmem: all physical memory
 */
struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
};

extern struct memblock memblock;
extern int memblock_debug;

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif

#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);

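/*
 * Illustrative sketch, not part of this header: a typical early-boot
 * sequence first registers the RAM the platform reports and then reserves
 * ranges that must never be handed out (kernel image, initrd, DT, ...).
 * The addresses, sizes and the initrd_* variables are made up.
 *
 *	memblock_add(0x80000000ULL, 0x40000000ULL);		// 1 GiB of RAM
 *	memblock_reserve(__pa(_stext), _end - _stext);		// kernel image
 *	memblock_reserve(initrd_start_phys, initrd_size);	// boot-loader provided initrd
 */
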
unsigned long memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);

/* Low level functions */
int memblock_add_range(struct memblock_type *type,
		       phys_addr_t base, phys_addr_t size,
		       int nid, enum memblock_flags flags);

void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);

void __memblock_free_late(phys_addr_t base, phys_addr_t size);

/**
 * for_each_mem_range - iterate through memblock areas from type_a that are
 * not included in type_b, or through all of type_a if type_b is %NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))

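/*
 * Illustrative sketch, not part of this header: for_each_free_mem_range()
 * further down is a wrapper around this macro; an open-coded walk over
 * memory that is not reserved looks like this.
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_mem_range(i, &memblock.memory, &memblock.reserved,
 *			   NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL)
 *		pr_info("free range: %pa..%pa\n", &start, &end);
 */
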
/**
 * for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a that are not included in type_b, or through all of type_a if
 * type_b is %NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		__next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))

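/*
 * Illustrative sketch, not part of this header: dumping every reserved
 * range early in boot.
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		pr_info("reserved: %pa..%pa\n", &start, &end);
 */
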
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
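
/*
 * Illustrative sketch, not part of this header: node/zone initialization
 * code typically walks the early pfn ranges like this (only available under
 * CONFIG_HAVE_MEMBLOCK_NODE_MAP).
 *
 *	int i, nid;
 *	unsigned long start_pfn, end_pfn;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("node %d: pfn %lu..%lu\n", nid, start_pfn, end_pfn);
 */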

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
				  unsigned long *out_spfn,
				  unsigned long *out_epfn);
/**
 * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone have been initialized.
 * The main assumption is that the zone start, end, and pgdat have been
 * associated. This way we can use the zone to determine the NUMA node, and
 * whether a given part of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)	\
	for (i = 0,							\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);	\
	     i != U64_MAX;						\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

/**
 * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from the current position. Available as soon as memblock
 * is initialized.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
	for (; i != U64_MAX;						  \
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
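
/*
 * Illustrative sketch, not part of this header: the deferred struct page
 * initialization code walks the free ranges of one zone roughly like this;
 * init_range() stands in for whatever per-range work the caller does.
 *
 *	u64 i;
 *	unsigned long spfn, epfn, nr_pages = 0;
 *
 *	for_each_free_mem_pfn_range_in_zone(i, zone, &spfn, &epfn)
 *		nr_pages += init_range(spfn, epfn);
 */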
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)

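/*
 * Illustrative sketch, not part of this header: summing up the memory that
 * memblock could still hand out.
 *
 *	u64 i;
 *	phys_addr_t start, end, free = 0;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		free += end - start;
 */
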
/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
			       nid, flags, p_start, p_end, p_nid)

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
#define MEMBLOCK_ALLOC_KASAN		1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
				      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
					      phys_addr_t align)
{
	return memblock_phys_alloc_range(size, align, 0,
					 MEMBLOCK_ALLOC_ACCESSIBLE);
}

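/*
 * Illustrative sketch, not part of this header: when only a physical
 * address is needed (the caller maps or converts it itself), a page-sized,
 * page-aligned boot-time buffer can be grabbed like this.
 *
 *	phys_addr_t pa = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 *
 *	if (!pa)
 *		panic("cannot allocate boot buffer\n");
 */
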
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
			     phys_addr_t min_addr, phys_addr_t max_addr,
			     int nid);

static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

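/*
 * Illustrative sketch, not part of this header: the common boot-time pattern
 * is a zeroed, cache-aligned virtual allocation; struct boot_table and
 * nr_entries are made up for the example.
 *
 *	struct boot_table *tbl;
 *
 *	tbl = memblock_alloc(sizeof(*tbl) * nr_entries, SMP_CACHE_BYTES);
 *	if (!tbl)
 *		panic("%s: failed to allocate boot table\n", __func__);
 */
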
static inline void * __init memblock_alloc_raw(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
					  MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_from(phys_addr_t size,
						phys_addr_t align,
						phys_addr_t min_addr)
{
	return memblock_alloc_try_nid(size, align, min_addr,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_low(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_node(phys_addr_t size,
						phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

static inline void __init memblock_free_early(phys_addr_t base,
					      phys_addr_t size)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_early_nid(phys_addr_t base,
						  phys_addr_t size, int nid)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	__memblock_free_late(base, size);
}

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check whether the allocation direction is bottom-up. If this returns
 * true, memblock allocates memory in the bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}

phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}

#define for_each_memblock(memblock_type, region)			\
	for (region = memblock.memblock_type.regions;			\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
	     region++)

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

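/*
 * Illustrative sketch, not part of this header: walking every registered
 * memory region and deriving its pfn span with the accessors above.
 *
 *	struct memblock_region *reg;
 *
 *	for_each_memblock(memory, reg) {
 *		unsigned long start = memblock_region_memory_base_pfn(reg);
 *		unsigned long end = memblock_region_memory_end_pfn(reg);
 *
 *		pr_info("memory region: pfn %lu..%lu\n", start, end);
 *	}
 */
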
extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit);

#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
					 * shift passed via *_hash_shift */
#define HASH_ZERO	0x00000004	/* Zero allocated hash table */

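/*
 * Illustrative sketch, not part of this header: the big boot-time hash
 * tables (dentry, inode, ...) are allocated through this interface; all
 * "my_*" names are made up, only the call signature matches the prototype
 * above.
 *
 *	my_hashtable = alloc_large_system_hash("my-cache",
 *					       sizeof(struct hlist_head),
 *					       0,		// size from amount of memory
 *					       14,		// scale
 *					       HASH_ZERO,
 *					       &my_hash_shift,
 *					       &my_hash_mask,
 *					       0, 0);
 */
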
/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */