blob: 205f62b8d2916b8d3ad9c92ebdfd3ed00aee06fb [file] [log] [blame]
Thomas Gleixner40b0b3f2019-06-03 07:44:46 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Jes Sorensenf14f75b2005-06-21 17:15:02 -07002/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory, uncached memory etc.
 *
8 * It is safe to use the allocator in NMI handlers and other special
9 * unblockable contexts that could otherwise deadlock on locks. This
10 * is implemented by using atomic operations and retries on any
11 * conflicts. The disadvantage is that there may be livelocks in
12 * extreme cases. For better scalability, one allocator can be used
13 * for each CPU.
14 *
15 * The lockless operation only works if there is enough memory
16 * available. If new memory is added to the pool a lock has to be
17 * still taken. So any user relying on locklessness has to ensure
18 * that sufficient memory is preallocated.
19 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers. Therefore, code that
 * uses the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
Jes Sorensenf14f75b2005-06-21 17:15:02 -070025 */
26
Jes Sorensenf14f75b2005-06-21 17:15:02 -070027
Jean-Christophe PLAGNIOL-VILLARD6aae6e02011-05-24 17:13:33 -070028#ifndef __GENALLOC_H__
29#define __GENALLOC_H__
Philipp Zabel9375db02013-04-29 16:17:10 -070030
Zhao Qiangde2dd0e2015-11-30 10:48:52 +080031#include <linux/types.h>
Shawn Guob30afea02014-01-23 15:53:18 -080032#include <linux/spinlock_types.h>
Stephen Bates36a3d1d2017-11-17 15:28:16 -080033#include <linux/atomic.h>
Shawn Guob30afea02014-01-23 15:53:18 -080034
Philipp Zabel9375db02013-04-29 16:17:10 -070035struct device;
36struct device_node;
Zhao Qiangde2dd0e2015-11-30 10:48:52 +080037struct gen_pool;
Philipp Zabel9375db02013-04-29 16:17:10 -070038
/**
 * typedef genpool_algo_t: Allocation callback function type definition
 * @map: Pointer to bitmap
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: optional additional data used by the callback
 * @pool: the pool being allocated from
 * @start_addr: start address of the memory chunk whose bitmap is searched
 */
typedef unsigned long (*genpool_algo_t)(unsigned long *map,
			unsigned long size,
			unsigned long start,
			unsigned int nr,
			void *data, struct gen_pool *pool,
			unsigned long start_addr);
Benjamin Gaignardca279cf2012-10-04 17:13:20 -070054
/*
 * General purpose special memory pool descriptor.
 */
struct gen_pool {
	spinlock_t lock;		/* serializes chunk-list updates */
	struct list_head chunks;	/* list of chunks in this pool */
	int min_alloc_order;		/* minimum allocation order */

	genpool_algo_t algo;		/* allocation function */
	void *data;			/* optional data passed to @algo */

	const char *name;		/* optional pool name */
};
68
Dean Nelson929f9722006-06-23 02:03:21 -070069/*
70 * General purpose special memory pool chunk descriptor.
71 */
72struct gen_pool_chunk {
Dean Nelson929f9722006-06-23 02:03:21 -070073 struct list_head next_chunk; /* next chunk in pool */
Stephen Bates36a3d1d2017-11-17 15:28:16 -080074 atomic_long_t avail;
Jean-Christophe PLAGNIOL-VILLARD3c8f3702011-05-24 17:13:34 -070075 phys_addr_t phys_addr; /* physical starting address of memory chunk */
Dan Williams795ee302019-06-13 15:56:27 -070076 void *owner; /* private data to retrieve at alloc time */
Joonyoung Shim674470d2013-09-11 14:21:43 -070077 unsigned long start_addr; /* start address of memory chunk */
78 unsigned long end_addr; /* end address of memory chunk (inclusive) */
Dean Nelson929f9722006-06-23 02:03:21 -070079 unsigned long bits[0]; /* bitmap for allocating memory chunk */
80};
81
/*
 * gen_pool data descriptor for gen_pool_first_fit_align.
 * Passed via the @data argument of the allocation callback.
 */
struct genpool_data_align {
	int align;		/* alignment by bytes for starting address */
};
88
/*
 * gen_pool data descriptor for gen_pool_fixed_alloc.
 * Passed via the @data argument of the allocation callback.
 */
struct genpool_data_fixed {
	unsigned long offset;		/* The offset of the specific region */
};
95
/* Create a pool; args mirror devm_gen_pool_create(): min_alloc_order, nid. */
extern struct gen_pool *gen_pool_create(int, int);
/* Translate a pool-managed virtual address to its physical address. */
extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
/* Add a chunk, attaching @owner private data retrievable at alloc time. */
extern int gen_pool_add_owner(struct gen_pool *, unsigned long, phys_addr_t,
			     size_t, int, void *);
100
101static inline int gen_pool_add_virt(struct gen_pool *pool, unsigned long addr,
102 phys_addr_t phys, size_t size, int nid)
103{
104 return gen_pool_add_owner(pool, addr, phys, size, nid, NULL);
105}
106
Jean-Christophe PLAGNIOL-VILLARD3c8f3702011-05-24 17:13:34 -0700107/**
108 * gen_pool_add - add a new chunk of special memory to the pool
109 * @pool: pool to add new memory chunk to
110 * @addr: starting address of memory chunk to add to pool
111 * @size: size in bytes of the memory chunk to add to pool
112 * @nid: node id of the node the chunk structure and bitmap should be
113 * allocated on, or -1
114 *
115 * Add a new chunk of special memory to the specified pool.
116 *
117 * Returns 0 on success or a -ve errno on failure.
118 */
119static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
120 size_t size, int nid)
121{
122 return gen_pool_add_virt(pool, addr, -1, size, nid);
123}
/* Tear down a pool created with gen_pool_create(). */
extern void gen_pool_destroy(struct gen_pool *);
/* Allocate with an explicit algorithm and retrieve the chunk's owner data. */
unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
		genpool_algo_t algo, void *data, void **owner);
127
128static inline unsigned long gen_pool_alloc_owner(struct gen_pool *pool,
129 size_t size, void **owner)
130{
131 return gen_pool_alloc_algo_owner(pool, size, pool->algo, pool->data,
132 owner);
133}
134
135static inline unsigned long gen_pool_alloc_algo(struct gen_pool *pool,
136 size_t size, genpool_algo_t algo, void *data)
137{
138 return gen_pool_alloc_algo_owner(pool, size, algo, data, NULL);
139}
140
141/**
142 * gen_pool_alloc - allocate special memory from the pool
143 * @pool: pool to allocate from
144 * @size: number of bytes to allocate from the pool
145 *
146 * Allocate the requested number of bytes from the specified pool.
147 * Uses the pool allocation function (with first-fit algorithm by default).
148 * Can not be used in NMI handler on architectures without
149 * NMI-safe cmpxchg implementation.
150 */
151static inline unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
152{
153 return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
154}
155
/* Allocate from the pool and also return the DMA address of the block. */
extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size,
		dma_addr_t *dma);
/* Free a block; if @owner is non-NULL, also return the chunk's owner data. */
extern void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr,
		size_t size, void **owner);
160static inline void gen_pool_free(struct gen_pool *pool, unsigned long addr,
161 size_t size)
162{
163 gen_pool_free_owner(pool, addr, size, NULL);
164}
165
/* Invoke the callback on every chunk in the pool. */
extern void gen_pool_for_each_chunk(struct gen_pool *,
	void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *);
/* Number of bytes currently available across all chunks. */
extern size_t gen_pool_avail(struct gen_pool *);
/* Total size in bytes of all chunks managed by the pool. */
extern size_t gen_pool_size(struct gen_pool *);

/* Replace the pool's allocation algorithm and its callback data. */
extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
		void *data);

/* First-fit allocation algorithm (the pool's default). */
extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr);

/* Allocation at a fixed offset; @data is a struct genpool_data_fixed. */
extern unsigned long gen_pool_fixed_alloc(unsigned long *map,
		unsigned long size, unsigned long start, unsigned int nr,
		void *data, struct gen_pool *pool, unsigned long start_addr);

/* First-fit with byte alignment; @data is a struct genpool_data_align. */
extern unsigned long gen_pool_first_fit_align(unsigned long *map,
		unsigned long size, unsigned long start, unsigned int nr,
		void *data, struct gen_pool *pool, unsigned long start_addr);


/* First-fit variant; presumably aligns to the allocation's size order —
 * see lib/genalloc.c for the exact semantics. */
extern unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start, unsigned int nr,
		void *data, struct gen_pool *pool, unsigned long start_addr);

/* Best-fit allocation algorithm. */
extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr);


/* Device-managed (devres) pool creation; released along with @dev. */
extern struct gen_pool *devm_gen_pool_create(struct device *dev,
		int min_alloc_order, int nid, const char *name);
/* Look up a managed pool previously created for @dev, by @name. */
extern struct gen_pool *gen_pool_get(struct device *dev, const char *name);

/* Check whether the address range [start, start + size) lies in the pool. */
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
			size_t size);
202
#ifdef CONFIG_OF
/* Look up a pool referenced by a device-tree property of @np. */
extern struct gen_pool *of_gen_pool_get(struct device_node *np,
	const char *propname, int index);
#else
/* Without CONFIG_OF there are no device-tree pools to find. */
static inline struct gen_pool *of_gen_pool_get(struct device_node *np,
	const char *propname, int index)
{
	return NULL;
}
#endif
Jean-Christophe PLAGNIOL-VILLARD6aae6e02011-05-24 17:13:33 -0700213#endif /* __GENALLOC_H__ */