/*
 * CMA DebugFS Interface
 *
 * Copyright (c) 2015 Sasha Levin <sasha.levin@oracle.com>
 */

#include <linux/debugfs.h>
#include <linux/cma.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm_types.h>

#include "cma.h"

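/*
 * Tracks one chunk of pages handed out through the debugfs "alloc"
 * interface so that it can later be returned through "free".
 */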
struct cma_mem {
	struct hlist_node node;
	struct page *p;
	unsigned long n;
};

static struct dentry *cma_debugfs_root;

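/* Generic read-only accessor: expose an unsigned long field as a u64. */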
static int cma_debugfs_get(void *data, u64 *val)
{
	unsigned long *p = data;

	*val = *p;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");

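/*
 * Report how many pages of the area are currently allocated: count the
 * set bits in the allocation bitmap and scale by the pages-per-bit order.
 */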
static int cma_used_get(void *data, u64 *val)
{
	struct cma *cma = data;
	unsigned long used;

	mutex_lock(&cma->lock);
	/* the bitmap size fits in an int, so the cast below is safe */
	used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
	mutex_unlock(&cma->lock);
	*val = (u64)used << cma->order_per_bit;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n");

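/*
 * Report the largest contiguous run of free pages by scanning the bitmap
 * for the longest stretch of clear bits.
 */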
static int cma_maxchunk_get(void *data, u64 *val)
{
	struct cma *cma = data;
	unsigned long maxchunk = 0;
	unsigned long start, end = 0;
	unsigned long bitmap_maxno = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	for (;;) {
		start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
		if (start >= bitmap_maxno)
			break;
		end = find_next_bit(cma->bitmap, bitmap_maxno, start);
		maxchunk = max(end - start, maxchunk);
	}
	mutex_unlock(&cma->lock);
	*val = (u64)maxchunk << cma->order_per_bit;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(cma_maxchunk_fops, cma_maxchunk_get, NULL, "%llu\n");

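/* Push a debugfs-allocated chunk onto the per-area bookkeeping list. */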
static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem)
{
	spin_lock(&cma->mem_head_lock);
	hlist_add_head(&mem->node, &cma->mem_head);
	spin_unlock(&cma->mem_head_lock);
}

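/* Pop the most recently allocated chunk, or return NULL if none remain. */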
static struct cma_mem *cma_get_entry_from_list(struct cma *cma)
{
	struct cma_mem *mem = NULL;

	spin_lock(&cma->mem_head_lock);
	if (!hlist_empty(&cma->mem_head)) {
		mem = hlist_entry(cma->mem_head.first, struct cma_mem, node);
		hlist_del_init(&mem->node);
	}
	spin_unlock(&cma->mem_head_lock);

	return mem;
}

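/*
 * Release up to @count pages back to the area, consuming whole tracked
 * chunks first and splitting the last one only when order_per_bit == 0
 * (a partial block cannot be released otherwise).
 */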
static int cma_free_mem(struct cma *cma, int count)
{
	struct cma_mem *mem = NULL;

	while (count) {
		mem = cma_get_entry_from_list(cma);
		if (mem == NULL)
			return 0;

		if (mem->n <= count) {
			cma_release(cma, mem->p, mem->n);
			count -= mem->n;
			kfree(mem);
		} else if (cma->order_per_bit == 0) {
			cma_release(cma, mem->p, count);
			mem->p += count;
			mem->n -= count;
			count = 0;
			cma_add_to_cma_mem_list(cma, mem);
		} else {
			pr_debug("cma: cannot release partial block when order_per_bit != 0\n");
			cma_add_to_cma_mem_list(cma, mem);
			break;
		}
	}

	return 0;
}

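/* Write handler for the "free" file: release the requested page count. */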
static int cma_free_write(void *data, u64 val)
{
	int pages = val;
	struct cma *cma = data;

	return cma_free_mem(cma, pages);
}

DEFINE_SIMPLE_ATTRIBUTE(cma_free_fops, NULL, cma_free_write, "%llu\n");

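/*
 * Allocate @count pages from the area and remember them on the
 * bookkeeping list so they can be returned through the "free" file.
 */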
static int cma_alloc_mem(struct cma *cma, int count)
{
	struct cma_mem *mem;
	struct page *p;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	p = cma_alloc(cma, count, 0);
	if (!p) {
		kfree(mem);
		return -ENOMEM;
	}

	mem->p = p;
	mem->n = count;

	cma_add_to_cma_mem_list(cma, mem);

	return 0;
}

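/* Write handler for the "alloc" file: allocate the requested page count. */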
static int cma_alloc_write(void *data, u64 val)
{
	int pages = val;
	struct cma *cma = data;

	return cma_alloc_mem(cma, pages);
}

DEFINE_SIMPLE_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");

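/*
 * Create the per-area debugfs directory (cma-<idx>) and populate it with
 * the alloc/free triggers, the read-only counters and the raw bitmap.
 */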
static void cma_debugfs_add_one(struct cma *cma, int idx)
{
	struct dentry *tmp;
	char name[16];
	int u32s;

	sprintf(name, "cma-%d", idx);

	tmp = debugfs_create_dir(name, cma_debugfs_root);

	debugfs_create_file("alloc", S_IWUSR, tmp, cma, &cma_alloc_fops);

	debugfs_create_file("free", S_IWUSR, tmp, cma, &cma_free_fops);

	debugfs_create_file("base_pfn", S_IRUGO, tmp,
				&cma->base_pfn, &cma_debugfs_fops);
	debugfs_create_file("count", S_IRUGO, tmp,
				&cma->count, &cma_debugfs_fops);
	debugfs_create_file("order_per_bit", S_IRUGO, tmp,
				&cma->order_per_bit, &cma_debugfs_fops);
	debugfs_create_file("used", S_IRUGO, tmp, cma, &cma_used_fops);
	debugfs_create_file("maxchunk", S_IRUGO, tmp, cma, &cma_maxchunk_fops);

	u32s = DIV_ROUND_UP(cma_bitmap_maxno(cma), BITS_PER_BYTE * sizeof(u32));
	debugfs_create_u32_array("bitmap", S_IRUGO, tmp, (u32 *)cma->bitmap, u32s);
}

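/* Create the debugfs root and register every reserved CMA area under it. */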
static int __init cma_debugfs_init(void)
{
	int i;

	cma_debugfs_root = debugfs_create_dir("cma", NULL);
	if (!cma_debugfs_root)
		return -ENOMEM;

	for (i = 0; i < cma_area_count; i++)
		cma_debugfs_add_one(&cma_areas[i], i);

	return 0;
}
late_initcall(cma_debugfs_init);