mm/cma_debug.c

/*
 * CMA DebugFS Interface
 *
 * Copyright (c) 2015 Sasha Levin <sasha.levin@oracle.com>
 */

#include <linux/debugfs.h>
#include <linux/cma.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm_types.h>

#include "cma.h"
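
/*
 * Interface sketch (matching the files created in cma_debugfs_add_one()
 * below): each CMA area gets a directory /sys/kernel/debug/cma/cma-<N>
 * holding the write-only triggers "alloc" and "free" plus the read-only
 * views "base_pfn", "count", "order_per_bit", "used", "maxchunk" and
 * "bitmap". A hypothetical shell session, assuming debugfs is mounted
 * at /sys/kernel/debug:
 *
 *      echo 16 > /sys/kernel/debug/cma/cma-0/alloc   # take 16 pages
 *      cat /sys/kernel/debug/cma/cma-0/used          # pages now in use
 *      echo 16 > /sys/kernel/debug/cma/cma-0/free    # give them back
 */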

/*
 * One allocation made through the "alloc" debugfs file, recorded so a
 * later write to "free" can find and release it.
 */
struct cma_mem {
        struct hlist_node node;
        struct page *p;
        unsigned long n;
};

static struct dentry *cma_debugfs_root;

/* Generic getter backing the base_pfn, count and order_per_bit files. */
static int cma_debugfs_get(void *data, u64 *val)
{
        unsigned long *p = data;

        *val = *p;

        return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");

static int cma_used_get(void *data, u64 *val)
{
        struct cma *cma = data;
        unsigned long used;

        mutex_lock(&cma->lock);
        /* pages counter is smaller than sizeof(int) */
        used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
        mutex_unlock(&cma->lock);
        *val = (u64)used << cma->order_per_bit;

        return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n");

static int cma_maxchunk_get(void *data, u64 *val)
{
        struct cma *cma = data;
        unsigned long maxchunk = 0;
        unsigned long start, end = 0;
        unsigned long bitmap_maxno = cma_bitmap_maxno(cma);

        mutex_lock(&cma->lock);
        for (;;) {
                /* Each free run starts at the next zero bit... */
                start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
                /*
                 * start is a bit index, so compare against the bitmap
                 * size, not the page count: the two differ whenever
                 * order_per_bit != 0, and comparing against cma->count
                 * could loop forever once the scan reaches the end.
                 */
                if (start >= bitmap_maxno)
                        break;
                /* ...and ends at the next set bit. */
                end = find_next_bit(cma->bitmap, bitmap_maxno, start);
                maxchunk = max(end - start, maxchunk);
        }
        mutex_unlock(&cma->lock);
        *val = (u64)maxchunk << cma->order_per_bit;

        return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cma_maxchunk_fops, cma_maxchunk_get, NULL, "%llu\n");
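
/*
 * Both "used" and "maxchunk" report pages: the bitmap counts above are
 * shifted left by order_per_bit. Hypothetical example: with
 * order_per_bit == 2 each bit covers 4 pages, so a longest free run of
 * 10 bits reads back as 10 << 2 == 40 pages.
 */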

static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem)
{
        spin_lock(&cma->mem_head_lock);
        hlist_add_head(&mem->node, &cma->mem_head);
        spin_unlock(&cma->mem_head_lock);
}

static struct cma_mem *cma_get_entry_from_list(struct cma *cma)
{
        struct cma_mem *mem = NULL;

        spin_lock(&cma->mem_head_lock);
        if (!hlist_empty(&cma->mem_head)) {
                mem = hlist_entry(cma->mem_head.first, struct cma_mem, node);
                hlist_del_init(&mem->node);
        }
        spin_unlock(&cma->mem_head_lock);

        return mem;
}
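
/*
 * Note: both helpers above operate on the list head, so the list
 * behaves as a LIFO stack and "free" releases the most recently
 * allocated chunks first.
 */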

static int cma_free_mem(struct cma *cma, int count)
{
        struct cma_mem *mem = NULL;

        while (count) {
                mem = cma_get_entry_from_list(cma);
                if (mem == NULL)
                        return 0;

                if (mem->n <= count) {
                        /* The whole chunk fits into the request. */
                        cma_release(cma, mem->p, mem->n);
                        count -= mem->n;
                        kfree(mem);
                } else if (cma->order_per_bit == 0) {
                        /* Partial release is exact only when 1 bit == 1 page. */
                        cma_release(cma, mem->p, count);
                        mem->p += count;
                        mem->n -= count;
                        count = 0;
                        cma_add_to_cma_mem_list(cma, mem);
                } else {
                        pr_debug("cma: cannot release partial block when order_per_bit != 0\n");
                        cma_add_to_cma_mem_list(cma, mem);
                        break;
                }
        }

        return 0;
}
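
/*
 * Worked example (hypothetical numbers): with one 8-page chunk on the
 * list and order_per_bit == 0, writing 3 to "free" releases the first
 * 3 pages and re-queues the record with p advanced by 3 pages and
 * n == 5.
 */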

static int cma_free_write(void *data, u64 val)
{
        int pages = val;
        struct cma *cma = data;

        return cma_free_mem(cma, pages);
}
DEFINE_SIMPLE_ATTRIBUTE(cma_free_fops, NULL, cma_free_write, "%llu\n");

static int cma_alloc_mem(struct cma *cma, int count)
{
        struct cma_mem *mem;
        struct page *p;

        mem = kzalloc(sizeof(*mem), GFP_KERNEL);
        if (!mem)
                return -ENOMEM;

        p = cma_alloc(cma, count, 0);
        if (!p) {
                kfree(mem);
                return -ENOMEM;
        }

        mem->p = p;
        mem->n = count;

        cma_add_to_cma_mem_list(cma, mem);

        return 0;
}
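
/*
 * The third argument to cma_alloc() above is the alignment order; 0
 * asks for no alignment beyond CMA's own granularity. Later kernels
 * extended this signature (gfp flags, then a no_warn flag), so the
 * call shown here matches the era of this file.
 */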

static int cma_alloc_write(void *data, u64 val)
{
        int pages = val;
        struct cma *cma = data;

        return cma_alloc_mem(cma, pages);
}
DEFINE_SIMPLE_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");

static void cma_debugfs_add_one(struct cma *cma, int idx)
{
        struct dentry *tmp;
        char name[16];
        int u32s;

        sprintf(name, "cma-%d", idx);

        tmp = debugfs_create_dir(name, cma_debugfs_root);

        debugfs_create_file("alloc", S_IWUSR, tmp, cma, &cma_alloc_fops);

        debugfs_create_file("free", S_IWUSR, tmp, cma, &cma_free_fops);

        debugfs_create_file("base_pfn", S_IRUGO, tmp,
                            &cma->base_pfn, &cma_debugfs_fops);
        debugfs_create_file("count", S_IRUGO, tmp,
                            &cma->count, &cma_debugfs_fops);
        debugfs_create_file("order_per_bit", S_IRUGO, tmp,
                            &cma->order_per_bit, &cma_debugfs_fops);
        debugfs_create_file("used", S_IRUGO, tmp, cma, &cma_used_fops);
        debugfs_create_file("maxchunk", S_IRUGO, tmp, cma, &cma_maxchunk_fops);

        u32s = DIV_ROUND_UP(cma_bitmap_maxno(cma), BITS_PER_BYTE * sizeof(u32));
        debugfs_create_u32_array("bitmap", S_IRUGO, tmp, (u32 *)cma->bitmap, u32s);
}
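
/*
 * Illustrative sizing: a 64 MiB area with 4 KiB pages and
 * order_per_bit == 0 has 16384 bitmap bits, so the computation above
 * exports DIV_ROUND_UP(16384, 32) == 512 u32 words.
 */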

static int __init cma_debugfs_init(void)
{
        int i;

        cma_debugfs_root = debugfs_create_dir("cma", NULL);
        if (!cma_debugfs_root)
                return -ENOMEM;

        for (i = 0; i < cma_area_count; i++)
                cma_debugfs_add_one(&cma_areas[i], i);

        return 0;
}
late_initcall(cma_debugfs_init);
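
/*
 * late_initcall keeps this after the CMA areas themselves have been
 * reserved and activated earlier in boot, so cma_area_count and
 * cma_areas[] (both from cma.c) are stable by the time the debugfs
 * directories are created.
 */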