/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Interval tree (augmented rbtree) used to store the PAT memory type
 * reservations.
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/gfp.h>

#include <asm/pgtable.h>
#include <asm/pat.h>

#include "pat_internal.h"

/*
 * The memtype tree keeps track of the memory type for specific
 * physical memory areas. Without proper tracking, conflicting memory
 * types in different mappings can cause CPU cache corruption.
 *
 * The tree is an interval tree (augmented rbtree) ordered by the
 * starting address. It can contain multiple entries for different
 * regions which overlap; all such aliases have the same cache
 * attributes, of course.
 *
 * memtype_lock protects the rbtree.
 */

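/*
 * Illustrative example (hypothetical addresses): if the tree holds the
 * aliases [0x1000, 0x3000) and [0x2000, 0x5000), a query for the range
 * [0x2800, 0x2900) overlaps both, and the lowest-match lookup below
 * returns the entry starting at 0x1000, the lowest start address among
 * the overlapping entries.
 */
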
static struct rb_root memtype_rbroot = RB_ROOT;

static int is_node_overlap(struct memtype *node, u64 start, u64 end)
{
	if (node->start >= end || node->end <= start)
		return 0;

	return 1;
}

/* Maximum end address in the subtree rooted at @node, 0 if node is NULL */
static u64 get_subtree_max_end(struct rb_node *node)
{
	u64 ret = 0;

	if (node) {
		struct memtype *data = container_of(node, struct memtype, rb);
		ret = data->subtree_max_end;
	}
	return ret;
}

/* Update 'subtree_max_end' for a node, based on node and its children */
static void memtype_rb_augment_cb(struct rb_node *node, void *__unused)
{
	struct memtype *data;
	u64 max_end, child_max_end;

	if (!node)
		return;

	data = container_of(node, struct memtype, rb);
	max_end = data->end;

	child_max_end = get_subtree_max_end(node->rb_right);
	if (child_max_end > max_end)
		max_end = child_max_end;

	child_max_end = get_subtree_max_end(node->rb_left);
	if (child_max_end > max_end)
		max_end = child_max_end;

	data->subtree_max_end = max_end;
}

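/*
 * Worked example (hypothetical values): for a node whose own range ends
 * at 0x3000 and whose left and right children report subtree_max_end
 * values of 0x2000 and 0x5000, the callback above stores 0x5000, i.e.
 * the largest end address anywhere in the subtree rooted at that node.
 */
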
/* Find the first (lowest start addr) overlapping range from rb tree */
static struct memtype *memtype_rb_lowest_match(struct rb_root *root,
				u64 start, u64 end)
{
	struct rb_node *node = root->rb_node;
	struct memtype *last_lower = NULL;

	while (node) {
		struct memtype *data = container_of(node, struct memtype, rb);

		if (get_subtree_max_end(node->rb_left) > start) {
			/* Lowest overlap if any must be on left side */
			node = node->rb_left;
		} else if (is_node_overlap(data, start, end)) {
			last_lower = data;
			break;
		} else if (start >= data->start) {
			/* Lowest overlap if any must be on right side */
			node = node->rb_right;
		} else {
			break;
		}
	}

	return last_lower; /* Returns NULL if there is no overlap */
}

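/*
 * The subtree_max_end augmentation is what keeps this walk cheap: if,
 * say, the left subtree's maximum end address is 0x2000 and the query
 * starts at 0x3000 (hypothetical numbers), no entry on the left can
 * overlap the query, so the search never descends into that subtree.
 */
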
static struct memtype *memtype_rb_exact_match(struct rb_root *root,
				u64 start, u64 end)
{
	struct memtype *match;

	match = memtype_rb_lowest_match(root, start, end);
	while (match != NULL && match->start < end) {
		struct rb_node *node;

		if (match->start == start && match->end == end)
			return match;

		node = rb_next(&match->rb);
		if (node)
			match = container_of(node, struct memtype, rb);
		else
			match = NULL;
	}

	return NULL; /* Returns NULL if there is no exact match */
}

static int memtype_rb_check_conflict(struct rb_root *root,
				u64 start, u64 end,
				unsigned long reqtype, unsigned long *newtype)
{
	struct rb_node *node;
	struct memtype *match;
	int found_type = reqtype;

	match = memtype_rb_lowest_match(&memtype_rbroot, start, end);
	if (match == NULL)
		goto success;

	if (match->type != found_type && newtype == NULL)
		goto failure;

	dprintk("Overlap at 0x%Lx-0x%Lx\n", match->start, match->end);
	found_type = match->type;

	node = rb_next(&match->rb);
	while (node) {
		match = container_of(node, struct memtype, rb);

		if (match->start >= end) /* Checked all possible matches */
			goto success;

		if (is_node_overlap(match, start, end) &&
		    match->type != found_type) {
			goto failure;
		}

		node = rb_next(&match->rb);
	}
success:
	if (newtype)
		*newtype = found_type;

	return 0;

failure:
	printk(KERN_INFO "%s:%d conflicting memory types "
		"%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
		end, cattr_name(found_type), cattr_name(match->type));
	return -EBUSY;
}

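/*
 * Plain rbtree insert keyed on the start address.  After the node is
 * linked and recoloured, rb_augment_insert() walks back towards the
 * root so the subtree_max_end values stay correct along the insertion
 * path.
 */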
static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
{
	struct rb_node **node = &(root->rb_node);
	struct rb_node *parent = NULL;

	while (*node) {
		struct memtype *data = container_of(*node, struct memtype, rb);

		parent = *node;
		if (newdata->start <= data->start)
			node = &((*node)->rb_left);
		else if (newdata->start > data->start)
			node = &((*node)->rb_right);
	}

	rb_link_node(&newdata->rb, parent, node);
	rb_insert_color(&newdata->rb, root);
	rb_augment_insert(&newdata->rb, memtype_rb_augment_cb, NULL);
}

int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
{
	int err = 0;

	err = memtype_rb_check_conflict(&memtype_rbroot, new->start, new->end,
						new->type, ret_type);

	if (!err) {
		if (ret_type)
			new->type = *ret_type;

		new->subtree_max_end = new->end;
		memtype_rb_insert(&memtype_rbroot, new);
	}
	return err;
}

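/*
 * A minimal usage sketch of the above (roughly how reserve_memtype() in
 * pat.c drives it; variable names here are illustrative):
 *
 *	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
 *	new->start = start;
 *	new->end   = end;
 *	new->type  = req_type;
 *
 *	spin_lock(&memtype_lock);
 *	err = rbt_memtype_check_insert(new, new_type);
 *	spin_unlock(&memtype_lock);
 */
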
struct memtype *rbt_memtype_erase(u64 start, u64 end)
{
	struct rb_node *deepest;
	struct memtype *data;

	data = memtype_rb_exact_match(&memtype_rbroot, start, end);
	if (!data)
		goto out;

	deepest = rb_augment_erase_begin(&data->rb);
	rb_erase(&data->rb, &memtype_rbroot);
	rb_augment_erase_end(deepest, memtype_rb_augment_cb, NULL);
out:
	return data;
}

struct memtype *rbt_memtype_lookup(u64 addr)
{
	struct memtype *data;

	data = memtype_rb_lowest_match(&memtype_rbroot, addr, addr + PAGE_SIZE);
	return data;
}

#if defined(CONFIG_DEBUG_FS)
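/*
 * Helper for the PAT debugfs interface in pat.c: walks the tree in
 * rb_first()/rb_next() order and copies the requested entry into *out,
 * so the seq_file code can print it without poking at tree internals.
 */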
int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos)
{
	struct rb_node *node;
	int i = 1;

	node = rb_first(&memtype_rbroot);
	while (node && pos != i) {
		node = rb_next(node);
		i++;
	}

	if (node) { /* pos == i */
		struct memtype *this = container_of(node, struct memtype, rb);
		*out = *this;

		return 0;
	} else {
		return 1;
	}
}
#endif