/*
 * linux/mm/vcache.c
 *
 * virtual => physical page mapping cache. Users of this mechanism
 * register callbacks for a given (virt,mm,phys) page mapping, and
 * the kernel guarantees to call back when this mapping is invalidated.
 * (ie. upon COW or unmap.)
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/vcache.h>
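
/*
 * The vcache_t type itself comes from <linux/vcache.h>. From the way
 * it is used below, it is roughly (a sketch, see the real header):
 *
 *	typedef struct vcache_s {
 *		unsigned long address;
 *		struct mm_struct *mm;
 *		struct list_head hash_entry;
 *		void (*callback)(struct vcache_s *data, struct page *new);
 *	} vcache_t;
 */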

#define VCACHE_HASHBITS 8
#define VCACHE_HASHSIZE (1 << VCACHE_HASHBITS)

spinlock_t vcache_lock = SPIN_LOCK_UNLOCKED;

static struct list_head hash[VCACHE_HASHSIZE];

/* Fold the (address, mm) pair into a bucket index in hash[]. */
static struct list_head *hash_vcache(unsigned long address,
					struct mm_struct *mm)
{
	return &hash[hash_long(address + (unsigned long)mm, VCACHE_HASHBITS)];
}

void __attach_vcache(vcache_t *vcache,
		unsigned long address,
		struct mm_struct *mm,
		void (*callback)(struct vcache_s *data, struct page *new))
{
	struct list_head *hash_head;

	address &= PAGE_MASK;
	vcache->address = address;
	vcache->mm = mm;
	vcache->callback = callback;

	hash_head = hash_vcache(address, mm);

	spin_lock(&vcache_lock);
	list_add_tail(&vcache->hash_entry, hash_head);
	spin_unlock(&vcache_lock);
}

void __detach_vcache(vcache_t *vcache)
{
	spin_lock(&vcache_lock);
	list_del_init(&vcache->hash_entry);
	spin_unlock(&vcache_lock);
}
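
/*
 * Example usage (a sketch, not part of this file): a watcher attaches
 * a vcache_t while holding the mm's pagetable lock (the locking rule
 * the comment in invalidate_vcache() below relies on), and detaches it
 * once it no longer cares about the mapping. The callback name is
 * hypothetical:
 *
 *	static void my_vcache_callback(struct vcache_s *data,
 *					struct page *new)
 *	{
 *		// the (data->address, data->mm) mapping was COWed or
 *		// unmapped; 'new' is whatever page the invalidator passed
 *	}
 *
 *	spin_lock(&mm->page_table_lock);
 *	__attach_vcache(&vcache, uaddr, mm, my_vcache_callback);
 *	spin_unlock(&mm->page_table_lock);
 *	...
 *	__detach_vcache(&vcache);
 */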

void invalidate_vcache(unsigned long address, struct mm_struct *mm,
				struct page *new_page)
{
	struct list_head *l, *hash_head;
	vcache_t *vcache;

	address &= PAGE_MASK;

	hash_head = hash_vcache(address, mm);
	/*
	 * This is safe, because this path is called with the pagetable
	 * lock held. So while other mm's might add new entries in
	 * parallel, *this* mm is locked out, so if the list is empty
	 * now then we do not have to take the vcache lock to see it's
	 * still empty later on.
	 */
	if (likely(list_empty(hash_head)))
		return;

	spin_lock(&vcache_lock);
	list_for_each(l, hash_head) {
		vcache = list_entry(l, vcache_t, hash_entry);
		if (vcache->address != address || vcache->mm != mm)
			continue;
		vcache->callback(vcache, new_page);
	}
	spin_unlock(&vcache_lock);
}
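
/*
 * Example invalidation site (a sketch, not part of this file): a COW
 * or unmap path that has just changed the page backing a user address
 * would, while still holding the pagetable lock, notify watchers:
 *
 *	invalidate_vcache(address, vma->vm_mm, new_page);
 */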

static int __init vcache_init(void)
{
	unsigned int i;

	for (i = 0; i < VCACHE_HASHSIZE; i++)
		INIT_LIST_HEAD(hash + i);
	return 0;
}
__initcall(vcache_init);