Clean up syscall code to look more like its mips64 equivalent.
/*
 * linux/mm/vcache.c
 *
 * virtual => physical page mapping cache. Users of this mechanism
 * register callbacks for a given (virt,mm,phys) page mapping, and
 * the kernel guarantees to call back when this mapping is invalidated.
 * (ie. upon COW or unmap.)
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/vcache.h>

#define VCACHE_HASHBITS 8
#define VCACHE_HASHSIZE (1 << VCACHE_HASHBITS)
spinlock_t vcache_lock = SPIN_LOCK_UNLOCKED;

static struct list_head hash[VCACHE_HASHSIZE];
static struct list_head *hash_vcache(unsigned long address,
					struct mm_struct *mm)
{
	return &hash[hash_long(address + (unsigned long)mm, VCACHE_HASHBITS)];
}
void __attach_vcache(vcache_t *vcache,
		unsigned long address,
		struct mm_struct *mm,
		void (*callback)(struct vcache_s *data, struct page *new))
{
	struct list_head *hash_head;

	address &= PAGE_MASK;
	vcache->address = address;
	vcache->mm = mm;
	vcache->callback = callback;

	hash_head = hash_vcache(address, mm);

	list_add_tail(&vcache->hash_entry, hash_head);
}
void __detach_vcache(vcache_t *vcache)
{
	list_del_init(&vcache->hash_entry);
}
void invalidate_vcache(unsigned long address, struct mm_struct *mm,
		       struct page *new_page)
{
	struct list_head *l, *hash_head;
	vcache_t *vcache;

	address &= PAGE_MASK;

	hash_head = hash_vcache(address, mm);
	/*
	 * This is safe, because this path is called with the pagetable
	 * lock held. So while other mm's might add new entries in
	 * parallel, *this* mm is locked out, so if the list is empty
	 * now then we do not have to take the vcache lock to see it's
	 * really empty.
	 */
	if (likely(list_empty(hash_head)))
		return;

	spin_lock(&vcache_lock);
	list_for_each(l, hash_head) {
		vcache = list_entry(l, vcache_t, hash_entry);
		if (vcache->address != address || vcache->mm != mm)
			continue;
		vcache->callback(vcache, new_page);
	}
	spin_unlock(&vcache_lock);
}
static int __init vcache_init(void)
{
	unsigned int i;

	for (i = 0; i < VCACHE_HASHSIZE; i++)
		INIT_LIST_HEAD(hash + i);
	return 0;
}

__initcall(vcache_init);
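
The header comment describes the registration API, but the file itself contains no caller. The following is a minimal usage sketch, not code from this tree: struct my_watch, my_remap_callback() and the two helper functions are invented for illustration, and it assumes that vcache_t, the __attach_vcache()/__detach_vcache() prototypes and vcache_lock are exported through <linux/vcache.h>. Callers are expected to take vcache_lock around attach/detach, and the callback runs with vcache_lock held from invalidate_vcache().

/* Hypothetical vcache user -- illustrative sketch only, not from this tree. */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/vcache.h>

struct my_watch {
	vcache_t vcache;	/* embedded so the callback can find us */
	struct page *page;	/* example per-user state */
};

/* Runs under vcache_lock when the watched (virt,mm) mapping is invalidated. */
static void my_remap_callback(struct vcache_s *data, struct page *new)
{
	struct my_watch *w = container_of(data, struct my_watch, vcache);

	/* e.g. remember the replacement page after a COW or unmap */
	w->page = new;
}

static void my_watch_mapping(struct my_watch *w, unsigned long uaddr,
			     struct mm_struct *mm)
{
	spin_lock(&vcache_lock);
	__attach_vcache(&w->vcache, uaddr, mm, my_remap_callback);
	spin_unlock(&vcache_lock);
}

static void my_unwatch_mapping(struct my_watch *w)
{
	spin_lock(&vcache_lock);
	__detach_vcache(&w->vcache);
	spin_unlock(&vcache_lock);
}

In the 2.5 tree this interface was used by the futex code in roughly this shape; the sketch only aims to show the attach/callback/detach lifecycle, not that code.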