[linux-2.6/linux-mips.git] / arch / arm / mm / blockops.c
blob 4f5ee2d089963942487ef516108de3b49ebadc54
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mm.h>

#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>

extern struct cpu_cache_fns blk_cache_fns;

#define HARVARD_CACHE

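/*
 * The routines below use the ARMv6 block cache maintenance operations
 * (mcrr p15, 0, <end>, <start>, cN), which act on a whole address range
 * with a single instruction.  Each one is emitted as a .word constant,
 * presumably because assemblers of the time did not accept these
 * encodings; the intended mnemonic is given in the adjacent comment.
 */
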
/*
 * blk_flush_kern_dcache_page(kaddr)
 *
 * Ensure that the data held in the page kaddr is written back
 * to the page in question.
 *
 * - kaddr   - kernel address (guaranteed to be page aligned)
 */
static void __attribute__((naked))
blk_flush_kern_dcache_page(void *kaddr)
{
	asm(
	"add	r1, r0, %0						\n\
	sub	r1, r1, %1						\n\
1:	.word	0xec401f0e	@ mcrr	p15, 0, r1, r0, c14, 0	@ blocking	\n\
	mov	r0, #0							\n\
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache	\n\
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer	\n\
	mov	pc, lr"
	:
	: "I" (PAGE_SIZE), "I" (L1_CACHE_BYTES));
}

/*
 * blk_dma_inv_range(start,end)
 *
 * Invalidate the data cache within the specified region; we will
 * be performing a DMA operation in this region and we want to
 * purge old data in the cache.
 *
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 */
static void __attribute__((naked))
blk_dma_inv_range_unified(unsigned long start, unsigned long end)
{
	asm(
	"tst	r0, %0							\n\
	mcrne	p15, 0, r0, c7, c11, 1		@ clean unified line	\n\
	tst	r1, %0							\n\
	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line\n\
	.word	0xec401f06	@ mcrr	p15, 0, r1, r0, c6, 0	@ blocking	\n\
	mov	r0, #0							\n\
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer	\n\
	mov	pc, lr"
	:
	: "I" (L1_CACHE_BYTES - 1));
}

static void __attribute__((naked))
blk_dma_inv_range_harvard(unsigned long start, unsigned long end)
{
	asm(
	"tst	r0, %0							\n\
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D line		\n\
	tst	r1, %0							\n\
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D line	\n\
	.word	0xec401f06	@ mcrr	p15, 0, r1, r0, c6, 0	@ blocking	\n\
	mov	r0, #0							\n\
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer	\n\
	mov	pc, lr"
	:
	: "I" (L1_CACHE_BYTES - 1));
}

/*
 * blk_dma_clean_range(start,end)
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 */
static void __attribute__((naked))
blk_dma_clean_range(unsigned long start, unsigned long end)
{
	asm(
	".word	0xec401f0c	@ mcrr	p15, 0, r1, r0, c12, 0	@ blocking	\n\
	mov	r0, #0							\n\
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer	\n\
	mov	pc, lr");
}

/*
 * blk_dma_flush_range(start,end)
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 */
static void __attribute__((naked))
blk_dma_flush_range(unsigned long start, unsigned long end)
{
	asm(
	".word	0xec401f0e	@ mcrr	p15, 0, r1, r0, c14, 0	@ blocking	\n\
	mov	pc, lr");
}

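/*
 * Probe support.  blockops_check() below tries each block cache
 * operation on a small range, loading a distinct bit into r2 first.
 * If the CPU does not implement an operation it takes an undefined
 * instruction exception; blockops_hook routes that to blockops_trap(),
 * which ORs the bit from r2 into the error mask kept in r4 and steps
 * the PC past the faulting instruction.
 */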
static int blockops_trap(struct pt_regs *regs, unsigned int instr)
{
	regs->ARM_r4 |= regs->ARM_r2;
	regs->ARM_pc += 4;
	return 0;
}

static char *func[] = {
	"Prefetch data range",
	"Clean+Invalidate data range",
	"Clean data range",
	"Invalidate data range",
	"Invalidate instr range"
};

static struct undef_hook blockops_hook __initdata = {
	.instr_mask	= 0x0fffffd0,
	.instr_val	= 0x0c401f00,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= 0,
	.fn		= blockops_trap,
};

static int __init blockops_check(void)
{
	register unsigned int err asm("r4") = 0;
	unsigned int err_pos = 1;
	unsigned int cache_type;
	int i;

	asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (cache_type));	/* read cache type register */

	printk("Checking V6 block cache operations:\n");
	register_undef_hook(&blockops_hook);

	__asm__ ("mov	r0, %0\n\t"
		"mov	r1, %1\n\t"
		"mov	r2, #1\n\t"
		".word	0xec401f2c @ mcrr p15, 0, r1, r0, c12, 2\n\t"
		"mov	r2, #2\n\t"
		".word	0xec401f0e @ mcrr p15, 0, r1, r0, c14, 0\n\t"
		"mov	r2, #4\n\t"
		".word	0xec401f0c @ mcrr p15, 0, r1, r0, c12, 0\n\t"
		"mov	r2, #8\n\t"
		".word	0xec401f06 @ mcrr p15, 0, r1, r0, c6, 0\n\t"
		"mov	r2, #16\n\t"
		".word	0xec401f05 @ mcrr p15, 0, r1, r0, c5, 0\n\t"
		:
		: "r" (PAGE_OFFSET), "r" (PAGE_OFFSET + 128)
		: "r0", "r1", "r2");

	unregister_undef_hook(&blockops_hook);

	for (i = 0; i < ARRAY_SIZE(func); i++, err_pos <<= 1)
		printk("%30s: %ssupported\n", func[i], err & err_pos ? "not " : "");

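	/*
	 * err now has one bit set for each operation that trapped
	 * (1: prefetch, 2: clean+invalidate, 4: clean, 8: invalidate data,
	 * 16: invalidate instr).  Install the block versions of whichever
	 * DMA cache functions are actually implemented.
	 */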
	if ((err & 8) == 0) {
		printk(" --> Using %s block cache invalidate\n",
		       cache_type & (1 << 24) ? "harvard" : "unified");
		if (cache_type & (1 << 24))
			cpu_cache.dma_inv_range = blk_dma_inv_range_harvard;
		else
			cpu_cache.dma_inv_range = blk_dma_inv_range_unified;
	}
	if ((err & 4) == 0) {
		printk(" --> Using block cache clean\n");
		cpu_cache.dma_clean_range = blk_dma_clean_range;
	}
	if ((err & 2) == 0) {
		printk(" --> Using block cache clean+invalidate\n");
		cpu_cache.dma_flush_range = blk_dma_flush_range;
		cpu_cache.flush_kern_dcache_page = blk_flush_kern_dcache_page;
	}

	return 0;
}

__initcall(blockops_check);
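/*
 * Illustrative note (not part of this file): on MULTI_CACHE builds the
 * generic dmac_* wrappers dispatch through cpu_cache, so once this
 * initcall has run, a call such as dmac_flush_range(buf, buf + len)
 * would reach blk_dma_flush_range() on CPUs supporting the block ops.
 */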