RT-AC56 3.0.0.4.374.37 core
[tomato.git] / release / src-rt-6.x.4708 / linux / linux-2.6.36 / drivers / staging / tidspbridge / hw / hw_mmu.c
blob014f5d5293ae95d17a915c1534c575b6914e46c0
1 /*
2 * hw_mmu.c
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * API definitions to setup MMU TLB and PTE
8 * Copyright (C) 2007 Texas Instruments, Inc.
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
19 #include <linux/io.h>
20 #include "MMURegAcM.h"
21 #include <hw_defs.h>
22 #include <hw_mmu.h>
23 #include <linux/types.h>
24 #include <linux/err.h>
26 #define MMU_BASE_VAL_MASK 0xFC00
27 #define MMU_PAGE_MAX 3
28 #define MMU_ELEMENTSIZE_MAX 3
29 #define MMU_ADDR_MASK 0xFFFFF000
30 #define MMU_TTB_MASK 0xFFFFC000
31 #define MMU_SECTION_ADDR_MASK 0xFFF00000
32 #define MMU_SSECTION_ADDR_MASK 0xFF000000
33 #define MMU_PAGE_TABLE_MASK 0xFFFFFC00
34 #define MMU_LARGE_PAGE_MASK 0xFFFF0000
35 #define MMU_SMALL_PAGE_MASK 0xFFFFF000
37 #define MMU_LOAD_TLB 0x00000001
38 #define MMU_GFLUSH 0x60
/*
 * hw_mmu_page_size_t: page-size field encoding for the MMU CAM register
 * (SLSS bits).  Enumerator order is significant: the numeric values are
 * OR'd directly into MMU_CAM by mmu_set_cam_entry().
 */
enum hw_mmu_page_size_t {
	HW_MMU_SECTION,		/* 1 MB section */
	HW_MMU_LARGE_PAGE,	/* 64 KB large page */
	HW_MMU_SMALL_PAGE,	/* 4 KB small page */
	HW_MMU_SUPERSECTION	/* 16 MB supersection */
};
/*
 * FUNCTION	: mmu_flush_entry
 *
 * INPUTS:
 *	Identifier	: base_address
 *	Type		: const void __iomem *
 *	Description	: Base address of the MMU module instance
 *
 * RETURNS:
 *	Type		: hw_status
 *	Description	: 0 -- no errors occurred
 *			  RET_BAD_NULL_PARAM -- a pointer
 *			  parameter was set to NULL
 *
 * PURPOSE	: Flush the TLB entry pointed to by the lock counter
 *		  register, even if that entry is set protected.
 *
 * METHOD	: Check the input parameter and flush a single entry
 *		  in the TLB.
 */
static hw_status mmu_flush_entry(const void __iomem *base_address);
/*
 * FUNCTION	: mmu_set_cam_entry
 *
 * INPUTS:
 *	Identifier	: base_address
 *	Type		: const void __iomem *
 *	Description	: Base address of the MMU module instance
 *
 *	Identifier	: page_sz
 *	Type		: const u32
 *	Description	: Page size encoding (enum hw_mmu_page_size_t value)
 *
 *	Identifier	: preserved_bit
 *	Type		: const u32
 *	Description	: Indicates whether the TLB entry is a preserved
 *			  entry or not
 *
 *	Identifier	: valid_bit
 *	Type		: const u32
 *	Description	: Indicates whether the TLB entry is valid or not
 *
 *	Identifier	: virtual_addr_tag
 *	Type		: const u32
 *	Description	: Virtual address tag (upper 20 bits of the VA)
 *
 * RETURNS:
 *	Type		: hw_status
 *	Description	: 0 -- no errors occurred
 *			  RET_BAD_NULL_PARAM -- a pointer parameter
 *			  was set to NULL
 *			  RET_PARAM_OUT_OF_RANGE -- input parameter out
 *			  of range
 *
 * PURPOSE	: Set the MMU_CAM register.
 *
 * METHOD	: Check the input parameters and set the CAM entry.
 */
static hw_status mmu_set_cam_entry(const void __iomem *base_address,
				   const u32 page_sz,
				   const u32 preserved_bit,
				   const u32 valid_bit,
				   const u32 virtual_addr_tag);
/*
 * FUNCTION	: mmu_set_ram_entry
 *
 * INPUTS:
 *	Identifier	: base_address
 *	Type		: const void __iomem *
 *	Description	: Base address of the MMU module instance
 *
 *	Identifier	: physical_addr
 *	Type		: const u32
 *	Description	: Physical address to which the corresponding
 *			  virtual address should point
 *
 *	Identifier	: endianism
 *	Type		: hw_endianism_t
 *	Description	: Endianism for the given page
 *
 *	Identifier	: element_size
 *	Type		: hw_element_size_t
 *	Description	: The element size (8, 16, 32 or 64 bit)
 *
 *	Identifier	: mixed_size
 *	Type		: hw_mmu_mixed_size_t
 *	Description	: Element size to follow CPU or TLB
 *
 * RETURNS:
 *	Type		: hw_status
 *	Description	: 0 -- no errors occurred
 *			  RET_BAD_NULL_PARAM -- a pointer parameter
 *			  was set to NULL
 *			  RET_PARAM_OUT_OF_RANGE -- input parameter
 *			  out of range
 *
 * PURPOSE	: Set the MMU_RAM register.
 *
 * METHOD	: Check the input parameters and set the RAM entry.
 */
static hw_status mmu_set_ram_entry(const void __iomem *base_address,
				   const u32 physical_addr,
				   enum hw_endianism_t endianism,
				   enum hw_element_size_t element_size,
				   enum hw_mmu_mixed_size_t mixed_size);
166 /* HW FUNCTIONS */
168 hw_status hw_mmu_enable(const void __iomem *base_address)
170 hw_status status = 0;
172 MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_SET);
174 return status;
177 hw_status hw_mmu_disable(const void __iomem *base_address)
179 hw_status status = 0;
181 MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_CLEAR);
183 return status;
186 hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
187 u32 num_locked_entries)
189 hw_status status = 0;
191 MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, num_locked_entries);
193 return status;
196 hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
197 u32 victim_entry_num)
199 hw_status status = 0;
201 MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victim_entry_num);
203 return status;
206 hw_status hw_mmu_event_ack(const void __iomem *base_address, u32 irq_mask)
208 hw_status status = 0;
210 MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, irq_mask);
212 return status;
215 hw_status hw_mmu_event_disable(const void __iomem *base_address, u32 irq_mask)
217 hw_status status = 0;
218 u32 irq_reg;
220 irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
222 MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg & ~irq_mask);
224 return status;
227 hw_status hw_mmu_event_enable(const void __iomem *base_address, u32 irq_mask)
229 hw_status status = 0;
230 u32 irq_reg;
232 irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
234 MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg | irq_mask);
236 return status;
239 hw_status hw_mmu_event_status(const void __iomem *base_address, u32 *irq_mask)
241 hw_status status = 0;
243 *irq_mask = MMUMMU_IRQSTATUS_READ_REGISTER32(base_address);
245 return status;
248 hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, u32 *addr)
250 hw_status status = 0;
252 /* read values from register */
253 *addr = MMUMMU_FAULT_AD_READ_REGISTER32(base_address);
255 return status;
258 hw_status hw_mmu_ttb_set(const void __iomem *base_address, u32 ttb_phys_addr)
260 hw_status status = 0;
261 u32 load_ttb;
263 load_ttb = ttb_phys_addr & ~0x7FUL;
264 /* write values to register */
265 MMUMMU_TTB_WRITE_REGISTER32(base_address, load_ttb);
267 return status;
270 hw_status hw_mmu_twl_enable(const void __iomem *base_address)
272 hw_status status = 0;
274 MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_SET);
276 return status;
279 hw_status hw_mmu_twl_disable(const void __iomem *base_address)
281 hw_status status = 0;
283 MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_CLEAR);
285 return status;
288 hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtual_addr,
289 u32 page_sz)
291 hw_status status = 0;
292 u32 virtual_addr_tag;
293 enum hw_mmu_page_size_t pg_size_bits;
295 switch (page_sz) {
296 case HW_PAGE_SIZE4KB:
297 pg_size_bits = HW_MMU_SMALL_PAGE;
298 break;
300 case HW_PAGE_SIZE64KB:
301 pg_size_bits = HW_MMU_LARGE_PAGE;
302 break;
304 case HW_PAGE_SIZE1MB:
305 pg_size_bits = HW_MMU_SECTION;
306 break;
308 case HW_PAGE_SIZE16MB:
309 pg_size_bits = HW_MMU_SUPERSECTION;
310 break;
312 default:
313 return -EINVAL;
316 /* Generate the 20-bit tag from virtual address */
317 virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
319 mmu_set_cam_entry(base_address, pg_size_bits, 0, 0, virtual_addr_tag);
321 mmu_flush_entry(base_address);
323 return status;
326 hw_status hw_mmu_tlb_add(const void __iomem *base_address,
327 u32 physical_addr,
328 u32 virtual_addr,
329 u32 page_sz,
330 u32 entry_num,
331 struct hw_mmu_map_attrs_t *map_attrs,
332 s8 preserved_bit, s8 valid_bit)
334 hw_status status = 0;
335 u32 lock_reg;
336 u32 virtual_addr_tag;
337 enum hw_mmu_page_size_t mmu_pg_size;
339 /*Check the input Parameters */
340 switch (page_sz) {
341 case HW_PAGE_SIZE4KB:
342 mmu_pg_size = HW_MMU_SMALL_PAGE;
343 break;
345 case HW_PAGE_SIZE64KB:
346 mmu_pg_size = HW_MMU_LARGE_PAGE;
347 break;
349 case HW_PAGE_SIZE1MB:
350 mmu_pg_size = HW_MMU_SECTION;
351 break;
353 case HW_PAGE_SIZE16MB:
354 mmu_pg_size = HW_MMU_SUPERSECTION;
355 break;
357 default:
358 return -EINVAL;
361 lock_reg = MMUMMU_LOCK_READ_REGISTER32(base_address);
363 /* Generate the 20-bit tag from virtual address */
364 virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
366 /* Write the fields in the CAM Entry Register */
367 mmu_set_cam_entry(base_address, mmu_pg_size, preserved_bit, valid_bit,
368 virtual_addr_tag);
370 /* Write the different fields of the RAM Entry Register */
371 /* endianism of the page,Element Size of the page (8, 16, 32, 64 bit) */
372 mmu_set_ram_entry(base_address, physical_addr, map_attrs->endianism,
373 map_attrs->element_size, map_attrs->mixed_size);
375 /* Update the MMU Lock Register */
376 /* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1) */
377 MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, entry_num);
379 /* Enable loading of an entry in TLB by writing 1
380 into LD_TLB_REG register */
381 MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, MMU_LOAD_TLB);
383 MMUMMU_LOCK_WRITE_REGISTER32(base_address, lock_reg);
385 return status;
388 hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
389 u32 physical_addr,
390 u32 virtual_addr,
391 u32 page_sz, struct hw_mmu_map_attrs_t *map_attrs)
393 hw_status status = 0;
394 u32 pte_addr, pte_val;
395 s32 num_entries = 1;
397 switch (page_sz) {
398 case HW_PAGE_SIZE4KB:
399 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
400 virtual_addr &
401 MMU_SMALL_PAGE_MASK);
402 pte_val =
403 ((physical_addr & MMU_SMALL_PAGE_MASK) |
404 (map_attrs->endianism << 9) | (map_attrs->
405 element_size << 4) |
406 (map_attrs->mixed_size << 11) | 2);
407 break;
409 case HW_PAGE_SIZE64KB:
410 num_entries = 16;
411 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
412 virtual_addr &
413 MMU_LARGE_PAGE_MASK);
414 pte_val =
415 ((physical_addr & MMU_LARGE_PAGE_MASK) |
416 (map_attrs->endianism << 9) | (map_attrs->
417 element_size << 4) |
418 (map_attrs->mixed_size << 11) | 1);
419 break;
421 case HW_PAGE_SIZE1MB:
422 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
423 virtual_addr &
424 MMU_SECTION_ADDR_MASK);
425 pte_val =
426 ((((physical_addr & MMU_SECTION_ADDR_MASK) |
427 (map_attrs->endianism << 15) | (map_attrs->
428 element_size << 10) |
429 (map_attrs->mixed_size << 17)) & ~0x40000) | 0x2);
430 break;
432 case HW_PAGE_SIZE16MB:
433 num_entries = 16;
434 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
435 virtual_addr &
436 MMU_SSECTION_ADDR_MASK);
437 pte_val =
438 (((physical_addr & MMU_SSECTION_ADDR_MASK) |
439 (map_attrs->endianism << 15) | (map_attrs->
440 element_size << 10) |
441 (map_attrs->mixed_size << 17)
442 ) | 0x40000 | 0x2);
443 break;
445 case HW_MMU_COARSE_PAGE_SIZE:
446 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
447 virtual_addr &
448 MMU_SECTION_ADDR_MASK);
449 pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1;
450 break;
452 default:
453 return -EINVAL;
456 while (--num_entries >= 0)
457 ((u32 *) pte_addr)[num_entries] = pte_val;
459 return status;
462 hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtual_addr, u32 page_size)
464 hw_status status = 0;
465 u32 pte_addr;
466 s32 num_entries = 1;
468 switch (page_size) {
469 case HW_PAGE_SIZE4KB:
470 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
471 virtual_addr &
472 MMU_SMALL_PAGE_MASK);
473 break;
475 case HW_PAGE_SIZE64KB:
476 num_entries = 16;
477 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
478 virtual_addr &
479 MMU_LARGE_PAGE_MASK);
480 break;
482 case HW_PAGE_SIZE1MB:
483 case HW_MMU_COARSE_PAGE_SIZE:
484 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
485 virtual_addr &
486 MMU_SECTION_ADDR_MASK);
487 break;
489 case HW_PAGE_SIZE16MB:
490 num_entries = 16;
491 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
492 virtual_addr &
493 MMU_SSECTION_ADDR_MASK);
494 break;
496 default:
497 return -EINVAL;
500 while (--num_entries >= 0)
501 ((u32 *) pte_addr)[num_entries] = 0;
503 return status;
506 /* mmu_flush_entry */
507 static hw_status mmu_flush_entry(const void __iomem *base_address)
509 hw_status status = 0;
510 u32 flush_entry_data = 0x1;
512 /* write values to register */
513 MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, flush_entry_data);
515 return status;
518 /* mmu_set_cam_entry */
519 static hw_status mmu_set_cam_entry(const void __iomem *base_address,
520 const u32 page_sz,
521 const u32 preserved_bit,
522 const u32 valid_bit,
523 const u32 virtual_addr_tag)
525 hw_status status = 0;
526 u32 mmu_cam_reg;
528 mmu_cam_reg = (virtual_addr_tag << 12);
529 mmu_cam_reg = (mmu_cam_reg) | (page_sz) | (valid_bit << 2) |
530 (preserved_bit << 3);
532 /* write values to register */
533 MMUMMU_CAM_WRITE_REGISTER32(base_address, mmu_cam_reg);
535 return status;
538 /* mmu_set_ram_entry */
539 static hw_status mmu_set_ram_entry(const void __iomem *base_address,
540 const u32 physical_addr,
541 enum hw_endianism_t endianism,
542 enum hw_element_size_t element_size,
543 enum hw_mmu_mixed_size_t mixed_size)
545 hw_status status = 0;
546 u32 mmu_ram_reg;
548 mmu_ram_reg = (physical_addr & MMU_ADDR_MASK);
549 mmu_ram_reg = (mmu_ram_reg) | ((endianism << 9) | (element_size << 7) |
550 (mixed_size << 6));
552 /* write values to register */
553 MMUMMU_RAM_WRITE_REGISTER32(base_address, mmu_ram_reg);
555 return status;
559 void hw_mmu_tlb_flush_all(const void __iomem *base)
561 __raw_writeb(1, base + MMU_GFLUSH);