/*
 * SRAM allocator for Blackfin on-chip memory
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/rtc.h>
#include <asm/blackfin.h>
#include <asm/mem_map.h>
#include "blackfin_sram.h"

/* the data structure for L1 scratchpad and DATA SRAM */
struct sram_piece {
	void *paddr;
	int size;
	pid_t pid;
	struct sram_piece *next;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);

#if L1_DATA_A_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
#endif

#if L1_DATA_B_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
#endif

#if L1_DATA_A_LENGTH || L1_DATA_B_LENGTH
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
#endif

#if L1_CODE_LENGTH != 0
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
#endif

#if L2_LENGTH != 0
static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
static struct sram_piece free_l2_sram_head, used_l2_sram_head;
#endif

static struct kmem_cache *sram_piece_cache;

/* L1 Scratchpad SRAM initialization function */
static void __init l1sram_init(void)
{
	unsigned int cpu;
	unsigned long reserve;

	reserve = sizeof(struct l1_scratch_task_info);

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_ssram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_ssram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize Scratchpad data SRAM.\n");
			return;
		}

		per_cpu(free_l1_ssram_head, cpu).next->paddr =
			(void *)get_l1_scratch_start_cpu(cpu) + reserve;
		per_cpu(free_l1_ssram_head, cpu).next->size =
			L1_SCRATCH_LENGTH - reserve;
		per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
		per_cpu(free_l1_ssram_head, cpu).next->next = NULL;

		per_cpu(used_l1_ssram_head, cpu).next = NULL;

		/* initialize the per-cpu lock */
		spin_lock_init(&per_cpu(l1sram_lock, cpu));
		printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
			L1_SCRATCH_LENGTH >> 10);
	}
}

static void __init l1_data_sram_init(void)
{
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	unsigned int cpu;
#endif
#if L1_DATA_A_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_A_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data A SRAM.\n");
			return;
		}

		per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
			(void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->size =
			L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
			L1_DATA_A_LENGTH >> 10,
			per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
	}
#endif
#if L1_DATA_B_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_B_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data B SRAM.\n");
			return;
		}

		per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
			(void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->size =
			L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
			L1_DATA_B_LENGTH >> 10,
			per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
	}
#endif

#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	/* initialize the per-cpu data SRAM locks */
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
#endif
}

static void __init l1_inst_sram_init(void)
{
#if L1_CODE_LENGTH != 0
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_inst_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n");
			return;
		}

		per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
			(void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->size =
			L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_inst_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
			L1_CODE_LENGTH >> 10,
			per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);

		/* initialize the per-cpu lock */
		spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
	}
#endif
}

static void __init l2_sram_init(void)
{
#if L2_LENGTH != 0
	free_l2_sram_head.next =
		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
	if (!free_l2_sram_head.next) {
		printk(KERN_INFO "Failed to initialize L2 SRAM.\n");
		return;
	}

	free_l2_sram_head.next->paddr =
		(void *)L2_START + (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->size =
		L2_LENGTH - (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->pid = 0;
	free_l2_sram_head.next->next = NULL;

	used_l2_sram_head.next = NULL;

	printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n",
		L2_LENGTH >> 10,
		free_l2_sram_head.next->size >> 10);

	/* initialize the lock */
	spin_lock_init(&l2_sram_lock);
#endif
}

static int __init bfin_sram_init(void)
{
	sram_piece_cache = kmem_cache_create("sram_piece_cache",
				sizeof(struct sram_piece),
				0, SLAB_PANIC, NULL);

	l1sram_init();
	l1_data_sram_init();
	l1_inst_sram_init();
	l2_sram_init();

	return 0;
}
pure_initcall(bfin_sram_init);

/* SRAM allocate function */
static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (size <= 0 || !pfree_head || !pused_head)
		return NULL;

	/* round the request up to the 4-byte allocation granularity */
	size = (size + 3) & ~3;

	pslot = pfree_head->next;
	plast = pfree_head;

	/* first-fit search for a free piece large enough */
	while (pslot != NULL && size > pslot->size) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return NULL;

	if (pslot->size == size) {
		/* exact fit: unlink the whole piece from the free list */
		plast->next = pslot->next;
		pavail = pslot;
	} else {
		/* split: carve the request off the front of the piece */
		pavail = kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);

		if (!pavail)
			return NULL;

		pavail->paddr = pslot->paddr;
		pavail->size = size;
		pslot->paddr += size;
		pslot->size -= size;
	}

	pavail->pid = current->pid;

	pslot = pused_head->next;
	plast = pused_head;

	/* insert the new piece into the used piece list */
	while (pslot != NULL && pavail->paddr < pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	pavail->next = pslot;
	plast->next = pavail;

	return pavail->paddr;
}

/* Allocate the largest available block. */
static void *_sram_alloc_max(struct sram_piece *pfree_head,
				struct sram_piece *pused_head,
				unsigned long *psize)
{
	struct sram_piece *pslot, *pmax;

	if (!pfree_head || !pused_head)
		return NULL;

	pmax = pslot = pfree_head->next;

	/* search for the largest free piece */
	while (pslot != NULL) {
		if (pslot->size > pmax->size)
			pmax = pslot;
		pslot = pslot->next;
	}

	if (!pmax)
		return NULL;

	*psize = pmax->size;

	return _sram_alloc(*psize, pfree_head, pused_head);
}

/* SRAM free function */
static int _sram_free(const void *addr,
			struct sram_piece *pfree_head,
			struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (!pfree_head || !pused_head)
		return -1;

	/* search the used list for the piece that owns this address */
	pslot = pused_head->next;
	plast = pused_head;

	while (pslot != NULL && pslot->paddr != addr) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return -1;

	plast->next = pslot->next;
	pavail = pslot;
	pavail->pid = 0;

	/* insert the piece back into the address-sorted free list */
	pslot = pfree_head->next;
	plast = pfree_head;

	while (pslot != NULL && addr > pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) {
		/* coalesce with the preceding free piece */
		plast->size += pavail->size;
		kmem_cache_free(sram_piece_cache, pavail);
	} else {
		pavail->next = plast->next;
		plast->next = pavail;
		plast = pavail;
	}

	if (pslot && plast->paddr + plast->size == pslot->paddr) {
		/* coalesce with the following free piece */
		plast->size += pslot->size;
		plast->next = pslot->next;
		kmem_cache_free(sram_piece_cache, pslot);
	}

	return 0;
}

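/*
 * Worked example (illustrative): with free pieces [0xA000, 0xA100) and
 * [0xA200, 0xA300), freeing the used piece [0xA100, 0xA200) first merges
 * backward into [0xA000, 0xA200), then forward into [0xA000, 0xA300),
 * returning both redundant sram_piece records to sram_piece_cache.
 */
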
int sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	if (addr >= (void *)get_l1_code_start()
		 && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
		return l1_inst_sram_free(addr);
	else
#endif
#if L1_DATA_A_LENGTH != 0
	if (addr >= (void *)get_l1_data_a_start()
		 && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
		return l1_data_A_sram_free(addr);
	else
#endif
#if L1_DATA_B_LENGTH != 0
	if (addr >= (void *)get_l1_data_b_start()
		 && addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
		return l1_data_B_sram_free(addr);
	else
#endif
#if L2_LENGTH != 0
	if (addr >= (void *)L2_START
		 && addr < (void *)(L2_START + L2_LENGTH))
		return l2_sram_free(addr);
	else
#endif
		return -1;
}
EXPORT_SYMBOL(sram_free);

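/*
 * Usage sketch (hypothetical caller, not part of the original file):
 * sram_free() dispatches on the address alone, so callers need not track
 * which bank an allocation came from:
 *
 *	void *buf = l1_data_sram_alloc(256);
 *	if (buf)
 *		sram_free(buf);		(routed to the L1 data free routine)
 */
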
void *l1_data_A_sram_alloc(size_t size)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* IRQ-safe lock around the per-cpu piece lists */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_alloc);

int l1_data_A_sram_free(const void *addr)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_free);

void *l1_data_B_sram_alloc(size_t size)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_alloc);

int l1_data_B_sram_free(const void *addr)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_free);

void *l1_data_sram_alloc(size_t size)
{
	void *addr = l1_data_A_sram_alloc(size);

	if (!addr)
		addr = l1_data_B_sram_alloc(size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_alloc);

void *l1_data_sram_zalloc(size_t size)
{
	void *addr = l1_data_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_zalloc);

int l1_data_sram_free(const void *addr)
{
	int ret;

	ret = l1_data_A_sram_free(addr);
	if (ret == -1)
		ret = l1_data_B_sram_free(addr);

	return ret;
}
EXPORT_SYMBOL(l1_data_sram_free);

void *l1_inst_sram_alloc(size_t size)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_alloc);

int l1_inst_sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_free);

/* L1 Scratchpad memory allocate function */
void *l1sram_alloc(size_t size)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return addr;
}

/* L1 Scratchpad "alloc max" function: hand out the largest free block */
void *l1sram_alloc_max(size_t *psize)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu), psize);

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return addr;
}

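/*
 * Usage sketch (hypothetical): a caller that wants whatever scratchpad
 * remains, e.g. to stage a task's stack, can do:
 *
 *	size_t avail;
 *	void *sp = l1sram_alloc_max(&avail);
 *	if (sp)
 *		... sp points at an "avail"-byte block ...
 */
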
/* L1 Scratchpad memory free function */
int l1sram_free(const void *addr)
{
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return ret;
}

void *l2_sram_alloc(size_t size)
{
#if L2_LENGTH != 0
	unsigned long flags;
	void *addr;

	spin_lock_irqsave(&l2_sram_lock, flags);

	addr = _sram_alloc(size, &free_l2_sram_head,
			&used_l2_sram_head);

	spin_unlock_irqrestore(&l2_sram_lock, flags);

	pr_debug("Allocated address in l2_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l2_sram_alloc);

void *l2_sram_zalloc(size_t size)
{
	void *addr = l2_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l2_sram_zalloc);

int l2_sram_free(const void *addr)
{
#if L2_LENGTH != 0
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&l2_sram_lock, flags);

	ret = _sram_free(addr, &free_l2_sram_head,
			&used_l2_sram_head);

	spin_unlock_irqrestore(&l2_sram_lock, flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l2_sram_free);

int sram_free_with_lsl(const void *addr)
{
	struct sram_list_struct *lsl, **tmp;
	struct mm_struct *mm = current->mm;
	int ret = -1;

	for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
		if ((*tmp)->addr == addr) {
			lsl = *tmp;
			ret = sram_free(addr);
			*tmp = lsl->next;
			kfree(lsl);
			break;
		}

	return ret;
}
EXPORT_SYMBOL(sram_free_with_lsl);

/* Allocate memory and keep in L1 SRAM List (lsl) so that the resources are
 * tracked.  These are designed for userspace so that when a process exits,
 * we can safely reap their resources.
 */
void *sram_alloc_with_lsl(size_t size, unsigned long flags)
{
	void *addr = NULL;
	struct sram_list_struct *lsl = NULL;
	struct mm_struct *mm = current->mm;

	lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL);
	if (!lsl)
		return NULL;

	if (flags & L1_INST_SRAM)
		addr = l1_inst_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_A_SRAM))
		addr = l1_data_A_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_B_SRAM))
		addr = l1_data_B_sram_alloc(size);

	if (addr == NULL && (flags & L2_SRAM))
		addr = l2_sram_alloc(size);

	if (addr == NULL) {
		kfree(lsl);
		return NULL;
	}

	lsl->addr = addr;
	lsl->length = size;
	lsl->next = mm->context.sram_list;
	mm->context.sram_list = lsl;

	return addr;
}
EXPORT_SYMBOL(sram_alloc_with_lsl);

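/*
 * Usage sketch (hypothetical): the flags argument is a preference mask
 * tried in the fixed order above (instruction, data A, data B, L2), so a
 * caller that prefers L1 data but will settle for L2 might request:
 *
 *	addr = sram_alloc_with_lsl(len, L1_DATA_A_SRAM | L1_DATA_B_SRAM | L2_SRAM);
 *
 * The piece is recorded on current->mm->context.sram_list and reaped
 * automatically when the process exits.
 */
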
#ifdef CONFIG_PROC_FS
/* Once we get a real allocator, we'll throw all of this away.
 * Until then, we need some sort of visibility into the L1 alloc.
 */
/* Need to keep each line of output the same length.  Currently, that is
 * 44 bytes (including newline).
 */
static int _sram_proc_read(char *buf, int *len, int count, const char *desc,
		struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot;

	if (!pfree_head || !pused_head)
		return -1;

	*len += sprintf(&buf[*len], "--- SRAM %-14s Size   PID State     \n", desc);

	/* walk the used list first */
	pslot = pused_head->next;

	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "ALLOCATED");

		pslot = pslot->next;
	}

	/* then the free list */
	pslot = pfree_head->next;

	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "FREE");

		pslot = pslot->next;
	}

	return 0;
}

static int sram_proc_read(char *buf, char **start, off_t offset, int count,
		int *eof, void *data)
{
	int len = 0;
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		if (_sram_proc_read(buf, &len, count, "Scratchpad",
			&per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)))
			goto not_done;
#if L1_DATA_A_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data A",
			&per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu)))
			goto not_done;
#endif
#if L1_DATA_B_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data B",
			&per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu)))
			goto not_done;
#endif
#if L1_CODE_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Instruction",
			&per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu)))
			goto not_done;
#endif
	}
#if L2_LENGTH != 0
	if (_sram_proc_read(buf, &len, count, "L2", &free_l2_sram_head,
		&used_l2_sram_head))
		goto not_done;
#endif
	*eof = 1;
 not_done:
	return len;
}

static int __init sram_proc_init(void)
{
	struct proc_dir_entry *ptr;

	ptr = create_proc_entry("sram", S_IFREG | S_IRUGO, NULL);
	if (!ptr) {
		printk(KERN_WARNING "unable to create /proc/sram\n");
		return -1;
	}
	ptr->read_proc = sram_proc_read;
	return 0;
}
late_initcall(sram_proc_init);
#endif