/*
 * SRAM allocator for Blackfin on-chip memory
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/rtc.h>
#include <asm/blackfin.h>
#include <asm/mem_map.h>
#include "blackfin_sram.h"
/* the data structure for L1 scratchpad and DATA SRAM */
struct sram_piece {
	void *paddr;
	int size;
	pid_t pid;
	struct sram_piece *next;
};
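/*
 * Each SRAM bank is tracked by two singly linked lists hung off dummy
 * head nodes: the free list is kept sorted by ascending paddr so that
 * freed pieces can be coalesced with their neighbors, while the used
 * list is kept sorted by descending paddr.
 */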
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);
#if L1_DATA_A_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
#endif
#if L1_DATA_B_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
#endif
#if L1_DATA_A_LENGTH || L1_DATA_B_LENGTH
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
#endif
#if L1_CODE_LENGTH != 0
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
#endif
#if L2_LENGTH != 0
static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
static struct sram_piece free_l2_sram_head, used_l2_sram_head;
#endif

static struct kmem_cache *sram_piece_cache;
/* L1 Scratchpad SRAM initialization function */
static void __init l1sram_init(void)
{
	unsigned int cpu;
	unsigned long reserve;

#ifdef CONFIG_SMP
	reserve = 0;
#else
	reserve = sizeof(struct l1_scratch_task_info);
#endif

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_ssram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_ssram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize Scratchpad data SRAM.\n");
			return;
		}

		per_cpu(free_l1_ssram_head, cpu).next->paddr =
			(void *)get_l1_scratch_start_cpu(cpu) + reserve;
		per_cpu(free_l1_ssram_head, cpu).next->size =
			L1_SCRATCH_LENGTH - reserve;
		per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
		per_cpu(free_l1_ssram_head, cpu).next->next = NULL;

		per_cpu(used_l1_ssram_head, cpu).next = NULL;

		/* initialize the lock protecting this CPU's scratchpad lists */
		spin_lock_init(&per_cpu(l1sram_lock, cpu));
		printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
			L1_SCRATCH_LENGTH >> 10);
	}
}
static void __init l1_data_sram_init(void)
{
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	unsigned int cpu;
#endif
#if L1_DATA_A_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_A_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data A SRAM.\n");
			return;
		}

		per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
			(void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->size =
			L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
			L1_DATA_A_LENGTH >> 10,
			per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
	}
#endif
#if L1_DATA_B_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_B_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data B SRAM.\n");
			return;
		}

		per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
			(void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->size =
			L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
			L1_DATA_B_LENGTH >> 10,
			per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
	}
#endif

#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	/* initialize the per-cpu lock protecting both data banks */
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
#endif
}
static void __init l1_inst_sram_init(void)
{
#if L1_CODE_LENGTH != 0
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_inst_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Instruction SRAM.\n");
			return;
		}

		per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
			(void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->size =
			L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_inst_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
			L1_CODE_LENGTH >> 10,
			per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);

		/* initialize the lock protecting this CPU's instruction lists */
		spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
	}
#endif
}
static void __init l2_sram_init(void)
{
#if L2_LENGTH != 0
	free_l2_sram_head.next =
		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
	if (!free_l2_sram_head.next) {
		printk(KERN_INFO "Failed to initialize L2 SRAM.\n");
		return;
	}

	free_l2_sram_head.next->paddr =
		(void *)L2_START + (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->size =
		L2_LENGTH - (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->pid = 0;
	free_l2_sram_head.next->next = NULL;

	used_l2_sram_head.next = NULL;

	printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n",
		L2_LENGTH >> 10,
		free_l2_sram_head.next->size >> 10);

	/* initialize the lock protecting the L2 lists */
	spin_lock_init(&l2_sram_lock);
#endif
}
static int __init bfin_sram_init(void)
{
	sram_piece_cache = kmem_cache_create("sram_piece_cache",
				sizeof(struct sram_piece),
				0, SLAB_PANIC, NULL);

	l1sram_init();
	l1_data_sram_init();
	l1_inst_sram_init();
	l2_sram_init();

	return 0;
}
pure_initcall(bfin_sram_init);
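/*
 * pure_initcall runs at the earliest initcall level, so the SRAM free
 * lists are set up before any driver initcall can request on-chip
 * memory.
 */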
/* SRAM allocate function */
static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (size <= 0 || !pfree_head || !pused_head)
		return NULL;

	/* round the request up to a 4-byte multiple */
	size = (size + 3) & ~3;

	pslot = pfree_head->next;
	plast = pfree_head;

	/* first fit: search for an available piece slot */
	while (pslot != NULL && size > pslot->size) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return NULL;

	if (pslot->size == size) {
		/* an exact fit: move the whole piece to the used list */
		plast->next = pslot->next;
		pavail = pslot;
	} else {
		/* callers hold a spinlock with IRQs off, so the slab
		 * allocation must not sleep: use GFP_ATOMIC, not GFP_KERNEL */
		pavail = kmem_cache_alloc(sram_piece_cache, GFP_ATOMIC);

		if (!pavail)
			return NULL;

		pavail->paddr = pslot->paddr;
		pavail->size = size;
		pslot->paddr += size;
		pslot->size -= size;
	}

	pavail->pid = current->pid;

	pslot = pused_head->next;
	plast = pused_head;

	/* insert the new piece into the used list, kept sorted by address */
	while (pslot != NULL && pavail->paddr < pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	pavail->next = pslot;
	plast->next = pavail;

	return pavail->paddr;
}
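/*
 * Note the round-up above: a 10-byte request, for example, consumes
 * 12 bytes of the bank, since (10 + 3) & ~3 == 12.
 */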
/* Allocate the largest available block. */
static void *_sram_alloc_max(struct sram_piece *pfree_head,
				struct sram_piece *pused_head,
				unsigned long *psize)
{
	struct sram_piece *pslot, *pmax;

	if (!pfree_head || !pused_head)
		return NULL;

	pmax = pslot = pfree_head->next;

	/* search for the largest free piece */
	while (pslot != NULL) {
		if (pslot->size > pmax->size)
			pmax = pslot;
		pslot = pslot->next;
	}

	if (!pmax)
		return NULL;

	*psize = pmax->size;

	return _sram_alloc(*psize, pfree_head, pused_head);
}
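/*
 * On success, *psize reports how large the returned block actually is,
 * so callers such as l1sram_alloc_max() can tell how much they got.
 */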
/* SRAM free function */
static int _sram_free(const void *addr,
			struct sram_piece *pfree_head,
			struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (!pfree_head || !pused_head)
		return -1;

	/* search the relevant memory slot */
	pslot = pused_head->next;
	plast = pused_head;

	/* find the used piece that starts at addr */
	while (pslot != NULL && pslot->paddr != addr) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return -1;

	plast->next = pslot->next;
	pavail = pslot;
	pavail->pid = 0;

	/* insert the freed piece back into the free list */
	pslot = pfree_head->next;
	plast = pfree_head;

	while (pslot != NULL && addr > pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) {
		/* merge with the preceding free piece */
		plast->size += pavail->size;
		kmem_cache_free(sram_piece_cache, pavail);
	} else {
		pavail->next = plast->next;
		plast->next = pavail;
		plast = pavail;
	}

	if (pslot && plast->paddr + plast->size == pslot->paddr) {
		/* merge with the following free piece */
		plast->size += pslot->size;
		plast->next = pslot->next;
		kmem_cache_free(sram_piece_cache, pslot);
	}

	return 0;
}
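/*
 * The two merge checks above coalesce the freed piece with its lower
 * and upper neighbors in the free list, so adjacent frees collapse back
 * into a single large piece instead of permanently fragmenting the bank.
 */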
int sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	if (addr >= (void *)get_l1_code_start()
		 && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
		return l1_inst_sram_free(addr);
	else
#endif
#if L1_DATA_A_LENGTH != 0
	if (addr >= (void *)get_l1_data_a_start()
		 && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
		return l1_data_A_sram_free(addr);
	else
#endif
#if L1_DATA_B_LENGTH != 0
	if (addr >= (void *)get_l1_data_b_start()
		 && addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
		return l1_data_B_sram_free(addr);
	else
#endif
#if L2_LENGTH != 0
	if (addr >= (void *)L2_START
		 && addr < (void *)(L2_START + L2_LENGTH))
		return l2_sram_free(addr);
	else
#endif
		return -1;
}
EXPORT_SYMBOL(sram_free);
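/*
 * sram_free() dispatches purely on the address range, so a caller may
 * pass any pointer obtained from one of the *_sram_alloc() variants
 * without remembering which bank it came from.
 */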
void *l1_data_A_sram_alloc(size_t size)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* take this CPU's data-bank lock */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_alloc);
int l1_data_A_sram_free(const void *addr)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* take this CPU's data-bank lock */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_free);
void *l1_data_B_sram_alloc(size_t size)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* take this CPU's data-bank lock */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_alloc);
int l1_data_B_sram_free(const void *addr)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* take this CPU's data-bank lock */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_free);
void *l1_data_sram_alloc(size_t size)
{
	void *addr = l1_data_A_sram_alloc(size);

	if (!addr)
		addr = l1_data_B_sram_alloc(size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_alloc);
void *l1_data_sram_zalloc(size_t size)
{
	void *addr = l1_data_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_zalloc);
int l1_data_sram_free(const void *addr)
{
	int ret;

	ret = l1_data_A_sram_free(addr);
	if (ret == -1)
		ret = l1_data_B_sram_free(addr);

	return ret;
}
EXPORT_SYMBOL(l1_data_sram_free);
void *l1_inst_sram_alloc(size_t size)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* take this CPU's instruction-bank lock */
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_alloc);
int l1_inst_sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* take this CPU's instruction-bank lock */
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_free);
/* L1 Scratchpad memory allocate function */
void *l1sram_alloc(size_t size)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* take this CPU's scratchpad lock */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return addr;
}
/* L1 Scratchpad memory: allocate the largest free block */
void *l1sram_alloc_max(size_t *psize)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* take this CPU's scratchpad lock */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu), psize);

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return addr;
}
/* L1 Scratchpad memory free function */
int l1sram_free(const void *addr)
{
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* take this CPU's scratchpad lock */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return ret;
}
void *l2_sram_alloc(size_t size)
{
#if L2_LENGTH != 0
	unsigned long flags;
	void *addr;

	/* take the L2 lock */
	spin_lock_irqsave(&l2_sram_lock, flags);

	addr = _sram_alloc(size, &free_l2_sram_head,
		&used_l2_sram_head);

	spin_unlock_irqrestore(&l2_sram_lock, flags);

	pr_debug("Allocated address in l2_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l2_sram_alloc);
void *l2_sram_zalloc(size_t size)
{
	void *addr = l2_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l2_sram_zalloc);
int l2_sram_free(const void *addr)
{
#if L2_LENGTH != 0
	unsigned long flags;
	int ret;

	/* take the L2 lock */
	spin_lock_irqsave(&l2_sram_lock, flags);

	ret = _sram_free(addr, &free_l2_sram_head,
		&used_l2_sram_head);

	spin_unlock_irqrestore(&l2_sram_lock, flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l2_sram_free);
int sram_free_with_lsl(const void *addr)
{
	struct sram_list_struct *lsl, **tmp;
	struct mm_struct *mm = current->mm;

	for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
		if ((*tmp)->addr == addr)
			goto found;
	return -1;
found:
	lsl = *tmp;
	sram_free(addr);
	*tmp = lsl->next;
	kfree(lsl);

	return 0;
}
EXPORT_SYMBOL(sram_free_with_lsl);
/* Allocate memory and keep it in an L1 SRAM List (lsl) so that the
 * resources are tracked.  These are designed for userspace, so that when
 * a process exits we can safely reap its resources.
 */
void *sram_alloc_with_lsl(size_t size, unsigned long flags)
{
	void *addr = NULL;
	struct sram_list_struct *lsl = NULL;
	struct mm_struct *mm = current->mm;

	lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL);
	if (!lsl)
		return NULL;

	/* try the requested banks in order: instruction, data A, data B, L2 */
	if (flags & L1_INST_SRAM)
		addr = l1_inst_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_A_SRAM))
		addr = l1_data_A_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_B_SRAM))
		addr = l1_data_B_sram_alloc(size);

	if (addr == NULL && (flags & L2_SRAM))
		addr = l2_sram_alloc(size);

	if (addr == NULL) {
		kfree(lsl);
		return NULL;
	}
	lsl->addr = addr;
	lsl->length = size;
	lsl->next = mm->context.sram_list;
	mm->context.sram_list = lsl;
	return addr;
}
EXPORT_SYMBOL(sram_alloc_with_lsl);
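/*
 * Example (hypothetical caller, sketch only): carve 512 bytes out of
 * L1 data bank A, falling back to L2 SRAM, and release it by address:
 *
 *	void *buf = sram_alloc_with_lsl(512, L1_DATA_A_SRAM | L2_SRAM);
 *	if (buf) {
 *		... use buf ...
 *		sram_free_with_lsl(buf);
 *	}
 */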
#ifdef CONFIG_PROC_FS
/* Once we get a real allocator, we'll throw all of this away.
 * Until then, we need some sort of visibility into the L1 alloc.
 */
/* Each line of output must be the same length; currently that is
 * 44 bytes (including the newline).
 */
static int _sram_proc_read(char *buf, int *len, int count, const char *desc,
		struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot;

	if (!pfree_head || !pused_head)
		return -1;

	*len += sprintf(&buf[*len], "--- SRAM %-14s Size   PID State     \n", desc);

	/* walk the used list first */
	pslot = pused_head->next;

	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "ALLOCATED");

		pslot = pslot->next;
	}

	pslot = pfree_head->next;

	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "FREE");

		pslot = pslot->next;
	}

	return 0;
}
static int sram_proc_read(char *buf, char **start, off_t offset, int count,
		int *eof, void *data)
{
	int len = 0;
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		if (_sram_proc_read(buf, &len, count, "Scratchpad",
			&per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)))
			goto not_done;
#if L1_DATA_A_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data A",
			&per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu)))
			goto not_done;
#endif
#if L1_DATA_B_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data B",
			&per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu)))
			goto not_done;
#endif
#if L1_CODE_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Instruction",
			&per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu)))
			goto not_done;
#endif
	}
#if L2_LENGTH != 0
	if (_sram_proc_read(buf, &len, count, "L2", &free_l2_sram_head,
		&used_l2_sram_head))
		goto not_done;
#endif
	*eof = 1;
 not_done:
	return len;
}

static int __init sram_proc_init(void)
{
	struct proc_dir_entry *ptr;

	ptr = create_proc_entry("sram", S_IFREG | S_IRUGO, NULL);
	if (!ptr) {
		printk(KERN_WARNING "unable to create /proc/sram\n");
		return -1;
	}
	ptr->read_proc = sram_proc_read;
	return 0;
}
late_initcall(sram_proc_init);
#endif