/*
 * File: arch/blackfin/mm/sram-alloc.c
 * Description: SRAM allocator for Blackfin L1 and L2 memory
 *
 * Copyright 2004-2008 Analog Devices Inc.
 *
 * Bugs: Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/rtc.h>
#include <asm/blackfin.h>
#include <asm/mem_map.h>
#include "blackfin_sram.h"
/* the data structure for L1 scratchpad and DATA SRAM */
struct sram_piece {
	void *paddr;
	int size;
	pid_t pid;
	struct sram_piece *next;
};
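/*
 * Each SRAM region is managed as a pair of singly linked lists of
 * sram_piece nodes: a "free" list, kept in ascending address order so
 * adjacent pieces can be merged on free, and a "used" list recording
 * the owning PID of every allocation.  The heads declared below are
 * dummy nodes; only their ->next pointers are meaningful.
 */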
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);

#if L1_DATA_A_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
#endif

#if L1_DATA_B_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
#endif

#if L1_DATA_A_LENGTH || L1_DATA_B_LENGTH
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
#endif

#if L1_CODE_LENGTH != 0
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
#endif

static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
static struct sram_piece free_l2_sram_head, used_l2_sram_head;

static struct kmem_cache *sram_piece_cache;
/* L1 Scratchpad SRAM initialization function */
static void __init l1sram_init(void)
{
	unsigned int cpu;
	unsigned long reserve;

#ifdef CONFIG_SMP
	reserve = 0;
#else
	reserve = sizeof(struct l1_scratch_task_info);
#endif

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_ssram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_ssram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize Scratchpad data SRAM.\n");
			return;
		}

		per_cpu(free_l1_ssram_head, cpu).next->paddr =
			(void *)get_l1_scratch_start_cpu(cpu) + reserve;
		per_cpu(free_l1_ssram_head, cpu).next->size =
			L1_SCRATCH_LENGTH - reserve;
		per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
		per_cpu(free_l1_ssram_head, cpu).next->next = NULL;

		per_cpu(used_l1_ssram_head, cpu).next = NULL;

		/* initialize the lock */
		spin_lock_init(&per_cpu(l1sram_lock, cpu));
		printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
			L1_SCRATCH_LENGTH >> 10);
	}
}
static void __init l1_data_sram_init(void)
{
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	unsigned int cpu;
#endif
#if L1_DATA_A_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_A_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data A SRAM.\n");
			return;
		}

		per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
			(void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->size =
			L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
			L1_DATA_A_LENGTH >> 10,
			per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
	}
#endif
#if L1_DATA_B_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_B_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data B SRAM.\n");
			return;
		}

		per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
			(void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->size =
			L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
			L1_DATA_B_LENGTH >> 10,
			per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
	}
#endif

#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	/* initialize the locks */
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
#endif
}
static void __init l1_inst_sram_init(void)
{
#if L1_CODE_LENGTH != 0
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_inst_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n");
			return;
		}

		per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
			(void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->size =
			L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_inst_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
			L1_CODE_LENGTH >> 10,
			per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);

		/* initialize the lock */
		spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
	}
#endif
}
static void __init l2_sram_init(void)
{
#if L2_LENGTH != 0
	free_l2_sram_head.next =
		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
	if (!free_l2_sram_head.next) {
		printk(KERN_INFO "Failed to initialize L2 SRAM.\n");
		return;
	}

	free_l2_sram_head.next->paddr =
		(void *)L2_START + (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->size =
		L2_LENGTH - (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->pid = 0;
	free_l2_sram_head.next->next = NULL;

	used_l2_sram_head.next = NULL;

	printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n",
		L2_LENGTH >> 10,
		free_l2_sram_head.next->size >> 10);
#endif

	/* initialize the lock */
	spin_lock_init(&l2_sram_lock);
}
static int __init bfin_sram_init(void)
{
	sram_piece_cache = kmem_cache_create("sram_piece_cache",
				sizeof(struct sram_piece),
				0, SLAB_PANIC, NULL);

	l1sram_init();
	l1_data_sram_init();
	l1_inst_sram_init();
	l2_sram_init();

	return 0;
}
pure_initcall(bfin_sram_init);
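/*
 * Note: pure_initcall runs before every other initcall level, so the
 * piece lists above are set up before any driver initcall can reach
 * the sram_* allocation interfaces.
 */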
/* SRAM allocate function */
static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (size <= 0 || !pfree_head || !pused_head)
		return NULL;

	/* round the size up to a 4-byte boundary */
	size = (size + 3) & ~3;

	pslot = pfree_head->next;
	plast = pfree_head;

	/* first fit: find the first free piece large enough */
	while (pslot != NULL && size > pslot->size) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return NULL;

	if (pslot->size == size) {
		/* exact fit: unlink the whole piece from the free list */
		plast->next = pslot->next;
		pavail = pslot;
	} else {
		/* split: carve the request off the front of the piece */
		pavail = kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);

		if (!pavail)
			return NULL;

		pavail->paddr = pslot->paddr;
		pavail->size = size;
		pslot->paddr += size;
		pslot->size -= size;
	}

	pavail->pid = current->pid;

	pslot = pused_head->next;
	plast = pused_head;

	/* insert the new piece into the used piece list */
	while (pslot != NULL && pavail->paddr < pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	pavail->next = pslot;
	plast->next = pavail;

	return pavail->paddr;
}
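/*
 * Illustrative walk-through (addresses are made up): with a free list
 * holding a single piece 0xff800000+0x100, _sram_alloc(0x60, ...)
 * carves 0xff800000+0x60 off the front, moves it to the used list
 * tagged with the caller's PID, and leaves 0xff800060+0xa0 on the
 * free list.  A later _sram_alloc(0xa0, ...) is an exact fit and
 * moves the remaining piece across whole.
 */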
/* Allocate the largest available block. */
static void *_sram_alloc_max(struct sram_piece *pfree_head,
				struct sram_piece *pused_head,
				unsigned long *psize)
{
	struct sram_piece *pslot, *pmax;

	if (!pfree_head || !pused_head)
		return NULL;

	pmax = pslot = pfree_head->next;

	/* search for the largest free piece */
	while (pslot != NULL) {
		if (pslot->size > pmax->size)
			pmax = pslot;
		pslot = pslot->next;
	}

	if (!pmax)
		return NULL;

	*psize = pmax->size;

	return _sram_alloc(*psize, pfree_head, pused_head);
}
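/*
 * _sram_alloc_max() reports the size it picked back through *psize, so
 * callers such as l1sram_alloc_max() below can tell how large the
 * returned block actually is.
 */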
/* SRAM free function */
static int _sram_free(const void *addr,
			struct sram_piece *pfree_head,
			struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (!pfree_head || !pused_head)
		return -1;

	/* find the piece holding this address in the used list */
	pslot = pused_head->next;
	plast = pused_head;

	while (pslot != NULL && pslot->paddr != addr) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return -1;

	plast->next = pslot->next;
	pavail = pslot;
	pavail->pid = 0;

	/* insert the freed piece back into the free list */
	pslot = pfree_head->next;
	plast = pfree_head;

	while (pslot != NULL && addr > pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) {
		/* merge with the preceding free piece */
		plast->size += pavail->size;
		kmem_cache_free(sram_piece_cache, pavail);
	} else {
		pavail->next = plast->next;
		plast->next = pavail;
		plast = pavail;
	}

	if (pslot && plast->paddr + plast->size == pslot->paddr) {
		/* merge with the following free piece */
		plast->size += pslot->size;
		plast->next = pslot->next;
		kmem_cache_free(sram_piece_cache, pslot);
	}

	return 0;
}
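/*
 * Illustrative example (made-up addresses): freeing 0xff800060+0xa0
 * while 0xff800000+0x60 and 0xff800100+0x40 sit on the free list
 * triggers both merge checks above, collapsing all three pieces into
 * a single free piece 0xff800000+0x140.
 */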
int sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	if (addr >= (void *)get_l1_code_start()
		 && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
		return l1_inst_sram_free(addr);
	else
#endif
#if L1_DATA_A_LENGTH != 0
	if (addr >= (void *)get_l1_data_a_start()
		 && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
		return l1_data_A_sram_free(addr);
	else
#endif
#if L1_DATA_B_LENGTH != 0
	if (addr >= (void *)get_l1_data_b_start()
		 && addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
		return l1_data_B_sram_free(addr);
	else
#endif
#if L2_LENGTH != 0
	if (addr >= (void *)L2_START
		 && addr < (void *)(L2_START + L2_LENGTH))
		return l2_sram_free(addr);
	else
#endif
		return -1;
}
EXPORT_SYMBOL(sram_free);
void *l1_data_A_sram_alloc(size_t size)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = get_cpu();
	/* take the lock */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	/* release the lock */
	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
	put_cpu();

	pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_alloc);
int l1_data_A_sram_free(const void *addr)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = get_cpu();
	/* take the lock */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	/* release the lock */
	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
	put_cpu();

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_free);
void *l1_data_B_sram_alloc(size_t size)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = get_cpu();
	/* take the lock */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	/* release the lock */
	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
	put_cpu();

	pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_alloc);
int l1_data_B_sram_free(const void *addr)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = get_cpu();
	/* take the lock */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	/* release the lock */
	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
	put_cpu();

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_free);
void *l1_data_sram_alloc(size_t size)
{
	void *addr = l1_data_A_sram_alloc(size);

	if (!addr)
		addr = l1_data_B_sram_alloc(size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_alloc);
void *l1_data_sram_zalloc(size_t size)
{
	void *addr = l1_data_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_zalloc);
int l1_data_sram_free(const void *addr)
{
	int ret;

	ret = l1_data_A_sram_free(addr);
	if (ret == -1)
		ret = l1_data_B_sram_free(addr);
	return ret;
}
EXPORT_SYMBOL(l1_data_sram_free);
void *l1_inst_sram_alloc(size_t size)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = get_cpu();
	/* take the lock */
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	/* release the lock */
	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);
	put_cpu();

	pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_alloc);
int l1_inst_sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = get_cpu();
	/* take the lock */
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	/* release the lock */
	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);
	put_cpu();

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_free);
/* L1 Scratchpad memory allocate function */
void *l1sram_alloc(size_t size)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = get_cpu();
	/* take the lock */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	/* release the lock */
	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
	put_cpu();

	return addr;
}
/* allocate the largest free block of L1 Scratchpad memory */
void *l1sram_alloc_max(size_t *psize)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = get_cpu();
	/* take the lock */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu), psize);

	/* release the lock */
	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
	put_cpu();

	return addr;
}
/* L1 Scratchpad memory free function */
int l1sram_free(const void *addr)
{
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = get_cpu();
	/* take the lock */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	/* release the lock */
	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
	put_cpu();

	return ret;
}
void *l2_sram_alloc(size_t size)
{
#if L2_LENGTH != 0
	unsigned long flags;
	void *addr;

	/* take the lock */
	spin_lock_irqsave(&l2_sram_lock, flags);

	addr = _sram_alloc(size, &free_l2_sram_head,
			&used_l2_sram_head);

	/* release the lock */
	spin_unlock_irqrestore(&l2_sram_lock, flags);

	pr_debug("Allocated address in l2_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l2_sram_alloc);
void *l2_sram_zalloc(size_t size)
{
	void *addr = l2_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l2_sram_zalloc);
int l2_sram_free(const void *addr)
{
#if L2_LENGTH != 0
	unsigned long flags;
	int ret;

	/* take the lock */
	spin_lock_irqsave(&l2_sram_lock, flags);

	ret = _sram_free(addr, &free_l2_sram_head,
			&used_l2_sram_head);

	/* release the lock */
	spin_unlock_irqrestore(&l2_sram_lock, flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l2_sram_free);
int sram_free_with_lsl(const void *addr)
{
	struct sram_list_struct *lsl, **tmp;
	struct mm_struct *mm = current->mm;

	for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
		if ((*tmp)->addr == addr)
			goto found;
	return -1;
found:
	lsl = *tmp;
	sram_free(addr);
	*tmp = lsl->next;
	kfree(lsl);

	return 0;
}
EXPORT_SYMBOL(sram_free_with_lsl);
/* Allocate memory and keep it in an L1 SRAM List (lsl) so that the resources
 * are tracked.  These are designed for userspace so that when a process
 * exits, we can safely reap their resources.
 */
void *sram_alloc_with_lsl(size_t size, unsigned long flags)
{
	void *addr = NULL;
	struct sram_list_struct *lsl = NULL;
	struct mm_struct *mm = current->mm;

	lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL);
	if (!lsl)
		return NULL;

	if (flags & L1_INST_SRAM)
		addr = l1_inst_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_A_SRAM))
		addr = l1_data_A_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_B_SRAM))
		addr = l1_data_B_sram_alloc(size);

	if (addr == NULL && (flags & L2_SRAM))
		addr = l2_sram_alloc(size);

	if (addr == NULL) {
		kfree(lsl);
		return NULL;
	}

	lsl->addr = addr;
	lsl->length = size;
	lsl->next = mm->context.sram_list;
	mm->context.sram_list = lsl;
	return addr;
}
EXPORT_SYMBOL(sram_alloc_with_lsl);
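/*
 * Illustrative use from a (hypothetical) caller: try L1 data SRAM first
 * and fall back to L2, then release the block through the same tracked
 * interface:
 *
 *	void *buf = sram_alloc_with_lsl(256,
 *			L1_DATA_A_SRAM | L1_DATA_B_SRAM | L2_SRAM);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	sram_free_with_lsl(buf);
 */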
#ifdef CONFIG_PROC_FS
/* Once we get a real allocator, we'll throw all of this away.
 * Until then, we need some sort of visibility into the L1 alloc.
 */
/* Each line of output must be the same length.  Currently, that is
 * 44 bytes (including the newline).
 */
static int _sram_proc_read(char *buf, int *len, int count, const char *desc,
		struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot;

	if (!pfree_head || !pused_head)
		return -1;

	*len += sprintf(&buf[*len], "--- SRAM %-14s Size   PID State     \n", desc);

	/* walk the used list first */
	pslot = pused_head->next;

	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "ALLOCATED");

		pslot = pslot->next;
	}

	/* then the free list */
	pslot = pfree_head->next;

	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "FREE");

		pslot = pslot->next;
	}

	return 0;
}
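/*
 * A /proc/sram report therefore looks roughly like this (values are
 * invented for illustration):
 *
 *	--- SRAM Scratchpad     Size   PID State
 *	ffb00000-ffb00100        256   112 ALLOCATED
 *	ffb00100-ffb01000       3840     0 FREE
 */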
static int sram_proc_read(char *buf, char **start, off_t offset, int count,
		int *eof, void *data)
{
	int len = 0;
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		if (_sram_proc_read(buf, &len, count, "Scratchpad",
			&per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)))
			goto not_done;
#if L1_DATA_A_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data A",
			&per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu)))
			goto not_done;
#endif
#if L1_DATA_B_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data B",
			&per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu)))
			goto not_done;
#endif
#if L1_CODE_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Instruction",
			&per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu)))
			goto not_done;
#endif
	}
	if (_sram_proc_read(buf, &len, count, "L2", &free_l2_sram_head,
		&used_l2_sram_head))
		goto not_done;

	*eof = 1;
 not_done:
	return len;
}
static int __init sram_proc_init(void)
{
	struct proc_dir_entry *ptr;

	ptr = create_proc_entry("sram", S_IFREG | S_IRUGO, NULL);
	if (!ptr) {
		printk(KERN_WARNING "unable to create /proc/sram\n");
		return -1;
	}
	ptr->read_proc = sram_proc_read;
	return 0;
}
late_initcall(sram_proc_init);
#endif