/*
 * arch/blackfin/mm/sram-alloc.c - SRAM allocator for Blackfin on-chip memory
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <asm/blackfin.h>
#include <asm/mem_map.h>
#include "blackfin_sram.h"
/* the data structure for L1 scratchpad and DATA SRAM */
struct sram_piece {
        void *paddr;
        int size;
        pid_t pid;
        struct sram_piece *next;
};
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);

#if L1_DATA_A_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
#endif

#if L1_DATA_B_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
#endif

#if L1_DATA_A_LENGTH || L1_DATA_B_LENGTH
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
#endif

#if L1_CODE_LENGTH != 0
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
#endif

#if L2_LENGTH != 0
static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
static struct sram_piece free_l2_sram_head, used_l2_sram_head;
#endif

static struct kmem_cache *sram_piece_cache;
/* L1 Scratchpad SRAM initialization function */
static void __init l1sram_init(void)
{
        unsigned int cpu;
        unsigned long reserve;

#ifdef CONFIG_SMP
        reserve = 0;
#else
        reserve = sizeof(struct l1_scratch_task_info);
#endif

        for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
                per_cpu(free_l1_ssram_head, cpu).next =
                        kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
                if (!per_cpu(free_l1_ssram_head, cpu).next) {
                        printk(KERN_INFO "Failed to initialize Scratchpad data SRAM.\n");
                        return;
                }

                per_cpu(free_l1_ssram_head, cpu).next->paddr = (void *)get_l1_scratch_start_cpu(cpu) + reserve;
                per_cpu(free_l1_ssram_head, cpu).next->size = L1_SCRATCH_LENGTH - reserve;
                per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
                per_cpu(free_l1_ssram_head, cpu).next->next = NULL;

                per_cpu(used_l1_ssram_head, cpu).next = NULL;

                /* initialize the per-CPU lock */
                spin_lock_init(&per_cpu(l1sram_lock, cpu));

                printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
                        L1_SCRATCH_LENGTH >> 10);
        }
}
static void __init l1_data_sram_init(void)
{
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
        unsigned int cpu;
#endif
#if L1_DATA_A_LENGTH != 0
        for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
                per_cpu(free_l1_data_A_sram_head, cpu).next =
                        kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
                if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
                        printk(KERN_INFO "Failed to initialize L1 Data A SRAM.\n");
                        return;
                }

                per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
                        (void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
                per_cpu(free_l1_data_A_sram_head, cpu).next->size =
                        L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
                per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
                per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;

                per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;

                printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
                        L1_DATA_A_LENGTH >> 10,
                        per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
        }
#endif
#if L1_DATA_B_LENGTH != 0
        for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
                per_cpu(free_l1_data_B_sram_head, cpu).next =
                        kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
                if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
                        printk(KERN_INFO "Failed to initialize L1 Data B SRAM.\n");
                        return;
                }

                per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
                        (void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
                per_cpu(free_l1_data_B_sram_head, cpu).next->size =
                        L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
                per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
                per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;

                per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;

                printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
                        L1_DATA_B_LENGTH >> 10,
                        per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
        }
#endif

#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
        /* initialize the per-CPU lock */
        for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
                spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
#endif
}
static void __init l1_inst_sram_init(void)
{
#if L1_CODE_LENGTH != 0
        unsigned int cpu;
        for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
                per_cpu(free_l1_inst_sram_head, cpu).next =
                        kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
                if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
                        printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n");
                        return;
                }

                per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
                        (void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
                per_cpu(free_l1_inst_sram_head, cpu).next->size =
                        L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
                per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
                per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;

                per_cpu(used_l1_inst_sram_head, cpu).next = NULL;

                printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
                        L1_CODE_LENGTH >> 10,
                        per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);

                /* initialize the per-CPU lock */
                spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
        }
#endif
}
static void __init l2_sram_init(void)
{
#if L2_LENGTH != 0
        free_l2_sram_head.next =
                kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
        if (!free_l2_sram_head.next) {
                printk(KERN_INFO "Failed to initialize L2 SRAM.\n");
                return;
        }

        free_l2_sram_head.next->paddr =
                (void *)L2_START + (_ebss_l2 - _stext_l2);
        free_l2_sram_head.next->size =
                L2_LENGTH - (_ebss_l2 - _stext_l2);
        free_l2_sram_head.next->pid = 0;
        free_l2_sram_head.next->next = NULL;

        used_l2_sram_head.next = NULL;

        printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n",
                L2_LENGTH >> 10,
                free_l2_sram_head.next->size >> 10);

        /* initialize the lock */
        spin_lock_init(&l2_sram_lock);
#endif
}
static int __init bfin_sram_init(void)
{
        sram_piece_cache = kmem_cache_create("sram_piece_cache",
                                sizeof(struct sram_piece),
                                0, SLAB_PANIC, NULL);

        l1sram_init();
        l1_data_sram_init();
        l1_inst_sram_init();
        l2_sram_init();

        return 0;
}
pure_initcall(bfin_sram_init);
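
/*
 * Note: bfin_sram_init() is registered as a pure_initcall (the earliest
 * initcall level), so the free lists above are already populated by the
 * time core/device initcalls run and start calling the allocators below.
 */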
/* SRAM allocate function */
static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,
                struct sram_piece *pused_head)
{
        struct sram_piece *pslot, *plast, *pavail;

        if (size <= 0 || !pfree_head || !pused_head)
                return NULL;

        /* Align the size */
        size = (size + 3) & ~3;

        pslot = pfree_head->next;
        plast = pfree_head;

        /* search an available piece slot */
        while (pslot != NULL && size > pslot->size) {
                plast = pslot;
                pslot = pslot->next;
        }

        if (!pslot)
                return NULL;

        if (pslot->size == size) {
                plast->next = pslot->next;
                pavail = pslot;
        } else {
                /* use atomic so our L1 allocator can be used atomically */
                pavail = kmem_cache_alloc(sram_piece_cache, GFP_ATOMIC);

                if (!pavail)
                        return NULL;

                pavail->paddr = pslot->paddr;
                pavail->size = size;
                pslot->paddr += size;
                pslot->size -= size;
        }

        pavail->pid = current->pid;

        pslot = pused_head->next;
        plast = pused_head;

        /* insert the new piece into the used piece list */
        while (pslot != NULL && pavail->paddr < pslot->paddr) {
                plast = pslot;
                pslot = pslot->next;
        }

        pavail->next = pslot;
        plast->next = pavail;

        return pavail->paddr;
}
/* Allocate the largest available block. */
static void *_sram_alloc_max(struct sram_piece *pfree_head,
                struct sram_piece *pused_head,
                unsigned long *psize)
{
        struct sram_piece *pslot, *pmax;

        if (!pfree_head || !pused_head)
                return NULL;

        pmax = pslot = pfree_head->next;

        /* search an available piece slot */
        while (pslot != NULL) {
                if (pslot->size > pmax->size)
                        pmax = pslot;
                pslot = pslot->next;
        }

        if (!pmax)
                return NULL;

        *psize = pmax->size;

        return _sram_alloc(*psize, pfree_head, pused_head);
}
/* SRAM free function */
static int _sram_free(const void *addr,
                struct sram_piece *pfree_head,
                struct sram_piece *pused_head)
{
        struct sram_piece *pslot, *plast, *pavail;

        if (!pfree_head || !pused_head)
                return -1;

        /* search the relevant memory slot */
        pslot = pused_head->next;
        plast = pused_head;

        /* search an available piece slot */
        while (pslot != NULL && pslot->paddr != addr) {
                plast = pslot;
                pslot = pslot->next;
        }

        if (!pslot)
                return -1;

        plast->next = pslot->next;
        pavail = pslot;
        pavail->pid = 0;

        /* insert free pieces back to the free list */
        pslot = pfree_head->next;
        plast = pfree_head;

        while (pslot != NULL && addr > pslot->paddr) {
                plast = pslot;
                pslot = pslot->next;
        }

        if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) {
                plast->size += pavail->size;
                kmem_cache_free(sram_piece_cache, pavail);
        } else {
                pavail->next = plast->next;
                plast->next = pavail;
                plast = pavail;
        }

        if (pslot && plast->paddr + plast->size == pslot->paddr) {
                plast->size += pslot->size;
                plast->next = pslot->next;
                kmem_cache_free(sram_piece_cache, pslot);
        }

        return 0;
}
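
/*
 * Summary of the helpers above: _sram_alloc() is a simple first-fit
 * allocator that walks the free list, takes the first piece large enough
 * for the (4-byte aligned) request and splits it when it is bigger;
 * _sram_free() puts the piece back into the address-ordered free list and
 * merges it with the previous and/or following piece when they are
 * physically contiguous.
 */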
int sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
        if (addr >= (void *)get_l1_code_start()
                 && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
                return l1_inst_sram_free(addr);
        else
#endif
#if L1_DATA_A_LENGTH != 0
        if (addr >= (void *)get_l1_data_a_start()
                 && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
                return l1_data_A_sram_free(addr);
        else
#endif
#if L1_DATA_B_LENGTH != 0
        if (addr >= (void *)get_l1_data_b_start()
                 && addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
                return l1_data_B_sram_free(addr);
        else
#endif
#if L2_LENGTH != 0
        if (addr >= (void *)L2_START
                 && addr < (void *)(L2_START + L2_LENGTH))
                return l2_sram_free(addr);
        else
#endif
        return -1;
}
EXPORT_SYMBOL(sram_free);
void *l1_data_A_sram_alloc(size_t size)
{
#if L1_DATA_A_LENGTH != 0
        unsigned long flags;
        void *addr;
        unsigned int cpu;

        cpu = smp_processor_id();
        /* lock the free/used lists (IRQ-safe) */
        spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

        addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
                        &per_cpu(used_l1_data_A_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

        pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n",
                 (long unsigned int)addr, size);

        return addr;
#else
        return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_alloc);
int l1_data_A_sram_free(const void *addr)
{
#if L1_DATA_A_LENGTH != 0
        unsigned long flags;
        int ret;
        unsigned int cpu;

        cpu = smp_processor_id();
        /* lock the free/used lists (IRQ-safe) */
        spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

        ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
                        &per_cpu(used_l1_data_A_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

        return ret;
#else
        return -1;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_free);
void *l1_data_B_sram_alloc(size_t size)
{
#if L1_DATA_B_LENGTH != 0
        unsigned long flags;
        void *addr;
        unsigned int cpu;

        cpu = smp_processor_id();
        /* lock the free/used lists (IRQ-safe) */
        spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

        addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
                        &per_cpu(used_l1_data_B_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

        pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n",
                 (long unsigned int)addr, size);

        return addr;
#else
        return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_alloc);
int l1_data_B_sram_free(const void *addr)
{
#if L1_DATA_B_LENGTH != 0
        unsigned long flags;
        int ret;
        unsigned int cpu;

        cpu = smp_processor_id();
        /* lock the free/used lists (IRQ-safe) */
        spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

        ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
                        &per_cpu(used_l1_data_B_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

        return ret;
#else
        return -1;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_free);
void *l1_data_sram_alloc(size_t size)
{
        void *addr = l1_data_A_sram_alloc(size);

        if (!addr)
                addr = l1_data_B_sram_alloc(size);

        return addr;
}
EXPORT_SYMBOL(l1_data_sram_alloc);

void *l1_data_sram_zalloc(size_t size)
{
        void *addr = l1_data_sram_alloc(size);

        if (addr)
                memset(addr, 0x00, size);

        return addr;
}
EXPORT_SYMBOL(l1_data_sram_zalloc);

int l1_data_sram_free(const void *addr)
{
        int ret;
        ret = l1_data_A_sram_free(addr);
        if (ret == -1)
                ret = l1_data_B_sram_free(addr);
        return ret;
}
EXPORT_SYMBOL(l1_data_sram_free);
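
/*
 * Illustrative usage sketch (not part of this file): a driver that wants a
 * zeroed scratch buffer in L1 data SRAM could pair the helpers above like
 * this; the name "buf" and the 256-byte size are made up for the example.
 *
 *      void *buf = l1_data_sram_zalloc(256);
 *      if (buf) {
 *              ...use the buffer...
 *              l1_data_sram_free(buf);
 *      }
 */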
void *l1_inst_sram_alloc(size_t size)
{
#if L1_CODE_LENGTH != 0
        unsigned long flags;
        void *addr;
        unsigned int cpu;

        cpu = smp_processor_id();
        /* lock the free/used lists (IRQ-safe) */
        spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

        addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
                        &per_cpu(used_l1_inst_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

        pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n",
                 (long unsigned int)addr, size);

        return addr;
#else
        return NULL;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_alloc);
int l1_inst_sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
        unsigned long flags;
        int ret;
        unsigned int cpu;

        cpu = smp_processor_id();
        /* lock the free/used lists (IRQ-safe) */
        spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

        ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
                        &per_cpu(used_l1_inst_sram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

        return ret;
#else
        return -1;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_free);
/* L1 Scratchpad memory allocate function */
void *l1sram_alloc(size_t size)
{
        unsigned long flags;
        void *addr;
        unsigned int cpu;

        cpu = smp_processor_id();
        /* lock the free/used lists (IRQ-safe) */
        spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

        addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
                        &per_cpu(used_l1_ssram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

        return addr;
}
/* L1 Scratchpad memory allocate-max function */
void *l1sram_alloc_max(size_t *psize)
{
        unsigned long flags;
        void *addr;
        unsigned int cpu;

        cpu = smp_processor_id();
        /* lock the free/used lists (IRQ-safe) */
        spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

        addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
                        &per_cpu(used_l1_ssram_head, cpu), psize);

        spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

        return addr;
}
/* L1 Scratchpad memory free function */
int l1sram_free(const void *addr)
{
        unsigned long flags;
        int ret;
        unsigned int cpu;

        cpu = smp_processor_id();
        /* lock the free/used lists (IRQ-safe) */
        spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

        ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
                        &per_cpu(used_l1_ssram_head, cpu));

        spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

        return ret;
}
void *l2_sram_alloc(size_t size)
{
#if L2_LENGTH != 0
        unsigned long flags;
        void *addr;

        /* lock the free/used lists (IRQ-safe) */
        spin_lock_irqsave(&l2_sram_lock, flags);

        addr = _sram_alloc(size, &free_l2_sram_head,
                        &used_l2_sram_head);

        spin_unlock_irqrestore(&l2_sram_lock, flags);

        pr_debug("Allocated address in l2_sram_alloc is 0x%lx+0x%lx\n",
                 (long unsigned int)addr, size);

        return addr;
#else
        return NULL;
#endif
}
EXPORT_SYMBOL(l2_sram_alloc);
void *l2_sram_zalloc(size_t size)
{
        void *addr = l2_sram_alloc(size);

        if (addr)
                memset(addr, 0x00, size);

        return addr;
}
EXPORT_SYMBOL(l2_sram_zalloc);
int l2_sram_free(const void *addr)
{
#if L2_LENGTH != 0
        unsigned long flags;
        int ret;

        /* lock the free/used lists (IRQ-safe) */
        spin_lock_irqsave(&l2_sram_lock, flags);

        ret = _sram_free(addr, &free_l2_sram_head,
                        &used_l2_sram_head);

        spin_unlock_irqrestore(&l2_sram_lock, flags);

        return ret;
#else
        return -1;
#endif
}
EXPORT_SYMBOL(l2_sram_free);
int sram_free_with_lsl(const void *addr)
{
        struct sram_list_struct *lsl, **tmp;
        struct mm_struct *mm = current->mm;
        int ret = -1;

        for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
                if ((*tmp)->addr == addr) {
                        lsl = *tmp;
                        ret = sram_free(addr);
                        *tmp = lsl->next;
                        kfree(lsl);
                        break;
                }

        return ret;
}
EXPORT_SYMBOL(sram_free_with_lsl);
/* Allocate memory and keep in L1 SRAM List (lsl) so that the resources are
 * tracked.  These are designed for userspace so that when a process exits,
 * we can safely reap their resources.
 */
void *sram_alloc_with_lsl(size_t size, unsigned long flags)
{
        void *addr = NULL;
        struct sram_list_struct *lsl = NULL;
        struct mm_struct *mm = current->mm;

        lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL);
        if (!lsl)
                return NULL;

        if (flags & L1_INST_SRAM)
                addr = l1_inst_sram_alloc(size);

        if (addr == NULL && (flags & L1_DATA_A_SRAM))
                addr = l1_data_A_sram_alloc(size);

        if (addr == NULL && (flags & L1_DATA_B_SRAM))
                addr = l1_data_B_sram_alloc(size);

        if (addr == NULL && (flags & L2_SRAM))
                addr = l2_sram_alloc(size);

        if (addr == NULL) {
                kfree(lsl);
                return NULL;
        }

        lsl->addr = addr;
        lsl->length = size;
        lsl->next = mm->context.sram_list;
        mm->context.sram_list = lsl;
        return addr;
}
EXPORT_SYMBOL(sram_alloc_with_lsl);
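
/*
 * Illustrative usage sketch (not part of this file): code running on behalf
 * of a user process (e.g. an ioctl handler) might hand out SRAM through the
 * lsl-tracked variants so it is reaped when the process exits; the flag
 * combination and "len" are made up for the example.
 *
 *      void *p = sram_alloc_with_lsl(len, L1_DATA_A_SRAM | L2_SRAM);
 *      if (!p)
 *              return -ENOMEM;
 *      ...use p, then optionally...
 *      sram_free_with_lsl(p);
 */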
#ifdef CONFIG_PROC_FS
/* Once we get a real allocator, we'll throw all of this away.
 * Until then, we need some sort of visibility into the L1 alloc.
 */
/* Need to keep line of output the same.  Currently, that is 44 bytes
 * (including newline).
 */
static int _sram_proc_read(char *buf, int *len, int count, const char *desc,
                struct sram_piece *pfree_head,
                struct sram_piece *pused_head)
{
        struct sram_piece *pslot;

        if (!pfree_head || !pused_head)
                return -1;

        *len += sprintf(&buf[*len], "--- SRAM %-14s Size PID State \n", desc);

        /* search the relevant memory slot */
        pslot = pused_head->next;

        while (pslot != NULL) {
                *len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
                        pslot->paddr, pslot->paddr + pslot->size,
                        pslot->size, pslot->pid, "ALLOCATED");

                pslot = pslot->next;
        }

        pslot = pfree_head->next;

        while (pslot != NULL) {
                *len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
                        pslot->paddr, pslot->paddr + pslot->size,
                        pslot->size, pslot->pid, "FREE");

                pslot = pslot->next;
        }

        return 0;
}
static int sram_proc_read(char *buf, char **start, off_t offset, int count,
                int *eof, void *data)
{
        int len = 0;
        unsigned int cpu;

        for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
                if (_sram_proc_read(buf, &len, count, "Scratchpad",
                        &per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)))
                        goto not_done;
#if L1_DATA_A_LENGTH != 0
                if (_sram_proc_read(buf, &len, count, "L1 Data A",
                        &per_cpu(free_l1_data_A_sram_head, cpu),
                        &per_cpu(used_l1_data_A_sram_head, cpu)))
                        goto not_done;
#endif
#if L1_DATA_B_LENGTH != 0
                if (_sram_proc_read(buf, &len, count, "L1 Data B",
                        &per_cpu(free_l1_data_B_sram_head, cpu),
                        &per_cpu(used_l1_data_B_sram_head, cpu)))
                        goto not_done;
#endif
#if L1_CODE_LENGTH != 0
                if (_sram_proc_read(buf, &len, count, "L1 Instruction",
                        &per_cpu(free_l1_inst_sram_head, cpu),
                        &per_cpu(used_l1_inst_sram_head, cpu)))
                        goto not_done;
#endif
        }
#if L2_LENGTH != 0
        if (_sram_proc_read(buf, &len, count, "L2", &free_l2_sram_head,
                &used_l2_sram_head))
                goto not_done;
#endif
        *eof = 1;
 not_done:
        return len;
}
static int __init sram_proc_init(void)
{
        struct proc_dir_entry *ptr;
        ptr = create_proc_entry("sram", S_IFREG | S_IRUGO, NULL);
        if (!ptr) {
                printk(KERN_WARNING "unable to create /proc/sram\n");
                return -1;
        }
        ptr->read_proc = sram_proc_read;
        return 0;
}
late_initcall(sram_proc_init);
#endif
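
/*
 * With CONFIG_PROC_FS enabled, the pools can be inspected at runtime, e.g.
 * "cat /proc/sram", which prints one "start-end size pid ALLOCATED|FREE"
 * line per piece for each region handled above.
 */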