GUI: Fix Tomato RAF theme for all builds. Compilation typo.
[tomato.git] / release / src-rt-6.x.4708 / linux / linux-2.6.36 / arch / mips / kernel / vpe.c
blob 4907fe0d267e9dd783f1be6bda7bae998543a697
1 /*
2 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
4 * This program is free software; you can distribute it and/or modify it
5 * under the terms of the GNU General Public License (Version 2) as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * for more details.
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
19 * VPE support module
21 * Provides support for loading a MIPS SP program on VPE1.
22 * The SP environment is rather simple: no TLBs. It needs to be relocatable
23 * (or partially linked). You should initialise your stack in the startup
24 * code. This loader looks for the symbol __start and sets up
25 * execution to resume from there. The MIPS SDE kit contains suitable examples.
27 * To load and run, simply cat a SP 'program file' to /dev/vpe1.
28 * i.e. cat spapp >/dev/vpe1.
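/*
 * Illustrative sketch only (not part of this file): the smallest useful
 * RP program, matching the conventions described above. The names main
 * and _stack_top are placeholders; $a2/$a3 receive ntcs and memsize
 * from vpe_run() below.
 *
 *	extern int main(void);
 *	void __attribute__((noreturn)) __start(void)
 *	{
 *		__asm__ __volatile__("la $sp, _stack_top"); // stack first
 *		main();
 *		for (;;)
 *			;	// nothing to return to
 *	}
 *
 * Build it relocatable (or partially linked), then: cat spapp >/dev/vpe1
 */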
30 #include <linux/kernel.h>
31 #include <linux/device.h>
32 #include <linux/module.h>
33 #include <linux/fs.h>
34 #include <linux/init.h>
35 #include <asm/uaccess.h>
36 #include <linux/slab.h>
37 #include <linux/list.h>
38 #include <linux/vmalloc.h>
39 #include <linux/elf.h>
40 #include <linux/seq_file.h>
41 #include <linux/syscalls.h>
42 #include <linux/moduleloader.h>
43 #include <linux/interrupt.h>
44 #include <linux/poll.h>
45 #include <linux/bootmem.h>
46 #include <asm/mipsregs.h>
47 #include <asm/mipsmtregs.h>
48 #include <asm/cacheflush.h>
49 #include <asm/atomic.h>
50 #include <asm/cpu.h>
51 #include <asm/mips_mt.h>
52 #include <asm/processor.h>
53 #include <asm/system.h>
54 #include <asm/vpe.h>
55 #include <asm/kspd.h>
57 typedef void *vpe_handle;
59 #ifndef ARCH_SHF_SMALL
60 #define ARCH_SHF_SMALL 0
61 #endif
63 /* If this is set, the section belongs in the init part of the module */
64 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
67 * The number of TCs and VPEs physically available on the core
69 static int hw_tcs, hw_vpes;
70 static char module_name[] = "vpe";
71 static int major;
72 static const int minor = 1; /* fixed for now */
74 #ifdef CONFIG_MIPS_APSP_KSPD
75 static struct kspd_notifications kspd_events;
76 static int kspd_events_reqd;
77 #endif
79 * Size of private kernel buffer for ELF headers and sections
81 #define P_SIZE (256 * 1024)
84 * Maximum number of VPEs and length limit for the saved working directory
86 #define MAX_VPES 16
87 #define VPE_PATH_MAX 256
89 enum vpe_state {
90 VPE_STATE_UNUSED = 0,
91 VPE_STATE_INUSE,
92 VPE_STATE_RUNNING
95 enum tc_state {
96 TC_STATE_UNUSED = 0,
97 TC_STATE_INUSE,
98 TC_STATE_RUNNING,
99 TC_STATE_DYNAMIC
102 enum load_state {
103 LOAD_STATE_EHDR,
104 LOAD_STATE_PHDR,
105 LOAD_STATE_SHDR,
106 LOAD_STATE_PIMAGE,
107 LOAD_STATE_TRAILER,
108 LOAD_STATE_DONE,
109 LOAD_STATE_ERROR
112 struct vpe {
113 enum vpe_state state;
115 /* (device) minor associated with this vpe */
116 int minor;
118 /* elfloader stuff */
119 unsigned long offset; /* File offset into input stream */
120 void *load_addr;
121 unsigned long copied;
122 char *pbuffer;
123 unsigned long pbsize;
124 /* Program loading state */
125 enum load_state l_state;
126 Elf_Ehdr *l_ehdr;
127 struct elf_phdr *l_phdr;
128 unsigned int l_phlen;
129 Elf_Shdr *l_shdr;
130 unsigned int l_shlen;
131 int *l_phsort; /* Sorted index list of program headers */
132 int l_segoff; /* Offset into current program segment */
133 int l_cur_seg; /* Indirect index of segment currently being loaded */
134 unsigned int l_progminad;
135 unsigned int l_progmaxad;
136 unsigned int l_trailer;
138 unsigned int uid, gid;
139 char cwd[VPE_PATH_MAX];
141 unsigned long __start;
143 /* tc's associated with this vpe */
144 struct list_head tc;
146 /* The list of vpe's */
147 struct list_head list;
149 /* legacy shared symbol address */
150 void *shared_ptr;
152 /* shared area descriptor array address */
153 struct vpe_shared_area *shared_areas;
155 /* the list of who wants to know when something major happens */
156 struct list_head notify;
158 unsigned int ntcs;
161 struct tc {
162 enum tc_state state;
163 int index;
165 struct vpe *pvpe; /* parent VPE */
166 struct list_head tc; /* The list of TC's with this VPE */
167 struct list_head list; /* The global list of tc's */
170 struct {
171 spinlock_t vpe_list_lock;
172 struct list_head vpe_list; /* Virtual processing elements */
173 spinlock_t tc_list_lock;
174 struct list_head tc_list; /* Thread contexts */
175 } vpecontrol = {
176 .vpe_list_lock = SPIN_LOCK_UNLOCKED,
177 .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
178 .tc_list_lock = SPIN_LOCK_UNLOCKED,
179 .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
182 static void release_progmem(void *ptr);
184 * Values and state associated with publishing shared memory areas
187 #define N_PUB_AREAS 4
189 static struct vpe_shared_area published_vpe_area[N_PUB_AREAS] = {
190 {VPE_SHARED_RESERVED, 0},
191 {VPE_SHARED_RESERVED, 0},
192 {VPE_SHARED_RESERVED, 0},
193 {VPE_SHARED_RESERVED, 0} };
195 /* get the vpe associated with this minor */
196 static struct vpe *get_vpe(int minor)
198 struct vpe *res, *v;
200 if (!cpu_has_mipsmt)
201 return NULL;
203 res = NULL;
204 spin_lock(&vpecontrol.vpe_list_lock);
205 list_for_each_entry(v, &vpecontrol.vpe_list, list) {
206 if (v->minor == minor) {
207 res = v;
208 break;
211 spin_unlock(&vpecontrol.vpe_list_lock);
213 return res;
216 /* get the tc associated with this minor */
217 static struct tc *get_tc(int index)
219 struct tc *res, *t;
221 res = NULL;
222 spin_lock(&vpecontrol.tc_list_lock);
223 list_for_each_entry(t, &vpecontrol.tc_list, list) {
224 if (t->index == index) {
225 res = t;
226 break;
229 spin_unlock(&vpecontrol.tc_list_lock);
231 return res;
235 /* allocate a vpe and associate it with this minor (or index) */
236 static struct vpe *alloc_vpe(int minor)
238 struct vpe *v;
240 if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL)
241 return NULL;
242 printk(KERN_DEBUG "Used kzalloc to allocate %d bytes at %x\n",
243 sizeof(struct vpe), (unsigned int)v);
244 INIT_LIST_HEAD(&v->tc);
245 spin_lock(&vpecontrol.vpe_list_lock);
246 list_add_tail(&v->list, &vpecontrol.vpe_list);
247 spin_unlock(&vpecontrol.vpe_list_lock);
249 INIT_LIST_HEAD(&v->notify);
250 v->minor = minor;
252 return v;
255 /* allocate a tc. At startup only tc0 is running; all others can be halted. */
256 static struct tc *alloc_tc(int index)
258 struct tc *tc;
260 if ((tc = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL)
261 goto out;
262 printk(KERN_DEBUG "Used kzalloc to allocate %d bytes at %x\n",
263 sizeof(struct tc), (unsigned int)tc);
264 INIT_LIST_HEAD(&tc->tc);
265 tc->index = index;
267 spin_lock(&vpecontrol.tc_list_lock);
268 list_add_tail(&tc->list, &vpecontrol.tc_list);
269 spin_unlock(&vpecontrol.tc_list_lock);
271 out:
272 return tc;
275 /* clean up and free everything */
276 static void release_vpe(struct vpe *v)
278 list_del(&v->list);
279 if (v->load_addr)
280 release_progmem(v);
281 printk(KERN_DEBUG "Used kfree to free memory at %x\n",
282 (unsigned int)v->l_phsort);
283 kfree(v->l_phsort);
284 printk(KERN_DEBUG "Used kfree to free memory at %x\n",
285 (unsigned int)v);
286 kfree(v);
289 static void __maybe_unused dump_mtregs(void)
291 unsigned long val;
293 val = read_c0_config3();
294 printk("config3 0x%lx MT %ld\n", val,
295 (val & CONFIG3_MT) >> CONFIG3_MT_SHIFT);
297 val = read_c0_mvpcontrol();
298 printk("MVPControl 0x%lx, STLB %ld VPC %ld EVP %ld\n", val,
299 (val & MVPCONTROL_STLB) >> MVPCONTROL_STLB_SHIFT,
300 (val & MVPCONTROL_VPC) >> MVPCONTROL_VPC_SHIFT,
301 (val & MVPCONTROL_EVP));
303 val = read_c0_mvpconf0();
304 printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val,
305 (val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT,
306 val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT);
310 * The original APRP prototype assumed a single, unshared IRQ for
311 * cross-VPE interrupts, used by the RTLX code. But M3P networking
312 * and other future functions may need to share an IRQ, particularly
313 * in 34K/Malta configurations without an external interrupt controller.
314 * All cross-VPE interrupt users need to coordinate through shared
315 * functions here.
319 * It would be nice if I could just have this initialized to zero,
320 * but the patchcheck police won't hear of it...
323 static int xvpe_vector_set;
325 #define XVPE_INTR_OFFSET 0
327 static int xvpe_irq = MIPS_CPU_IRQ_BASE + XVPE_INTR_OFFSET;
329 static void xvpe_dispatch(void)
331 do_IRQ(xvpe_irq);
334 /* Name here is generic, as m3pnet.c could in principle be used by non-MIPS */
335 int arch_get_xcpu_irq(void)
338 * Some of this will ultimately become platform code,
339 * but for now, we're only targeting 34K/FPGA/Malta,
340 * and there's only one generic mechanism.
342 if (!xvpe_vector_set) {
344 * A more elaborate shared variable shouldn't be needed.
345 * Two initializations back-to-back should be harmless.
347 if (cpu_has_vint) {
348 set_vi_handler(XVPE_INTR_OFFSET, xvpe_dispatch);
349 xvpe_vector_set = 1;
350 } else {
351 printk(KERN_ERR "APRP requires vectored interrupts\n");
352 return -1;
356 return xvpe_irq;
358 EXPORT_SYMBOL(arch_get_xcpu_irq);
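/*
 * Sketch of a cross-VPE interrupt user (RTLX, M3P networking, ...)
 * claiming this IRQ; the handler and cookie names are hypothetical:
 *
 *	int irq = arch_get_xcpu_irq();
 *	if (irq >= 0)
 *		err = request_irq(irq, my_xvpe_handler, IRQF_SHARED,
 *				  "xvpe", my_dev_cookie);
 */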
360 int vpe_send_interrupt(int vpe, int inter)
362 unsigned long flags;
363 unsigned int vpeflags;
365 local_irq_save(flags);
366 vpeflags = dvpe();
369 * Initial version makes same simple-minded assumption
370 * as is implicit elsewhere in this module, that the
371 * only RP of interest is using the first non-Linux TC.
372 * We ignore the parameters provided by the caller!
374 settc(tclimit);
376 * In 34K/Malta, the only cross-VPE interrupts possible
377 * are done by setting SWINT bits in Cause, of which there
378 * are two. SMTC uses SW1 for a multiplexed class of IPIs,
379 * and this mechanism should be generalized to APRP and use
380 * the same protocol. Until that's implemented, send only
381 * SW0 here, regardless of requested type.
383 write_vpe_c0_cause(read_vpe_c0_cause() | C_SW0);
384 evpe(vpeflags);
385 local_irq_restore(flags);
386 return 1;
388 EXPORT_SYMBOL(vpe_send_interrupt);
389 /* Find some VPE program space */
390 static void *alloc_progmem(void *requested, unsigned long len)
392 void *addr;
394 #ifdef CONFIG_MIPS_VPE_LOADER_TOM
396 * This means you must tell Linux to use less memory than you
397 * physically have, for example by passing a mem= boot argument.
399 addr = pfn_to_kaddr(max_low_pfn);
400 if (requested != 0) {
401 if (requested >= addr)
402 addr = requested;
403 else
404 addr = 0;
406 if (addr != 0)
407 memset(addr, 0, len);
408 printk(KERN_DEBUG "pfn_to_kaddr returns %lu bytes of memory at %x\n",
409 len, (unsigned int)addr);
410 #else
411 if (requested != 0) {
412 /* If we have a target in mind, grab a 2x slice and hope... */
413 addr = kzalloc(len*2, GFP_KERNEL);
414 if ((requested >= addr) && (requested < (addr + len)))
415 addr = requested;
416 else
417 addr = 0;
418 } else {
419 /* simply grab some mem for now */
420 addr = kzalloc(len, GFP_KERNEL);
422 #endif
424 return addr;
427 static void release_progmem(void *ptr)
429 #ifndef CONFIG_MIPS_VPE_LOADER_TOM
430 kfree(ptr);
431 #endif
434 /* Update size with this section: return offset. */
435 static long get_offset(unsigned long *size, Elf_Shdr * sechdr)
437 long ret;
439 ret = ALIGN(*size, sechdr->sh_addralign ? : 1);
440 *size = ret + sechdr->sh_size;
441 return ret;
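/*
 * Worked example: with *size == 10 and a section having
 * sh_addralign == 8 and sh_size == 0x20, ALIGN rounds the running size
 * up to 16, the section is placed at offset 16, and *size becomes 0x30.
 */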
444 /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
445 might -- code, read-only data, read-write data, small data. Tally
446 sizes, and place the offsets into sh_entsize fields: high bit means it
447 belongs in init. */
448 static void layout_sections(struct module *mod, const Elf_Ehdr * hdr,
449 Elf_Shdr * sechdrs, const char *secstrings)
451 static unsigned long const masks[][2] = {
452 /* NOTE: all executable code must be the first section
453 * in this array; otherwise modify the text_size
454 * finder in the two loops below */
455 {SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL},
456 {SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL},
457 {SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL},
458 {ARCH_SHF_SMALL | SHF_ALLOC, 0}
460 unsigned int m, i;
462 for (i = 0; i < hdr->e_shnum; i++)
463 sechdrs[i].sh_entsize = ~0UL;
465 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
466 for (i = 0; i < hdr->e_shnum; ++i) {
467 Elf_Shdr *s = &sechdrs[i];
469 // || strncmp(secstrings + s->sh_name, ".init", 5) == 0)
470 if ((s->sh_flags & masks[m][0]) != masks[m][0]
471 || (s->sh_flags & masks[m][1])
472 || s->sh_entsize != ~0UL)
473 continue;
474 s->sh_entsize =
475 get_offset((unsigned long *)&mod->core_size, s);
478 if (m == 0)
479 mod->core_text_size = mod->core_size;
485 /* from module-elf32.c, but subverted a little */
487 struct mips_hi16 {
488 struct mips_hi16 *next;
489 Elf32_Addr *addr;
490 Elf32_Addr value;
493 static struct mips_hi16 *mips_hi16_list;
494 static unsigned int gp_offs, gp_addr;
496 static int apply_r_mips_none(struct module *me, uint32_t *location,
497 Elf32_Addr v)
499 return 0;
502 static int apply_r_mips_gprel16(struct module *me, uint32_t *location,
503 Elf32_Addr v)
505 int rel;
507 if( !(*location & 0xffff) ) {
508 rel = (int)v - gp_addr;
510 else {
511 /* .sbss + gp(relative) + offset */
512 /* kludge! */
513 rel = (int)(short)((int)v + gp_offs +
514 (int)(short)(*location & 0xffff) - gp_addr);
517 if( (rel > 32767) || (rel < -32768) ) {
518 printk(KERN_DEBUG "VPE loader: apply_r_mips_gprel16: "
519 "relative address 0x%x out of range of gp register\n",
520 rel);
521 return -ENOEXEC;
524 *location = (*location & 0xffff0000) | (rel & 0xffff);
526 return 0;
529 static int apply_r_mips_pc16(struct module *me, uint32_t *location,
530 Elf32_Addr v)
532 int rel;
533 rel = (((unsigned int)v - (unsigned int)location));
534 rel >>= 2; // because the offset is in _instructions_ not bytes.
535 rel -= 1; // and one instruction less due to the branch delay slot.
537 if( (rel > 32767) || (rel < -32768) ) {
538 printk(KERN_DEBUG "VPE loader: "
539 "apply_r_mips_pc16: relative address out of range 0x%x\n", rel);
540 return -ENOEXEC;
543 *location = (*location & 0xffff0000) | (rel & 0xffff);
545 return 0;
548 static int apply_r_mips_32(struct module *me, uint32_t *location,
549 Elf32_Addr v)
551 *location += v;
553 return 0;
556 static int apply_r_mips_26(struct module *me, uint32_t *location,
557 Elf32_Addr v)
559 if (v % 4) {
560 printk(KERN_DEBUG "VPE loader: apply_r_mips_26 "
561 " unaligned relocation\n");
562 return -ENOEXEC;
566 * Not desperately convinced this is a good check of an overflow condition
567 * anyway. But it gets in the way of handling undefined weak symbols which
568 * we want to set to zero.
569 * if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
570 * printk(KERN_ERR
571 * "module %s: relocation overflow\n",
572 * me->name);
573 * return -ENOEXEC;
577 *location = (*location & ~0x03ffffff) |
578 ((*location + (v >> 2)) & 0x03ffffff);
579 return 0;
582 static int apply_r_mips_hi16(struct module *me, uint32_t *location,
583 Elf32_Addr v)
585 struct mips_hi16 *n;
588 * We cannot relocate this one now because we don't know the value of
589 * the carry we need to add. Save the information, and let LO16 do the
590 * actual relocation.
592 n = kmalloc(sizeof *n, GFP_KERNEL);
593 printk(KERN_DEBUG "Used kmalloc to allocate %d bytes at %x\n",
594 sizeof(struct mips_hi16), (unsigned int)n);
595 if (!n)
596 return -ENOMEM;
598 n->addr = location;
599 n->value = v;
600 n->next = mips_hi16_list;
601 mips_hi16_list = n;
603 return 0;
606 static int apply_r_mips_lo16(struct module *me, uint32_t *location,
607 Elf32_Addr v)
609 unsigned long insnlo = *location;
610 Elf32_Addr val, vallo;
611 struct mips_hi16 *l, *next;
613 /* Sign extend the addend we extract from the lo insn. */
614 vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
616 if (mips_hi16_list != NULL) {
618 l = mips_hi16_list;
619 while (l != NULL) {
620 unsigned long insn;
623 * The value for the HI16 had best be the same.
625 if (v != l->value) {
626 printk(KERN_DEBUG "VPE loader: "
627 "apply_r_mips_lo16/hi16: \t"
628 "inconsistent value information\n");
629 goto out_free;
633 * Do the HI16 relocation. Note that we actually don't
634 * need to know anything about the LO16 itself, except
635 * where to find the low 16 bits of the addend needed
636 * by the LO16.
638 insn = *l->addr;
639 val = ((insn & 0xffff) << 16) + vallo;
640 val += v;
643 * Account for the sign extension that will happen in
644 * the low bits.
646 val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;
648 insn = (insn & ~0xffff) | val;
649 *l->addr = insn;
651 next = l->next;
652 printk(KERN_DEBUG "Used kfree to free memory at %x\n",
653 (unsigned int)l);
654 kfree(l);
655 l = next;
658 mips_hi16_list = NULL;
662 * Ok, we're done with the HI16 relocs. Now deal with the LO16.
664 val = v + vallo;
665 insnlo = (insnlo & ~0xffff) | (val & 0xffff);
666 *location = insnlo;
668 return 0;
670 out_free:
671 while (l != NULL) {
672 next = l->next;
673 kfree(l);
674 l = next;
676 mips_hi16_list = NULL;
678 return -ENOEXEC;
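/*
 * Worked example of the carry handled above: for a final address
 * v + vallo == 0x12348000 the LO16 field 0x8000 sign-extends to
 * -0x8000 at execution time, so the paired HI16 must become 0x1235
 * rather than 0x1234; the ((val & 0x8000) != 0) term supplies that +1.
 */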
681 static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
682 Elf32_Addr v) = {
683 [R_MIPS_NONE] = apply_r_mips_none,
684 [R_MIPS_32] = apply_r_mips_32,
685 [R_MIPS_26] = apply_r_mips_26,
686 [R_MIPS_HI16] = apply_r_mips_hi16,
687 [R_MIPS_LO16] = apply_r_mips_lo16,
688 [R_MIPS_GPREL16] = apply_r_mips_gprel16,
689 [R_MIPS_PC16] = apply_r_mips_pc16
692 static char *rstrs[] = {
693 [R_MIPS_NONE] = "MIPS_NONE",
694 [R_MIPS_32] = "MIPS_32",
695 [R_MIPS_26] = "MIPS_26",
696 [R_MIPS_HI16] = "MIPS_HI16",
697 [R_MIPS_LO16] = "MIPS_LO16",
698 [R_MIPS_GPREL16] = "MIPS_GPREL16",
699 [R_MIPS_PC16] = "MIPS_PC16"
702 static int apply_relocations(Elf32_Shdr *sechdrs,
703 const char *strtab,
704 unsigned int symindex,
705 unsigned int relsec,
706 struct module *me)
708 Elf32_Rel *rel = (void *) sechdrs[relsec].sh_addr;
709 Elf32_Sym *sym;
710 uint32_t *location;
711 unsigned int i;
712 Elf32_Addr v;
713 int res;
715 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
716 Elf32_Word r_info = rel[i].r_info;
718 /* This is where to make the change */
719 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
720 + rel[i].r_offset;
721 /* This is the symbol it is referring to */
722 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
723 + ELF32_R_SYM(r_info);
725 if (!sym->st_value) {
726 printk(KERN_DEBUG "%s: undefined weak symbol %s\n",
727 me->name, strtab + sym->st_name);
728 /* just print the warning, don't barf */
731 v = sym->st_value;
733 res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
734 if( res ) {
735 char *r = rstrs[ELF32_R_TYPE(r_info)];
736 printk(KERN_WARNING "VPE loader: .text+0x%x "
737 "relocation type %s for symbol \"%s\" failed\n",
738 rel[i].r_offset, r ? r : "UNKNOWN",
739 strtab + sym->st_name);
740 return res;
744 return 0;
747 void save_gp_address(unsigned int secbase, unsigned int rel)
749 gp_addr = secbase + rel;
750 gp_offs = gp_addr - (secbase & 0xffff0000);
752 /* end module-elf32.c */
756 /* Change all symbols so that st_value encodes the pointer directly. */
757 static void simplify_symbols(Elf_Shdr * sechdrs,
758 unsigned int symindex,
759 const char *strtab,
760 const char *secstrings,
761 unsigned int nsecs, struct module *mod)
763 Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
764 unsigned long secbase, bssbase = 0;
765 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
766 int size;
768 /* find the .bss section for COMMON symbols */
769 for (i = 0; i < nsecs; i++) {
770 if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0) {
771 bssbase = sechdrs[i].sh_addr;
772 break;
776 for (i = 1; i < n; i++) {
777 switch (sym[i].st_shndx) {
778 case SHN_COMMON:
779 /* Allocate space for the symbol in the .bss section.
780 st_value is currently size.
781 We want it to have the address of the symbol. */
783 size = sym[i].st_value;
784 sym[i].st_value = bssbase;
786 bssbase += size;
787 break;
789 case SHN_ABS:
790 /* Don't need to do anything */
791 break;
793 case SHN_UNDEF:
794 /* ret = -ENOENT; */
795 break;
797 case SHN_MIPS_SCOMMON:
798 printk(KERN_DEBUG "simplify_symbols: ignoring SHN_MIPS_SCOMMON "
799 "symbol <%s> st_shndx %d\n", strtab + sym[i].st_name,
800 sym[i].st_shndx);
801 // .sbss section
802 break;
804 default:
805 secbase = sechdrs[sym[i].st_shndx].sh_addr;
807 if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0) {
808 save_gp_address(secbase, sym[i].st_value);
811 sym[i].st_value += secbase;
812 break;
817 #ifdef DEBUG_ELFLOADER
818 static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex,
819 const char *strtab, struct module *mod)
821 Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
822 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
824 printk(KERN_DEBUG "dump_elfsymbols: n %d\n", n);
825 for (i = 1; i < n; i++) {
826 printk(KERN_DEBUG " i %d name <%s> 0x%x\n", i,
827 strtab + sym[i].st_name, sym[i].st_value);
830 #endif
832 /* We are prepared, so configure and start the VPE... */
833 static int vpe_run(struct vpe * v)
835 unsigned long flags, val, dmt_flag;
836 struct vpe_notifications *n;
837 unsigned int vpeflags;
838 struct tc *t;
840 /* check we are the Master VPE */
841 local_irq_save(flags);
842 val = read_c0_vpeconf0();
843 if (!(val & VPECONF0_MVP)) {
844 printk(KERN_WARNING
845 "VPE loader: only Master VPE's are allowed to configure MT\n");
846 local_irq_restore(flags);
848 return -1;
851 dmt_flag = dmt();
852 vpeflags = dvpe();
854 if (!list_empty(&v->tc)) {
855 if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
856 evpe(vpeflags);
857 emt(dmt_flag);
858 local_irq_restore(flags);
860 printk(KERN_WARNING
861 "VPE loader: TC %d is already in use.\n",
862 t->index);
863 return -ENOEXEC;
865 } else {
866 evpe(vpeflags);
867 emt(dmt_flag);
868 local_irq_restore(flags);
870 printk(KERN_WARNING
871 "VPE loader: No TC's associated with VPE %d\n",
872 v->minor);
874 return -ENOEXEC;
877 /* Put MVPE's into 'configuration state' */
878 set_c0_mvpcontrol(MVPCONTROL_VPC);
880 settc(t->index);
882 /* should check it is halted, and not activated */
883 if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) {
884 evpe(vpeflags);
885 emt(dmt_flag);
886 local_irq_restore(flags);
888 printk(KERN_WARNING "VPE loader: TC %d is already active!\n",
889 t->index);
891 return -ENOEXEC;
894 /* Write the address we want it to start running from in the TCPC register. */
895 write_tc_c0_tcrestart((unsigned long)v->__start);
896 write_tc_c0_tccontext((unsigned long)0);
899 * Mark the TC as activated, not interrupt exempt and not dynamically
900 * allocatable
902 val = read_tc_c0_tcstatus();
903 val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
904 write_tc_c0_tcstatus(val);
906 write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
909 * The sde-kit passes 'memsize' to __start in $a3, so set something
910 * here... Or set $a3 to zero and define DFLT_STACK_SIZE and
911 * DFLT_HEAP_SIZE when you compile your program
913 mttgpr(6, v->ntcs);
914 mttgpr(7, physical_memsize);
916 /* set up VPE1 */
918 * bind the TC to VPE 1 as late as possible so we only have the final
919 * VPE registers to set up, and so an EJTAG probe can trigger on it
921 write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
923 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));
925 back_to_back_c0_hazard();
927 /* Set up the XTC bit in vpeconf0 to point at our tc */
928 write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
929 | (t->index << VPECONF0_XTC_SHIFT));
931 back_to_back_c0_hazard();
933 /* enable this VPE */
934 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
936 /* clear out any leftovers from a previous program */
937 write_vpe_c0_status(0);
938 write_vpe_c0_cause(0);
940 /* take system out of configuration state */
941 clear_c0_mvpcontrol(MVPCONTROL_VPC);
944 * SMTC/SMVP kernels manage VPE enable independently,
945 * but uniprocessor kernels need to turn it on, even
946 * if that wasn't the pre-dvpe() state.
948 #ifdef CONFIG_SMP
949 evpe(vpeflags);
950 #else
951 evpe(EVPE_ENABLE);
952 #endif
953 emt(dmt_flag);
954 local_irq_restore(flags);
956 list_for_each_entry(n, &v->notify, list)
957 n->start(minor);
959 return 0;
962 static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
963 unsigned int symindex, const char *strtab,
964 struct module *mod)
966 Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
967 unsigned int i, j, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
969 for (i = 1; i < n; i++) {
970 if (strcmp(strtab + sym[i].st_name, "__start") == 0)
971 v->__start = sym[i].st_value;
973 if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0)
974 v->shared_ptr = (void *)sym[i].st_value;
976 if (strcmp(strtab + sym[i].st_name, "_vpe_shared_areas") == 0) {
977 struct vpe_shared_area *psa
978 = (struct vpe_shared_area *)sym[i].st_value;
979 struct vpe_shared_area *tpsa;
980 v->shared_areas = psa;
981 printk(KERN_INFO"_vpe_shared_areas found, 0x%x\n",
982 (unsigned int)v->shared_areas);
984 * Copy any "published" areas to the descriptor
986 for (j = 0; j < N_PUB_AREAS; j++) {
987 if (published_vpe_area[j].type != VPE_SHARED_RESERVED) {
988 tpsa = psa;
989 while (tpsa->type != VPE_SHARED_NULL) {
990 if ((tpsa->type == VPE_SHARED_RESERVED)
991 || (tpsa->type == published_vpe_area[j].type)) {
992 tpsa->type = published_vpe_area[j].type;
993 tpsa->addr = published_vpe_area[j].addr;
994 break;
996 tpsa++;
1004 if ( (v->__start == 0) || (v->shared_ptr == NULL))
1005 return -1;
1007 return 0;
1011 * Allocates a VPE with some program code space (the load address), copies
1012 * the contents of the program (p)buffer, performing relocations etc.,
1013 * and frees it when finished.
1015 static int vpe_elfload(struct vpe * v)
1017 Elf_Ehdr *hdr;
1018 Elf_Shdr *sechdrs;
1019 long err = 0;
1020 char *secstrings, *strtab = NULL;
1021 unsigned int len, i, symindex = 0, strindex = 0, relocate = 0;
1022 struct module mod; // so we can re-use the relocations code
1024 memset(&mod, 0, sizeof(struct module));
1025 strcpy(mod.name, "VPE loader");
1026 hdr = v->l_ehdr;
1027 len = v->pbsize;
1029 /* Sanity checks against loading binaries of the wrong arch
1030 or with a weird ELF version */
1031 if ((hdr->e_type != ET_REL && hdr->e_type != ET_EXEC)
1032 || !elf_check_arch(hdr)
1033 || hdr->e_shentsize != sizeof(*sechdrs)) {
1034 printk(KERN_WARNING
1035 "VPE loader: program wrong arch or weird elf version\n");
1037 return -ENOEXEC;
1040 if (hdr->e_type == ET_REL)
1041 relocate = 1;
1043 if (len < v->l_phlen + v->l_shlen) {
1044 printk(KERN_ERR "VPE loader: Headers exceed %u bytes\n", len);
1046 return -ENOEXEC;
1049 /* Convenience variables */
1050 sechdrs = (void *)hdr + hdr->e_shoff;
1051 secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
1052 sechdrs[0].sh_addr = 0;
1054 /* And these should exist, but gcc whinges if we don't init them */
1055 symindex = strindex = 0;
1057 if (relocate) {
1058 for (i = 1; i < hdr->e_shnum; i++) {
1059 if (sechdrs[i].sh_type != SHT_NOBITS
1060 && len < sechdrs[i].sh_offset + sechdrs[i].sh_size) {
1061 printk(KERN_ERR "VPE program length %u truncated\n",
1062 len);
1063 return -ENOEXEC;
1066 /* Mark all sections sh_addr with their address in the
1067 temporary image. */
1068 sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
1070 /* Internal symbols and strings. */
1071 if (sechdrs[i].sh_type == SHT_SYMTAB) {
1072 symindex = i;
1073 strindex = sechdrs[i].sh_link;
1074 strtab = (char *)hdr + sechdrs[strindex].sh_offset;
1077 layout_sections(&mod, hdr, sechdrs, secstrings);
1079 * Non-relocatable loads should have already done their
1080 * allocates, based on program header table.
1084 if (!v->load_addr)
1085 return -ENOMEM;
1086 memset(v->load_addr, 0, mod.core_size);
1088 pr_info("VPE loader: loading to %p\n", v->load_addr);
1090 if (relocate) {
1091 for (i = 0; i < hdr->e_shnum; i++) {
1092 void *dest;
1094 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
1095 continue;
1097 dest = v->load_addr + sechdrs[i].sh_entsize;
1099 if (sechdrs[i].sh_type != SHT_NOBITS)
1100 memcpy(dest, (void *)sechdrs[i].sh_addr,
1101 sechdrs[i].sh_size);
1102 /* Update sh_addr to point to copy in image. */
1103 sechdrs[i].sh_addr = (unsigned long)dest;
1105 printk(KERN_DEBUG " section sh_name %s sh_addr 0x%x\n",
1106 secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr);
1109 /* Fix up syms, so that st_value is a pointer to location. */
1110 simplify_symbols(sechdrs, symindex, strtab, secstrings,
1111 hdr->e_shnum, &mod);
1113 /* Now do relocations. */
1114 for (i = 1; i < hdr->e_shnum; i++) {
1115 const char *strtab = (char *)sechdrs[strindex].sh_addr;
1116 unsigned int info = sechdrs[i].sh_info;
1118 /* Not a valid relocation section? */
1119 if (info >= hdr->e_shnum)
1120 continue;
1122 /* Don't bother with non-allocated sections */
1123 if (!(sechdrs[info].sh_flags & SHF_ALLOC))
1124 continue;
1126 if (sechdrs[i].sh_type == SHT_REL)
1127 err = apply_relocations(sechdrs, strtab, symindex, i,
1128 &mod);
1129 else if (sechdrs[i].sh_type == SHT_RELA)
1130 err = apply_relocate_add(sechdrs, strtab, symindex, i,
1131 &mod);
1132 if (err < 0)
1133 return err;
1136 } else {
1139 * Program image is already in memory.
1141 for (i = 0; i < hdr->e_shnum; i++) {
1142 /* Internal symbols and strings. */
1143 if (sechdrs[i].sh_type == SHT_SYMTAB) {
1144 symindex = i;
1145 strindex = sechdrs[i].sh_link;
1146 strtab = (char *)hdr + sechdrs[strindex].sh_offset;
1148 /* mark the symtab's address for when we try to find the
1149 magic symbols */
1150 sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
1155 /* make sure it's physically written out */
1156 flush_icache_range((unsigned long)v->load_addr,
1157 (unsigned long)v->load_addr + v->copied);
1159 if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
1160 if (v->__start == 0) {
1161 printk(KERN_WARNING "VPE loader: program does not contain "
1162 "a __start symbol\n");
1163 return -ENOEXEC;
1166 if (v->shared_ptr == NULL)
1167 printk(KERN_WARNING "VPE loader: "
1168 "program does not contain vpe_shared symbol.\n"
1169 " Unable to use AMVP (AP/SP) facilities.\n");
1171 pr_info("APRP VPE loader: elf loaded\n");
1173 return 0;
1176 static void cleanup_tc(struct tc *tc)
1178 unsigned long flags;
1179 unsigned int mtflags, vpflags;
1180 int tmp;
1182 local_irq_save(flags);
1183 mtflags = dmt();
1184 vpflags = dvpe();
1185 /* Put MVPE's into 'configuration state' */
1186 set_c0_mvpcontrol(MVPCONTROL_VPC);
1188 settc(tc->index);
1189 tmp = read_tc_c0_tcstatus();
1191 /* mark not allocated and not dynamically allocatable */
1192 tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
1193 tmp |= TCSTATUS_IXMT; /* interrupt exempt */
1194 write_tc_c0_tcstatus(tmp);
1196 write_tc_c0_tchalt(TCHALT_H);
1197 mips_ihb();
1199 /* bind it to anything other than VPE1 */
1200 // write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE
1202 clear_c0_mvpcontrol(MVPCONTROL_VPC);
1203 evpe(vpflags);
1204 emt(mtflags);
1205 local_irq_restore(flags);
1208 static int getcwd(char *buff, int size)
1210 mm_segment_t old_fs;
1211 int ret;
1213 old_fs = get_fs();
1214 set_fs(KERNEL_DS);
1216 ret = sys_getcwd(buff, size);
1218 set_fs(old_fs);
1220 return ret;
1223 /* checks VPE is unused and gets ready to load program */
1224 static int vpe_open(struct inode *inode, struct file *filp)
1226 enum vpe_state state;
1227 struct vpe_notifications *not;
1228 struct vpe *v;
1229 int ret;
1231 if (minor != iminor(inode)) {
1232 /* assume only 1 device at the moment. */
1233 pr_warning("VPE loader: only vpe1 is supported\n");
1235 return -ENODEV;
1238 * This treats the tclimit command line configuration input
1239 * as a minor device indication, which is probably unwholesome.
1242 if ((v = get_vpe(tclimit)) == NULL) {
1243 pr_warning("VPE loader: unable to get vpe\n");
1245 return -ENODEV;
1248 state = xchg(&v->state, VPE_STATE_INUSE);
1249 if (state != VPE_STATE_UNUSED) {
1250 printk(KERN_DEBUG "VPE loader: TC in use, dumping regs\n");
1252 list_for_each_entry(not, &v->notify, list) {
1253 not->stop(tclimit);
1256 release_progmem(v->load_addr);
1257 kfree(v->l_phsort);
1258 cleanup_tc(get_tc(tclimit));
1261 /* this, of course, trashes what was there before... */
1262 v->pbuffer = vmalloc(P_SIZE); v->pbsize = P_SIZE; /* bounds checks in vpe_write() rely on pbsize */
1263 v->load_addr = NULL;
1264 v->copied = 0;
1265 v->offset = 0;
1266 v->l_state = LOAD_STATE_EHDR;
1267 v->l_ehdr = NULL;
1268 v->l_phdr = NULL;
1269 v->l_phsort = NULL;
1270 v->l_shdr = NULL;
1272 v->uid = filp->f_cred->fsuid;
1273 v->gid = filp->f_cred->fsgid;
1275 #ifdef CONFIG_MIPS_APSP_KSPD
1276 /* get kspd to tell us when a syscall_exit happens */
1277 if (!kspd_events_reqd) {
1278 kspd_notify(&kspd_events);
1279 kspd_events_reqd++;
1281 #endif
1283 v->cwd[0] = 0;
1284 ret = getcwd(v->cwd, VPE_PATH_MAX);
1285 if (ret < 0)
1286 printk(KERN_WARNING "VPE loader: open, getcwd returned %d\n", ret);
1288 v->shared_ptr = NULL;
1289 v->shared_areas = NULL;
1290 v->__start = 0;
1292 return 0;
1295 static int vpe_release(struct inode *inode, struct file *filp)
1297 struct vpe *v;
1298 int ret = 0;
1300 v = get_vpe(tclimit);
1301 if (v == NULL)
1302 return -ENODEV;
1304 * If image load had no errors, massage program/section tables
1305 * to reflect movement of program/section data into VPE program
1306 * memory.
1308 if (v->l_state != LOAD_STATE_DONE) {
1309 printk(KERN_WARNING "VPE Release after incomplete load\n");
1310 printk(KERN_DEBUG "Used vfree to free memory at "
1311 "%x after failed load attempt\n",
1312 (unsigned int)v->pbuffer);
1313 if (v->pbuffer != NULL)
1314 vfree(v->pbuffer);
1315 return -ENOEXEC;
1318 if (vpe_elfload(v) >= 0) {
1319 vpe_run(v);
1320 } else {
1321 printk(KERN_WARNING "VPE loader: ELF load failed.\n");
1322 printk(KERN_DEBUG "Used vfree to free memory at "
1323 "%x after failed load attempt\n",
1324 (unsigned int)v->pbuffer);
1325 if (v->pbuffer != NULL)
1326 vfree(v->pbuffer);
1327 ret = -ENOEXEC;
1331 /* It's good to be able to run the SP and if it chokes have a look at
1332 the /dev/rt?. But if we reset the pointer to the shared struct we
1333 lose what has happened. So perhaps if garbage is sent to the vpe
1334 device, use it as a trigger for the reset. Hopefully a nice
1335 executable will be along shortly. */
1336 if (ret < 0)
1337 v->shared_ptr = NULL;
1339 // cleanup any temp buffers
1340 if (v->pbuffer) {
1341 printk(KERN_DEBUG "Used vfree to free memory at %x\n",
1342 (unsigned int)v->pbuffer);
1343 vfree(v->pbuffer);
1345 v->pbsize = 0;
1346 return ret;
1350 * A sort of insertion sort to generate list of program header indices
1351 * in order of their file offsets.
1354 static void indexort(struct elf_phdr *phdr, int nph, int *index)
1356 int i, j, t;
1357 unsigned int toff;
1359 /* Create initial mapping */
1360 for (i = 0; i < nph; i++)
1361 index[i] = i;
1362 /* Do the indexed insert sort */
1363 for (i = 1; i < nph; i++) {
1364 j = i;
1365 t = index[j];
1366 toff = phdr[t].p_offset;
1367 while ((j > 0) && (phdr[index[j-1]].p_offset > toff)) {
1368 index[j] = index[j-1];
1369 j--;
1371 index[j] = t;
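/*
 * Example: for p_offset values {0x2000, 0x400, 0x1000} the sort above
 * leaves index[] == {1, 2, 0}, i.e. the program header indices ordered
 * by file offset, without moving the headers themselves.
 */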
1377 * This function has to convert the ELF file image being sequentially
1378 * streamed to the pseudo-device into the binary image, symbol, and
1379 * string information, which the ELF format allows to be in some degree
1380 * of disorder.
1382 * The ELF header and, if present, program header table, are copied into
1383 * a temporary buffer. Loadable program segments, if present, are copied
1384 * into the RP program memory at the addresses specified by the program
1385 * header table.
1387 * Sections not specified by the program header table are loaded into
1388 * memory following the program segments if they are "allocated", or
1389 * into the temporary buffer if they are not. The section header
1390 * table is loaded into the temporary buffer.
1392 #define CURPHDR (v->l_phdr[v->l_phsort[v->l_cur_seg]])
1393 static ssize_t vpe_write(struct file *file, const char __user * buffer,
1394 size_t count, loff_t * ppos)
1396 size_t ret = count;
1397 struct vpe *v;
1398 int tocopy, uncopied;
1399 int i;
1400 unsigned int progmemlen;
1402 if (iminor(file->f_path.dentry->d_inode) != minor)
1403 return -ENODEV;
1405 v = get_vpe(tclimit);
1406 if (v == NULL)
1407 return -ENODEV;
1409 if (v->pbuffer == NULL) {
1410 printk(KERN_ERR "VPE loader: no buffer for program\n");
1411 return -ENOMEM;
1414 while (count) {
1415 switch (v->l_state) {
1416 case LOAD_STATE_EHDR:
1417 /* Loading ELF Header into scratch buffer */
1418 tocopy = min((unsigned long)count,
1419 sizeof(Elf_Ehdr) - v->offset);
1420 uncopied = copy_from_user(v->pbuffer + v->copied,
1421 buffer, tocopy);
1422 count -= tocopy - uncopied;
1423 v->copied += tocopy - uncopied;
1424 v->offset += tocopy - uncopied;
1425 buffer += tocopy - uncopied;
1426 if (v->copied == sizeof(Elf_Ehdr)) {
1427 v->l_ehdr = (Elf_Ehdr *)v->pbuffer;
1428 if (memcmp(v->l_ehdr->e_ident, ELFMAG, 4) != 0) {
1429 printk(KERN_WARNING "VPE loader: %s\n",
1430 "non-ELF file image");
1431 ret = -ENOEXEC;
1432 v->l_state = LOAD_STATE_ERROR;
1433 break;
1435 if (v->l_ehdr->e_phoff != 0) {
1436 v->l_phdr = (struct elf_phdr *)
1437 (v->pbuffer + v->l_ehdr->e_phoff);
1438 v->l_phlen = v->l_ehdr->e_phentsize
1439 * v->l_ehdr->e_phnum;
1440 /* Check against buffer overflow */
1441 if ((v->copied + v->l_phlen) > v->pbsize) {
1442 printk(KERN_WARNING
1443 "VPE loader: elf program header table size too big\n");
1444 v->l_state = LOAD_STATE_ERROR;
1445 return -ENOMEM;
1447 v->l_state = LOAD_STATE_PHDR;
1449 * Program headers generally indicate
1450 * linked executable with possibly
1451 * valid entry point.
1453 v->__start = v->l_ehdr->e_entry;
1454 } else if (v->l_ehdr->e_shoff != 0) {
1456 * No program headers, but a section
1457 * header table. A relocatable binary.
1458 * We need to load the works into the
1459 * kernel temp buffer to compute the
1460 * RP program image. That limits our
1461 * binary size, but at least we're no
1462 * worse off than the original APRP
1463 * prototype.
1465 v->l_shlen = v->l_ehdr->e_shentsize
1466 * v->l_ehdr->e_shnum;
1467 if ((v->l_ehdr->e_shoff + v->l_shlen
1468 - v->offset) > v->pbsize) {
1469 printk(KERN_WARNING
1470 "VPE loader: elf sections/section table too big.\n");
1471 v->l_state = LOAD_STATE_ERROR;
1472 return -ENOMEM;
1474 v->l_state = LOAD_STATE_SHDR;
1475 } else {
1477 * If neither program nor section tables,
1478 * we don't know what to do.
1480 v->l_state = LOAD_STATE_ERROR;
1481 return -ENOEXEC;
1484 break;
1485 case LOAD_STATE_PHDR:
1486 /* Loading Program Headers into scratch */
1487 tocopy = min((unsigned long)count,
1488 v->l_ehdr->e_phoff + v->l_phlen - v->copied);
1489 uncopied = copy_from_user(v->pbuffer + v->copied,
1490 buffer, tocopy);
1491 count -= tocopy - uncopied;
1492 v->copied += tocopy - uncopied;
1493 v->offset += tocopy - uncopied;
1494 buffer += tocopy - uncopied;
1496 if (v->copied == v->l_ehdr->e_phoff + v->l_phlen) {
1498 * It's legal for the program headers to be
1499 * out of order with respect to the file layout.
1500 * Generate a list of indices, sorted by file
1501 * offset.
1503 v->l_phsort = kmalloc(v->l_ehdr->e_phnum
1504 * sizeof(int), GFP_KERNEL);
1505 printk(KERN_DEBUG
1506 "Used kmalloc to allocate %d bytes of memory at %x\n",
1507 v->l_ehdr->e_phnum*sizeof(int),
1508 (unsigned int)v->l_phsort);
1509 if (!v->l_phsort)
1510 return -ENOMEM; /* Preposterous, but... */
1511 indexort(v->l_phdr, v->l_ehdr->e_phnum,
1512 v->l_phsort);
1514 v->l_progminad = (unsigned int)-1;
1515 v->l_progmaxad = 0;
1516 progmemlen = 0;
1517 for (i = 0; i < v->l_ehdr->e_phnum; i++) {
1518 if (v->l_phdr[v->l_phsort[i]].p_type
1519 == PT_LOAD) {
1520 /* Unstripped .reginfo sections are bad */
1521 if (v->l_phdr[v->l_phsort[i]].p_vaddr
1522 < __UA_LIMIT) {
1523 printk(KERN_WARNING "%s%s%s\n",
1524 "VPE loader: ",
1525 "User-mode p_vaddr, ",
1526 "skipping program segment,");
1527 printk(KERN_WARNING "%s%s%s\n",
1528 "VPE loader: ",
1529 "strip .reginfo from binary ",
1530 "if necessary.");
1531 continue;
1533 if (v->l_phdr[v->l_phsort[i]].p_vaddr
1534 < v->l_progminad)
1535 v->l_progminad =
1536 v->l_phdr[v->l_phsort[i]].p_vaddr;
1537 if ((v->l_phdr[v->l_phsort[i]].p_vaddr
1538 + v->l_phdr[v->l_phsort[i]].p_memsz)
1539 > v->l_progmaxad)
1540 v->l_progmaxad =
1541 v->l_phdr[v->l_phsort[i]].p_vaddr +
1542 v->l_phdr[v->l_phsort[i]].p_memsz;
1545 printk(KERN_INFO "APRP RP program 0x%x to 0x%x\n",
1546 v->l_progminad, v->l_progmaxad);
1548 * Do a simple sanity check of the memory being
1549 * allocated. Abort if greater than an arbitrary
1550 * value of 32MB
1552 if (v->l_progmaxad - v->l_progminad >
1553 32*1024*1024) {
1554 printk(KERN_WARNING
1555 "RP program failed to allocate %d kbytes - limit is 32,768 KB\n",
1556 (v->l_progmaxad - v->l_progminad)/1024);
1557 return -ENOMEM;
1560 v->load_addr = alloc_progmem((void *)v->l_progminad,
1561 v->l_progmaxad - v->l_progminad);
1562 if (!v->load_addr)
1563 return -ENOMEM;
1564 if ((unsigned int)v->load_addr
1565 > v->l_progminad) {
1566 release_progmem(v->load_addr);
1567 return -ENOMEM;
1569 /* Find first segment with loadable content */
1570 for (i = 0; i < v->l_ehdr->e_phnum; i++) {
1571 if (v->l_phdr[v->l_phsort[i]].p_type
1572 == PT_LOAD) {
1573 if (v->l_phdr[v->l_phsort[i]].p_vaddr
1574 < __UA_LIMIT) {
1575 /* Skip userspace segments */
1576 continue;
1578 v->l_cur_seg = i;
1579 break;
1582 if (i == v->l_ehdr->e_phnum) {
1583 /* No loadable program segment? Bogus file. */
1584 printk(KERN_WARNING "Bad ELF file for APRP\n");
1585 return -ENOEXEC;
1587 v->l_segoff = 0;
1588 v->l_state = LOAD_STATE_PIMAGE;
1590 break;
1591 case LOAD_STATE_PIMAGE:
1593 * Skip through input stream until
1594 * first program segment. Would be
1595 * better to have loaded up to here
1596 * into the temp buffer, but for now
1597 * we simply rule out "interesting"
1598 * sections prior to the last program
1599 * segment in an executable file.
1601 if (v->offset < CURPHDR.p_offset) {
1602 uncopied = CURPHDR.p_offset - v->offset;
1603 if (uncopied > count)
1604 uncopied = count;
1605 count -= uncopied;
1606 buffer += uncopied;
1607 v->offset += uncopied;
1608 /* Go back through the "while" */
1609 break;
1612 * Having dispensed with any unlikely fluff,
1613 * copy from user I/O buffer to program segment.
1615 tocopy = min(count, CURPHDR.p_filesz - v->l_segoff);
1617 /* Loading image into RP memory */
1618 uncopied = copy_from_user((char *)CURPHDR.p_vaddr
1619 + v->l_segoff, buffer, tocopy);
1620 count -= tocopy - uncopied;
1621 v->offset += tocopy - uncopied;
1622 v->l_segoff += tocopy - uncopied;
1623 buffer += tocopy - uncopied;
1624 if (v->l_segoff >= CURPHDR.p_filesz) {
1625 /* Finished current segment load */
1626 /* Zero out non-file-sourced image */
1627 uncopied = CURPHDR.p_memsz - CURPHDR.p_filesz;
1628 if (uncopied > 0)
1629 memset((char *)CURPHDR.p_vaddr + v->l_segoff,
1630 0, uncopied);
1631 /* Advance to next segment */
1632 for (i = v->l_cur_seg + 1;
1633 i < v->l_ehdr->e_phnum; i++) {
1634 if (v->l_phdr[v->l_phsort[i]].p_type
1635 == PT_LOAD) {
1636 if (v->l_phdr[v->l_phsort[i]].p_vaddr
1637 < __UA_LIMIT) {
1638 /* Skip userspace segments */
1639 continue;
1641 v->l_cur_seg = i;
1642 break;
1645 /* If none left, prepare to load section headers */
1646 if (i == v->l_ehdr->e_phnum) {
1647 if (v->l_ehdr->e_shoff != 0) {
1648 /* Copy to where we left off in temp buffer */
1649 v->l_shlen = v->l_ehdr->e_shentsize
1650 * v->l_ehdr->e_shnum;
1651 if ((v->l_ehdr->e_shoff + v->l_shlen
1652 - v->offset) > v->pbsize) {
1653 printk(KERN_WARNING
1654 "VPE loader: elf sections/section table too big\n");
1655 v->l_state = LOAD_STATE_ERROR;
1656 return -ENOMEM;
1658 v->l_state = LOAD_STATE_SHDR;
1659 break;
1661 } else {
1662 /* reset offset for new program segment */
1663 v->l_segoff = 0;
1666 break;
1667 case LOAD_STATE_SHDR:
1669 * Read stream into private buffer up
1670 * through and including the section header
1671 * table.
1674 tocopy = min((unsigned long)count,
1675 v->l_ehdr->e_shoff + v->l_shlen - v->offset);
1676 if (tocopy) {
1677 uncopied = copy_from_user(v->pbuffer + v->copied,
1678 buffer, tocopy);
1679 count -= tocopy - uncopied;
1680 v->copied += tocopy - uncopied;
1681 v->offset += tocopy - uncopied;
1682 buffer += tocopy - uncopied;
1684 /* Finished? */
1685 if (v->offset == v->l_ehdr->e_shoff + v->l_shlen) {
1686 unsigned int offset_delta = v->offset - v->copied;
1688 v->l_shdr = (Elf_Shdr *)(v->pbuffer
1689 + v->l_ehdr->e_shoff - offset_delta);
1691 * Check for sections after the section table,
1692 * which for gcc MIPS binaries includes
1693 * the symbol table. Do any other processing
1694 * that requires values within the stream, and
1695 * normalize offsets to be relative to
1696 * the header-only layout of temp buffer.
1699 /* Assume no trailer until we detect one */
1700 v->l_trailer = 0;
1701 v->l_state = LOAD_STATE_DONE;
1702 for (i = 0; i < v->l_ehdr->e_shnum; i++) {
1703 if (v->l_shdr[i].sh_offset
1704 > v->l_ehdr->e_shoff) {
1705 v->l_state = LOAD_STATE_TRAILER;
1706 /* Track trailing data length */
1707 if (v->l_trailer
1708 < (v->l_shdr[i].sh_offset
1709 + v->l_shdr[i].sh_size)
1710 - (v->l_ehdr->e_shoff
1711 + v->l_shlen))
1712 v->l_trailer =
1713 (v->l_shdr[i].sh_offset
1714 + v->l_shdr[i].sh_size)
1715 - (v->l_ehdr->e_shoff
1716 + v->l_shlen);
1718 /* Adjust section offset if necessary */
1719 v->l_shdr[i].sh_offset -= offset_delta;
1721 if ((v->copied + v->l_trailer) > v->pbsize) {
1722 printk(KERN_WARNING
1723 "VPE loader: elf size too big. Perhaps strip uneeded symbols\n");
1724 v->l_state = LOAD_STATE_ERROR;
1725 return -ENOMEM;
1728 /* Fix up offsets in ELF header */
1729 v->l_ehdr->e_shoff = (unsigned int)v->l_shdr
1730 - (unsigned int)v->pbuffer;
1732 break;
1733 case LOAD_STATE_TRAILER:
1735 * Symbol and string tables follow section headers
1736 * in gcc binaries for MIPS. Copy into temp buffer.
1738 if (v->l_trailer) {
1739 tocopy = min(count, v->l_trailer);
1740 uncopied = copy_from_user(v->pbuffer + v->copied,
1741 buffer, tocopy);
1742 count -= tocopy - uncopied;
1743 v->l_trailer -= tocopy - uncopied;
1744 v->copied += tocopy - uncopied;
1745 v->offset += tocopy - uncopied;
1746 buffer += tocopy - uncopied;
1748 if (!v->l_trailer)
1749 v->l_state = LOAD_STATE_DONE;
1750 break;
1751 case LOAD_STATE_DONE:
1752 if (count)
1753 count = 0;
1754 break;
1755 case LOAD_STATE_ERROR:
1756 default:
1757 return -EINVAL;
1760 return ret;
1763 static const struct file_operations vpe_fops = {
1764 .owner = THIS_MODULE,
1765 .open = vpe_open,
1766 .release = vpe_release,
1767 .write = vpe_write
1770 /* module wrapper entry points */
1771 /* give me a vpe */
1772 vpe_handle vpe_alloc(void)
1774 int i;
1775 struct vpe *v;
1777 /* find a vpe */
1778 for (i = 1; i < MAX_VPES; i++) {
1779 if ((v = get_vpe(i)) != NULL) {
1780 v->state = VPE_STATE_INUSE;
1781 return v;
1784 return NULL;
1787 EXPORT_SYMBOL(vpe_alloc);
1789 /* start running from here */
1790 int vpe_start(vpe_handle vpe, unsigned long start)
1792 struct vpe *v = vpe;
1794 /* Null start address means use value from ELF file */
1795 if (start)
1796 v->__start = start;
1797 return vpe_run(v);
1800 EXPORT_SYMBOL(vpe_start);
1802 /* halt it for now */
1803 int vpe_stop(vpe_handle vpe)
1805 struct vpe *v = vpe;
1806 struct tc *t;
1807 unsigned int evpe_flags;
1809 evpe_flags = dvpe();
1811 if ((t = list_entry(v->tc.next, struct tc, tc)) != NULL) {
1813 settc(t->index);
1814 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
1817 evpe(evpe_flags);
1819 return 0;
1822 EXPORT_SYMBOL(vpe_stop);
1824 /* I'm done with it, thank you */
1825 int vpe_free(vpe_handle vpe)
1827 struct vpe *v = vpe;
1828 struct tc *t;
1829 unsigned int evpe_flags;
1831 if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
1832 return -ENOEXEC;
1835 evpe_flags = dvpe();
1837 /* Put MVPE's into 'configuration state' */
1838 set_c0_mvpcontrol(MVPCONTROL_VPC);
1840 settc(t->index);
1841 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
1843 /* halt the TC */
1844 write_tc_c0_tchalt(TCHALT_H);
1845 mips_ihb();
1847 /* mark the TC unallocated */
1848 write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);
1850 v->state = VPE_STATE_UNUSED;
1852 clear_c0_mvpcontrol(MVPCONTROL_VPC);
1853 evpe(evpe_flags);
1855 return 0;
1858 EXPORT_SYMBOL(vpe_free);
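/*
 * Sketch of kernel-module use of the wrapper API above (error handling
 * elided; callers should also check vpe_start()'s return value):
 *
 *	vpe_handle h = vpe_alloc();
 *	if (h != NULL) {
 *		vpe_start(h, 0);	// 0: use entry point from the ELF file
 *		...
 *		vpe_stop(h);
 *		vpe_free(h);
 *	}
 */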
1860 void *vpe_get_shared(int index)
1862 struct vpe *v;
1864 if ((v = get_vpe(index)) == NULL)
1865 return NULL;
1867 return v->shared_ptr;
1870 EXPORT_SYMBOL(vpe_get_shared);
1872 int vpe_getuid(int index)
1874 struct vpe *v;
1876 if ((v = get_vpe(index)) == NULL)
1877 return -1;
1879 return v->uid;
1882 EXPORT_SYMBOL(vpe_getuid);
1884 int vpe_getgid(int index)
1886 struct vpe *v;
1888 if ((v = get_vpe(index)) == NULL)
1889 return -1;
1891 return v->gid;
1894 EXPORT_SYMBOL(vpe_getgid);
1896 int vpe_notify(int index, struct vpe_notifications *notify)
1898 struct vpe *v;
1900 if ((v = get_vpe(index)) == NULL)
1901 return -1;
1903 list_add(&notify->list, &v->notify);
1904 return 0;
1907 EXPORT_SYMBOL(vpe_notify);
1909 char *vpe_getcwd(int index)
1911 struct vpe *v;
1913 if ((v = get_vpe(index)) == NULL)
1914 return NULL;
1916 return v->cwd;
1919 EXPORT_SYMBOL(vpe_getcwd);
1922 * RP applications may contain a _vpe_shared_area descriptor
1923 * array to allow for data sharing with Linux kernel functions
1924 * that's slightly more abstracted and extensible than the
1925 * fixed binding used by the rtlx support. Indeed, the rtlx
1926 * support should ideally be converted to use the generic
1927 * shared area descriptor scheme at some point.
1929 * mips_get_vpe_shared_area() can be used by AP kernel
1930 * modules to get an area pointer of a given type, if
1931 * it exists.
1933 * mips_publish_vpe_area() is used by AP kernel modules
1934 * to share kseg0 kernel memory with the RP. It maintains
1935 * a private table, so that publishing can be done before
1936 * the RP program is launched. Making this table dynamically
1937 * allocated and extensible would be good scalable OS design;
1938 * however, until there's more than one user of the mechanism,
1939 * it should be an acceptable simplification to allow a static
1940 * maximum of 4 published areas.
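/*
 * Sketch of AP-side use, with a hypothetical area type and payload:
 *
 *	static struct my_ring ring;	// must live in kseg0
 *	mips_publish_vpe_area(VPE_SHARED_MYRING, &ring);
 *	...
 *	struct my_ring *r =
 *		mips_get_vpe_shared_area(tclimit, VPE_SHARED_MYRING);
 *	if (r != NULL)
 *		// ... exchange data with the RP through *r ...
 */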
1943 void *mips_get_vpe_shared_area(int index, int type)
1945 struct vpe *v;
1946 struct vpe_shared_area *vsa;
1948 v = get_vpe(index);
1949 if (v == NULL)
1950 return NULL;
1952 if (v->shared_areas == NULL)
1953 return NULL;
1955 vsa = v->shared_areas;
1957 while (vsa->type != VPE_SHARED_NULL) {
1958 if (vsa->type == type)
1959 return vsa->addr;
1960 else
1961 vsa++;
1963 /* Fell through without finding type */
1965 return NULL;
1967 EXPORT_SYMBOL(mips_get_vpe_shared_area);
1969 int mips_publish_vpe_area(int type, void *ptr)
1971 int i;
1972 int retval = 0;
1973 struct vpe *v;
1974 unsigned long flags;
1975 unsigned int vpflags;
1977 printk(KERN_INFO "mips_publish_vpe_area(0x%x, 0x%x)\n", type, (int)ptr);
1978 if ((unsigned int)ptr >= KSEG2) {
1979 printk(KERN_ERR "VPE area publish of invalid address 0x%x\n",
1980 (int)ptr);
1981 return 0;
1983 for (i = 0; i < N_PUB_AREAS; i++) {
1984 if (published_vpe_area[i].type == VPE_SHARED_RESERVED) {
1985 published_vpe_area[i].type = type;
1986 published_vpe_area[i].addr = ptr;
1987 retval = type;
1988 break;
1992 * If we've already got a VPE up and running, try to
1993 * update the shared descriptor with the new data.
1995 list_for_each_entry(v, &vpecontrol.vpe_list, list) {
1996 if (v->shared_areas != NULL) {
1997 local_irq_save(flags);
1998 vpflags = dvpe();
1999 for (i = 0; v->shared_areas[i].type != VPE_SHARED_NULL; i++) {
2000 if ((v->shared_areas[i].type == type)
2001 || (v->shared_areas[i].type == VPE_SHARED_RESERVED)) {
2002 v->shared_areas[i].type = type;
2003 v->shared_areas[i].addr = ptr;
2006 evpe(vpflags);
2007 local_irq_restore(flags);
2010 return retval;
2012 EXPORT_SYMBOL(mips_publish_vpe_area);
2014 #ifdef CONFIG_MIPS_APSP_KSPD
2015 static void kspd_sp_exit( int sp_id)
2017 cleanup_tc(get_tc(sp_id));
2019 #endif
2021 static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
2022 const char *buf, size_t len)
2024 struct vpe *vpe = get_vpe(tclimit);
2025 struct vpe_notifications *not;
2027 list_for_each_entry(not, &vpe->notify, list) {
2028 not->stop(tclimit);
2031 release_progmem(vpe->load_addr);
2032 kfree(vpe->l_phsort);
2033 cleanup_tc(get_tc(tclimit));
2034 vpe_stop(vpe);
2035 vpe_free(vpe);
2037 return len;
2040 static ssize_t show_ntcs(struct device *cd, struct device_attribute *attr,
2041 char *buf)
2043 struct vpe *vpe = get_vpe(tclimit);
2045 return sprintf(buf, "%d\n", vpe->ntcs);
2048 static ssize_t store_ntcs(struct device *dev, struct device_attribute *attr,
2049 const char *buf, size_t len)
2051 struct vpe *vpe = get_vpe(tclimit);
2052 unsigned long new;
2053 char *endp;
2055 new = simple_strtoul(buf, &endp, 0);
2056 if (endp == buf)
2057 goto out_einval;
2059 if (new == 0 || new > (hw_tcs - tclimit))
2060 goto out_einval;
2062 vpe->ntcs = new;
2064 return len;
2066 out_einval:
2067 return -EINVAL;
2070 static struct device_attribute vpe_class_attributes[] = {
2071 __ATTR(kill, S_IWUSR, NULL, store_kill),
2072 __ATTR(ntcs, S_IRUGO | S_IWUSR, show_ntcs, store_ntcs),
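/*
 * These attributes appear under the vpe1 class device; assuming the
 * usual sysfs layout, usage looks like:
 *
 *	echo 3 >/sys/class/vpe/vpe1/ntcs	(before loading the program)
 *	echo 1 >/sys/class/vpe/vpe1/kill	(stop a running RP program)
 */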
2076 static void vpe_device_release(struct device *cd)
2078 printk(KERN_DEBUG "Using kfree to free vpe class device at %x\n",
2079 (unsigned int)cd);
2080 kfree(cd);
2083 struct class vpe_class = {
2084 .name = "vpe",
2085 .owner = THIS_MODULE,
2086 .dev_release = vpe_device_release,
2087 .dev_attrs = vpe_class_attributes,
2090 struct device vpe_device;
2092 static int __init vpe_module_init(void)
2094 unsigned int mtflags, vpflags;
2095 unsigned long flags, val;
2096 struct vpe *v = NULL;
2097 struct tc *t;
2098 int tc, err;
2100 if (!cpu_has_mipsmt) {
2101 printk("VPE loader: not a MIPS MT capable processor\n");
2102 return -ENODEV;
2105 if (vpelimit == 0) {
2106 #if defined(CONFIG_MIPS_MT_SMTC) || defined(CONFIG_MIPS_MT_SMP)
2107 printk(KERN_WARNING "No VPEs reserved for VPE loader.\n"
2108 "Pass maxvpes=<n> argument as kernel argument\n");
2109 return -ENODEV;
2110 #else
2111 vpelimit = 1;
2112 #endif
2115 if (tclimit == 0) {
2116 #if defined(CONFIG_MIPS_MT_SMTC) || defined(CONFIG_MIPS_MT_SMP)
2117 printk(KERN_WARNING "No TCs reserved for AP/SP, not "
2118 "initializing VPE loader.\nPass maxtcs=<n> argument as "
2119 "kernel argument\n");
2120 return -ENODEV;
2121 #else
2122 tclimit = 1;
2123 #endif
2126 major = register_chrdev(0, module_name, &vpe_fops);
2127 if (major < 0) {
2128 printk("VPE loader: unable to register character device\n");
2129 return major;
2132 err = class_register(&vpe_class);
2133 if (err) {
2134 printk(KERN_ERR "vpe_class registration failed\n");
2135 goto out_chrdev;
2137 xvpe_vector_set = 0;
2138 device_initialize(&vpe_device);
2139 vpe_device.class = &vpe_class;
2140 vpe_device.parent = NULL;
2141 dev_set_name(&vpe_device, "vpe1");
2142 vpe_device.devt = MKDEV(major, minor);
2143 err = device_add(&vpe_device);
2144 if (err) {
2145 printk(KERN_ERR "Adding vpe_device failed\n");
2146 goto out_class;
2149 local_irq_save(flags);
2150 mtflags = dmt();
2151 vpflags = dvpe();
2153 /* Put MVPE's into 'configuration state' */
2154 set_c0_mvpcontrol(MVPCONTROL_VPC);
2156 /* dump_mtregs(); */
2158 val = read_c0_mvpconf0();
2159 hw_tcs = (val & MVPCONF0_PTC) + 1;
2160 hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
2162 for (tc = tclimit; tc < hw_tcs; tc++) {
2164 * Must re-enable multithreading temporarily, or if we
2165 * reschedule and send IPIs or similar we might hang.
2167 clear_c0_mvpcontrol(MVPCONTROL_VPC);
2168 evpe(vpflags);
2169 emt(mtflags);
2170 local_irq_restore(flags);
2171 t = alloc_tc(tc);
2172 if (!t) {
2173 err = -ENOMEM;
2174 goto out;
2177 local_irq_save(flags);
2178 mtflags = dmt();
2179 vpflags = dvpe();
2180 set_c0_mvpcontrol(MVPCONTROL_VPC);
2182 /* VPE's */
2183 if (tc < hw_tcs) {
2184 settc(tc);
2186 if ((v = alloc_vpe(tc)) == NULL) {
2187 printk(KERN_WARNING "VPE: unable to allocate VPE\n");
2189 goto out_reenable;
2192 v->ntcs = hw_tcs - tclimit;
2194 /* add the tc to the list of this vpe's tc's. */
2195 list_add(&t->tc, &v->tc);
2197 /* deactivate all but vpe0 */
2198 if (tc >= tclimit) {
2199 unsigned long tmp = read_vpe_c0_vpeconf0();
2201 tmp &= ~VPECONF0_VPA;
2203 /* master VPE */
2204 tmp |= VPECONF0_MVP;
2205 write_vpe_c0_vpeconf0(tmp);
2208 /* disable multi-threading with TC's */
2209 write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
2211 if (tc >= vpelimit) {
2213 * Set config to be the same as vpe0,
2214 * particularly kseg0 coherency alg
2216 write_vpe_c0_config(read_c0_config());
2220 /* TC's */
2221 t->pvpe = v; /* set the parent vpe */
2223 if (tc >= tclimit) {
2224 unsigned long tmp;
2226 settc(tc);
2228 /* Any TC that is bound to VPE0 gets left as is - in case
2229 we are running SMTC on VPE0. A TC that is bound to any
2230 other VPE gets bound to VPE0; ideally I'd like to make
2231 it homeless, but it doesn't appear to let me bind a TC
2232 to a non-existent VPE. Which is perfectly reasonable.
2234 The (un)bound state is visible to an EJTAG probe so may
2235 notify GDB...
2238 if (((tmp = read_tc_c0_tcbind()) & TCBIND_CURVPE)) {
2239 /* tc is bound to a VPE other than VPE0 */
2240 write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);
2242 t->pvpe = get_vpe(0); /* set the parent vpe */
2245 /* halt the TC */
2246 write_tc_c0_tchalt(TCHALT_H);
2247 mips_ihb();
2249 tmp = read_tc_c0_tcstatus();
2251 /* mark not activated and not dynamically allocatable */
2252 tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
2253 tmp |= TCSTATUS_IXMT; /* interrupt exempt */
2254 write_tc_c0_tcstatus(tmp);
2258 out_reenable:
2259 /* release config state */
2260 clear_c0_mvpcontrol(MVPCONTROL_VPC);
2262 evpe(vpflags);
2263 emt(mtflags);
2264 local_irq_restore(flags);
2266 #ifdef CONFIG_MIPS_APSP_KSPD
2267 kspd_events.kspd_sp_exit = kspd_sp_exit;
2268 #endif
2269 return 0;
2271 out_class:
2272 class_unregister(&vpe_class);
2273 out_chrdev:
2274 unregister_chrdev(major, module_name);
2276 out:
2277 return err;
2280 static void __exit vpe_module_exit(void)
2282 struct vpe *v, *n;
2284 device_del(&vpe_device);
2285 unregister_chrdev(major, module_name);
2287 /* No locking needed here */
2288 list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
2289 if (v->state != VPE_STATE_UNUSED)
2290 release_vpe(v);
2294 module_init(vpe_module_init);
2295 module_exit(vpe_module_exit);
2296 MODULE_DESCRIPTION("MIPS VPE Loader");
2297 MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
2298 MODULE_LICENSE("GPL");