2 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
4 * This program is free software; you can distribute it and/or modify it
5 * under the terms of the GNU General Public License (Version 2) as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
21 * Provides support for loading a MIPS SP program on VPE1.
22 * The SP enviroment is rather simple, no tlb's. It needs to be relocatable
23 * (or partially linked). You should initialise your stack in the startup
24 * code. This loader looks for the symbol __start and sets up
25 * execution to resume from there. The MIPS SDE kit contains suitable examples.
27 * To load and run, simply cat a SP 'program file' to /dev/vpe1.
28 * i.e cat spapp >/dev/vpe1.
30 #include <linux/kernel.h>
31 #include <linux/device.h>
32 #include <linux/module.h>
34 #include <linux/init.h>
35 #include <asm/uaccess.h>
36 #include <linux/slab.h>
37 #include <linux/list.h>
38 #include <linux/vmalloc.h>
39 #include <linux/elf.h>
40 #include <linux/seq_file.h>
41 #include <linux/syscalls.h>
42 #include <linux/moduleloader.h>
43 #include <linux/interrupt.h>
44 #include <linux/poll.h>
45 #include <linux/bootmem.h>
46 #include <asm/mipsregs.h>
47 #include <asm/mipsmtregs.h>
48 #include <asm/cacheflush.h>
49 #include <asm/atomic.h>
51 #include <asm/mips_mt.h>
52 #include <asm/processor.h>
53 #include <asm/system.h>
57 typedef void *vpe_handle
;
59 #ifndef ARCH_SHF_SMALL
60 #define ARCH_SHF_SMALL 0
63 /* If this is set, the section belongs in the init part of the module */
64 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
67 * The number of TCs and VPEs physically available on the core
69 static int hw_tcs
, hw_vpes
;
70 static char module_name
[] = "vpe";
72 static const int minor
= 1; /* fixed for now */
74 #ifdef CONFIG_MIPS_APSP_KSPD
75 static struct kspd_notifications kspd_events
;
76 static int kspd_events_reqd
;
79 * Size of private kernel buffer for ELF headers and sections
81 #define P_SIZE (256 * 1024)
84 * Size of private kernel buffer for ELF headers and sections
87 #define VPE_PATH_MAX 256
113 enum vpe_state state
;
115 /* (device) minor associated with this vpe */
118 /* elfloader stuff */
119 unsigned long offset
; /* File offset into input stream */
121 unsigned long copied
;
123 unsigned long pbsize
;
124 /* Program loading state */
125 enum load_state l_state
;
127 struct elf_phdr
*l_phdr
;
128 unsigned int l_phlen
;
130 unsigned int l_shlen
;
131 int *l_phsort
; /* Sorted index list of program headers */
132 int l_segoff
; /* Offset into current program segment */
133 int l_cur_seg
; /* Indirect index of segment currently being loaded */
134 unsigned int l_progminad
;
135 unsigned int l_progmaxad
;
136 unsigned int l_trailer
;
138 unsigned int uid
, gid
;
139 char cwd
[VPE_PATH_MAX
];
141 unsigned long __start
;
143 /* tc's associated with this vpe */
146 /* The list of vpe's */
147 struct list_head list
;
149 /* legacy shared symbol address */
152 /* shared area descriptor array address */
153 struct vpe_shared_area
*shared_areas
;
155 /* the list of who wants to know when something major happens */
156 struct list_head notify
;
165 struct vpe
*pvpe
; /* parent VPE */
166 struct list_head tc
; /* The list of TC's with this VPE */
167 struct list_head list
; /* The global list of tc's */
171 spinlock_t vpe_list_lock
;
172 struct list_head vpe_list
; /* Virtual processing elements */
173 spinlock_t tc_list_lock
;
174 struct list_head tc_list
; /* Thread contexts */
176 .vpe_list_lock
= SPIN_LOCK_UNLOCKED
,
177 .vpe_list
= LIST_HEAD_INIT(vpecontrol
.vpe_list
),
178 .tc_list_lock
= SPIN_LOCK_UNLOCKED
,
179 .tc_list
= LIST_HEAD_INIT(vpecontrol
.tc_list
)
182 static void release_progmem(void *ptr
);
184 * Values and state associated with publishing shared memory areas
187 #define N_PUB_AREAS 4
189 static struct vpe_shared_area published_vpe_area
[N_PUB_AREAS
] = {
190 {VPE_SHARED_RESERVED
, 0},
191 {VPE_SHARED_RESERVED
, 0},
192 {VPE_SHARED_RESERVED
, 0},
193 {VPE_SHARED_RESERVED
, 0} };
195 /* get the vpe associated with this minor */
196 static struct vpe
*get_vpe(int minor
)
204 spin_lock(&vpecontrol
.vpe_list_lock
);
205 list_for_each_entry(v
, &vpecontrol
.vpe_list
, list
) {
206 if (v
->minor
== minor
) {
211 spin_unlock(&vpecontrol
.vpe_list_lock
);
216 /* get the tc associated with this minor */
217 static struct tc
*get_tc(int index
)
222 spin_lock(&vpecontrol
.tc_list_lock
);
223 list_for_each_entry(t
, &vpecontrol
.tc_list
, list
) {
224 if (t
->index
== index
) {
229 spin_unlock(&vpecontrol
.tc_list_lock
);
235 /* allocate a vpe and associate it with this minor (or index) */
236 static struct vpe
*alloc_vpe(int minor
)
240 if ((v
= kzalloc(sizeof(struct vpe
), GFP_KERNEL
)) == NULL
)
242 printk(KERN_DEBUG
"Used kzalloc to allocate %d bytes at %x\n",
243 sizeof(struct vpe
), (unsigned int)v
);
244 INIT_LIST_HEAD(&v
->tc
);
245 spin_lock(&vpecontrol
.vpe_list_lock
);
246 list_add_tail(&v
->list
, &vpecontrol
.vpe_list
);
247 spin_unlock(&vpecontrol
.vpe_list_lock
);
249 INIT_LIST_HEAD(&v
->notify
);
255 /* allocate a tc. At startup only tc0 is running, all other can be halted. */
256 static struct tc
*alloc_tc(int index
)
260 if ((tc
= kzalloc(sizeof(struct tc
), GFP_KERNEL
)) == NULL
)
262 printk(KERN_DEBUG
"Used kzalloc to allocate %d bytes at %x\n",
263 sizeof(struct tc
), (unsigned int)tc
);
264 INIT_LIST_HEAD(&tc
->tc
);
267 spin_lock(&vpecontrol
.tc_list_lock
);
268 list_add_tail(&tc
->list
, &vpecontrol
.tc_list
);
269 spin_unlock(&vpecontrol
.tc_list_lock
);
275 /* clean up and free everything */
276 static void release_vpe(struct vpe
*v
)
281 printk(KERN_DEBUG
"Used kfree to free memory at %x\n",
282 (unsigned int)v
->l_phsort
);
284 printk(KERN_DEBUG
"Used kfree to free memory at %x\n",
289 static void __maybe_unused
dump_mtregs(void)
293 val
= read_c0_config3();
294 printk("config3 0x%lx MT %ld\n", val
,
295 (val
& CONFIG3_MT
) >> CONFIG3_MT_SHIFT
);
297 val
= read_c0_mvpcontrol();
298 printk("MVPControl 0x%lx, STLB %ld VPC %ld EVP %ld\n", val
,
299 (val
& MVPCONTROL_STLB
) >> MVPCONTROL_STLB_SHIFT
,
300 (val
& MVPCONTROL_VPC
) >> MVPCONTROL_VPC_SHIFT
,
301 (val
& MVPCONTROL_EVP
));
303 val
= read_c0_mvpconf0();
304 printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val
,
305 (val
& MVPCONF0_PVPE
) >> MVPCONF0_PVPE_SHIFT
,
306 val
& MVPCONF0_PTC
, (val
& MVPCONF0_M
) >> MVPCONF0_M_SHIFT
);
310 * The original APRP prototype assumed a single, unshared IRQ for
311 * cross-VPE interrupts, used by the RTLX code. But M3P networking
312 * and other future functions may need to share an IRQ, particularly
313 * in 34K/Malta configurations without an external interrupt controller.
314 * All cross-VPE insterrupt users need to coordinate through shared
319 * It would be nice if I could just have this initialized to zero,
320 * but the patchcheck police won't hear of it...
323 static int xvpe_vector_set
;
325 #define XVPE_INTR_OFFSET 0
327 static int xvpe_irq
= MIPS_CPU_IRQ_BASE
+ XVPE_INTR_OFFSET
;
329 static void xvpe_dispatch(void)
334 /* Name here is generic, as m3pnet.c could in principle be used by non-MIPS */
335 int arch_get_xcpu_irq()
338 * Some of this will ultimately become platform code,
339 * but for now, we're only targeting 34K/FPGA/Malta,
340 * and there's only one generic mechanism.
342 if (!xvpe_vector_set
) {
344 * A more elaborate shared variable shouldn't be needed.
345 * Two initializations back-to-back should be harmless.
348 set_vi_handler(XVPE_INTR_OFFSET
, xvpe_dispatch
);
351 printk(KERN_ERR
"APRP requires vectored interrupts\n");
358 EXPORT_SYMBOL(arch_get_xcpu_irq
);
360 int vpe_send_interrupt(int vpe
, int inter
)
363 unsigned int vpeflags
;
365 local_irq_save(flags
);
369 * Initial version makes same simple-minded assumption
370 * as is implicit elsewhere in this module, that the
371 * only RP of interest is using the first non-Linux TC.
372 * We ignore the parameters provided by the caller!
376 * In 34K/Malta, the only cross-VPE interrupts possible
377 * are done by setting SWINT bits in Cause, of which there
378 * are two. SMTC uses SW1 for a multiplexed class of IPIs,
379 * and this mechanism should be generalized to APRP and use
380 * the same protocol. Until that's implemented, send only
381 * SW0 here, regardless of requested type.
383 write_vpe_c0_cause(read_vpe_c0_cause() | C_SW0
);
385 local_irq_restore(flags
);
388 EXPORT_SYMBOL(vpe_send_interrupt
);
389 /* Find some VPE program space */
390 static void *alloc_progmem(void *requested
, unsigned long len
)
394 #ifdef CONFIG_MIPS_VPE_LOADER_TOM
396 * This means you must tell Linux to use less memory than you
397 * physically have, for example by passing a mem= boot argument.
399 addr
= pfn_to_kaddr(max_low_pfn
);
400 if (requested
!= 0) {
401 if (requested
>= addr
)
407 memset(addr
, 0, len
);
408 printk(KERN_DEBUG
"pfn_to_kaddr returns %lu bytes of memory at %x\n",
409 len
, (unsigned int)addr
);
411 if (requested
!= 0) {
412 /* If we have a target in mind, grab a 2x slice and hope... */
413 addr
= kzalloc(len
*2, GFP_KERNEL
);
414 if ((requested
>= addr
) && (requested
< (addr
+ len
)))
419 /* simply grab some mem for now */
420 addr
= kzalloc(len
, GFP_KERNEL
);
427 static void release_progmem(void *ptr
)
429 #ifndef CONFIG_MIPS_VPE_LOADER_TOM
434 /* Update size with this section: return offset. */
435 static long get_offset(unsigned long *size
, Elf_Shdr
* sechdr
)
439 ret
= ALIGN(*size
, sechdr
->sh_addralign
? : 1);
440 *size
= ret
+ sechdr
->sh_size
;
444 /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
445 might -- code, read-only data, read-write data, small data. Tally
446 sizes, and place the offsets into sh_entsize fields: high bit means it
448 static void layout_sections(struct module
*mod
, const Elf_Ehdr
* hdr
,
449 Elf_Shdr
* sechdrs
, const char *secstrings
)
451 static unsigned long const masks
[][2] = {
452 /* NOTE: all executable code must be the first section
453 * in this array; otherwise modify the text_size
454 * finder in the two loops below */
455 {SHF_EXECINSTR
| SHF_ALLOC
, ARCH_SHF_SMALL
},
456 {SHF_ALLOC
, SHF_WRITE
| ARCH_SHF_SMALL
},
457 {SHF_WRITE
| SHF_ALLOC
, ARCH_SHF_SMALL
},
458 {ARCH_SHF_SMALL
| SHF_ALLOC
, 0}
462 for (i
= 0; i
< hdr
->e_shnum
; i
++)
463 sechdrs
[i
].sh_entsize
= ~0UL;
465 for (m
= 0; m
< ARRAY_SIZE(masks
); ++m
) {
466 for (i
= 0; i
< hdr
->e_shnum
; ++i
) {
467 Elf_Shdr
*s
= &sechdrs
[i
];
469 // || strncmp(secstrings + s->sh_name, ".init", 5) == 0)
470 if ((s
->sh_flags
& masks
[m
][0]) != masks
[m
][0]
471 || (s
->sh_flags
& masks
[m
][1])
472 || s
->sh_entsize
!= ~0UL)
475 get_offset((unsigned long *)&mod
->core_size
, s
);
479 mod
->core_text_size
= mod
->core_size
;
485 /* from module-elf32.c, but subverted a little */
488 struct mips_hi16
*next
;
493 static struct mips_hi16
*mips_hi16_list
;
494 static unsigned int gp_offs
, gp_addr
;
496 static int apply_r_mips_none(struct module
*me
, uint32_t *location
,
502 static int apply_r_mips_gprel16(struct module
*me
, uint32_t *location
,
507 if( !(*location
& 0xffff) ) {
508 rel
= (int)v
- gp_addr
;
511 /* .sbss + gp(relative) + offset */
513 rel
= (int)(short)((int)v
+ gp_offs
+
514 (int)(short)(*location
& 0xffff) - gp_addr
);
517 if( (rel
> 32768) || (rel
< -32768) ) {
518 printk(KERN_DEBUG
"VPE loader: apply_r_mips_gprel16: "
519 "relative address 0x%x out of range of gp register\n",
524 *location
= (*location
& 0xffff0000) | (rel
& 0xffff);
529 static int apply_r_mips_pc16(struct module
*me
, uint32_t *location
,
533 rel
= (((unsigned int)v
- (unsigned int)location
));
534 rel
>>= 2; // because the offset is in _instructions_ not bytes.
535 rel
-= 1; // and one instruction less due to the branch delay slot.
537 if( (rel
> 32768) || (rel
< -32768) ) {
538 printk(KERN_DEBUG
"VPE loader: "
539 "apply_r_mips_pc16: relative address out of range 0x%x\n", rel
);
543 *location
= (*location
& 0xffff0000) | (rel
& 0xffff);
548 static int apply_r_mips_32(struct module
*me
, uint32_t *location
,
556 static int apply_r_mips_26(struct module
*me
, uint32_t *location
,
560 printk(KERN_DEBUG
"VPE loader: apply_r_mips_26 "
561 " unaligned relocation\n");
566 * Not desperately convinced this is a good check of an overflow condition
567 * anyway. But it gets in the way of handling undefined weak symbols which
568 * we want to set to zero.
569 * if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
571 * "module %s: relocation overflow\n",
577 *location
= (*location
& ~0x03ffffff) |
578 ((*location
+ (v
>> 2)) & 0x03ffffff);
582 static int apply_r_mips_hi16(struct module
*me
, uint32_t *location
,
588 * We cannot relocate this one now because we don't know the value of
589 * the carry we need to add. Save the information, and let LO16 do the
592 n
= kmalloc(sizeof *n
, GFP_KERNEL
);
593 printk(KERN_DEBUG
"Used kmalloc to allocate %d bytes at %x\n",
594 sizeof(struct mips_hi16
), (unsigned int)n
);
600 n
->next
= mips_hi16_list
;
606 static int apply_r_mips_lo16(struct module
*me
, uint32_t *location
,
609 unsigned long insnlo
= *location
;
610 Elf32_Addr val
, vallo
;
611 struct mips_hi16
*l
, *next
;
613 /* Sign extend the addend we extract from the lo insn. */
614 vallo
= ((insnlo
& 0xffff) ^ 0x8000) - 0x8000;
616 if (mips_hi16_list
!= NULL
) {
623 * The value for the HI16 had best be the same.
626 printk(KERN_DEBUG
"VPE loader: "
627 "apply_r_mips_lo16/hi16: \t"
628 "inconsistent value information\n");
633 * Do the HI16 relocation. Note that we actually don't
634 * need to know anything about the LO16 itself, except
635 * where to find the low 16 bits of the addend needed
639 val
= ((insn
& 0xffff) << 16) + vallo
;
643 * Account for the sign extension that will happen in
646 val
= ((val
>> 16) + ((val
& 0x8000) != 0)) & 0xffff;
648 insn
= (insn
& ~0xffff) | val
;
652 printk(KERN_DEBUG
"Used kfree to free memory at %x\n",
658 mips_hi16_list
= NULL
;
662 * Ok, we're done with the HI16 relocs. Now deal with the LO16.
665 insnlo
= (insnlo
& ~0xffff) | (val
& 0xffff);
676 mips_hi16_list
= NULL
;
681 static int (*reloc_handlers
[]) (struct module
*me
, uint32_t *location
,
683 [R_MIPS_NONE
] = apply_r_mips_none
,
684 [R_MIPS_32
] = apply_r_mips_32
,
685 [R_MIPS_26
] = apply_r_mips_26
,
686 [R_MIPS_HI16
] = apply_r_mips_hi16
,
687 [R_MIPS_LO16
] = apply_r_mips_lo16
,
688 [R_MIPS_GPREL16
] = apply_r_mips_gprel16
,
689 [R_MIPS_PC16
] = apply_r_mips_pc16
692 static char *rstrs
[] = {
693 [R_MIPS_NONE
] = "MIPS_NONE",
694 [R_MIPS_32
] = "MIPS_32",
695 [R_MIPS_26
] = "MIPS_26",
696 [R_MIPS_HI16
] = "MIPS_HI16",
697 [R_MIPS_LO16
] = "MIPS_LO16",
698 [R_MIPS_GPREL16
] = "MIPS_GPREL16",
699 [R_MIPS_PC16
] = "MIPS_PC16"
702 static int apply_relocations(Elf32_Shdr
*sechdrs
,
704 unsigned int symindex
,
708 Elf32_Rel
*rel
= (void *) sechdrs
[relsec
].sh_addr
;
715 for (i
= 0; i
< sechdrs
[relsec
].sh_size
/ sizeof(*rel
); i
++) {
716 Elf32_Word r_info
= rel
[i
].r_info
;
718 /* This is where to make the change */
719 location
= (void *)sechdrs
[sechdrs
[relsec
].sh_info
].sh_addr
721 /* This is the symbol it is referring to */
722 sym
= (Elf32_Sym
*)sechdrs
[symindex
].sh_addr
723 + ELF32_R_SYM(r_info
);
725 if (!sym
->st_value
) {
726 printk(KERN_DEBUG
"%s: undefined weak symbol %s\n",
727 me
->name
, strtab
+ sym
->st_name
);
728 /* just print the warning, dont barf */
733 res
= reloc_handlers
[ELF32_R_TYPE(r_info
)](me
, location
, v
);
735 char *r
= rstrs
[ELF32_R_TYPE(r_info
)];
736 printk(KERN_WARNING
"VPE loader: .text+0x%x "
737 "relocation type %s for symbol \"%s\" failed\n",
738 rel
[i
].r_offset
, r
? r
: "UNKNOWN",
739 strtab
+ sym
->st_name
);
747 void save_gp_address(unsigned int secbase
, unsigned int rel
)
749 gp_addr
= secbase
+ rel
;
750 gp_offs
= gp_addr
- (secbase
& 0xffff0000);
752 /* end module-elf32.c */
756 /* Change all symbols so that sh_value encodes the pointer directly. */
757 static void simplify_symbols(Elf_Shdr
* sechdrs
,
758 unsigned int symindex
,
760 const char *secstrings
,
761 unsigned int nsecs
, struct module
*mod
)
763 Elf_Sym
*sym
= (void *)sechdrs
[symindex
].sh_addr
;
764 unsigned long secbase
, bssbase
= 0;
765 unsigned int i
, n
= sechdrs
[symindex
].sh_size
/ sizeof(Elf_Sym
);
768 /* find the .bss section for COMMON symbols */
769 for (i
= 0; i
< nsecs
; i
++) {
770 if (strncmp(secstrings
+ sechdrs
[i
].sh_name
, ".bss", 4) == 0) {
771 bssbase
= sechdrs
[i
].sh_addr
;
776 for (i
= 1; i
< n
; i
++) {
777 switch (sym
[i
].st_shndx
) {
779 /* Allocate space for the symbol in the .bss section.
780 st_value is currently size.
781 We want it to have the address of the symbol. */
783 size
= sym
[i
].st_value
;
784 sym
[i
].st_value
= bssbase
;
790 /* Don't need to do anything */
797 case SHN_MIPS_SCOMMON
:
798 printk(KERN_DEBUG
"simplify_symbols: ignoring SHN_MIPS_SCOMMON "
799 "symbol <%s> st_shndx %d\n", strtab
+ sym
[i
].st_name
,
805 secbase
= sechdrs
[sym
[i
].st_shndx
].sh_addr
;
807 if (strncmp(strtab
+ sym
[i
].st_name
, "_gp", 3) == 0) {
808 save_gp_address(secbase
, sym
[i
].st_value
);
811 sym
[i
].st_value
+= secbase
;
817 #ifdef DEBUG_ELFLOADER
818 static void dump_elfsymbols(Elf_Shdr
* sechdrs
, unsigned int symindex
,
819 const char *strtab
, struct module
*mod
)
821 Elf_Sym
*sym
= (void *)sechdrs
[symindex
].sh_addr
;
822 unsigned int i
, n
= sechdrs
[symindex
].sh_size
/ sizeof(Elf_Sym
);
824 printk(KERN_DEBUG
"dump_elfsymbols: n %d\n", n
);
825 for (i
= 1; i
< n
; i
++) {
826 printk(KERN_DEBUG
" i %d name <%s> 0x%x\n", i
,
827 strtab
+ sym
[i
].st_name
, sym
[i
].st_value
);
832 /* We are prepared so configure and start the VPE... */
833 static int vpe_run(struct vpe
* v
)
835 unsigned long flags
, val
, dmt_flag
;
836 struct vpe_notifications
*n
;
837 unsigned int vpeflags
;
840 /* check we are the Master VPE */
841 local_irq_save(flags
);
842 val
= read_c0_vpeconf0();
843 if (!(val
& VPECONF0_MVP
)) {
845 "VPE loader: only Master VPE's are allowed to configure MT\n");
846 local_irq_restore(flags
);
854 if (!list_empty(&v
->tc
)) {
855 if ((t
= list_entry(v
->tc
.next
, struct tc
, tc
)) == NULL
) {
858 local_irq_restore(flags
);
861 "VPE loader: TC %d is already in use.\n",
868 local_irq_restore(flags
);
871 "VPE loader: No TC's associated with VPE %d\n",
877 /* Put MVPE's into 'configuration state' */
878 set_c0_mvpcontrol(MVPCONTROL_VPC
);
882 /* should check it is halted, and not activated */
883 if ((read_tc_c0_tcstatus() & TCSTATUS_A
) || !(read_tc_c0_tchalt() & TCHALT_H
)) {
886 local_irq_restore(flags
);
888 printk(KERN_WARNING
"VPE loader: TC %d is already active!\n",
894 /* Write the address we want it to start running from in the TCPC register. */
895 write_tc_c0_tcrestart((unsigned long)v
->__start
);
896 write_tc_c0_tccontext((unsigned long)0);
899 * Mark the TC as activated, not interrupt exempt and not dynamically
902 val
= read_tc_c0_tcstatus();
903 val
= (val
& ~(TCSTATUS_DA
| TCSTATUS_IXMT
)) | TCSTATUS_A
;
904 write_tc_c0_tcstatus(val
);
906 write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H
);
909 * The sde-kit passes 'memsize' to __start in $a3, so set something
910 * here... Or set $a3 to zero and define DFLT_STACK_SIZE and
911 * DFLT_HEAP_SIZE when you compile your program
914 mttgpr(7, physical_memsize
);
918 * bind the TC to VPE 1 as late as possible so we only have the final
919 * VPE registers to set up, and so an EJTAG probe can trigger on it
921 write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE
) | 1);
923 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA
));
925 back_to_back_c0_hazard();
927 /* Set up the XTC bit in vpeconf0 to point at our tc */
928 write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC
))
929 | (t
->index
<< VPECONF0_XTC_SHIFT
));
931 back_to_back_c0_hazard();
933 /* enable this VPE */
934 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA
);
936 /* clear out any left overs from a previous program */
937 write_vpe_c0_status(0);
938 write_vpe_c0_cause(0);
940 /* take system out of configuration state */
941 clear_c0_mvpcontrol(MVPCONTROL_VPC
);
944 * SMTC/SMVP kernels manage VPE enable independently,
945 * but uniprocessor kernels need to turn it on, even
946 * if that wasn't the pre-dvpe() state.
954 local_irq_restore(flags
);
956 list_for_each_entry(n
, &v
->notify
, list
)
962 static int find_vpe_symbols(struct vpe
* v
, Elf_Shdr
* sechdrs
,
963 unsigned int symindex
, const char *strtab
,
966 Elf_Sym
*sym
= (void *)sechdrs
[symindex
].sh_addr
;
967 unsigned int i
, j
, n
= sechdrs
[symindex
].sh_size
/ sizeof(Elf_Sym
);
969 for (i
= 1; i
< n
; i
++) {
970 if (strcmp(strtab
+ sym
[i
].st_name
, "__start") == 0)
971 v
->__start
= sym
[i
].st_value
;
973 if (strcmp(strtab
+ sym
[i
].st_name
, "vpe_shared") == 0)
974 v
->shared_ptr
= (void *)sym
[i
].st_value
;
976 if (strcmp(strtab
+ sym
[i
].st_name
, "_vpe_shared_areas") == 0) {
977 struct vpe_shared_area
*psa
978 = (struct vpe_shared_area
*)sym
[i
].st_value
;
979 struct vpe_shared_area
*tpsa
;
980 v
->shared_areas
= psa
;
981 printk(KERN_INFO
"_vpe_shared_areas found, 0x%x\n",
982 (unsigned int)v
->shared_areas
);
984 * Copy any "published" areas to the descriptor
986 for (j
= 0; j
< N_PUB_AREAS
; j
++) {
987 if (published_vpe_area
[j
].type
!= VPE_SHARED_RESERVED
) {
989 while (tpsa
->type
!= VPE_SHARED_NULL
) {
990 if ((tpsa
->type
== VPE_SHARED_RESERVED
)
991 || (tpsa
->type
== published_vpe_area
[j
].type
)) {
992 tpsa
->type
= published_vpe_area
[j
].type
;
993 tpsa
->addr
= published_vpe_area
[j
].addr
;
1004 if ( (v
->__start
== 0) || (v
->shared_ptr
== NULL
))
1011 * Allocates a VPE with some program code space(the load address), copies the
1012 * contents of the program (p)buffer performing relocatations/etc, free's it
1015 static int vpe_elfload(struct vpe
* v
)
1020 char *secstrings
, *strtab
= NULL
;
1021 unsigned int len
, i
, symindex
= 0, strindex
= 0, relocate
= 0;
1022 struct module mod
; // so we can re-use the relocations code
1024 memset(&mod
, 0, sizeof(struct module
));
1025 strcpy(mod
.name
, "VPE loader");
1029 /* Sanity checks against insmoding binaries or wrong arch,
1030 weird elf version */
1031 if ((hdr
->e_type
!= ET_REL
&& hdr
->e_type
!= ET_EXEC
)
1032 || !elf_check_arch(hdr
)
1033 || hdr
->e_shentsize
!= sizeof(*sechdrs
)) {
1035 "VPE loader: program wrong arch or weird elf version\n");
1040 if (hdr
->e_type
== ET_REL
)
1043 if (len
< v
->l_phlen
+ v
->l_shlen
) {
1044 printk(KERN_ERR
"VPE loader: Headers exceed %u bytes\n", len
);
1049 /* Convenience variables */
1050 sechdrs
= (void *)hdr
+ hdr
->e_shoff
;
1051 secstrings
= (void *)hdr
+ sechdrs
[hdr
->e_shstrndx
].sh_offset
;
1052 sechdrs
[0].sh_addr
= 0;
1054 /* And these should exist, but gcc whinges if we don't init them */
1055 symindex
= strindex
= 0;
1058 for (i
= 1; i
< hdr
->e_shnum
; i
++) {
1059 if (sechdrs
[i
].sh_type
!= SHT_NOBITS
1060 && len
< sechdrs
[i
].sh_offset
+ sechdrs
[i
].sh_size
) {
1061 printk(KERN_ERR
"VPE program length %u truncated\n",
1066 /* Mark all sections sh_addr with their address in the
1068 sechdrs
[i
].sh_addr
= (size_t) hdr
+ sechdrs
[i
].sh_offset
;
1070 /* Internal symbols and strings. */
1071 if (sechdrs
[i
].sh_type
== SHT_SYMTAB
) {
1073 strindex
= sechdrs
[i
].sh_link
;
1074 strtab
= (char *)hdr
+ sechdrs
[strindex
].sh_offset
;
1077 layout_sections(&mod
, hdr
, sechdrs
, secstrings
);
1079 * Non-relocatable loads should have already done their
1080 * allocates, based on program header table.
1084 memset(v
->load_addr
, 0, mod
.core_size
);
1088 pr_info("VPE loader: loading to %p\n", v
->load_addr
);
1091 for (i
= 0; i
< hdr
->e_shnum
; i
++) {
1094 if (!(sechdrs
[i
].sh_flags
& SHF_ALLOC
))
1097 dest
= v
->load_addr
+ sechdrs
[i
].sh_entsize
;
1099 if (sechdrs
[i
].sh_type
!= SHT_NOBITS
)
1100 memcpy(dest
, (void *)sechdrs
[i
].sh_addr
,
1101 sechdrs
[i
].sh_size
);
1102 /* Update sh_addr to point to copy in image. */
1103 sechdrs
[i
].sh_addr
= (unsigned long)dest
;
1105 printk(KERN_DEBUG
" section sh_name %s sh_addr 0x%x\n",
1106 secstrings
+ sechdrs
[i
].sh_name
, sechdrs
[i
].sh_addr
);
1109 /* Fix up syms, so that st_value is a pointer to location. */
1110 simplify_symbols(sechdrs
, symindex
, strtab
, secstrings
,
1111 hdr
->e_shnum
, &mod
);
1113 /* Now do relocations. */
1114 for (i
= 1; i
< hdr
->e_shnum
; i
++) {
1115 const char *strtab
= (char *)sechdrs
[strindex
].sh_addr
;
1116 unsigned int info
= sechdrs
[i
].sh_info
;
1118 /* Not a valid relocation section? */
1119 if (info
>= hdr
->e_shnum
)
1122 /* Don't bother with non-allocated sections */
1123 if (!(sechdrs
[info
].sh_flags
& SHF_ALLOC
))
1126 if (sechdrs
[i
].sh_type
== SHT_REL
)
1127 err
= apply_relocations(sechdrs
, strtab
, symindex
, i
,
1129 else if (sechdrs
[i
].sh_type
== SHT_RELA
)
1130 err
= apply_relocate_add(sechdrs
, strtab
, symindex
, i
,
1139 * Program image is already in memory.
1141 for (i
= 0; i
< hdr
->e_shnum
; i
++) {
1142 /* Internal symbols and strings. */
1143 if (sechdrs
[i
].sh_type
== SHT_SYMTAB
) {
1145 strindex
= sechdrs
[i
].sh_link
;
1146 strtab
= (char *)hdr
+ sechdrs
[strindex
].sh_offset
;
1148 /* mark the symtab's address for when we try to find the
1150 sechdrs
[i
].sh_addr
= (size_t) hdr
+ sechdrs
[i
].sh_offset
;
1155 /* make sure it's physically written out */
1156 flush_icache_range((unsigned long)v
->load_addr
,
1157 (unsigned long)v
->load_addr
+ v
->copied
);
1159 if ((find_vpe_symbols(v
, sechdrs
, symindex
, strtab
, &mod
)) < 0) {
1160 if (v
->__start
== 0) {
1161 printk(KERN_WARNING
"VPE loader: program does not contain "
1162 "a __start symbol\n");
1166 if (v
->shared_ptr
== NULL
)
1167 printk(KERN_WARNING
"VPE loader: "
1168 "program does not contain vpe_shared symbol.\n"
1169 " Unable to use AMVP (AP/SP) facilities.\n");
1171 pr_info("APRP VPE loader: elf loaded\n");
1176 static void cleanup_tc(struct tc
*tc
)
1178 unsigned long flags
;
1179 unsigned int mtflags
, vpflags
;
1182 local_irq_save(flags
);
1185 /* Put MVPE's into 'configuration state' */
1186 set_c0_mvpcontrol(MVPCONTROL_VPC
);
1189 tmp
= read_tc_c0_tcstatus();
1191 /* mark not allocated and not dynamically allocatable */
1192 tmp
&= ~(TCSTATUS_A
| TCSTATUS_DA
);
1193 tmp
|= TCSTATUS_IXMT
; /* interrupt exempt */
1194 write_tc_c0_tcstatus(tmp
);
1196 write_tc_c0_tchalt(TCHALT_H
);
1199 /* bind it to anything other than VPE1 */
1200 // write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE
1202 clear_c0_mvpcontrol(MVPCONTROL_VPC
);
1205 local_irq_restore(flags
);
1208 static int getcwd(char *buff
, int size
)
1210 mm_segment_t old_fs
;
1216 ret
= sys_getcwd(buff
, size
);
1223 /* checks VPE is unused and gets ready to load program */
1224 static int vpe_open(struct inode
*inode
, struct file
*filp
)
1226 enum vpe_state state
;
1227 struct vpe_notifications
*not;
1231 if (minor
!= iminor(inode
)) {
1232 /* assume only 1 device at the moment. */
1233 pr_warning("VPE loader: only vpe1 is supported\n");
1238 * This treats the tclimit command line configuration input
1239 * as a minor device indication, which is probably unwholesome.
1242 if ((v
= get_vpe(tclimit
)) == NULL
) {
1243 pr_warning("VPE loader: unable to get vpe\n");
1248 state
= xchg(&v
->state
, VPE_STATE_INUSE
);
1249 if (state
!= VPE_STATE_UNUSED
) {
1250 printk(KERN_DEBUG
"VPE loader: tc in use dumping regs\n");
1252 list_for_each_entry(not, &v
->notify
, list
) {
1256 release_progmem(v
->load_addr
);
1258 cleanup_tc(get_tc(tclimit
));
1261 /* this of-course trashes what was there before... */
1262 v
->pbuffer
= vmalloc(P_SIZE
);
1263 v
->load_addr
= NULL
;
1266 v
->l_state
= LOAD_STATE_EHDR
;
1272 v
->uid
= filp
->f_cred
->fsuid
;
1273 v
->gid
= filp
->f_cred
->fsgid
;
1275 #ifdef CONFIG_MIPS_APSP_KSPD
1276 /* get kspd to tell us when a syscall_exit happens */
1277 if (!kspd_events_reqd
) {
1278 kspd_notify(&kspd_events
);
1284 ret
= getcwd(v
->cwd
, VPE_PATH_MAX
);
1286 printk(KERN_WARNING
"VPE loader: open, getcwd returned %d\n", ret
);
1288 v
->shared_ptr
= NULL
;
1289 v
->shared_areas
= NULL
;
1295 static int vpe_release(struct inode
*inode
, struct file
*filp
)
1300 v
= get_vpe(tclimit
);
1304 * If image load had no errors, massage program/section tables
1305 * to reflect movement of program/section data into VPE program
1308 if (v
->l_state
!= LOAD_STATE_DONE
) {
1309 printk(KERN_WARNING
"VPE Release after incomplete load\n");
1310 printk(KERN_DEBUG
"Used vfree to free memory at "
1311 "%x after failed load attempt\n",
1312 (unsigned int)v
->pbuffer
);
1313 if (v
->pbuffer
!= NULL
)
1318 if (vpe_elfload(v
) >= 0) {
1321 printk(KERN_WARNING
"VPE loader: ELF load failed.\n");
1322 printk(KERN_DEBUG
"Used vfree to free memory at "
1323 "%x after failed load attempt\n",
1324 (unsigned int)v
->pbuffer
);
1325 if (v
->pbuffer
!= NULL
)
1331 /* It's good to be able to run the SP and if it chokes have a look at
1332 the /dev/rt?. But if we reset the pointer to the shared struct we
1333 lose what has happened. So perhaps if garbage is sent to the vpe
1334 device, use it as a trigger for the reset. Hopefully a nice
1335 executable will be along shortly. */
1337 v
->shared_ptr
= NULL
;
1339 // cleanup any temp buffers
1341 printk(KERN_DEBUG
"Used vfree to free memory at %x\n",
1342 (unsigned int)v
->pbuffer
);
1350 * A sort of insertion sort to generate list of program header indices
1351 * in order of their file offsets.
1354 static void indexort(struct elf_phdr
*phdr
, int nph
, int *index
)
1359 /* Create initial mapping */
1360 for (i
= 0; i
< nph
; i
++)
1362 /* Do the indexed insert sort */
1363 for (i
= 1; i
< nph
; i
++) {
1366 toff
= phdr
[t
].p_offset
;
1367 while ((j
> 0) && (phdr
[index
[j
-1]].p_offset
> toff
)) {
1368 index
[j
] = index
[j
-1];
1377 * This function has to convert the ELF file image being sequentially
1378 * streamed to the pseudo-device into the binary image, symbol, and
1379 * string information, which the ELF format allows to be in some degree
1382 * The ELF header and, if present, program header table, are copied into
1383 * a temporary buffer. Loadable program segments, if present, are copied
1384 * into the RP program memory at the addresses specified by the program
1387 * Sections not specified by the program header table are loaded into
1388 * memory following the program segments if they are "allocated", or
1389 * into the temporary buffer if they are not. The section header
1390 * table is loaded into the temporary buffer.???
1392 #define CURPHDR (v->l_phdr[v->l_phsort[v->l_cur_seg]])
1393 static ssize_t
vpe_write(struct file
*file
, const char __user
* buffer
,
1394 size_t count
, loff_t
* ppos
)
1398 int tocopy
, uncopied
;
1400 unsigned int progmemlen
;
1402 if (iminor(file
->f_path
.dentry
->d_inode
) != minor
)
1405 v
= get_vpe(tclimit
);
1409 if (v
->pbuffer
== NULL
) {
1410 printk(KERN_ERR
"VPE loader: no buffer for program\n");
1415 switch (v
->l_state
) {
1416 case LOAD_STATE_EHDR
:
1417 /* Loading ELF Header into scratch buffer */
1418 tocopy
= min((unsigned long)count
,
1419 sizeof(Elf_Ehdr
) - v
->offset
);
1420 uncopied
= copy_from_user(v
->pbuffer
+ v
->copied
,
1422 count
-= tocopy
- uncopied
;
1423 v
->copied
+= tocopy
- uncopied
;
1424 v
->offset
+= tocopy
- uncopied
;
1425 buffer
+= tocopy
- uncopied
;
1426 if (v
->copied
== sizeof(Elf_Ehdr
)) {
1427 v
->l_ehdr
= (Elf_Ehdr
*)v
->pbuffer
;
1428 if (memcmp(v
->l_ehdr
->e_ident
, ELFMAG
, 4) != 0) {
1429 printk(KERN_WARNING
"VPE loader: %s\n",
1430 "non-ELF file image");
1432 v
->l_state
= LOAD_STATE_ERROR
;
1435 if (v
->l_ehdr
->e_phoff
!= 0) {
1436 v
->l_phdr
= (struct elf_phdr
*)
1437 (v
->pbuffer
+ v
->l_ehdr
->e_phoff
);
1438 v
->l_phlen
= v
->l_ehdr
->e_phentsize
1439 * v
->l_ehdr
->e_phnum
;
1440 /* Check against buffer overflow */
1441 if ((v
->copied
+ v
->l_phlen
) > v
->pbsize
) {
1443 "VPE loader: elf program header table size too big\n");
1444 v
->l_state
= LOAD_STATE_ERROR
;
1447 v
->l_state
= LOAD_STATE_PHDR
;
1449 * Program headers generally indicate
1450 * linked executable with possibly
1451 * valid entry point.
1453 v
->__start
= v
->l_ehdr
->e_entry
;
1454 } else if (v
->l_ehdr
->e_shoff
!= 0) {
1456 * No program headers, but a section
1457 * header table. A relocatable binary.
1458 * We need to load the works into the
1459 * kernel temp buffer to compute the
1460 * RP program image. That limits our
1461 * binary size, but at least we're no
1462 * worse off than the original APRP
1465 v
->l_shlen
= v
->l_ehdr
->e_shentsize
1466 * v
->l_ehdr
->e_shnum
;
1467 if ((v
->l_ehdr
->e_shoff
+ v
->l_shlen
1468 - v
->offset
) > v
->pbsize
) {
1470 "VPE loader: elf sections/section table too big.\n");
1471 v
->l_state
= LOAD_STATE_ERROR
;
1474 v
->l_state
= LOAD_STATE_SHDR
;
1477 * If neither program nor section tables,
1478 * we don't know what to do.
1480 v
->l_state
= LOAD_STATE_ERROR
;
1485 case LOAD_STATE_PHDR
:
1486 /* Loading Program Headers into scratch */
1487 tocopy
= min((unsigned long)count
,
1488 v
->l_ehdr
->e_phoff
+ v
->l_phlen
- v
->copied
);
1489 uncopied
= copy_from_user(v
->pbuffer
+ v
->copied
,
1491 count
-= tocopy
- uncopied
;
1492 v
->copied
+= tocopy
- uncopied
;
1493 v
->offset
+= tocopy
- uncopied
;
1494 buffer
+= tocopy
- uncopied
;
1496 if (v
->copied
== v
->l_ehdr
->e_phoff
+ v
->l_phlen
) {
1498 * It's legal for the program headers to be
1499 * out of order with respect to the file layout.
1500 * Generate a list of indices, sorted by file
1503 v
->l_phsort
= kmalloc(v
->l_ehdr
->e_phnum
1504 * sizeof(int), GFP_KERNEL
);
1506 "Used kmalloc to allocate %d bytes of memory at %x\n",
1507 v
->l_ehdr
->e_phnum
*sizeof(int),
1508 (unsigned int)v
->l_phsort
);
1510 return -ENOMEM
; /* Preposterous, but... */
1511 indexort(v
->l_phdr
, v
->l_ehdr
->e_phnum
,
1514 v
->l_progminad
= (unsigned int)-1;
1517 for (i
= 0; i
< v
->l_ehdr
->e_phnum
; i
++) {
1518 if (v
->l_phdr
[v
->l_phsort
[i
]].p_type
1520 /* Unstripped .reginfo sections are bad */
1521 if (v
->l_phdr
[v
->l_phsort
[i
]].p_vaddr
1523 printk(KERN_WARNING
"%s%s%s\n",
1525 "User-mode p_vaddr, ",
1526 "skipping program segment,");
1527 printk(KERN_WARNING
"%s%s%s\n",
1529 "strip .reginfo from binary ",
1533 if (v
->l_phdr
[v
->l_phsort
[i
]].p_vaddr
1536 v
->l_phdr
[v
->l_phsort
[i
]].p_vaddr
;
1537 if ((v
->l_phdr
[v
->l_phsort
[i
]].p_vaddr
1538 + v
->l_phdr
[v
->l_phsort
[i
]].p_memsz
)
1541 v
->l_phdr
[v
->l_phsort
[i
]].p_vaddr
+
1542 v
->l_phdr
[v
->l_phsort
[i
]].p_memsz
;
1545 printk(KERN_INFO
"APRP RP program 0x%x to 0x%x\n",
1546 v
->l_progminad
, v
->l_progmaxad
);
1548 * Do a simple sanity check of the memory being
1549 * allocated. Abort if greater than an arbitrary
1552 if (v
->l_progmaxad
- v
->l_progminad
>
1555 "RP program failed to allocate %d kbytes - limit is 32,768 KB\n",
1556 (v
->l_progmaxad
- v
->l_progminad
)/1024);
1560 v
->load_addr
= alloc_progmem((void *)v
->l_progminad
,
1561 v
->l_progmaxad
- v
->l_progminad
);
1564 if ((unsigned int)v
->load_addr
1566 release_progmem(v
->load_addr
);
1569 /* Find first segment with loadable content */
1570 for (i
= 0; i
< v
->l_ehdr
->e_phnum
; i
++) {
1571 if (v
->l_phdr
[v
->l_phsort
[i
]].p_type
1573 if (v
->l_phdr
[v
->l_phsort
[i
]].p_vaddr
1575 /* Skip userspace segments */
1582 if (i
== v
->l_ehdr
->e_phnum
) {
1583 /* No loadable program segment? Bogus file. */
1584 printk(KERN_WARNING
"Bad ELF file for APRP\n");
1588 v
->l_state
= LOAD_STATE_PIMAGE
;
1591 case LOAD_STATE_PIMAGE
:
1593 * Skip through input stream until
1594 * first program segment. Would be
1595 * better to have loaded up to here
1596 * into the temp buffer, but for now
1597 * we simply rule out "interesting"
1598 * sections prior to the last program
1599 * segment in an executable file.
1601 if (v
->offset
< CURPHDR
.p_offset
) {
1602 uncopied
= CURPHDR
.p_offset
- v
->offset
;
1603 if (uncopied
> count
)
1607 v
->offset
+= uncopied
;
1608 /* Go back through the "while" */
1612 * Having dispensed with any unlikely fluff,
1613 * copy from user I/O buffer to program segment.
1615 tocopy
= min(count
, CURPHDR
.p_filesz
- v
->l_segoff
);
1617 /* Loading image into RP memory */
1618 uncopied
= copy_from_user((char *)CURPHDR
.p_vaddr
1619 + v
->l_segoff
, buffer
, tocopy
);
1620 count
-= tocopy
- uncopied
;
1621 v
->offset
+= tocopy
- uncopied
;
1622 v
->l_segoff
+= tocopy
- uncopied
;
1623 buffer
+= tocopy
- uncopied
;
1624 if (v
->l_segoff
>= CURPHDR
.p_filesz
) {
1625 /* Finished current segment load */
1626 /* Zero out non-file-sourced image */
1627 uncopied
= CURPHDR
.p_memsz
- CURPHDR
.p_filesz
;
1629 memset((char *)CURPHDR
.p_vaddr
+ v
->l_segoff
,
1631 /* Advance to next segment */
1632 for (i
= v
->l_cur_seg
+ 1;
1633 i
< v
->l_ehdr
->e_phnum
; i
++) {
1634 if (v
->l_phdr
[v
->l_phsort
[i
]].p_type
1636 if (v
->l_phdr
[v
->l_phsort
[i
]].p_vaddr
1638 /* Skip userspace segments */
1645 /* If none left, prepare to load section headers */
1646 if (i
== v
->l_ehdr
->e_phnum
) {
1647 if (v
->l_ehdr
->e_shoff
!= 0) {
1648 /* Copy to where we left off in temp buffer */
1649 v
->l_shlen
= v
->l_ehdr
->e_shentsize
1650 * v
->l_ehdr
->e_shnum
;
1651 if ((v
->l_ehdr
->e_shoff
+ v
->l_shlen
1652 - v
->offset
) > v
->pbsize
) {
1654 "VPE loader: elf sections/section table too big\n");
1655 v
->l_state
= LOAD_STATE_ERROR
;
1658 v
->l_state
= LOAD_STATE_SHDR
;
1662 /* reset offset for new program segment */
1667 case LOAD_STATE_SHDR
:
1669 * Read stream into private buffer up
1670 * through and including the section header
1674 tocopy
= min((unsigned long)count
,
1675 v
->l_ehdr
->e_shoff
+ v
->l_shlen
- v
->offset
);
1677 uncopied
= copy_from_user(v
->pbuffer
+ v
->copied
,
1679 count
-= tocopy
- uncopied
;
1680 v
->copied
+= tocopy
- uncopied
;
1681 v
->offset
+= tocopy
- uncopied
;
1682 buffer
+= tocopy
- uncopied
;
1685 if (v
->offset
== v
->l_ehdr
->e_shoff
+ v
->l_shlen
) {
1686 unsigned int offset_delta
= v
->offset
- v
->copied
;
1688 v
->l_shdr
= (Elf_Shdr
*)(v
->pbuffer
1689 + v
->l_ehdr
->e_shoff
- offset_delta
);
1691 * Check for sections after the section table,
1692 * which for gcc MIPS binaries includes
1693 * the symbol table. Do any other processing
1694 * that requires value within stream, and
1695 * normalize offsets to be relative to
1696 * the header-only layout of temp buffer.
1699 /* Assume no trailer until we detect one */
1701 v
->l_state
= LOAD_STATE_DONE
;
1702 for (i
= 0; i
< v
->l_ehdr
->e_shnum
; i
++) {
1703 if (v
->l_shdr
[i
].sh_offset
1704 > v
->l_ehdr
->e_shoff
) {
1705 v
->l_state
= LOAD_STATE_TRAILER
;
1706 /* Track trailing data length */
1708 < (v
->l_shdr
[i
].sh_offset
1709 + v
->l_shdr
[i
].sh_size
)
1710 - (v
->l_ehdr
->e_shoff
1713 (v
->l_shdr
[i
].sh_offset
1714 + v
->l_shdr
[i
].sh_size
)
1715 - (v
->l_ehdr
->e_shoff
1718 /* Adjust section offset if necessary */
1719 v
->l_shdr
[i
].sh_offset
-= offset_delta
;
1721 if ((v
->copied
+ v
->l_trailer
) > v
->pbsize
) {
1723 "VPE loader: elf size too big. Perhaps strip uneeded symbols\n");
1724 v
->l_state
= LOAD_STATE_ERROR
;
1728 /* Fix up offsets in ELF header */
1729 v
->l_ehdr
->e_shoff
= (unsigned int)v
->l_shdr
1730 - (unsigned int)v
->pbuffer
;
1733 case LOAD_STATE_TRAILER
:
1735 * Symbol and string tables follow section headers
1736 * in gcc binaries for MIPS. Copy into temp buffer.
1739 tocopy
= min(count
, v
->l_trailer
);
1740 uncopied
= copy_from_user(v
->pbuffer
+ v
->copied
,
1742 count
-= tocopy
- uncopied
;
1743 v
->l_trailer
-= tocopy
- uncopied
;
1744 v
->copied
+= tocopy
- uncopied
;
1745 v
->offset
+= tocopy
- uncopied
;
1746 buffer
+= tocopy
- uncopied
;
1749 v
->l_state
= LOAD_STATE_DONE
;
1751 case LOAD_STATE_DONE
:
1755 case LOAD_STATE_ERROR
:
1763 static const struct file_operations vpe_fops
= {
1764 .owner
= THIS_MODULE
,
1766 .release
= vpe_release
,
1770 /* module wrapper entry points */
1772 vpe_handle
vpe_alloc(void)
1778 for (i
= 1; i
< MAX_VPES
; i
++) {
1779 if ((v
= get_vpe(i
)) != NULL
) {
1780 v
->state
= VPE_STATE_INUSE
;
1787 EXPORT_SYMBOL(vpe_alloc
);
1789 /* start running from here */
1790 int vpe_start(vpe_handle vpe
, unsigned long start
)
1792 struct vpe
*v
= vpe
;
1794 /* Null start address means use value from ELF file */
1800 EXPORT_SYMBOL(vpe_start
);
1802 /* halt it for now */
1803 int vpe_stop(vpe_handle vpe
)
1805 struct vpe
*v
= vpe
;
1807 unsigned int evpe_flags
;
1809 evpe_flags
= dvpe();
1811 if ((t
= list_entry(v
->tc
.next
, struct tc
, tc
)) != NULL
) {
1814 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA
);
1822 EXPORT_SYMBOL(vpe_stop
);
1824 /* I've done with it thank you */
1825 int vpe_free(vpe_handle vpe
)
1827 struct vpe
*v
= vpe
;
1829 unsigned int evpe_flags
;
1831 if ((t
= list_entry(v
->tc
.next
, struct tc
, tc
)) == NULL
) {
1835 evpe_flags
= dvpe();
1837 /* Put MVPE's into 'configuration state' */
1838 set_c0_mvpcontrol(MVPCONTROL_VPC
);
1841 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA
);
1844 write_tc_c0_tchalt(TCHALT_H
);
1847 /* mark the TC unallocated */
1848 write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A
);
1850 v
->state
= VPE_STATE_UNUSED
;
1852 clear_c0_mvpcontrol(MVPCONTROL_VPC
);
1858 EXPORT_SYMBOL(vpe_free
);
1860 void *vpe_get_shared(int index
)
1864 if ((v
= get_vpe(index
)) == NULL
)
1867 return v
->shared_ptr
;
1870 EXPORT_SYMBOL(vpe_get_shared
);
1872 int vpe_getuid(int index
)
1876 if ((v
= get_vpe(index
)) == NULL
)
1882 EXPORT_SYMBOL(vpe_getuid
);
1884 int vpe_getgid(int index
)
1888 if ((v
= get_vpe(index
)) == NULL
)
1894 EXPORT_SYMBOL(vpe_getgid
);
1896 int vpe_notify(int index
, struct vpe_notifications
*notify
)
1900 if ((v
= get_vpe(index
)) == NULL
)
1903 list_add(¬ify
->list
, &v
->notify
);
1907 EXPORT_SYMBOL(vpe_notify
);
1909 char *vpe_getcwd(int index
)
1913 if ((v
= get_vpe(index
)) == NULL
)
1919 EXPORT_SYMBOL(vpe_getcwd
);
1922 * RP applications may contain a _vpe_shared_area descriptor
1923 * array to allow for data sharing with Linux kernel functions
1924 * that's slightly more abstracted and extensible than the
1925 * fixed binding used by the rtlx support. Indeed, the rtlx
1926 * support should ideally be converted to use the generic
1927 * shared area descriptor scheme at some point.
1929 * mips_get_vpe_shared_area() can be used by AP kernel
1930 * modules to get an area pointer of a given type, if
1933 * mips_publish_vpe_area() is used by AP kernel modules
1934 * to share kseg0 kernel memory with the RP. It maintains
1935 * a private table, so that publishing can be done before
1936 * the RP program is launched. Making this table dynamically
1937 * allocated and extensible would be good scalable OS design.
1938 * However, until there's more than one user of the mechanism,
1939 * it should be an acceptable simplification to allow a static
1940 * maximum of 4 published areas.
1943 void *mips_get_vpe_shared_area(int index
, int type
)
1946 struct vpe_shared_area
*vsa
;
1952 if (v
->shared_areas
== NULL
)
1955 vsa
= v
->shared_areas
;
1957 while (vsa
->type
!= VPE_SHARED_NULL
) {
1958 if (vsa
->type
== type
)
1963 /* Fell through without finding type */
1967 EXPORT_SYMBOL(mips_get_vpe_shared_area
);
1969 int mips_publish_vpe_area(int type
, void *ptr
)
1974 unsigned long flags
;
1975 unsigned int vpflags
;
1977 printk(KERN_INFO
"mips_publish_vpe_area(0x%x, 0x%x)\n", type
, (int)ptr
);
1978 if ((unsigned int)ptr
>= KSEG2
) {
1979 printk(KERN_ERR
"VPE area pubish of invalid address 0x%x\n",
1983 for (i
= 0; i
< N_PUB_AREAS
; i
++) {
1984 if (published_vpe_area
[i
].type
== VPE_SHARED_RESERVED
) {
1985 published_vpe_area
[i
].type
= type
;
1986 published_vpe_area
[i
].addr
= ptr
;
1992 * If we've already got a VPE up and running, try to
1993 * update the shared descriptor with the new data.
1995 list_for_each_entry(v
, &vpecontrol
.vpe_list
, list
) {
1996 if (v
->shared_areas
!= NULL
) {
1997 local_irq_save(flags
);
1999 for (i
= 0; v
->shared_areas
[i
].type
!= VPE_SHARED_NULL
; i
++) {
2000 if ((v
->shared_areas
[i
].type
== type
)
2001 || (v
->shared_areas
[i
].type
== VPE_SHARED_RESERVED
)) {
2002 v
->shared_areas
[i
].type
= type
;
2003 v
->shared_areas
[i
].addr
= ptr
;
2007 local_irq_restore(flags
);
2012 EXPORT_SYMBOL(mips_publish_vpe_area
);
#ifdef CONFIG_MIPS_APSP_KSPD
/* KSPD callback: the SP program exited, so tear down its TC. */
static void kspd_sp_exit(int sp_id)
{
	cleanup_tc(get_tc(sp_id));
}
#endif
2021 static ssize_t
store_kill(struct device
*dev
, struct device_attribute
*attr
,
2022 const char *buf
, size_t len
)
2024 struct vpe
*vpe
= get_vpe(tclimit
);
2025 struct vpe_notifications
*not;
2027 list_for_each_entry(not, &vpe
->notify
, list
) {
2031 release_progmem(vpe
->load_addr
);
2032 kfree(vpe
->l_phsort
);
2033 cleanup_tc(get_tc(tclimit
));
2040 static ssize_t
show_ntcs(struct device
*cd
, struct device_attribute
*attr
,
2043 struct vpe
*vpe
= get_vpe(tclimit
);
2045 return sprintf(buf
, "%d\n", vpe
->ntcs
);
2048 static ssize_t
store_ntcs(struct device
*dev
, struct device_attribute
*attr
,
2049 const char *buf
, size_t len
)
2051 struct vpe
*vpe
= get_vpe(tclimit
);
2055 new = simple_strtoul(buf
, &endp
, 0);
2059 if (new == 0 || new > (hw_tcs
- tclimit
))
2070 static struct device_attribute vpe_class_attributes
[] = {
2071 __ATTR(kill
, S_IWUSR
, NULL
, store_kill
),
2072 __ATTR(ntcs
, S_IRUGO
| S_IWUSR
, show_ntcs
, store_ntcs
),
2076 static void vpe_device_release(struct device
*cd
)
2078 printk(KERN_DEBUG
"Using kfree to free vpe class device at %x\n",
2083 struct class vpe_class
= {
2085 .owner
= THIS_MODULE
,
2086 .dev_release
= vpe_device_release
,
2087 .dev_attrs
= vpe_class_attributes
,
2090 struct device vpe_device
;
2092 static int __init
vpe_module_init(void)
2094 unsigned int mtflags
, vpflags
;
2095 unsigned long flags
, val
;
2096 struct vpe
*v
= NULL
;
2100 if (!cpu_has_mipsmt
) {
2101 printk("VPE loader: not a MIPS MT capable processor\n");
2105 if (vpelimit
== 0) {
2106 #if defined(CONFIG_MIPS_MT_SMTC) || defined(MIPS_MT_SMP)
2107 printk(KERN_WARNING
"No VPEs reserved for VPE loader.\n"
2108 "Pass maxvpes=<n> argument as kernel argument\n");
2116 #if defined(CONFIG_MIPS_MT_SMTC) || defined(MIPS_MT_SMP)
2117 printk(KERN_WARNING
"No TCs reserved for AP/SP, not "
2118 "initializing VPE loader.\nPass maxtcs=<n> argument as "
2119 "kernel argument\n");
2126 major
= register_chrdev(0, module_name
, &vpe_fops
);
2128 printk("VPE loader: unable to register character device\n");
2132 err
= class_register(&vpe_class
);
2134 printk(KERN_ERR
"vpe_class registration failed\n");
2137 xvpe_vector_set
= 0;
2138 device_initialize(&vpe_device
);
2139 vpe_device
.class = &vpe_class
,
2140 vpe_device
.parent
= NULL
,
2141 dev_set_name(&vpe_device
, "vpe1");
2142 vpe_device
.devt
= MKDEV(major
, minor
);
2143 err
= device_add(&vpe_device
);
2145 printk(KERN_ERR
"Adding vpe_device failed\n");
2149 local_irq_save(flags
);
2153 /* Put MVPE's into 'configuration state' */
2154 set_c0_mvpcontrol(MVPCONTROL_VPC
);
2156 /* dump_mtregs(); */
2158 val
= read_c0_mvpconf0();
2159 hw_tcs
= (val
& MVPCONF0_PTC
) + 1;
2160 hw_vpes
= ((val
& MVPCONF0_PVPE
) >> MVPCONF0_PVPE_SHIFT
) + 1;
2162 for (tc
= tclimit
; tc
< hw_tcs
; tc
++) {
2164 * Must re-enable multithreading temporarily or in case we
2165 * reschedule send IPIs or similar we might hang.
2167 clear_c0_mvpcontrol(MVPCONTROL_VPC
);
2170 local_irq_restore(flags
);
2177 local_irq_save(flags
);
2180 set_c0_mvpcontrol(MVPCONTROL_VPC
);
2186 if ((v
= alloc_vpe(tc
)) == NULL
) {
2187 printk(KERN_WARNING
"VPE: unable to allocate VPE\n");
2192 v
->ntcs
= hw_tcs
- tclimit
;
2194 /* add the tc to the list of this vpe's tc's. */
2195 list_add(&t
->tc
, &v
->tc
);
2197 /* deactivate all but vpe0 */
2198 if (tc
>= tclimit
) {
2199 unsigned long tmp
= read_vpe_c0_vpeconf0();
2201 tmp
&= ~VPECONF0_VPA
;
2204 tmp
|= VPECONF0_MVP
;
2205 write_vpe_c0_vpeconf0(tmp
);
2208 /* disable multi-threading with TC's */
2209 write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE
);
2211 if (tc
>= vpelimit
) {
2213 * Set config to be the same as vpe0,
2214 * particularly kseg0 coherency alg
2216 write_vpe_c0_config(read_c0_config());
2221 t
->pvpe
= v
; /* set the parent vpe */
2223 if (tc
>= tclimit
) {
2228 /* Any TC that is bound to VPE0 gets left as is - in case
2229 we are running SMTC on VPE0. A TC that is bound to any
2230 other VPE gets bound to VPE0, ideally I'd like to make
2231 it homeless but it doesn't appear to let me bind a TC
2232 to a non-existent VPE. Which is perfectly reasonable.
2234 The (un)bound state is visible to an EJTAG probe so may
2238 if (((tmp
= read_tc_c0_tcbind()) & TCBIND_CURVPE
)) {
2239 /* tc is bound >vpe0 */
2240 write_tc_c0_tcbind(tmp
& ~TCBIND_CURVPE
);
2242 t
->pvpe
= get_vpe(0); /* set the parent vpe */
2246 write_tc_c0_tchalt(TCHALT_H
);
2249 tmp
= read_tc_c0_tcstatus();
2251 /* mark not activated and not dynamically allocatable */
2252 tmp
&= ~(TCSTATUS_A
| TCSTATUS_DA
);
2253 tmp
|= TCSTATUS_IXMT
; /* interrupt exempt */
2254 write_tc_c0_tcstatus(tmp
);
2259 /* release config state */
2260 clear_c0_mvpcontrol(MVPCONTROL_VPC
);
2264 local_irq_restore(flags
);
2266 #ifdef CONFIG_MIPS_APSP_KSPD
2267 kspd_events
.kspd_sp_exit
= kspd_sp_exit
;
2272 class_unregister(&vpe_class
);
2274 unregister_chrdev(major
, module_name
);
2280 static void __exit
vpe_module_exit(void)
2284 device_del(&vpe_device
);
2285 unregister_chrdev(major
, module_name
);
2287 /* No locking needed here */
2288 list_for_each_entry_safe(v
, n
, &vpecontrol
.vpe_list
, list
) {
2289 if (v
->state
!= VPE_STATE_UNUSED
)
2294 module_init(vpe_module_init
);
2295 module_exit(vpe_module_exit
);
2296 MODULE_DESCRIPTION("MIPS VPE Loader");
2297 MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
2298 MODULE_LICENSE("GPL");