2 * arch/s390/kernel/process.c
5 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 * Hartmut Penner (hp@de.ibm.com),
8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
10 * Derived from "arch/i386/kernel/process.c"
11 * Copyright (C) 1995, Linus Torvalds
15 * This file handles the architecture-dependent parts of process handling..
18 #define __KERNEL_SYSCALLS__
21 #include <linux/config.h>
22 #include <linux/errno.h>
23 #include <linux/sched.h>
24 #include <linux/kernel.h>
26 #include <linux/smp.h>
27 #include <linux/smp_lock.h>
28 #include <linux/stddef.h>
29 #include <linux/unistd.h>
30 #include <linux/ptrace.h>
31 #include <linux/malloc.h>
32 #include <linux/vmalloc.h>
33 #include <linux/user.h>
34 #include <linux/a.out.h>
35 #include <linux/interrupt.h>
36 #include <linux/delay.h>
37 #include <linux/reboot.h>
38 #include <linux/init.h>
40 #include <asm/uaccess.h>
41 #include <asm/pgtable.h>
42 #include <asm/system.h>
44 #include <asm/processor.h>
45 #include <asm/misc390.h>
/*
 * NOTE(review): this extract is damaged — every logical line is split
 * across physical lines and prefixed with its original source line
 * number, and many original lines are missing.  Tokens below are kept
 * byte-for-byte; only comments were added.
 */
/* Lock used by the semaphore wakeup path (pre-2.4 semaphore helpers). */
48 spinlock_t semaphore_wake_lock
= SPIN_LOCK_UNLOCKED
;
/*
 * Assembly entry point a new child resumes at after fork; copy_thread()
 * below stores its address into the child's saved gpr 8.
 */
50 asmlinkage
void ret_from_fork(void) __asm__("ret_from_fork");
53 * The idle loop on a S390...
/* PSW loaded by cpu_idle() to put the processor into wait state. */
56 static psw_t wait_psw
;
/*
 * cpu_idle - per-CPU idle loop.
 *
 * Damaged extract: the enclosing braces, the loop construct and several
 * statements are missing (the embedded original line numbers jump).
 * What the visible fragments establish:
 *  - current->counter is forced to -100 so the idle task never wins a
 *    timeslice comparison against runnable tasks;
 *  - a wait PSW is built whose address is the local label idle_wakeup,
 *    OR'ed with 0x80000000 (presumably the 31-bit addressing-mode bit
 *    of the s390 PSW — TODO confirm against the PSW format);
 *  - pending softirqs for this CPU and current->need_resched are polled,
 *    presumably inside the (missing) endless loop.
 */
58 int cpu_idle(void *unused
)
60 /* endless idle loop with no priority at all */
63 current
->counter
= -100;
64 wait_psw
.mask
= _WAIT_PSW_MASK
;
65 wait_psw
.addr
= (unsigned long) &&idle_wakeup
| 0x80000000L
;
/* run softirqs if any are pending and enabled for this CPU */
67 if (softirq_active(smp_processor_id()) &
68 softirq_mask(smp_processor_id())) {
/* reschedule when the scheduler asked for it */
72 if (current
->need_resched
) {
87 As all the registers will only be made displayable to the root
88 user ( via printk ) or by checking if the uid of the user is 0 from
89 the /proc filesystem please god this will be secure enough DJB.
90 The lines are given one at a time so as not to chew stack space in
91 printk on a crash & also for the proc filesystem when you get
92 0 returned you know you've got all the lines
/*
 * sprintf_regs - format one line of a register / stack-backchain dump
 * into buff, selected by the line index.
 *
 * Per the comment above, output is produced one line at a time so printk
 * does not need a large stack buffer on a crash, and so /proc readers
 * can detect the end of the dump when 0 is returned.  Presumably returns
 * the formatted length (linelen) — the return statement is not visible
 * in this damaged extract; the switch scaffolding, several cases and the
 * enclosing braces are also missing (original line numbers below jump).
 */
95 int sprintf_regs(int line
, char *buff
, struct task_struct
* task
,
96 struct thread_struct
*thread
, struct pt_regs
* regs
)
100 u32 backchain
,prev_backchain
,endchain
;
/* derive thread from task when only the task was supplied */
122 thread
= &task
->thread
;
127 linelen
=sprintf(buff
,"\n");
131 linelen
= sprintf(buff
,"User PSW: %08lx %08lx\n",
132 (unsigned long) regs
->psw
.mask
,
133 (unsigned long) regs
->psw
.addr
);
/* fallback line when no pt_regs pointer is available */
135 linelen
= sprintf(buff
,"pt_regs=NULL some info unavailable\n");
139 linelen
+= sprintf(&buff
[linelen
],
140 "task: %08x ", (addr_t
)task
);
142 linelen
+= sprintf(&buff
[linelen
],
143 "thread: %08x ksp: %08x ",
144 (addr_t
)thread
,(addr_t
)thread
->ksp
);
146 linelen
+= sprintf(&buff
[linelen
],
147 "pt_regs: %08x\n", (addr_t
)regs
);
151 linelen
= sprintf(buff
,"User GPRS:\n");
/* four general-purpose registers per output line */
153 case sp_gprs1
... sp_gprs4
:
155 regno
= (line
-sp_gprs1
)*4;
156 linelen
= sprintf(buff
,"%08x %08x %08x %08x\n",
160 regs
->gprs
[regno
+3]);
165 linelen
= sprintf(buff
,"User ACRS:\n");
/* four access registers per output line */
167 case sp_acrs1
... sp_acrs4
:
169 regno
= (line
-sp_acrs1
)*4;
170 linelen
= sprintf(buff
,"%08x %08x %08x %08x\n",
174 regs
->acrs
[regno
+3]);
/* kernel-stack backchain walk: two (backchain, call addr) pairs per line */
177 case sp_kern_backchain
:
178 if (thread
&& thread
->ksp
&& regs
)
179 linelen
= sprintf(buff
,"Kernel BackChain CallChain BackChain CallChain\n");
182 if(thread
&& thread
->ksp
&& regs
) {
183 backchain
= (thread
->ksp
& PSW_ADDR_MASK
);
/* end of the 8k kernel stack page pair containing ksp */
184 endchain
= ((backchain
& (-8192)) + 8192);
/* prev < backchain initially, so the monotonicity check passes once */
185 prev_backchain
= backchain
- 1;
186 line
-= sp_kern_backchain1
;
187 for (chaincnt
= 0; ; chaincnt
++) {
/* stop on a NULL chain, leaving the stack, or a non-increasing chain */
188 if ((backchain
== 0) ||
189 (backchain
>= endchain
) ||
191 (prev_backchain
>= backchain
))
193 if ((chaincnt
>> 1) == line
) {
/* offset 56 in the frame holds the saved return address (gpr 14) */
194 linelen
+= sprintf(&buff
[linelen
],"%s%08x %08x ",
195 (chaincnt
&1) ? "":" ",
196 backchain
,*(u32
*)(backchain
+56));
198 if ((chaincnt
>> 1) > line
)
200 prev_backchain
= backchain
;
/* follow the backchain: first word of the frame points to the caller's */
201 backchain
= (*((u32
*)backchain
)) & PSW_ADDR_MASK
;
204 linelen
+= sprintf(&buff
[linelen
],"\n");
/*
 * show_regs - print a full register dump by calling sprintf_regs() for
 * successive line indices until it returns 0 (end of dump).
 *
 * Damaged extract: the local buffer declaration, the loop body
 * (presumably a printk of buff — TODO confirm) and the braces are
 * missing from this view.
 */
211 void show_regs(struct task_struct
*task
, struct thread_struct
*thread
,
212 struct pt_regs
*regs
)
217 for (line
= 0; sprintf_regs(line
,buff
,task
,thread
,regs
); line
++)
/*
 * kernel_thread - create a kernel thread running fn(arg).
 *
 * Issues the clone system call directly via svc with CLONE_VM forced in,
 * then distinguishes parent from child by comparing gpr 4 (the parent's
 * saved kernel stack pointer from lowcore __LC_KERNEL_STACK) against the
 * current one: in the child the comparison fails, the kernel stack
 * pointer is re-fetched, the 96-byte save area is cleared, and fn(arg)
 * is called followed by svc __NR_exit.
 *
 * Damaged extract: the output/clobber sections of the asm, the return
 * statement and the braces are missing from this view.
 */
221 int kernel_thread(int (*fn
)(void *), void * arg
, unsigned long flags
)
223 int clone_arg
= flags
| CLONE_VM
;
226 __asm__
__volatile__(
229 " l 4,%6\n" /* load kernel stack ptr of parent */
230 " svc %b2\n" /* Linux system call*/
231 " cl 4,%6\n" /* compare ksp's: child or parent ? */
232 " je 0f\n" /* parent - jump*/
233 " l 15,%6\n" /* fix kernel stack pointer*/
235 " xc 0(96,15),0(15)\n" /* clear save area */
236 " lr 2,%4\n" /* load argument*/
237 " lr 14,%5\n" /* get fn-pointer*/
238 " basr 14,14\n" /* call fn*/
239 " svc %b3\n" /* Linux system call*/
242 : "d" (clone_arg
), "i" (__NR_clone
), "i" (__NR_exit
),
243 "d" (arg
), "d" (fn
), "i" (__LC_KERNEL_STACK
) , "i" (-STACK_FRAME_OVERHEAD
)
249 * Free current thread data structures etc..
/*
 * exit_thread - arch hook run when a thread exits.  The body is entirely
 * missing from this damaged extract (presumably empty or near-empty on
 * s390 — TODO confirm against the full source).
 */
251 void exit_thread(void)
/*
 * flush_thread - reset per-thread arch state on exec: mark the FPU as
 * unused (used_math = 0) and clear the lazy-FPU flag PF_USEDFPU.
 * Braces and any further statements are missing from this extract.
 */
255 void flush_thread(void)
258 current
->used_math
= 0;
259 current
->flags
&= ~PF_USEDFPU
;
/*
 * release_thread - arch hook run when a dead task's resources are
 * released.  The body is missing from this damaged extract (presumably
 * a no-op on s390 — TODO confirm against the full source).
 */
262 void release_thread(struct task_struct
*dead_task
)
/*
 * copy_thread - set up the kernel stack and thread struct of a freshly
 * forked child so that resume() can switch to it.
 *
 * Visible fragments establish that it:
 *  - carves a stack_frame off the top of the child's two-page
 *    task-union (8-byte aligned) and points thread.regs/thread.ksp at it;
 *  - copies the parent's pt_regs into the frame and overrides the
 *    child's user stack pointer (gpr 15) with new_stackp;
 *  - makes the child resume at ret_from_fork (gpr 8, with the 0x80000000
 *    addressing-mode bit set) with a fake caller frame (gpr 9);
 *  - saves the parent's FP regs into the child, builds the child's
 *    user_seg segment-table origin from its pgd, sets fs = USER_DS and
 *    zeroes the PER debug-register info.
 *
 * Damaged extract: the struct stack_frame definition's wrapper, the
 * frame variable declaration, the return statement and the braces are
 * missing (original line numbers below jump).
 */
266 int copy_thread(int nr
, unsigned long clone_flags
, unsigned long new_stackp
,
267 unsigned long unused
,
268 struct task_struct
* p
, struct pt_regs
* regs
)
272 unsigned long back_chain
;
276 unsigned long scratch
[2];
277 unsigned long gprs
[10]; /* gprs 6 -15 */
278 unsigned long fprs
[4]; /* fpr 4 and 6 */
279 unsigned long empty
[4];
280 #if CONFIG_REMOTE_DEBUG
281 gdb_pt_regs childregs
;
282 #endif
285 __u32 pgm_old_ilc
; /* single step magic from entry.S */
/* frame sits just below the top of the child's 8k task union */
289 frame
= (struct stack_frame
*) (2*PAGE_SIZE
+ (unsigned long) p
) -1;
290 frame
= (struct stack_frame
*) (((unsigned long) frame
)&-8L);
291 p
->thread
.regs
= &frame
->childregs
;
292 p
->thread
.ksp
= (unsigned long) frame
;
293 frame
->childregs
= *regs
;
294 frame
->childregs
.gprs
[15] = new_stackp
;
297 /* new return point is ret_from_sys_call */
298 frame
->gprs
[8] = ((unsigned long) &ret_from_fork
) | 0x80000000;
300 /* fake return stack for resume(), don't go back to schedule */
301 frame
->gprs
[9] = (unsigned long) frame
;
302 frame
->pgm_svc_step
= 0; /* Nope we aren't single stepping an svc */
303 /* save fprs, if used in last task */
304 save_fp_regs(&p
->thread
.fp_regs
);
305 p
->thread
.user_seg
= __pa((unsigned long) p
->mm
->pgd
) | _SEGMENT_TABLE
;
306 p
->thread
.fs
= USER_DS
;
307 /* Don't copy debug registers */
308 memset(&p
->thread
.per_info
,0,sizeof(p
->thread
.per_info
));
/*
 * sys_fork - fork system call entry: do_fork(SIGCHLD, ...) with the
 * child inheriting the parent's user stack pointer (gpr 15).
 *
 * Damaged extract: the braces, the ret declaration and the return are
 * missing, and "®s" below is encoding mojibake for "&regs" (same call
 * shape as the intact sys_vfork).
 */
312 asmlinkage
int sys_fork(struct pt_regs regs
)
317 ret
= do_fork(SIGCHLD
, regs
.gprs
[15], ®s
, 0);
/*
 * sys_clone - clone system call entry: flags come in gpr 3, the new
 * stack pointer in gpr 2; a zero newsp presumably falls back to the
 * caller's own stack pointer (gpr 15) — the guarding if is missing from
 * this damaged extract, as are the braces, newsp/ret declarations and
 * the return.  "®s" below is encoding mojibake for "&regs".
 */
322 asmlinkage
int sys_clone(struct pt_regs regs
)
324 unsigned long clone_flags
;
329 clone_flags
= regs
.gprs
[3];
330 newsp
= regs
.gprs
[2];
332 newsp
= regs
.gprs
[15];
333 ret
= do_fork(clone_flags
, newsp
, ®s
, 0);
339 * This is trivial, and on the face of it looks like it
340 * could equally well be done in user mode.
342 * Not so, for quite unobvious reasons - register pressure.
343 * In user mode vfork() cannot have a stack frame, and if
344 * done by calling the "clone()" system call directly, you
345 * do not have enough call-clobbered registers to hold all
346 * the information you need.
348 asmlinkage
int sys_vfork(struct pt_regs regs
)
350 return do_fork(CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
351 regs
.gprs
[15], ®s
, 0);
355 * sys_execve() executes a new program.
/*
 * sys_execve - execve entry: the pathname pointer arrives in orig_gpr2,
 * argv in gpr 3 and envp in gpr 4.  getname() copies the path from user
 * space; IS_ERR/PTR_ERR handle its failure.  On success of do_execve()
 * the PF_DTRACE single-step flag is cleared.
 *
 * Damaged extract: braces, declarations, the putname() cleanup and the
 * return path are missing; "®s" below is encoding mojibake for "&regs".
 */
357 asmlinkage
int sys_execve(struct pt_regs regs
)
362 filename
= getname((char *) regs
.orig_gpr2
);
363 error
= PTR_ERR(filename
);
364 if (IS_ERR(filename
))
366 error
= do_execve(filename
, (char **) regs
.gprs
[3], (char **) regs
.gprs
[4], ®s
);
368 current
->flags
&= ~PF_DTRACE
;
454 is replaced below byte-for-byte; comment added only.
376 * fill in the FPU structure for a core dump.
/*
 * dump_fpu - save the current FP register state into fpregs for a core
 * dump.  The return statement and braces are missing from this damaged
 * extract (presumably returns nonzero to indicate FP state is present —
 * TODO confirm).
 */
378 int dump_fpu (struct pt_regs
* regs
, s390_fp_regs
*fpregs
)
380 save_fp_regs(fpregs
);
385 * fill in the user structure for a core dump..
/*
 * dump_thread - fill in a struct user for an a.out core dump: magic,
 * code/data/stack segment sizes in pages, the full GPR set, FP regs
 * (via dump_fpu) and the PER debug info.
 *
 * Damaged extract: the opening/closing braces and at least one line
 * between u_dsize and the start_stack check (presumably initializing
 * u_ssize to 0 — TODO confirm) are missing; "¤t" below is encoding
 * mojibake for "&current".
 */
387 void dump_thread(struct pt_regs
* regs
, struct user
* dump
)
390 /* changed the size calculations - should hopefully work better. lbt */
391 dump
->magic
= CMAGIC
;
392 dump
->start_code
= 0;
/* user stack page base, from the dumped task's gpr 15 */
393 dump
->start_stack
= regs
->gprs
[15] & ~(PAGE_SIZE
- 1);
394 dump
->u_tsize
= ((unsigned long) current
->mm
->end_code
) >> PAGE_SHIFT
;
395 dump
->u_dsize
= ((unsigned long) (current
->mm
->brk
+ (PAGE_SIZE
-1))) >> PAGE_SHIFT
;
396 dump
->u_dsize
-= dump
->u_tsize
;
398 if (dump
->start_stack
< TASK_SIZE
)
399 dump
->u_ssize
= ((unsigned long) (TASK_SIZE
- dump
->start_stack
)) >> PAGE_SHIFT
;
400 memcpy(&dump
->regs
.gprs
[0],regs
,sizeof(s390_regs
));
401 dump_fpu (regs
, &dump
->regs
.fp_regs
);
402 memcpy(&dump
->regs
.per_info
,¤t
->thread
.per_info
,sizeof(per_struct
));
406 * These bracket the sleeping functions..
/*
 * Linker-provided symbols bracketing the scheduler's text; get_wchan()
 * below treats return addresses inside [first_sched, last_sched) as
 * scheduler frames to be skipped.
 */
408 extern void scheduling_functions_start_here(void);
409 extern void scheduling_functions_end_here(void);
410 #define first_sched ((unsigned long) scheduling_functions_start_here)
411 #define last_sched ((unsigned long) scheduling_functions_end_here)
/*
 * get_wchan - report where a sleeping task p is blocked: walk up to 16
 * stack frames from p's saved kernel stack pointer, returning the first
 * saved return address (frame offset 56, cf. sprintf_regs) that lies
 * outside the scheduler text bracketed by first_sched/last_sched.
 * Returns 0 for NULL, current, or running tasks.
 *
 * Damaged extract: braces, the count declaration/initialization, the
 * r15 initialization (presumably from p->thread.ksp — TODO confirm) and
 * the return statements are missing; offset 60 is presumably the saved
 * back chain used to step to the caller's frame.
 */
413 unsigned long get_wchan(struct task_struct
*p
)
415 unsigned long r14
, r15
;
416 unsigned long stack_page
;
418 if (!p
|| p
== current
|| p
->state
== TASK_RUNNING
)
420 stack_page
= (unsigned long) p
;
423 r14
= *(unsigned long *) (r15
+56);
424 if (r14
< first_sched
|| r14
>= last_sched
)
426 r15
= *(unsigned long *) (r15
+60);
427 } while (count
++ < 16);