/*
 * linux/arch/alpha/kernel/process.c
 *
 * Copyright (C) 1995 Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/malloc.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/utsname.h>
#include <linux/time.h>
#include <linux/major.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/elfcore.h>
#include <linux/reboot.h>
#include <linux/console.h>

#include <asm/reg.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/hwrpb.h>
#include <asm/fpu.h>

#include "proto.h"
#include "pci_impl.h"

/*
 * Initial task structure. Make this a per-architecture thing,
 * because different architectures tend to have different
 * alignment requirements and potentially different initial
 * setup.
 */

unsigned long init_user_stack[1024] = { STACK_MAGIC, };
static struct vm_area_struct init_mmap = INIT_MMAP;
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;
struct mm_struct init_mm = INIT_MM(init_mm);

union task_union init_task_union __attribute__((section("init_task")))
        = { task: INIT_TASK(init_task_union.task) };

/*
 * No need to acquire the kernel lock, we're entirely local..
 */
asmlinkage int
sys_sethae(unsigned long hae, unsigned long a1, unsigned long a2,
           unsigned long a3, unsigned long a4, unsigned long a5,
           struct pt_regs regs)
{
        (&regs)->hae = hae;
        return 0;
}
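
/*
 * The idle loop.  Give the idle thread the lowest possible priority
 * (nice 20 and an already-expired counter) so that it only runs when
 * nothing else is runnable, and avoid calling schedule() unless a
 * reschedule has actually been requested.
 */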
void
cpu_idle(void)
{
        /* An endless idle loop with no priority at all. */
        current->nice = 20;
        current->counter = -100;

        while (1) {
                /* FIXME -- EV6 and LCA45 know how to power down
                   the CPU. */

                /* Although we are an idle CPU, we do not want to
                   get into the scheduler unnecessarily. */
                if (current->need_resched) {
                        schedule();
                        check_pgt_cache();
                }
        }
}
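
/*
 * Machine shutdown and restart.  The requested mode and the optional
 * restart command are bundled into a halt_info so that the same
 * routine can be run on every CPU (via smp_call_function) and locally.
 */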
struct halt_info {
        int mode;
        char *restart_cmd;
};

static void
common_shutdown_1(void *generic_ptr)
{
        struct halt_info *how = (struct halt_info *)generic_ptr;
        struct percpu_struct *cpup;
        unsigned long *pflags, flags;
        int cpuid = smp_processor_id();

        /* No point in taking interrupts anymore. */
        __cli();
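
        /* Locate this CPU's per-CPU slot in the HWRPB; its flags word
           is how we tell the console what to do once we halt. */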
        cpup = (struct percpu_struct *)
                ((unsigned long)hwrpb + hwrpb->processor_offset
                 + hwrpb->processor_size * cpuid);
        pflags = &cpup->flags;
        flags = *pflags;

        /* Clear reason to "default"; clear "bootstrap in progress". */
        flags &= ~0x00ff0001UL;

#ifdef CONFIG_SMP
        /* Secondaries halt here. */
        if (cpuid != boot_cpuid) {
                flags |= 0x00040000UL; /* "remain halted" */
                *pflags = flags;
                clear_bit(cpuid, &cpu_present_mask);
                halt();
        }
#endif

        if (how->mode == LINUX_REBOOT_CMD_RESTART) {
                if (!how->restart_cmd) {
                        flags |= 0x00020000UL; /* "cold bootstrap" */
                } else {
                        /* For SRM, we could probably set environment
                           variables to get this to work.  We'd have to
                           delay this until after srm_paging_stop unless
                           we ever got srm_fixup working.

                           At the moment, SRM will use the last boot device,
                           but the file and flags will be the defaults, when
                           doing a "warm" bootstrap. */
                        flags |= 0x00030000UL; /* "warm bootstrap" */
                }
        } else {
                flags |= 0x00040000UL; /* "remain halted" */
        }
        *pflags = flags;

#ifdef CONFIG_SMP
        /* Wait for the secondaries to halt. */
        clear_bit(boot_cpuid, &cpu_present_mask);
        while (cpu_present_mask)
                barrier();
#endif

        /* If booted from SRM, reset some of the original environment. */
        if (alpha_using_srm) {
#ifdef CONFIG_DUMMY_CONSOLE
                /* This has the effect of resetting the VGA video origin. */
                take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1);
#endif
                /* reset_for_srm(); */
                set_hae(srm_hae);
        }

        if (alpha_mv.kill_arch)
                alpha_mv.kill_arch(how->mode);

        if (! alpha_using_srm && how->mode != LINUX_REBOOT_CMD_RESTART) {
                /* Unfortunately, since MILO doesn't currently understand
                   the hwrpb bits above, we can't reliably halt the
                   processor and keep it halted.  So just loop. */
                return;
        }

        if (alpha_using_srm)
                srm_paging_stop();

        halt();
}
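
/*
 * Run the shutdown sequence everywhere: first on the secondaries via
 * smp_call_function(), then on the calling CPU itself.
 */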
static void
common_shutdown(int mode, char *restart_cmd)
{
        struct halt_info args;
        args.mode = mode;
        args.restart_cmd = restart_cmd;
#ifdef CONFIG_SMP
        smp_call_function(common_shutdown_1, &args, 1, 0);
#endif
        common_shutdown_1(&args);
}

void
machine_restart(char *restart_cmd)
{
        common_shutdown(LINUX_REBOOT_CMD_RESTART, restart_cmd);
}

void
machine_halt(void)
{
        common_shutdown(LINUX_REBOOT_CMD_HALT, NULL);
}

void
machine_power_off(void)
{
        common_shutdown(LINUX_REBOOT_CMD_POWER_OFF, NULL);
}

void
show_regs(struct pt_regs * regs)
{
        printk("\nps: %04lx pc: [<%016lx>]\n", regs->ps, regs->pc);
        printk("rp: [<%016lx>] sp: %p\n", regs->r26, regs+1);
        printk(" r0: %016lx  r1: %016lx  r2: %016lx  r3: %016lx\n",
               regs->r0, regs->r1, regs->r2, regs->r3);
        printk(" r4: %016lx  r5: %016lx  r6: %016lx  r7: %016lx\n",
               regs->r4, regs->r5, regs->r6, regs->r7);
        printk(" r8: %016lx r16: %016lx r17: %016lx r18: %016lx\n",
               regs->r8, regs->r16, regs->r17, regs->r18);
        printk("r19: %016lx r20: %016lx r21: %016lx r22: %016lx\n",
               regs->r19, regs->r20, regs->r21, regs->r22);
        printk("r23: %016lx r24: %016lx r25: %016lx r26: %016lx\n",
               regs->r23, regs->r24, regs->r25, regs->r26);
        printk("r27: %016lx r28: %016lx r29: %016lx hae: %016lx\n",
               regs->r27, regs->r28, regs->gp, regs->hae);
}

/*
 * Re-start a thread when doing execve()
 */
void
start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
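        /* Reset the address-space limit for user space and point the
           saved exception frame at the new image; a ps value of 8 sets
           the user-mode bit, the same bit copy_thread() tests below. */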
        set_fs(USER_DS);
        regs->pc = pc;
        regs->ps = 8;
        wrusp(sp);
}

/*
 * Free current thread data structures etc..
 */
void
exit_thread(void)
{
}

void
flush_thread(void)
{
        /* Arrange for each exec'ed process to start off with a clean slate
           with respect to the FPU.  This is all exceptions disabled. */
        current->thread.flags &= ~IEEE_SW_MASK;
        wrfpcr(FPCR_DYN_NORMAL | ieee_swcr_to_fpcr(0));
}

void
release_thread(struct task_struct *dead_task)
{
}
265 * "alpha_clone()".. By the time we get here, the
266 * non-volatile registers have also been saved on the
267 * stack. We do some ugly pointer stuff here.. (see
268 * also copy_thread)
270 * Notice that "fork()" is implemented in terms of clone,
271 * with parameters (SIGCHLD, 0).
274 alpha_clone(unsigned long clone_flags, unsigned long usp,
275 struct switch_stack * swstack)
277 if (!usp)
278 usp = rdusp();
279 return do_fork(clone_flags, usp, (struct pt_regs *) (swstack+1), 0);
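
/*
 * vfork shares the address space with the parent and suspends the
 * parent until the child exits or execs (CLONE_VFORK | CLONE_VM).
 */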
int
alpha_vfork(struct switch_stack * swstack)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(),
                       (struct pt_regs *) (swstack+1), 0);
}

/*
 * Copy an alpha thread..
 *
 * Note the "stack_offset" stuff: when returning to kernel mode, we need
 * to have some extra stack-space for the kernel stack that still exists
 * after the "ret_from_sys_call". When returning to user mode, we only
 * want the space needed by the syscall stack frame (ie "struct pt_regs").
 * Use the passed "regs" pointer to determine how much space we need
 * for a kernel fork().
 */
int
copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
            unsigned long unused,
            struct task_struct * p, struct pt_regs * regs)
{
        extern void ret_from_sys_call(void);
        extern void ret_from_smp_fork(void);

        struct pt_regs * childregs;
        struct switch_stack * childstack, *stack;
        unsigned long stack_offset;

        stack_offset = PAGE_SIZE - sizeof(struct pt_regs);
        if (!(regs->ps & 8))
                stack_offset = (PAGE_SIZE-1) & (unsigned long) regs;
        childregs = (struct pt_regs *) (stack_offset + PAGE_SIZE + (long)p);

        *childregs = *regs;
        childregs->r0 = 0;
        childregs->r19 = 0;
        childregs->r20 = 1; /* OSF/1 has some strange fork() semantics. */
        regs->r20 = 0;
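        /* Child: v0 (r0) = 0, a3 (r19) = 0 (no error), a4 (r20) = 1;
           parent: a4 (r20) = 0.  The a4 values are the "strange" OSF/1
           fork convention noted above. */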
        stack = ((struct switch_stack *) regs) - 1;
        childstack = ((struct switch_stack *) childregs) - 1;
        *childstack = *stack;
#ifdef CONFIG_SMP
        childstack->r26 = (unsigned long) ret_from_smp_fork;
#else
        childstack->r26 = (unsigned long) ret_from_sys_call;
#endif
        p->thread.usp = usp;
        p->thread.ksp = (unsigned long) childstack;
        p->thread.pal_flags = 1; /* set FEN, clear everything else */
        p->thread.flags = current->thread.flags;

        return 0;
}

/*
 * fill in the user structure for a core dump..
 */
void
dump_thread(struct pt_regs * pt, struct user * dump)
{
        /* switch stack follows right below pt_regs: */
        struct switch_stack * sw = ((struct switch_stack *) pt) - 1;

        dump->magic = CMAGIC;
        dump->start_code = current->mm->start_code;
        dump->start_data = current->mm->start_data;
        dump->start_stack = rdusp() & ~(PAGE_SIZE - 1);
        dump->u_tsize = ((current->mm->end_code - dump->start_code)
                         >> PAGE_SHIFT);
        dump->u_dsize = ((current->mm->brk + PAGE_SIZE-1 - dump->start_data)
                         >> PAGE_SHIFT);
        dump->u_ssize = (current->mm->start_stack - dump->start_stack
                         + PAGE_SIZE-1) >> PAGE_SHIFT;

        /*
         * We store the registers in an order/format that is
         * compatible with DEC Unix/OSF/1 as this makes life easier
         * for gdb.
         */
        dump->regs[EF_V0]  = pt->r0;
        dump->regs[EF_T0]  = pt->r1;
        dump->regs[EF_T1]  = pt->r2;
        dump->regs[EF_T2]  = pt->r3;
        dump->regs[EF_T3]  = pt->r4;
        dump->regs[EF_T4]  = pt->r5;
        dump->regs[EF_T5]  = pt->r6;
        dump->regs[EF_T6]  = pt->r7;
        dump->regs[EF_T7]  = pt->r8;
        dump->regs[EF_S0]  = sw->r9;
        dump->regs[EF_S1]  = sw->r10;
        dump->regs[EF_S2]  = sw->r11;
        dump->regs[EF_S3]  = sw->r12;
        dump->regs[EF_S4]  = sw->r13;
        dump->regs[EF_S5]  = sw->r14;
        dump->regs[EF_S6]  = sw->r15;
        dump->regs[EF_A3]  = pt->r19;
        dump->regs[EF_A4]  = pt->r20;
        dump->regs[EF_A5]  = pt->r21;
        dump->regs[EF_T8]  = pt->r22;
        dump->regs[EF_T9]  = pt->r23;
        dump->regs[EF_T10] = pt->r24;
        dump->regs[EF_T11] = pt->r25;
        dump->regs[EF_RA]  = pt->r26;
        dump->regs[EF_T12] = pt->r27;
        dump->regs[EF_AT]  = pt->r28;
        dump->regs[EF_SP]  = rdusp();
        dump->regs[EF_PS]  = pt->ps;
        dump->regs[EF_PC]  = pt->pc;
        dump->regs[EF_GP]  = pt->gp;
        dump->regs[EF_A0]  = pt->r16;
        dump->regs[EF_A1]  = pt->r17;
        dump->regs[EF_A2]  = pt->r18;
        memcpy((char *)dump->regs + EF_SIZE, sw->fp, 32 * 8);
}
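
/*
 * Fill in the floating-point register set for a core dump.  The saved
 * FP registers live in the switch_stack just below the pt_regs;
 * returning 1 tells the caller an FPU context was present.
 */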
int
dump_fpu(struct pt_regs * regs, elf_fpregset_t *r)
{
        /* switch stack follows right below pt_regs: */
        struct switch_stack * sw = ((struct switch_stack *) regs) - 1;
        memcpy(r, sw->fp, 32 * 8);
        return 1;
}

/*
 * sys_execve() executes a new program.
 *
 * This works due to the alpha calling sequence: the first 6 args
 * are gotten from registers, while the rest is on the stack, so
 * we get a0-a5 for free, and then magically find "struct pt_regs"
 * on the stack for us..
 *
 * Don't do this at home.
 */
asmlinkage int
sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
           unsigned long a3, unsigned long a4, unsigned long a5,
           struct pt_regs regs)
{
        int error;
        char * filename;

        lock_kernel();
        filename = getname((char *) a0);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        error = do_execve(filename, (char **) a1, (char **) a2, &regs);
        putname(filename);
out:
        unlock_kernel();
        return error;
}

/*
 * These bracket the sleeping functions..
 */
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched     ((unsigned long) scheduling_functions_start_here)
#define last_sched      ((unsigned long) scheduling_functions_end_here)
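
/*
 * get_wchan() reports the address a sleeping task is blocked at (the
 * "wait channel" shown by ps and /proc).  If the saved pc falls inside
 * the scheduler functions bracketed above, unwind one level so the
 * real sleeper is reported instead.
 */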
unsigned long
get_wchan(struct task_struct *p)
{
        unsigned long schedule_frame;
        unsigned long pc;
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
        /*
         * This one depends on the frame size of schedule(). Do a
         * "disass schedule" in gdb to find the frame size. Also, the
         * code assumes that sleep_on() follows immediately after
         * interruptible_sleep_on() and that add_timer() follows
         * immediately after interruptible_sleep(). Ugly, isn't it?
         * Maybe adding a wchan field to task_struct would be better,
         * after all...
         */

        pc = thread_saved_pc(&p->thread);
        if (pc >= first_sched && pc < last_sched) {
                schedule_frame = ((unsigned long *)p->thread.ksp)[6];
                return ((unsigned long *)schedule_frame)[12];
        }
        return pc;
}