/*
 *  arch/s390/kernel/ptrace.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Based on PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/m68k/kernel/ptrace.c"
 *  Copyright (C) 1994 by Hamish Macdonald
 *  Taken from linux/kernel/ptrace.c and modified for M680x0.
 *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
 *
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *
 *  This file is subject to the terms and conditions of the GNU General
 *  Public License. See the file README.legal in the main directory of
 *  this archive for more details.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/seccomp.h>
#include <trace/syscall.h>
#include <asm/compat.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "entry.h"

#ifdef CONFIG_COMPAT
#include "compat_ptrace.h"
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
enum s390_regset {
        REGSET_GENERAL,
        REGSET_FP,
        REGSET_GENERAL_EXTENDED,
};
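/*
 * Propagate the debug settings kept in thread.per_info into the PER
 * (program event recording) control registers CR9-CR11 and the PER bit
 * of the PSW, and load them right away if the task is the current one.
 */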
static void
FixPerRegisters(struct task_struct *task)
{
        struct pt_regs *regs;
        per_struct *per_info;
        per_cr_words cr_words;

        regs = task_pt_regs(task);
        per_info = (per_struct *) &task->thread.per_info;
        per_info->control_regs.bits.em_instruction_fetch =
                per_info->single_step | per_info->instruction_fetch;

        if (per_info->single_step) {
                per_info->control_regs.bits.starting_addr = 0;
#ifdef CONFIG_COMPAT
                if (is_compat_task())
                        per_info->control_regs.bits.ending_addr = 0x7fffffffUL;
                else
#endif
                        per_info->control_regs.bits.ending_addr = PSW_ADDR_INSN;
        } else {
                per_info->control_regs.bits.starting_addr =
                        per_info->starting_addr;
                per_info->control_regs.bits.ending_addr =
                        per_info->ending_addr;
        }
        /*
         * if any of the control reg tracing bits are on
         * we switch on per in the psw
         */
        if (per_info->control_regs.words.cr[0] & PER_EM_MASK)
                regs->psw.mask |= PSW_MASK_PER;
        else
                regs->psw.mask &= ~PSW_MASK_PER;

        if (per_info->control_regs.bits.em_storage_alteration)
                per_info->control_regs.bits.storage_alt_space_ctl = 1;
        else
                per_info->control_regs.bits.storage_alt_space_ctl = 0;

        if (task == current) {
                __ctl_store(cr_words, 9, 11);
                if (memcmp(&cr_words, &per_info->control_regs.words,
                           sizeof(cr_words)) != 0)
                        __ctl_load(per_info->control_regs.words, 9, 11);
        }
}
void user_enable_single_step(struct task_struct *task)
{
        task->thread.per_info.single_step = 1;
        FixPerRegisters(task);
}

void user_disable_single_step(struct task_struct *task)
{
        task->thread.per_info.single_step = 0;
        FixPerRegisters(task);
}
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void
ptrace_disable(struct task_struct *child)
{
        /* make sure the single step bit is not set. */
        user_disable_single_step(child);
}
#ifndef CONFIG_64BIT
# define __ADDR_MASK 3
#else
# define __ADDR_MASK 7
#endif
/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
        struct user *dummy = NULL;
        addr_t offset, tmp;

        if (addr < (addr_t) &dummy->regs.acrs) {
                /*
                 * psw and gprs are stored on the stack
                 */
                tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
                if (addr == (addr_t) &dummy->regs.psw.mask)
                        /* Remove per bit from user psw. */
                        tmp &= ~PSW_MASK_PER;

        } else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
                /*
                 * access registers are stored in the thread structure
                 */
                offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
                /*
                 * Very special case: old & broken 64 bit gdb reading
                 * from acrs[15]. Result is a 64 bit value. Read the
                 * 32 bit acrs[15] value and shift it by 32. Sick...
                 */
                if (addr == (addr_t) &dummy->regs.acrs[15])
                        tmp = ((unsigned long) child->thread.acrs[15]) << 32;
                else
#endif
                tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);

        } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
                /*
                 * orig_gpr2 is stored on the kernel stack
                 */
                tmp = (addr_t) task_pt_regs(child)->orig_gpr2;

        } else if (addr < (addr_t) &dummy->regs.fp_regs) {
                /*
                 * prevent reads of padding hole between
                 * orig_gpr2 and fp_regs on s390.
                 */
                tmp = 0;

        } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
                /*
                 * floating point regs. are stored in the thread structure
                 */
                offset = addr - (addr_t) &dummy->regs.fp_regs;
                tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
                if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
                        tmp &= (unsigned long) FPC_VALID_MASK
                                << (BITS_PER_LONG - 32);

        } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
                /*
                 * per_info is found in the thread structure
                 */
                offset = addr - (addr_t) &dummy->regs.per_info;
                tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset);

        } else
                tmp = 0;

        return tmp;
}
static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
        addr_t tmp, mask;

        /*
         * Stupid gdb peeks/pokes the access registers in 64 bit with
         * an alignment of 4. Programmers from hell...
         */
        mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
        if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
            addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
                mask = 3;
#endif
        if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
                return -EIO;

        tmp = __peek_user(child, addr);
        return put_user(tmp, (addr_t __user *) data);
}
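/*
 * Illustration only (tracer side, not part of this file): PTRACE_PEEKUSR
 * takes an offset into struct user, e.g. to read gpr 2 of a stopped child:
 *
 *	errno = 0;
 *	val = ptrace(PTRACE_PEEKUSR, pid,
 *		     (void *) offsetof(struct user, regs.gprs[2]), NULL);
 *	(val == -1 && errno) means an error, otherwise val holds the register.
 */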
/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and to the floating point
 * control register need to be checked for validity.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
        struct user *dummy = NULL;
        addr_t offset;

        if (addr < (addr_t) &dummy->regs.acrs) {
                /*
                 * psw and gprs are stored on the stack
                 */
                if (addr == (addr_t) &dummy->regs.psw.mask &&
#ifdef CONFIG_COMPAT
                    data != PSW_MASK_MERGE(psw_user32_bits, data) &&
#endif
                    data != PSW_MASK_MERGE(psw_user_bits, data))
                        /* Invalid psw mask. */
                        return -EINVAL;
#ifndef CONFIG_64BIT
                if (addr == (addr_t) &dummy->regs.psw.addr)
                        /* I'd like to reject addresses without the
                           high order bit but older gdb's rely on it */
                        data |= PSW_ADDR_AMODE;
#endif
                *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;

        } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
                /*
                 * access registers are stored in the thread structure
                 */
                offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
                /*
                 * Very special case: old & broken 64 bit gdb writing
                 * to acrs[15] with a 64 bit value. Ignore the lower
                 * half of the value and write the upper 32 bit to
                 * acrs[15]. Sick...
                 */
                if (addr == (addr_t) &dummy->regs.acrs[15])
                        child->thread.acrs[15] = (unsigned int) (data >> 32);
                else
#endif
                *(addr_t *)((addr_t) &child->thread.acrs + offset) = data;

        } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
                /*
                 * orig_gpr2 is stored on the kernel stack
                 */
                task_pt_regs(child)->orig_gpr2 = data;

        } else if (addr < (addr_t) &dummy->regs.fp_regs) {
                /*
                 * prevent writes of padding hole between
                 * orig_gpr2 and fp_regs on s390.
                 */
                return 0;

        } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
                /*
                 * floating point regs. are stored in the thread structure
                 */
                if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
                    (data & ~((unsigned long) FPC_VALID_MASK
                              << (BITS_PER_LONG - 32))) != 0)
                        return -EINVAL;
                offset = addr - (addr_t) &dummy->regs.fp_regs;
                *(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;

        } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
                /*
                 * per_info is found in the thread structure
                 */
                offset = addr - (addr_t) &dummy->regs.per_info;
                *(addr_t *)((addr_t) &child->thread.per_info + offset) = data;

        }

        FixPerRegisters(child);
        return 0;
}
static int
poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
        addr_t mask;

        /*
         * Stupid gdb peeks/pokes the access registers in 64 bit with
         * an alignment of 4. Programmers from hell indeed...
         */
        mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
        if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
            addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
                mask = 3;
#endif
        if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
                return -EIO;

        return __poke_user(child, addr, data);
}
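/*
 * Illustration only (tracer side): the s390 specific PTRACE_PEEKUSR_AREA
 * request copies a whole chunk of the user area in one call. addr points
 * to a ptrace_area descriptor, e.g.
 *
 *	ptrace_area parea = {
 *		.len = 16 * sizeof(long),	(the 16 gprs)
 *		.kernel_addr = offsetof(struct user, regs.gprs),
 *		.process_addr = (unsigned long) buf,	(buffer in the tracer)
 *	};
 *	ptrace(PTRACE_PEEKUSR_AREA, pid, &parea, NULL);
 */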
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
        ptrace_area parea;
        int copied, ret;

        switch (request) {
        case PTRACE_PEEKUSR:
                /* read the word at location addr in the USER area. */
                return peek_user(child, addr, data);

        case PTRACE_POKEUSR:
                /* write the word at location addr in the USER area */
                return poke_user(child, addr, data);

        case PTRACE_PEEKUSR_AREA:
        case PTRACE_POKEUSR_AREA:
                if (copy_from_user(&parea, (void __force __user *) addr,
                                   sizeof(parea)))
                        return -EFAULT;
                addr = parea.kernel_addr;
                data = parea.process_addr;
                copied = 0;
                while (copied < parea.len) {
                        if (request == PTRACE_PEEKUSR_AREA)
                                ret = peek_user(child, addr, data);
                        else {
                                addr_t utmp;
                                if (get_user(utmp,
                                             (addr_t __force __user *) data))
                                        return -EFAULT;
                                ret = poke_user(child, addr, utmp);
                        }
                        if (ret)
                                return ret;
                        addr += sizeof(unsigned long);
                        data += sizeof(unsigned long);
                        copied += sizeof(unsigned long);
                }
                return 0;
        default:
                /* Removing high order bit from addr (only for 31 bit). */
                addr &= PSW_ADDR_INSN;
                return ptrace_request(child, request, addr, data);
        }
}
#ifdef CONFIG_COMPAT
/*
 * Now the fun part starts... a 31 bit program running in the
 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
 * to handle, the difference to the 64 bit versions of the requests
 * is that the access is done in multiples of 4 byte instead of
 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
 * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
 * is a 31 bit program too, the content of struct user can be
 * emulated. A 31 bit program peeking into the struct user of
 * a 64 bit program is a no-no.
 */
/*
 * Same as peek_user but for a 31 bit program.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
        struct user32 *dummy32 = NULL;
        per_struct32 *dummy_per32 = NULL;
        addr_t offset;
        __u32 tmp;

        if (addr < (addr_t) &dummy32->regs.acrs) {
                /*
                 * psw and gprs are stored on the stack
                 */
                if (addr == (addr_t) &dummy32->regs.psw.mask) {
                        /* Fake a 31 bit psw mask. */
                        tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32);
                        tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp);
                } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
                        /* Fake a 31 bit psw address. */
                        tmp = (__u32) task_pt_regs(child)->psw.addr |
                                PSW32_ADDR_AMODE31;
                } else {
                        /* gpr 0-15 */
                        tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->psw +
                                         addr*2 + 4);
                }
        } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
                /*
                 * access registers are stored in the thread structure
                 */
                offset = addr - (addr_t) &dummy32->regs.acrs;
                tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);

        } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
                /*
                 * orig_gpr2 is stored on the kernel stack
                 */
                tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);

        } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
                /*
                 * prevent reads of padding hole between
                 * orig_gpr2 and fp_regs on s390.
                 */
                tmp = 0;

        } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
                /*
                 * floating point regs. are stored in the thread structure
                 */
                offset = addr - (addr_t) &dummy32->regs.fp_regs;
                tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);

        } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
                /*
                 * per_info is found in the thread structure
                 */
                offset = addr - (addr_t) &dummy32->regs.per_info;
                /* This is magic. See per_struct and per_struct32. */
                if ((offset >= (addr_t) &dummy_per32->control_regs &&
                     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
                    (offset >= (addr_t) &dummy_per32->starting_addr &&
                     offset <= (addr_t) &dummy_per32->ending_addr) ||
                    offset == (addr_t) &dummy_per32->lowcore.words.address)
                        offset = offset*2 + 4;
                else
                        offset = offset*2;
                tmp = *(__u32 *)((addr_t) &child->thread.per_info + offset);

        } else
                tmp = 0;

        return tmp;
}
static int peek_user_compat(struct task_struct *child,
                            addr_t addr, addr_t data)
{
        __u32 tmp;

        if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
                return -EIO;

        tmp = __peek_user_compat(child, addr);
        return put_user(tmp, (__u32 __user *) data);
}
/*
 * Same as poke_user but for a 31 bit program.
 */
static int __poke_user_compat(struct task_struct *child,
                              addr_t addr, addr_t data)
{
        struct user32 *dummy32 = NULL;
        per_struct32 *dummy_per32 = NULL;
        __u32 tmp = (__u32) data;
        addr_t offset;

        if (addr < (addr_t) &dummy32->regs.acrs) {
                /*
                 * psw, gprs, acrs and orig_gpr2 are stored on the stack
                 */
                if (addr == (addr_t) &dummy32->regs.psw.mask) {
                        /* Build a 64 bit psw mask from 31 bit mask. */
                        if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp))
                                /* Invalid psw mask. */
                                return -EINVAL;
                        task_pt_regs(child)->psw.mask =
                                PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32);
                } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
                        /* Build a 64 bit psw address from 31 bit address. */
                        task_pt_regs(child)->psw.addr =
                                (__u64) tmp & PSW32_ADDR_INSN;
                } else {
                        /* gpr 0-15 */
                        *(__u32*)((addr_t) &task_pt_regs(child)->psw
                                  + addr*2 + 4) = tmp;
                }
        } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
                /*
                 * access registers are stored in the thread structure
                 */
                offset = addr - (addr_t) &dummy32->regs.acrs;
                *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;

        } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
                /*
                 * orig_gpr2 is stored on the kernel stack
                 */
                *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;

        } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
                /*
                 * prevent writes of padding hole between
                 * orig_gpr2 and fp_regs on s390.
                 */
                return 0;

        } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
                /*
                 * floating point regs. are stored in the thread structure
                 */
                if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
                    (tmp & ~FPC_VALID_MASK) != 0)
                        /* Invalid floating point control. */
                        return -EINVAL;
                offset = addr - (addr_t) &dummy32->regs.fp_regs;
                *(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;

        } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
                /*
                 * per_info is found in the thread structure.
                 */
                offset = addr - (addr_t) &dummy32->regs.per_info;
                /*
                 * This is magic. See per_struct and per_struct32.
                 * By incident the offsets in per_struct are exactly
                 * twice the offsets in per_struct32 for all fields.
                 * The 8 byte fields need special handling though,
                 * because the second half (bytes 4-7) is needed and
                 * not the first half.
                 */
                if ((offset >= (addr_t) &dummy_per32->control_regs &&
                     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
                    (offset >= (addr_t) &dummy_per32->starting_addr &&
                     offset <= (addr_t) &dummy_per32->ending_addr) ||
                    offset == (addr_t) &dummy_per32->lowcore.words.address)
                        offset = offset*2 + 4;
                else
                        offset = offset*2;
                *(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp;

        }

        FixPerRegisters(child);
        return 0;
}
static int poke_user_compat(struct task_struct *child,
                            addr_t addr, addr_t data)
{
        if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user32) - 3)
                return -EIO;

        return __poke_user_compat(child, addr, data);
}
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                        compat_ulong_t caddr, compat_ulong_t cdata)
{
        unsigned long addr = caddr;
        unsigned long data = cdata;
        ptrace_area_emu31 parea;
        int copied, ret;

        switch (request) {
        case PTRACE_PEEKUSR:
                /* read the word at location addr in the USER area. */
                return peek_user_compat(child, addr, data);

        case PTRACE_POKEUSR:
                /* write the word at location addr in the USER area */
                return poke_user_compat(child, addr, data);

        case PTRACE_PEEKUSR_AREA:
        case PTRACE_POKEUSR_AREA:
                if (copy_from_user(&parea, (void __force __user *) addr,
                                   sizeof(parea)))
                        return -EFAULT;
                addr = parea.kernel_addr;
                data = parea.process_addr;
                copied = 0;
                while (copied < parea.len) {
                        if (request == PTRACE_PEEKUSR_AREA)
                                ret = peek_user_compat(child, addr, data);
                        else {
                                __u32 utmp;
                                if (get_user(utmp,
                                             (__u32 __force __user *) data))
                                        return -EFAULT;
                                ret = poke_user_compat(child, addr, utmp);
                        }
                        if (ret)
                                return ret;
                        addr += sizeof(unsigned int);
                        data += sizeof(unsigned int);
                        copied += sizeof(unsigned int);
                }
                return 0;
        }
        return compat_ptrace_request(child, request, addr, data);
}
#endif
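/*
 * Tracing hooks called from the system call path in entry.S: the enter
 * hook runs before the system call is dispatched and may cancel it by
 * clearing svcnr, the exit hook runs after the system call has returned.
 */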
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
        long ret = 0;

        /* Do the secure computing check first. */
        secure_computing(regs->gprs[2]);

        /*
         * The sysc_tracesys code in entry.S stored the system
         * call number to gprs[2].
         */
        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
            (tracehook_report_syscall_entry(regs) ||
             regs->gprs[2] >= NR_syscalls)) {
                /*
                 * Tracing decided this syscall should not happen or the
                 * debugger stored an invalid system call number. Skip
                 * the system call and the system call restart handling.
                 */
                regs->svcnr = 0;
                ret = -1;
        }

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->gprs[2]);

        if (unlikely(current->audit_context))
                audit_syscall_entry(is_compat_task() ?
                                        AUDIT_ARCH_S390 : AUDIT_ARCH_S390X,
                                    regs->gprs[2], regs->orig_gpr2,
                                    regs->gprs[3], regs->gprs[4],
                                    regs->gprs[5]);
        return ret ?: regs->gprs[2];
}
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
        if (unlikely(current->audit_context))
                audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]),
                                   regs->gprs[2]);

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_exit(regs, regs->gprs[2]);

        if (test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall_exit(regs, 0);
}
/*
 * user_regset definitions.
 */
static int s390_regs_get(struct task_struct *target,
                         const struct user_regset *regset,
                         unsigned int pos, unsigned int count,
                         void *kbuf, void __user *ubuf)
{
        if (target == current)
                save_access_regs(target->thread.acrs);

        if (kbuf) {
                unsigned long *k = kbuf;
                while (count > 0) {
                        *k++ = __peek_user(target, pos);
                        count -= sizeof(*k);
                        pos += sizeof(*k);
                }
        } else {
                unsigned long __user *u = ubuf;
                while (count > 0) {
                        if (__put_user(__peek_user(target, pos), u++))
                                return -EFAULT;
                        count -= sizeof(*u);
                        pos += sizeof(*u);
                }
        }
        return 0;
}
static int s390_regs_set(struct task_struct *target,
                         const struct user_regset *regset,
                         unsigned int pos, unsigned int count,
                         const void *kbuf, const void __user *ubuf)
{
        int rc = 0;

        if (target == current)
                save_access_regs(target->thread.acrs);

        if (kbuf) {
                const unsigned long *k = kbuf;
                while (count > 0 && !rc) {
                        rc = __poke_user(target, pos, *k++);
                        count -= sizeof(*k);
                        pos += sizeof(*k);
                }
        } else {
                const unsigned long __user *u = ubuf;
                while (count > 0 && !rc) {
                        unsigned long word;
                        rc = __get_user(word, u++);
                        if (rc)
                                break;
                        rc = __poke_user(target, pos, word);
                        count -= sizeof(*u);
                        pos += sizeof(*u);
                }
        }

        if (rc == 0 && target == current)
                restore_access_regs(target->thread.acrs);

        return rc;
}
static int s390_fpregs_get(struct task_struct *target,
                           const struct user_regset *regset, unsigned int pos,
                           unsigned int count, void *kbuf, void __user *ubuf)
{
        if (target == current)
                save_fp_regs(&target->thread.fp_regs);

        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   &target->thread.fp_regs, 0, -1);
}
static int s390_fpregs_set(struct task_struct *target,
                           const struct user_regset *regset, unsigned int pos,
                           unsigned int count, const void *kbuf,
                           const void __user *ubuf)
{
        int rc = 0;

        if (target == current)
                save_fp_regs(&target->thread.fp_regs);

        /* If setting FPC, must validate it first. */
        if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
                u32 fpc[2] = { target->thread.fp_regs.fpc, 0 };
                rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc,
                                        0, offsetof(s390_fp_regs, fprs));
                if (rc)
                        return rc;
                if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0)
                        return -EINVAL;
                target->thread.fp_regs.fpc = fpc[0];
        }

        if (rc == 0 && count > 0)
                rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                        target->thread.fp_regs.fprs,
                                        offsetof(s390_fp_regs, fprs), -1);

        if (rc == 0 && target == current)
                restore_fp_regs(&target->thread.fp_regs);

        return rc;
}
static const struct user_regset s390_regsets[] = {
        [REGSET_GENERAL] = {
                .core_note_type = NT_PRSTATUS,
                .n = sizeof(s390_regs) / sizeof(long),
                .size = sizeof(long),
                .align = sizeof(long),
                .get = s390_regs_get,
                .set = s390_regs_set,
        },
        [REGSET_FP] = {
                .core_note_type = NT_PRFPREG,
                .n = sizeof(s390_fp_regs) / sizeof(long),
                .size = sizeof(long),
                .align = sizeof(long),
                .get = s390_fpregs_get,
                .set = s390_fpregs_set,
        },
};
static const struct user_regset_view user_s390_view = {
        .name = UTS_MACHINE,
        .e_machine = EM_S390,
        .regsets = s390_regsets,
        .n = ARRAY_SIZE(s390_regsets)
};
#ifdef CONFIG_COMPAT
static int s390_compat_regs_get(struct task_struct *target,
                                const struct user_regset *regset,
                                unsigned int pos, unsigned int count,
                                void *kbuf, void __user *ubuf)
{
        if (target == current)
                save_access_regs(target->thread.acrs);

        if (kbuf) {
                compat_ulong_t *k = kbuf;
                while (count > 0) {
                        *k++ = __peek_user_compat(target, pos);
                        count -= sizeof(*k);
                        pos += sizeof(*k);
                }
        } else {
                compat_ulong_t __user *u = ubuf;
                while (count > 0) {
                        if (__put_user(__peek_user_compat(target, pos), u++))
                                return -EFAULT;
                        count -= sizeof(*u);
                        pos += sizeof(*u);
                }
        }
        return 0;
}
static int s390_compat_regs_set(struct task_struct *target,
                                const struct user_regset *regset,
                                unsigned int pos, unsigned int count,
                                const void *kbuf, const void __user *ubuf)
{
        int rc = 0;

        if (target == current)
                save_access_regs(target->thread.acrs);

        if (kbuf) {
                const compat_ulong_t *k = kbuf;
                while (count > 0 && !rc) {
                        rc = __poke_user_compat(target, pos, *k++);
                        count -= sizeof(*k);
                        pos += sizeof(*k);
                }
        } else {
                const compat_ulong_t __user *u = ubuf;
                while (count > 0 && !rc) {
                        compat_ulong_t word;
                        rc = __get_user(word, u++);
                        if (rc)
                                break;
                        rc = __poke_user_compat(target, pos, word);
                        count -= sizeof(*u);
                        pos += sizeof(*u);
                }
        }

        if (rc == 0 && target == current)
                restore_access_regs(target->thread.acrs);

        return rc;
}
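/*
 * The NT_S390_HIGH_GPRS regset exposes the upper halves of the 64 bit
 * general purpose registers to 31 bit tracers. s390 is big-endian, so
 * the high word is the first compat_ulong_t of each 64 bit gpr slot,
 * which is why gprs_high below advances by two words per register.
 */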
static int s390_compat_regs_high_get(struct task_struct *target,
                                     const struct user_regset *regset,
                                     unsigned int pos, unsigned int count,
                                     void *kbuf, void __user *ubuf)
{
        compat_ulong_t *gprs_high;

        gprs_high = (compat_ulong_t *)
                &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
        if (kbuf) {
                compat_ulong_t *k = kbuf;
                while (count > 0) {
                        *k++ = *gprs_high;
                        gprs_high += 2;
                        count -= sizeof(*k);
                }
        } else {
                compat_ulong_t __user *u = ubuf;
                while (count > 0) {
                        if (__put_user(*gprs_high, u++))
                                return -EFAULT;
                        gprs_high += 2;
                        count -= sizeof(*u);
                }
        }
        return 0;
}
static int s390_compat_regs_high_set(struct task_struct *target,
                                     const struct user_regset *regset,
                                     unsigned int pos, unsigned int count,
                                     const void *kbuf, const void __user *ubuf)
{
        compat_ulong_t *gprs_high;
        int rc = 0;

        gprs_high = (compat_ulong_t *)
                &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
        if (kbuf) {
                const compat_ulong_t *k = kbuf;
                while (count > 0) {
                        *gprs_high = *k++;
                        gprs_high += 2;
                        count -= sizeof(*k);
                }
        } else {
                const compat_ulong_t __user *u = ubuf;
                while (count > 0 && !rc) {
                        unsigned long word;
                        rc = __get_user(word, u++);
                        if (rc)
                                break;
                        *gprs_high = word;
                        gprs_high += 2;
                        count -= sizeof(*u);
                }
        }

        return rc;
}
static const struct user_regset s390_compat_regsets[] = {
        [REGSET_GENERAL] = {
                .core_note_type = NT_PRSTATUS,
                .n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
                .size = sizeof(compat_long_t),
                .align = sizeof(compat_long_t),
                .get = s390_compat_regs_get,
                .set = s390_compat_regs_set,
        },
        [REGSET_FP] = {
                .core_note_type = NT_PRFPREG,
                .n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
                .size = sizeof(compat_long_t),
                .align = sizeof(compat_long_t),
                .get = s390_fpregs_get,
                .set = s390_fpregs_set,
        },
        [REGSET_GENERAL_EXTENDED] = {
                .core_note_type = NT_S390_HIGH_GPRS,
                .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
                .size = sizeof(compat_long_t),
                .align = sizeof(compat_long_t),
                .get = s390_compat_regs_high_get,
                .set = s390_compat_regs_high_set,
        },
};
static const struct user_regset_view user_s390_compat_view = {
        .name = "s390",
        .e_machine = EM_S390,
        .regsets = s390_compat_regsets,
        .n = ARRAY_SIZE(s390_compat_regsets)
};
#endif
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
        if (test_tsk_thread_flag(task, TIF_31BIT))
                return &user_s390_compat_view;
#endif
        return &user_s390_view;
}
static const char *gpr_names[NUM_GPRS] = {
        "r0", "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};
unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
{
        if (offset >= NUM_GPRS)
                return 0;
        return regs->gprs[offset];
}
int regs_query_register_offset(const char *name)
{
        unsigned long offset;

        if (!name || *name != 'r')
                return -EINVAL;
        if (strict_strtoul(name + 1, 10, &offset))
                return -EINVAL;
        if (offset >= NUM_GPRS)
                return -EINVAL;
        return offset;
}
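/*
 * Example: regs_query_register_offset("r5") returns 5,
 * regs_query_register_name(5) returns "r5" and
 * regs_get_register(regs, 5) reads gpr 5.
 */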
const char *regs_query_register_name(unsigned int offset)
{
        if (offset >= NUM_GPRS)
                return NULL;
        return gpr_names[offset];
}
static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
        unsigned long ksp = kernel_stack_pointer(regs);

        return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
}
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @n: stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
        unsigned long addr;

        addr = kernel_stack_pointer(regs) + n * sizeof(long);
        if (!regs_within_kernel_stack(regs, addr))
                return 0;
        return *(unsigned long *)addr;
}