/* $Id: unaligned.c,v 1.20 2000/04/29 08:05:21 anton Exp $
 * unaligned.c: Unaligned load/store trap handling with special
 *              cases for the kernel to do them more quickly.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/asi.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <asm/fpumacro.h>
#include <asm/bitops.h>

/* #define DEBUG_MNA */

enum direction {
	load,    /* ld, ldd, ldh, ldsh */
	store,   /* st, std, sth, stsh */
	both,    /* Swap, ldstub, cas, ... */
	fpld,
	fpst,
	invalid,
};

#ifdef DEBUG_MNA
static char *dirstrings[] = {
	"load", "store", "both", "fpload", "fpstore", "invalid"
};
#endif

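/*
 * In a SPARC format-3 memory instruction the op3 field occupies bits
 * 24:19.  Bit 21 (op3 bit 2) is clear for plain loads; when it is set,
 * an op3 low nibble of 0xf identifies the swap-style read-modify-write
 * instructions and everything else is treated as a store.
 */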
static inline enum direction decode_direction(unsigned int insn)
{
	unsigned long tmp = (insn >> 21) & 1;

	if (!tmp)
		return load;
	else {
		switch ((insn >> 19) & 0xf) {
		case 15: /* swap* */
			return both;
		default:
			return store;
		}
	}
}

/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
static inline int decode_access_size(unsigned int insn)
{
	unsigned int tmp;

	tmp = (insn >> 19) & 0xf;
	if (tmp == 11 || tmp == 14) /* ldx/stx */
		return 8;
	tmp &= 3;
	if (!tmp)
		return 4;
	else if (tmp == 3)
		return 16;	/* ldd/std - Although it is actually 8 */
	else if (tmp == 2)
		return 2;
	else {
		printk("Impossible unaligned trap. insn=%08x\n", insn);
		die_if_kernel("Byte sized unaligned access?!?!",
			      current->thread.kregs);
	}
}

static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
{
	if (insn & 0x800000) {
		if (insn & 0x2000)
			return (unsigned char)(regs->tstate >> 24);	/* %asi */
		else
			return (unsigned char)(insn >> 5);		/* imm_asi */
	} else
		return ASI_P;
}

/* 0x400000 = signed, 0 = unsigned */
static inline int decode_signedness(unsigned int insn)
{
	return (insn & 0x400000);
}

static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
					unsigned int rd, int from_kernel)
{
	if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
		if (from_kernel != 0)
			__asm__ __volatile__("flushw");
		else
			flushw_user();
	}
}

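/*
 * Format-3 immediates are 13 bits wide.  Shifting the value to the top
 * of the 64-bit long and arithmetically back down replicates bit 12
 * through all of the upper bits: 0x1fff becomes -1, while 0x0fff stays
 * 0x0fff.
 */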
static inline long sign_extend_imm13(long imm)
{
	return imm << 51 >> 51;
}

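/*
 * Registers 0-15 (%g0-%o7) live in pt_regs, with %g0 always reading as
 * zero.  Registers 16-31 (%l0-%i7) live in the register window spilled
 * to the stack, which a privileged or 64-bit frame addresses with the
 * stack bias applied and a 32-bit process addresses through a 32-bit
 * reg_window32 frame.
 */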
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
	unsigned long value;

	if (reg < 16)
		return (!reg ? 0 : regs->u_regs[reg]);
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		value = win->locals[reg - 16];
	} else if (current->thread.flags & SPARC_FLAG_32BIT) {
		struct reg_window32 *win32;
		win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
		get_user(value, &win32->locals[reg - 16]);
	} else {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		get_user(value, &win->locals[reg - 16]);
	}
	return value;
}

static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
	if (reg < 16)
		return &regs->u_regs[reg];
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		return &win->locals[reg - 16];
	} else if (current->thread.flags & SPARC_FLAG_32BIT) {
		struct reg_window32 *win32;
		win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
		return (unsigned long *)&win32->locals[reg - 16];
	} else {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		return &win->locals[reg - 16];
	}
}

static inline unsigned long compute_effective_address(struct pt_regs *regs,
						      unsigned int insn,
						      unsigned int rd)
{
	unsigned int rs1 = (insn >> 14) & 0x1f;
	unsigned int rs2 = insn & 0x1f;
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;

	if (insn & 0x2000) {
		maybe_flush_windows(rs1, 0, rd, from_kernel);
		return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
	} else {
		maybe_flush_windows(rs1, rs2, rd, from_kernel);
		return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
	}
}

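/*
 * Worked example: "ldx [%g1 + 8], %o0" encodes as 0xd0586008.  Bit 21
 * is clear, so it decodes as a load; op3 is 0x0b, selecting an 8-byte
 * access; the i bit (0x2000) is set, so the effective address is the
 * value of %g1 plus the sign-extended immediate 8.
 */
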
/* This is just to make gcc think die_if_kernel does return... */
static void unaligned_panic(char *str, struct pt_regs *regs)
{
	die_if_kernel(str, regs);
}

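/*
 * The load emulation reads the misaligned datum a byte at a time with
 * lduba in the requested ASI and reassembles it with shifts and ors,
 * branching on the access size (2, 4, 8 or 16 bytes).  Every numbered
 * label is paired with the errh fixup label in __ex_table, so a fault
 * on any of the byte accesses unwinds into the caller's error handler
 * instead of oopsing.
 */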
#define do_integer_load(dest_reg, size, saddr, is_signed, asi, errh) ({ \
__asm__ __volatile__ ( \
	"wr %4, 0, %%asi\n\t" \
	"cmp %1, 8\n\t" \
	"bge,pn %%icc, 9f\n\t" \
	" cmp %1, 4\n\t" \
	"be,pt %%icc, 6f\n" \
"4:\t"	" lduba [%2] %%asi, %%l1\n" \
"5:\t"	"lduba [%2 + 1] %%asi, %%l2\n\t" \
	"sll %%l1, 8, %%l1\n\t" \
	"brz,pt %3, 3f\n\t" \
	" add %%l1, %%l2, %%l1\n\t" \
	"sllx %%l1, 48, %%l1\n\t" \
	"srax %%l1, 48, %%l1\n" \
"3:\t"	"ba,pt %%xcc, 0f\n\t" \
	" stx %%l1, [%0]\n" \
"6:\t"	"lduba [%2 + 1] %%asi, %%l2\n\t" \
	"sll %%l1, 24, %%l1\n" \
"7:\t"	"lduba [%2 + 2] %%asi, %%g7\n\t" \
	"sll %%l2, 16, %%l2\n" \
"8:\t"	"lduba [%2 + 3] %%asi, %%g1\n\t" \
	"sll %%g7, 8, %%g7\n\t" \
	"or %%l1, %%l2, %%l1\n\t" \
	"or %%g7, %%g1, %%g7\n\t" \
	"or %%l1, %%g7, %%l1\n\t" \
	"brnz,a,pt %3, 3f\n\t" \
	" sra %%l1, 0, %%l1\n" \
"3:\t"	"ba,pt %%xcc, 0f\n\t" \
	" stx %%l1, [%0]\n" \
"9:\t"	"lduba [%2] %%asi, %%l1\n" \
"10:\t"	"lduba [%2 + 1] %%asi, %%l2\n\t" \
	"sllx %%l1, 56, %%l1\n" \
"11:\t"	"lduba [%2 + 2] %%asi, %%g7\n\t" \
	"sllx %%l2, 48, %%l2\n" \
"12:\t"	"lduba [%2 + 3] %%asi, %%g1\n\t" \
	"sllx %%g7, 40, %%g7\n\t" \
	"sllx %%g1, 32, %%g1\n\t" \
	"or %%l1, %%l2, %%l1\n\t" \
	"or %%g7, %%g1, %%g7\n" \
"13:\t"	"lduba [%2 + 4] %%asi, %%l2\n\t" \
	"or %%l1, %%g7, %%g7\n" \
"14:\t"	"lduba [%2 + 5] %%asi, %%g1\n\t" \
	"sllx %%l2, 24, %%l2\n" \
"15:\t"	"lduba [%2 + 6] %%asi, %%l1\n\t" \
	"sllx %%g1, 16, %%g1\n\t" \
	"or %%g7, %%l2, %%g7\n" \
"16:\t"	"lduba [%2 + 7] %%asi, %%l2\n\t" \
	"sllx %%l1, 8, %%l1\n\t" \
	"or %%g7, %%g1, %%g7\n\t" \
	"or %%l1, %%l2, %%l1\n\t" \
	"or %%g7, %%l1, %%g7\n\t" \
	"cmp %1, 8\n\t" \
	"be,a,pt %%icc, 0f\n\t" \
	" stx %%g7, [%0]\n\t" \
	"srlx %%g7, 32, %%l1\n\t" \
	"sra %%g7, 0, %%g7\n\t" \
	"stx %%l1, [%0]\n\t" \
	"stx %%g7, [%0 + 8]\n" \
"0:\n\t" \
	"wr %%g0, %5, %%asi\n\n\t" \
	".section __ex_table\n\t" \
	".word 4b, " #errh "\n\t" \
	".word 5b, " #errh "\n\t" \
	".word 6b, " #errh "\n\t" \
	".word 7b, " #errh "\n\t" \
	".word 8b, " #errh "\n\t" \
	".word 9b, " #errh "\n\t" \
	".word 10b, " #errh "\n\t" \
	".word 11b, " #errh "\n\t" \
	".word 12b, " #errh "\n\t" \
	".word 13b, " #errh "\n\t" \
	".word 14b, " #errh "\n\t" \
	".word 15b, " #errh "\n\t" \
	".word 16b, " #errh "\n\n\t" \
	".previous\n\t" \
	: : "r" (dest_reg), "r" (size), "r" (saddr), "r" (is_signed), \
	    "r" (asi), "i" (ASI_AIUS) \
	: "l1", "l2", "g7", "g1", "cc"); \
})

#define store_common(dst_addr, size, src_val, asi, errh) ({ \
__asm__ __volatile__ ( \
	"wr %3, 0, %%asi\n\t" \
	"ldx [%2], %%l1\n" \
	"cmp %1, 2\n\t" \
	"be,pn %%icc, 2f\n\t" \
	" cmp %1, 4\n\t" \
	"be,pt %%icc, 1f\n\t" \
	" srlx %%l1, 24, %%l2\n\t" \
	"srlx %%l1, 56, %%g1\n\t" \
	"srlx %%l1, 48, %%g7\n" \
"4:\t"	"stba %%g1, [%0] %%asi\n\t" \
	"srlx %%l1, 40, %%g1\n" \
"5:\t"	"stba %%g7, [%0 + 1] %%asi\n\t" \
	"srlx %%l1, 32, %%g7\n" \
"6:\t"	"stba %%g1, [%0 + 2] %%asi\n" \
"7:\t"	"stba %%g7, [%0 + 3] %%asi\n\t" \
	"srlx %%l1, 16, %%g1\n" \
"8:\t"	"stba %%l2, [%0 + 4] %%asi\n\t" \
	"srlx %%l1, 8, %%g7\n" \
"9:\t"	"stba %%g1, [%0 + 5] %%asi\n" \
"10:\t"	"stba %%g7, [%0 + 6] %%asi\n\t" \
	"ba,pt %%xcc, 0f\n" \
"11:\t"	" stba %%l1, [%0 + 7] %%asi\n" \
"1:\t"	"srl %%l1, 16, %%g7\n" \
"12:\t"	"stba %%l2, [%0] %%asi\n\t" \
	"srl %%l1, 8, %%l2\n" \
"13:\t"	"stba %%g7, [%0 + 1] %%asi\n" \
"14:\t"	"stba %%l2, [%0 + 2] %%asi\n\t" \
	"ba,pt %%xcc, 0f\n" \
"15:\t"	" stba %%l1, [%0 + 3] %%asi\n" \
"2:\t"	"srl %%l1, 8, %%l2\n" \
"16:\t"	"stba %%l2, [%0] %%asi\n" \
"17:\t"	"stba %%l1, [%0 + 1] %%asi\n" \
"0:\n\t" \
	"wr %%g0, %4, %%asi\n\n\t" \
	".section __ex_table\n\t" \
	".word 4b, " #errh "\n\t" \
	".word 5b, " #errh "\n\t" \
	".word 6b, " #errh "\n\t" \
	".word 7b, " #errh "\n\t" \
	".word 8b, " #errh "\n\t" \
	".word 9b, " #errh "\n\t" \
	".word 10b, " #errh "\n\t" \
	".word 11b, " #errh "\n\t" \
	".word 12b, " #errh "\n\t" \
	".word 13b, " #errh "\n\t" \
	".word 14b, " #errh "\n\t" \
	".word 15b, " #errh "\n\t" \
	".word 16b, " #errh "\n\t" \
	".word 17b, " #errh "\n\n\t" \
	".previous\n\t" \
	: : "r" (dst_addr), "r" (size), "r" (src_val), "r" (asi), "i" (ASI_AIUS) \
	: "l1", "l2", "g7", "g1", "cc"); \
})

#define do_integer_store(reg_num, size, dst_addr, regs, asi, errh) ({ \
	unsigned long zero = 0; \
	unsigned long *src_val = &zero; \
\
	if (size == 16) { \
		size = 8; \
		zero = (((long)(reg_num ? \
		        (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) | \
			(unsigned)fetch_reg(reg_num + 1, regs); \
	} else if (reg_num) src_val = fetch_reg_addr(reg_num, regs); \
	store_common(dst_addr, size, src_val, asi, errh); \
})

/* XXX Need to capture/release other cpu's for SMP around this. */
#define do_atomic(srcdest_reg, mem, errh) ({ \
	unsigned long flags, tmp; \
\
	save_and_cli(flags); \
	tmp = *srcdest_reg; \
	do_integer_load(srcdest_reg, 4, mem, 0, errh); \
	store_common(mem, 4, &tmp, errh); \
	restore_flags(flags); \
})

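/*
 * Note that do_atomic is only referenced from the "#if 0" branch of
 * kernel_unaligned_trap() below, so it is never expanded; its calls
 * have not kept up with the argument lists of do_integer_load() and
 * store_common().
 */
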
static inline void advance(struct pt_regs *regs)
{
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
}

static inline int floating_point_load_or_store_p(unsigned int insn)
{
	return (insn >> 24) & 1;
}

static inline int ok_for_kernel(unsigned int insn)
{
	return !floating_point_load_or_store_p(insn);
}

void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("kernel_mna_trap_fault");

void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
{
	unsigned long g2 = regs->u_regs[UREG_G2];
	unsigned long fixup = search_exception_table(regs->tpc, &g2);

	if (!fixup) {
		unsigned long address = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f));
		if (address < PAGE_SIZE) {
			printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
		} else
			printk(KERN_ALERT "Unable to handle kernel paging request in mna handler");
		printk(KERN_ALERT " at virtual address %016lx\n", address);
		printk(KERN_ALERT "current->{mm,active_mm}->context = %016lx\n",
		       (current->mm ? current->mm->context :
			current->active_mm->context));
		printk(KERN_ALERT "current->{mm,active_mm}->pgd = %016lx\n",
		       (current->mm ? (unsigned long) current->mm->pgd :
			(unsigned long) current->active_mm->pgd));
		die_if_kernel("Oops", regs);
		/* Not reached */
	}
	regs->tpc = fixup;
	regs->tnpc = regs->tpc + 4;
	regs->u_regs[UREG_G2] = g2;
}

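/*
 * Entry point for a misaligned access taken in kernel mode: decode the
 * direction and size, compute the effective address, and emulate the
 * access with the macros above.  A fault inside the emulation lands on
 * the kernel_unaligned_trap_fault label, which funnels into
 * kernel_mna_trap_fault() for exception-table fixup.
 */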
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, unsigned long sfar, unsigned long sfsr)
{
	enum direction dir = decode_direction(insn);
	int size = decode_access_size(insn);

	if (!ok_for_kernel(insn) || dir == both) {
		printk("Unsupported unaligned load/store trap for kernel at <%016lx>.\n",
		       regs->tpc);
		unaligned_panic("Kernel does fpu/atomic unaligned load/store.", regs);

		__asm__ __volatile__ ("\n"
"kernel_unaligned_trap_fault:\n\t"
		"mov %0, %%o0\n\t"
		"call kernel_mna_trap_fault\n\t"
		" mov %1, %%o1\n\t"
		:
		: "r" (regs), "r" (insn)
		: "o0", "o1", "o2", "o3", "o4", "o5", "o7",
		  "g1", "g2", "g3", "g4", "g5", "g7", "cc");
	} else {
		unsigned long addr = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f));

#ifdef DEBUG_MNA
		printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] retpc[%016lx]\n",
		       regs->tpc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
#endif
		switch (dir) {
		case load:
			do_integer_load(fetch_reg_addr(((insn >> 25) & 0x1f), regs),
					size, (unsigned long *) addr,
					decode_signedness(insn), decode_asi(insn, regs),
					kernel_unaligned_trap_fault);
			break;

		case store:
			do_integer_store(((insn >> 25) & 0x1f), size,
					 (unsigned long *) addr, regs,
					 decode_asi(insn, regs),
					 kernel_unaligned_trap_fault);
			break;
#if 0 /* unsupported */
		case both:
			do_atomic(fetch_reg_addr(((insn >> 25) & 0x1f), regs),
				  (unsigned long *) addr,
				  kernel_unaligned_trap_fault);
			break;
#endif
		default:
			panic("Impossible kernel unaligned trap.");
			/* Not reached... */
		}
		advance(regs);
	}
}

static char popc_helper[] = {
	0, 1, 1, 2, 1, 2, 2, 3,
	1, 2, 2, 3, 2, 3, 3, 4,
};

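/*
 * Emulate the v9 popc instruction, which traps on CPUs that do not
 * implement it in hardware: sum the table's 4-bit population counts
 * over the sixteen nibbles of the 64-bit operand (e.g. 0xff yields 8).
 */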
int handle_popc(u32 insn, struct pt_regs *regs)
{
	u64 value;
	int ret, i, rd = ((insn >> 25) & 0x1f);
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;

	if (insn & 0x2000) {
		maybe_flush_windows(0, 0, rd, from_kernel);
		value = sign_extend_imm13(insn);
	} else {
		maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
		value = fetch_reg(insn & 0x1f, regs);
	}
	for (ret = 0, i = 0; i < 16; i++) {
		ret += popc_helper[value & 0xf];
		value >>= 4;
	}
	if (rd < 16) {
		if (rd)
			regs->u_regs[rd] = ret;
	} else {
		if (current->thread.flags & SPARC_FLAG_32BIT) {
			struct reg_window32 *win32;
			win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
			put_user(ret, &win32->locals[rd - 16]);
		} else {
			struct reg_window *win;
			win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
			put_user(ret, &win->locals[rd - 16]);
		}
	}
	advance(regs);
	return 1;
}

extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
extern void data_access_exception(struct pt_regs *regs);

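/*
 * Emulate floating-point loads and the 128-bit stq that the hardware
 * cannot perform directly (quad ops and the little-endian/no-fault
 * ASIs): data moves 32 bits at a time via {get,put}_user and the
 * software FPU image in struct fpustate is updated by hand.
 */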
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
	unsigned long addr = compute_effective_address(regs, insn, 0);
	int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
	struct fpustate *f = FPUSTATE;
	int asi = decode_asi(insn, regs);
	int flag = (freg < 32) ? FPRS_DL : FPRS_DU;

	save_and_clear_fpu();
	current->thread.xfsr[0] &= ~0x1c000;
	if (freg & 3) {
		current->thread.xfsr[0] |= (6 << 14) /* invalid_fp_register */;
		do_fpother(regs);
		return 0;
	}
	if (insn & 0x200000) {
		/* STQ */
		u64 first = 0, second = 0;

		if (current->thread.fpsaved[0] & flag) {
			first = *(u64 *)&f->regs[freg];
			second = *(u64 *)&f->regs[freg+2];
		}
		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		}
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL:
			{
				/* Need to convert endians */
				u64 tmp = __swab64p(&first);

				first = __swab64p(&second);
				second = tmp;
				break;
			}
		default:
			data_access_exception(regs);
			return 1;
		}
		if (put_user(first >> 32, (u32 *)addr) ||
		    __put_user((u32)first, (u32 *)(addr + 4)) ||
		    __put_user(second >> 32, (u32 *)(addr + 8)) ||
		    __put_user((u32)second, (u32 *)(addr + 12))) {
			data_access_exception(regs);
			return 1;
		}
	} else {
		/* LDF, LDDF, LDQF */
		u32 data[4] __attribute__ ((aligned(8)));
		int size, i;
		int err;

		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		} else if (asi > ASI_SNFL) {
			data_access_exception(regs);
			return 1;
		}
		switch (insn & 0x180000) {
		case 0x000000: size = 1; break;
		case 0x100000: size = 4; break;
		default: size = 2; break;
		}
		for (i = 0; i < size; i++)
			data[i] = 0;

		err = get_user(data[0], (u32 *)addr);
		if (!err) {
			for (i = 1; i < size; i++)
				err |= __get_user(data[i], (u32 *)(addr + 4*i));
		}
		if (err && !(asi & 0x2 /* NF */)) {
			data_access_exception(regs);
			return 1;
		}
		if (asi & 0x8) /* Little */ {
			u64 tmp;

			switch (size) {
			case 1: data[0] = le32_to_cpup(data + 0); break;
			default:*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
				break;
			case 4: tmp = le64_to_cpup((u64 *)(data + 0));
				*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
				*(u64 *)(data + 2) = tmp;
				break;
			}
		}
		if (!(current->thread.fpsaved[0] & FPRS_FEF)) {
			current->thread.fpsaved[0] = FPRS_FEF;
			current->thread.gsr[0] = 0;
		}
		if (!(current->thread.fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		memcpy(f->regs + freg, data, size * 4);
		current->thread.fpsaved[0] |= flag;
	}
	advance(regs);
	return 1;
}

void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
	int rd = ((insn >> 25) & 0x1f);
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	unsigned long *reg;

	maybe_flush_windows(0, 0, rd, from_kernel);
	reg = fetch_reg_addr(rd, regs);
	if (from_kernel || rd < 16) {
		reg[0] = 0;
		if ((insn & 0x780000) == 0x180000)
			reg[1] = 0;
	} else if (current->thread.flags & SPARC_FLAG_32BIT) {
		put_user(0, (int *)reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, ((int *)reg) + 1);
	} else {
		put_user(0, reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, reg + 1);
	}
	advance(regs);
}

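/*
 * lddf/stdf from userland with a misaligned address trap here; the
 * 8-byte datum is transferred as two aligned 32-bit user accesses and
 * byte-swapped when a little-endian ASI was in use.
 */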
void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u32 first, second;
	u64 value;
	u8 asi, freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("lddfmna from kernel", regs);
	if (current->thread.flags & SPARC_FLAG_32BIT)
		pc = (u32)pc;
	if (get_user(insn, (u32 *)pc) != -EFAULT) {
		asi = sfsr >> 16;
		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		if (get_user(first, (u32 *)sfar) ||
		    get_user(second, (u32 *)(sfar + 4))) {
			if (asi & 0x2) /* NF */ {
				first = 0; second = 0;
			} else
				goto daex;
		}
		save_and_clear_fpu();
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = (((u64)first) << 32) | second;
		if (asi & 0x8) /* Little */
			value = __swab64p(&value);
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if (!(current->thread.fpsaved[0] & FPRS_FEF)) {
			current->thread.fpsaved[0] = FPRS_FEF;
			current->thread.gsr[0] = 0;
		}
		if (!(current->thread.fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		*(u64 *)(f->regs + freg) = value;
		current->thread.fpsaved[0] |= flag;
	} else {
daex:		data_access_exception(regs);
		return;
	}
	advance(regs);
	return;
}

void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u64 value;
	u8 asi, freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("stdfmna from kernel", regs);
	if (current->thread.flags & SPARC_FLAG_32BIT)
		pc = (u32)pc;
	if (get_user(insn, (u32 *)pc) != -EFAULT) {
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		asi = sfsr >> 16;
		value = 0;
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		save_and_clear_fpu();
		if (current->thread.fpsaved[0] & flag)
			value = *(u64 *)&f->regs[freg];
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL:
			value = __swab64p(&value); break;
		default: goto daex;
		}
		if (put_user(value >> 32, (u32 *)sfar) ||
		    __put_user((u32)value, (u32 *)(sfar + 4)))
			goto daex;
	} else {
daex:		data_access_exception(regs);
		return;
	}
	advance(regs);
	return;
}