2 /*---------------------------------------------------------------*/
3 /*--- begin guest_mips_helpers.c ---*/
4 /*---------------------------------------------------------------*/
6 /*
7 This file is part of Valgrind, a dynamic binary instrumentation
8 framework.
10 Copyright (C) 2010-2017 RT-RK
12 This program is free software; you can redistribute it and/or
13 modify it under the terms of the GNU General Public License as
14 published by the Free Software Foundation; either version 2 of the
15 License, or (at your option) any later version.
17 This program is distributed in the hope that it will be useful, but
18 WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, see <http://www.gnu.org/licenses/>.
25 The GNU General Public License is contained in the file COPYING.
28 #include "libvex_basictypes.h"
29 #include "libvex_emnote.h"
30 #include "libvex_guest_mips32.h"
31 #include "libvex_guest_mips64.h"
32 #include "libvex_ir.h"
33 #include "libvex.h"
35 #include "main_util.h"
36 #include "main_globals.h"
37 #include "guest_generic_bb_to_IR.h"
38 #include "guest_mips_defs.h"
40 #if defined (__GNUC__)
41 #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
42 #else
43 #define GCC_VERSION 0
44 #endif
46 /* This file contains helper functions for mips guest code. Calls to
47 these functions are generated by the back end.
50 #define ALWAYSDEFD32(field) \
51 { offsetof(VexGuestMIPS32State, field), \
52 (sizeof ((VexGuestMIPS32State*)0)->field) }
54 #define ALWAYSDEFD64(field) \
55 { offsetof(VexGuestMIPS64State, field), \
56 (sizeof ((VexGuestMIPS64State*)0)->field) }
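/* Illustrative only: each ALWAYSDEFD* invocation yields an
   { offset-in-guest-state, size-in-bytes } pair; these feed the
   .alwaysDefd tables in mips32Guest_layout / mips64Guest_layout below,
   telling Memcheck which guest state fields are always defined.
   For example, ALWAYSDEFD32(guest_r0) expands to roughly: */
#if 0
   { offsetof(VexGuestMIPS32State, guest_r0),
     (sizeof ((VexGuestMIPS32State*)0)->guest_r0) }   /* = { <offset>, 4 } */
#endif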
58 IRExpr *guest_mips32_spechelper(const HChar * function_name, IRExpr ** args,
59 IRStmt ** precedingStmts, Int n_precedingStmts)
61 return NULL;
64 IRExpr *guest_mips64_spechelper ( const HChar * function_name, IRExpr ** args,
65 IRStmt ** precedingStmts,
66 Int n_precedingStmts )
68 return NULL;
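/* Neither specialiser above folds anything yet.  A minimal sketch of what
   a non-trivial case could look like, assuming a hypothetical helper
   named "mipsg_calculate_xyz" (not a real helper in this file): */
#if 0
   if (vex_streq(function_name, "mipsg_calculate_xyz")
       && args[0]->tag == Iex_Const) {
      /* All inputs known at JIT time: fold the call to a constant. */
      return IRExpr_Const(IRConst_U32(0));
   }
#endif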
71 /* VISIBLE TO LIBVEX CLIENT */
72 void LibVEX_GuestMIPS32_initialise( /*OUT*/ VexGuestMIPS32State * vex_state)
74 vex_state->guest_r0 = 0; /* Hardwired to 0 */
75 vex_state->guest_r1 = 0; /* Assembler temporary */
76 vex_state->guest_r2 = 0; /* Values for function returns ... */
77 vex_state->guest_r3 = 0; /* ...and expression evaluation */
78 vex_state->guest_r4 = 0; /* Function arguments */
79 vex_state->guest_r5 = 0;
80 vex_state->guest_r6 = 0;
81 vex_state->guest_r7 = 0;
82 vex_state->guest_r8 = 0; /* Temporaries */
83 vex_state->guest_r9 = 0;
84 vex_state->guest_r10 = 0;
85 vex_state->guest_r11 = 0;
86 vex_state->guest_r12 = 0;
87 vex_state->guest_r13 = 0;
88 vex_state->guest_r14 = 0;
89 vex_state->guest_r15 = 0;
90 vex_state->guest_r16 = 0; /* Saved temporaries */
91 vex_state->guest_r17 = 0;
92 vex_state->guest_r18 = 0;
93 vex_state->guest_r19 = 0;
94 vex_state->guest_r20 = 0;
95 vex_state->guest_r21 = 0;
96 vex_state->guest_r22 = 0;
97 vex_state->guest_r23 = 0;
98 vex_state->guest_r24 = 0; /* Temporaries */
99 vex_state->guest_r25 = 0;
100 vex_state->guest_r26 = 0; /* Reserved for OS kernel */
101 vex_state->guest_r27 = 0;
102 vex_state->guest_r28 = 0; /* Global pointer */
103 vex_state->guest_r29 = 0; /* Stack pointer */
104 vex_state->guest_r30 = 0; /* Frame pointer */
105 vex_state->guest_r31 = 0; /* Return address */
106 vex_state->guest_PC = 0; /* Program counter */
107 vex_state->guest_HI = 0; /* Multiply and divide register higher result */
108 vex_state->guest_LO = 0; /* Multiply and divide register lower result */
110 /* FPU Registers */
111 vex_state->guest_f0 = 0x7ff800007ff80000ULL; /* Floating point GP registers */
112 vex_state->guest_f1 = 0x7ff800007ff80000ULL;
113 vex_state->guest_f2 = 0x7ff800007ff80000ULL;
114 vex_state->guest_f3 = 0x7ff800007ff80000ULL;
115 vex_state->guest_f4 = 0x7ff800007ff80000ULL;
116 vex_state->guest_f5 = 0x7ff800007ff80000ULL;
117 vex_state->guest_f6 = 0x7ff800007ff80000ULL;
118 vex_state->guest_f7 = 0x7ff800007ff80000ULL;
119 vex_state->guest_f8 = 0x7ff800007ff80000ULL;
120 vex_state->guest_f9 = 0x7ff800007ff80000ULL;
121 vex_state->guest_f10 = 0x7ff800007ff80000ULL;
122 vex_state->guest_f11 = 0x7ff800007ff80000ULL;
123 vex_state->guest_f12 = 0x7ff800007ff80000ULL;
124 vex_state->guest_f13 = 0x7ff800007ff80000ULL;
125 vex_state->guest_f14 = 0x7ff800007ff80000ULL;
126 vex_state->guest_f15 = 0x7ff800007ff80000ULL;
127 vex_state->guest_f16 = 0x7ff800007ff80000ULL;
128 vex_state->guest_f17 = 0x7ff800007ff80000ULL;
129 vex_state->guest_f18 = 0x7ff800007ff80000ULL;
130 vex_state->guest_f19 = 0x7ff800007ff80000ULL;
131 vex_state->guest_f20 = 0x7ff800007ff80000ULL;
132 vex_state->guest_f21 = 0x7ff800007ff80000ULL;
133 vex_state->guest_f22 = 0x7ff800007ff80000ULL;
134 vex_state->guest_f23 = 0x7ff800007ff80000ULL;
135 vex_state->guest_f24 = 0x7ff800007ff80000ULL;
136 vex_state->guest_f25 = 0x7ff800007ff80000ULL;
137 vex_state->guest_f26 = 0x7ff800007ff80000ULL;
138 vex_state->guest_f27 = 0x7ff800007ff80000ULL;
139 vex_state->guest_f28 = 0x7ff800007ff80000ULL;
140 vex_state->guest_f29 = 0x7ff800007ff80000ULL;
141 vex_state->guest_f30 = 0x7ff800007ff80000ULL;
142 vex_state->guest_f31 = 0x7ff800007ff80000ULL;
144 vex_state->guest_FIR = 0; /* FP implementation and revision register */
145 vex_state->guest_FCCR = 0; /* FP condition codes register */
146 vex_state->guest_FEXR = 0; /* FP exceptions register */
147 vex_state->guest_FENR = 0; /* FP enables register */
148 vex_state->guest_FCSR = 0; /* FP control/status register */
149 vex_state->guest_ULR = 0; /* TLS */
151 /* Various pseudo-regs mandated by Vex or Valgrind. */
152 /* Emulation notes */
153 vex_state->guest_EMNOTE = 0;
155 /* For cache/translation invalidation: record start and length of area to invalidate */
156 vex_state->guest_CMSTART = 0;
157 vex_state->guest_CMLEN = 0;
158 vex_state->host_EvC_COUNTER = 0;
159 vex_state->host_EvC_FAILADDR = 0;
161 /* Used to record the unredirected guest address at the start of
162 a translation whose start has been redirected. By reading
163 this pseudo-register shortly afterwards, the translation can
164 find out what the corresponding no-redirection address was.
165 Note, this is only set for wrap-style redirects, not for
166 replace-style ones. */
167 vex_state->guest_NRADDR = 0;
169 vex_state->guest_COND = 0;
171 vex_state->guest_CP0_status = 0;
172 vex_state->guest_CP0_Config5 = 0;
174 vex_state->guest_LLaddr = 0xFFFFFFFF;
175 vex_state->guest_LLdata = 0;
177 /* MIPS32 DSP ASE(r2) specific registers */
178 vex_state->guest_DSPControl = 0; /* DSPControl register */
179 vex_state->guest_ac0 = 0; /* Accumulator 0 */
180 vex_state->guest_ac1 = 0; /* Accumulator 1 */
181 vex_state->guest_ac2 = 0; /* Accumulator 2 */
182 vex_state->guest_ac3 = 0; /* Accumulator 3 */
184 vex_state->guest_w0.w64[0] = 0;
185 vex_state->guest_w0.w64[1] = 0;
186 vex_state->guest_w1.w64[0] = 0;
187 vex_state->guest_w1.w64[1] = 0;
188 vex_state->guest_w2.w64[0] = 0;
189 vex_state->guest_w2.w64[1] = 0;
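/* Illustrative client-side use of the initialiser (a sketch; the variable
   names initial_sp and entry_point are hypothetical): */
#if 0
   VexGuestMIPS32State st;
   LibVEX_GuestMIPS32_initialise(&st);
   st.guest_r29 = initial_sp;    /* stack pointer */
   st.guest_PC  = entry_point;   /* first instruction to execute */
#endif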
192 void LibVEX_GuestMIPS64_initialise ( /*OUT*/ VexGuestMIPS64State * vex_state )
194 vex_state->guest_r0 = 0; /* Hardwired to 0 */
195 vex_state->guest_r1 = 0; /* Assembler temporary */
196 vex_state->guest_r2 = 0; /* Values for function returns ... */
197 vex_state->guest_r3 = 0;
198 vex_state->guest_r4 = 0; /* Function arguments */
199 vex_state->guest_r5 = 0;
200 vex_state->guest_r6 = 0;
201 vex_state->guest_r7 = 0;
202 vex_state->guest_r8 = 0;
203 vex_state->guest_r9 = 0;
204 vex_state->guest_r10 = 0;
205 vex_state->guest_r11 = 0;
206 vex_state->guest_r12 = 0; /* Temporaries */
207 vex_state->guest_r13 = 0;
208 vex_state->guest_r14 = 0;
209 vex_state->guest_r15 = 0;
210 vex_state->guest_r16 = 0; /* Saved temporaries */
211 vex_state->guest_r17 = 0;
212 vex_state->guest_r18 = 0;
213 vex_state->guest_r19 = 0;
214 vex_state->guest_r20 = 0;
215 vex_state->guest_r21 = 0;
216 vex_state->guest_r22 = 0;
217 vex_state->guest_r23 = 0;
218 vex_state->guest_r24 = 0; /* Temporaries */
219 vex_state->guest_r25 = 0;
220 vex_state->guest_r26 = 0; /* Reserved for OS kernel */
221 vex_state->guest_r27 = 0;
222 vex_state->guest_r28 = 0; /* Global pointer */
223 vex_state->guest_r29 = 0; /* Stack pointer */
224 vex_state->guest_r30 = 0; /* Frame pointer */
225 vex_state->guest_r31 = 0; /* Return address */
226 vex_state->guest_PC = 0; /* Program counter */
227 vex_state->guest_HI = 0; /* Multiply and divide register higher result */
228 vex_state->guest_LO = 0; /* Multiply and divide register lower result */
230 /* FPU Registers */
231 vex_state->guest_f0 = 0x7ff800007ff80000ULL; /* Floating point registers */
232 vex_state->guest_f1 = 0x7ff800007ff80000ULL;
233 vex_state->guest_f2 = 0x7ff800007ff80000ULL;
234 vex_state->guest_f3 = 0x7ff800007ff80000ULL;
235 vex_state->guest_f4 = 0x7ff800007ff80000ULL;
236 vex_state->guest_f5 = 0x7ff800007ff80000ULL;
237 vex_state->guest_f6 = 0x7ff800007ff80000ULL;
238 vex_state->guest_f7 = 0x7ff800007ff80000ULL;
239 vex_state->guest_f8 = 0x7ff800007ff80000ULL;
240 vex_state->guest_f9 = 0x7ff800007ff80000ULL;
241 vex_state->guest_f10 = 0x7ff800007ff80000ULL;
242 vex_state->guest_f11 = 0x7ff800007ff80000ULL;
243 vex_state->guest_f12 = 0x7ff800007ff80000ULL;
244 vex_state->guest_f13 = 0x7ff800007ff80000ULL;
245 vex_state->guest_f14 = 0x7ff800007ff80000ULL;
246 vex_state->guest_f15 = 0x7ff800007ff80000ULL;
247 vex_state->guest_f16 = 0x7ff800007ff80000ULL;
248 vex_state->guest_f17 = 0x7ff800007ff80000ULL;
249 vex_state->guest_f18 = 0x7ff800007ff80000ULL;
250 vex_state->guest_f19 = 0x7ff800007ff80000ULL;
251 vex_state->guest_f20 = 0x7ff800007ff80000ULL;
252 vex_state->guest_f21 = 0x7ff800007ff80000ULL;
253 vex_state->guest_f22 = 0x7ff800007ff80000ULL;
254 vex_state->guest_f23 = 0x7ff800007ff80000ULL;
255 vex_state->guest_f24 = 0x7ff800007ff80000ULL;
256 vex_state->guest_f25 = 0x7ff800007ff80000ULL;
257 vex_state->guest_f26 = 0x7ff800007ff80000ULL;
258 vex_state->guest_f27 = 0x7ff800007ff80000ULL;
259 vex_state->guest_f28 = 0x7ff800007ff80000ULL;
260 vex_state->guest_f29 = 0x7ff800007ff80000ULL;
261 vex_state->guest_f30 = 0x7ff800007ff80000ULL;
262 vex_state->guest_f31 = 0x7ff800007ff80000ULL;
264 vex_state->guest_FIR = 0; /* FP implementation and revision register */
265 vex_state->guest_FCCR = 0; /* FP condition codes register */
266 vex_state->guest_FEXR = 0; /* FP exceptions register */
267 vex_state->guest_FENR = 0; /* FP enables register */
268 vex_state->guest_FCSR = 0; /* FP control/status register */
270 vex_state->guest_ULR = 0;
272 /* Various pseudo-regs mandated by Vex or Valgrind. */
273 /* Emulation notes */
274 vex_state->guest_EMNOTE = 0;
276 /* For cache/translation invalidation: record start and length of area to invalidate */
277 vex_state->guest_CMSTART = 0;
278 vex_state->guest_CMLEN = 0;
279 vex_state->host_EvC_COUNTER = 0;
280 vex_state->host_EvC_FAILADDR = 0;
282 /* Used to record the unredirected guest address at the start of
283 a translation whose start has been redirected. By reading
284 this pseudo-register shortly afterwards, the translation can
285 find out what the corresponding no-redirection address was.
286 Note, this is only set for wrap-style redirects, not for
287 replace-style ones. */
288 vex_state->guest_NRADDR = 0;
290 vex_state->guest_COND = 0;
292 vex_state->guest_CP0_status = MIPS_CP0_STATUS_FR;
294 vex_state->guest_LLaddr = 0xFFFFFFFFFFFFFFFFULL;
295 vex_state->guest_LLdata = 0;
297 vex_state->guest_MSACSR = 0;
300 /*-----------------------------------------------------------*/
301 /*--- Describing the mips guest state, for the benefit ---*/
302 /*--- of iropt and instrumenters. ---*/
303 /*-----------------------------------------------------------*/
305 /* Figure out if any part of the guest state contained in minoff
306 .. maxoff requires precise memory exceptions. If in doubt return
307 True (but this generates significantly slower code).
309 We enforce precise exns for guest SP, PC.
311 Only SP is needed in mode VexRegUpdSpAtMemAccess.
313 Bool guest_mips32_state_requires_precise_mem_exns (
314 Int minoff, Int maxoff, VexRegisterUpdates pxControl
317 Int sp_min = offsetof(VexGuestMIPS32State, guest_r29);
318 Int sp_max = sp_min + 4 - 1;
319 Int pc_min = offsetof(VexGuestMIPS32State, guest_PC);
320 Int pc_max = pc_min + 4 - 1;
322 if (maxoff < sp_min || minoff > sp_max) {
323 /* no overlap with sp */
324 if (pxControl == VexRegUpdSpAtMemAccess)
325 return False; /* We only need to check stack pointer. */
326 } else {
327 return True;
330 if (maxoff < pc_min || minoff > pc_max) {
331 /* no overlap with pc */
332 } else {
333 return True;
336 /* We appear to need precise updates of the frame pointer (r30) in order
337 to get proper stacktraces from non-optimised code. */
338 Int fp_min = offsetof(VexGuestMIPS32State, guest_r30);
339 Int fp_max = fp_min + 4 - 1;
341 if (maxoff < fp_min || minoff > fp_max) {
342 /* no overlap with fp */
343 } else {
344 return True;
347 return False;
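/* Example query (a sketch): a store that might overwrite the 4 bytes of
   guest_r29 needs precise exceptions under the default update policy. */
#if 0
   Int off  = offsetof(VexGuestMIPS32State, guest_r29);
   Bool yes = guest_mips32_state_requires_precise_mem_exns(
                 off, off + 3, VexRegUpdUnwindregsAtMemAccess);
   /* yes == True, because the range overlaps the stack pointer. */
#endif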
350 Bool guest_mips64_state_requires_precise_mem_exns (
351 Int minoff, Int maxoff, VexRegisterUpdates pxControl
354 Int sp_min = offsetof(VexGuestMIPS64State, guest_r29);
355 Int sp_max = sp_min + 8 - 1;
356 Int pc_min = offsetof(VexGuestMIPS64State, guest_PC);
357 Int pc_max = pc_min + 8 - 1;
359 if ( maxoff < sp_min || minoff > sp_max ) {
360 /* no overlap with sp */
361 if (pxControl == VexRegUpdSpAtMemAccess)
362 return False; /* We only need to check stack pointer. */
363 } else {
364 return True;
367 if ( maxoff < pc_min || minoff > pc_max ) {
368 /* no overlap with pc */
369 } else {
370 return True;
373 Int fp_min = offsetof(VexGuestMIPS64State, guest_r30);
374 Int fp_max = fp_min + 8 - 1;
376 if ( maxoff < fp_min || minoff > fp_max ) {
377 /* no overlap with fp */
378 } else {
379 return True;
382 return False;
385 VexGuestLayout mips32Guest_layout = {
386 /* Total size of the guest state, in bytes. */
387 .total_sizeB = sizeof(VexGuestMIPS32State),
388 /* Describe the stack pointer. */
389 .offset_SP = offsetof(VexGuestMIPS32State, guest_r29),
390 .sizeof_SP = 4,
391 /* Describe the frame pointer. */
392 .offset_FP = offsetof(VexGuestMIPS32State, guest_r30),
393 .sizeof_FP = 4,
394 /* Describe the instruction pointer. */
395 .offset_IP = offsetof(VexGuestMIPS32State, guest_PC),
396 .sizeof_IP = 4,
397 /* Describe any sections to be regarded by Memcheck as
398 'always-defined'. */
399 .n_alwaysDefd = 8,
400 /* ? :( */
401 .alwaysDefd = {
402 /* 0 */ ALWAYSDEFD32(guest_r0),
403 /* 1 */ ALWAYSDEFD32(guest_r1),
404 /* 2 */ ALWAYSDEFD32(guest_EMNOTE),
405 /* 3 */ ALWAYSDEFD32(guest_CMSTART),
406 /* 4 */ ALWAYSDEFD32(guest_CMLEN),
407 /* 5 */ ALWAYSDEFD32(guest_r29),
408 /* 6 */ ALWAYSDEFD32(guest_r31),
409 /* 7 */ ALWAYSDEFD32(guest_ULR)
413 VexGuestLayout mips64Guest_layout = {
414 /* Total size of the guest state, in bytes. */
415 .total_sizeB = sizeof(VexGuestMIPS64State),
416 /* Describe the stack pointer. */
417 .offset_SP = offsetof(VexGuestMIPS64State, guest_r29),
418 .sizeof_SP = 8,
419 /* Describe the frame pointer. */
420 .offset_FP = offsetof(VexGuestMIPS64State, guest_r30),
421 .sizeof_FP = 8,
422 /* Describe the instruction pointer. */
423 .offset_IP = offsetof(VexGuestMIPS64State, guest_PC),
424 .sizeof_IP = 8,
425 /* Describe any sections to be regarded by Memcheck as
426 'always-defined'. */
427 .n_alwaysDefd = 7,
428 /* ? :( */
429 .alwaysDefd = {
430 /* 0 */ ALWAYSDEFD64 (guest_r0),
431 /* 1 */ ALWAYSDEFD64 (guest_EMNOTE),
432 /* 2 */ ALWAYSDEFD64 (guest_CMSTART),
433 /* 3 */ ALWAYSDEFD64 (guest_CMLEN),
434 /* 4 */ ALWAYSDEFD64 (guest_r29),
435 /* 5 */ ALWAYSDEFD64 (guest_r31),
436 /* 6 */ ALWAYSDEFD64 (guest_ULR)
440 #define ASM_VOLATILE_RDHWR(opcode) \
441 __asm__ __volatile__(".word 0x7C02003B | "#opcode" << 11 \n\t" \
442 : "+r" (x) : : \
445 HWord mips_dirtyhelper_rdhwr ( UInt rd )
447 #if defined(__mips__)
448 register HWord x __asm__("v0") = 0;
450 switch (rd) {
451 case 0: /* x = CPUNum() */
452 ASM_VOLATILE_RDHWR(0); /* rdhwr v0, $0 */
453 break;
455 case 1: /* x = SYNCI_Step() */
456 ASM_VOLATILE_RDHWR(1); /* rdhwr v0, $1 */
457 break;
459 case 2: /* x = CC() */
460 ASM_VOLATILE_RDHWR(2); /* rdhwr v0, $2 */
461 break;
463 case 3: /* x = CCRes() */
464 ASM_VOLATILE_RDHWR(3); /* rdhwr v0, $3 */
465 break;
467 case 31: /* x = CVMX_get_cycles() */
468 ASM_VOLATILE_RDHWR(31); /* rdhwr v0, $31 */
469 break;
471 default:
472 vassert(0);
473 break;
475 return x;
476 #else
477 return 0;
478 #endif
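/* Note on the encoding used by ASM_VOLATILE_RDHWR above: 0x7C02003B is
   "rdhwr $2, $0" (v0 as destination), and OR-ing in (opcode << 11)
   selects the hardware register, e.g. opcode 1 gives 0x7C02083B, i.e.
   "rdhwr $2, $1".  An illustrative direct call (a sketch; normally this
   helper is only reached via a dirty call from generated code): */
#if 0
   HWord synci_step = mips_dirtyhelper_rdhwr(1);  /* SYNCI_Step;
                                                     0 on non-MIPS hosts */
#endif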
481 #define ASM_VOLATILE_UNARY32(inst) \
482 __asm__ volatile(".set push" "\n\t" \
483 ".set hardfloat" "\n\t" \
484 "cfc1 $8, $31" "\n\t" \
485 "ctc1 %2, $31" "\n\t" \
486 "mtc1 %1, $f20" "\n\t" \
487 #inst" $f20, $f20" "\n\t" \
488 "cfc1 %0, $31" "\n\t" \
489 "ctc1 $8, $31" "\n\t" \
490 ".set pop" "\n\t" \
491 : "=r" (ret) \
492 : "r" (loFsVal), "r" (fcsr) \
493 : "$8", "$f20" \
496 #define ASM_VOLATILE_UNARY32_DOUBLE(inst) \
497 __asm__ volatile(".set push" "\n\t" \
498 ".set hardfloat" "\n\t" \
499 "cfc1 $8, $31" "\n\t" \
500 "ctc1 %2, $31" "\n\t" \
501 "ldc1 $f20, 0(%1)" "\n\t" \
502 #inst" $f20, $f20" "\n\t" \
503 "cfc1 %0, $31" "\n\t" \
504 "ctc1 $8, $31" "\n\t" \
505 ".set pop" "\n\t" \
506 : "=r" (ret) \
507 : "r" (&fsVal), "r" (fcsr) \
508 : "$8", "$f20", "$f21" \
511 #define ASM_VOLATILE_UNARY64(inst) \
512 __asm__ volatile(".set push" "\n\t" \
513 ".set hardfloat" "\n\t" \
514 ".set fp=64" "\n\t" \
515 "cfc1 $8, $31" "\n\t" \
516 "ctc1 %2, $31" "\n\t" \
517 "ldc1 $f24, 0(%1)" "\n\t" \
518 #inst" $f24, $f24" "\n\t" \
519 "cfc1 %0, $31" "\n\t" \
520 "ctc1 $8, $31" "\n\t" \
521 ".set pop" "\n\t" \
522 : "=r" (ret) \
523 : "r" (&(addr[fs])), "r" (fcsr) \
524 : "$8", "$f24" \
527 #define ASM_VOLATILE_MSA_UNARY(inst) \
528 __asm__ volatile(".set push" "\n\t" \
529 ".set mips32r2" "\n\t" \
530 ".set hardfloat" "\n\t" \
531 ".set fp=64" "\n\t" \
532 ".set msa" "\n\t" \
533 ".set noreorder" "\n\t" \
534 "cfcmsa $t0, $1" "\n\t" \
535 "ctcmsa $1, %2" "\n\t" \
536 "ld.b $w24, 0(%1)" "\n\t" \
537 #inst" $w24, $w24" "\n\t" \
538 "cfcmsa %0, $1" "\n\t" \
539 "ctcmsa $1, $t0" "\n\t" \
540 ".set pop" "\n\t" \
541 : "=r" (ret) \
542 : "r" (&(addr[ws])), "r" (msacsr) \
543 : "t0" \
546 #define ASM_VOLATILE_BINARY32(inst) \
547 __asm__ volatile(".set push" "\n\t" \
548 ".set hardfloat" "\n\t" \
549 "cfc1 $8, $31" "\n\t" \
550 "ctc1 %3, $31" "\n\t" \
551 "mtc1 %1, $f20" "\n\t" \
552 "mtc1 %2, $f22" "\n\t" \
553 #inst" $f20, $f20, $f22" "\n\t" \
554 "cfc1 %0, $31" "\n\t" \
555 "ctc1 $8, $31" "\n\t" \
556 ".set pop" "\n\t" \
557 : "=r" (ret) \
558 : "r" (loFsVal), "r" (loFtVal), "r" (fcsr) \
559 : "$8", "$f20", "$f22" \
562 #define ASM_VOLATILE_BINARY32_DOUBLE(inst) \
563 __asm__ volatile(".set push" "\n\t" \
564 ".set hardfloat" "\n\t" \
565 "cfc1 $8, $31" "\n\t" \
566 "ctc1 %3, $31" "\n\t" \
567 "ldc1 $f20, 0(%1)" "\n\t" \
568 "ldc1 $f22, 0(%2)" "\n\t" \
569 #inst" $f20, $f20, $f22" "\n\t" \
570 "cfc1 %0, $31" "\n\t" \
571 "ctc1 $8, $31" "\n\t" \
572 ".set pop" "\n\t" \
573 : "=r" (ret) \
574 : "r" (&fsVal), "r" (&ftVal), "r" (fcsr) \
575 : "$8", "$f20", "$f21", "$f22", "$f23" \
578 #define ASM_VOLATILE_BINARY64(inst) \
579 __asm__ volatile(".set push" "\n\t" \
580 ".set hardfloat" "\n\t" \
581 "cfc1 $8, $31" "\n\t" \
582 "ctc1 %3, $31" "\n\t" \
583 "ldc1 $f24, 0(%1)" "\n\t" \
584 "ldc1 $f26, 0(%2)" "\n\t" \
585 #inst" $f24, $f24, $f26" "\n\t" \
586 "cfc1 %0, $31" "\n\t" \
587 "ctc1 $8, $31" "\n\t" \
588 ".set pop" "\n\t" \
589 : "=r" (ret) \
590 : "r" (&(addr[fs])), "r" (&(addr[ft])), "r" (fcsr) \
591 : "$8", "$f24", "$f26" \
594 #define ASM_VOLATILE_MSA_BINARY(inst) \
595 __asm__ volatile(".set push" "\n\t" \
596 ".set mips32r2" "\n\t" \
597 ".set hardfloat" "\n\t" \
598 ".set fp=64" "\n\t" \
599 ".set msa" "\n\t" \
600 "cfcmsa $t0, $1" "\n\t" \
601 "ctcmsa $1, %3" "\n\t" \
602 "ld.b $w24, 0(%1)" "\n\t" \
603 "ld.b $w26, 0(%2)" "\n\t" \
604 #inst" $w24, $w24, $w26" "\n\t" \
605 "cfcmsa %0, $1" "\n\t" \
606 "ctcmsa $1, $t0" "\n\t" \
607 ".set pop" "\n\t" \
608 : "=r" (ret) \
609 : "r" (&(addr[ws])), "r" (&(addr[wt])), "r" (msacsr)\
610 : "t0" \
613 /* TODO: Add cases for all fpu instructions, because all fpu instructions
614 can change the value of the FCSR register. */
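/* The calculate_FCSR/MSACSR helpers below rely on the ASM_VOLATILE_*
   macros above, which all share one pattern: save the host FCSR (or
   MSACSR), install the guest value, execute exactly one FP/MSA
   instruction natively, capture the resulting control/status word in
   'ret', then restore the host value.  A rough C-level sketch of that
   sequence (purely illustrative; read_fcsr/write_fcsr are hypothetical): */
#if 0
   UInt saved = read_fcsr();      /* cfc1  $8, $31             */
   write_fcsr(guest_fcsr);        /* ctc1  %2, $31             */
   /* ... run one FP instruction, e.g. add.s $f20, $f20, $f22  */
   ret = read_fcsr();             /* cfc1  %0, $31             */
   write_fcsr(saved);             /* ctc1  $8, $31             */
#endif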
615 extern UInt mips_dirtyhelper_calculate_FCSR_fp32 ( void* gs, UInt fs, UInt ft,
616 flt_op inst )
618 UInt ret = 0;
619 #if defined(__mips__)
620 VexGuestMIPS32State* guest_state = (VexGuestMIPS32State*)gs;
621 UInt loFsVal, hiFsVal, loFtVal, hiFtVal;
622 #if defined (_MIPSEL)
623 ULong *addr = (ULong *)&guest_state->guest_f0;
624 loFsVal = (UInt)addr[fs];
625 hiFsVal = (UInt)addr[fs+1];
626 loFtVal = (UInt)addr[ft];
627 hiFtVal = (UInt)addr[ft+1];
628 #elif defined (_MIPSEB)
629 UInt *addr = (UInt *)&guest_state->guest_f0;
630 loFsVal = (UInt)addr[fs*2];
631 hiFsVal = (UInt)addr[fs*2+2];
632 loFtVal = (UInt)addr[ft*2];
633 hiFtVal = (UInt)addr[ft*2+2];
634 #endif
635 ULong fsVal = ((ULong) hiFsVal) << 32 | loFsVal;
636 ULong ftVal = ((ULong) hiFtVal) << 32 | loFtVal;
637 UInt fcsr = guest_state->guest_FCSR;
638 switch (inst) {
639 case ROUNDWD:
640 ASM_VOLATILE_UNARY32_DOUBLE(round.w.d)
641 break;
642 case FLOORWS:
643 ASM_VOLATILE_UNARY32(floor.w.s)
644 break;
645 case FLOORWD:
646 ASM_VOLATILE_UNARY32_DOUBLE(floor.w.d)
647 break;
648 case TRUNCWS:
649 ASM_VOLATILE_UNARY32(trunc.w.s)
650 break;
651 case TRUNCWD:
652 ASM_VOLATILE_UNARY32_DOUBLE(trunc.w.d)
653 break;
654 case CEILWS:
655 ASM_VOLATILE_UNARY32(ceil.w.s)
656 break;
657 case CEILWD:
658 ASM_VOLATILE_UNARY32_DOUBLE(ceil.w.d)
659 break;
660 case CVTDS:
661 ASM_VOLATILE_UNARY32(cvt.d.s)
662 break;
663 case CVTDW:
664 ASM_VOLATILE_UNARY32(cvt.d.w)
665 break;
666 case CVTSW:
667 ASM_VOLATILE_UNARY32(cvt.s.w)
668 break;
669 case CVTSD:
670 ASM_VOLATILE_UNARY32_DOUBLE(cvt.s.d)
671 break;
672 case CVTWS:
673 ASM_VOLATILE_UNARY32(cvt.w.s)
674 break;
675 case CVTWD:
676 ASM_VOLATILE_UNARY32_DOUBLE(cvt.w.d)
677 break;
678 case ROUNDWS:
679 ASM_VOLATILE_UNARY32(round.w.s)
680 break;
681 case ADDS:
682 ASM_VOLATILE_BINARY32(add.s)
683 break;
684 case ADDD:
685 ASM_VOLATILE_BINARY32_DOUBLE(add.d)
686 break;
687 case SUBS:
688 ASM_VOLATILE_BINARY32(sub.s)
689 break;
690 case SUBD:
691 ASM_VOLATILE_BINARY32_DOUBLE(sub.d)
692 break;
693 case DIVS:
694 ASM_VOLATILE_BINARY32(div.s)
695 break;
696 default:
697 vassert(0);
698 break;
700 #endif
701 return ret;
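/* Illustrative direct call (a sketch; in practice this helper is only
   reached via an IR dirty call emitted by the MIPS front end): */
#if 0
   VexGuestMIPS32State st;
   LibVEX_GuestMIPS32_initialise(&st);
   UInt fcsr_after_add =
      mips_dirtyhelper_calculate_FCSR_fp32(&st, 0, 2, ADDS);
#endif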
704 /* TODO: Add cases for all fpu instructions, because all fpu instructions
705 can change the value of the FCSR register. */
706 extern UInt mips_dirtyhelper_calculate_FCSR_fp64 ( void* gs, UInt fs, UInt ft,
707 flt_op inst )
709 UInt ret = 0;
710 #if defined(__mips__) && ((__mips == 64) || \
711 (defined(__mips_isa_rev) && (__mips_isa_rev >= 2)))
712 #if defined(VGA_mips32)
713 VexGuestMIPS32State* guest_state = (VexGuestMIPS32State*)gs;
714 #else
715 VexGuestMIPS64State* guest_state = (VexGuestMIPS64State*)gs;
716 #endif
717 ULong *addr = (ULong *)&guest_state->guest_f0;
718 UInt fcsr = guest_state->guest_FCSR;
719 switch (inst) {
720 case ROUNDWD:
721 ASM_VOLATILE_UNARY64(round.w.d)
722 break;
723 case FLOORWS:
724 ASM_VOLATILE_UNARY64(floor.w.s)
725 break;
726 case FLOORWD:
727 ASM_VOLATILE_UNARY64(floor.w.d)
728 break;
729 case TRUNCWS:
730 ASM_VOLATILE_UNARY64(trunc.w.s)
731 break;
732 case TRUNCWD:
733 ASM_VOLATILE_UNARY64(trunc.w.d)
734 break;
735 case CEILWS:
736 ASM_VOLATILE_UNARY64(ceil.w.s)
737 break;
738 case CEILWD:
739 ASM_VOLATILE_UNARY64(ceil.w.d)
740 break;
741 case CVTDS:
742 ASM_VOLATILE_UNARY64(cvt.d.s)
743 break;
744 case CVTDW:
745 ASM_VOLATILE_UNARY64(cvt.d.w)
746 break;
747 case CVTSW:
748 ASM_VOLATILE_UNARY64(cvt.s.w)
749 break;
750 case CVTSD:
751 ASM_VOLATILE_UNARY64(cvt.s.d)
752 break;
753 case CVTWS:
754 ASM_VOLATILE_UNARY64(cvt.w.s)
755 break;
756 case CVTWD:
757 ASM_VOLATILE_UNARY64(cvt.w.d)
758 break;
759 case ROUNDWS:
760 ASM_VOLATILE_UNARY64(round.w.s)
761 break;
762 case CEILLS:
763 ASM_VOLATILE_UNARY64(ceil.l.s)
764 break;
765 case CEILLD:
766 ASM_VOLATILE_UNARY64(ceil.l.d)
767 break;
768 case CVTDL:
769 ASM_VOLATILE_UNARY64(cvt.d.l)
770 break;
771 case CVTLS:
772 ASM_VOLATILE_UNARY64(cvt.l.s)
773 break;
774 case CVTLD:
775 ASM_VOLATILE_UNARY64(cvt.l.d)
776 break;
777 case CVTSL:
778 ASM_VOLATILE_UNARY64(cvt.s.l)
779 break;
780 case FLOORLS:
781 ASM_VOLATILE_UNARY64(floor.l.s)
782 break;
783 case FLOORLD:
784 ASM_VOLATILE_UNARY64(floor.l.d)
785 break;
786 case ROUNDLS:
787 ASM_VOLATILE_UNARY64(round.l.s)
788 break;
789 case ROUNDLD:
790 ASM_VOLATILE_UNARY64(round.l.d)
791 break;
792 case TRUNCLS:
793 ASM_VOLATILE_UNARY64(trunc.l.s)
794 break;
795 case TRUNCLD:
796 ASM_VOLATILE_UNARY64(trunc.l.d)
797 break;
798 case ADDS:
799 ASM_VOLATILE_BINARY64(add.s)
800 break;
801 case ADDD:
802 ASM_VOLATILE_BINARY64(add.d)
803 break;
804 case SUBS:
805 ASM_VOLATILE_BINARY64(sub.s)
806 break;
807 case SUBD:
808 ASM_VOLATILE_BINARY64(sub.d)
809 break;
810 case DIVS:
811 ASM_VOLATILE_BINARY64(div.s)
812 break;
813 #if defined(__mips_isa_rev) && (__mips_isa_rev >= 6)
814 case RINTS:
815 ASM_VOLATILE_UNARY64(rint.s)
816 break;
817 case RINTD:
818 ASM_VOLATILE_UNARY64(rint.d)
819 break;
820 case MAXS:
821 ASM_VOLATILE_BINARY64(max.s)
822 break;
823 case MAXD:
824 ASM_VOLATILE_BINARY64(max.d)
825 break;
826 case MINS:
827 ASM_VOLATILE_BINARY64(min.s)
828 break;
829 case MIND:
830 ASM_VOLATILE_BINARY64(min.d)
831 break;
832 case MAXAS:
833 ASM_VOLATILE_BINARY64(maxa.s)
834 break;
835 case MAXAD:
836 ASM_VOLATILE_BINARY64(maxa.d)
837 break;
838 case MINAS:
839 ASM_VOLATILE_BINARY64(mina.s)
840 break;
841 case MINAD:
842 ASM_VOLATILE_BINARY64(mina.d)
843 break;
844 case CMPAFS:
845 ASM_VOLATILE_BINARY64(cmp.af.s)
846 break;
847 case CMPAFD:
848 ASM_VOLATILE_BINARY64(cmp.af.d)
849 break;
850 case CMPSAFS:
851 ASM_VOLATILE_BINARY64(cmp.saf.s)
852 break;
853 case CMPSAFD:
854 ASM_VOLATILE_BINARY64(cmp.saf.d)
855 break;
856 #endif
857 default:
858 vassert(0);
859 break;
861 #endif
862 return ret;
866 extern UInt mips_dirtyhelper_calculate_MSACSR ( void* gs, UInt ws, UInt wt,
867 msa_flt_op inst ) {
868 UInt ret = 0;
869 /* GCC 4.8 and later support MIPS MSA. */
870 #if defined(__mips__) && (defined(__clang__) || (GCC_VERSION >= 408))
871 #if defined(VGA_mips32)
872 VexGuestMIPS32State* guest_state = (VexGuestMIPS32State*)gs;
873 #else
874 VexGuestMIPS64State* guest_state = (VexGuestMIPS64State*)gs;
875 #endif
876 V128 *addr = (V128 *)&guest_state->guest_w0;
877 UInt msacsr = guest_state->guest_MSACSR;
879 switch (inst) {
880 case FADDW:
881 ASM_VOLATILE_MSA_BINARY(fadd.w)
882 break;
884 case FADDD:
885 ASM_VOLATILE_MSA_BINARY(fadd.d)
886 break;
888 case FSUBW:
889 ASM_VOLATILE_MSA_BINARY(fsub.w);
890 break;
892 case FSUBD:
893 ASM_VOLATILE_MSA_BINARY(fsub.d);
894 break;
896 case FMULW:
897 ASM_VOLATILE_MSA_BINARY(fmul.w);
898 break;
900 case FMULD:
901 ASM_VOLATILE_MSA_BINARY(fmul.d);
902 break;
904 case FDIVW:
905 ASM_VOLATILE_MSA_BINARY(fdiv.w);
906 break;
908 case FDIVD:
909 ASM_VOLATILE_MSA_BINARY(fdiv.d);
910 break;
912 case FMADDW:
913 ASM_VOLATILE_MSA_BINARY(fmadd.w);
914 break;
916 case FMADDD:
917 ASM_VOLATILE_MSA_BINARY(fmadd.d);
918 break;
920 case FCAFW:
921 ASM_VOLATILE_MSA_BINARY(fcaf.w);
922 break;
924 case FCAFD:
925 ASM_VOLATILE_MSA_BINARY(fcaf.d);
926 break;
928 case FSAFW:
929 ASM_VOLATILE_MSA_BINARY(fsaf.w);
930 break;
932 case FSAFD:
933 ASM_VOLATILE_MSA_BINARY(fsaf.d);
934 break;
936 case FCEQW:
937 ASM_VOLATILE_MSA_BINARY(fceq.w);
938 break;
940 case FCEQD:
941 ASM_VOLATILE_MSA_BINARY(fceq.d);
942 break;
944 case FSEQW:
945 ASM_VOLATILE_MSA_BINARY(fseq.w);
946 break;
948 case FSEQD:
949 ASM_VOLATILE_MSA_BINARY(fseq.d);
950 break;
952 case FCLTW:
953 ASM_VOLATILE_MSA_BINARY(fclt.w);
954 break;
956 case FCLTD:
957 ASM_VOLATILE_MSA_BINARY(fclt.d);
958 break;
960 case FSLTW:
961 ASM_VOLATILE_MSA_BINARY(fslt.w);
962 break;
964 case FSLTD:
965 ASM_VOLATILE_MSA_BINARY(fslt.d);
966 break;
968 case FCLEW:
969 ASM_VOLATILE_MSA_BINARY(fcle.w);
970 break;
972 case FCLED:
973 ASM_VOLATILE_MSA_BINARY(fcle.d);
974 break;
976 case FSLEW:
977 ASM_VOLATILE_MSA_BINARY(fsle.w);
978 break;
980 case FSLED:
981 ASM_VOLATILE_MSA_BINARY(fsle.d);
982 break;
984 case FCNEW:
985 ASM_VOLATILE_MSA_BINARY(fcne.w);
986 break;
988 case FCNED:
989 ASM_VOLATILE_MSA_BINARY(fcne.d);
990 break;
992 case FSNEW:
993 ASM_VOLATILE_MSA_BINARY(fsne.w);
994 break;
996 case FSNED:
997 ASM_VOLATILE_MSA_BINARY(fsne.d);
998 break;
1000 case FEXP2W:
1001 ASM_VOLATILE_MSA_BINARY(fexp2.w);
1002 break;
1004 case FEXP2D:
1005 ASM_VOLATILE_MSA_BINARY(fexp2.d);
1006 break;
1008 case FMINW:
1009 ASM_VOLATILE_MSA_BINARY(fmin.w);
1010 break;
1012 case FMIND:
1013 ASM_VOLATILE_MSA_BINARY(fmin.d);
1014 break;
1016 case FMINAW:
1017 ASM_VOLATILE_MSA_BINARY(fmin_a.w);
1018 break;
1020 case FMINAD:
1021 ASM_VOLATILE_MSA_BINARY(fmin_a.d);
1022 break;
1024 case FCUNW:
1025 ASM_VOLATILE_MSA_BINARY(fcun.w);
1026 break;
1028 case FCUND:
1029 ASM_VOLATILE_MSA_BINARY(fcun.d);
1030 break;
1032 case FSUNW:
1033 ASM_VOLATILE_MSA_BINARY(fsun.w);
1034 break;
1036 case FSUND:
1037 ASM_VOLATILE_MSA_BINARY(fsun.d);
1038 break;
1040 case FCORW:
1041 ASM_VOLATILE_MSA_BINARY(fcor.w);
1042 break;
1044 case FCORD:
1045 ASM_VOLATILE_MSA_BINARY(fcor.d);
1046 break;
1048 case FSORW:
1049 ASM_VOLATILE_MSA_BINARY(fsor.w);
1050 break;
1052 case FSORD:
1053 ASM_VOLATILE_MSA_BINARY(fsor.d);
1054 break;
1056 case FCUEQW:
1057 ASM_VOLATILE_MSA_BINARY(fcueq.w);
1058 break;
1060 case FCUEQD:
1061 ASM_VOLATILE_MSA_BINARY(fcueq.d);
1062 break;
1064 case FSUEQW:
1065 ASM_VOLATILE_MSA_BINARY(fsueq.w);
1066 break;
1068 case FSUEQD:
1069 ASM_VOLATILE_MSA_BINARY(fsueq.d);
1070 break;
1072 case FCUNEW:
1073 ASM_VOLATILE_MSA_BINARY(fcune.w);
1074 break;
1076 case FCUNED:
1077 ASM_VOLATILE_MSA_BINARY(fcune.d);
1078 break;
1080 case FSUNEW:
1081 ASM_VOLATILE_MSA_BINARY(fsune.w);
1082 break;
1084 case FSUNED:
1085 ASM_VOLATILE_MSA_BINARY(fsune.d);
1086 break;
1088 case FCULEW:
1089 ASM_VOLATILE_MSA_BINARY(fcule.w);
1090 break;
1092 case FCULED:
1093 ASM_VOLATILE_MSA_BINARY(fcule.d);
1094 break;
1096 case FSULEW:
1097 ASM_VOLATILE_MSA_BINARY(fsule.w);
1098 break;
1100 case FSULED:
1101 ASM_VOLATILE_MSA_BINARY(fsule.d);
1102 break;
1104 case FCULTW:
1105 ASM_VOLATILE_MSA_BINARY(fcult.w);
1106 break;
1108 case FCULTD:
1109 ASM_VOLATILE_MSA_BINARY(fcult.d);
1110 break;
1112 case FSULTW:
1113 ASM_VOLATILE_MSA_BINARY(fsult.w);
1114 break;
1116 case FSULTD:
1117 ASM_VOLATILE_MSA_BINARY(fsult.d);
1118 break;
1120 case FMAXW:
1121 ASM_VOLATILE_MSA_BINARY(fmax.w);
1122 break;
1124 case FMAXD:
1125 ASM_VOLATILE_MSA_BINARY(fmax.d);
1126 break;
1128 case FMAXAW:
1129 ASM_VOLATILE_MSA_BINARY(fmax_a.w);
1130 break;
1132 case FMAXAD:
1133 ASM_VOLATILE_MSA_BINARY(fmax_a.d);
1134 break;
1136 case FFINTSW:
1137 ASM_VOLATILE_MSA_UNARY(ffint_s.w);
1138 break;
1140 case FFINTSD:
1141 ASM_VOLATILE_MSA_UNARY(ffint_s.d);
1142 break;
1144 case FRCPW:
1145 ASM_VOLATILE_MSA_UNARY(frcp.w);
1146 break;
1148 case FRCPD:
1149 ASM_VOLATILE_MSA_UNARY(frcp.d);
1150 break;
1152 case FRSQRTW:
1153 ASM_VOLATILE_MSA_UNARY(frsqrt.w);
1154 break;
1156 case FRSQRTD:
1157 ASM_VOLATILE_MSA_UNARY(frsqrt.d);
1158 break;
1160 case FSQRTW:
1161 ASM_VOLATILE_MSA_UNARY(fsqrt.w);
1162 break;
1164 case FSQRTD:
1165 ASM_VOLATILE_MSA_UNARY(fsqrt.d);
1166 break;
1168 case FRINTW:
1169 ASM_VOLATILE_MSA_UNARY(frint.w);
1170 break;
1172 case FRINTD:
1173 ASM_VOLATILE_MSA_UNARY(frint.d);
1174 break;
1176 case FTRUNCUW:
1177 ASM_VOLATILE_MSA_UNARY(ftrunc_u.w);
1178 break;
1180 case FTRUNCUD:
1181 ASM_VOLATILE_MSA_UNARY(ftrunc_u.d);
1182 break;
1184 case FTRUNCSW:
1185 ASM_VOLATILE_MSA_UNARY(ftrunc_s.w);
1186 break;
1188 case FTRUNCSD:
1189 ASM_VOLATILE_MSA_UNARY(ftrunc_s.d);
1190 break;
1192 case FEXDOH:
1193 ASM_VOLATILE_MSA_BINARY(fexdo.h);
1194 break;
1196 case FEXDOW:
1197 ASM_VOLATILE_MSA_BINARY(fexdo.w);
1198 break;
1200 case FEXUPRW:
1201 ASM_VOLATILE_MSA_UNARY(fexupr.w);
1202 break;
1204 case FEXUPRD:
1205 ASM_VOLATILE_MSA_UNARY(fexupr.d);
1206 break;
1208 case FEXUPLW:
1209 ASM_VOLATILE_MSA_UNARY(fexupl.w);
1210 break;
1212 case FEXUPLD:
1213 ASM_VOLATILE_MSA_UNARY(fexupl.d);
1214 break;
1216 case FTQH:
1217 ASM_VOLATILE_MSA_BINARY(ftq.h);
1218 break;
1220 case FTQW:
1221 ASM_VOLATILE_MSA_BINARY(ftq.w);
1222 break;
1224 case FFQRD:
1225 ASM_VOLATILE_MSA_UNARY(ffqr.d);
1226 break;
1228 case FFQRW:
1229 ASM_VOLATILE_MSA_UNARY(ffqr.w);
1230 break;
1232 case FFQLD:
1233 ASM_VOLATILE_MSA_UNARY(ffql.d);
1234 break;
1236 case FFQLW:
1237 ASM_VOLATILE_MSA_UNARY(ffql.w);
1238 break;
1240 case FTINT_SD:
1241 ASM_VOLATILE_MSA_UNARY(ftint_s.d);
1242 break;
1244 case FTINT_SW:
1245 ASM_VOLATILE_MSA_UNARY(ftint_s.w);
1246 break;
1248 case FTINT_UD:
1249 ASM_VOLATILE_MSA_UNARY(ftint_u.d);
1250 break;
1252 case FTINT_UW:
1253 ASM_VOLATILE_MSA_UNARY(ftint_u.w);
1254 break;
1256 case FLOG2D:
1257 ASM_VOLATILE_MSA_UNARY(flog2.d);
1258 break;
1260 case FLOG2W:
1261 ASM_VOLATILE_MSA_UNARY(flog2.w);
1262 break;
1264 case FFINT_UD:
1265 ASM_VOLATILE_MSA_UNARY(ffint_u.d);
1266 break;
1268 case FFINT_UW:
1269 ASM_VOLATILE_MSA_UNARY(ffint_u.w);
1270 break;
1273 #endif
1274 return ret;
1277 extern UInt mips_dirtyhelper_get_MSAIR(void) {
1278 UInt ret = 0;
1279 /* GCC 4.8 and later support MIPS MSA. */
1280 #if defined(__mips__) && (defined(__clang__) || (GCC_VERSION >= 408))
1281 __asm__ volatile(".set push \n\t"
1282 ".set mips32r2 \n\t"
1283 ".set hardfloat \n\t"
1284 ".set fp=64 \n\t"
1285 ".set msa \n\t"
1286 ".set noreorder \n\t"
1287 "cfcmsa %0, $0 \n\t"
1288 ".set pop \n\t"
1289 : "=r" (ret) : : );
1290 #endif
1291 return ret;
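/* Sketch of a direct call; on hosts (or builds) without MSA support the
   helper simply returns 0: */
#if 0
   UInt msair = mips_dirtyhelper_get_MSAIR();
#endif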
1297 /*---------------------------------------------------------------*/
1298 /*--- end guest_mips_helpers.c ---*/
1299 /*---------------------------------------------------------------*/