    /* load the high and low 32-bit halves of the 64-bit value \v */
    movi    a2, ((\v) >> 32) & 0xffffffff
    movi    a3, ((\v) & 0xffffffff)

.macro check_res fr, r, sr
    /* compare both 32-bit halves of \fr against the expected value \r */
    movi    a3, ((\r) >> 32) & 0xffffffff
    movi    a3, ((\r) & 0xffffffff)
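
/*
 * Each test_op2/test_op3 invocation below runs the operation once per
 * rounding mode; the four result values and four FSR values that follow
 * the operands are the expectations for round-to-nearest-even, toward
 * zero, toward +inf and toward -inf, in that order (this is how the
 * test macros, not shown in this fragment, iterate modes 0-3).
 */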
    /* MAX_FLOAT + MAX_FLOAT = +inf/MAX_FLOAT */
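    /*
     * Overflow rounds to +inf under round-to-nearest and round-up, and
     * is clamped to MAX_FLOAT under round-to-zero and round-down;
     * Overflow and Inexact (FSR_OI) are raised in every mode.
     */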
    test_op2 add.d, f6, f7, f8, F64_MAX, F64_MAX, \
        F64_PINF, F64_MAX, F64_PINF, F64_MAX, \
        FSR_OI, FSR_OI, FSR_OI, FSR_OI
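    /* 1 + +inf = +inf, no flags raised */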
    test_op2 add.d, f6, f7, f8, F64_1, F64_PINF, \
        F64_PINF, F64_PINF, F64_PINF, F64_PINF, \
        FSR__, FSR__, FSR__, FSR__

    /* +inf + -inf = default NaN */
    test_op2 add.d, f0, f1, f2, F64_PINF, F64_NINF, \
        F64_DNAN, F64_DNAN, F64_DNAN, F64_DNAN, \
        FSR_V, FSR_V, FSR_V, FSR_V
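    /* 1 + QNaN1 = QNaN1, no flags raised */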
    test_op2 add.d, f9, f10, f11, F64_1, F64_QNAN(1), \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
        FSR__, FSR__, FSR__, FSR__
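    /* 1 + SNaN1 = QNaN1, raises Invalid */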
    test_op2 add.d, f12, f13, f14, F64_1, F64_SNAN(1), \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
        FSR_V, FSR_V, FSR_V, FSR_V
    /* SNaN1 + SNaN2 = QNaN1 */
    test_op2 add.d, f15, f0, f1, F64_SNAN(1), F64_SNAN(2), \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
        FSR_V, FSR_V, FSR_V, FSR_V
    /* QNaN1 + SNaN2 = QNaN1 */
    test_op2 add.d, f5, f6, f7, F64_QNAN(1), F64_SNAN(2), \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
        FSR_V, FSR_V, FSR_V, FSR_V
    /* SNaN1 + QNaN2 = QNaN1 */
    test_op2 add.d, f8, f9, f10, F64_SNAN(1), F64_QNAN(2), \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
        FSR_V, FSR_V, FSR_V, FSR_V

    /* norm - norm = denorm */
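    /*
     * (MIN_NORM | 1) - MIN_NORM leaves only the mantissa LSB, i.e. the
     * smallest denormal.  The difference is exact, so no flags are
     * raised in any rounding mode.
     */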
    test_op2 sub.d, f6, f7, f8, F64_MIN_NORM | 1, F64_MIN_NORM, \
        0x00000001, 0x00000001, 0x00000001, 0x00000001, \
        FSR__, FSR__, FSR__, FSR__
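
    /*
     * (1 + 2^-52) * (1 + 2^-52) = 1 + 2^-51 + 2^-104; the 2^-104 tail
     * is dropped in every mode except toward +inf, which rounds up to
     * 1 + 3*2^-52.  Inexact is raised in every mode.
     */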
    test_op2 mul.d, f0, f1, f2, F64_1 | 1, F64_1 | 1, \
        F64_1 | 2, F64_1 | 2, F64_1 | 3, F64_1 | 2, \
        FSR_I, FSR_I, FSR_I, FSR_I
    /* MAX_FLOAT/2 * MAX_FLOAT/2 = +inf/MAX_FLOAT */
    test_op2 mul.d, f6, f7, f8, F64_MAX_2, F64_MAX_2, \
        F64_PINF, F64_MAX, F64_PINF, F64_MAX, \
        FSR_OI, FSR_OI, FSR_OI, FSR_OI
    /* min norm * min norm = 0/denorm */
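    /*
     * MIN_NORM^2 = 2^-2044 is far below the smallest denormal, 2^-1074;
     * it rounds to zero in every mode except toward +inf, which yields
     * the smallest denormal.  Underflow and Inexact are raised.
     */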
    test_op2 mul.d, f6, f7, f8, F64_MIN_NORM, F64_MIN_NORM, \
        F64_0, F64_0, 0x00000001, F64_0, \
        FSR_UI, FSR_UI, FSR_UI, FSR_UI
    /* inf * 0 = default NaN */
    test_op2 mul.d, f6, f7, f8, F64_PINF, F64_0, \
        F64_DNAN, F64_DNAN, F64_DNAN, F64_DNAN, \
        FSR_V, FSR_V, FSR_V, FSR_V
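
    /*
     * 0 + (1 + 2^-52) * (1 + 2^-52): same rounding behavior as the
     * mul.d inexact case above.
     */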
    test_op3 madd.d, f0, f1, f2, f0, F64_0, F64_1 | 1, F64_1 | 1, \
        F64_1 | 2, F64_1 | 2, F64_1 | 3, F64_1 | 2, \
        FSR_I, FSR_I, FSR_I, FSR_I

test madd_d_precision
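    /*
     * A fused multiply-add rounds only once:
     * -(1 + 2*2^-52) + (1 + 2^-52) * (1 + 2^-52) = 2^-104
     * (0x3970000000000000), which is exact in every rounding mode.  An
     * implementation that rounded the intermediate product to
     * 1 + 2*2^-52 would return zero instead.
     */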
    test_op3 madd.d, f0, f1, f2, f0, \
        F64_MINUS | F64_1 | 2, F64_1 | 1, F64_1 | 1, \
        0x3970000000000000, 0x3970000000000000, 0x3970000000000000, 0x3970000000000000, \
        FSR__, FSR__, FSR__, FSR__

    /*
     * DFPU madd/msub NaN propagation priority, with the accumulator as
     * NaN1 and the multiplicands as NaN2 and NaN3: NaN1 first, then
     * NaN3, then NaN2.
     */
    test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_1, F64_1, \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
        FSR__, FSR__, FSR__, FSR__
    test_op3 madd.d, f0, f1, f2, f0, F64_1, F64_QNAN(2), F64_1, \
        F64_QNAN(2), F64_QNAN(2), F64_QNAN(2), F64_QNAN(2), \
        FSR__, FSR__, FSR__, FSR__
    test_op3 madd.d, f0, f1, f2, f0, F64_1, F64_1, F64_QNAN(3), \
        F64_QNAN(3), F64_QNAN(3), F64_QNAN(3), F64_QNAN(3), \
        FSR__, FSR__, FSR__, FSR__

    test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_QNAN(2), F64_1, \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
        FSR__, FSR__, FSR__, FSR__
    test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_1, F64_QNAN(3), \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
        FSR__, FSR__, FSR__, FSR__
    test_op3 madd.d, f0, f1, f2, f0, F64_1, F64_QNAN(2), F64_QNAN(3), \
        F64_QNAN(3), F64_QNAN(3), F64_QNAN(3), F64_QNAN(3), \
        FSR__, FSR__, FSR__, FSR__

    test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_QNAN(2), F64_QNAN(3), \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
        FSR__, FSR__, FSR__, FSR__

    /* inf * 0 = default NaN */
    test_op3 madd.d, f0, f1, f2, f0, F64_1, F64_PINF, F64_0, \
        F64_DNAN, F64_DNAN, F64_DNAN, F64_DNAN, \
        FSR_V, FSR_V, FSR_V, FSR_V
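    /*
     * A NaN addend takes precedence over the default NaN produced by
     * the invalid inf * 0 product; the Invalid flag is still raised.
     */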
    /* inf * 0 + SNaN1 = QNaN1 */
    test_op3 madd.d, f0, f1, f2, f0, F64_SNAN(1), F64_PINF, F64_0, \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
        FSR_V, FSR_V, FSR_V, FSR_V
    /* inf * 0 + QNaN1 = QNaN1 */
    test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_PINF, F64_0, \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
        FSR_V, FSR_V, FSR_V, FSR_V

    /* madd/msub quiets an SNaN operand to a QNaN and sets the Invalid flag */
    test_op3 madd.d, f0, f1, f2, f0, F64_SNAN(1), F64_1, F64_1, \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
        FSR_V, FSR_V, FSR_V, FSR_V
    test_op3 madd.d, f0, f1, f2, f0, F64_QNAN(1), F64_SNAN(2), F64_1, \
        F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), F64_QNAN(1), \
        FSR_V, FSR_V, FSR_V, FSR_V