/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | (c) Copyright IBM Corporation 2015-2016                              |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/
#include "hphp/ppc64-asm/asm-ppc64.h"
#include "hphp/ppc64-asm/decoded-instr-ppc64.h"
#include "hphp/ppc64-asm/decoder-ppc64.h"

#include "hphp/runtime/base/runtime-option.h"

#include "hphp/util/trace.h"

#include <folly/MicroSpinLock.h>

#include <climits> // for LLONG_MIN, used by VMTOC::getIndex
#include <type_traits>

TRACE_SET_MOD(asmppc64);

namespace ppc64_asm {

// Lock to protect TOC when writing.
static folly::MicroSpinLock s_TOC;

//////////////////////////////////////////////////////////////////////

VMTOC::~VMTOC() {
  FTRACE(1, "Number of 64-bit values stored in TOC: {}\n",
         std::to_string(m_last_elem_pos));
}
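
// Push a 64-bit element into the TOC and return its offset. The contract,
// as read from the code below: values that may be patched later
// (elemMayChange) always get a fresh slot, while immutable values are
// memoized in m_map so repeated pushes of the same value share one slot.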
int64_t VMTOC::pushElem(int64_t elem, bool elemMayChange) {
  int64_t offset;
  if (elemMayChange) {
    offset = allocTOC(elem);
  }
  else {
    auto& map_elem = m_map[elem];
    if (map_elem) return map_elem;
    offset = allocTOC(elem);
    map_elem = offset;
  }

  m_last_elem_pos += 1;
  return offset;
}

VMTOC& VMTOC::getInstance() {
  static VMTOC instance;
  return instance;
}
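
// The pointer returned below is biased by INT16_MAX + 1 (0x8000) past the
// block's base. The apparent intent: ld/std reach their operands through a
// signed 16-bit displacement, so centering the reference point lets the
// whole first 64KB of the vector be addressed with offsets in
// [-0x8000, 0x7fff].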
intptr_t VMTOC::getPtrVector() {
  always_assert(m_tocvector != nullptr);
  return reinterpret_cast<intptr_t>(m_tocvector->base() + INT16_MAX + 1);
}
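
// Read back a 32- or 64-bit value given its TOC offset. The loop below
// reassembles the value byte by byte, adding higher-indexed bytes first so
// that addr[0] ends up least significant; this matches a little-endian
// layout (i.e. ppc64le, which is presumably the only target here).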
int64_t VMTOC::getValue(int64_t index, bool qword) {
  HPHP::Address addr = reinterpret_cast<HPHP::Address>(
      static_cast<intptr_t>(index) + getPtrVector());
  int64_t ret_val = 0;
  int max_elem = qword ? 8 : 4;
  for (int i = max_elem - 1; i >= 0; i--) {
    ret_val = addr[i] + (ret_val << 8);
  }
  return ret_val;
}

uint64_t* VMTOC::getAddr(int64_t index) {
  return reinterpret_cast<uint64_t*>(
      static_cast<intptr_t>(index) + getPtrVector());
}
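
// Append a qword to the TOC and return its offset relative to the biased
// reference point. Illustrative arithmetic (values assumed): with the
// frontier at base + 0x10, the returned offset is 0x10 - 0x8000 = -0x7ff0,
// which fits a signed 16-bit ld displacement off the TOC pointer.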
int64_t VMTOC::allocTOC(int64_t target) {
  folly::MSLGuard g{s_TOC};
  HPHP::Address addr = m_tocvector->frontier();
  m_tocvector->qword(target);
  return addr - (m_tocvector->base() + INT16_MAX + 1);
}

void VMTOC::setTOCDataBlock(HPHP::DataBlock *db) {
  if (m_tocvector == nullptr) {
    m_tocvector = db;
    HPHP::Address addr = m_tocvector->frontier();
    forceAlignment(addr);
  }
  return;
}

void VMTOC::forceAlignment(HPHP::Address& addr) {
  folly::MSLGuard g{s_TOC};
  // keep 8-byte alignment
  while (reinterpret_cast<uintptr_t>(addr) % 8 != 0) {
    uint8_t fill_byte = 0xf0;
    m_tocvector->assertCanEmit(sizeof(uint8_t));
    m_tocvector->byte(fill_byte);
    addr = m_tocvector->frontier();
  }
}

int64_t VMTOC::getIndex(uint64_t elem) {
  auto pos = m_map.find(elem);
  if (pos != m_map.end()) {
    return pos->second;
  }
  return LLONG_MIN;
}

void BranchParams::decodeInstr(const PPC64Instr* const pinstr) {
  const DecoderInfo dinfo = Decoder::GetDecoder().decode(pinstr);
  switch (dinfo.opcode_name()) {
    case OpcodeNames::op_b:
    case OpcodeNames::op_bl:
      assert(dinfo.form() == Form::kI);
      defineBoBi(BranchConditions::Always);
      break;
    case OpcodeNames::op_bc:
      assert(dinfo.form() == Form::kB);
      B_form_t bform;
      bform.instruction = dinfo.instruction_image();
      m_bo = BranchParams::BO(bform.BO);
      m_bi = BranchParams::BI(bform.BI);
      break;
    case OpcodeNames::op_bcctr:
    case OpcodeNames::op_bcctrl:
      assert(dinfo.form() == Form::kXL);
      XL_form_t xlform;
      xlform.instruction = dinfo.instruction_image();
      m_bo = BranchParams::BO(xlform.BT);
      m_bi = BranchParams::BI(xlform.BA);
      break;
    default:
      assert(false && "Not a valid conditional branch instruction");
      // also possible: defineBoBi(BranchConditions::Always);
      break;
  }

  // Set m_lr accordingly for all 'call' flavors used
  switch (dinfo.opcode_name()) {
    case OpcodeNames::op_bl:
    case OpcodeNames::op_bcctrl:
      m_lr = true;
      break;
    default:
      m_lr = false;
      break;
  }
}

/*
 * Macro definition for EmitXOForm functions
 * Format:
 *  X(name, arg3, oe, xop)
 *    name: function name
 *    arg3: ARG if needed, otherwise NONE to skip
 *    oe: parameter value
 *    xop: parameter value
 */
#define ADDS               \
  X(add,    ARG,  0, 266)  \
  X(addo,   ARG,  1, 266)  \
  X(divd,   ARG,  0, 489)  \
  X(mulldo, ARG,  1, 233)  \
  X(neg,    NONE, 0, 104)  \
  X(subf,   ARG,  0, 40)   \
  X(subfo,  ARG,  1, 40)

/* Function header: XO1 */
#define HEADER_ARG const Reg64& rb,
#define HEADER_NONE

#define XO1(name, arg3, oe, xop)                                        \
void Assembler::name(const Reg64& rt, const Reg64& ra, arg3 bool rc) {

/* Function body: XO2 */
#define BODY_ARG  rb
#define BODY_NONE 0

#define XO2(name, arg3, oe, xop)                                        \
  EmitXOForm(31, rn(rt), rn(ra), rn(arg3), oe, xop, rc);                \
}

/* Macro expansion for function parts */
#define X(name, arg3, oe, xop)       \
  XO1(name, HEADER_##arg3, oe, xop)  \
  XO2(name, BODY_##arg3, oe, xop)
ADDS
#undef X

#undef HEADER_ARG
#undef HEADER_NONE

#undef BODY_ARG
#undef BODY_NONE

#undef ADDS
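
// For illustration (not generated code): expanding X(add, ARG, 0, 266)
// through XO1/XO2 yields approximately
//
//   void Assembler::add(const Reg64& rt, const Reg64& ra, const Reg64& rb,
//                       bool rc) {
//     EmitXOForm(31, rn(rt), rn(ra), rn(rb), 0, 266, rc);
//   }
//
// while the NONE variant (neg) drops the rb parameter and passes rn(0).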

void Assembler::addi(const Reg64& rt, const Reg64& ra, Immed imm) {
  assert(imm.fits(HPHP::sz::word) && "Immediate is too big");
  EmitDForm(14, rn(rt), rn(ra), imm.w());
}

void Assembler::addis(const Reg64& rt, const Reg64& ra, Immed imm) {
  assert(imm.fits(HPHP::sz::word) && "Immediate is too big");
  EmitDForm(15, rn(rt), rn(ra), imm.w());
}
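
// Note: `and`, `or`, and `xor` below are alternative operator tokens in
// standard C++, so using them as method names presumably relies on a build
// flag such as GCC/Clang's -fno-operator-names (an assumption about this
// project's build, not something visible in this file).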
void Assembler::and(const Reg64& ra, const Reg64& rs, const Reg64& rb,
                    bool rc) {
  EmitXForm(31, rn(rs), rn(ra), rn(rb), 28, rc);
}

void Assembler::andi(const Reg64& ra, const Reg64& rs, Immed imm) {
  assert(imm.fits(HPHP::sz::word) && "Immediate is too big");
  EmitDForm(28, rn(rs), rn(ra), imm.w());
}

void Assembler::b(int32_t offset) {
  EmitIForm(18, uint32_t(offset));
}

void Assembler::bl(int32_t offset) {
  EmitIForm(18, uint32_t(offset), 0, 1);
}

void Assembler::bc(uint8_t bo, uint8_t bi, int16_t offset) {
  EmitBForm(16, bo, bi, uint32_t(offset), 0, 0);
}

void Assembler::bcctr(uint8_t bo, uint8_t bi, uint16_t bh) {
  EmitXLForm(19, bo, bi, (bh & 0x3), 528);
}

void Assembler::bctrl() {
  // The concept of a conditional call does not exist for the upper layers,
  // so no bcctrl is defined despite being possible. Only bctrl is defined.
  BranchParams bp(BranchConditions::Always);
  EmitXLForm(19, bp.bo(), bp.bi(), (0 /*bh*/ & 0x3), 528, 1);
}

void Assembler::blr() {
  // The concept of a conditional return does not exist for the upper layers,
  // so no bclr is defined despite being possible. Only blr is defined.
  BranchParams bp(BranchConditions::Always);
  EmitXLForm(19, bp.bo(), bp.bi(), (0 /*bh*/ & 0x3), 16, 0);
}
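
// In the cmp* encodings below, the first field packs BF and L together as
// (bf << 2) | l. In the Power ISA, BF selects which CR field receives the
// result and L selects a 64-bit (L=1) versus 32-bit (L=0) comparison.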
void Assembler::cmp(uint16_t bf, bool l, const Reg64& ra, const Reg64& rb) {
  EmitXForm(31, rn((bf << 2) | (uint16_t)l), rn(ra), rn(rb), 0);
}

void Assembler::cmpi(uint16_t bf, bool l, const Reg64& ra, Immed imm) {
  assert(imm.fits(HPHP::sz::word) && "Immediate is too big");
  EmitDForm(11, rn((bf << 2) | (uint16_t)l), rn(ra), imm.w());
}

void Assembler::cmpb(const Reg64& rs, const Reg64& ra, const Reg64& rb) {
  EmitXForm(31, rn(rs), rn(ra), rn(rb), 508);
}

void Assembler::cmpl(uint16_t bf, bool l, const Reg64& ra, const Reg64& rb) {
  EmitXForm(31, rn((bf << 2) | (uint16_t)l), rn(ra), rn(rb), 32);
}

void Assembler::cmpli(uint16_t bf, bool l, const Reg64& ra, Immed imm) {
  assert(imm.fits(HPHP::sz::word) && "Immediate is too big");
  EmitDForm(10, rn((bf << 2) | (uint16_t)l), rn(ra), imm.w());
}

void Assembler::extsb(const Reg64& ra, const Reg64& rs, bool rc) {
  EmitXForm(31, rn(rs), rn(ra), rn(0), 954, rc);
}

void Assembler::extsh(const Reg64& ra, const Reg64& rs, bool rc) {
  EmitXForm(31, rn(rs), rn(ra), rn(0), 922, rc);
}

void Assembler::extsw(const Reg64& ra, const Reg64& rs, bool rc) {
  EmitXForm(31, rn(rs), rn(ra), rn(0), 986, rc);
}

void Assembler::isel(const Reg64& rt, const Reg64& ra, const Reg64& rb,
                     uint8_t bc) {
  EmitAForm(31, rn(rt), rn(ra), rn(rb), rn(bc), 15);
}

void Assembler::lbz(const Reg64& rt, MemoryRef m) {
  assertx(Reg64(-1) == m.r.index); // doesn't support base+index
  EmitDForm(34, rn(rt), rn(m.r.base), m.r.disp);
}

void Assembler::lbzx(const Reg64& rt, MemoryRef m) {
  assertx(!m.r.disp); // doesn't support immediate displacement
  EmitXForm(31, rn(rt), rn(m.r.base), rn(m.r.index), 87);
}

void Assembler::ld(const Reg64& rt, MemoryRef m) {
  assertx(Reg64(-1) == m.r.index); // doesn't support base+index
  EmitDSForm(58, rn(rt), rn(m.r.base), m.r.disp, 0);
}

void Assembler::ldx(const Reg64& rt, MemoryRef m) {
  assertx(!m.r.disp); // doesn't support immediate displacement
  EmitXForm(31, rn(rt), rn(m.r.base), rn(m.r.index), 21);
}

void Assembler::lhz(const Reg64& rt, MemoryRef m) {
  assertx(Reg64(-1) == m.r.index); // doesn't support base+index
  EmitDForm(40, rn(rt), rn(m.r.base), m.r.disp);
}

void Assembler::lhzx(const Reg64& rt, MemoryRef m) {
  assertx(!m.r.disp); // doesn't support immediate displacement
  EmitXForm(31, rn(rt), rn(m.r.base), rn(m.r.index), 279);
}

void Assembler::lwz(const Reg64& rt, MemoryRef m) {
  assertx(Reg64(-1) == m.r.index); // doesn't support base+index
  EmitDForm(32, rn(rt), rn(m.r.base), m.r.disp);
}

void Assembler::lwzx(const Reg64& rt, MemoryRef m) {
  assertx(!m.r.disp); // doesn't support immediate displacement
  EmitXForm(31, rn(rt), rn(m.r.base), rn(m.r.index), 23);
}

void Assembler::mfspr(const SpecialReg spr, const Reg64& rs) {
  EmitXFXForm(31, rn(rs), spr, 339);
}

void Assembler::mtspr(const SpecialReg spr, const Reg64& rs) {
  EmitXFXForm(31, rn(rs), spr, 467);
}

void Assembler::nor(const Reg64& ra, const Reg64& rs, const Reg64& rb,
                    bool rc) {
  EmitXForm(31, rn(rs), rn(ra), rn(rb), 124, rc);
}

void Assembler::or(const Reg64& ra, const Reg64& rs, const Reg64& rb,
                   bool rc) {
  EmitXForm(31, rn(rs), rn(ra), rn(rb), 444, rc);
}

void Assembler::ori(const Reg64& ra, const Reg64& rs, Immed imm) {
  assert(imm.fits(HPHP::sz::word) && "Immediate is too big");
  EmitDForm(24, rn(rs), rn(ra), imm.w());
}

void Assembler::oris(const Reg64& ra, const Reg64& rs, Immed imm) {
  assert(imm.fits(HPHP::sz::word) && "Immediate is too big");
  EmitDForm(25, rn(rs), rn(ra), imm.w());
}

void Assembler::rldicl(const Reg64& ra, const Reg64& rs, uint8_t sh,
                       uint8_t mb, bool rc) {
  EmitMDForm(30, rn(rs), rn(ra), sh, mb, 0, rc);
}

void Assembler::rldicr(const Reg64& ra, const Reg64& rs, uint8_t sh,
                       uint8_t mb, bool rc) {
  EmitMDForm(30, rn(rs), rn(ra), sh, mb, 1, rc);
}

void Assembler::rlwinm(const Reg64& ra, const Reg64& rs, uint8_t sh, uint8_t mb,
                       uint16_t me, bool rc) {
  EmitMForm(21, rn(rs), rn(ra), rn(sh), mb, me, rc);
}

void Assembler::sld(const Reg64& ra, const Reg64& rs, const Reg64& rb,
                    bool rc) {
  EmitXForm(31, rn(rs), rn(ra), rn(rb), 27, rc);
}

void Assembler::srad(const Reg64& ra, const Reg64& rs, const Reg64& rb,
                     bool rc) {
  EmitXForm(31, rn(rs), rn(ra), rn(rb), 794, rc);
}

void Assembler::sradi(const Reg64& ra, const Reg64& rs, uint8_t sh, bool rc) {
  EmitXSForm(31, rn(rs), rn(ra), sh, 413, rc);
}

void Assembler::stb(const Reg64& rt, MemoryRef m) {
  assertx(Reg64(-1) == m.r.index); // doesn't support base+index
  EmitDForm(38, rn(rt), rn(m.r.base), m.r.disp);
}

void Assembler::stbx(const Reg64& rt, MemoryRef m) {
  assertx(!m.r.disp); // doesn't support immediate displacement
  EmitXForm(31, rn(rt), rn(m.r.base), rn(m.r.index), 215);
}

void Assembler::sth(const Reg64& rt, MemoryRef m) {
  assertx(Reg64(-1) == m.r.index); // doesn't support base+index
  EmitDForm(44, rn(rt), rn(m.r.base), m.r.disp);
}

void Assembler::sthx(const Reg64& rt, MemoryRef m) {
  assertx(!m.r.disp); // doesn't support immediate displacement
  EmitXForm(31, rn(rt), rn(m.r.base), rn(m.r.index), 407);
}

void Assembler::stw(const Reg64& rt, MemoryRef m) {
  assertx(Reg64(-1) == m.r.index); // doesn't support base+index
  EmitDForm(36, rn(rt), rn(m.r.base), m.r.disp);
}

void Assembler::stwx(const Reg64& rt, MemoryRef m) {
  assertx(!m.r.disp); // doesn't support immediate displacement
  EmitXForm(31, rn(rt), rn(m.r.base), rn(m.r.index), 151);
}

void Assembler::std(const Reg64& rt, MemoryRef m) {
  assertx(Reg64(-1) == m.r.index); // doesn't support base+index
  EmitDSForm(62, rn(rt), rn(m.r.base), m.r.disp, 0);
}

void Assembler::stdu(const Reg64& rt, MemoryRef m) {
  assertx(Reg64(-1) == m.r.index); // doesn't support base+index
  EmitDSForm(62, rn(rt), rn(m.r.base), m.r.disp, 1);
}

void Assembler::stdx(const Reg64& rt, MemoryRef m) {
  assertx(!m.r.disp); // doesn't support immediate displacement
  EmitXForm(31, rn(rt), rn(m.r.base), rn(m.r.index), 149);
}

void Assembler::td(uint16_t to, const Reg64& ra, const Reg64& rb) {
  EmitXForm(31, rn(to), rn(ra), rn(rb), 68);
}

void Assembler::tw(uint16_t to, const Reg64& ra, const Reg64& rb) {
  EmitXForm(31, rn(to), rn(ra), rn(rb), 4);
}

void Assembler::xor(const Reg64& ra, const Reg64& rs, const Reg64& rb,
                    bool rc) {
  EmitXForm(31, rn(rs), rn(ra), rn(rb), 316, rc);
}

/* Floating point operations */
void Assembler::fadd(const RegXMM& frt, const RegXMM& fra, const RegXMM& frb,
                     bool rc) {
  EmitAForm(63, rn(frt), rn(fra), rn(frb), rn(0), 21, rc);
}

void Assembler::fsub(const RegXMM& frt, const RegXMM& fra, const RegXMM& frb,
                     bool rc) {
  EmitAForm(63, rn(frt), rn(fra), rn(frb), rn(0), 20, rc);
}

void Assembler::fmul(const RegXMM& frt, const RegXMM& fra, const RegXMM& frc,
                     bool rc) {
  EmitAForm(63, rn(frt), rn(fra), rn(0), rn(frc), 25, rc);
}

void Assembler::fdiv(const RegXMM& frt, const RegXMM& fra, const RegXMM& frb,
                     bool rc) {
  EmitAForm(63, rn(frt), rn(fra), rn(frb), rn(0), 18, rc);
}

void Assembler::unimplemented() {
  // Emit an instruction with the invalid opcode 0x0
  EmitDForm(0, rn(0), rn(0), 0);
}

//////////////////////////////////////////////////////////////////////
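
// Patch a far branch in place: re-emit the address-materialization sequence
// at jmp so it loads dest instead. From the code below, jmp is assumed to
// point at a kLimmLen-byte limmediate/li64 block (as emitted by branchFar).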
void Assembler::patchAbsolute(CodeAddress jmp, CodeAddress dest) {
  // Initialize code block cb pointing to li64
  HPHP::CodeBlock cb;
  cb.init(jmp, Assembler::kLimmLen, "patched bctr");
  Assembler a{ cb };
  a.limmediate(reg::r12, ssize_t(dest), ImmType::TocOnly, true);
}

void Assembler::patchBranch(CodeAddress jmp, CodeAddress dest) {
  auto di = DecodedInstruction(jmp);

  // Detect a far branch
  if (di.isFarBranch()) {
    patchAbsolute(jmp, dest);
    return;
  }

  // Regular patch for a branch by offset
  if (!di.setNearBranchTarget(dest)) {
    assert(false && "Can't patch a branch with such a big offset");
  }
}

//////////////////////////////////////////////////////////////////////

void Assembler::li64(const Reg64& rt, int64_t imm64, bool fixedSize) {
  // A fixed-size li64 always emits 5 instructions, i.e. 20 bytes.
  // Start by assuming no bytes will be missing at the end.
  uint8_t missing = 0;

  // for assert purposes
  DEBUG_ONLY CodeAddress li64StartPos = frontier();

  if (HPHP::jit::deltaFits(imm64, HPHP::sz::word)) {
    // immediate has only the low 16 bits set: use a simple load immediate
    li(rt, static_cast<int16_t>(imm64));
    if (imm64 & (1ULL << 15) && !(imm64 & (1ULL << 16))) {
      // clear the extended sign that should not be set
      // (the value sets bit 15 but not bit 16, so it is not negative!)
      clrldi(rt, rt, 48);
      missing = kLi64Len - 2 * instr_size_in_bytes;
    } else {
      missing = kLi64Len - 1 * instr_size_in_bytes;
    }
  } else if (HPHP::jit::deltaFits(imm64, HPHP::sz::dword)) {
    // immediate has only the low 32 bits set
    lis(rt, static_cast<int16_t>(imm64 >> 16));
    ori(rt, rt, static_cast<int16_t>(imm64 & UINT16_MAX));
    if (imm64 & (1ULL << 31) && !(imm64 & (1ULL << 32))) {
      // clear the extended sign
      // (the value sets bit 31 but not bit 32, so it is not negative!)
      clrldi(rt, rt, 32);
      missing = kLi64Len - 3 * instr_size_in_bytes;
    } else {
      missing = kLi64Len - 2 * instr_size_in_bytes;
    }
  } else if (imm64 >> 48 == 0) {
    // immediate has only the low 48 bits set
    lis(rt, static_cast<int16_t>(imm64 >> 32));
    ori(rt, rt, static_cast<int16_t>((imm64 >> 16) & UINT16_MAX));
    sldi(rt, rt, 16);
    ori(rt, rt, static_cast<int16_t>(imm64 & UINT16_MAX));
    if (imm64 & (1ULL << 47)) {
      // clear the extended sign
      clrldi(rt, rt, 16);
    } else {
      missing = kLi64Len - 4 * instr_size_in_bytes;
    }
  } else {
    // load all 64 bits
    lis(rt, static_cast<int16_t>(imm64 >> 48));
    ori(rt, rt, static_cast<int16_t>((imm64 >> 32) & UINT16_MAX));
    sldi(rt, rt, 32);
    oris(rt, rt, static_cast<int16_t>((imm64 >> 16) & UINT16_MAX));
    ori(rt, rt, static_cast<int16_t>(imm64 & UINT16_MAX));
  }

  if (fixedSize) {
    emitNop(missing);
    // guarantee our math with kLi64Len is working
    assert(kLi64Len == frontier() - li64StartPos);
  }
}
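
// Illustrative expansion (constant chosen for the example): for
// imm64 = 0x123456789ABCDEF0 the last branch above emits
//
//   lis  rt, 0x1234        // rt = 0x12340000
//   ori  rt, rt, 0x5678    // rt = 0x12345678
//   sldi rt, rt, 32        // rt = 0x1234567800000000
//   oris rt, rt, 0x9ABC    // rt = 0x123456789ABC0000
//   ori  rt, rt, 0xDEF0    // rt = 0x123456789ABCDEF0
//
// i.e. exactly kLi64Len == 5 * instr_size_in_bytes bytes, so no nop padding
// is needed in that case.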

void Assembler::li32(const Reg64& rt, int32_t imm32) {
  if (HPHP::jit::deltaFits(imm32, HPHP::sz::word)) {
    // immediate has only the low 16 bits set: use a simple load immediate
    li(rt, static_cast<int16_t>(imm32));
    if (imm32 & (1ULL << 15) && !(imm32 & (1ULL << 16))) {
      // clear the extended sign that should not be set
      // (the value sets bit 15 but not bit 16, so it is not negative!)
      clrldi(rt, rt, 48);
    } else {
      emitNop(instr_size_in_bytes); // keep li32 a fixed two instructions
    }
  } else {
    // immediate uses the full 32 bits
    lis(rt, static_cast<int16_t>(imm32 >> 16));
    ori(rt, rt, static_cast<int16_t>(imm32 & UINT16_MAX));
  }
}
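
// Materialize imm64 via a TOC load instead of an inline li64 sequence.
// When the offset doesn't fit in 16 bits, the high half is bumped by one
// whenever the low half reads as negative, because ld sign-extends its
// displacement. Worked example (offset value assumed for illustration):
// TOCoffset = 0x18000 gives complement = 1, so addis computes r2 + 0x20000
// and the ld displacement 0x8000 acts as -0x8000, landing on r2 + 0x18000
// as intended.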
void Assembler::li64TOC(const Reg64& rt, int64_t imm64, ImmType /*immt*/,
                        bool immMayChange) {
  int64_t TOCoffset;
  TOCoffset = VMTOC::getInstance().pushElem(imm64, immMayChange);

  if (TOCoffset > INT16_MAX) {
    int16_t complement = 0;
    // If the low 16 bits are still bigger than a signed 16-bit maximum,
    // compensate with a two's complement adjustment of the high half.
    if ((TOCoffset & UINT16_MAX) > INT16_MAX) complement = 1;
    addis(rt, reg::r2, static_cast<int16_t>((TOCoffset >> 16) + complement));
    ld(rt, rt[TOCoffset & UINT16_MAX]);
  } else {
    ld(rt, reg::r2[TOCoffset]);
    emitNop(instr_size_in_bytes);
  }
}

void Assembler::limmediate(const Reg64& rt, int64_t imm64,
                           ImmType immt, bool immMayChange) {
  static_assert(
    std::is_unsigned<
      decltype(HPHP::RuntimeOption::EvalPPC64MinTOCImmSize)>::value,
    "RuntimeOption::EvalPPC64MinTOCImmSize is expected to be unsigned.");
  always_assert(HPHP::RuntimeOption::EvalPPC64MinTOCImmSize <= 64);

  if (immt != ImmType::TocOnly) li64(rt, imm64, immt != ImmType::AnyCompact);
  else                          li64TOC(rt, imm64, immt, immMayChange);
}
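
// Note: EvalPPC64MinTOCImmSize is only validated above, not consulted;
// presumably callers use it when choosing the ImmType they pass in, so the
// TOC-versus-inline decision here reduces to the immt argument.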

//////////////////////////////////////////////////////////////////////
// Label
//////////////////////////////////////////////////////////////////////

Label::~Label() {
  if (!m_toPatch.empty()) {
    assert(m_a && m_address && "Label had jumps but was never set");
  }
  for (auto& ji : m_toPatch) {
    assert(ji.a->contains(ji.addr));
    ji.a->patchBranch(ji.a->toDestAddress(ji.addr), m_address);
  }
}

void Label::branch(Assembler& a, BranchConditions bc,
                   LinkReg lr, bool addrMayChange) {
  // Only optimize the jump if it is unlikely to be patched.
  if (m_address) {
    // if diff is 0, then this is for sure going to be patched.
    ssize_t diff = ssize_t(m_address - a.frontier());
    if (diff) {
      // check if an unconditional branch with b can be used
      if (BranchConditions::Always == bc) {
        // unconditional branch
        if (HPHP::jit::deltaFitsBits(diff, 26)) {
          addJump(&a);
          if (LinkReg::Save == lr) a.bl(diff);
          else                     a.b (diff);
          return;
        }
      } else {
        // conditional branch
        if (HPHP::jit::deltaFits(diff, HPHP::sz::word)) {
          BranchParams bp(bc);
          addJump(&a);
          assert(LinkReg::DoNotTouch == lr &&
                 "Conditional call is NOT supported.");

          // Special code for overflow handling
          if (bc == BranchConditions::Overflow ||
              bc == BranchConditions::NoOverflow) {
            a.xor(reg::r0, reg::r0, reg::r0, false);
            a.mtspr(Assembler::SpecialReg::XER, reg::r0);
          }
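          // (The xor/mtspr pair above zeroes XER, clearing its sticky
          // summary-overflow bit. The branch that follows tests a CR bit
          // already latched by the earlier OE/Rc-form arithmetic, so this
          // presumably just keeps stale overflow state from leaking into
          // later checks.)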
          a.bc(bp.bo(), bp.bi(), diff);
          return;
        }
      }
    }
  }
  // fallback: use CTR to perform an absolute branch of up to 64 bits
  branchFar(a, bc, lr, ImmType::TocOnly, addrMayChange);
}

void Label::branchFar(Assembler& a,
                      BranchConditions bc,
                      LinkReg lr,
                      ImmType immt,
                      bool immMayChange) {
  // Mark the current address for patchAbsolute
  addJump(&a);

  // Use the reserved function linkage register
  const ssize_t address = ssize_t(m_address);
  a.limmediate(reg::r12, address, immt, immMayChange);

  // When branching to another context, r12 needs to keep the target address
  // so that r2 (the TOC reference) can be set correctly.
  a.mtctr(reg::r12);

  // Special code for overflow handling (see the note in Label::branch)
  bool cond = (BranchConditions::Always != bc);
  if (bc == BranchConditions::Overflow || bc == BranchConditions::NoOverflow) {
    a.xor(reg::r0, reg::r0, reg::r0, false);
    a.mtspr(Assembler::SpecialReg::XER, reg::r0);
  } else if (cond && immt != ImmType::AnyCompact) {
    // An unconditional branch (jmp or call) doesn't need these reserved bytes
    a.emitNop(2 * instr_size_in_bytes);
  }

  BranchParams bp(bc);
  if (LinkReg::Save == lr) {
    // call
    a.bctrl();
  } else {
    // jcc
    a.bcctr(bp.bo(), bp.bi(), 0);
  }
}

void Label::asm_label(Assembler& a) {
  assert(!m_address && !m_a && "Label was already set");
  m_a = &a;
  m_address = a.frontier();
}

void Label::addJump(Assembler* a) {
  if (m_address) return;
  JumpInfo info;
  info.a = a;
  info.addr = a->codeBlock.frontier();
  m_toPatch.push_back(info);
}

} // namespace ppc64_asm