Fix load-elim bug for branching instructions going to the same block
[hiphop-php.git] / hphp / ppc64-asm / asm-ppc64.cpp
/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | (c) Copyright IBM Corporation 2015-2016                              |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/
#include "hphp/ppc64-asm/asm-ppc64.h"
#include "hphp/ppc64-asm/decoded-instr-ppc64.h"
#include "hphp/ppc64-asm/decoder-ppc64.h"
#include "hphp/runtime/base/runtime-option.h"
#include "hphp/util/trace.h"

TRACE_SET_MOD(asmppc64);

namespace ppc64_asm {

VMTOC::~VMTOC() {
  FTRACE(1, "Number of values stored in TOC: {}\n",
         std::to_string(m_last_elem_pos));
}
int64_t VMTOC::pushElem(int64_t elem) {
  auto& map_elem = m_map[elem];
  if (map_elem) return map_elem;

  auto offset = allocTOC(static_cast<int32_t>(elem & 0xffffffff), true);
  map_elem = offset;
  allocTOC(static_cast<int32_t>((elem & 0xffffffff00000000) >> 32));
  m_last_elem_pos += 2;
  return offset;
}
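// A 64-bit element occupies two consecutive 4-byte TOC slots. The low word is
// stored first at an 8-byte aligned offset, so (assuming the little-endian
// layout this port targets) a single ld from the returned offset reads the
// whole quadword back; see getValue below.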
int64_t VMTOC::pushElem(int32_t elem) {
  auto& map_elem = m_map[elem];
  if (map_elem) return map_elem;

  auto offset = allocTOC(elem);
  map_elem = offset;
  m_last_elem_pos++;
  return offset;
}

VMTOC& VMTOC::getInstance() {
  static VMTOC instance;
  return instance;
}
intptr_t VMTOC::getPtrVector() {
  always_assert(m_tocvector != nullptr);
  return reinterpret_cast<intptr_t>(m_tocvector->base() + INT16_MAX + 1);
}
int64_t VMTOC::getValue(int64_t index, bool qword) {
  HPHP::Address addr = reinterpret_cast<HPHP::Address>(
      static_cast<intptr_t>(index) + getPtrVector());
  int64_t ret_val = 0;
  int max_elem = qword ? 8 : 4;
  for (int i = max_elem - 1; i >= 0; i--) {
    ret_val = addr[i] + (ret_val << 8);
  }
  return ret_val;
}
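// getValue reassembles the value byte by byte, least-significant byte first,
// matching the little-endian order in which allocTOC stores each word.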
int64_t VMTOC::allocTOC(int32_t target, bool align) {
  HPHP::Address addr = m_tocvector->frontier();
  if (align) {
    forceAlignment(addr);
    always_assert(reinterpret_cast<uintptr_t>(addr) % 8 == 0);
  }

  m_tocvector->assertCanEmit(sizeof(int32_t));
  m_tocvector->dword(target);
  return addr - (m_tocvector->base() + INT16_MAX + 1);
}
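// Offsets are relative to (base + INT16_MAX + 1): as in the usual PPC64 TOC
// convention, the TOC pointer is biased to the middle of the table so that a
// signed 16-bit displacement can reach the whole 64KB around it.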
void VMTOC::setTOCDataBlock(HPHP::DataBlock *db) {
  if (m_tocvector == nullptr) {
    m_tocvector = db;
    HPHP::Address addr = m_tocvector->frontier();
    forceAlignment(addr);
  }
  return;
}
void VMTOC::forceAlignment(HPHP::Address& addr) {
  // keep 8-byte alignment
  while (reinterpret_cast<uintptr_t>(addr) % 8 != 0) {
    uint8_t fill_byte = 0xf0;
    m_tocvector->assertCanEmit(sizeof(uint8_t));
    m_tocvector->byte(fill_byte);
    addr = m_tocvector->frontier();
  }
}
void BranchParams::decodeInstr(const PPC64Instr* const pinstr) {
  const DecoderInfo dinfo = Decoder::GetDecoder().decode(pinstr);
  switch (dinfo.opcode_name()) {
    case OpcodeNames::op_b:
    case OpcodeNames::op_bl:
      assert(dinfo.form() == Form::kI);
      defineBoBi(BranchConditions::Always);
      break;
    case OpcodeNames::op_bc:
      assert(dinfo.form() == Form::kB);
      B_form_t bform;
      bform.instruction = dinfo.instruction_image();
      m_bo = BranchParams::BO(bform.BO);
      m_bi = BranchParams::BI(bform.BI);
      break;
    case OpcodeNames::op_bcctr:
    case OpcodeNames::op_bcctrl:
      assert(dinfo.form() == Form::kXL);
      XL_form_t xlform;
      xlform.instruction = dinfo.instruction_image();
      m_bo = BranchParams::BO(xlform.BT);
      m_bi = BranchParams::BI(xlform.BA);
      break;
    default:
      assert(false && "Not a valid conditional branch instruction");
      // also possible: defineBoBi(BranchConditions::Always);
      break;
  }

  // Set m_lr accordingly for all 'call' flavors used
  switch (dinfo.opcode_name()) {
    case OpcodeNames::op_bl:
    case OpcodeNames::op_bcctrl:
      m_lr = true;
      break;
    default:
      m_lr = false;
      break;
  }
}
/*
 * Macro definition for EmitXOForm functions
 * Format:
 *  X(name, arg3, oe, xop)
 *    name: function name
 *    arg3: ARG if needed, otherwise NONE to skip
 *    oe:   parameter value
 *    xop:  parameter value
 */
#define ADDS \
  X(add,    ARG,  0, 266)  \
  X(addo,   ARG,  1, 266)  \
  X(divd,   ARG,  0, 489)  \
  X(mulldo, ARG,  1, 233)  \
  X(neg,    NONE, 0, 104)  \
  X(subf,   ARG,  0, 40)   \
  X(subfo,  ARG,  1, 40)

/* Function header: XO1 */
#define HEADER_ARG  const Reg64& rb,
#define HEADER_NONE

#define XO1(name, arg3, oe, xop) \
  void Assembler::name(const Reg64& rt, const Reg64& ra, arg3 bool rc) {

/* Function body: XO2 */
#define BODY_ARG  rb
#define BODY_NONE 0

#define XO2(name, arg3, oe, xop) \
  EmitXOForm(31, rn(rt), rn(ra), rn(arg3), oe, xop, rc); \
}

/* Macro expansion for function parts */
#define X(name, arg3, oe, xop) \
  XO1(name, HEADER_##arg3, oe, xop) \
  XO2(name, BODY_##arg3, oe, xop)
ADDS
#undef X

#undef HEADER_ARG
#undef HEADER_NONE

#undef BODY_ARG
#undef BODY_NONE

#undef ADDS
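// For reference, the X-macro above expands each entry into a full member
// definition; e.g. X(add, ARG, 0, 266) and X(neg, NONE, 0, 104) become:
//
//   void Assembler::add(const Reg64& rt, const Reg64& ra, const Reg64& rb,
//                       bool rc) {
//     EmitXOForm(31, rn(rt), rn(ra), rn(rb), 0, 266, rc);
//   }
//   void Assembler::neg(const Reg64& rt, const Reg64& ra, bool rc) {
//     EmitXOForm(31, rn(rt), rn(ra), rn(0), 0, 104, rc);
//   }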
void Assembler::addi(const Reg64& rt, const Reg64& ra, Immed imm) {
  assert(imm.fits(HPHP::sz::word) && "Immediate is too big");
  EmitDForm(14, rn(rt), rn(ra), imm.w());
}

void Assembler::addis(const Reg64& rt, const Reg64& ra, Immed imm) {
  assert(imm.fits(HPHP::sz::word) && "Immediate is too big");
  EmitDForm(15, rn(rt), rn(ra), imm.w());
}
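// Note: `and` below (like `or` and `xor` further down) is normally a reserved
// alternative operator token in C++; using it as a method name relies on
// operator names being disabled, e.g. via -fno-operator-names as the HHVM
// build does.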
void Assembler::and(const Reg64& ra, const Reg64& rs, const Reg64& rb,
                    bool rc) {
  EmitXForm(31, rn(rs), rn(ra), rn(rb), 28, rc);
}

void Assembler::andi(const Reg64& ra, const Reg64& rs, Immed imm) {
  assert(imm.fits(HPHP::sz::word) && "Immediate is too big");
  EmitDForm(28, rn(rs), rn(ra), imm.w());
}

void Assembler::b(int32_t offset) {
  EmitIForm(18, uint32_t(offset));
}

void Assembler::bl(int32_t offset) {
  EmitIForm(18, uint32_t(offset), 0, 1);
}
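// In the conditional branches below, BO encodes the branch-condition test
// (whether and how the CR bit and/or CTR are checked) and BI selects which
// CR bit to test, as laid out in the Power ISA B and XL instruction forms.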
void Assembler::bc(uint8_t bo, uint8_t bi, int16_t offset) {
  EmitBForm(16, bo, bi, uint32_t(offset), 0, 0);
}

void Assembler::bcctr(uint8_t bo, uint8_t bi, uint16_t bh) {
  EmitXLForm(19, bo, bi, (bh & 0x3), 528);
}
void Assembler::bctrl() {
  // The concept of a conditional call does not exist for the upper layers,
  // so only bctrl is defined here, although bcctrl would be possible.
  BranchParams bp(BranchConditions::Always);
  EmitXLForm(19, bp.bo(), bp.bi(), (0 /*bh*/ & 0x3), 528, 1);
}

void Assembler::blr() {
  // The concept of a conditional return does not exist for the upper layers,
  // so only blr is defined here, although bclr would be possible.
  BranchParams bp(BranchConditions::Always);
  EmitXLForm(19, bp.bo(), bp.bi(), (0 /*bh*/ & 0x3), 16, 0);
}
void Assembler::cmp(uint16_t bf, bool l, const Reg64& ra, const Reg64& rb) {
  EmitXForm(31, rn((bf << 2) | (uint16_t)l), rn(ra), rn(rb), 0);
}

void Assembler::cmpi(uint16_t bf, bool l, const Reg64& ra, Immed imm) {
  assert(imm.fits(HPHP::sz::word) && "Immediate is too big");
  EmitDForm(11, rn((bf << 2) | (uint16_t)l), rn(ra), imm.w());
}

void Assembler::cmpb(const Reg64& rs, const Reg64& ra, const Reg64& rb) {
  EmitXForm(31, rn(rs), rn(ra), rn(rb), 508);
}

void Assembler::cmpl(uint16_t bf, bool l, const Reg64& ra, const Reg64& rb) {
  EmitXForm(31, rn((bf << 2) | (uint16_t)l), rn(ra), rn(rb), 32);
}

void Assembler::cmpli(uint16_t bf, bool l, const Reg64& ra, Immed imm) {
  assert(imm.fits(HPHP::sz::word) && "Immediate is too big");
  EmitDForm(10, rn((bf << 2) | (uint16_t)l), rn(ra), imm.w());
}

void Assembler::extsb(const Reg64& ra, const Reg64& rs, bool rc) {
  EmitXForm(31, rn(rs), rn(ra), rn(0), 954, rc);
}

void Assembler::extsh(const Reg64& ra, const Reg64& rs, bool rc) {
  EmitXForm(31, rn(rs), rn(ra), rn(0), 922, rc);
}

void Assembler::extsw(const Reg64& ra, const Reg64& rs, bool rc) {
  EmitXForm(31, rn(rs), rn(ra), rn(0), 986, rc);
}
void Assembler::isel(const Reg64& rt, const Reg64& ra, const Reg64& rb,
                     uint8_t bc) {
  EmitAForm(31, rn(rt), rn(ra), rn(rb), rn(bc), 15);
}

void Assembler::lbz(const Reg64& rt, MemoryRef m) {
  assertx(Reg64(-1) == m.r.index);  // doesn't support base+index
  EmitDForm(34, rn(rt), rn(m.r.base), m.r.disp);
}

void Assembler::lbzx(const Reg64& rt, MemoryRef m) {
  assertx(!m.r.disp);  // doesn't support immediate displacement
  EmitXForm(31, rn(rt), rn(m.r.base), rn(m.r.index), 87);
}

void Assembler::ld(const Reg64& rt, MemoryRef m) {
  assertx(Reg64(-1) == m.r.index);  // doesn't support base+index
  EmitDSForm(58, rn(rt), rn(m.r.base), m.r.disp, 0);
}

void Assembler::ldx(const Reg64& rt, MemoryRef m) {
  assertx(!m.r.disp);  // doesn't support immediate displacement
  EmitXForm(31, rn(rt), rn(m.r.base), rn(m.r.index), 21);
}

void Assembler::lhz(const Reg64& rt, MemoryRef m) {
  assertx(Reg64(-1) == m.r.index);  // doesn't support base+index
  EmitDForm(40, rn(rt), rn(m.r.base), m.r.disp);
}

void Assembler::lhzx(const Reg64& rt, MemoryRef m) {
  assertx(!m.r.disp);  // doesn't support immediate displacement
  EmitXForm(31, rn(rt), rn(m.r.base), rn(m.r.index), 279);
}

void Assembler::lwz(const Reg64& rt, MemoryRef m) {
  assertx(Reg64(-1) == m.r.index);  // doesn't support base+index
  EmitDForm(32, rn(rt), rn(m.r.base), m.r.disp);
}

void Assembler::lwzx(const Reg64& rt, MemoryRef m) {
  assertx(!m.r.disp);  // doesn't support immediate displacement
  EmitXForm(31, rn(rt), rn(m.r.base), rn(m.r.index), 23);
}

void Assembler::mfspr(const SpecialReg spr, const Reg64& rs) {
  EmitXFXForm(31, rn(rs), spr, 339);
}

void Assembler::mtspr(const SpecialReg spr, const Reg64& rs) {
  EmitXFXForm(31, rn(rs), spr, 467);
}
void Assembler::nor(const Reg64& ra, const Reg64& rs, const Reg64& rb,
                    bool rc) {
  EmitXForm(31, rn(rs), rn(ra), rn(rb), 124, rc);
}

void Assembler::or(const Reg64& ra, const Reg64& rs, const Reg64& rb,
                   bool rc) {
  EmitXForm(31, rn(rs), rn(ra), rn(rb), 444, rc);
}

void Assembler::ori(const Reg64& ra, const Reg64& rs, Immed imm) {
  assert(imm.fits(HPHP::sz::word) && "Immediate is too big");
  EmitDForm(24, rn(rs), rn(ra), imm.w());
}

void Assembler::oris(const Reg64& ra, const Reg64& rs, Immed imm) {
  assert(imm.fits(HPHP::sz::word) && "Immediate is too big");
  EmitDForm(25, rn(rs), rn(ra), imm.w());
}
void Assembler::rldicl(const Reg64& ra, const Reg64& rs, uint8_t sh,
                       uint8_t mb, bool rc) {
  EmitMDForm(30, rn(rs), rn(ra), sh, mb, 0, rc);
}

void Assembler::rldicr(const Reg64& ra, const Reg64& rs, uint8_t sh,
                       uint8_t mb, bool rc) {
  EmitMDForm(30, rn(rs), rn(ra), sh, mb, 1, rc);
}

void Assembler::rlwinm(const Reg64& ra, const Reg64& rs, uint8_t sh,
                       uint8_t mb, uint16_t me, bool rc) {
  EmitMForm(21, rn(rs), rn(ra), rn(sh), mb, me, rc);
}

void Assembler::sld(const Reg64& ra, const Reg64& rs, const Reg64& rb,
                    bool rc) {
  EmitXForm(31, rn(rs), rn(ra), rn(rb), 27, rc);
}

void Assembler::srad(const Reg64& ra, const Reg64& rs, const Reg64& rb,
                     bool rc) {
  EmitXForm(31, rn(rs), rn(ra), rn(rb), 794, rc);
}

void Assembler::sradi(const Reg64& ra, const Reg64& rs, uint8_t sh, bool rc) {
  EmitXSForm(31, rn(rs), rn(ra), sh, 413, rc);
}
void Assembler::stb(const Reg64& rt, MemoryRef m) {
  assertx(Reg64(-1) == m.r.index);  // doesn't support base+index
  EmitDForm(38, rn(rt), rn(m.r.base), m.r.disp);
}

void Assembler::stbx(const Reg64& rt, MemoryRef m) {
  assertx(!m.r.disp);  // doesn't support immediate displacement
  EmitXForm(31, rn(rt), rn(m.r.base), rn(m.r.index), 215);
}

void Assembler::sth(const Reg64& rt, MemoryRef m) {
  assertx(Reg64(-1) == m.r.index);  // doesn't support base+index
  EmitDForm(44, rn(rt), rn(m.r.base), m.r.disp);
}

void Assembler::sthx(const Reg64& rt, MemoryRef m) {
  assertx(!m.r.disp);  // doesn't support immediate displacement
  EmitXForm(31, rn(rt), rn(m.r.base), rn(m.r.index), 407);
}

void Assembler::stw(const Reg64& rt, MemoryRef m) {
  assertx(Reg64(-1) == m.r.index);  // doesn't support base+index
  EmitDForm(36, rn(rt), rn(m.r.base), m.r.disp);
}

void Assembler::stwx(const Reg64& rt, MemoryRef m) {
  assertx(!m.r.disp);  // doesn't support immediate displacement
  EmitXForm(31, rn(rt), rn(m.r.base), rn(m.r.index), 151);
}

void Assembler::std(const Reg64& rt, MemoryRef m) {
  assertx(Reg64(-1) == m.r.index);  // doesn't support base+index
  EmitDSForm(62, rn(rt), rn(m.r.base), m.r.disp, 0);
}

void Assembler::stdu(const Reg64& rt, MemoryRef m) {
  assertx(Reg64(-1) == m.r.index);  // doesn't support base+index
  EmitDSForm(62, rn(rt), rn(m.r.base), m.r.disp, 1);
}

void Assembler::stdx(const Reg64& rt, MemoryRef m) {
  assertx(!m.r.disp);  // doesn't support immediate displacement
  EmitXForm(31, rn(rt), rn(m.r.base), rn(m.r.index), 149);
}

void Assembler::td(uint16_t to, const Reg64& ra, const Reg64& rb) {
  EmitXForm(31, rn(to), rn(ra), rn(rb), 68);
}

void Assembler::tw(uint16_t to, const Reg64& ra, const Reg64& rb) {
  EmitXForm(31, rn(to), rn(ra), rn(rb), 4);
}

void Assembler::xor(const Reg64& ra, const Reg64& rs, const Reg64& rb,
                    bool rc) {
  EmitXForm(31, rn(rs), rn(ra), rn(rb), 316, rc);
}
/* Floating point operations */

void Assembler::fadd(const RegXMM& frt, const RegXMM& fra, const RegXMM& frb,
                     bool rc) {
  EmitAForm(63, rn(frt), rn(fra), rn(frb), rn(0), 21, rc);
}

void Assembler::fsub(const RegXMM& frt, const RegXMM& fra, const RegXMM& frb,
                     bool rc) {
  EmitAForm(63, rn(frt), rn(fra), rn(frb), rn(0), 20, rc);
}

void Assembler::fmul(const RegXMM& frt, const RegXMM& fra, const RegXMM& frc,
                     bool rc) {
  EmitAForm(63, rn(frt), rn(fra), rn(0), rn(frc), 25, rc);
}

void Assembler::fdiv(const RegXMM& frt, const RegXMM& fra, const RegXMM& frb,
                     bool rc) {
  EmitAForm(63, rn(frt), rn(fra), rn(frb), rn(0), 18, rc);
}
void Assembler::unimplemented() {
  // Emit an instruction with the invalid opcode 0x0.
  EmitDForm(0, rn(0), rn(0), 0);
}

//////////////////////////////////////////////////////////////////////
void Assembler::patchAbsolute(CodeAddress jmp, CodeAddress dest) {
  // Initialize code block cb pointing to li64
  HPHP::CodeBlock cb;
  cb.init(jmp, Assembler::kLimmLen, "patched bctr");
  Assembler a{ cb };
  a.limmediate(reg::r12, ssize_t(dest),
#ifdef USE_TOC_ON_BRANCH
               ImmType::TocOnly
#else
               ImmType::AnyFixed
#endif
  );
}
void Assembler::patchBranch(CodeAddress jmp, CodeAddress dest) {
  auto di = DecodedInstruction(jmp);

  // Detect far branch
  if (di.isFarBranch()) {
    patchAbsolute(jmp, dest);
    return;
  }

  // Regular patch for branch-by-offset type
  if (!di.setNearBranchTarget(dest))
    assert(false && "Can't patch a branch with such a big offset");
}
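// Near branches are patched in place by rewriting the instruction's offset
// field; far branches were emitted as a limmediate/mtctr/bcctr sequence (see
// Label::branchFar below), so patchAbsolute rewrites the materialized target
// address instead.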
//////////////////////////////////////////////////////////////////////
void Assembler::li64(const Reg64& rt, int64_t imm64, bool fixedSize) {
  // li64 always emits 5 instructions, i.e. 20 bytes of instructions.
  // Start by assuming no bytes will be missing at the end.
  uint8_t missing = 0;

  // for assert purposes
  DEBUG_ONLY CodeAddress li64StartPos = frontier();

  if (HPHP::jit::deltaFits(imm64, HPHP::sz::word)) {
    // immediate has only the low 16 bits set: use a simple load immediate
    li(rt, static_cast<int16_t>(imm64));
    if (imm64 & (1ULL << 15) && !(imm64 & (1ULL << 16))) {
      // Clear the extended sign that should not be set: a number that sets
      // bit 15 but not bit 16 is not negative.
      clrldi(rt, rt, 48);
      missing = kLi64Len - 2 * instr_size_in_bytes;
    } else {
      missing = kLi64Len - 1 * instr_size_in_bytes;
    }
  } else if (HPHP::jit::deltaFits(imm64, HPHP::sz::dword)) {
    // immediate has only the low 32 bits set
    lis(rt, static_cast<int16_t>(imm64 >> 16));
    ori(rt, rt, static_cast<int16_t>(imm64 & UINT16_MAX));
    if (imm64 & (1ULL << 31) && !(imm64 & (1ULL << 32))) {
      // Clear the extended sign: a number that sets bit 31 but not bit 32 is
      // not negative.
      clrldi(rt, rt, 32);
      missing = kLi64Len - 3 * instr_size_in_bytes;
    } else {
      missing = kLi64Len - 2 * instr_size_in_bytes;
    }
  } else if (imm64 >> 48 == 0) {
    // immediate has only the low 48 bits set
    lis(rt, static_cast<int16_t>(imm64 >> 32));
    ori(rt, rt, static_cast<int16_t>((imm64 >> 16) & UINT16_MAX));
    sldi(rt, rt, 16);
    ori(rt, rt, static_cast<int16_t>(imm64 & UINT16_MAX));
    if (imm64 & (1ULL << 47)) {
      // clear extended sign
      clrldi(rt, rt, 16);
    } else {
      missing = kLi64Len - 4 * instr_size_in_bytes;
    }
  } else {
    // load all 64 bits
    lis(rt, static_cast<int16_t>(imm64 >> 48));
    ori(rt, rt, static_cast<int16_t>((imm64 >> 32) & UINT16_MAX));
    sldi(rt, rt, 32);
    oris(rt, rt, static_cast<int16_t>((imm64 >> 16) & UINT16_MAX));
    ori(rt, rt, static_cast<int16_t>(imm64 & UINT16_MAX));
  }

  if (fixedSize) {
    emitNop(missing);
    // guarantee our math with kLi64Len is working
    assert(kLi64Len == frontier() - li64StartPos);
  }
}
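// Worked example (sketch) of the full 64-bit path above for
// imm64 = 0x123456789ABCDEF0:
//
//   lis   rt, 0x1234      // rt = 0x0000000012340000
//   ori   rt, rt, 0x5678  // rt = 0x0000000012345678
//   sldi  rt, rt, 32      // rt = 0x1234567800000000
//   oris  rt, rt, 0x9ABC  // rt = 0x123456789ABC0000
//   ori   rt, rt, 0xDEF0  // rt = 0x123456789ABCDEF0
//
// Five instructions, i.e. exactly kLi64Len bytes, so nothing is missing.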
void Assembler::li32(const Reg64& rt, int32_t imm32) {
  if (HPHP::jit::deltaFits(imm32, HPHP::sz::word)) {
    // immediate has only the low 16 bits set: use a simple load immediate
    li(rt, static_cast<int16_t>(imm32));
    if (imm32 & (1ULL << 15) && !(imm32 & (1ULL << 16))) {
      // Clear the extended sign that should not be set: a number that sets
      // bit 15 but not bit 16 is not negative.
      clrldi(rt, rt, 48);
    } else {
      emitNop(instr_size_in_bytes); // keep li32 a fixed two instructions long
    }
  } else {
    // immediate uses the full 32 bits
    lis(rt, static_cast<int16_t>(imm32 >> 16));
    ori(rt, rt, static_cast<int16_t>(imm32 & UINT16_MAX));
  }
}
void Assembler::limmediate(const Reg64& rt, int64_t imm64, ImmType immt) {
  always_assert(HPHP::RuntimeOption::EvalPPC64MinTOCImmSize >= 0 &&
                HPHP::RuntimeOption::EvalPPC64MinTOCImmSize <= 64);

  auto fits = [](int64_t imm, uint16_t shift_n) {
    return (static_cast<uint64_t>(imm) >> shift_n) == 0;
  };

  if (
#ifndef USE_TOC_ON_BRANCH
      1 ||
#endif
      (fits(imm64, HPHP::RuntimeOption::EvalPPC64MinTOCImmSize)
       && (immt != ImmType::TocOnly))) {
    li64(rt, imm64, immt != ImmType::AnyCompact);
    return;
  }

  bool fits32 = fits(imm64, 32);
  int64_t TOCoffset;
  if (fits32) {
    TOCoffset = VMTOC::getInstance().pushElem(
        static_cast<int32_t>(UINT32_MAX & imm64));
  } else {
    TOCoffset = VMTOC::getInstance().pushElem(imm64);
  }

  auto const toc_start = frontier();
  if (TOCoffset > INT16_MAX) {
    int16_t complement = 0;
    // If the low 16 bits are still bigger than a signed 16-bit value can
    // hold, compensate the high half so the displacement can be used as
    // two's-complement.
    if ((TOCoffset & UINT16_MAX) > INT16_MAX) complement = 1;
    addis(rt, reg::r2, static_cast<int16_t>((TOCoffset >> 16) + complement));
    if (fits32) lwz(rt, rt[TOCoffset & UINT16_MAX]);
    else        ld (rt, rt[TOCoffset & UINT16_MAX]);
  } else {
    if (fits32) lwz(rt, reg::r2[TOCoffset]);
    else        ld (rt, reg::r2[TOCoffset]);
  }

  bool toc_may_grow = HPHP::RuntimeOption::EvalJitRelocationSize != 0;
  auto const toc_max_size = (immt == ImmType::AnyFixed) ? kLi64Len
      : ((immt == ImmType::TocOnly) || toc_may_grow) ? kTocLen
      : 0;
  if (toc_max_size) {
    emitNop(toc_max_size - (frontier() - toc_start));
  }
}
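// A value kept in the TOC is materialized by the path above in at most two
// instructions, e.g. for a 64-bit element at a large offset (sketch):
//
//   addis rt, r2, (TOCoffset >> 16) + complement
//   ld    rt, (TOCoffset & 0xffff)(rt)
//
// independent of the constant's value, which is what makes the TOC path
// shorter than li64 for large immediates.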
//////////////////////////////////////////////////////////////////////
// Label
//////////////////////////////////////////////////////////////////////
Label::~Label() {
  if (!m_toPatch.empty()) {
    assert(m_a && m_address && "Label had jumps but was never set");
  }
  for (auto& ji : m_toPatch) {
    ji.a->patchBranch(ji.addr, m_address);
  }
}
void Label::branch(Assembler& a, BranchConditions bc, LinkReg lr) {
  // Only optimize the jump if it's unlikely to be patched later.
  if (m_address) {
    // If diff is 0, this branch is certainly going to be patched.
    ssize_t diff = ssize_t(m_address - a.frontier());
    if (diff) {
      // check if an unconditional branch with b can be used
      if (BranchConditions::Always == bc) {
        // unconditional branch
        if (HPHP::jit::deltaFitsBits(diff, 26)) {
          addJump(&a);
          if (LinkReg::Save == lr) a.bl(diff);
          else                     a.b (diff);
          return;
        }
      } else {
        // conditional branch
        if (HPHP::jit::deltaFits(diff, HPHP::sz::word)) {
          BranchParams bp(bc);
          addJump(&a);
          assert(LinkReg::DoNotTouch == lr &&
                 "Conditional call is NOT supported.");

          // Special code for overflow handling
          if (bc == BranchConditions::Overflow ||
              bc == BranchConditions::NoOverflow) {
            a.xor(reg::r0, reg::r0, reg::r0, false);
            a.mtspr(Assembler::SpecialReg::XER, reg::r0);
          }
          a.bc(bp.bo(), bp.bi(), diff);
          return;
        }
      }
    }
  }
  // fallback: use CTR to perform an absolute branch of up to 64 bits
  branchFar(a, bc, lr);
}
void Label::branchFar(Assembler& a,
                      BranchConditions bc,
                      LinkReg lr,
                      ImmType immt) {
  // Mark the current address for patchAbsolute.
  addJump(&a);

  // Use the reserved function linkage register.
  const ssize_t address = ssize_t(m_address);
  a.limmediate(reg::r12, address, immt);

  // When branching to another context, r12 needs to hold the target address
  // so that r2 (the TOC reference) can be set correctly.
  a.mtctr(reg::r12);

  // Special code for overflow handling
  bool cond = (BranchConditions::Always != bc);
  if (bc == BranchConditions::Overflow || bc == BranchConditions::NoOverflow) {
    a.xor(reg::r0, reg::r0, reg::r0, false);
    a.mtspr(Assembler::SpecialReg::XER, reg::r0);
  } else if (cond && immt != ImmType::AnyCompact) {
    // An unconditional branch (jmp or call) doesn't need these reserved bytes.
    a.emitNop(2 * instr_size_in_bytes);
  }

  BranchParams bp(bc);
  if (LinkReg::Save == lr) {
    // call
    a.bctrl();
  } else {
    // jcc
    a.bcctr(bp.bo(), bp.bi(), 0);
  }
}
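// The far-branch sequence emitted above is therefore:
//   limmediate r12, target    // li64 sequence or a TOC load
//   mtctr r12
//   (XER reset or reserved nops, depending on the condition)
//   bctrl / bcctr
// Its fixed maximum size is what lets patchAbsolute rewrite the target
// address in place later.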
void Label::asm_label(Assembler& a) {
  assert(!m_address && !m_a && "Label was already set");
  m_a = &a;
  m_address = a.frontier();
}
void Label::addJump(Assembler* a) {
  if (m_address) return;
  JumpInfo info;
  info.a = a;
  info.addr = a->codeBlock.frontier();
  m_toPatch.push_back(info);
}

} // namespace ppc64_asm