2 +----------------------------------------------------------------------+
4 +----------------------------------------------------------------------+
5 | (c) Copyright IBM Corporation 2015-2016 |
6 +----------------------------------------------------------------------+
7 | This source file is subject to version 3.01 of the PHP license, |
8 | that is bundled with this package in the file LICENSE, and is |
9 | available through the world-wide-web at the following url: |
10 | http://www.php.net/license/3_01.txt |
11 | If you did not receive a copy of the PHP license and are unable to |
12 | obtain it through the world-wide-web, please send a note to |
13 | license@php.net so we can mail you a copy immediately. |
14 +----------------------------------------------------------------------+
#include "hphp/ppc64-asm/asm-ppc64.h"

#include "hphp/ppc64-asm/decoded-instr-ppc64.h"
#include "hphp/ppc64-asm/decoder-ppc64.h"
#include "hphp/runtime/base/runtime-option.h"
#include "hphp/util/trace.h"

#include <folly/MicroSpinLock.h>

#include <climits>
#include <type_traits>
26 TRACE_SET_MOD(asmppc64
);
30 // Lock to protect TOC when writing.
31 static folly::MicroSpinLock s_TOC
;
33 //////////////////////////////////////////////////////////////////////
36 FTRACE(1, "Number of values if 64bits stored in TOC: {}\n",
37 std::to_string(m_last_elem_pos
));
40 int64_t VMTOC::pushElem(int64_t elem
, bool elemMayChange
) {
43 offset
= allocTOC(elem
);
46 auto& map_elem
= m_map
[elem
];
47 if (map_elem
) return map_elem
;
48 offset
= allocTOC(elem
);
56 VMTOC
& VMTOC::getInstance() {
57 static VMTOC instance
;
61 intptr_t VMTOC::getPtrVector() {
62 always_assert(m_tocvector
!= nullptr);
63 return reinterpret_cast<intptr_t>(m_tocvector
->base() + INT16_MAX
+ 1);
66 int64_t VMTOC::getValue(int64_t index
, bool qword
) {
67 HPHP::Address addr
= reinterpret_cast<HPHP::Address
>(
68 static_cast<intptr_t>(index
) + getPtrVector());
70 int max_elem
= qword
? 8 : 4;
71 for (int i
= max_elem
-1; i
>= 0; i
--) {
72 ret_val
= addr
[i
] + (ret_val
<< 8);
77 uint64_t* VMTOC::getAddr(int64_t index
) {
78 return reinterpret_cast<uint64_t*>(
79 static_cast<intptr_t>(index
) + getPtrVector());
82 int64_t VMTOC::allocTOC(int64_t target
) {
83 folly::MSLGuard g
{s_TOC
};
84 HPHP::Address addr
= m_tocvector
->frontier();
85 m_tocvector
->qword(target
);
86 return addr
- (m_tocvector
->base() + INT16_MAX
+ 1);
89 void VMTOC::setTOCDataBlock(HPHP::DataBlock
*db
) {
90 if(m_tocvector
== nullptr) {
92 HPHP::Address addr
= m_tocvector
->frontier();
98 void VMTOC::forceAlignment(HPHP::Address
& addr
) {
99 folly::MSLGuard g
{s_TOC
};
100 // keep 8-byte alignment
101 while (reinterpret_cast<uintptr_t>(addr
) % 8 != 0) {
102 uint8_t fill_byte
= 0xf0;
103 m_tocvector
->assertCanEmit(sizeof(uint8_t));
104 m_tocvector
->byte(fill_byte
);
105 addr
= m_tocvector
->frontier();
109 int64_t VMTOC::getIndex(uint64_t elem
) {
110 auto pos
= m_map
.find(elem
);
111 if (pos
!= m_map
.end()) {
117 void BranchParams::decodeInstr(const PPC64Instr
* const pinstr
) {
118 const DecoderInfo dinfo
= Decoder::GetDecoder().decode(pinstr
);
119 switch (dinfo
.opcode_name()) {
120 case OpcodeNames::op_b
:
121 case OpcodeNames::op_bl
:
122 assert(dinfo
.form() == Form::kI
);
123 defineBoBi(BranchConditions::Always
);
125 case OpcodeNames::op_bc
:
126 assert(dinfo
.form() == Form::kB
);
128 bform
.instruction
= dinfo
.instruction_image();
129 m_bo
= BranchParams::BO(bform
.BO
);
130 m_bi
= BranchParams::BI(bform
.BI
);
132 case OpcodeNames::op_bcctr
:
133 case OpcodeNames::op_bcctrl
:
134 assert(dinfo
.form() == Form::kXL
);
136 xlform
.instruction
= dinfo
.instruction_image();
137 m_bo
= BranchParams::BO(xlform
.BT
);
138 m_bi
= BranchParams::BI(xlform
.BA
);
141 assert(false && "Not a valid conditional branch instruction");
142 // also possible: defineBoBi(BranchConditions::Always);
146 // Set m_lr accordingly for all 'call' flavors used
147 switch (dinfo
.opcode_name()) {
148 case OpcodeNames::op_bl
:
149 case OpcodeNames::op_bcctrl
:
159 * Macro definition for EmitXOForm functions
161 * X(name, arg3, oe, xop)
162 * name: function name
163 * arg3: ARG if needed, otherwise NONE to skip
164 * oe: parameter value
165 * xop: parameter value
169 X(add, ARG, 0, 266) \
170 X(addo, ARG, 1, 266) \
171 X(divd, ARG, 0, 489) \
172 X(mulldo, ARG, 1, 233) \
173 X(neg, NONE, 0, 104) \
174 X(subf, ARG, 0, 40) \
175 X(subfo, ARG, 1, 40) \
177 /* Function header: XO1 */
178 #define HEADER_ARG const Reg64& rb,
181 #define XO1(name, arg3, oe, xop) \
182 void Assembler::name(const Reg64& rt, const Reg64& ra, arg3 bool rc) {
184 /* Function body: XO2 */
188 #define XO2(name, arg3, oe, xop) \
189 EmitXOForm(31, rn(rt), rn(ra), rn(arg3), oe, xop, rc); \
192 /* Macro expansion for function parts */
193 #define X(name, arg3, oe, xop) \
194 XO1(name, HEADER_##arg3, oe, xop) \
195 XO2(name, BODY_##arg3, oe, xop)
207 void Assembler::addi(const Reg64
& rt
, const Reg64
& ra
, Immed imm
) {
208 assert(imm
.fits(HPHP::sz::word
) && "Immediate is too big");
209 EmitDForm(14, rn(rt
), rn(ra
), imm
.w());
212 void Assembler::addis(const Reg64
& rt
, const Reg64
& ra
, Immed imm
) {
213 assert(imm
.fits(HPHP::sz::word
) && "Immediate is too big");
214 EmitDForm(15, rn(rt
), rn(ra
), imm
.w());
217 void Assembler::and(const Reg64
& ra
, const Reg64
& rs
, const Reg64
& rb
,
219 EmitXForm(31, rn(rs
), rn(ra
), rn(rb
), 28, rc
);
222 void Assembler::andi(const Reg64
& ra
, const Reg64
& rs
, Immed imm
) {
223 assert(imm
.fits(HPHP::sz::word
) && "Immediate is too big");
224 EmitDForm(28, rn(rs
), rn(ra
), imm
.w());
227 void Assembler::b(int32_t offset
) {
228 EmitIForm(18, uint32_t(offset
));
231 void Assembler::bl(int32_t offset
) {
232 EmitIForm(18, uint32_t(offset
), 0, 1);
235 void Assembler::bc(uint8_t bo
, uint8_t bi
, int16_t offset
) {
236 EmitBForm(16, bo
, bi
, uint32_t(offset
), 0, 0);
239 void Assembler::bcctr(uint8_t bo
, uint8_t bi
, uint16_t bh
) {
240 EmitXLForm(19, bo
, bi
, (bh
& 0x3), 528);
243 void Assembler::bctrl() {
244 // The concept of a conditional call is not existent for upper layers.
245 // Therefore no bcctrl is defined despite being possible.
246 // Only bctrl is defined.
247 BranchParams
bp(BranchConditions::Always
);
248 EmitXLForm(19, bp
.bo(), bp
.bi(), (0 /*bh*/ & 0x3), 528, 1);
251 void Assembler::blr() {
252 // The concept of a conditional return is not existent for upper layers.
253 // Therefore no bclr is defined despite being possible.
254 // Only blr is defined.
255 BranchParams
bp(BranchConditions::Always
);
256 EmitXLForm(19, bp
.bo(), bp
.bi(), (0 /*bh*/ & 0x3), 16, 0);
259 void Assembler::cmp(uint16_t bf
, bool l
, const Reg64
& ra
, const Reg64
& rb
) {
260 EmitXForm(31, rn((bf
<< 2) | (uint16_t)l
), rn(ra
), rn(rb
), 0);
263 void Assembler::cmpi(uint16_t bf
, bool l
, const Reg64
& ra
, Immed imm
) {
264 assert(imm
.fits(HPHP::sz::word
) && "Immediate is too big");
265 EmitDForm(11, rn((bf
<< 2) | (uint16_t)l
), rn(ra
), imm
.w());
268 void Assembler::cmpb(const Reg64
& rs
, const Reg64
& ra
, const Reg64
& rb
) {
269 EmitXForm(31, rn(rs
), rn(ra
), rn(rb
), 508);
272 void Assembler::cmpl(uint16_t bf
, bool l
, const Reg64
& ra
, const Reg64
& rb
) {
273 EmitXForm(31, rn((bf
<< 2) | (uint16_t)l
), rn(ra
), rn(rb
), 32);
276 void Assembler::cmpli(uint16_t bf
, bool l
, const Reg64
& ra
, Immed imm
) {
277 assert(imm
.fits(HPHP::sz::word
) && "Immediate is too big");
278 EmitDForm(10, rn((bf
<< 2) | (uint16_t)l
), rn(ra
), imm
.w());
281 void Assembler::extsb(const Reg64
& ra
, const Reg64
& rs
, bool rc
) {
282 EmitXForm(31, rn(rs
), rn(ra
), rn(0), 954, rc
);
285 void Assembler::extsh(const Reg64
& ra
, const Reg64
& rs
, bool rc
) {
286 EmitXForm(31, rn(rs
), rn(ra
), rn(0), 922, rc
);
289 void Assembler::extsw(const Reg64
& ra
, const Reg64
& rs
, bool rc
) {
290 EmitXForm(31, rn(rs
), rn(ra
), rn(0), 986, rc
);
293 void Assembler::isel(const Reg64
& rt
, const Reg64
& ra
, const Reg64
& rb
,
295 EmitAForm(31, rn(rt
), rn(ra
), rn(rb
), rn(bc
), 15);
298 void Assembler::lbz(const Reg64
& rt
, MemoryRef m
) {
299 assertx(Reg64(-1) == m
.r
.index
); // doesn't support base+index
300 EmitDForm(34, rn(rt
), rn(m
.r
.base
), m
.r
.disp
);
303 void Assembler::lbzx(const Reg64
& rt
, MemoryRef m
) {
304 assertx(!m
.r
.disp
); // doesn't support immediate displacement
305 EmitXForm(31, rn(rt
), rn(m
.r
.base
), rn(m
.r
.index
), 87);
308 void Assembler::ld(const Reg64
& rt
, MemoryRef m
) {
309 assertx(Reg64(-1) == m
.r
.index
); // doesn't support base+index
310 EmitDSForm(58, rn(rt
), rn(m
.r
.base
), m
.r
.disp
, 0);
313 void Assembler::ldx(const Reg64
& rt
, MemoryRef m
) {
314 assertx(!m
.r
.disp
); // doesn't support immediate displacement
315 EmitXForm(31, rn(rt
), rn(m
.r
.base
), rn(m
.r
.index
), 21);
318 void Assembler::lhz(const Reg64
& rt
, MemoryRef m
) {
319 assertx(Reg64(-1) == m
.r
.index
); // doesn't support base+index
320 EmitDForm(40, rn(rt
), rn(m
.r
.base
), m
.r
.disp
);
323 void Assembler::lhzx(const Reg64
& rt
, MemoryRef m
) {
324 assertx(!m
.r
.disp
); // doesn't support immediate displacement
325 EmitXForm(31, rn(rt
), rn(m
.r
.base
), rn(m
.r
.index
), 279);
328 void Assembler::lwz(const Reg64
& rt
, MemoryRef m
) {
329 assertx(Reg64(-1) == m
.r
.index
); // doesn't support base+index
330 EmitDForm(32, rn(rt
), rn(m
.r
.base
), m
.r
.disp
);
333 void Assembler::lwzx(const Reg64
& rt
, MemoryRef m
) {
334 assertx(!m
.r
.disp
); // doesn't support immediate displacement
335 EmitXForm(31, rn(rt
), rn(m
.r
.base
), rn(m
.r
.index
), 23);
338 void Assembler::mfspr(const SpecialReg spr
, const Reg64
& rs
) {
339 EmitXFXForm(31, rn(rs
), spr
, 339);
342 void Assembler::mtspr(const SpecialReg spr
, const Reg64
& rs
) {
343 EmitXFXForm(31, rn(rs
), spr
, 467);
346 void Assembler::nor(const Reg64
& ra
, const Reg64
& rs
, const Reg64
& rb
,
348 EmitXForm(31, rn(rs
), rn(ra
), rn(rb
), 124, rc
);
351 void Assembler::or(const Reg64
& ra
, const Reg64
& rs
, const Reg64
& rb
,
353 EmitXForm(31, rn(rs
), rn(ra
), rn(rb
), 444, rc
);
356 void Assembler::ori(const Reg64
& ra
, const Reg64
& rs
, Immed imm
) {
357 assert(imm
.fits(HPHP::sz::word
) && "Immediate is too big");
358 EmitDForm(24, rn(rs
), rn(ra
), imm
.w());
361 void Assembler::oris(const Reg64
& ra
, const Reg64
& rs
, Immed imm
) {
362 assert(imm
.fits(HPHP::sz::word
) && "Immediate is too big");
363 EmitDForm(25, rn(rs
), rn(ra
), imm
.w());
366 void Assembler::rldicl(const Reg64
& ra
, const Reg64
& rs
, uint8_t sh
,
367 uint8_t mb
, bool rc
) {
368 EmitMDForm(30, rn(rs
), rn(ra
), sh
, mb
, 0, rc
);
371 void Assembler::rldicr(const Reg64
& ra
, const Reg64
& rs
, uint8_t sh
,
372 uint8_t mb
, bool rc
) {
373 EmitMDForm(30, rn(rs
), rn(ra
), sh
, mb
, 1, rc
);
376 void Assembler::rlwinm(const Reg64
& ra
, const Reg64
& rs
, uint8_t sh
, uint8_t mb
,
377 uint16_t me
, bool rc
) {
378 EmitMForm(21, rn(rs
), rn(ra
), rn(sh
), mb
, me
, rc
);
381 void Assembler::sld(const Reg64
& ra
, const Reg64
& rs
, const Reg64
& rb
,
383 EmitXForm(31, rn(rs
), rn(ra
), rn(rb
), 27, rc
);
386 void Assembler::srad(const Reg64
& ra
, const Reg64
& rs
, const Reg64
& rb
,
388 EmitXForm(31, rn(rs
), rn(ra
), rn(rb
), 794, rc
);
391 void Assembler::sradi(const Reg64
& ra
, const Reg64
& rs
, uint8_t sh
, bool rc
) {
392 EmitXSForm(31, rn(rs
), rn(ra
), sh
, 413, rc
);
395 void Assembler::stb(const Reg64
& rt
, MemoryRef m
) {
396 assertx(Reg64(-1) == m
.r
.index
); // doesn't support base+index
397 EmitDForm(38, rn(rt
), rn(m
.r
.base
), m
.r
.disp
);
400 void Assembler::stbx(const Reg64
& rt
, MemoryRef m
) {
401 assertx(!m
.r
.disp
); // doesn't support immediate displacement
402 EmitXForm(31, rn(rt
), rn(m
.r
.base
), rn(m
.r
.index
), 215);
405 void Assembler::sth(const Reg64
& rt
, MemoryRef m
) {
406 assertx(Reg64(-1) == m
.r
.index
); // doesn't support base+index
407 EmitDForm(44, rn(rt
), rn(m
.r
.base
), m
.r
.disp
);
410 void Assembler::sthx(const Reg64
& rt
, MemoryRef m
) {
411 assertx(!m
.r
.disp
); // doesn't support immediate displacement
412 EmitXForm(31, rn(rt
), rn(m
.r
.base
), rn(m
.r
.index
), 407);
415 void Assembler::stw(const Reg64
& rt
, MemoryRef m
) {
416 assertx(Reg64(-1) == m
.r
.index
); // doesn't support base+index
417 EmitDForm(36, rn(rt
), rn(m
.r
.base
), m
.r
.disp
);
420 void Assembler::stwx(const Reg64
& rt
, MemoryRef m
) {
421 assertx(!m
.r
.disp
); // doesn't support immediate displacement
422 EmitXForm(31, rn(rt
), rn(m
.r
.base
), rn(m
.r
.index
), 151);
425 void Assembler::std(const Reg64
& rt
, MemoryRef m
) {
426 assertx(Reg64(-1) == m
.r
.index
); // doesn't support base+index
427 EmitDSForm(62, rn(rt
), rn(m
.r
.base
), m
.r
.disp
, 0);
430 void Assembler::stdu(const Reg64
& rt
, MemoryRef m
) {
431 assertx(Reg64(-1) == m
.r
.index
); // doesn't support base+index
432 EmitDSForm(62, rn(rt
), rn(m
.r
.base
), m
.r
.disp
, 1);
435 void Assembler::stdx(const Reg64
& rt
, MemoryRef m
) {
436 assertx(!m
.r
.disp
); // doesn't support immediate displacement
437 EmitXForm(31, rn(rt
), rn(m
.r
.base
), rn(m
.r
.index
), 149);
440 void Assembler::td(uint16_t to
, const Reg64
& ra
, const Reg64
& rb
) {
441 EmitXForm(31, rn(to
), rn(ra
), rn(rb
), 68);
444 void Assembler::tw(uint16_t to
, const Reg64
& ra
, const Reg64
& rb
) {
445 EmitXForm(31, rn(to
), rn(ra
), rn(rb
), 4);
448 void Assembler::xor(const Reg64
& ra
, const Reg64
& rs
, const Reg64
& rb
,
450 EmitXForm(31, rn(rs
), rn(ra
), rn(rb
), 316, rc
);
453 /* Floating point operations */
454 void Assembler::fadd(const RegXMM
& frt
, const RegXMM
& fra
, const RegXMM
& frb
,
456 EmitAForm(63, rn(frt
), rn(fra
), rn(frb
), rn(0), 21, rc
);
459 void Assembler::fsub(const RegXMM
& frt
, const RegXMM
& fra
, const RegXMM
& frb
,
461 EmitAForm(63, rn(frt
), rn(fra
), rn(frb
), rn(0), 20, rc
);
464 void Assembler::fmul(const RegXMM
& frt
, const RegXMM
& fra
, const RegXMM
& frc
,
466 EmitAForm(63, rn(frt
), rn(fra
), rn(0), rn(frc
), 25, rc
);
469 void Assembler::fdiv(const RegXMM
& frt
, const RegXMM
& fra
, const RegXMM
& frb
,
471 EmitAForm(63, rn(frt
), rn(fra
), rn(frb
), rn(0), 18, rc
);
474 void Assembler::unimplemented(){
475 //Emit a instruction with invalid opcode 0x0
476 EmitDForm(0, rn(0), rn(0), 0);
479 //////////////////////////////////////////////////////////////////////
481 void Assembler::patchAbsolute(CodeAddress jmp
, CodeAddress dest
) {
482 // Initialize code block cb pointing to li64
484 cb
.init(jmp
, Assembler::kLimmLen
, "patched bctr");
486 a
.limmediate(reg::r12
, ssize_t(dest
), ImmType::TocOnly
, true);
489 void Assembler::patchBranch(CodeAddress jmp
, CodeAddress dest
) {
490 auto di
= DecodedInstruction(jmp
);
493 if (di
.isFarBranch()) {
494 patchAbsolute(jmp
, dest
);
498 // Regular patch for branch by offset type
499 if (!di
.setNearBranchTarget(dest
))
500 assert(false && "Can't patch a branch with such a big offset");
503 //////////////////////////////////////////////////////////////////////
505 void Assembler::li64 (const Reg64
& rt
, int64_t imm64
, bool fixedSize
) {
506 // li64 always emits 5 instructions i.e. 20 bytes of instructions.
507 // Assumes that 0 bytes will be missing in the end.
510 // for assert purposes
511 DEBUG_ONLY CodeAddress li64StartPos
= frontier();
513 if (HPHP::jit::deltaFits(imm64
, HPHP::sz::word
)) {
514 // immediate has only low 16 bits set, use simple load immediate
515 li(rt
, static_cast<int16_t>(imm64
));
516 if (imm64
& (1ULL << 15) && !(imm64
& (1ULL << 16))) {
517 // clear extended sign that should not be set
518 // (32bits number. Sets the 16th bit but not the 17th, it's not negative!)
520 missing
= kLi64Len
- 2 * instr_size_in_bytes
;
522 missing
= kLi64Len
- 1 * instr_size_in_bytes
;
524 } else if (HPHP::jit::deltaFits(imm64
, HPHP::sz::dword
)) {
525 // immediate has only low 32 bits set
526 lis(rt
, static_cast<int16_t>(imm64
>> 16));
527 ori(rt
, rt
, static_cast<int16_t>(imm64
& UINT16_MAX
));
528 if (imm64
& (1ULL << 31) && !(imm64
& (1ULL << 32))) {
529 // clear extended sign
530 // (64bits number. Sets the 32th bit but not the 33th, it's not negative!)
532 missing
= kLi64Len
- 3 * instr_size_in_bytes
;
534 missing
= kLi64Len
- 2 * instr_size_in_bytes
;
536 } else if (imm64
>> 48 == 0) {
537 // immediate has only low 48 bits set
538 lis(rt
, static_cast<int16_t>(imm64
>> 32));
539 ori(rt
, rt
, static_cast<int16_t>((imm64
>> 16) & UINT16_MAX
));
541 ori(rt
, rt
, static_cast<int16_t>(imm64
& UINT16_MAX
));
542 if (imm64
& (1ULL << 47)) {
543 // clear extended sign
546 missing
= kLi64Len
- 4 * instr_size_in_bytes
;
550 lis(rt
, static_cast<int16_t>(imm64
>> 48));
551 ori(rt
, rt
, static_cast<int16_t>((imm64
>> 32) & UINT16_MAX
));
553 oris(rt
, rt
, static_cast<int16_t>((imm64
>> 16) & UINT16_MAX
));
554 ori(rt
, rt
, static_cast<int16_t>(imm64
& UINT16_MAX
));
559 // guarantee our math with kLi64Len is working
560 assert(kLi64Len
== frontier() - li64StartPos
);
564 void Assembler::li32 (const Reg64
& rt
, int32_t imm32
) {
566 if (HPHP::jit::deltaFits(imm32
, HPHP::sz::word
)) {
567 // immediate has only low 16 bits set, use simple load immediate
568 li(rt
, static_cast<int16_t>(imm32
));
569 if (imm32
& (1ULL << 15) && !(imm32
& (1ULL << 16))) {
570 // clear extended sign that should not be set
571 // (32bits number. Sets the 16th bit but not the 17th, it's not negative!)
574 emitNop(instr_size_in_bytes
); // emit nop for a balanced li32 with 2 instr
577 // immediate has 32 bits set
578 lis(rt
, static_cast<int16_t>(imm32
>> 16));
579 ori(rt
, rt
, static_cast<int16_t>(imm32
& UINT16_MAX
));
583 void Assembler::li64TOC(const Reg64
& rt
, int64_t imm64
, ImmType
/*immt*/,
586 TOCoffset
= VMTOC::getInstance().pushElem(imm64
, immMayChange
);
588 if (TOCoffset
> INT16_MAX
) {
589 int16_t complement
= 0;
590 // If last four bytes is still bigger than a signed 16bits, uses as two
592 if ((TOCoffset
& UINT16_MAX
) > INT16_MAX
) complement
= 1;
593 addis(rt
, reg::r2
, static_cast<int16_t>((TOCoffset
>> 16) + complement
));
594 ld (rt
, rt
[TOCoffset
& UINT16_MAX
]);
596 ld (rt
, reg::r2
[TOCoffset
]);
597 emitNop(instr_size_in_bytes
);
601 void Assembler::limmediate(const Reg64
& rt
, int64_t imm64
,
602 ImmType immt
, bool immMayChange
) {
605 decltype(HPHP::RuntimeOption::EvalPPC64MinTOCImmSize
)>::value
,
606 "RuntimeOption::EvalPPC64MinTOCImmSize is expected to be unsigned.");
607 always_assert(HPHP::RuntimeOption::EvalPPC64MinTOCImmSize
<= 64);
609 if (immt
!= ImmType::TocOnly
) li64(rt
, imm64
, immt
!= ImmType::AnyCompact
);
610 else li64TOC (rt
, imm64
, immt
, immMayChange
);
613 //////////////////////////////////////////////////////////////////////
615 //////////////////////////////////////////////////////////////////////
618 if (!m_toPatch
.empty()) {
619 assert(m_a
&& m_address
&& "Label had jumps but was never set");
621 for (auto& ji
: m_toPatch
) {
622 assert(ji
.a
->contains(ji
.addr
));
623 ji
.a
->patchBranch(ji
.a
->toDestAddress(ji
.addr
), m_address
);
627 void Label::branch(Assembler
& a
, BranchConditions bc
,
628 LinkReg lr
, bool addrMayChange
) {
629 // Only optimize jump if it'll unlikely going to be patched.
631 // if diff is 0, then this is for sure going to be patched.
632 ssize_t diff
= ssize_t(m_address
- a
.frontier());
634 // check if an unconditional branch with b can be used
635 if (BranchConditions::Always
== bc
) {
636 // unconditional branch
637 if (HPHP::jit::deltaFitsBits(diff
, 26)) {
639 if (LinkReg::Save
== lr
) a
.bl(diff
);
644 // conditional branch
645 if (HPHP::jit::deltaFits(diff
, HPHP::sz::word
)) {
648 assert(LinkReg::DoNotTouch
== lr
&&
649 "Conditional call is NOT supported.");
651 // Special code for overflow handling
652 if (bc
== BranchConditions::Overflow
||
653 bc
== BranchConditions::NoOverflow
) {
654 a
.xor(reg::r0
, reg::r0
, reg::r0
,false);
655 a
.mtspr(Assembler::SpecialReg::XER
, reg::r0
);
657 a
.bc (bp
.bo(), bp
.bi(), diff
);
663 // fallback: use CTR to perform absolute branch up to 64 bits
664 branchFar(a
, bc
, lr
, ImmType::TocOnly
, addrMayChange
);
667 void Label::branchFar(Assembler
& a
,
672 // Marking current address for patchAbsolute
675 // Use reserved function linkage register
676 const ssize_t address
= ssize_t(m_address
);
677 a
.limmediate(reg::r12
, address
, immt
, immMayChange
);
679 // When branching to another context, r12 need to keep the target address
680 // to correctly set r2 (TOC reference).
683 // Special code for overflow handling
684 bool cond
= (BranchConditions::Always
!= bc
);
685 if (bc
== BranchConditions::Overflow
|| bc
== BranchConditions::NoOverflow
) {
686 a
.xor(reg::r0
, reg::r0
, reg::r0
,false);
687 a
.mtspr(Assembler::SpecialReg::XER
, reg::r0
);
688 } else if (cond
&& immt
!= ImmType::AnyCompact
) {
689 // Unconditional branch (jmp or call) doesn't need this reserve bytes
690 a
.emitNop(2 * instr_size_in_bytes
);
694 if (LinkReg::Save
== lr
) {
699 a
.bcctr(bp
.bo(), bp
.bi(), 0);
703 void Label::asm_label(Assembler
& a
) {
704 assert(!m_address
&& !m_a
&& "Label was already set");
706 m_address
= a
.frontier();
709 void Label::addJump(Assembler
* a
) {
710 if (m_address
) return;
713 info
.addr
= a
->codeBlock
.frontier();
714 m_toPatch
.push_back(info
);
717 } // namespace ppc64_asm