/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include <stdlib.h>

#include "jit/shared/IonAssemblerBufferWithConstantPools.h"
#include "jsapi-tests/tests.h"

// Tests for classes in:
//
//   jit/shared/IonAssemblerBuffer.h
//   jit/shared/IonAssemblerBufferWithConstantPools.h
//
// Classes in js::jit tested:
//
//   BufferOffset
//   BufferSlice (implicitly)
//   AssemblerBuffer
//
//   BranchDeadlineSet
//   Pool (implicitly)
//   AssemblerBufferWithConstantPools

BEGIN_TEST(testAssemblerBuffer_BufferOffset) {
  using js::jit::BufferOffset;

  BufferOffset off1;
  BufferOffset off2(10);

  CHECK(!off1.assigned());
  CHECK(off2.assigned());
  CHECK_EQUAL(off2.getOffset(), 10);
  off1 = off2;
  CHECK(off1.assigned());
  CHECK_EQUAL(off1.getOffset(), 10);

  return true;
}
END_TEST(testAssemblerBuffer_BufferOffset)

BEGIN_TEST(testAssemblerBuffer_AssemblerBuffer) {
  using js::jit::BufferOffset;
  typedef js::jit::AssemblerBuffer<5 * sizeof(uint32_t), uint32_t> AsmBuf;
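
  // Each slice of this AsmBuf holds 5 uint32_t instructions, so the sixth
  // putInt() below starts a new slice.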

  AsmBuf ab;
  CHECK(ab.isAligned(16));
  CHECK_EQUAL(ab.size(), 0u);
  CHECK_EQUAL(ab.nextOffset().getOffset(), 0);
  CHECK(!ab.oom());

  BufferOffset off1 = ab.putInt(1000017);
  CHECK_EQUAL(off1.getOffset(), 0);
  CHECK_EQUAL(ab.size(), 4u);
  CHECK_EQUAL(ab.nextOffset().getOffset(), 4);
  CHECK(!ab.isAligned(16));
  CHECK(ab.isAligned(4));
  CHECK(ab.isAligned(1));
  CHECK_EQUAL(*ab.getInst(off1), 1000017u);

  BufferOffset off2 = ab.putInt(1000018);
  CHECK_EQUAL(off2.getOffset(), 4);

  BufferOffset off3 = ab.putInt(1000019);
  CHECK_EQUAL(off3.getOffset(), 8);

  BufferOffset off4 = ab.putInt(1000020);
  CHECK_EQUAL(off4.getOffset(), 12);
  CHECK_EQUAL(ab.size(), 16u);
  CHECK_EQUAL(ab.nextOffset().getOffset(), 16);

  // Last one in the slice.
  BufferOffset off5 = ab.putInt(1000021);
  CHECK_EQUAL(off5.getOffset(), 16);
  CHECK_EQUAL(ab.size(), 20u);
  CHECK_EQUAL(ab.nextOffset().getOffset(), 20);

  BufferOffset off6 = ab.putInt(1000022);
  CHECK_EQUAL(off6.getOffset(), 20);
  CHECK_EQUAL(ab.size(), 24u);
  CHECK_EQUAL(ab.nextOffset().getOffset(), 24);

  // Reference a previous slice. Exercise the finger.
  CHECK_EQUAL(*ab.getInst(off1), 1000017u);
  CHECK_EQUAL(*ab.getInst(off6), 1000022u);
  CHECK_EQUAL(*ab.getInst(off1), 1000017u);
  CHECK_EQUAL(*ab.getInst(off5), 1000021u);

  // Too much data for one slice.
  const uint32_t fixdata[] = {2000036, 2000037, 2000038,
                              2000039, 2000040, 2000041};

  // Split payload across multiple slices.
  CHECK_EQUAL(ab.nextOffset().getOffset(), 24);
  BufferOffset good1 = ab.putBytesLarge(sizeof(fixdata), fixdata);
  CHECK_EQUAL(good1.getOffset(), 24);
  CHECK_EQUAL(ab.nextOffset().getOffset(), 48);
  CHECK_EQUAL(*ab.getInst(good1), 2000036u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(32)), 2000038u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(36)), 2000039u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(40)), 2000040u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(44)), 2000041u);

  return true;
}
END_TEST(testAssemblerBuffer_AssemblerBuffer)

BEGIN_TEST(testAssemblerBuffer_BranchDeadlineSet) {
  typedef js::jit::BranchDeadlineSet<3> DLSet;
  using js::jit::BufferOffset;
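
  // A BranchDeadlineSet tracks, per branch-range category (3 here), the
  // buffer offsets by which pending short-range branches must be resolved.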

  js::LifoAlloc alloc(1024);
  DLSet dls(alloc);

  CHECK(dls.empty());
  CHECK(alloc.isEmpty());  // Constructor must be infallible.
  CHECK_EQUAL(dls.size(), 0u);
  CHECK_EQUAL(dls.maxRangeSize(), 0u);

  // Removing a non-existent deadline is OK.
  dls.removeDeadline(1, BufferOffset(7));

  // Add deadlines in increasing order as intended. This is optimal.
  dls.addDeadline(1, BufferOffset(10));
  CHECK(!dls.empty());
  CHECK_EQUAL(dls.size(), 1u);
  CHECK_EQUAL(dls.maxRangeSize(), 1u);
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
  CHECK_EQUAL(dls.earliestDeadlineRange(), 1u);

  // Removing non-existent deadlines is OK.
  dls.removeDeadline(1, BufferOffset(7));
  dls.removeDeadline(1, BufferOffset(17));
  dls.removeDeadline(0, BufferOffset(10));
  CHECK_EQUAL(dls.size(), 1u);
  CHECK_EQUAL(dls.maxRangeSize(), 1u);

  // Two identical deadlines for different ranges.
  dls.addDeadline(2, BufferOffset(10));
  CHECK(!dls.empty());
  CHECK_EQUAL(dls.size(), 2u);
  CHECK_EQUAL(dls.maxRangeSize(), 1u);
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);

  // It doesn't matter which range earliestDeadlineRange() reports first,
  // but it must report both.
  if (dls.earliestDeadlineRange() == 1) {
    dls.removeDeadline(1, BufferOffset(10));
    CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
    CHECK_EQUAL(dls.earliestDeadlineRange(), 2u);
  } else {
    CHECK_EQUAL(dls.earliestDeadlineRange(), 2u);
    dls.removeDeadline(2, BufferOffset(10));
    CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
    CHECK_EQUAL(dls.earliestDeadlineRange(), 1u);
  }

  // Add a deadline which is the front of range 0, but not the global earliest.
  dls.addDeadline(0, BufferOffset(20));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
  CHECK(dls.earliestDeadlineRange() > 0);

  // Non-optimal add to front of single-entry range 0.
  dls.addDeadline(0, BufferOffset(15));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
  CHECK(dls.earliestDeadlineRange() > 0);

  // Append to 2-entry range 0.
  dls.addDeadline(0, BufferOffset(30));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
  CHECK(dls.earliestDeadlineRange() > 0);

  // Add penultimate entry.
  dls.addDeadline(0, BufferOffset(25));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
  CHECK(dls.earliestDeadlineRange() > 0);

  // Prepend, stealing earliest from other range.
  dls.addDeadline(0, BufferOffset(5));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 5);
  CHECK_EQUAL(dls.earliestDeadlineRange(), 0u);

  // Remove central element.
  dls.removeDeadline(0, BufferOffset(20));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 5);
  CHECK_EQUAL(dls.earliestDeadlineRange(), 0u);

  // Remove front, giving back the lead.
  dls.removeDeadline(0, BufferOffset(5));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
  CHECK(dls.earliestDeadlineRange() > 0);

  // Remove front, giving back earliest to range 0.
  dls.removeDeadline(dls.earliestDeadlineRange(), BufferOffset(10));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 15);
  CHECK_EQUAL(dls.earliestDeadlineRange(), 0u);

  // Remove tail.
  dls.removeDeadline(0, BufferOffset(30));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 15);
  CHECK_EQUAL(dls.earliestDeadlineRange(), 0u);

  // Now range 0 = [15, 25].
  CHECK_EQUAL(dls.size(), 2u);
  dls.removeDeadline(0, BufferOffset(25));
  dls.removeDeadline(0, BufferOffset(15));
  CHECK(dls.empty());

  return true;
}
END_TEST(testAssemblerBuffer_BranchDeadlineSet)

// Mock Assembler class for testing the AssemblerBufferWithConstantPools
// callbacks.
namespace {

struct TestAssembler;

typedef js::jit::AssemblerBufferWithConstantPools<
    /* SliceSize */ 5 * sizeof(uint32_t),
    /* InstSize */ 4,
    /* Inst */ uint32_t,
    /* Asm */ TestAssembler,
    /* NumShortBranchRanges */ 3>
    AsmBufWithPool;

struct TestAssembler {
  // Mock instruction set:
  //
  //   0x1111xxxx - align filler instructions.
  //   0x2222xxxx - manually inserted 'arith' instructions.
  //   0xaaaaxxxx - noop filler instruction.
  //   0xb0bbxxxx - branch xxxx bytes forward. (Pool guard).
  //   0xb1bbxxxx - branch xxxx bytes forward. (Short-range branch).
  //   0xb2bbxxxx - branch xxxx bytes forward. (Veneer branch).
  //   0xb3bbxxxx - branch xxxx bytes forward. (Patched short-range branch).
  //   0xc0ccxxxx - constant pool load (uninitialized).
  //   0xc1ccxxxx - constant pool load to index xxxx.
  //   0xc2ccxxxx - constant pool load xxxx bytes ahead.
  //   0xffffxxxx - pool header with xxxx bytes.
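
  // Maximum forward reach, in bytes, of a short-range branch (0xb1bbxxxx).
  // The tests below register a deadline this far past each such branch.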
  static const unsigned BranchRange = 36;

  static void InsertIndexIntoTag(uint8_t* load_, uint32_t index) {
    uint32_t* load = reinterpret_cast<uint32_t*>(load_);
    MOZ_ASSERT(*load == 0xc0cc0000,
               "Expected uninitialized constant pool load");
    MOZ_ASSERT(index < 0x10000);
    *load = 0xc1cc0000 + index;
  }

  static void PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr) {
    uint32_t* load = reinterpret_cast<uint32_t*>(loadAddr);
    uint32_t index = *load & 0xffff;
    MOZ_ASSERT(*load == (0xc1cc0000 | index),
               "Expected constant pool load(index)");
    ptrdiff_t offset = reinterpret_cast<uint8_t*>(constPoolAddr) -
                       reinterpret_cast<uint8_t*>(loadAddr);
    offset += index * 4;
    MOZ_ASSERT(offset % 4 == 0, "Unaligned constant pool");
    MOZ_ASSERT(offset > 0 && offset < 0x10000, "Pool out of range");
    *load = 0xc2cc0000 + offset;
  }

  static void WritePoolGuard(js::jit::BufferOffset branch, uint32_t* dest,
                             js::jit::BufferOffset afterPool) {
    MOZ_ASSERT(branch.assigned());
    MOZ_ASSERT(afterPool.assigned());
    size_t branchOff = branch.getOffset();
    size_t afterPoolOff = afterPool.getOffset();
    MOZ_ASSERT(afterPoolOff > branchOff);
    uint32_t delta = afterPoolOff - branchOff;
    *dest = 0xb0bb0000 + delta;
  }

  static void WritePoolHeader(void* start, js::jit::Pool* p, bool isNatural) {
    MOZ_ASSERT(!isNatural, "Natural pool guards not implemented.");
    uint32_t* hdr = reinterpret_cast<uint32_t*>(start);
    *hdr = 0xffff0000 + p->getPoolSize();
  }

  static void PatchShortRangeBranchToVeneer(AsmBufWithPool* buffer,
                                            unsigned rangeIdx,
                                            js::jit::BufferOffset deadline,
                                            js::jit::BufferOffset veneer) {
    size_t branchOff = deadline.getOffset() - BranchRange;
    size_t veneerOff = veneer.getOffset();
    uint32_t* branch = buffer->getInst(js::jit::BufferOffset(branchOff));

    MOZ_ASSERT((*branch & 0xffff0000) == 0xb1bb0000,
               "Expected short-range branch instruction");
    // Copy branch offset to veneer. A real instruction set would require
    // some adjustment of the label linked-list.
    *buffer->getInst(veneer) = 0xb2bb0000 | (*branch & 0xffff);
    MOZ_ASSERT(veneerOff > branchOff, "Veneer should follow branch");
    *branch = 0xb3bb0000 + (veneerOff - branchOff);
  }
};
}  // namespace

BEGIN_TEST(testAssemblerBuffer_AssemblerBufferWithConstantPools) {
  using js::jit::BufferOffset;

  AsmBufWithPool ab(/* guardSize= */ 1,
                    /* headerSize= */ 1,
                    /* instBufferAlign(unused)= */ 0,
                    /* poolMaxOffset= */ 17,
                    /* pcBias= */ 0,
                    /* alignFillInst= */ 0x11110000,
                    /* nopFillInst= */ 0xaaaa0000,
                    /* nopFill= */ 0);
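
  // poolMaxOffset is chosen very small here so pools are forced out after
  // only a few instructions; the loads below reach at most pc+16.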

  CHECK(ab.isAligned(16));
  CHECK_EQUAL(ab.size(), 0u);
  CHECK_EQUAL(ab.nextOffset().getOffset(), 0);
  CHECK(!ab.oom());

  // Each slice holds 5 instructions. Trigger a constant pool inside the slice.
  uint32_t poolLoad[] = {0xc0cc0000};
  uint32_t poolData[] = {0xdddd0000, 0xdddd0001, 0xdddd0002, 0xdddd0003};
  AsmBufWithPool::PoolEntry pe;
  BufferOffset load =
      ab.allocEntry(1, 1, (uint8_t*)poolLoad, (uint8_t*)poolData, &pe);
  CHECK_EQUAL(pe.index(), 0u);
  CHECK_EQUAL(load.getOffset(), 0);

  // Pool hasn't been emitted yet. Load has been patched by
  // InsertIndexIntoTag.
  CHECK_EQUAL(*ab.getInst(load), 0xc1cc0000);

  // Expected layout:
  //
  //   0: load [pc+16]
  //   4: 0x22220001
  //   8: guard branch pc+12
  //  12: pool header
  //  16: poolData
  //  20: 0x22220002
  //
  ab.putInt(0x22220001);
  // One could argue that the pool should be flushed here since there is no
  // more room. However, the current implementation doesn't dump the pool
  // until asked to add data:
  ab.putInt(0x22220002);

  CHECK_EQUAL(*ab.getInst(BufferOffset(0)), 0xc2cc0010u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(4)), 0x22220001u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(8)), 0xb0bb000cu);
  CHECK_EQUAL(*ab.getInst(BufferOffset(12)), 0xffff0004u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(16)), 0xdddd0000u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(20)), 0x22220002u);

  // allocEntry() overwrites the load instruction! Restore the original.
  poolLoad[0] = 0xc0cc0000;

  // Now try with load and pool data on separate slices.
  load = ab.allocEntry(1, 1, (uint8_t*)poolLoad, (uint8_t*)poolData, &pe);
  CHECK_EQUAL(pe.index(), 1u);  // Global pool entry index.
  CHECK_EQUAL(load.getOffset(), 24);
  CHECK_EQUAL(*ab.getInst(load), 0xc1cc0000);  // Index into current pool.
  ab.putInt(0x22220001);
  ab.putInt(0x22220002);
  CHECK_EQUAL(*ab.getInst(BufferOffset(24)), 0xc2cc0010u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(28)), 0x22220001u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(32)), 0xb0bb000cu);
  CHECK_EQUAL(*ab.getInst(BufferOffset(36)), 0xffff0004u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(40)), 0xdddd0000u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(44)), 0x22220002u);

  // Two adjacent loads to the same pool.
  poolLoad[0] = 0xc0cc0000;
  load = ab.allocEntry(1, 1, (uint8_t*)poolLoad, (uint8_t*)poolData, &pe);
  CHECK_EQUAL(pe.index(), 2u);  // Global pool entry index.
  CHECK_EQUAL(load.getOffset(), 48);
  CHECK_EQUAL(*ab.getInst(load), 0xc1cc0000);  // Index into current pool.

  poolLoad[0] = 0xc0cc0000;
  load = ab.allocEntry(1, 1, (uint8_t*)poolLoad, (uint8_t*)(poolData + 1), &pe);
  CHECK_EQUAL(pe.index(), 3u);  // Global pool entry index.
  CHECK_EQUAL(load.getOffset(), 52);
  CHECK_EQUAL(*ab.getInst(load), 0xc1cc0001);  // Index into current pool.

  ab.putInt(0x22220005);

  CHECK_EQUAL(*ab.getInst(BufferOffset(48)), 0xc2cc0010u);  // load pc+16.
  CHECK_EQUAL(*ab.getInst(BufferOffset(52)), 0xc2cc0010u);  // load pc+16.
  CHECK_EQUAL(*ab.getInst(BufferOffset(56)),
              0xb0bb0010u);  // guard branch pc+16.
  CHECK_EQUAL(*ab.getInst(BufferOffset(60)), 0xffff0008u);  // header 8 bytes.
  CHECK_EQUAL(*ab.getInst(BufferOffset(64)), 0xdddd0000u);  // datum 1.
  CHECK_EQUAL(*ab.getInst(BufferOffset(68)), 0xdddd0001u);  // datum 2.
  CHECK_EQUAL(*ab.getInst(BufferOffset(72)),
              0x22220005u);  // putInt(0x22220005)

  // Two loads as above, but the first load has an 8-byte pool entry, and the
  // second load wouldn't be able to reach its data. This must produce two
  // pools.
  poolLoad[0] = 0xc0cc0000;
  load = ab.allocEntry(1, 2, (uint8_t*)poolLoad, (uint8_t*)(poolData + 2), &pe);
  CHECK_EQUAL(pe.index(), 4u);  // Global pool entry index.
  CHECK_EQUAL(load.getOffset(), 76);
  CHECK_EQUAL(*ab.getInst(load), 0xc1cc0000);  // Index into current pool.

  poolLoad[0] = 0xc0cc0000;
  load = ab.allocEntry(1, 1, (uint8_t*)poolLoad, (uint8_t*)poolData, &pe);
  // Global pool entry index. (The previous entry occupies two indexes.)
  CHECK_EQUAL(pe.index(), 6u);
  CHECK_EQUAL(load.getOffset(), 96);
  CHECK_EQUAL(*ab.getInst(load), 0xc1cc0000);  // Index into current pool.

  CHECK_EQUAL(*ab.getInst(BufferOffset(76)), 0xc2cc000cu);  // load pc+12.
  CHECK_EQUAL(*ab.getInst(BufferOffset(80)),
              0xb0bb0010u);  // guard branch pc+16.
  CHECK_EQUAL(*ab.getInst(BufferOffset(84)), 0xffff0008u);  // header 8 bytes.
  CHECK_EQUAL(*ab.getInst(BufferOffset(88)), 0xdddd0002u);  // datum 1.
  CHECK_EQUAL(*ab.getInst(BufferOffset(92)), 0xdddd0003u);  // datum 2.

  // The second pool is not flushed yet, and there is room for one instruction
  // after the load. Test the keep-together feature.
  ab.enterNoPool(2);
  ab.putInt(0x22220006);
  ab.putInt(0x22220007);
  ab.leaveNoPool();
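
  // The pending pool was dumped before the no-pool region, so the two
  // instructions stay contiguous, as the layout checks below show.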

  CHECK_EQUAL(*ab.getInst(BufferOffset(96)), 0xc2cc000cu);  // load pc+12.
  CHECK_EQUAL(*ab.getInst(BufferOffset(100)),
              0xb0bb000cu);  // guard branch pc+12.
  CHECK_EQUAL(*ab.getInst(BufferOffset(104)), 0xffff0004u);  // header 4 bytes.
  CHECK_EQUAL(*ab.getInst(BufferOffset(108)), 0xdddd0000u);  // datum 1.
  CHECK_EQUAL(*ab.getInst(BufferOffset(112)), 0x22220006u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(116)), 0x22220007u);

  return true;
}
END_TEST(testAssemblerBuffer_AssemblerBufferWithConstantPools)

BEGIN_TEST(testAssemblerBuffer_AssemblerBufferWithConstantPools_ShortBranch) {
  using js::jit::BufferOffset;

  AsmBufWithPool ab(/* guardSize= */ 1,
                    /* headerSize= */ 1,
                    /* instBufferAlign(unused)= */ 0,
                    /* poolMaxOffset= */ 17,
                    /* pcBias= */ 0,
                    /* alignFillInst= */ 0x11110000,
                    /* nopFillInst= */ 0xaaaa0000,
                    /* nopFill= */ 0);
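
  // Each short-range branch (0xb1bbxxxx) must be redirected to a veneer
  // (0xb2bbxxxx) before its registered deadline (branch offset + BranchRange)
  // passes; see TestAssembler::PatchShortRangeBranchToVeneer above.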

  // Insert short-range branch.
  BufferOffset br1 = ab.putInt(0xb1bb00cc);
  ab.registerBranchDeadline(
      1, BufferOffset(br1.getOffset() + TestAssembler::BranchRange));
  ab.putInt(0x22220001);
  BufferOffset off = ab.putInt(0x22220002);
  ab.registerBranchDeadline(
      1, BufferOffset(off.getOffset() + TestAssembler::BranchRange));
  ab.putInt(0x22220003);
  ab.putInt(0x22220004);

  // Second short-range branch that will be swept up by hysteresis.
  BufferOffset br2 = ab.putInt(0xb1bb0d2d);
  ab.registerBranchDeadline(
      1, BufferOffset(br2.getOffset() + TestAssembler::BranchRange));

  // Neither branch should have been patched yet.
  CHECK_EQUAL(*ab.getInst(br1), 0xb1bb00cc);
  CHECK_EQUAL(*ab.getInst(br2), 0xb1bb0d2d);

  // Cancel one of the pending branches.
  // This is what happens to most branches: they are bound by
  // Assembler::bind() before expiring.
  ab.unregisterBranchDeadline(
      1, BufferOffset(off.getOffset() + TestAssembler::BranchRange));

  off = ab.putInt(0x22220006);
  // Here we may or may not have patched the branch yet, but it is inevitable
  // now:
  //
  //   0: br1 pc+36
  //   4: 0x22220001
  //   8: 0x22220002 (unpatched)
  //  12: 0x22220003
  //  16: 0x22220004
  //  20: br2 pc+20
  //  24: 0x22220006
  CHECK_EQUAL(off.getOffset(), 24);
  //  28: guard branch pc+16
  //  32: pool header
  //  36: veneer1
  //  40: veneer2
  //  44: 0x22220007

  off = ab.putInt(0x22220007);
  CHECK_EQUAL(off.getOffset(), 44);

  // Now the branches must have been patched.
  CHECK_EQUAL(*ab.getInst(br1), 0xb3bb0000 + 36);  // br1 pc+36 (patched)
  CHECK_EQUAL(*ab.getInst(BufferOffset(8)),
              0x22220002u);  // 0x22220002 (unpatched)
  CHECK_EQUAL(*ab.getInst(br2), 0xb3bb0000 + 20);  // br2 pc+20 (patched)
  CHECK_EQUAL(*ab.getInst(BufferOffset(28)), 0xb0bb0010u);  // br pc+16 (guard)
  CHECK_EQUAL(*ab.getInst(BufferOffset(32)),
              0xffff0000u);  // pool header 0 bytes.
  CHECK_EQUAL(*ab.getInst(BufferOffset(36)),
              0xb2bb00ccu);  // veneer1 w/ original 'cc' offset.
  CHECK_EQUAL(*ab.getInst(BufferOffset(40)),
              0xb2bb0d2du);  // veneer2 w/ original 'd2d' offset.
  CHECK_EQUAL(*ab.getInst(BufferOffset(44)), 0x22220007u);

  return true;
}
END_TEST(testAssemblerBuffer_AssemblerBufferWithConstantPools_ShortBranch)

// Test that everything is put together correctly in the ARM64 assembler.
#if defined(JS_CODEGEN_ARM64)

#  include "jit/MacroAssembler-inl.h"

BEGIN_TEST(testAssemblerBuffer_ARM64) {
  using namespace js::jit;

  js::LifoAlloc lifo(4096);
  TempAllocator alloc(&lifo);
  JitContext jc(cx);
  StackMacroAssembler masm(cx, alloc);
  AutoCreatedBy acb(masm, __func__);

  // Branches to an unbound label.
  Label lab1;
  masm.branch(Assembler::Equal, &lab1);
  masm.branch(Assembler::LessThan, &lab1);
  masm.bind(&lab1);
  masm.branch(Assembler::Equal, &lab1);
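
  // ImmCondBranch counts in instructions: the first two branches are 2 and 1
  // instructions before lab1, and the last branch targets itself (offset 0).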
  CHECK_EQUAL(masm.getInstructionAt(BufferOffset(0))->InstructionBits(),
              vixl::B_cond | vixl::Assembler::ImmCondBranch(2) | vixl::eq);
  CHECK_EQUAL(masm.getInstructionAt(BufferOffset(4))->InstructionBits(),
              vixl::B_cond | vixl::Assembler::ImmCondBranch(1) | vixl::lt);
  CHECK_EQUAL(masm.getInstructionAt(BufferOffset(8))->InstructionBits(),
              vixl::B_cond | vixl::Assembler::ImmCondBranch(0) | vixl::eq);

  // Branches can reach the label, but the linked list of uses needs to be
  // rearranged. The final conditional branch cannot reach the first branch.
  Label lab2a;
  Label lab2b;
  masm.bind(&lab2a);
  masm.B(&lab2b);
  // Generate 1,100,000 bytes of NOPs.
  for (unsigned n = 0; n < 1100000; n += 4) {
    masm.Nop();
  }
  masm.branch(Assembler::LessThan, &lab2b);
  masm.bind(&lab2b);
  CHECK_EQUAL(
      masm.getInstructionAt(BufferOffset(lab2a.offset()))->InstructionBits(),
      vixl::B | vixl::Assembler::ImmUncondBranch(1100000 / 4 + 2));
  CHECK_EQUAL(masm.getInstructionAt(BufferOffset(lab2b.offset() - 4))
                  ->InstructionBits(),
              vixl::B_cond | vixl::Assembler::ImmCondBranch(1) | vixl::lt);

  // Generate a conditional branch that can't reach its label.
  Label lab3a;
  Label lab3b;
  masm.bind(&lab3a);
  masm.branch(Assembler::LessThan, &lab3b);
  for (unsigned n = 0; n < 1100000; n += 4) {
    masm.Nop();
  }
  masm.bind(&lab3b);
  masm.B(&lab3a);
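
  // The conditional branch at lab3a should have been redirected through a
  // nearby veneer that jumps the rest of the way to lab3b. Follow both hops
  // and check that they add up to the original distance.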
  Instruction* bcond3 = masm.getInstructionAt(BufferOffset(lab3a.offset()));
  CHECK_EQUAL(bcond3->BranchType(), vixl::CondBranchType);
  ptrdiff_t delta = bcond3->ImmPCRawOffset() * 4;
  Instruction* veneer =
      masm.getInstructionAt(BufferOffset(lab3a.offset() + delta));
  CHECK_EQUAL(veneer->BranchType(), vixl::UncondBranchType);
  delta += veneer->ImmPCRawOffset() * 4;
  CHECK_EQUAL(delta, lab3b.offset() - lab3a.offset());
  Instruction* b3 = masm.getInstructionAt(BufferOffset(lab3b.offset()));
  CHECK_EQUAL(b3->BranchType(), vixl::UncondBranchType);
  CHECK_EQUAL(4 * b3->ImmPCRawOffset(), -delta);

  return true;
}
END_TEST(testAssemblerBuffer_ARM64)
#endif /* JS_CODEGEN_ARM64 */