2008-09-27 Anders Carlsson <andersca@apple.com>
[webkit/qt.git] / JavaScriptCore / VM / CTI.cpp
blob8f001dbb670ac11734be537516fdca91eafcddd5
1 /*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 #include "config.h"
27 #include "CTI.h"
29 #if ENABLE(CTI)
31 #include "CodeBlock.h"
32 #include "JSArray.h"
33 #include "Machine.h"
34 #include "wrec/WREC.h"
35 #include "ResultType.h"
36 #if PLATFORM(MAC)
37 #include <sys/sysctl.h>
38 #endif
40 using namespace std;
42 namespace JSC {
44 #if PLATFORM(MAC)
45 bool isSSE3Present()
47 struct SSE3Check {
48 SSE3Check()
50 int hasSSE3 = 0;
51 size_t length = sizeof(hasSSE3);
52 int error = sysctlbyname("hw.optional.sse3", &hasSSE3, &length, NULL, 0);
53 present = hasSSE3 && !error;
55 bool present;
57 static SSE3Check check;
58 return check.present;
60 #else
61 bool isSSE3Present()
63 return false;
65 #endif
67 #if COMPILER(GCC) && PLATFORM(X86)
68 asm(
69 ".globl _ctiTrampoline" "\n"
70 "_ctiTrampoline:" "\n"
71 "pushl %esi" "\n"
72 "pushl %edi" "\n"
73 "subl $0x24, %esp" "\n"
74 "movl $512, %esi" "\n"
75 "call *0x30(%esp)" "\n" //Ox30 = 0x0C * 4, 0x0C = CTI_ARGS_code
76 "addl $0x24, %esp" "\n"
77 "popl %edi" "\n"
78 "popl %esi" "\n"
79 "ret" "\n"
82 asm(
83 ".globl _ctiVMThrowTrampoline" "\n"
84 "_ctiVMThrowTrampoline:" "\n"
85 #ifndef NDEBUG
86 "movl 0x34(%esp), %ecx" "\n" //Ox34 = 0x0D * 4, 0x0D = CTI_ARGS_exec
87 "cmpl $0, 8(%ecx)" "\n"
88 "jne 1f" "\n"
89 "int3" "\n"
90 "1:" "\n"
91 #endif
92 "call __ZN3JSC7Machine12cti_vm_throwEPv" "\n"
93 "addl $0x24, %esp" "\n"
94 "popl %edi" "\n"
95 "popl %esi" "\n"
96 "ret" "\n"
99 #elif COMPILER(MSVC)
100 extern "C"
103 __declspec(naked) JSValue* ctiTrampoline(void* code, ExecState* exec, RegisterFile* registerFile, Register* r, ScopeChainNode* scopeChain, JSValue** exception, Profiler**)
105 __asm {
106 push esi;
107 push edi;
108 sub esp, 0x24;
109 mov esi, 512;
110 mov [esp], esp;
111 call [esp + 0x30];
112 add esp, 0x24;
113 pop edi;
114 pop esi;
115 ret;
119 __declspec(naked) void ctiVMThrowTrampoline()
121 __asm {
122 mov [esp], esp;
123 call JSC::Machine::cti_vm_throw;
124 add esp, 0x24;
125 pop edi;
126 pop esi;
127 ret;
133 #endif
136 ALWAYS_INLINE bool CTI::isConstant(int src)
138 return src >= m_codeBlock->numVars && src < m_codeBlock->numVars + m_codeBlock->numConstants;
141 ALWAYS_INLINE JSValue* CTI::getConstant(ExecState* exec, int src)
143 return m_codeBlock->constantRegisters[src - m_codeBlock->numVars].jsValue(exec);
146 // get arg puts an arg from the SF register array into a h/w register
147 ALWAYS_INLINE void CTI::emitGetArg(unsigned src, X86Assembler::RegisterID dst)
149 // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
150 if (isConstant(src)) {
151 JSValue* js = getConstant(m_exec, src);
152 m_jit.movl_i32r(reinterpret_cast<unsigned>(js), dst);
153 } else
154 m_jit.movl_mr(src * sizeof(Register), X86::edi, dst);
157 // get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
158 ALWAYS_INLINE void CTI::emitGetPutArg(unsigned src, unsigned offset, X86Assembler::RegisterID scratch)
160 if (isConstant(src)) {
161 JSValue* js = getConstant(m_exec, src);
162 m_jit.movl_i32m(reinterpret_cast<unsigned>(js), offset + sizeof(void*), X86::esp);
163 } else {
164 m_jit.movl_mr(src * sizeof(Register), X86::edi, scratch);
165 m_jit.movl_rm(scratch, offset + sizeof(void*), X86::esp);
169 // puts an arg onto the stack, as an arg to a context threaded function.
170 ALWAYS_INLINE void CTI::emitPutArg(X86Assembler::RegisterID src, unsigned offset)
172 m_jit.movl_rm(src, offset + sizeof(void*), X86::esp);
175 ALWAYS_INLINE void CTI::emitPutArgConstant(unsigned value, unsigned offset)
177 m_jit.movl_i32m(value, offset + sizeof(void*), X86::esp);
180 ALWAYS_INLINE JSValue* CTI::getConstantImmediateNumericArg(unsigned src)
182 if (isConstant(src)) {
183 JSValue* js = getConstant(m_exec, src);
184 return JSImmediate::isNumber(js) ? js : 0;
186 return 0;
189 ALWAYS_INLINE void CTI::emitPutCTIParam(void* value, unsigned name)
191 m_jit.movl_i32m(reinterpret_cast<intptr_t>(value), name * sizeof(void*), X86::esp);
194 ALWAYS_INLINE void CTI::emitPutCTIParam(X86Assembler::RegisterID from, unsigned name)
196 m_jit.movl_rm(from, name * sizeof(void*), X86::esp);
199 ALWAYS_INLINE void CTI::emitGetCTIParam(unsigned name, X86Assembler::RegisterID to)
201 m_jit.movl_mr(name * sizeof(void*), X86::esp, to);
204 ALWAYS_INLINE void CTI::emitPutToCallFrameHeader(X86Assembler::RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
206 m_jit.movl_rm(from, entry * sizeof(Register), X86::edi);
209 ALWAYS_INLINE void CTI::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, X86Assembler::RegisterID to)
211 m_jit.movl_mr(entry * sizeof(Register), X86::edi, to);
214 ALWAYS_INLINE void CTI::emitPutResult(unsigned dst, X86Assembler::RegisterID from)
216 m_jit.movl_rm(from, dst * sizeof(Register), X86::edi);
217 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
220 ALWAYS_INLINE void CTI::emitInitRegister(unsigned dst)
222 m_jit.movl_i32m(reinterpret_cast<unsigned>(jsUndefined()), dst * sizeof(Register), X86::edi);
223 // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
226 #if ENABLE(SAMPLING_TOOL)
227 unsigned inCalledCode = 0;
228 #endif
// Overwrites a return-address slot on the machine stack, redirecting the
// caller to 'what' when it returns.
void ctiSetReturnAddress(void** where, void* what)
{
    *where = what;
}
// Repatches the x86 call instruction whose return address is 'where' so that
// it targets 'what'. The call's 32-bit operand sits immediately before the
// return address, and holds a displacement relative to the return address.
void ctiRepatchCallByReturnAddress(void* where, void* what)
{
    (static_cast<void**>(where))[-1] = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(what) - reinterpret_cast<uintptr_t>(where));
}
240 #ifdef NDEBUG
242 ALWAYS_INLINE void CTI::emitDebugExceptionCheck()
246 #else
248 ALWAYS_INLINE void CTI::emitDebugExceptionCheck()
250 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
251 m_jit.cmpl_i32m(0, OBJECT_OFFSET(ExecState, m_exception), X86::ecx);
252 X86Assembler::JmpSrc noException = m_jit.emitUnlinkedJe();
253 m_jit.emitInt3();
254 m_jit.link(noException, m_jit.label());
257 void CTI::printOpcodeOperandTypes(unsigned src1, unsigned src2)
259 char which1 = '*';
260 if (isConstant(src1)) {
261 JSValue* js = getConstant(m_exec, src1);
262 which1 =
263 JSImmediate::isImmediate(js) ?
264 (JSImmediate::isNumber(js) ? 'i' :
265 JSImmediate::isBoolean(js) ? 'b' :
266 js->isUndefined() ? 'u' :
267 js->isNull() ? 'n' : '?')
269 (js->isString() ? 's' :
270 js->isObject() ? 'o' :
271 'k');
273 char which2 = '*';
274 if (isConstant(src2)) {
275 JSValue* js = getConstant(m_exec, src2);
276 which2 =
277 JSImmediate::isImmediate(js) ?
278 (JSImmediate::isNumber(js) ? 'i' :
279 JSImmediate::isBoolean(js) ? 'b' :
280 js->isUndefined() ? 'u' :
281 js->isNull() ? 'n' : '?')
283 (js->isString() ? 's' :
284 js->isObject() ? 'o' :
285 'k');
287 if ((which1 != '*') | (which2 != '*'))
288 fprintf(stderr, "Types %c %c\n", which1, which2);
291 #endif
293 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, X86::RegisterID r)
295 X86Assembler::JmpSrc call = m_jit.emitCall(r);
296 m_calls.append(CallRecord(call, opcodeIndex));
297 emitDebugExceptionCheck();
299 return call;
302 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_j helper)
304 #if ENABLE(SAMPLING_TOOL)
305 m_jit.movl_i32m(1, &inCalledCode);
306 #endif
307 X86Assembler::JmpSrc call = m_jit.emitCall();
308 m_calls.append(CallRecord(call, helper, opcodeIndex));
309 emitDebugExceptionCheck();
310 #if ENABLE(SAMPLING_TOOL)
311 m_jit.movl_i32m(0, &inCalledCode);
312 #endif
314 return call;
317 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_p helper)
319 #if ENABLE(SAMPLING_TOOL)
320 m_jit.movl_i32m(1, &inCalledCode);
321 #endif
322 X86Assembler::JmpSrc call = m_jit.emitCall();
323 m_calls.append(CallRecord(call, helper, opcodeIndex));
324 emitDebugExceptionCheck();
325 #if ENABLE(SAMPLING_TOOL)
326 m_jit.movl_i32m(0, &inCalledCode);
327 #endif
329 return call;
332 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_b helper)
334 #if ENABLE(SAMPLING_TOOL)
335 m_jit.movl_i32m(1, &inCalledCode);
336 #endif
337 X86Assembler::JmpSrc call = m_jit.emitCall();
338 m_calls.append(CallRecord(call, helper, opcodeIndex));
339 emitDebugExceptionCheck();
340 #if ENABLE(SAMPLING_TOOL)
341 m_jit.movl_i32m(0, &inCalledCode);
342 #endif
344 return call;
347 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_v helper)
349 #if ENABLE(SAMPLING_TOOL)
350 m_jit.movl_i32m(1, &inCalledCode);
351 #endif
352 X86Assembler::JmpSrc call = m_jit.emitCall();
353 m_calls.append(CallRecord(call, helper, opcodeIndex));
354 emitDebugExceptionCheck();
355 #if ENABLE(SAMPLING_TOOL)
356 m_jit.movl_i32m(0, &inCalledCode);
357 #endif
359 return call;
362 ALWAYS_INLINE X86Assembler::JmpSrc CTI::emitCall(unsigned opcodeIndex, CTIHelper_s helper)
364 #if ENABLE(SAMPLING_TOOL)
365 m_jit.movl_i32m(1, &inCalledCode);
366 #endif
367 X86Assembler::JmpSrc call = m_jit.emitCall();
368 m_calls.append(CallRecord(call, helper, opcodeIndex));
369 emitDebugExceptionCheck();
370 #if ENABLE(SAMPLING_TOOL)
371 m_jit.movl_i32m(0, &inCalledCode);
372 #endif
374 return call;
377 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotJSCell(X86Assembler::RegisterID reg, unsigned opcodeIndex)
379 m_jit.testl_i32r(JSImmediate::TagMask, reg);
380 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), opcodeIndex));
383 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImmNum(X86Assembler::RegisterID reg, unsigned opcodeIndex)
385 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, reg);
386 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), opcodeIndex));
389 ALWAYS_INLINE void CTI::emitJumpSlowCaseIfNotImmNums(X86Assembler::RegisterID reg1, X86Assembler::RegisterID reg2, unsigned opcodeIndex)
391 m_jit.movl_rr(reg1, X86::ecx);
392 m_jit.andl_rr(reg2, X86::ecx);
393 emitJumpSlowCaseIfNotImmNum(X86::ecx, opcodeIndex);
396 ALWAYS_INLINE unsigned CTI::getDeTaggedConstantImmediate(JSValue* imm)
398 ASSERT(JSImmediate::isNumber(imm));
399 return reinterpret_cast<unsigned>(imm) & ~JSImmediate::TagBitTypeInteger;
402 ALWAYS_INLINE void CTI::emitFastArithDeTagImmediate(X86Assembler::RegisterID reg)
404 // op_mod relies on this being a sub - setting zf if result is 0.
405 m_jit.subl_i8r(JSImmediate::TagBitTypeInteger, reg);
408 ALWAYS_INLINE void CTI::emitFastArithReTagImmediate(X86Assembler::RegisterID reg)
410 m_jit.addl_i8r(JSImmediate::TagBitTypeInteger, reg);
413 ALWAYS_INLINE void CTI::emitFastArithPotentiallyReTagImmediate(X86Assembler::RegisterID reg)
415 m_jit.orl_i32r(JSImmediate::TagBitTypeInteger, reg);
418 ALWAYS_INLINE void CTI::emitFastArithImmToInt(X86Assembler::RegisterID reg)
420 m_jit.sarl_i8r(1, reg);
423 ALWAYS_INLINE void CTI::emitFastArithIntToImmOrSlowCase(X86Assembler::RegisterID reg, unsigned opcodeIndex)
425 m_jit.addl_rr(reg, reg);
426 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), opcodeIndex));
427 emitFastArithReTagImmediate(reg);
430 ALWAYS_INLINE void CTI::emitFastArithIntToImmNoCheck(X86Assembler::RegisterID reg)
432 m_jit.addl_rr(reg, reg);
433 emitFastArithReTagImmediate(reg);
436 ALWAYS_INLINE void CTI::emitTagAsBoolImmediate(X86Assembler::RegisterID reg)
438 m_jit.shl_i8r(JSImmediate::ExtendedPayloadShift, reg);
439 m_jit.orl_i32r(JSImmediate::FullTagTypeBool, reg);
442 CTI::CTI(Machine* machine, ExecState* exec, CodeBlock* codeBlock)
443 : m_jit(machine->jitCodeBuffer())
444 , m_machine(machine)
445 , m_exec(exec)
446 , m_codeBlock(codeBlock)
447 , m_labels(codeBlock ? codeBlock->instructions.size() : 0)
448 , m_structureStubCompilationInfo(codeBlock ? codeBlock->structureIDInstructions.size() : 0)
452 #define CTI_COMPILE_BINARY_OP(name) \
453 case name: { \
454 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
455 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
456 emitCall(i, Machine::cti_##name); \
457 emitPutResult(instruction[i + 1].u.operand); \
458 i += 4; \
459 break; \
462 #define CTI_COMPILE_UNARY_OP(name) \
463 case name: { \
464 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
465 emitCall(i, Machine::cti_##name); \
466 emitPutResult(instruction[i + 1].u.operand); \
467 i += 3; \
468 break; \
471 #if ENABLE(SAMPLING_TOOL)
472 OpcodeID currentOpcodeID = static_cast<OpcodeID>(-1);
473 #endif
475 void CTI::compileOpCall(Instruction* instruction, unsigned i, CompileOpCallType type)
477 int dst = instruction[i + 1].u.operand;
478 int firstArg = instruction[i + 4].u.operand;
479 int argCount = instruction[i + 5].u.operand;
480 int registerOffset = instruction[i + 6].u.operand;
482 if (type == OpCallEval)
483 emitGetPutArg(instruction[i + 3].u.operand, 16, X86::ecx);
485 if (type == OpConstruct) {
486 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 20);
487 emitPutArgConstant(argCount, 16);
488 emitPutArgConstant(registerOffset, 12);
489 emitPutArgConstant(firstArg, 8);
490 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
491 } else {
492 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 12);
493 emitPutArgConstant(argCount, 8);
494 emitPutArgConstant(registerOffset, 4);
496 int thisVal = instruction[i + 3].u.operand;
497 if (thisVal == missingThisObjectMarker()) {
498 // FIXME: should this be loaded dynamically off m_exec?
499 m_jit.movl_i32m(reinterpret_cast<unsigned>(m_exec->globalThisValue()), firstArg * sizeof(Register), X86::edi);
500 } else {
501 emitGetArg(thisVal, X86::ecx);
502 emitPutResult(firstArg, X86::ecx);
506 X86Assembler::JmpSrc wasEval;
507 if (type == OpCallEval) {
508 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
509 emitCall(i, Machine::cti_op_call_eval);
510 m_jit.emitRestoreArgumentReference();
512 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
514 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(JSImmediate::impossibleValue()), X86::eax);
515 wasEval = m_jit.emitUnlinkedJne();
517 // this sets up the first arg to op_cti_call (func), and explicitly leaves the value in ecx (checked just below).
518 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
519 } else {
520 // this sets up the first arg to op_cti_call (func), and explicitly leaves the value in ecx (checked just below).
521 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
524 // Fast check for JS function.
525 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
526 X86Assembler::JmpSrc isNotObject = m_jit.emitUnlinkedJne();
527 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsFunctionVptr), X86::ecx);
528 X86Assembler::JmpSrc isJSFunction = m_jit.emitUnlinkedJe();
529 m_jit.link(isNotObject, m_jit.label());
531 // This handles host functions
532 emitCall(i, ((type == OpConstruct) ? Machine::cti_op_construct_NotJSConstruct : Machine::cti_op_call_NotJSFunction));
534 X86Assembler::JmpSrc wasNotJSFunction = m_jit.emitUnlinkedJmp();
535 m_jit.link(isJSFunction, m_jit.label());
537 // This handles JSFunctions
538 emitCall(i, ((type == OpConstruct) ? Machine::cti_op_construct_JSConstruct : Machine::cti_op_call_JSFunction));
540 // Check the ctiCode has been generated - if not, this is handled in a slow case.
541 m_jit.testl_rr(X86::eax, X86::eax);
542 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
543 emitCall(i, X86::eax);
545 X86Assembler::JmpDst end = m_jit.label();
546 m_jit.link(wasNotJSFunction, end);
547 if (type == OpCallEval)
548 m_jit.link(wasEval, end);
550 // Put the return value in dst. In the interpreter, op_ret does this.
551 emitPutResult(dst);
554 void CTI::compileOpStrictEq(Instruction* instruction, unsigned i, CompileOpStrictEqType type)
556 bool negated = (type == OpNStrictEq);
558 unsigned dst = instruction[i + 1].u.operand;
559 unsigned src1 = instruction[i + 2].u.operand;
560 unsigned src2 = instruction[i + 3].u.operand;
562 emitGetArg(src1, X86::eax);
563 emitGetArg(src2, X86::edx);
565 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
566 X86Assembler::JmpSrc firstNotImmediate = m_jit.emitUnlinkedJe();
567 m_jit.testl_i32r(JSImmediate::TagMask, X86::edx);
568 X86Assembler::JmpSrc secondNotImmediate = m_jit.emitUnlinkedJe();
570 m_jit.cmpl_rr(X86::edx, X86::eax);
571 if (negated)
572 m_jit.setne_r(X86::eax);
573 else
574 m_jit.sete_r(X86::eax);
575 m_jit.movzbl_rr(X86::eax, X86::eax);
576 emitTagAsBoolImmediate(X86::eax);
578 X86Assembler::JmpSrc bothWereImmediates = m_jit.emitUnlinkedJmp();
580 m_jit.link(firstNotImmediate, m_jit.label());
582 // check that edx is immediate but not the zero immediate
583 m_jit.testl_i32r(JSImmediate::TagMask, X86::edx);
584 m_jit.setz_r(X86::ecx);
585 m_jit.movzbl_rr(X86::ecx, X86::ecx); // ecx is now 1 if edx was nonimmediate
586 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::edx);
587 m_jit.sete_r(X86::edx);
588 m_jit.movzbl_rr(X86::edx, X86::edx); // edx is now 1 if edx was the 0 immediate
589 m_jit.orl_rr(X86::ecx, X86::edx);
591 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
593 m_jit.movl_i32r(reinterpret_cast<uint32_t>(jsBoolean(negated)), X86::eax);
595 X86Assembler::JmpSrc firstWasNotImmediate = m_jit.emitUnlinkedJmp();
597 m_jit.link(secondNotImmediate, m_jit.label());
598 // check that eax is not the zero immediate (we know it must be immediate)
599 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
600 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
602 m_jit.movl_i32r(reinterpret_cast<uint32_t>(jsBoolean(negated)), X86::eax);
604 m_jit.link(bothWereImmediates, m_jit.label());
605 m_jit.link(firstWasNotImmediate, m_jit.label());
607 emitPutResult(dst);
610 void CTI::emitSlowScriptCheck(unsigned opcodeIndex)
612 m_jit.subl_i8r(1, X86::esi);
613 X86Assembler::JmpSrc skipTimeout = m_jit.emitUnlinkedJne();
614 emitCall(opcodeIndex, Machine::cti_timeout_check);
616 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
617 m_jit.movl_mr(OBJECT_OFFSET(ExecState, m_globalData), X86::ecx, X86::ecx);
618 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalData, machine), X86::ecx, X86::ecx);
619 m_jit.movl_mr(OBJECT_OFFSET(Machine, m_ticksUntilNextTimeoutCheck), X86::ecx, X86::esi);
620 m_jit.link(skipTimeout, m_jit.label());
624 This is required since number representation is canonical - values representable as a JSImmediate should not be stored in a JSNumberCell.
626 In the common case, the double value from 'xmmSource' is written to the reusable JSNumberCell pointed to by 'jsNumberCell', then 'jsNumberCell'
627 is written to the output SF Register 'dst', and then a jump is planted (stored into *wroteJSNumberCell).
629 However if the value from xmmSource is representable as a JSImmediate, then the JSImmediate value will be written to the output, and flow
630 control will fall through from the code planted.
632 void CTI::putDoubleResultToJSNumberCellOrJSImmediate(X86::XMMRegisterID xmmSource, X86::RegisterID jsNumberCell, unsigned dst, X86Assembler::JmpSrc* wroteJSNumberCell, X86::XMMRegisterID tempXmm, X86::RegisterID tempReg1, X86::RegisterID tempReg2)
634 // convert (double -> JSImmediate -> double), and check if the value is unchanged - in which case the value is representable as a JSImmediate.
635 m_jit.cvttsd2si_rr(xmmSource, tempReg1);
636 m_jit.addl_rr(tempReg1, tempReg1);
637 m_jit.sarl_i8r(1, tempReg1);
638 m_jit.cvtsi2sd_rr(tempReg1, tempXmm);
639 // Compare & branch if immediate.
640 m_jit.ucomis_rr(tempXmm, xmmSource);
641 X86Assembler::JmpSrc resultIsImm = m_jit.emitUnlinkedJe();
642 X86Assembler::JmpDst resultLookedLikeImmButActuallyIsnt = m_jit.label();
644 // Store the result to the JSNumberCell and jump.
645 m_jit.movsd_rm(xmmSource, OBJECT_OFFSET(JSNumberCell, m_value), jsNumberCell);
646 emitPutResult(dst, jsNumberCell);
647 *wroteJSNumberCell = m_jit.emitUnlinkedJmp();
649 m_jit.link(resultIsImm, m_jit.label());
650 // value == (double)(JSImmediate)value... or at least, it looks that way...
651 // ucomi will report that (0 == -0), and will report true if either input in NaN (result is unordered).
652 m_jit.link(m_jit.emitUnlinkedJp(), resultLookedLikeImmButActuallyIsnt); // Actually was a NaN
653 m_jit.pextrw_irr(3, xmmSource, tempReg2);
654 m_jit.cmpl_i32r(0x8000, tempReg2);
655 m_jit.link(m_jit.emitUnlinkedJe(), resultLookedLikeImmButActuallyIsnt); // Actually was -0
656 // Yes it really really really is representable as a JSImmediate.
657 emitFastArithIntToImmNoCheck(tempReg1);
658 emitPutResult(dst, X86::ecx);
661 void CTI::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types, unsigned i)
663 StructureID* numberStructureID = m_exec->globalData().numberStructureID.get();
664 X86Assembler::JmpSrc wasJSNumberCell1, wasJSNumberCell1b, wasJSNumberCell2, wasJSNumberCell2b;
666 emitGetArg(src1, X86::eax);
667 emitGetArg(src2, X86::edx);
669 if (types.second().isReusable() && isSSE3Present()) {
670 ASSERT(types.second().mightBeNumber());
672 // Check op2 is a number
673 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::edx);
674 X86Assembler::JmpSrc op2imm = m_jit.emitUnlinkedJne();
675 if (!types.second().definitelyIsNumber()) {
676 emitJumpSlowCaseIfNotJSCell(X86::edx, i);
677 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::edx);
678 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
681 // (1) In this case src2 is a reusable number cell.
682 // Slow case if src1 is not a number type.
683 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
684 X86Assembler::JmpSrc op1imm = m_jit.emitUnlinkedJne();
685 if (!types.first().definitelyIsNumber()) {
686 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
687 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
688 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
691 // (1a) if we get here, src1 is also a number cell
692 m_jit.movsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
693 X86Assembler::JmpSrc loadedDouble = m_jit.emitUnlinkedJmp();
694 // (1b) if we get here, src1 is an immediate
695 m_jit.link(op1imm, m_jit.label());
696 emitFastArithImmToInt(X86::eax);
697 m_jit.cvtsi2sd_rr(X86::eax, X86::xmm0);
698 // (1c)
699 m_jit.link(loadedDouble, m_jit.label());
700 if (opcodeID == op_add)
701 m_jit.addsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
702 else if (opcodeID == op_sub)
703 m_jit.subsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
704 else {
705 ASSERT(opcodeID == op_mul);
706 m_jit.mulsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
709 putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::edx, dst, &wasJSNumberCell2, X86::xmm1, X86::ecx, X86::eax);
710 wasJSNumberCell2b = m_jit.emitUnlinkedJmp();
712 // (2) This handles cases where src2 is an immediate number.
713 // Two slow cases - either src1 isn't an immediate, or the subtract overflows.
714 m_jit.link(op2imm, m_jit.label());
715 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
716 } else if (types.first().isReusable() && isSSE3Present()) {
717 ASSERT(types.first().mightBeNumber());
719 // Check op1 is a number
720 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
721 X86Assembler::JmpSrc op1imm = m_jit.emitUnlinkedJne();
722 if (!types.first().definitelyIsNumber()) {
723 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
724 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
725 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
728 // (1) In this case src1 is a reusable number cell.
729 // Slow case if src2 is not a number type.
730 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::edx);
731 X86Assembler::JmpSrc op2imm = m_jit.emitUnlinkedJne();
732 if (!types.second().definitelyIsNumber()) {
733 emitJumpSlowCaseIfNotJSCell(X86::edx, i);
734 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(numberStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::edx);
735 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
738 // (1a) if we get here, src2 is also a number cell
739 m_jit.movsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm1);
740 X86Assembler::JmpSrc loadedDouble = m_jit.emitUnlinkedJmp();
741 // (1b) if we get here, src2 is an immediate
742 m_jit.link(op2imm, m_jit.label());
743 emitFastArithImmToInt(X86::edx);
744 m_jit.cvtsi2sd_rr(X86::edx, X86::xmm1);
745 // (1c)
746 m_jit.link(loadedDouble, m_jit.label());
747 m_jit.movsd_mr(OBJECT_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
748 if (opcodeID == op_add)
749 m_jit.addsd_rr(X86::xmm1, X86::xmm0);
750 else if (opcodeID == op_sub)
751 m_jit.subsd_rr(X86::xmm1, X86::xmm0);
752 else {
753 ASSERT(opcodeID == op_mul);
754 m_jit.mulsd_rr(X86::xmm1, X86::xmm0);
756 m_jit.movsd_rm(X86::xmm0, OBJECT_OFFSET(JSNumberCell, m_value), X86::eax);
757 emitPutResult(dst);
759 putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::eax, dst, &wasJSNumberCell1, X86::xmm1, X86::ecx, X86::edx);
760 wasJSNumberCell1b = m_jit.emitUnlinkedJmp();
762 // (2) This handles cases where src1 is an immediate number.
763 // Two slow cases - either src2 isn't an immediate, or the subtract overflows.
764 m_jit.link(op1imm, m_jit.label());
765 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
766 } else
767 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
769 if (opcodeID == op_add) {
770 emitFastArithDeTagImmediate(X86::eax);
771 m_jit.addl_rr(X86::edx, X86::eax);
772 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
773 } else if (opcodeID == op_sub) {
774 m_jit.subl_rr(X86::edx, X86::eax);
775 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
776 emitFastArithReTagImmediate(X86::eax);
777 } else {
778 ASSERT(opcodeID == op_mul);
779 emitFastArithDeTagImmediate(X86::eax);
780 emitFastArithImmToInt(X86::edx);
781 m_jit.imull_rr(X86::edx, X86::eax);
782 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
783 emitFastArithReTagImmediate(X86::eax);
785 emitPutResult(dst);
787 if (types.second().isReusable() && isSSE3Present()) {
788 m_jit.link(wasJSNumberCell2, m_jit.label());
789 m_jit.link(wasJSNumberCell2b, m_jit.label());
791 else if (types.first().isReusable() && isSSE3Present()) {
792 m_jit.link(wasJSNumberCell1, m_jit.label());
793 m_jit.link(wasJSNumberCell1b, m_jit.label());
797 void CTI::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types, unsigned i)
799 X86Assembler::JmpDst here = m_jit.label();
800 m_jit.link(iter->from, here);
801 if (types.second().isReusable() && isSSE3Present()) {
802 if (!types.first().definitelyIsNumber()) {
803 m_jit.link((++iter)->from, here);
804 m_jit.link((++iter)->from, here);
806 if (!types.second().definitelyIsNumber()) {
807 m_jit.link((++iter)->from, here);
808 m_jit.link((++iter)->from, here);
810 m_jit.link((++iter)->from, here);
811 } else if (types.first().isReusable() && isSSE3Present()) {
812 if (!types.first().definitelyIsNumber()) {
813 m_jit.link((++iter)->from, here);
814 m_jit.link((++iter)->from, here);
816 if (!types.second().definitelyIsNumber()) {
817 m_jit.link((++iter)->from, here);
818 m_jit.link((++iter)->from, here);
820 m_jit.link((++iter)->from, here);
821 } else
822 m_jit.link((++iter)->from, here);
824 emitGetPutArg(src1, 0, X86::ecx);
825 emitGetPutArg(src2, 4, X86::ecx);
826 if (opcodeID == op_add)
827 emitCall(i, Machine::cti_op_add);
828 else if (opcodeID == op_sub)
829 emitCall(i, Machine::cti_op_sub);
830 else {
831 ASSERT(opcodeID == op_mul);
832 emitCall(i, Machine::cti_op_mul);
834 emitPutResult(dst);
837 void CTI::privateCompileMainPass()
839 Instruction* instruction = m_codeBlock->instructions.begin();
840 unsigned instructionCount = m_codeBlock->instructions.size();
842 unsigned structureIDInstructionIndex = 0;
844 for (unsigned i = 0; i < instructionCount; ) {
845 m_labels[i] = m_jit.label();
847 #if ENABLE(SAMPLING_TOOL)
848 m_jit.movl_i32m(m_machine->getOpcodeID(instruction[i].u.opcode), &currentOpcodeID);
849 #endif
851 ASSERT_WITH_MESSAGE(m_machine->isOpcode(instruction[i].u.opcode), "privateCompileMainPass gone bad @ %d", i);
852 m_jit.emitRestoreArgumentReference();
853 switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
854 case op_mov: {
855 unsigned src = instruction[i + 2].u.operand;
856 if (isConstant(src))
857 m_jit.movl_i32r(reinterpret_cast<unsigned>(getConstant(m_exec, src)), X86::edx);
858 else
859 emitGetArg(src, X86::edx);
860 emitPutResult(instruction[i + 1].u.operand, X86::edx);
861 i += 3;
862 break;
864 case op_add: {
865 unsigned dst = instruction[i + 1].u.operand;
866 unsigned src1 = instruction[i + 2].u.operand;
867 unsigned src2 = instruction[i + 3].u.operand;
869 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
870 emitGetArg(src2, X86::edx);
871 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
872 m_jit.addl_i32r(getDeTaggedConstantImmediate(value), X86::edx);
873 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
874 emitPutResult(dst, X86::edx);
875 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
876 emitGetArg(src1, X86::eax);
877 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
878 m_jit.addl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
879 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
880 emitPutResult(dst);
881 } else {
882 OperandTypes types = OperandTypes::fromInt(instruction[i + 4].u.operand);
883 if (types.first().mightBeNumber() && types.second().mightBeNumber())
884 compileBinaryArithOp(op_add, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
885 else {
886 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
887 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
888 emitCall(i, Machine::cti_op_add);
889 emitPutResult(instruction[i + 1].u.operand);
893 i += 5;
894 break;
896 case op_end: {
897 if (m_codeBlock->needsFullScopeChain)
898 emitCall(i, Machine::cti_op_end);
899 emitGetArg(instruction[i + 1].u.operand, X86::eax);
900 #if ENABLE(SAMPLING_TOOL)
901 m_jit.movl_i32m(-1, &currentOpcodeID);
902 #endif
903 m_jit.pushl_m(RegisterFile::ReturnPC * static_cast<int>(sizeof(Register)), X86::edi);
904 m_jit.ret();
905 i += 2;
906 break;
908 case op_jmp: {
909 unsigned target = instruction[i + 1].u.operand;
910 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
911 i += 2;
912 break;
914 case op_pre_inc: {
915 int srcDst = instruction[i + 1].u.operand;
916 emitGetArg(srcDst, X86::eax);
917 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
918 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
919 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
920 emitPutResult(srcDst, X86::eax);
921 i += 2;
922 break;
924 case op_loop: {
925 emitSlowScriptCheck(i);
927 unsigned target = instruction[i + 1].u.operand;
928 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 1 + target));
929 i += 2;
930 break;
932 case op_loop_if_less: {
933 emitSlowScriptCheck(i);
935 unsigned target = instruction[i + 3].u.operand;
936 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
937 if (src2imm) {
938 emitGetArg(instruction[i + 1].u.operand, X86::edx);
939 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
940 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
941 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
942 } else {
943 emitGetArg(instruction[i + 1].u.operand, X86::eax);
944 emitGetArg(instruction[i + 2].u.operand, X86::edx);
945 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
946 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
947 m_jit.cmpl_rr(X86::edx, X86::eax);
948 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJl(), i + 3 + target));
950 i += 4;
951 break;
953 case op_loop_if_lesseq: {
954 emitSlowScriptCheck(i);
956 unsigned target = instruction[i + 3].u.operand;
957 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
958 if (src2imm) {
959 emitGetArg(instruction[i + 1].u.operand, X86::edx);
960 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
961 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
962 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
963 } else {
964 emitGetArg(instruction[i + 1].u.operand, X86::eax);
965 emitGetArg(instruction[i + 2].u.operand, X86::edx);
966 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
967 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
968 m_jit.cmpl_rr(X86::edx, X86::eax);
969 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJle(), i + 3 + target));
971 i += 4;
972 break;
974 case op_new_object: {
975 emitCall(i, Machine::cti_op_new_object);
976 emitPutResult(instruction[i + 1].u.operand);
977 i += 2;
978 break;
980 case op_put_by_id: {
981 // In order to be able to repatch both the StructureID, and the object offset, we store one pointer,
982 // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
983 // such that the StructureID & offset are always at the same distance from this.
985 emitGetArg(instruction[i + 1].u.operand, X86::eax);
986 emitGetArg(instruction[i + 3].u.operand, X86::edx);
988 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
989 X86Assembler::JmpDst hotPathBegin = m_jit.label();
990 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
991 ++structureIDInstructionIndex;
993 // Jump to a slow case if either the base object is an immediate, or if the StructureID does not match.
994 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
995 // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
996 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
997 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdStructureID);
998 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1000                 // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
1001 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
1002 m_jit.movl_rm(X86::edx, repatchGetByIdDefaultOffset, X86::eax);
1003 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetPutByIdPropertyMapOffset);
1005 i += 8;
1006 break;
1008 case op_get_by_id: {
1009 // As for put_by_id, get_by_id requires the offset of the StructureID and the offset of the access to be repatched.
1010                 // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to jump
1011                 // to array-length / prototype access trampolines, and finally we also use the property-map access offset as a label
1012                 // to jump back to if one of these trampolines finds a match.
1014 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1016 ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
1018 X86Assembler::JmpDst hotPathBegin = m_jit.label();
1019 m_structureStubCompilationInfo[structureIDInstructionIndex].hotPathBegin = hotPathBegin;
1020 ++structureIDInstructionIndex;
1022 emitJumpSlowCaseIfNotJSCell(X86::eax, i);
1023 m_jit.cmpl_i32m(repatchGetByIdDefaultStructureID, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1024 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdStructureID);
1025 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1026 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdBranchToSlowCase);
1028 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
1029 m_jit.movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::ecx);
1030 ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, m_jit.label()) == repatchOffsetGetByIdPropertyMapOffset);
1031 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
1033 i += 8;
1034 break;
1036 case op_instanceof: {
1037 emitGetArg(instruction[i + 2].u.operand, X86::eax); // value
1038 emitGetArg(instruction[i + 3].u.operand, X86::ecx); // baseVal
1039 emitGetArg(instruction[i + 4].u.operand, X86::edx); // proto
1041 // check if any are immediates
1042 m_jit.orl_rr(X86::eax, X86::ecx);
1043 m_jit.orl_rr(X86::edx, X86::ecx);
1044 m_jit.testl_i32r(JSImmediate::TagMask, X86::ecx);
1046 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJnz(), i));
1048 // check that all are object type - this is a bit of a bithack to avoid excess branching;
1049 // we check that the sum of the three type codes from StructureIDs is exactly 3 * ObjectType,
1050 // this works because NumberType and StringType are smaller
1051 m_jit.movl_i32r(3 * ObjectType, X86::ecx);
1052 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::eax);
1053 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::edx, X86::edx);
1054 m_jit.subl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::eax, X86::ecx);
1055 m_jit.subl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::edx, X86::ecx);
1056 emitGetArg(instruction[i + 3].u.operand, X86::edx); // reload baseVal
1057 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::edx, X86::edx);
1058 m_jit.cmpl_rm(X86::ecx, OBJECT_OFFSET(StructureID, m_typeInfo.m_type), X86::edx);
1060 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1062 // check that baseVal's flags include ImplementsHasInstance but not OverridesHasInstance
1063 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::edx, X86::ecx);
1064 m_jit.andl_i32r(ImplementsHasInstance | OverridesHasInstance, X86::ecx);
1065 m_jit.cmpl_i32r(ImplementsHasInstance, X86::ecx);
1067 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1069 emitGetArg(instruction[i + 2].u.operand, X86::ecx); // reload value
1070 emitGetArg(instruction[i + 4].u.operand, X86::edx); // reload proto
1072 // optimistically load true result
1073 m_jit.movl_i32r(reinterpret_cast<int32_t>(jsBoolean(true)), X86::eax);
1075 X86Assembler::JmpDst loop = m_jit.label();
1077 // load value's prototype
1078 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
1079 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
1081 m_jit.cmpl_rr(X86::ecx, X86::edx);
1082 X86Assembler::JmpSrc exit = m_jit.emitUnlinkedJe();
1084 m_jit.cmpl_i32r(reinterpret_cast<int32_t>(jsNull()), X86::ecx);
1085 X86Assembler::JmpSrc goToLoop = m_jit.emitUnlinkedJne();
1086 m_jit.link(goToLoop, loop);
1088 m_jit.movl_i32r(reinterpret_cast<int32_t>(jsBoolean(false)), X86::eax);
1090 m_jit.link(exit, m_jit.label());
1092 emitPutResult(instruction[i + 1].u.operand);
1094 i += 5;
1095 break;
1097 case op_del_by_id: {
1098 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1099 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1100 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1101 emitCall(i, Machine::cti_op_del_by_id);
1102 emitPutResult(instruction[i + 1].u.operand);
1103 i += 4;
1104 break;
1106 case op_mul: {
1107 unsigned dst = instruction[i + 1].u.operand;
1108 unsigned src1 = instruction[i + 2].u.operand;
1109 unsigned src2 = instruction[i + 3].u.operand;
1111 if (JSValue* src1Value = getConstantImmediateNumericArg(src1)) {
1112 emitGetArg(src2, X86::eax);
1113 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1114 emitFastArithImmToInt(X86::eax);
1115 m_jit.imull_i32r(X86::eax, getDeTaggedConstantImmediate(src1Value), X86::eax);
1116 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1117 emitFastArithReTagImmediate(X86::eax);
1118 emitPutResult(dst);
1119 } else if (JSValue* src2Value = getConstantImmediateNumericArg(src2)) {
1120 emitGetArg(src1, X86::eax);
1121 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1122 emitFastArithImmToInt(X86::eax);
1123 m_jit.imull_i32r(X86::eax, getDeTaggedConstantImmediate(src2Value), X86::eax);
1124 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1125 emitFastArithReTagImmediate(X86::eax);
1126 emitPutResult(dst);
1127 } else
1128 compileBinaryArithOp(op_mul, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
1130 i += 5;
1131 break;
1133 case op_new_func: {
1134 FuncDeclNode* func = (m_codeBlock->functions[instruction[i + 2].u.operand]).get();
1135 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
1136 emitCall(i, Machine::cti_op_new_func);
1137 emitPutResult(instruction[i + 1].u.operand);
1138 i += 3;
1139 break;
1141 case op_call: {
1142 compileOpCall(instruction, i);
1143 i += 7;
1144 break;
1146 case op_get_global_var: {
1147 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 2].u.jsCell);
1148 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
1149 emitGetVariableObjectRegister(X86::eax, instruction[i + 3].u.operand, X86::eax);
1150 emitPutResult(instruction[i + 1].u.operand, X86::eax);
1151 i += 4;
1152 break;
1154 case op_put_global_var: {
1155 JSVariableObject* globalObject = static_cast<JSVariableObject*>(instruction[i + 1].u.jsCell);
1156 m_jit.movl_i32r(reinterpret_cast<unsigned>(globalObject), X86::eax);
1157 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1158 emitPutVariableObjectRegister(X86::edx, X86::eax, instruction[i + 2].u.operand);
1159 i += 4;
1160 break;
1162 case op_get_scoped_var: {
1163 int skip = instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain;
1165 emitGetCTIParam(CTI_ARGS_scopeChain, X86::eax);
1166 while (skip--)
1167 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::eax, X86::eax);
1169 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::eax, X86::eax);
1170 emitGetVariableObjectRegister(X86::eax, instruction[i + 2].u.operand, X86::eax);
1171 emitPutResult(instruction[i + 1].u.operand);
1172 i += 4;
1173 break;
1175 case op_put_scoped_var: {
1176 int skip = instruction[i + 2].u.operand + m_codeBlock->needsFullScopeChain;
1178 emitGetCTIParam(CTI_ARGS_scopeChain, X86::edx);
1179 emitGetArg(instruction[i + 3].u.operand, X86::eax);
1180 while (skip--)
1181 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, next), X86::edx, X86::edx);
1183 m_jit.movl_mr(OBJECT_OFFSET(ScopeChainNode, object), X86::edx, X86::edx);
1184 emitPutVariableObjectRegister(X86::eax, X86::edx, instruction[i + 1].u.operand);
1185 i += 4;
1186 break;
1188 case op_ret: {
1189 // Check for an activation - if there is one, jump to the hook below.
1190 m_jit.cmpl_i32m(0, RegisterFile::OptionalCalleeActivation * static_cast<int>(sizeof(Register)), X86::edi);
1191 X86Assembler::JmpSrc activation = m_jit.emitUnlinkedJne();
1192 X86Assembler::JmpDst activated = m_jit.label();
1194 // Check for a profiler - if there is one, jump to the hook below.
1195 emitGetCTIParam(CTI_ARGS_profilerReference, X86::eax);
1196 m_jit.cmpl_i32m(0, X86::eax);
1197 X86Assembler::JmpSrc profile = m_jit.emitUnlinkedJne();
1198 X86Assembler::JmpDst profiled = m_jit.label();
1200 // We could JIT generate the deref, only calling out to C when the refcount hits zero.
1201 if (m_codeBlock->needsFullScopeChain)
1202 emitCall(i, Machine::cti_op_ret_scopeChain);
1204 // Return the result in %eax.
1205 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1207 // Restore the scope chain.
1208 m_jit.movl_mr(RegisterFile::CallerScopeChain * static_cast<int>(sizeof(Register)), X86::edi, X86::edx);
1209 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
1210 emitPutCTIParam(X86::edx, CTI_ARGS_scopeChain);
1211 m_jit.movl_rm(X86::edx, OBJECT_OFFSET(ExecState, m_scopeChain), X86::ecx);
1213 // Restore ExecState::m_callFrame.
1214 m_jit.movl_rm(X86::edi, OBJECT_OFFSET(ExecState, m_callFrame), X86::ecx);
1216 // Grab the return address.
1217 m_jit.movl_mr(RegisterFile::ReturnPC * static_cast<int>(sizeof(Register)), X86::edi, X86::ecx);
1219             // Restore the machine return address from the callframe, roll the callframe back to the caller callframe,
1220 // and preserve a copy of r on the stack at CTI_ARGS_r.
1221 m_jit.movl_mr(RegisterFile::CallerRegisters * static_cast<int>(sizeof(Register)), X86::edi, X86::edi);
1222 emitPutCTIParam(X86::edi, CTI_ARGS_r);
1224 m_jit.pushl_r(X86::ecx);
1225 m_jit.ret();
1227 // Activation hook
1228 m_jit.link(activation, m_jit.label());
1229 emitCall(i, Machine::cti_op_ret_activation);
1230 m_jit.link(m_jit.emitUnlinkedJmp(), activated);
1232 // Profiling hook
1233 m_jit.link(profile, m_jit.label());
1234 emitCall(i, Machine::cti_op_ret_profiler);
1235 m_jit.link(m_jit.emitUnlinkedJmp(), profiled);
1237 i += 2;
1238 break;
1240 case op_new_array: {
1241 m_jit.leal_mr(sizeof(Register) * instruction[i + 2].u.operand, X86::edi, X86::edx);
1242 emitPutArg(X86::edx, 0);
1243 emitPutArgConstant(instruction[i + 3].u.operand, 4);
1244 emitCall(i, Machine::cti_op_new_array);
1245 emitPutResult(instruction[i + 1].u.operand);
1246 i += 4;
1247 break;
1249 case op_resolve: {
1250 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1251 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1252 emitCall(i, Machine::cti_op_resolve);
1253 emitPutResult(instruction[i + 1].u.operand);
1254 i += 3;
1255 break;
1257 case op_construct: {
1258 compileOpCall(instruction, i, OpConstruct);
1259 i += 7;
1260 break;
1262 case op_construct_verify: {
1263 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1265 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1266 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJne();
1267 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1268 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
1269 X86Assembler::JmpSrc isObject = m_jit.emitUnlinkedJe();
1271 m_jit.link(isImmediate, m_jit.label());
1272 emitGetArg(instruction[i + 2].u.operand, X86::ecx);
1273 emitPutResult(instruction[i + 1].u.operand, X86::ecx);
1274 m_jit.link(isObject, m_jit.label());
1276 i += 3;
1277 break;
1279 case op_get_by_val: {
1280 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1281 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1282 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1283 emitFastArithImmToInt(X86::edx);
1284 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1285 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1286 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
1287 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1289 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
1290 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
1291 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
1292 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
1294 // Get the value from the vector
1295 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::eax);
1296 emitPutResult(instruction[i + 1].u.operand);
1297 i += 4;
1298 break;
1300 case op_resolve_func: {
1301 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1302 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1303 emitCall(i, Machine::cti_op_resolve_func);
1304 emitPutResult(instruction[i + 1].u.operand);
1305 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
1306 emitPutResult(instruction[i + 2].u.operand);
1307 i += 4;
1308 break;
1310 case op_sub: {
1311 compileBinaryArithOp(op_sub, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
1312 i += 5;
1313 break;
1315 case op_put_by_val: {
1316 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1317 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1318 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1319 emitFastArithImmToInt(X86::edx);
1320 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1321 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1322 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
1323 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1325 // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
1326 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
1327 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSArray, m_fastAccessCutoff), X86::eax);
1328 X86Assembler::JmpSrc inFastVector = m_jit.emitUnlinkedJa();
1329             // No; oh well, check if the access is within the vector - if so, we may still be okay.
1330 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
1331 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJbe(), i));
1333 // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
1334             // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
1335 m_jit.cmpl_i8m(0, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
1336 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i));
1338 // All good - put the value into the array.
1339 m_jit.link(inFastVector, m_jit.label());
1340 emitGetArg(instruction[i + 3].u.operand, X86::eax);
1341 m_jit.movl_rm(X86::eax, OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*));
1342 i += 4;
1343 break;
1345 CTI_COMPILE_BINARY_OP(op_lesseq)
1346 case op_loop_if_true: {
1347 emitSlowScriptCheck(i);
1349 unsigned target = instruction[i + 2].u.operand;
1350 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1352 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1353 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1354 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1355 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1357 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1358 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1359 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1360 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1362 m_jit.link(isZero, m_jit.label());
1363 i += 3;
1364 break;
1366 case op_resolve_base: {
1367 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1368 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1369 emitCall(i, Machine::cti_op_resolve_base);
1370 emitPutResult(instruction[i + 1].u.operand);
1371 i += 3;
1372 break;
1374 case op_negate: {
1375 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1376 emitCall(i, Machine::cti_op_negate);
1377 emitPutResult(instruction[i + 1].u.operand);
1378 i += 3;
1379 break;
1381 case op_resolve_skip: {
1382 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1383 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1384 emitPutArgConstant(instruction[i + 3].u.operand + m_codeBlock->needsFullScopeChain, 4);
1385 emitCall(i, Machine::cti_op_resolve_skip);
1386 emitPutResult(instruction[i + 1].u.operand);
1387 i += 4;
1388 break;
1390 case op_resolve_global: {
1391 // Fast case
1392 unsigned globalObject = reinterpret_cast<unsigned>(instruction[i + 2].u.jsCell);
1393 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1394 void* structureIDAddr = reinterpret_cast<void*>(instruction + i + 4);
1395 void* offsetAddr = reinterpret_cast<void*>(instruction + i + 5);
1397 // Check StructureID of global object
1398 m_jit.movl_i32r(globalObject, X86::eax);
1399 m_jit.movl_mr(structureIDAddr, X86::edx);
1400 m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
1401 X86Assembler::JmpSrc slowCase = m_jit.emitUnlinkedJne(); // StructureIDs don't match
1402 m_slowCases.append(SlowCaseEntry(slowCase, i));
1404 // Load cached property
1405 m_jit.movl_mr(OBJECT_OFFSET(JSGlobalObject, m_propertyStorage), X86::eax, X86::eax);
1406 m_jit.movl_mr(offsetAddr, X86::edx);
1407 m_jit.movl_mr(0, X86::eax, X86::edx, sizeof(JSValue*), X86::eax);
1408 emitPutResult(instruction[i + 1].u.operand);
1409 X86Assembler::JmpSrc end = m_jit.emitUnlinkedJmp();
1411 // Slow case
1412 m_jit.link(slowCase, m_jit.label());
1413 emitPutArgConstant(globalObject, 0);
1414 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1415 emitPutArgConstant(reinterpret_cast<unsigned>(instruction + i), 8);
1416 emitCall(i, Machine::cti_op_resolve_global);
1417 emitPutResult(instruction[i + 1].u.operand);
1418 m_jit.link(end, m_jit.label());
1419 i += 6;
1420 ++structureIDInstructionIndex;
1421 break;
1423 CTI_COMPILE_BINARY_OP(op_div)
1424 case op_pre_dec: {
1425 int srcDst = instruction[i + 1].u.operand;
1426 emitGetArg(srcDst, X86::eax);
1427 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1428 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
1429 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1430 emitPutResult(srcDst, X86::eax);
1431 i += 2;
1432 break;
1434 case op_jnless: {
1435 unsigned target = instruction[i + 3].u.operand;
1436 JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
1437 if (src2imm) {
1438 emitGetArg(instruction[i + 1].u.operand, X86::edx);
1439 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1440 m_jit.cmpl_i32r(reinterpret_cast<unsigned>(src2imm), X86::edx);
1441 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1442 } else {
1443 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1444 emitGetArg(instruction[i + 2].u.operand, X86::edx);
1445 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1446 emitJumpSlowCaseIfNotImmNum(X86::edx, i);
1447 m_jit.cmpl_rr(X86::edx, X86::eax);
1448 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJge(), i + 3 + target));
1450 i += 4;
1451 break;
1453 case op_not: {
1454 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1455 m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
1456 m_jit.testl_i32r(JSImmediate::FullTagTypeMask, X86::eax); // i8?
1457 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1458 m_jit.xorl_i8r((JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue), X86::eax);
1459 emitPutResult(instruction[i + 1].u.operand);
1460 i += 3;
1461 break;
1463 case op_jfalse: {
1464 unsigned target = instruction[i + 2].u.operand;
1465 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1467 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1468 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1469 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1470 X86Assembler::JmpSrc isNonZero = m_jit.emitUnlinkedJne();
1472 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1473 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1474 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1475 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1477 m_jit.link(isNonZero, m_jit.label());
1478 i += 3;
1479 break;
1481 case op_post_inc: {
1482 int srcDst = instruction[i + 2].u.operand;
1483 emitGetArg(srcDst, X86::eax);
1484 m_jit.movl_rr(X86::eax, X86::edx);
1485 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1486 m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1487 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1488 emitPutResult(srcDst, X86::edx);
1489 emitPutResult(instruction[i + 1].u.operand);
1490 i += 3;
1491 break;
1493 case op_unexpected_load: {
1494 JSValue* v = m_codeBlock->unexpectedConstants[instruction[i + 2].u.operand];
1495 m_jit.movl_i32r(reinterpret_cast<unsigned>(v), X86::eax);
1496 emitPutResult(instruction[i + 1].u.operand);
1497 i += 3;
1498 break;
1500 case op_jsr: {
1501 int retAddrDst = instruction[i + 1].u.operand;
1502 int target = instruction[i + 2].u.operand;
1503 m_jit.movl_i32m(0, sizeof(Register) * retAddrDst, X86::edi);
1504 X86Assembler::JmpDst addrPosition = m_jit.label();
1505 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1506 X86Assembler::JmpDst sretTarget = m_jit.label();
1507 m_jsrSites.append(JSRInfo(addrPosition, sretTarget));
1508 i += 3;
1509 break;
1511 case op_sret: {
1512 m_jit.jmp_m(sizeof(Register) * instruction[i + 1].u.operand, X86::edi);
1513 i += 2;
1514 break;
1516 case op_eq: {
1517 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1518 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1519 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1520 m_jit.cmpl_rr(X86::edx, X86::eax);
1521 m_jit.sete_r(X86::eax);
1522 m_jit.movzbl_rr(X86::eax, X86::eax);
1523 emitTagAsBoolImmediate(X86::eax);
1524 emitPutResult(instruction[i + 1].u.operand);
1525 i += 4;
1526 break;
1528 case op_lshift: {
1529 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1530 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1531 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1532 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1533 emitFastArithImmToInt(X86::eax);
1534 emitFastArithImmToInt(X86::ecx);
1535 m_jit.shll_CLr(X86::eax);
1536 emitFastArithIntToImmOrSlowCase(X86::eax, i);
1537 emitPutResult(instruction[i + 1].u.operand);
1538 i += 4;
1539 break;
1541 case op_bitand: {
1542 unsigned src1 = instruction[i + 2].u.operand;
1543 unsigned src2 = instruction[i + 3].u.operand;
1544 unsigned dst = instruction[i + 1].u.operand;
1545 if (JSValue* value = getConstantImmediateNumericArg(src1)) {
1546 emitGetArg(src2, X86::eax);
1547 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1548 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax); // FIXME: make it more obvious this is relying on the format of JSImmediate
1549 emitPutResult(dst);
1550 } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
1551 emitGetArg(src1, X86::eax);
1552 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1553 m_jit.andl_i32r(reinterpret_cast<unsigned>(value), X86::eax);
1554 emitPutResult(dst);
1555 } else {
1556 emitGetArg(src1, X86::eax);
1557 emitGetArg(src2, X86::edx);
1558 m_jit.andl_rr(X86::edx, X86::eax);
1559 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1560 emitPutResult(dst);
1562 i += 5;
1563 break;
1565 case op_rshift: {
1566 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1567 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1568 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1569 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1570 emitFastArithImmToInt(X86::ecx);
1571 m_jit.sarl_CLr(X86::eax);
1572 emitFastArithPotentiallyReTagImmediate(X86::eax);
1573 emitPutResult(instruction[i + 1].u.operand);
1574 i += 4;
1575 break;
1577 case op_bitnot: {
1578 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1579 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1580 m_jit.xorl_i8r(~JSImmediate::TagBitTypeInteger, X86::eax);
1581 emitPutResult(instruction[i + 1].u.operand);
1582 i += 3;
1583 break;
1585 case op_resolve_with_base: {
1586 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
1587 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1588 emitCall(i, Machine::cti_op_resolve_with_base);
1589 emitPutResult(instruction[i + 1].u.operand);
1590 emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
1591 emitPutResult(instruction[i + 2].u.operand);
1592 i += 4;
1593 break;
1595 case op_new_func_exp: {
1596 FuncExprNode* func = (m_codeBlock->functionExpressions[instruction[i + 2].u.operand]).get();
1597 emitPutArgConstant(reinterpret_cast<unsigned>(func), 0);
1598 emitCall(i, Machine::cti_op_new_func_exp);
1599 emitPutResult(instruction[i + 1].u.operand);
1600 i += 3;
1601 break;
1603 case op_mod: {
1604 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1605 emitGetArg(instruction[i + 3].u.operand, X86::ecx);
1606 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1607 emitJumpSlowCaseIfNotImmNum(X86::ecx, i);
1608 emitFastArithDeTagImmediate(X86::eax);
1609 emitFastArithDeTagImmediate(X86::ecx);
1610 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJe(), i)); // This is checking if the last detag resulted in a value 0.
1611 m_jit.cdq();
1612 m_jit.idivl_r(X86::ecx);
1613 emitFastArithReTagImmediate(X86::edx);
1614 m_jit.movl_rr(X86::edx, X86::eax);
1615 emitPutResult(instruction[i + 1].u.operand);
1616 i += 4;
1617 break;
1619 case op_jtrue: {
1620 unsigned target = instruction[i + 2].u.operand;
1621 emitGetArg(instruction[i + 1].u.operand, X86::eax);
1623 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::zeroImmediate()), X86::eax);
1624 X86Assembler::JmpSrc isZero = m_jit.emitUnlinkedJe();
1625 m_jit.testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
1626 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJne(), i + 2 + target));
1628 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::trueImmediate()), X86::eax);
1629 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJe(), i + 2 + target));
1630 m_jit.cmpl_i32r(reinterpret_cast<uint32_t>(JSImmediate::falseImmediate()), X86::eax);
1631 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJne(), i));
1633 m_jit.link(isZero, m_jit.label());
1634 i += 3;
1635 break;
1637 CTI_COMPILE_BINARY_OP(op_less)
1638 case op_neq: {
1639 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1640 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1641 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1642 m_jit.cmpl_rr(X86::eax, X86::edx);
1644 m_jit.setne_r(X86::eax);
1645 m_jit.movzbl_rr(X86::eax, X86::eax);
1646 emitTagAsBoolImmediate(X86::eax);
1648 emitPutResult(instruction[i + 1].u.operand);
1650 i += 4;
1651 break;
1653 case op_post_dec: {
1654 int srcDst = instruction[i + 2].u.operand;
1655 emitGetArg(srcDst, X86::eax);
1656 m_jit.movl_rr(X86::eax, X86::edx);
1657 emitJumpSlowCaseIfNotImmNum(X86::eax, i);
1658 m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::edx);
1659 m_slowCases.append(SlowCaseEntry(m_jit.emitUnlinkedJo(), i));
1660 emitPutResult(srcDst, X86::edx);
1661 emitPutResult(instruction[i + 1].u.operand);
1662 i += 3;
1663 break;
1665 CTI_COMPILE_BINARY_OP(op_urshift)
1666 case op_bitxor: {
1667 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1668 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1669 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1670 m_jit.xorl_rr(X86::edx, X86::eax);
1671 emitFastArithReTagImmediate(X86::eax);
1672 emitPutResult(instruction[i + 1].u.operand);
1673 i += 5;
1674 break;
1676 case op_new_regexp: {
1677 RegExp* regExp = m_codeBlock->regexps[instruction[i + 2].u.operand].get();
1678 emitPutArgConstant(reinterpret_cast<unsigned>(regExp), 0);
1679 emitCall(i, Machine::cti_op_new_regexp);
1680 emitPutResult(instruction[i + 1].u.operand);
1681 i += 3;
1682 break;
1684 case op_bitor: {
1685 emitGetArg(instruction[i + 2].u.operand, X86::eax);
1686 emitGetArg(instruction[i + 3].u.operand, X86::edx);
1687 emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, i);
1688 m_jit.orl_rr(X86::edx, X86::eax);
1689 emitPutResult(instruction[i + 1].u.operand);
1690 i += 5;
1691 break;
1693 case op_call_eval: {
1694 compileOpCall(instruction, i, OpCallEval);
1695 i += 7;
1696 break;
1698 case op_throw: {
1699 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1700 emitCall(i, Machine::cti_op_throw);
1701 m_jit.addl_i8r(0x24, X86::esp);
1702 m_jit.popl_r(X86::edi);
1703 m_jit.popl_r(X86::esi);
1704 m_jit.ret();
1705 i += 2;
1706 break;
1708 case op_get_pnames: {
1709 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1710 emitCall(i, Machine::cti_op_get_pnames);
1711 emitPutResult(instruction[i + 1].u.operand);
1712 i += 3;
1713 break;
1715 case op_next_pname: {
1716 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1717 unsigned target = instruction[i + 3].u.operand;
1718 emitCall(i, Machine::cti_op_next_pname);
1719 m_jit.testl_rr(X86::eax, X86::eax);
1720 X86Assembler::JmpSrc endOfIter = m_jit.emitUnlinkedJe();
1721 emitPutResult(instruction[i + 1].u.operand);
1722 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 3 + target));
1723 m_jit.link(endOfIter, m_jit.label());
1724 i += 4;
1725 break;
1727 case op_push_scope: {
1728 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1729 emitCall(i, Machine::cti_op_push_scope);
1730 i += 2;
1731 break;
1733 case op_pop_scope: {
1734 emitCall(i, Machine::cti_op_pop_scope);
1735 i += 1;
1736 break;
1738 CTI_COMPILE_UNARY_OP(op_typeof)
1739 CTI_COMPILE_UNARY_OP(op_is_undefined)
1740 CTI_COMPILE_UNARY_OP(op_is_boolean)
1741 CTI_COMPILE_UNARY_OP(op_is_number)
1742 CTI_COMPILE_UNARY_OP(op_is_string)
1743 CTI_COMPILE_UNARY_OP(op_is_object)
1744 CTI_COMPILE_UNARY_OP(op_is_function)
1745 case op_stricteq: {
1746 compileOpStrictEq(instruction, i, OpStrictEq);
1747 i += 4;
1748 break;
1750 case op_nstricteq: {
1751 compileOpStrictEq(instruction, i, OpNStrictEq);
1752 i += 4;
1753 break;
1755 case op_to_jsnumber: {
1756 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1757 emitCall(i, Machine::cti_op_to_jsnumber);
1758 emitPutResult(instruction[i + 1].u.operand);
1759 i += 3;
1760 break;
1762 case op_in: {
1763 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1764 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1765 emitCall(i, Machine::cti_op_in);
1766 emitPutResult(instruction[i + 1].u.operand);
1767 i += 4;
1768 break;
1770 case op_push_new_scope: {
1771 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1772 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 0);
1773 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1774 emitCall(i, Machine::cti_op_push_new_scope);
1775 emitPutResult(instruction[i + 1].u.operand);
1776 i += 4;
1777 break;
1779 case op_catch: {
1780 emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
1781 emitGetCTIParam(CTI_ARGS_exec, X86::ecx);
1782 m_jit.movl_mr(OBJECT_OFFSET(ExecState, m_exception), X86::ecx, X86::eax);
1783 m_jit.movl_i32m(0, OBJECT_OFFSET(ExecState, m_exception), X86::ecx);
1784 emitPutResult(instruction[i + 1].u.operand);
1785 i += 2;
1786 break;
1788 case op_jmp_scopes: {
1789 unsigned count = instruction[i + 1].u.operand;
1790 emitPutArgConstant(count, 0);
1791 emitCall(i, Machine::cti_op_jmp_scopes);
1792 unsigned target = instruction[i + 2].u.operand;
1793 m_jmpTable.append(JmpTable(m_jit.emitUnlinkedJmp(), i + 2 + target));
1794 i += 3;
1795 break;
1797 case op_put_by_index: {
1798 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1799 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1800 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1801 emitCall(i, Machine::cti_op_put_by_index);
1802 i += 4;
1803 break;
1805 case op_switch_imm: {
1806 unsigned tableIndex = instruction[i + 1].u.operand;
1807 unsigned defaultOffset = instruction[i + 2].u.operand;
1808 unsigned scrutinee = instruction[i + 3].u.operand;
1810 // create jump table for switch destinations, track this switch statement.
1811 SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTables[tableIndex];
1812 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Immediate));
1813 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1815 emitGetPutArg(scrutinee, 0, X86::ecx);
1816 emitPutArgConstant(tableIndex, 4);
1817 emitCall(i, Machine::cti_op_switch_imm);
1818 m_jit.jmp_r(X86::eax);
1819 i += 4;
1820 break;
1822 case op_switch_char: {
1823 unsigned tableIndex = instruction[i + 1].u.operand;
1824 unsigned defaultOffset = instruction[i + 2].u.operand;
1825 unsigned scrutinee = instruction[i + 3].u.operand;
1827 // create jump table for switch destinations, track this switch statement.
1828 SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTables[tableIndex];
1829 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset, SwitchRecord::Character));
1830 jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
1832 emitGetPutArg(scrutinee, 0, X86::ecx);
1833 emitPutArgConstant(tableIndex, 4);
1834 emitCall(i, Machine::cti_op_switch_char);
1835 m_jit.jmp_r(X86::eax);
1836 i += 4;
1837 break;
1839 case op_switch_string: {
1840 unsigned tableIndex = instruction[i + 1].u.operand;
1841 unsigned defaultOffset = instruction[i + 2].u.operand;
1842 unsigned scrutinee = instruction[i + 3].u.operand;
1844 // create jump table for switch destinations, track this switch statement.
1845 StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTables[tableIndex];
1846 m_switches.append(SwitchRecord(jumpTable, i, defaultOffset));
1848 emitGetPutArg(scrutinee, 0, X86::ecx);
1849 emitPutArgConstant(tableIndex, 4);
1850 emitCall(i, Machine::cti_op_switch_string);
1851 m_jit.jmp_r(X86::eax);
1852 i += 4;
1853 break;
1855 case op_del_by_val: {
1856 emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
1857 emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
1858 emitCall(i, Machine::cti_op_del_by_val);
1859 emitPutResult(instruction[i + 1].u.operand);
1860 i += 4;
1861 break;
1863 case op_put_getter: {
1864 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1865 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1866 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1867 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1868 emitCall(i, Machine::cti_op_put_getter);
1869 i += 4;
1870 break;
1872 case op_put_setter: {
1873 emitGetPutArg(instruction[i + 1].u.operand, 0, X86::ecx);
1874 Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
1875 emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
1876 emitGetPutArg(instruction[i + 3].u.operand, 8, X86::ecx);
1877 emitCall(i, Machine::cti_op_put_setter);
1878 i += 4;
1879 break;
1881 case op_new_error: {
1882 JSValue* message = m_codeBlock->unexpectedConstants[instruction[i + 3].u.operand];
1883 emitPutArgConstant(instruction[i + 2].u.operand, 0);
1884 emitPutArgConstant(reinterpret_cast<unsigned>(message), 4);
1885 emitPutArgConstant(m_codeBlock->lineNumberForVPC(&instruction[i]), 8);
1886 emitCall(i, Machine::cti_op_new_error);
1887 emitPutResult(instruction[i + 1].u.operand);
1888 i += 4;
1889 break;
1891 case op_debug: {
1892 emitPutArgConstant(instruction[i + 1].u.operand, 0);
1893 emitPutArgConstant(instruction[i + 2].u.operand, 4);
1894 emitPutArgConstant(instruction[i + 3].u.operand, 8);
1895 emitCall(i, Machine::cti_op_debug);
1896 i += 4;
1897 break;
1899 case op_eq_null: {
1900 unsigned dst = instruction[i + 1].u.operand;
1901 unsigned src1 = instruction[i + 2].u.operand;
1903 emitGetArg(src1, X86::eax);
1904 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1905 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
1907 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1908 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
1909 m_jit.setnz_r(X86::eax);
1911 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
1913 m_jit.link(isImmediate, m_jit.label());
1915 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
1916 m_jit.andl_rr(X86::eax, X86::ecx);
1917 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
1918 m_jit.sete_r(X86::eax);
1920 m_jit.link(wasNotImmediate, m_jit.label());
1922 m_jit.movzbl_rr(X86::eax, X86::eax);
1923 emitTagAsBoolImmediate(X86::eax);
1924 emitPutResult(dst);
1926 i += 3;
1927 break;
1929 case op_neq_null: {
1930 unsigned dst = instruction[i + 1].u.operand;
1931 unsigned src1 = instruction[i + 2].u.operand;
1933 emitGetArg(src1, X86::eax);
1934 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
1935 X86Assembler::JmpSrc isImmediate = m_jit.emitUnlinkedJnz();
1937 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
1938 m_jit.testl_i32m(MasqueradesAsUndefined, OBJECT_OFFSET(StructureID, m_typeInfo.m_flags), X86::ecx);
1939 m_jit.setz_r(X86::eax);
1941 X86Assembler::JmpSrc wasNotImmediate = m_jit.emitUnlinkedJmp();
1943 m_jit.link(isImmediate, m_jit.label());
1945 m_jit.movl_i32r(~JSImmediate::ExtendedTagBitUndefined, X86::ecx);
1946 m_jit.andl_rr(X86::eax, X86::ecx);
1947 m_jit.cmpl_i32r(JSImmediate::FullTagTypeNull, X86::ecx);
1948 m_jit.setne_r(X86::eax);
1950 m_jit.link(wasNotImmediate, m_jit.label());
1952 m_jit.movzbl_rr(X86::eax, X86::eax);
1953 emitTagAsBoolImmediate(X86::eax);
1954 emitPutResult(dst);
1956 i += 3;
1957 break;
1959 case op_init: {
1960 // Even though CTI doesn't use them, we initialize our constant
1961 // registers to zap stale pointers, to avoid unnecessarily prolonging
1962 // object lifetime and increasing GC pressure.
1963 size_t count = m_codeBlock->numVars + m_codeBlock->constantRegisters.size();
1964 for (size_t j = 0; j < count; ++j)
1965 emitInitRegister(j);
1967 i+= 1;
1968 break;
1970 case op_init_activation: {
1971 emitCall(i, Machine::cti_op_push_activation);
1973 // Even though CTI doesn't use them, we initialize our constant
1974 // registers to zap stale pointers, to avoid unnecessarily prolonging
1975 // object lifetime and increasing GC pressure.
1976 size_t count = m_codeBlock->numVars + m_codeBlock->constantRegisters.size();
1977 for (size_t j = 0; j < count; ++j)
1978 emitInitRegister(j);
1980 i+= 1;
1981 break;
1983 case op_get_array_length:
1984 case op_get_by_id_chain:
1985 case op_get_by_id_generic:
1986 case op_get_by_id_proto:
1987 case op_get_by_id_self:
1988 case op_get_string_length:
1989 case op_put_by_id_generic:
1990 case op_put_by_id_replace:
1991 case op_put_by_id_transition:
1992 ASSERT_NOT_REACHED();
1996 ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
// Link pass: resolve the intra-function jumps recorded during the main pass.
// Each JmpTable entry pairs an unlinked jump source with the bytecode index of
// its target; by this point every bytecode index has a corresponding machine
// code label in m_labels, so each jump can be patched to its real destination.
2000 void CTI::privateCompileLinkPass()
2002     unsigned jmpTableCount = m_jmpTable.size();
2003     for (unsigned i = 0; i < jmpTableCount; ++i)
2004         m_jit.link(m_jmpTable[i].from, m_labels[m_jmpTable[i].to]);
// The table is cleared so the slow-case pass (which asserts it is empty at the
// end of privateCompile) starts from a clean slate.
2005     m_jmpTable.clear();
// Boilerplate slow case for a generic binary operator: link the single bailout
// jump recorded by the fast path, marshal both operands onto the CTI argument
// area (offsets 0 and 4), call the corresponding Machine::cti_<name> C++
// helper, store the result register, and advance past the 4-word opcode.
// NOTE: no comments may appear inside the macro body — a '//' line would
// comment out the trailing '\' continuation.
2008 #define CTI_COMPILE_BINARY_OP_SLOW_CASE(name) \
2009     case name: { \
2010         m_jit.link(iter->from, m_jit.label()); \
2011         emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx); \
2012         emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx); \
2013         emitCall(i, Machine::cti_##name); \
2014         emitPutResult(instruction[i + 1].u.operand); \
2015         i += 4; \
2016         break; \
// Slow-case pass: emits the out-of-line fallback code for every bailout jump
// recorded during the main (fast-path) pass. Each SlowCaseEntry carries the
// bytecode index (iter->to) of the opcode whose fast path may fail; opcodes
// that recorded several bailout jumps consume them here in the same order via
// (++iter)->from, so the per-case emission order below must exactly mirror
// the main pass's emission order for that opcode.
2019 void CTI::privateCompileSlowCases()
2021     unsigned structureIDInstructionIndex = 0;
2023     Instruction* instruction = m_codeBlock->instructions.begin();
2024     for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end(); ++iter) {
2025         unsigned i = iter->to;
2026         m_jit.emitRestoreArgumentReference();
2027         switch (m_machine->getOpcodeID(instruction[i].u.opcode)) {
2028         case op_add: {
2029             unsigned dst = instruction[i + 1].u.operand;
2030             unsigned src1 = instruction[i + 2].u.operand;
2031             unsigned src2 = instruction[i + 3].u.operand;
// When one operand was a constant immediate number, the fast path added the
// de-tagged constant directly into the register; if that add overflowed we
// arrive at the second bailout and must subtract the constant back out to
// recover the original operand value before calling the C++ helper.
2032             if (JSValue* value = getConstantImmediateNumericArg(src1)) {
2033                 X86Assembler::JmpSrc notImm = iter->from;
2034                 m_jit.link((++iter)->from, m_jit.label());
2035                 m_jit.subl_i32r(getDeTaggedConstantImmediate(value), X86::edx);
2036                 m_jit.link(notImm, m_jit.label());
2037                 emitGetPutArg(src1, 0, X86::ecx);
2038                 emitPutArg(X86::edx, 4);
2039                 emitCall(i, Machine::cti_op_add);
2040                 emitPutResult(dst);
2041             } else if (JSValue* value = getConstantImmediateNumericArg(src2)) {
2042                 X86Assembler::JmpSrc notImm = iter->from;
2043                 m_jit.link((++iter)->from, m_jit.label());
2044                 m_jit.subl_i32r(getDeTaggedConstantImmediate(value), X86::eax);
2045                 m_jit.link(notImm, m_jit.label());
2046                 emitPutArg(X86::eax, 0);
2047                 emitGetPutArg(src2, 4, X86::ecx);
2048                 emitCall(i, Machine::cti_op_add);
2049                 emitPutResult(dst);
2050             } else {
2051                 OperandTypes types = OperandTypes::fromInt(instruction[i + 4].u.operand);
2052                 if (types.first().mightBeNumber() && types.second().mightBeNumber())
2053                     compileBinaryArithOpSlowCase(op_add, iter, dst, src1, src2, types, i);
2054                 else
2055                     ASSERT_NOT_REACHED();
2058             i += 5;
2059             break;
2061         case op_get_by_val: {
2062             // The slow case that handles accesses to arrays (below) may jump back up to here.
2063             X86Assembler::JmpDst beginGetByValSlow = m_jit.label();
2065             X86Assembler::JmpSrc notImm = iter->from;
2066             m_jit.link((++iter)->from, m_jit.label());
2067             m_jit.link((++iter)->from, m_jit.label());
// The fast path de-tagged the index in edx; re-tag it before handing the
// value to the generic C++ helper (the notImm bailout skipped de-tagging).
2068             emitFastArithIntToImmNoCheck(X86::edx);
2069             m_jit.link(notImm, m_jit.label());
2070             emitPutArg(X86::eax, 0);
2071             emitPutArg(X86::edx, 4);
2072             emitCall(i, Machine::cti_op_get_by_val);
2073             emitPutResult(instruction[i + 1].u.operand);
2074             m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
2076             // This is slow case that handles accesses to arrays above the fast cut-off.
2077             // First, check if this is an access to the vector
2078             m_jit.link((++iter)->from, m_jit.label());
2079             m_jit.cmpl_rm(X86::edx, OBJECT_OFFSET(ArrayStorage, m_vectorLength), X86::ecx);
2080             m_jit.link(m_jit.emitUnlinkedJbe(), beginGetByValSlow);
2082             // okay, missed the fast region, but it is still in the vector. Get the value.
2083             m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_vector[0]), X86::ecx, X86::edx, sizeof(JSValue*), X86::ecx);
2084             // Check whether the value loaded is zero; if so we need to return undefined.
2085             m_jit.testl_rr(X86::ecx, X86::ecx);
2086             m_jit.link(m_jit.emitUnlinkedJe(), beginGetByValSlow);
2087             emitPutResult(instruction[i + 1].u.operand, X86::ecx);
2089             i += 4;
2090             break;
2092         case op_sub: {
2093             compileBinaryArithOpSlowCase(op_sub, iter, instruction[i + 1].u.operand, instruction[i + 2].u.operand, instruction[i + 3].u.operand, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
2094             i += 5;
2095             break;
2097         case op_rshift: {
2098             m_jit.link(iter->from, m_jit.label());
2099             m_jit.link((++iter)->from, m_jit.label());
2100             emitPutArg(X86::eax, 0);
2101             emitPutArg(X86::ecx, 4);
2102             emitCall(i, Machine::cti_op_rshift);
2103             emitPutResult(instruction[i + 1].u.operand);
2104             i += 4;
2105             break;
2107         case op_lshift: {
// Three bailouts: two "not an immediate number" checks (taken before the fast
// path clobbered the registers, so operands must be re-fetched) and one
// overflow check (registers still hold the original values).
2108             X86Assembler::JmpSrc notImm1 = iter->from;
2109             X86Assembler::JmpSrc notImm2 = (++iter)->from;
2110             m_jit.link((++iter)->from, m_jit.label());
2111             emitGetArg(instruction[i + 2].u.operand, X86::eax);
2112             emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2113             m_jit.link(notImm1, m_jit.label());
2114             m_jit.link(notImm2, m_jit.label());
2115             emitPutArg(X86::eax, 0);
2116             emitPutArg(X86::ecx, 4);
2117             emitCall(i, Machine::cti_op_lshift);
2118             emitPutResult(instruction[i + 1].u.operand);
2119             i += 4;
2120             break;
2122         case op_loop_if_less: {
2123             emitSlowScriptCheck(i);
2125             unsigned target = instruction[i + 3].u.operand;
2126             JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
// With a constant second operand the fast path recorded only one bailout;
// otherwise there are two (one per operand immediate check).
2127             if (src2imm) {
2128                 m_jit.link(iter->from, m_jit.label());
2129                 emitPutArg(X86::edx, 0);
2130                 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2131                 emitCall(i, Machine::cti_op_loop_if_less);
2132                 m_jit.testl_rr(X86::eax, X86::eax);
2133                 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
2134             } else {
2135                 m_jit.link(iter->from, m_jit.label());
2136                 m_jit.link((++iter)->from, m_jit.label());
2137                 emitPutArg(X86::eax, 0);
2138                 emitPutArg(X86::edx, 4);
2139                 emitCall(i, Machine::cti_op_loop_if_less);
2140                 m_jit.testl_rr(X86::eax, X86::eax);
2141                 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
2143             i += 4;
2144             break;
2146         case op_put_by_id: {
2147             m_jit.link(iter->from, m_jit.label());
2148             m_jit.link((++iter)->from, m_jit.label());
2150             Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 2].u.operand]);
2151             emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
2152             emitPutArg(X86::eax, 0);
2153             emitPutArg(X86::edx, 8);
2154             X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_put_by_id);
2156             // Track the location of the call; this will be used to recover repatch information.
2157             ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
2158             m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
2159             ++structureIDInstructionIndex;
2161             i += 8;
2162             break;
2164         case op_get_by_id: {
2165             // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
2166             // so that we only need track one pointer into the slow case code - we track a pointer to the location
2167             // of the call (which we can use to look up the repatch information), but should a array-length or
2168             // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
2169             // the distance from the call to the head of the slow case.
2171             m_jit.link(iter->from, m_jit.label());
2172             m_jit.link((++iter)->from, m_jit.label());
2174 #ifndef NDEBUG
2175             X86Assembler::JmpDst coldPathBegin = m_jit.label();
2176 #endif
2177             emitPutArg(X86::eax, 0);
2178             Identifier* ident = &(m_codeBlock->identifiers[instruction[i + 3].u.operand]);
2179             emitPutArgConstant(reinterpret_cast<unsigned>(ident), 4);
2180             X86Assembler::JmpSrc call = emitCall(i, Machine::cti_op_get_by_id);
2181             ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
2182             emitPutResult(instruction[i + 1].u.operand);
2184             // Track the location of the call; this will be used to recover repatch information.
2185             ASSERT(m_codeBlock->structureIDInstructions[structureIDInstructionIndex].opcodeIndex == i);
2186             m_structureStubCompilationInfo[structureIDInstructionIndex].callReturnLocation = call;
2187             ++structureIDInstructionIndex;
2189             i += 8;
2190             break;
// resolve_global has no slow-case code to emit, but it does occupy a slot in
// the structure-stub bookkeeping, so the index must still be advanced here.
2192         case op_resolve_global: {
2193             ++structureIDInstructionIndex;
2194             i += 6;
2195             break;
2197         case op_loop_if_lesseq: {
2198             emitSlowScriptCheck(i);
2200             unsigned target = instruction[i + 3].u.operand;
2201             JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
2202             if (src2imm) {
2203                 m_jit.link(iter->from, m_jit.label());
2204                 emitPutArg(X86::edx, 0);
2205                 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2206                 emitCall(i, Machine::cti_op_loop_if_lesseq);
2207                 m_jit.testl_rr(X86::eax, X86::eax);
2208                 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
2209             } else {
2210                 m_jit.link(iter->from, m_jit.label());
2211                 m_jit.link((++iter)->from, m_jit.label());
2212                 emitPutArg(X86::eax, 0);
2213                 emitPutArg(X86::edx, 4);
2214                 emitCall(i, Machine::cti_op_loop_if_lesseq);
2215                 m_jit.testl_rr(X86::eax, X86::eax);
2216                 m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 3 + target]);
2218             i += 4;
2219             break;
2221         case op_pre_inc: {
2222             unsigned srcDst = instruction[i + 1].u.operand;
// On overflow bailout the fast path already added the de-tagged 1; subtract
// it back so the helper sees the original value. The notImm bailout arrives
// with the value untouched and skips the correction.
2223             X86Assembler::JmpSrc notImm = iter->from;
2224             m_jit.link((++iter)->from, m_jit.label());
2225             m_jit.subl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
2226             m_jit.link(notImm, m_jit.label());
2227             emitPutArg(X86::eax, 0);
2228             emitCall(i, Machine::cti_op_pre_inc);
2229             emitPutResult(srcDst);
2230             i += 2;
2231             break;
2233         case op_put_by_val: {
2234             // Normal slow cases - either is not an immediate imm, or is an array.
2235             X86Assembler::JmpSrc notImm = iter->from;
2236             m_jit.link((++iter)->from, m_jit.label());
2237             m_jit.link((++iter)->from, m_jit.label());
2238             emitFastArithIntToImmNoCheck(X86::edx);
2239             m_jit.link(notImm, m_jit.label());
2240             emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2241             emitPutArg(X86::eax, 0);
2242             emitPutArg(X86::edx, 4);
2243             emitPutArg(X86::ecx, 8);
2244             emitCall(i, Machine::cti_op_put_by_val);
2245             m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i + 4]);
2247             // slow cases for immediate int accesses to arrays
2248             m_jit.link((++iter)->from, m_jit.label());
2249             m_jit.link((++iter)->from, m_jit.label());
2250             emitGetArg(instruction[i + 3].u.operand, X86::ecx);
2251             emitPutArg(X86::eax, 0);
2252             emitPutArg(X86::edx, 4);
2253             emitPutArg(X86::ecx, 8);
2254             emitCall(i, Machine::cti_op_put_by_val_array);
2256             i += 4;
2257             break;
2259         case op_loop_if_true: {
2260             emitSlowScriptCheck(i);
2262             m_jit.link(iter->from, m_jit.label());
2263             emitPutArg(X86::eax, 0);
2264             emitCall(i, Machine::cti_op_jtrue);
2265             m_jit.testl_rr(X86::eax, X86::eax);
2266             unsigned target = instruction[i + 2].u.operand;
2267             m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
2268             i += 3;
2269             break;
2271         case op_pre_dec: {
2272             unsigned srcDst = instruction[i + 1].u.operand;
2273             X86Assembler::JmpSrc notImm = iter->from;
2274             m_jit.link((++iter)->from, m_jit.label());
// Mirror of pre_inc: undo the fast path's subtraction on overflow bailout.
2275             m_jit.addl_i8r(getDeTaggedConstantImmediate(JSImmediate::oneImmediate()), X86::eax);
2276             m_jit.link(notImm, m_jit.label());
2277             emitPutArg(X86::eax, 0);
2278             emitCall(i, Machine::cti_op_pre_dec);
2279             emitPutResult(srcDst);
2280             i += 2;
2281             break;
2283         case op_jnless: {
2284             unsigned target = instruction[i + 3].u.operand;
2285             JSValue* src2imm = getConstantImmediateNumericArg(instruction[i + 2].u.operand);
2286             if (src2imm) {
2287                 m_jit.link(iter->from, m_jit.label());
2288                 emitPutArg(X86::edx, 0);
2289                 emitGetPutArg(instruction[i + 2].u.operand, 4, X86::ecx);
2290                 emitCall(i, Machine::cti_op_jless);
// jnless branches when "less" is false, hence the inverted je on the result.
2291                 m_jit.testl_rr(X86::eax, X86::eax);
2292                 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
2293             } else {
2294                 m_jit.link(iter->from, m_jit.label());
2295                 m_jit.link((++iter)->from, m_jit.label());
2296                 emitPutArg(X86::eax, 0);
2297                 emitPutArg(X86::edx, 4);
2298                 emitCall(i, Machine::cti_op_jless);
2299                 m_jit.testl_rr(X86::eax, X86::eax);
2300                 m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 3 + target]);
2302             i += 4;
2303             break;
2305         case op_not: {
2306             m_jit.link(iter->from, m_jit.label());
// The fast path xor'ed out the bool tag before the bailout check; restore it
// so the helper receives the original tagged value.
2307             m_jit.xorl_i8r(JSImmediate::FullTagTypeBool, X86::eax);
2308             emitPutArg(X86::eax, 0);
2309             emitCall(i, Machine::cti_op_not);
2310             emitPutResult(instruction[i + 1].u.operand);
2311             i += 3;
2312             break;
2314         case op_jfalse: {
2315             m_jit.link(iter->from, m_jit.label());
2316             emitPutArg(X86::eax, 0);
2317             emitCall(i, Machine::cti_op_jtrue);
2318             m_jit.testl_rr(X86::eax, X86::eax);
2319             unsigned target = instruction[i + 2].u.operand;
2320             m_jit.link(m_jit.emitUnlinkedJe(), m_labels[i + 2 + target]); // inverted!
2321             i += 3;
2322             break;
2324         case op_post_inc: {
2325             unsigned srcDst = instruction[i + 2].u.operand;
2326             m_jit.link(iter->from, m_jit.label());
2327             m_jit.link((++iter)->from, m_jit.label());
2328             emitPutArg(X86::eax, 0);
2329             emitCall(i, Machine::cti_op_post_inc);
2330             emitPutResult(instruction[i + 1].u.operand);
// The helper returns two values; the incremented source comes back via the
// CTI_ARGS_2ndResult slot and is written to the srcDst register.
2331             emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
2332             emitPutResult(srcDst);
2333             i += 3;
2334             break;
2336         case op_bitnot: {
2337             m_jit.link(iter->from, m_jit.label());
2338             emitPutArg(X86::eax, 0);
2339             emitCall(i, Machine::cti_op_bitnot);
2340             emitPutResult(instruction[i + 1].u.operand);
2341             i += 3;
2342             break;
2344         case op_bitand: {
2345             unsigned src1 = instruction[i + 2].u.operand;
2346             unsigned src2 = instruction[i + 3].u.operand;
2347             unsigned dst = instruction[i + 1].u.operand;
2348             if (getConstantImmediateNumericArg(src1)) {
2349                 m_jit.link(iter->from, m_jit.label());
2350                 emitGetPutArg(src1, 0, X86::ecx);
2351                 emitPutArg(X86::eax, 4);
2352                 emitCall(i, Machine::cti_op_bitand);
2353                 emitPutResult(dst);
2354             } else if (getConstantImmediateNumericArg(src2)) {
2355                 m_jit.link(iter->from, m_jit.label());
2356                 emitPutArg(X86::eax, 0);
2357                 emitGetPutArg(src2, 4, X86::ecx);
2358                 emitCall(i, Machine::cti_op_bitand);
2359                 emitPutResult(dst);
2360             } else {
2361                 m_jit.link(iter->from, m_jit.label());
2362                 emitGetPutArg(src1, 0, X86::ecx);
2363                 emitPutArg(X86::edx, 4);
2364                 emitCall(i, Machine::cti_op_bitand);
2365                 emitPutResult(dst);
2367             i += 5;
2368             break;
2370         case op_jtrue: {
2371             m_jit.link(iter->from, m_jit.label());
2372             emitPutArg(X86::eax, 0);
2373             emitCall(i, Machine::cti_op_jtrue);
2374             m_jit.testl_rr(X86::eax, X86::eax);
2375             unsigned target = instruction[i + 2].u.operand;
2376             m_jit.link(m_jit.emitUnlinkedJne(), m_labels[i + 2 + target]);
2377             i += 3;
2378             break;
2380         case op_post_dec: {
2381             unsigned srcDst = instruction[i + 2].u.operand;
2382             m_jit.link(iter->from, m_jit.label());
2383             m_jit.link((++iter)->from, m_jit.label());
2384             emitPutArg(X86::eax, 0);
2385             emitCall(i, Machine::cti_op_post_dec);
2386             emitPutResult(instruction[i + 1].u.operand);
2387             emitGetCTIParam(CTI_ARGS_2ndResult, X86::eax);
2388             emitPutResult(srcDst);
2389             i += 3;
2390             break;
2392         case op_bitxor: {
2393             m_jit.link(iter->from, m_jit.label());
2394             emitPutArg(X86::eax, 0);
2395             emitPutArg(X86::edx, 4);
2396             emitCall(i, Machine::cti_op_bitxor);
2397             emitPutResult(instruction[i + 1].u.operand);
2398             i += 5;
2399             break;
2401         case op_bitor: {
2402             m_jit.link(iter->from, m_jit.label());
2403             emitPutArg(X86::eax, 0);
2404             emitPutArg(X86::edx, 4);
2405             emitCall(i, Machine::cti_op_bitor);
2406             emitPutResult(instruction[i + 1].u.operand);
2407             i += 5;
2408             break;
2410         case op_eq: {
2411             m_jit.link(iter->from, m_jit.label());
2412             emitPutArg(X86::eax, 0);
2413             emitPutArg(X86::edx, 4);
2414             emitCall(i, Machine::cti_op_eq);
2415             emitPutResult(instruction[i + 1].u.operand);
2416             i += 4;
2417             break;
2419         case op_neq: {
2420             m_jit.link(iter->from, m_jit.label());
2421             emitPutArg(X86::eax, 0);
2422             emitPutArg(X86::edx, 4);
2423             emitCall(i, Machine::cti_op_neq);
2424             emitPutResult(instruction[i + 1].u.operand);
2425             i += 4;
2426             break;
2428         CTI_COMPILE_BINARY_OP_SLOW_CASE(op_stricteq);
2429         CTI_COMPILE_BINARY_OP_SLOW_CASE(op_nstricteq);
2430         case op_instanceof: {
2431             m_jit.link(iter->from, m_jit.label());
2432             emitGetPutArg(instruction[i + 2].u.operand, 0, X86::ecx);
2433             emitGetPutArg(instruction[i + 3].u.operand, 4, X86::ecx);
2434             emitGetPutArg(instruction[i + 4].u.operand, 8, X86::ecx);
2435             emitCall(i, Machine::cti_op_instanceof);
2436             emitPutResult(instruction[i + 1].u.operand);
2437             i += 5;
2438             break;
2440         case op_mod: {
// On the third bailout (division result not representable) both registers
// hold de-tagged ints; re-tag them before calling the helper. The two notImm
// bailouts skip the re-tag since their values were never de-tagged.
2441             X86Assembler::JmpSrc notImm1 = iter->from;
2442             X86Assembler::JmpSrc notImm2 = (++iter)->from;
2443             m_jit.link((++iter)->from, m_jit.label());
2444             emitFastArithReTagImmediate(X86::eax);
2445             emitFastArithReTagImmediate(X86::ecx);
2446             m_jit.link(notImm1, m_jit.label());
2447             m_jit.link(notImm2, m_jit.label());
2448             emitPutArg(X86::eax, 0);
2449             emitPutArg(X86::ecx, 4);
2450             emitCall(i, Machine::cti_op_mod);
2451             emitPutResult(instruction[i + 1].u.operand);
2452             i += 4;
2453             break;
2455         case op_mul: {
2456             int dst = instruction[i + 1].u.operand;
2457             int src1 = instruction[i + 2].u.operand;
2458             int src2 = instruction[i + 3].u.operand;
2459             if (getConstantImmediateNumericArg(src1) || getConstantImmediateNumericArg(src2)) {
2460                 m_jit.link(iter->from, m_jit.label());
2461                 emitGetPutArg(src1, 0, X86::ecx);
2462                 emitGetPutArg(src2, 4, X86::ecx);
2463                 emitCall(i, Machine::cti_op_mul);
2464                 emitPutResult(dst);
2465             } else
2466                 compileBinaryArithOpSlowCase(op_mul, iter, dst, src1, src2, OperandTypes::fromInt(instruction[i + 4].u.operand), i);
2467             i += 5;
2468             break;
2471         case op_call:
2472         case op_call_eval:
2473         case op_construct: {
2474             m_jit.link(iter->from, m_jit.label());
2475             m_jit.emitRestoreArgumentReference();
2477             // We jump to this slow case if the ctiCode for the codeBlock has not yet been generated; compile it now.
2478             emitCall(i, Machine::cti_vm_compile);
// cti_vm_compile returns the freshly generated code address in eax; call it.
2479             emitCall(i, X86::eax);
2481             // Instead of checking for 0 we could initialize the CodeBlock::ctiCode to point to a trampoline that would trigger the translation.
2483             // Put the return value in dst. In the interpreter, op_ret does this.
2484             emitPutResult(instruction[i + 1].u.operand);
2485             i += 7;
2486             break;
2489         default:
2490             ASSERT_NOT_REACHED();
2491             break;
// Every slow case ends by jumping back to the fast-path code for the next
// opcode (i was advanced past the current opcode's operands above).
2494         m_jit.link(m_jit.emitUnlinkedJmp(), m_labels[i]);
2497     ASSERT(structureIDInstructionIndex == m_codeBlock->structureIDInstructions.size());
// Top-level driver: emits the function prologue, runs the three code
// generation passes (main, link, slow cases), copies the generated buffer,
// and then performs all relocation: switch tables, exception handlers,
// external calls, jsr targets, and property-access stub bookkeeping.
2500 void CTI::privateCompile()
2502     // Could use a popl_m, but would need to offset the following instruction if so.
2503     m_jit.popl_r(X86::ecx);
2504     emitGetCTIParam(CTI_ARGS_r, X86::edi); // edi := r
// Stash the native return address into the call frame header so the VM can
// recover the caller when unwinding or returning.
2505     emitPutToCallFrameHeader(X86::ecx, RegisterFile::ReturnPC);
2507     privateCompileMainPass();
2508     privateCompileLinkPass();
2509     privateCompileSlowCases();
2511     ASSERT(m_jmpTable.isEmpty());
2513     void* code = m_jit.copy();
2514     ASSERT(code);
2516     // Translate vPC offsets into addresses in JIT generated code, for switch tables.
2517     for (unsigned i = 0; i < m_switches.size(); ++i) {
2518         SwitchRecord record = m_switches[i];
2519         unsigned opcodeIndex = record.m_opcodeIndex;
2521         if (record.m_type != SwitchRecord::String) {
2522             ASSERT(record.m_type == SwitchRecord::Immediate || record.m_type == SwitchRecord::Character);
2523             ASSERT(record.m_jumpTable.m_simpleJumpTable->branchOffsets.size() == record.m_jumpTable.m_simpleJumpTable->ctiOffsets.size());
// Branch offsets are relative to the instruction after the switch's three
// operands (opcodeIndex + 3); an offset of zero means "use the default".
2525             record.m_jumpTable.m_simpleJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
2527             for (unsigned j = 0; j < record.m_jumpTable.m_simpleJumpTable->branchOffsets.size(); ++j) {
2528                 unsigned offset = record.m_jumpTable.m_simpleJumpTable->branchOffsets[j];
2529                 record.m_jumpTable.m_simpleJumpTable->ctiOffsets[j] = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_simpleJumpTable->ctiDefault;
2531         } else {
2532             ASSERT(record.m_type == SwitchRecord::String);
2534             record.m_jumpTable.m_stringJumpTable->ctiDefault = m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + record.m_defaultOffset]);
2536             StringJumpTable::StringOffsetTable::iterator end = record.m_jumpTable.m_stringJumpTable->offsetTable.end();
2537             for (StringJumpTable::StringOffsetTable::iterator it = record.m_jumpTable.m_stringJumpTable->offsetTable.begin(); it != end; ++it) {
2538                 unsigned offset = it->second.branchOffset;
2539                 it->second.ctiOffset = offset ? m_jit.getRelocatedAddress(code, m_labels[opcodeIndex + 3 + offset]) : record.m_jumpTable.m_stringJumpTable->ctiDefault;
// Resolve exception handler targets to their native-code addresses.
2544     for (Vector<HandlerInfo>::iterator iter = m_codeBlock->exceptionHandlers.begin(); iter != m_codeBlock->exceptionHandlers.end(); ++iter)
2545         iter->nativeCode = m_jit.getRelocatedAddress(code, m_labels[iter->target]);
// Link every recorded call to its C++ helper, and record the mapping from
// native return address back to vPC (used to recover the bytecode location
// from inside a helper, e.g. for exceptions and repatching).
2547     for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
2548         if (iter->to)
2549             X86Assembler::link(code, iter->from, iter->to);
2550         m_codeBlock->ctiReturnAddressVPCMap.add(m_jit.getRelocatedAddress(code, iter->from), iter->opcodeIndex);
2553     // Link absolute addresses for jsr
2554     for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
2555         X86Assembler::linkAbsoluteAddress(code, iter->addrPosition, iter->target);
// Publish the resolved call-return / hot-path locations gathered during
// compilation into the code block's structure stub info for later repatching.
2557     for (unsigned i = 0; i < m_codeBlock->structureIDInstructions.size(); ++i) {
2558         StructureStubInfo& info = m_codeBlock->structureIDInstructions[i];
2559         info.callReturnLocation = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].callReturnLocation);
2560         info.hotPathBegin = X86Assembler::getRelocatedAddress(code, m_structureStubCompilationInfo[i].hotPathBegin);
2563     m_codeBlock->ctiCode = code;
2566 void CTI::privateCompileGetByIdSelf(StructureID* structureID, size_t cachedOffset, void* returnAddress)
2568 // Check eax is an object of the right StructureID.
2569 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2570 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2571 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2572 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2574 // Checks out okay! - getDirectOffset
2575 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2576 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::eax, X86::eax);
2577 m_jit.ret();
2579 void* code = m_jit.copy();
2580 ASSERT(code);
2582 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2583 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2585 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2587 ctiRepatchCallByReturnAddress(returnAddress, code);
2590 void CTI::privateCompileGetByIdProto(StructureID* structureID, StructureID* prototypeStructureID, size_t cachedOffset, void* returnAddress)
2592 #if USE(CTI_REPATCH_PIC)
2593 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
2595 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
2596 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2598 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
2599 // referencing the prototype object - let's speculatively load it's table nice and early!)
2600 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
2601 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2602 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2604 // check eax is an object of the right StructureID.
2605 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2606 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2607 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2608 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2610 // Check the prototype object's StructureID had not changed.
2611 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2612 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
2613 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
2615 // Checks out okay! - getDirectOffset
2616 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::ecx);
2618 X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
2620 void* code = m_jit.copy();
2621 ASSERT(code);
2623 // Use the repatch information to link the failure cases back to the original slow case routine.
2624 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
2625 X86Assembler::link(code, failureCases1, slowCaseBegin);
2626 X86Assembler::link(code, failureCases2, slowCaseBegin);
2627 X86Assembler::link(code, failureCases3, slowCaseBegin);
2629 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
2630 intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
2631 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
2633 // Track the stub we have created so that it will be deleted later.
2634 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2636 // Finally repatch the jump to sow case back in the hot path to jump here instead.
2637 // FIXME: should revert this repatching, on failure.
2638 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
2639 X86Assembler::repatchBranchOffset(jmpLocation, code);
2640 #else
2641 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a StructureID that is
2642 // referencing the prototype object - let's speculatively load it's table nice and early!)
2643 JSObject* protoObject = static_cast<JSObject*>(structureID->prototypeForLookup(m_exec));
2644 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2645 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2647 // check eax is an object of the right StructureID.
2648 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2649 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2650 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2651 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2653 // Check the prototype object's StructureID had not changed.
2654 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2655 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(prototypeStructureID), static_cast<void*>(protoStructureIDAddress));
2656 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJne();
2658 // Checks out okay! - getDirectOffset
2659 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
2661 m_jit.ret();
2663 void* code = m_jit.copy();
2664 ASSERT(code);
2666 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2667 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2668 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2670 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2672 ctiRepatchCallByReturnAddress(returnAddress, code);
2673 #endif
2676 void CTI::privateCompileGetByIdChain(StructureID* structureID, StructureIDChain* chain, size_t count, size_t cachedOffset, void* returnAddress)
2678 ASSERT(count);
2680 Vector<X86Assembler::JmpSrc> bucketsOfFail;
2682 // Check eax is an object of the right StructureID.
2683 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2684 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2685 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2686 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2688 StructureID* currStructureID = structureID;
2689 RefPtr<StructureID>* chainEntries = chain->head();
2690 JSObject* protoObject = 0;
2691 for (unsigned i = 0; i<count; ++i) {
2692 protoObject = static_cast<JSObject*>(currStructureID->prototypeForLookup(m_exec));
2693 currStructureID = chainEntries[i].get();
2695 // Check the prototype object's StructureID had not changed.
2696 StructureID** protoStructureIDAddress = &(protoObject->m_structureID);
2697 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(currStructureID), static_cast<void*>(protoStructureIDAddress));
2698 bucketsOfFail.append(m_jit.emitUnlinkedJne());
2700 ASSERT(protoObject);
2702 PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
2703 m_jit.movl_mr(static_cast<void*>(protoPropertyStorage), X86::edx);
2704 m_jit.movl_mr(cachedOffset * sizeof(JSValue*), X86::edx, X86::eax);
2705 m_jit.ret();
2707 bucketsOfFail.append(m_jit.emitUnlinkedJmp());
2709 void* code = m_jit.copy();
2710 ASSERT(code);
2712 for (unsigned i = 0; i < bucketsOfFail.size(); ++i)
2713 X86Assembler::link(code, bucketsOfFail[i], reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2715 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2717 ctiRepatchCallByReturnAddress(returnAddress, code);
2720 void CTI::privateCompilePutByIdReplace(StructureID* structureID, size_t cachedOffset, void* returnAddress)
2722 // check eax is an object of the right StructureID.
2723 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2724 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2725 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(structureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2726 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2728 // checks out okay! - putDirectOffset
2729 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2730 m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
2731 m_jit.ret();
2733 void* code = m_jit.copy();
2734 ASSERT(code);
2736 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2737 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2739 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2741 ctiRepatchCallByReturnAddress(returnAddress, code);
2744 extern "C" {
2746 static JSValue* SFX_CALL transitionObject(StructureID* newStructureID, size_t cachedOffset, JSObject* baseObject, JSValue* value)
2748 StructureID* oldStructureID = newStructureID->previousID();
2750 baseObject->transitionTo(newStructureID);
2752 if (oldStructureID->propertyMap().storageSize() == JSObject::inlineStorageCapacity)
2753 baseObject->allocatePropertyStorage(oldStructureID->propertyMap().storageSize(), oldStructureID->propertyMap().size());
2755 baseObject->putDirectOffset(cachedOffset, value);
2756 return baseObject;
2761 static inline bool transitionWillNeedStorageRealloc(StructureID* oldStructureID, StructureID* newStructureID)
2763 if (oldStructureID->propertyMap().storageSize() == JSObject::inlineStorageCapacity)
2764 return true;
2766 if (oldStructureID->propertyMap().storageSize() < JSObject::inlineStorageCapacity)
2767 return false;
2769 if (oldStructureID->propertyMap().size() != newStructureID->propertyMap().size())
2770 return true;
2772 return false;
2775 void CTI::privateCompilePutByIdTransition(StructureID* oldStructureID, StructureID* newStructureID, size_t cachedOffset, StructureIDChain* sIDC, void* returnAddress)
2777 Vector<X86Assembler::JmpSrc, 16> failureCases;
2778 // check eax is an object of the right StructureID.
2779 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2780 failureCases.append(m_jit.emitUnlinkedJne());
2781 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(oldStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2782 failureCases.append(m_jit.emitUnlinkedJne());
2783 Vector<X86Assembler::JmpSrc> successCases;
2785 // ecx = baseObject
2786 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::eax, X86::ecx);
2787 // proto(ecx) = baseObject->structureID()->prototype()
2788 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
2789 failureCases.append(m_jit.emitUnlinkedJne());
2790 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
2792 // ecx = baseObject->m_structureID
2793 for (RefPtr<StructureID>* it = sIDC->head(); *it; ++it) {
2794 // null check the prototype
2795 m_jit.cmpl_i32r(reinterpret_cast<intptr_t> (jsNull()), X86::ecx);
2796 successCases.append(m_jit.emitUnlinkedJe());
2798 // Check the structure id
2799 m_jit.cmpl_i32m(reinterpret_cast<uint32_t>(it->get()), OBJECT_OFFSET(JSCell, m_structureID), X86::ecx);
2800 failureCases.append(m_jit.emitUnlinkedJne());
2802 m_jit.movl_mr(OBJECT_OFFSET(JSCell, m_structureID), X86::ecx, X86::ecx);
2803 m_jit.cmpl_i32m(ObjectType, OBJECT_OFFSET(StructureID, m_typeInfo) + OBJECT_OFFSET(TypeInfo, m_type), X86::ecx);
2804 failureCases.append(m_jit.emitUnlinkedJne());
2805 m_jit.movl_mr(OBJECT_OFFSET(StructureID, m_prototype), X86::ecx, X86::ecx);
2808 failureCases.append(m_jit.emitUnlinkedJne());
2809 for (unsigned i = 0; i < successCases.size(); ++i)
2810 m_jit.link(successCases[i], m_jit.label());
2812 X86Assembler::JmpSrc callTarget;
2813 // Fast case, don't need to do any heavy lifting, so don't bother making a call.
2814 if (!transitionWillNeedStorageRealloc(oldStructureID, newStructureID)) {
2815 // Assumes m_refCount can be decremented easily, refcount decrement is safe as
2816 // codeblock should ensure oldStructureID->m_refCount > 0
2817 m_jit.subl_i8m(1, reinterpret_cast<void*>(oldStructureID));
2818 m_jit.addl_i8m(1, reinterpret_cast<void*>(newStructureID));
2819 m_jit.movl_i32m(reinterpret_cast<uint32_t>(newStructureID), OBJECT_OFFSET(JSCell, m_structureID), X86::eax);
2821 // write the value
2822 m_jit.movl_mr(OBJECT_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
2823 m_jit.movl_rm(X86::edx, cachedOffset * sizeof(JSValue*), X86::eax);
2824 } else {
2825 // Slow case transition -- we're going to need to quite a bit of work,
2826 // so just make a call
2827 m_jit.pushl_r(X86::edx);
2828 m_jit.pushl_r(X86::eax);
2829 m_jit.movl_i32r(cachedOffset, X86::eax);
2830 m_jit.pushl_r(X86::eax);
2831 m_jit.movl_i32r(reinterpret_cast<uint32_t>(newStructureID), X86::eax);
2832 m_jit.pushl_r(X86::eax);
2833 callTarget = m_jit.emitCall();
2834 m_jit.addl_i32r(4 * sizeof(void*), X86::esp);
2836 m_jit.ret();
2837 void* code = m_jit.copy();
2838 ASSERT(code);
2840 for (unsigned i = 0; i < failureCases.size(); ++i)
2841 X86Assembler::link(code, failureCases[i], reinterpret_cast<void*>(Machine::cti_op_put_by_id_fail));
2843 if (transitionWillNeedStorageRealloc(oldStructureID, newStructureID))
2844 X86Assembler::link(code, callTarget, reinterpret_cast<void*>(transitionObject));
2846 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2848 ctiRepatchCallByReturnAddress(returnAddress, code);
2851 void* CTI::privateCompileArrayLengthTrampoline()
2853 // Check eax is an array
2854 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2855 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2856 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
2857 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2859 // Checks out okay! - get the length from the storage
2860 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::eax);
2861 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::eax, X86::eax);
2863 m_jit.addl_rr(X86::eax, X86::eax);
2864 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2865 m_jit.addl_i8r(1, X86::eax);
2867 m_jit.ret();
2869 void* code = m_jit.copy();
2870 ASSERT(code);
2872 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2873 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2874 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2876 return code;
2879 void* CTI::privateCompileStringLengthTrampoline()
2881 // Check eax is a string
2882 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2883 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2884 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsStringVptr), X86::eax);
2885 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2887 // Checks out okay! - get the length from the Ustring.
2888 m_jit.movl_mr(OBJECT_OFFSET(JSString, m_value) + OBJECT_OFFSET(UString, m_rep), X86::eax, X86::eax);
2889 m_jit.movl_mr(OBJECT_OFFSET(UString::Rep, len), X86::eax, X86::eax);
2891 m_jit.addl_rr(X86::eax, X86::eax);
2892 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2893 m_jit.addl_i8r(1, X86::eax);
2895 m_jit.ret();
2897 void* code = m_jit.copy();
2898 ASSERT(code);
2900 X86Assembler::link(code, failureCases1, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2901 X86Assembler::link(code, failureCases2, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2902 X86Assembler::link(code, failureCases3, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2904 return code;
2907 void CTI::patchGetByIdSelf(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
2909 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
2911 // We don't want to repatch more than once - in future go to cti_op_get_by_id_generic.
2912 // Should probably go to Machine::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
2913 ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_get_by_id_generic));
2915 // Repatch the offset into the propoerty map to load from, then repatch the StructureID to look for.
2916 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
2917 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdStructureID, reinterpret_cast<uint32_t>(structureID));
2920 void CTI::patchPutByIdReplace(CodeBlock* codeBlock, StructureID* structureID, size_t cachedOffset, void* returnAddress)
2922 StructureStubInfo& info = codeBlock->getStubInfo(returnAddress);
2924 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
2925 // Should probably go to Machine::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
2926 ctiRepatchCallByReturnAddress(returnAddress, (void*)(Machine::cti_op_put_by_id_generic));
2928 // Repatch the offset into the propoerty map to load from, then repatch the StructureID to look for.
2929 X86Assembler::repatchDisplacement(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdPropertyMapOffset, cachedOffset * sizeof(JSValue*));
2930 X86Assembler::repatchImmediate(reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetPutByIdStructureID, reinterpret_cast<uint32_t>(structureID));
2933 void CTI::privateCompilePatchGetArrayLength(void* returnAddress)
2935 StructureStubInfo& info = m_codeBlock->getStubInfo(returnAddress);
2937 // We don't want to repatch more than once - in future go to cti_op_put_by_id_generic.
2938 ctiRepatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(Machine::cti_op_get_by_id_fail));
2940 // Check eax is an array
2941 m_jit.testl_i32r(JSImmediate::TagMask, X86::eax);
2942 X86Assembler::JmpSrc failureCases1 = m_jit.emitUnlinkedJne();
2943 m_jit.cmpl_i32m(reinterpret_cast<unsigned>(m_machine->m_jsArrayVptr), X86::eax);
2944 X86Assembler::JmpSrc failureCases2 = m_jit.emitUnlinkedJne();
2946 // Checks out okay! - get the length from the storage
2947 m_jit.movl_mr(OBJECT_OFFSET(JSArray, m_storage), X86::eax, X86::ecx);
2948 m_jit.movl_mr(OBJECT_OFFSET(ArrayStorage, m_length), X86::ecx, X86::ecx);
2950 m_jit.addl_rr(X86::ecx, X86::ecx);
2951 X86Assembler::JmpSrc failureCases3 = m_jit.emitUnlinkedJo();
2952 m_jit.addl_i8r(1, X86::ecx);
2954 X86Assembler::JmpSrc success = m_jit.emitUnlinkedJmp();
2956 void* code = m_jit.copy();
2957 ASSERT(code);
2959 // Use the repatch information to link the failure cases back to the original slow case routine.
2960 void* slowCaseBegin = reinterpret_cast<char*>(info.callReturnLocation) - repatchOffsetGetByIdSlowCaseCall;
2961 X86Assembler::link(code, failureCases1, slowCaseBegin);
2962 X86Assembler::link(code, failureCases2, slowCaseBegin);
2963 X86Assembler::link(code, failureCases3, slowCaseBegin);
2965 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
2966 intptr_t successDest = (intptr_t)(info.hotPathBegin) + repatchOffsetGetByIdPropertyMapOffset;
2967 X86Assembler::link(code, success, reinterpret_cast<void*>(successDest));
2969 // Track the stub we have created so that it will be deleted later.
2970 m_codeBlock->getStubInfo(returnAddress).stubRoutine = code;
2972 // Finally repatch the jump to sow case back in the hot path to jump here instead.
2973 // FIXME: should revert this repatching, on failure.
2974 intptr_t jmpLocation = reinterpret_cast<intptr_t>(info.hotPathBegin) + repatchOffsetGetByIdBranchToSlowCase;
2975 X86Assembler::repatchBranchOffset(jmpLocation, code);
2978 void CTI::emitGetVariableObjectRegister(X86Assembler::RegisterID variableObject, int index, X86Assembler::RegisterID dst)
2980 m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, dst);
2981 m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), dst, dst);
2982 m_jit.movl_mr(index * sizeof(Register), dst, dst);
2985 void CTI::emitPutVariableObjectRegister(X86Assembler::RegisterID src, X86Assembler::RegisterID variableObject, int index)
2987 m_jit.movl_mr(JSVariableObject::offsetOf_d(), variableObject, variableObject);
2988 m_jit.movl_mr(JSVariableObject::offsetOf_Data_registers(), variableObject, variableObject);
2989 m_jit.movl_rm(src, index * sizeof(Register), variableObject);
2992 #if ENABLE(WREC)
2994 void* CTI::compileRegExp(ExecState* exec, const UString& pattern, unsigned* numSubpatterns_ptr, const char** error_ptr, bool ignoreCase, bool multiline)
2996 // TODO: better error messages
2997 if (pattern.size() > MaxPatternSize) {
2998 *error_ptr = "regular expression too large";
2999 return 0;
3002 X86Assembler jit(exec->machine()->jitCodeBuffer());
3003 WRECParser parser(pattern, ignoreCase, multiline, jit);
3005 jit.emitConvertToFastCall();
3006 // (0) Setup:
3007 // Preserve regs & initialize outputRegister.
3008 jit.pushl_r(WRECGenerator::outputRegister);
3009 jit.pushl_r(WRECGenerator::currentValueRegister);
3010 // push pos onto the stack, both to preserve and as a parameter available to parseDisjunction
3011 jit.pushl_r(WRECGenerator::currentPositionRegister);
3012 // load output pointer
3013 jit.movl_mr(16
3014 #if COMPILER(MSVC)
3015 + 3 * sizeof(void*)
3016 #endif
3017 , X86::esp, WRECGenerator::outputRegister);
3019 // restart point on match fail.
3020 WRECGenerator::JmpDst nextLabel = jit.label();
3022 // (1) Parse Disjunction:
3024 // Parsing the disjunction should fully consume the pattern.
3025 JmpSrcVector failures;
3026 parser.parseDisjunction(failures);
3027 if (parser.isEndOfPattern()) {
3028 parser.m_err = WRECParser::Error_malformedPattern;
3030 if (parser.m_err) {
3031 // TODO: better error messages
3032 *error_ptr = "TODO: better error messages";
3033 return 0;
3036 // (2) Success:
3037 // Set return value & pop registers from the stack.
3039 jit.testl_rr(WRECGenerator::outputRegister, WRECGenerator::outputRegister);
3040 WRECGenerator::JmpSrc noOutput = jit.emitUnlinkedJe();
3042 jit.movl_rm(WRECGenerator::currentPositionRegister, 4, WRECGenerator::outputRegister);
3043 jit.popl_r(X86::eax);
3044 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
3045 jit.popl_r(WRECGenerator::currentValueRegister);
3046 jit.popl_r(WRECGenerator::outputRegister);
3047 jit.ret();
3049 jit.link(noOutput, jit.label());
3051 jit.popl_r(X86::eax);
3052 jit.movl_rm(X86::eax, WRECGenerator::outputRegister);
3053 jit.popl_r(WRECGenerator::currentValueRegister);
3054 jit.popl_r(WRECGenerator::outputRegister);
3055 jit.ret();
3057 // (3) Failure:
3058 // All fails link to here. Progress the start point & if it is within scope, loop.
3059 // Otherwise, return fail value.
3060 WRECGenerator::JmpDst here = jit.label();
3061 for (unsigned i = 0; i < failures.size(); ++i)
3062 jit.link(failures[i], here);
3063 failures.clear();
3065 jit.movl_mr(X86::esp, WRECGenerator::currentPositionRegister);
3066 jit.addl_i8r(1, WRECGenerator::currentPositionRegister);
3067 jit.movl_rm(WRECGenerator::currentPositionRegister, X86::esp);
3068 jit.cmpl_rr(WRECGenerator::lengthRegister, WRECGenerator::currentPositionRegister);
3069 jit.link(jit.emitUnlinkedJle(), nextLabel);
3071 jit.addl_i8r(4, X86::esp);
3073 jit.movl_i32r(-1, X86::eax);
3074 jit.popl_r(WRECGenerator::currentValueRegister);
3075 jit.popl_r(WRECGenerator::outputRegister);
3076 jit.ret();
3078 *numSubpatterns_ptr = parser.m_numSubpatterns;
3080 void* code = jit.copy();
3081 ASSERT(code);
3082 return code;
3085 #endif // ENABLE(WREC)
3087 } // namespace JSC
3089 #endif // ENABLE(CTI)