/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=4 sw=4 et tw=99:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released
 * May 28, 2008.
 *
 * The Initial Developer of the Original Code is
 *   Brendan Eich <brendan@mozilla.org>
 *
 * Contributor(s):
 *   David Anderson <danderson@mozilla.com>
 *   David Mandelin <dmandelin@mozilla.com>
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either of the GNU General Public License Version 2 or later (the "GPL"),
 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */

#include "jsscope.h"
#include "jsnum.h"
#include "MonoIC.h"
#include "StubCalls.h"
#include "StubCalls-inl.h"
#include "assembler/assembler/LinkBuffer.h"
#include "assembler/assembler/RepatchBuffer.h"
#include "assembler/assembler/MacroAssembler.h"
#include "assembler/assembler/CodeLocation.h"
#include "CodeGenIncludes.h"
#include "methodjit/Compiler.h"
#include "InlineFrameAssembler.h"
#include "jsobj.h"

#include "jsinterpinlines.h"
#include "jsobjinlines.h"
#include "jsscopeinlines.h"
#include "jsscriptinlines.h"

using namespace js;
using namespace js::mjit;
using namespace js::mjit::ic;

typedef JSC::MacroAssembler::RegisterID RegisterID;
typedef JSC::MacroAssembler::Address Address;
typedef JSC::MacroAssembler::Jump Jump;
typedef JSC::MacroAssembler::Imm32 Imm32;
typedef JSC::MacroAssembler::ImmPtr ImmPtr;
typedef JSC::MacroAssembler::Call Call;

#if defined JS_MONOIC

static void
PatchGetFallback(VMFrame &f, ic::MICInfo &mic)
{
    JSC::RepatchBuffer repatch(mic.stubEntry.executableAddress(), 64);
    JSC::FunctionPtr fptr(JS_FUNC_TO_DATA_PTR(void *, stubs::GetGlobalName));
    repatch.relink(mic.stubCall, fptr);
}
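
/*
 * Out-of-line path for a global name read. If the global's property is a
 * plain, slot-bearing data property, patch the inline shape guard and load
 * offsets so later executions stay on the fast path. If the property exists
 * but is not cacheable here, patch the stub call over to the plain slow path.
 */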
void JS_FASTCALL
ic::GetGlobalName(VMFrame &f, uint32 index)
{
    JSObject *obj = f.fp()->scopeChain().getGlobal();
    ic::MICInfo &mic = f.fp()->script()->mics[index];
    JSAtom *atom = f.fp()->script()->getAtom(GET_INDEX(f.regs.pc));
    jsid id = ATOM_TO_JSID(atom);

    JS_ASSERT(mic.kind == ic::MICInfo::GET);

    JS_LOCK_OBJ(f.cx, obj);
    const Shape *shape = obj->nativeLookup(id);
    if (!shape ||
        !shape->hasDefaultGetterOrIsMethod() ||
        !shape->hasSlot())
    {
        JS_UNLOCK_OBJ(f.cx, obj);
        if (shape)
            PatchGetFallback(f, mic);
        stubs::GetGlobalName(f);
        return;
    }
    uint32 slot = shape->slot;
    JS_UNLOCK_OBJ(f.cx, obj);

    mic.u.name.touched = true;

    /* Patch shape guard. */
    JSC::RepatchBuffer repatch(mic.entry.executableAddress(), 50);
    repatch.repatch(mic.shape, obj->shape());

    /* Patch loads. */
    JS_ASSERT(slot >= JS_INITIAL_NSLOTS);
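    /*
     * The inline load goes through the object's dynamic slots, which start at
     * slot JS_INITIAL_NSLOTS, so rebase the slot index and convert it to a
     * byte offset before patching it into the load instructions.
     */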
    slot -= JS_INITIAL_NSLOTS;
    slot *= sizeof(Value);
    JSC::RepatchBuffer loads(mic.load.executableAddress(), 32, false);
#if defined JS_CPU_X86
    loads.repatch(mic.load.dataLabel32AtOffset(MICInfo::GET_DATA_OFFSET), slot);
    loads.repatch(mic.load.dataLabel32AtOffset(MICInfo::GET_TYPE_OFFSET), slot + 4);
#elif defined JS_CPU_ARM
    // mic.load actually points to the LDR instruction which fetches the offset, but 'repatch'
    // knows how to dereference it to find the integer value.
    loads.repatch(mic.load.dataLabel32AtOffset(0), slot);
#elif defined JS_PUNBOX64
    loads.repatch(mic.load.dataLabel32AtOffset(mic.patchValueOffset), slot);
#endif

    /* Do load anyway... this time. */
    stubs::GetGlobalName(f);
}

static void JS_FASTCALL
SetGlobalNameSlow(VMFrame &f, uint32 index)
{
    JSScript *script = f.fp()->script();
    JSAtom *atom = script->getAtom(GET_INDEX(f.regs.pc));
    if (script->strictModeCode)
        stubs::SetGlobalName<true>(f, atom);
    else
        stubs::SetGlobalName<false>(f, atom);
}

static void
PatchSetFallback(VMFrame &f, ic::MICInfo &mic)
{
    JSC::RepatchBuffer repatch(mic.stubEntry.executableAddress(), 64);
    JSC::FunctionPtr fptr(JS_FUNC_TO_DATA_PTR(void *, SetGlobalNameSlow));
    repatch.relink(mic.stubCall, fptr);
}

static VoidStubAtom
GetStubForSetGlobalName(VMFrame &f)
{
    JSScript *script = f.fp()->script();
    // The property cache doesn't like inc ops, so we use a simpler
    // stub for that case.
    return js_CodeSpec[*f.regs.pc].format & (JOF_INC | JOF_DEC)
           ? STRICT_VARIANT(stubs::SetGlobalNameDumb)
           : STRICT_VARIANT(stubs::SetGlobalName);
}
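
/*
 * Out-of-line path for a global name write. This mirrors GetGlobalName above,
 * except that the shape must also be writable, and it is the inline stores
 * rather than the loads that get patched.
 */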
void JS_FASTCALL
ic::SetGlobalName(VMFrame &f, uint32 index)
{
    JSObject *obj = f.fp()->scopeChain().getGlobal();
    ic::MICInfo &mic = f.fp()->script()->mics[index];
    JSAtom *atom = f.fp()->script()->getAtom(GET_INDEX(f.regs.pc));
    jsid id = ATOM_TO_JSID(atom);

    JS_ASSERT(mic.kind == ic::MICInfo::SET);

    JS_LOCK_OBJ(f.cx, obj);
    const Shape *shape = obj->nativeLookup(id);
    if (!shape ||
        !shape->hasDefaultGetterOrIsMethod() ||
        !shape->writable() ||
        !shape->hasSlot())
    {
        JS_UNLOCK_OBJ(f.cx, obj);
        if (shape)
            PatchSetFallback(f, mic);
        GetStubForSetGlobalName(f)(f, atom);
        return;
    }
    uint32 slot = shape->slot;
    JS_UNLOCK_OBJ(f.cx, obj);

    mic.u.name.touched = true;

    /* Patch shape guard. */
    JSC::RepatchBuffer repatch(mic.entry.executableAddress(), 50);
    repatch.repatch(mic.shape, obj->shape());

    /* Patch stores. */
    JS_ASSERT(slot >= JS_INITIAL_NSLOTS);
    slot -= JS_INITIAL_NSLOTS;
    slot *= sizeof(Value);

    JSC::RepatchBuffer stores(mic.load.executableAddress(), 32, false);
#if defined JS_CPU_X86
    stores.repatch(mic.load.dataLabel32AtOffset(MICInfo::SET_TYPE_OFFSET), slot + 4);

    uint32 dataOffset;
    if (mic.u.name.typeConst)
        dataOffset = MICInfo::SET_DATA_CONST_TYPE_OFFSET;
    else
        dataOffset = MICInfo::SET_DATA_TYPE_OFFSET;
    stores.repatch(mic.load.dataLabel32AtOffset(dataOffset), slot);
#elif defined JS_CPU_ARM
    // mic.load actually points to the LDR instruction which fetches the offset, but 'repatch'
    // knows how to dereference it to find the integer value.
    stores.repatch(mic.load.dataLabel32AtOffset(0), slot);
#elif defined JS_PUNBOX64
    stores.repatch(mic.load.dataLabel32AtOffset(mic.patchValueOffset), slot);
#endif

    // Actually implement the op the slow way.
    GetStubForSetGlobalName(f)(f, atom);
}
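
/*
 * Permanent slow paths for call sites whose callee cannot be jitted;
 * CallCompiler::update() below repatches the out-of-line call to land here
 * instead of ic::Call or ic::New.
 */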
static void * JS_FASTCALL
SlowCallFromIC(VMFrame &f, uint32 index)
{
    JSScript *oldscript = f.fp()->script();
    CallICInfo &ic = oldscript->callICs[index];

    stubs::SlowCall(f, ic.argc);

    return NULL;
}

static void * JS_FASTCALL
SlowNewFromIC(VMFrame &f, uint32 index)
{
    JSScript *oldscript = f.fp()->script();
    CallICInfo &ic = oldscript->callICs[index];

    stubs::SlowNew(f, ic.argc);

    return NULL;
}

/*
 * Calls have an inline path and an out-of-line path. The inline path is used
 * in the fastest case: the method has JIT'd code, and |argc == nargs|.
 *
 * The inline path and OOL path are separated by a guard on the identity of
 * the callee object. This guard starts as NULL and always fails on the first
 * hit. On the OOL path, the callee is verified to be both a function and a
 * scripted function. If these conditions hold, |ic::Call| is invoked.
 *
 * |ic::Call| first ensures that the callee has JIT code. If it doesn't, the
 * call to |ic::Call| is patched to a slow path. If it does have JIT'd code,
 * the following cases can occur:
 *
 *   1) argc != nargs: The call to |ic::Call| is patched with a dynamically
 *      generated stub. This stub inlines a path that looks like:
 *      ----
 *      push frame
 *      if (callee is not compiled) {
 *          Compile(callee);
 *      }
 *      call callee->arityLabel
 *      ----
 *      The arity label is a special entry point for correcting frames for
 *      arity mismatches.
 *
 *   2) argc == nargs, and the inline call site was not patched yet.
 *      The guard dividing the two paths is patched to guard on the given
 *      function object identity, and the following call is patched to
 *      directly call the JIT code.
 *
 *   3) argc == nargs, and the inline call site was patched already.
 *      A small stub is created which extends the original guard to also
 *      guard on the JSFunction lying underneath the function object.
 *
 * If the OOL path does not have a scripted function, but does have a native,
 * then a small stub is generated which inlines the native invocation.
 */

class CallCompiler
{
    VMFrame &f;
    JSContext *cx;
    CallICInfo &ic;
    Value *vp;
    bool callingNew;

  public:
    CallCompiler(VMFrame &f, CallICInfo &ic, bool callingNew)
      : f(f), cx(f.cx), ic(ic), vp(f.regs.sp - (ic.argc + 2)), callingNew(callingNew)
    {
    }

    JSC::ExecutablePool *poolForSize(size_t size, CallICInfo::PoolIndex index)
    {
        mjit::ThreadData *jm = &JS_METHODJIT_DATA(cx);
        JSC::ExecutablePool *ep = jm->execPool->poolForSize(size);
        if (!ep) {
            js_ReportOutOfMemory(f.cx);
            return NULL;
        }
        JS_ASSERT(!ic.pools[index]);
        ic.pools[index] = ep;
        return ep;
    }
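
    /*
     * Case 1 from the comment above: build and link the arity-mismatch stub.
     * update() also falls back to this stub when the inline site cannot be
     * reused for a new callee.
     */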
    bool generateFullCallStub(JSScript *script, uint32 flags)
    {
        /*
         * Create a stub that works with arity mismatches. Like the fast-path,
         * this allocates a frame on the caller side, but also performs extra
         * checks for compilability. Perhaps this should be a separate, shared
         * trampoline, but for now we generate it dynamically.
         */
        Assembler masm;
        InlineFrameAssembler inlFrame(masm, ic, flags);
        RegisterID t0 = inlFrame.tempRegs.takeAnyReg();

        /* Generate the inline frame creation. */
        inlFrame.assemble();

        /* funPtrReg is still valid. Check if a compilation is needed. */
        Address scriptAddr(ic.funPtrReg, offsetof(JSFunction, u) +
                           offsetof(JSFunction::U::Scripted, script));
        masm.loadPtr(scriptAddr, t0);

        /*
         * Test if script->nmap is NULL - same as checking ncode, but faster
         * here since ncode has two failure modes and we need to load out of
         * nmap anyway.
         */
        masm.loadPtr(Address(t0, offsetof(JSScript, jit)), t0);
        Jump hasCode = masm.branchTestPtr(Assembler::NonZero, t0, t0);

        /* Try and compile. On success we get back the nmap pointer. */
        masm.storePtr(JSFrameReg, FrameAddress(offsetof(VMFrame, regs.fp)));
        masm.move(Imm32(ic.argc), Registers::ArgReg1);
        JSC::MacroAssembler::Call tryCompile =
            masm.stubCall(JS_FUNC_TO_DATA_PTR(void *, stubs::CompileFunction),
                          script->code, ic.frameDepth);

        Jump notCompiled = masm.branchTestPtr(Assembler::Zero, Registers::ReturnReg,
                                              Registers::ReturnReg);

        masm.call(Registers::ReturnReg);
        Jump done = masm.jump();

        hasCode.linkTo(masm.label(), &masm);

        /* Get nmap[ARITY], set argc, call. */
        masm.move(Imm32(ic.argc), JSParamReg_Argc);
        masm.loadPtr(Address(t0, offsetof(JITScript, arityCheck)), t0);
        masm.call(t0);

        /* Rejoin with the fast path. */
        Jump rejoin = masm.jump();

        /* Worst case - function didn't compile. */
        notCompiled.linkTo(masm.label(), &masm);
        masm.loadPtr(FrameAddress(offsetof(VMFrame, regs.fp)), JSFrameReg);
        notCompiled = masm.jump();

        JSC::ExecutablePool *ep = poolForSize(masm.size(), CallICInfo::Pool_ScriptStub);
        if (!ep)
            return false;

        JSC::LinkBuffer buffer(&masm, ep);
        buffer.link(rejoin, ic.funGuard.labelAtOffset(ic.joinPointOffset));
        buffer.link(done, ic.funGuard.labelAtOffset(ic.joinPointOffset));
        buffer.link(notCompiled, ic.slowPathStart.labelAtOffset(ic.slowJoinOffset));
        buffer.link(tryCompile,
                    JSC::FunctionPtr(JS_FUNC_TO_DATA_PTR(void *, stubs::CompileFunction)));
        JSC::CodeLocationLabel cs = buffer.finalizeCodeAddendum();

        JaegerSpew(JSpew_PICs, "generated CALL stub %p (%d bytes)\n", cs.executableAddress(),
                   masm.size());

        JSC::CodeLocationJump oolJump = ic.slowPathStart.jumpAtOffset(ic.oolJumpOffset);
        uint8 *start = (uint8 *)oolJump.executableAddress();
        JSC::RepatchBuffer repatch(start - 32, 64);
        repatch.relink(oolJump, cs);

        return true;
    }
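
    /* Case 2 from the comment above: first hit at an unpatched inline site. */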
    void patchInlinePath(JSScript *script, JSObject *obj)
    {
        /* Very fast path. */
        uint8 *start = (uint8 *)ic.funGuard.executableAddress();
        JSC::RepatchBuffer repatch(start - 32, 64);

        ic.fastGuardedObject = obj;

        repatch.repatch(ic.funGuard, obj);
        repatch.relink(ic.funGuard.callAtOffset(ic.hotCallOffset),
                       JSC::FunctionPtr(script->ncode));

        JaegerSpew(JSpew_PICs, "patched CALL path %p (obj: %p)\n", start, ic.fastGuardedObject);
    }
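
    /*
     * Case 3 from the comment above: the inline site is already taken, but
     * the new callee is a different closure of the same JSFunction, so emit a
     * stub that re-guards on the underlying function instead.
     */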
    bool generateStubForClosures(JSObject *obj)
    {
        /* Slightly less fast path - guard on fun->getFunctionPrivate() instead. */
        Assembler masm;

        Registers tempRegs;
        tempRegs.takeReg(ic.funObjReg);

        RegisterID t0 = tempRegs.takeAnyReg();

        /* Guard that it's actually a function object. */
        Jump claspGuard = masm.branchPtr(Assembler::NotEqual,
                                         Address(ic.funObjReg, offsetof(JSObject, clasp)),
                                         ImmPtr(&js_FunctionClass));

        /* Guard that it's the same function. */
        JSFunction *fun = obj->getFunctionPrivate();
        masm.loadFunctionPrivate(ic.funObjReg, t0);
        Jump funGuard = masm.branchPtr(Assembler::NotEqual, t0, ImmPtr(fun));
        Jump done = masm.jump();

        JSC::ExecutablePool *ep = poolForSize(masm.size(), CallICInfo::Pool_ClosureStub);
        if (!ep)
            return false;

        JSC::LinkBuffer buffer(&masm, ep);
        buffer.link(claspGuard, ic.slowPathStart);
        buffer.link(funGuard, ic.slowPathStart);
        buffer.link(done, ic.funGuard.labelAtOffset(ic.hotPathOffset));
        JSC::CodeLocationLabel cs = buffer.finalizeCodeAddendum();

        JaegerSpew(JSpew_PICs, "generated CALL closure stub %p (%d bytes)\n",
                   cs.executableAddress(), masm.size());

        uint8 *start = (uint8 *)ic.funJump.executableAddress();
        JSC::RepatchBuffer repatch(start - 32, 64);
        repatch.relink(ic.funJump, cs);

        ic.hasJsFunCheck = true;

        return true;
    }
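
    /*
     * Native (non-scripted) callees: perform the call right away, then, once
     * the site has warmed up, emit a stub that inlines the native calling
     * convention behind an identity guard on the function object.
     */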
    bool generateNativeStub()
    {
        Value *vp = f.regs.sp - (ic.argc + 2);

        JSObject *obj;
        if (!IsFunctionObject(*vp, &obj))
            return false;

        JSFunction *fun = obj->getFunctionPrivate();
        if ((!callingNew && !fun->isNative()) || (callingNew && !fun->isConstructor()))
            return false;

        if (callingNew)
            vp[1].setMagicWithObjectOrNullPayload(NULL);

        Native fn = fun->u.n.native;
        if (!fn(cx, ic.argc, vp))
            THROWV(true);

        /* Right now, take slow-path for IC misses or multiple stubs. */
        if (ic.fastGuardedNative || ic.hasJsFunCheck)
            return true;

        /* Native MIC needs to warm up first. */
        if (!ic.hit) {
            ic.hit = true;
            return true;
        }

        /* Generate fast-path for calling this native. */
        Assembler masm;

        /* Guard on the function object identity, for now. */
        Jump funGuard = masm.branchPtr(Assembler::NotEqual, ic.funObjReg, ImmPtr(obj));

        Registers tempRegs;
#ifndef JS_CPU_X86
        tempRegs.takeReg(Registers::ArgReg0);
        tempRegs.takeReg(Registers::ArgReg1);
        tempRegs.takeReg(Registers::ArgReg2);
#endif
        RegisterID t0 = tempRegs.takeAnyReg();

        /* Store pc. */
        masm.storePtr(ImmPtr(cx->regs->pc),
                      FrameAddress(offsetof(VMFrame, regs) + offsetof(JSFrameRegs, pc)));

        /* Store sp. */
        uint32 spOffset = sizeof(JSStackFrame) + ic.frameDepth * sizeof(Value);
        masm.addPtr(Imm32(spOffset), JSFrameReg, t0);
        masm.storePtr(t0, FrameAddress(offsetof(VMFrame, regs) + offsetof(JSFrameRegs, sp)));

        /* Grab cx early on to avoid stack mucking on x86. */
#ifdef JS_CPU_X86
        RegisterID cxReg = tempRegs.takeAnyReg();
#else
        RegisterID cxReg = Registers::ArgReg0;
#endif
        masm.loadPtr(FrameAddress(offsetof(VMFrame, cx)), cxReg);

#ifdef JS_CPU_X86
        /* x86's stack should be 16-byte aligned. */
        masm.subPtr(Imm32(16), Assembler::stackPointerRegister);
#endif
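
        /*
         * On x86 the 16 bytes reserved above hold the outgoing stack
         * arguments (cx, argc, vp), stored below at offsets 0, 4 and 8; the
         * other platforms pass them in ArgReg0-ArgReg2 instead.
         */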

        /* Compute vp. */
#ifdef JS_CPU_X86
        RegisterID vpReg = t0;
#else
        RegisterID vpReg = Registers::ArgReg2;
#endif

        uint32 vpOffset = sizeof(JSStackFrame) + (ic.frameDepth - ic.argc - 2) * sizeof(Value);
        masm.addPtr(Imm32(vpOffset), JSFrameReg, vpReg);

        /* Mark vp[1] as magic for |new|. */
        if (callingNew) {
            Value v;
            v.setMagicWithObjectOrNullPayload(NULL);
            masm.storeValue(v, Address(vpReg, sizeof(Value)));
        }

#ifdef JS_CPU_X86
        masm.storePtr(vpReg, Address(Assembler::stackPointerRegister, 8));
#endif

        /* Push argc. */
#ifdef JS_CPU_X86
        masm.store32(Imm32(ic.argc), Address(Assembler::stackPointerRegister, 4));
#else
        masm.move(Imm32(ic.argc), Registers::ArgReg1);
#endif

        /* Push cx. */
#ifdef JS_CPU_X86
        masm.storePtr(cxReg, Address(Assembler::stackPointerRegister, 0));
#endif

#ifdef _WIN64
        /* x64 needs to pad the stack. */
        masm.subPtr(Imm32(32), Assembler::stackPointerRegister);
#endif
        /* Make the call. */
        Assembler::Call call = masm.call();

#ifdef JS_CPU_X86
        masm.addPtr(Imm32(16), Assembler::stackPointerRegister);
#endif
#if defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)
        // JaegerThrowpoline is normally entered via a call, so without
        // fastcall it adjusts esp by 8 to account for the return address.
        // Since we jump to it directly, subtract 8 from esp first.
        masm.subPtr(Imm32(8), Assembler::stackPointerRegister);
#endif

        Jump hasException = masm.branchTest32(Assembler::Zero, Registers::ReturnReg,
                                              Registers::ReturnReg);

#if defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)
        // No exception was thrown, so we never jump to JaegerThrowpoline;
        // undo the 8-byte esp adjustment made for it above.
        masm.addPtr(Imm32(8), Assembler::stackPointerRegister);
#elif defined(_WIN64)
        /*
         * JaegerThrowpoline expects the 32 bytes of padding to still be on
         * the stack, so pop it only on the non-throwing path.
         */
        masm.addPtr(Imm32(32), Assembler::stackPointerRegister);
#endif

        Jump done = masm.jump();

        /* Move JaegerThrowpoline into register for very far jump on x64. */
        hasException.linkTo(masm.label(), &masm);
        masm.move(ImmPtr(JS_FUNC_TO_DATA_PTR(void *, JaegerThrowpoline)), Registers::ReturnReg);
        masm.jump(Registers::ReturnReg);

        JSC::ExecutablePool *ep = poolForSize(masm.size(), CallICInfo::Pool_NativeStub);
        if (!ep)
            THROWV(true);

        JSC::LinkBuffer buffer(&masm, ep);
        buffer.link(done, ic.slowPathStart.labelAtOffset(ic.slowJoinOffset));
        buffer.link(call, JSC::FunctionPtr(JS_FUNC_TO_DATA_PTR(void *, fun->u.n.native)));
        buffer.link(funGuard, ic.slowPathStart);

        JSC::CodeLocationLabel cs = buffer.finalizeCodeAddendum();

        JaegerSpew(JSpew_PICs, "generated native CALL stub %p (%d bytes)\n",
                   cs.executableAddress(), masm.size());

        uint8 *start = (uint8 *)ic.funJump.executableAddress();
        JSC::RepatchBuffer repatch(start - 32, 64);
        repatch.relink(ic.funJump, cs);

        ic.fastGuardedNative = obj;

        return true;
    }
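
    /*
     * Called from ic::Call and ic::New below: run the call via the uncached
     * helpers, then pick whichever of the three patching strategies (see the
     * comment above the class) applies to this site.
     */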
    void *update()
    {
        stubs::UncachedCallResult ucr;
        if (callingNew)
            stubs::UncachedNewHelper(f, ic.argc, &ucr);
        else
            stubs::UncachedCallHelper(f, ic.argc, &ucr);

        // If the function cannot be jitted (generally unjittable or empty script),
        // patch this site to go to a slow path always.
        if (!ucr.codeAddr) {
            JSC::CodeLocationCall oolCall = ic.slowPathStart.callAtOffset(ic.oolCallOffset);
            uint8 *start = (uint8 *)oolCall.executableAddress();
            JSC::RepatchBuffer repatch(start - 32, 64);
            JSC::FunctionPtr fptr = callingNew
                                    ? JSC::FunctionPtr(JS_FUNC_TO_DATA_PTR(void *, SlowNewFromIC))
                                    : JSC::FunctionPtr(JS_FUNC_TO_DATA_PTR(void *, SlowCallFromIC));
            repatch.relink(oolCall, fptr);
            return NULL;
        }

        JSFunction *fun = ucr.fun;
        JS_ASSERT(fun);
        JSScript *script = fun->script();
        JS_ASSERT(script);
        JSObject *callee = ucr.callee;
        JS_ASSERT(callee);

        uint32 flags = callingNew ? JSFRAME_CONSTRUCTING : 0;

        if (!ic.hit) {
            ic.hit = true;
            return ucr.codeAddr;
        }

        if (ic.argc != fun->nargs) {
            if (!generateFullCallStub(script, flags))
                THROWV(NULL);
        } else {
            if (!ic.fastGuardedObject) {
                patchInlinePath(script, callee);
            } else if (!ic.hasJsFunCheck &&
                       !ic.fastGuardedNative &&
                       ic.fastGuardedObject->getFunctionPrivate() == fun) {
                /*
                 * Note: Multiple "function guard" stubs are not yet
                 * supported, thus the fastGuardedNative check.
                 */
                if (!generateStubForClosures(callee))
                    THROWV(NULL);
            } else {
                if (!generateFullCallStub(script, flags))
                    THROWV(NULL);
            }
        }

        return ucr.codeAddr;
    }
};

void * JS_FASTCALL
ic::Call(VMFrame &f, uint32 index)
{
    JSScript *oldscript = f.fp()->script();
    CallICInfo &ic = oldscript->callICs[index];
    CallCompiler cc(f, ic, false);
    return cc.update();
}

void * JS_FASTCALL
ic::New(VMFrame &f, uint32 index)
{
    JSScript *oldscript = f.fp()->script();
    CallICInfo &ic = oldscript->callICs[index];
    CallCompiler cc(f, ic, true);
    return cc.update();
}

void JS_FASTCALL
ic::NativeCall(VMFrame &f, uint32 index)
{
    JSScript *oldscript = f.fp()->script();
    CallICInfo &ic = oldscript->callICs[index];
    CallCompiler cc(f, ic, false);
    if (!cc.generateNativeStub())
        stubs::SlowCall(f, ic.argc);
}

void JS_FASTCALL
ic::NativeNew(VMFrame &f, uint32 index)
{
    JSScript *oldscript = f.fp()->script();
    CallICInfo &ic = oldscript->callICs[index];
    CallCompiler cc(f, ic, true);
    if (!cc.generateNativeStub())
        stubs::SlowNew(f, ic.argc);
}

void
ic::PurgeMICs(JSContext *cx, JSScript *script)
{
    /* MICs are purged during GC to handle changing shapes. */
    JS_ASSERT(cx->runtime->gcRegenShapes);

    uint32 nmics = script->jit->nMICs;
    for (uint32 i = 0; i < nmics; i++) {
        ic::MICInfo &mic = script->mics[i];
        switch (mic.kind) {
          case ic::MICInfo::SET:
          case ic::MICInfo::GET:
          {
            /* Patch shape guard. */
            JSC::RepatchBuffer repatch(mic.entry.executableAddress(), 50);
            repatch.repatch(mic.shape, int(JSObjectMap::INVALID_SHAPE));

            /*
             * If the stub call was patched, leave it alone -- it probably will
             * just be invalidated again.
             */
            break;
          }
          case ic::MICInfo::TRACER:
            /* Nothing to patch! */
            break;
          default:
            JS_NOT_REACHED("Unknown MIC type during purge");
            break;
        }
    }
}

void
ic::SweepCallICs(JSContext *cx, JSScript *script)
{
    for (uint32 i = 0; i < script->jit->nCallICs; i++) {
        ic::CallICInfo &ic = script->callICs[i];

        /*
         * If the object is unreachable, we're guaranteed not to be currently
         * executing a stub generated by a guard on that object. This lets us
         * precisely GC call ICs while keeping the identity guard safe.
         */
        bool fastFunDead = ic.fastGuardedObject && js_IsAboutToBeFinalized(ic.fastGuardedObject);
        bool nativeDead = ic.fastGuardedNative && js_IsAboutToBeFinalized(ic.fastGuardedNative);

        if (!fastFunDead && !nativeDead)
            continue;

        uint8 *start = (uint8 *)ic.funGuard.executableAddress();
        JSC::RepatchBuffer repatch(start - 32, 64);

        if (fastFunDead) {
            repatch.repatch(ic.funGuard, NULL);
            ic.releasePool(CallICInfo::Pool_ClosureStub);
            ic.hasJsFunCheck = false;
            ic.fastGuardedObject = NULL;
        }

        if (nativeDead) {
            ic.releasePool(CallICInfo::Pool_NativeStub);
            ic.fastGuardedNative = NULL;
        }

        repatch.relink(ic.funJump, ic.slowPathStart);

        ic.hit = false;
    }
}

#endif /* JS_MONOIC */