author     Konstantin Tokarev <annulen@yandex.ru>  2016-08-25 19:20:41 +0300
committer  Konstantin Tokarev <annulen@yandex.ru>  2017-02-02 12:30:55 +0000
commit     6882a04fb36642862b11efe514251d32070c3d65 (patch)
tree       b7959826000b061fd5ccc7512035c7478742f7b0 /Source/JavaScriptCore/jit/JITOpcodes.cpp
parent     ab6df191029eeeb0b0f16f127d553265659f739e (diff)
download   qtwebkit-6882a04fb36642862b11efe514251d32070c3d65.tar.gz

Imported QtWebKit TP3 (git b57bc6801f1876c3220d5a4bfea33d620d477443)

Change-Id: I3b1d8a2808782c9f34d50240000e20cb38d3680f
Reviewed-by: Konstantin Tokarev <annulen@yandex.ru>
Diffstat (limited to 'Source/JavaScriptCore/jit/JITOpcodes.cpp')
-rw-r--r--  Source/JavaScriptCore/jit/JITOpcodes.cpp | 1768
1 file changed, 745 insertions(+), 1023 deletions(-)
diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp
index 2a88f5052..738cb63fe 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2012-2016 Apple Inc. All rights reserved.
* Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -28,16 +28,23 @@
#if ENABLE(JIT)
#include "JIT.h"
-#include "Arguments.h"
+#include "BasicBlockLocation.h"
#include "CopiedSpaceInlines.h"
+#include "Debugger.h"
+#include "Exception.h"
#include "Heap.h"
#include "JITInlines.h"
-#include "JITStubCall.h"
#include "JSArray.h"
#include "JSCell.h"
#include "JSFunction.h"
-#include "JSPropertyNameIterator.h"
+#include "JSPropertyNameEnumerator.h"
#include "LinkBuffer.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "SlowPathCall.h"
+#include "TypeLocation.h"
+#include "TypeProfilerLog.h"
+#include "VirtualRegister.h"
+#include "Watchdog.h"
namespace JSC {
@@ -53,37 +60,17 @@ void JIT::emit_op_mov(Instruction* currentInstruction)
int dst = currentInstruction[1].u.operand;
int src = currentInstruction[2].u.operand;
- if (canBeOptimizedOrInlined()) {
- // Use simpler approach, since the DFG thinks that the last result register
- // is always set to the destination on every operation.
- emitGetVirtualRegister(src, regT0);
- emitPutVirtualRegister(dst);
- } else {
- if (m_codeBlock->isConstantRegisterIndex(src)) {
- if (!getConstantOperand(src).isNumber())
- store64(TrustedImm64(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
- else
- store64(Imm64(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
- if (dst == m_lastResultBytecodeRegister)
- killLastResultRegister();
- } else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) {
- // If either the src or dst is the cached register go though
- // get/put registers to make sure we track this correctly.
- emitGetVirtualRegister(src, regT0);
- emitPutVirtualRegister(dst);
- } else {
- // Perform the copy via regT1; do not disturb any mapping in regT0.
- load64(Address(callFrameRegister, src * sizeof(Register)), regT1);
- store64(regT1, Address(callFrameRegister, dst * sizeof(Register)));
- }
- }
+ emitGetVirtualRegister(src, regT0);
+ emitPutVirtualRegister(dst);
}
+
void JIT::emit_op_end(Instruction* currentInstruction)
{
- RELEASE_ASSERT(returnValueRegister != callFrameRegister);
- emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
- restoreReturnAddressBeforeReturn(Address(callFrameRegister, JSStack::ReturnPC * static_cast<int>(sizeof(Register))));
+ RELEASE_ASSERT(returnValueGPR != callFrameRegister);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
+ emitRestoreCalleeSaves();
+ emitFunctionEpilogue();
ret();
}
@@ -96,7 +83,7 @@ void JIT::emit_op_jmp(Instruction* currentInstruction)
void JIT::emit_op_new_object(Instruction* currentInstruction)
{
Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure();
- size_t allocationSize = JSObject::allocationSize(structure->inlineCapacity());
+ size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
MarkedAllocator* allocator = &m_vm->heap.allocatorForObjectWithoutDestructor(allocationSize);
RegisterID resultReg = regT0;
@@ -111,43 +98,54 @@ void JIT::emit_op_new_object(Instruction* currentInstruction)
void JIT::emitSlow_op_new_object(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_new_object);
- stubCall.addArgument(TrustedImmPtr(currentInstruction[3].u.objectAllocationProfile->structure()));
- stubCall.call(currentInstruction[1].u.operand);
+ int dst = currentInstruction[1].u.operand;
+ Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure();
+ callOperation(operationNewObject, structure);
+ emitStoreCell(dst, returnValueGPR);
}
-void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
+void JIT::emit_op_overrides_has_instance(Instruction* currentInstruction)
{
- unsigned baseVal = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int constructor = currentInstruction[2].u.operand;
+ int hasInstanceValue = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegister(hasInstanceValue, regT0);
- emitGetVirtualRegister(baseVal, regT0);
+ // We don't jump if we know what Symbol.hasInstance would do.
+ Jump customHasInstanceValue = branchPtr(NotEqual, regT0, TrustedImmPtr(m_codeBlock->globalObject()->functionProtoHasInstanceSymbolFunction()));
- // Check that baseVal is a cell.
- emitJumpSlowCaseIfNotJSCell(regT0, baseVal);
+ emitGetVirtualRegister(constructor, regT0);
+
+ // Check that the constructor has the 'ImplementsDefaultHasInstance' flag, i.e. the object is neither a C API user nor a bound function.
+ test8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance), regT0);
+ emitTagBool(regT0);
+ Jump done = jump();
- // Check that baseVal 'ImplementsHasInstance'.
- loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
- addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance)));
+ customHasInstanceValue.link(this);
+ move(TrustedImm32(ValueTrue), regT0);
+
+ done.link(this);
+ emitPutVirtualRegister(dst);
}
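
For orientation, the fast path above amounts to the following minimal C++ sketch; overridesHasInstance, typeInfoFlags(), and the defaultHasInstanceFunction parameter are hypothetical stand-ins for the JSC internals the emitted code touches, not names from this patch.

    // Sketch only: semantics of op_overrides_has_instance, with simplified
    // stand-ins for the JSC accessors used by the emitted code above.
    static bool overridesHasInstance(JSCell* hasInstanceValue, JSCell* constructor,
                                     JSCell* defaultHasInstanceFunction)
    {
        // Fast case: Symbol.hasInstance is still the default function, so the
        // answer depends only on the constructor's type-info flags.
        if (hasInstanceValue == defaultHasInstanceFunction)
            return !(constructor->typeInfoFlags() & ImplementsDefaultHasInstance);
        // Any custom Symbol.hasInstance counts as overriding.
        return true;
    }
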
void JIT::emit_op_instanceof(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
- unsigned proto = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+ int proto = currentInstruction[3].u.operand;
// Load the operands (baseVal, proto, and value respectively) into registers.
// We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
emitGetVirtualRegister(value, regT2);
emitGetVirtualRegister(proto, regT1);
- // Check that proto are cells. baseVal must be a cell - this is checked by op_check_has_instance.
+ // Check that value and proto are cells. baseVal must be a cell; this is checked by the get_by_id for Symbol.hasInstance.
emitJumpSlowCaseIfNotJSCell(regT2, value);
emitJumpSlowCaseIfNotJSCell(regT1, proto);
// Check that prototype is an object
- loadPtr(Address(regT1, JSCell::structureOffset()), regT3);
- addSlowCase(emitJumpIfNotObject(regT3));
+ addSlowCase(emitJumpIfCellNotObject(regT1));
// Optimistically load the result true, and start looping.
// Initially, regT1 still contains proto and regT2 still contains value.
@@ -157,7 +155,7 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
// Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
// Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
- loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
+ emitLoadStructure(regT2, regT2, regT3);
load64(Address(regT2, Structure::prototypeOffset()), regT2);
Jump isInstance = branchPtr(Equal, regT2, regT1);
emitJumpIfJSCell(regT2).linkTo(loop, this);
@@ -170,10 +168,16 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
emitPutVirtualRegister(dst);
}
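
The loop emitted here is the machine-code analogue of a plain prototype-chain walk. A rough sketch, assuming simplified JSValue/Structure accessors rather than the exact JSC API:

    // Sketch only: walk up value's prototype chain looking for proto.
    static bool instanceOf(JSValue value, JSCell* prototype)
    {
        JSValue current = value;
        while (current.isCell()) {
            // Load the cell's structure, then the structure's prototype.
            current = current.asCell()->structure()->storedPrototype();
            if (current == JSValue(prototype))
                return true;  // found proto on the chain
        }
        return false;         // chain ended in a non-cell (null), so no match
    }
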
+void JIT::emit_op_instanceof_custom(Instruction*)
+{
+ // This always goes to slow path since we expect it to be rare.
+ addSlowCase(jump());
+}
+
void JIT::emit_op_is_undefined(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
emitGetVirtualRegister(value, regT0);
Jump isCell = emitJumpIfJSCell(regT0);
@@ -182,56 +186,55 @@ void JIT::emit_op_is_undefined(Instruction* currentInstruction)
Jump done = jump();
isCell.link(this);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
- Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImm32(0), regT0);
Jump notMasqueradesAsUndefined = jump();
isMasqueradesAsUndefined.link(this);
+ emitLoadStructure(regT0, regT1, regT2);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1);
comparePtr(Equal, regT0, regT1, regT0);
notMasqueradesAsUndefined.link(this);
done.link(this);
- emitTagAsBoolImmediate(regT0);
+ emitTagBool(regT0);
emitPutVirtualRegister(dst);
}
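
The branches above compute the predicate below; a sketch under the assumption that typeInfoFlags() and structure()->globalObject() behave like the offsets used in the emitted code:

    // Sketch only: what op_is_undefined answers, with simplified helpers.
    static bool isUndefined(JSValue value, JSGlobalObject* lexicalGlobalObject)
    {
        if (!value.isCell())
            return value.isUndefined();
        JSCell* cell = value.asCell();
        if (!(cell->typeInfoFlags() & MasqueradesAsUndefined))
            return false;
        // A masquerading cell reads as undefined only when observed from the
        // global object it belongs to.
        return cell->structure()->globalObject() == lexicalGlobalObject;
    }
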
void JIT::emit_op_is_boolean(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
emitGetVirtualRegister(value, regT0);
xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
test64(Zero, regT0, TrustedImm32(static_cast<int32_t>(~1)), regT0);
- emitTagAsBoolImmediate(regT0);
+ emitTagBool(regT0);
emitPutVirtualRegister(dst);
}
void JIT::emit_op_is_number(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
emitGetVirtualRegister(value, regT0);
test64(NonZero, regT0, tagTypeNumberRegister, regT0);
- emitTagAsBoolImmediate(regT0);
+ emitTagBool(regT0);
emitPutVirtualRegister(dst);
}
void JIT::emit_op_is_string(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
emitGetVirtualRegister(value, regT0);
Jump isNotCell = emitJumpIfNotJSCell(regT0);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
- compare8(Equal, Address(regT1, Structure::typeInfoTypeOffset()), TrustedImm32(StringType), regT0);
- emitTagAsBoolImmediate(regT0);
+ compare8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType), regT0);
+ emitTagBool(regT0);
Jump done = jump();
isNotCell.link(this);
@@ -241,104 +244,37 @@ void JIT::emit_op_is_string(Instruction* currentInstruction)
emitPutVirtualRegister(dst);
}
-void JIT::emit_op_call(Instruction* currentInstruction)
+void JIT::emit_op_is_object(Instruction* currentInstruction)
{
- compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
-}
-
-void JIT::emit_op_call_eval(Instruction* currentInstruction)
-{
- compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex);
-}
-
-void JIT::emit_op_call_varargs(Instruction* currentInstruction)
-{
- compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++);
-}
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
-void JIT::emit_op_construct(Instruction* currentInstruction)
-{
- compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
-}
+ emitGetVirtualRegister(value, regT0);
+ Jump isNotCell = emitJumpIfNotJSCell(regT0);
-void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
-{
- int activation = currentInstruction[1].u.operand;
- Jump activationNotCreated = branchTest64(Zero, addressFor(activation));
- JITStubCall stubCall(this, cti_op_tear_off_activation);
- stubCall.addArgument(activation, regT2);
- stubCall.call();
- activationNotCreated.link(this);
-}
+ compare8(AboveOrEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType), regT0);
+ emitTagBool(regT0);
+ Jump done = jump();
-void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
-{
- int arguments = currentInstruction[1].u.operand;
- int activation = currentInstruction[2].u.operand;
+ isNotCell.link(this);
+ move(TrustedImm32(ValueFalse), regT0);
- Jump argsNotCreated = branchTest64(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(arguments))));
- JITStubCall stubCall(this, cti_op_tear_off_arguments);
- stubCall.addArgument(unmodifiedArgumentsRegister(arguments), regT2);
- stubCall.addArgument(activation, regT2);
- stubCall.call();
- argsNotCreated.link(this);
+ done.link(this);
+ emitPutVirtualRegister(dst);
}
void JIT::emit_op_ret(Instruction* currentInstruction)
{
ASSERT(callFrameRegister != regT1);
- ASSERT(regT1 != returnValueRegister);
- ASSERT(returnValueRegister != callFrameRegister);
+ ASSERT(regT1 != returnValueGPR);
+ ASSERT(returnValueGPR != callFrameRegister);
// Return the result in %eax.
- emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
- // Grab the return address.
- emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, regT1);
-
- // Restore our caller's "r".
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
-
- // Return.
- restoreReturnAddressBeforeReturn(regT1);
- ret();
-}
-
-void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
-{
- ASSERT(callFrameRegister != regT1);
- ASSERT(regT1 != returnValueRegister);
- ASSERT(returnValueRegister != callFrameRegister);
-
- // Return the result in %eax.
- emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
- Jump notJSCell = emitJumpIfNotJSCell(returnValueRegister);
- loadPtr(Address(returnValueRegister, JSCell::structureOffset()), regT2);
- Jump notObject = emitJumpIfNotObject(regT2);
-
- // Grab the return address.
- emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, regT1);
-
- // Restore our caller's "r".
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
-
- // Return.
- restoreReturnAddressBeforeReturn(regT1);
- ret();
-
- // Return 'this' in %eax.
- notJSCell.link(this);
- notObject.link(this);
- emitGetVirtualRegister(currentInstruction[2].u.operand, returnValueRegister);
-
- // Grab the return address.
- emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, regT1);
-
- // Restore our caller's "r".
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
-
- // Return.
- restoreReturnAddressBeforeReturn(regT1);
+ checkStackPointerAlignment();
+ emitRestoreCalleeSaves();
+ emitFunctionEpilogue();
ret();
}
@@ -350,7 +286,7 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction)
emitGetVirtualRegister(src, regT0);
Jump isImm = emitJumpIfNotJSCell(regT0);
- addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ addSlowCase(emitJumpIfCellObject(regT0));
isImm.link(this);
if (dst != src)
@@ -360,10 +296,8 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction)
void JIT::emit_op_strcat(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_strcat);
- stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
- stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_strcat);
+ slowPathCall.call();
}
void JIT::emit_op_not(Instruction* currentInstruction)
@@ -386,7 +320,7 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction)
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNumber(0)))), target);
- Jump isNonZero = emitJumpIfImmediateInteger(regT0);
+ Jump isNonZero = emitJumpIfInt(regT0);
addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsBoolean(false)))), target);
addSlowCase(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsBoolean(true)))));
@@ -396,15 +330,15 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction)
void JIT::emit_op_jeq_null(Instruction* currentInstruction)
{
- unsigned src = currentInstruction[1].u.operand;
+ int src = currentInstruction[1].u.operand;
unsigned target = currentInstruction[2].u.operand;
emitGetVirtualRegister(src, regT0);
Jump isImmediate = emitJumpIfNotJSCell(regT0);
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ emitLoadStructure(regT0, regT2, regT1);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target);
Jump masqueradesGlobalObjectIsForeign = jump();
@@ -419,15 +353,15 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction)
};
void JIT::emit_op_jneq_null(Instruction* currentInstruction)
{
- unsigned src = currentInstruction[1].u.operand;
+ int src = currentInstruction[1].u.operand;
unsigned target = currentInstruction[2].u.operand;
emitGetVirtualRegister(src, regT0);
Jump isImmediate = emitJumpIfNotJSCell(regT0);
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- addJump(branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
+ addJump(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
+ emitLoadStructure(regT0, regT2, regT1);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target);
Jump wasNotImmediate = jump();
@@ -442,7 +376,7 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
{
- unsigned src = currentInstruction[1].u.operand;
+ int src = currentInstruction[1].u.operand;
Special::Pointer ptr = currentInstruction[2].u.specialPointer;
unsigned target = currentInstruction[3].u.operand;
@@ -453,9 +387,9 @@ void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
void JIT::emit_op_eq(Instruction* currentInstruction)
{
emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
- emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ emitJumpSlowCaseIfNotInt(regT0, regT1, regT2);
compare32(Equal, regT1, regT0, regT0);
- emitTagAsBoolImmediate(regT0);
+ emitTagBool(regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
@@ -465,7 +399,7 @@ void JIT::emit_op_jtrue(Instruction* currentInstruction)
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
Jump isZero = branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNumber(0))));
- addJump(emitJumpIfImmediateInteger(regT0), target);
+ addJump(emitJumpIfInt(regT0), target);
addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsBoolean(true)))), target);
addSlowCase(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsBoolean(false)))));
@@ -476,170 +410,34 @@ void JIT::emit_op_jtrue(Instruction* currentInstruction)
void JIT::emit_op_neq(Instruction* currentInstruction)
{
emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
- emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ emitJumpSlowCaseIfNotInt(regT0, regT1, regT2);
compare32(NotEqual, regT1, regT0, regT0);
- emitTagAsBoolImmediate(regT0);
+ emitTagBool(regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
-void JIT::emit_op_bitxor(Instruction* currentInstruction)
-{
- emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
- emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
- xor64(regT1, regT0);
- emitFastArithReTagImmediate(regT0, regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_bitor(Instruction* currentInstruction)
-{
- emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
- emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
- or64(regT1, regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
void JIT::emit_op_throw(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_throw);
- stubCall.addArgument(currentInstruction[1].u.operand, regT2);
- stubCall.call();
- ASSERT(regT0 == returnValueRegister);
-#ifndef NDEBUG
- // cti_op_throw always changes it's return address,
- // this point in the code should never be reached.
- breakpoint();
-#endif
-}
-
-void JIT::emit_op_get_pnames(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int i = currentInstruction[3].u.operand;
- int size = currentInstruction[4].u.operand;
- int breakTarget = currentInstruction[5].u.operand;
-
- JumpList isNotObject;
-
- emitGetVirtualRegister(base, regT0);
- if (!m_codeBlock->isKnownNotImmediate(base))
- isNotObject.append(emitJumpIfNotJSCell(regT0));
- if (base != m_codeBlock->thisRegister() || m_codeBlock->isStrictMode()) {
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- isNotObject.append(emitJumpIfNotObject(regT2));
- }
-
- // We could inline the case where you have a valid cache, but
- // this call doesn't seem to be hot.
- Label isObject(this);
- JITStubCall getPnamesStubCall(this, cti_op_get_pnames);
- getPnamesStubCall.addArgument(regT0);
- getPnamesStubCall.call(dst);
- load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
- store64(tagTypeNumberRegister, addressFor(i));
- store32(TrustedImm32(Int32Tag), intTagFor(size));
- store32(regT3, intPayloadFor(size));
- Jump end = jump();
-
- isNotObject.link(this);
- move(regT0, regT1);
- and32(TrustedImm32(~TagBitUndefined), regT1);
- addJump(branch32(Equal, regT1, TrustedImm32(ValueNull)), breakTarget);
-
- JITStubCall toObjectStubCall(this, cti_to_object);
- toObjectStubCall.addArgument(regT0);
- toObjectStubCall.call(base);
- jump().linkTo(isObject, this);
-
- end.link(this);
-}
-
-void JIT::emit_op_next_pname(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int i = currentInstruction[3].u.operand;
- int size = currentInstruction[4].u.operand;
- int it = currentInstruction[5].u.operand;
- int target = currentInstruction[6].u.operand;
-
- JumpList callHasProperty;
-
- Label begin(this);
- load32(intPayloadFor(i), regT0);
- Jump end = branch32(Equal, regT0, intPayloadFor(size));
-
- // Grab key @ i
- loadPtr(addressFor(it), regT1);
- loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
-
- load64(BaseIndex(regT2, regT0, TimesEight), regT2);
-
- emitPutVirtualRegister(dst, regT2);
-
- // Increment i
- add32(TrustedImm32(1), regT0);
- store32(regT0, intPayloadFor(i));
-
- // Verify that i is valid:
- emitGetVirtualRegister(base, regT0);
-
- // Test base's structure
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))));
-
- // Test base's prototype chain
- loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3);
- loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
- addJump(branchTestPtr(Zero, Address(regT3)), target);
-
- Label checkPrototype(this);
- load64(Address(regT2, Structure::prototypeOffset()), regT2);
- callHasProperty.append(emitJumpIfNotJSCell(regT2));
- loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
- callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
- addPtr(TrustedImm32(sizeof(Structure*)), regT3);
- branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
-
- // Continue loop.
- addJump(jump(), target);
-
- // Slow case: Ask the object if i is valid.
- callHasProperty.link(this);
- emitGetVirtualRegister(dst, regT1);
- JITStubCall stubCall(this, cti_has_property);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
-
- // Test for valid key.
- addJump(branchTest32(NonZero, regT0), target);
- jump().linkTo(begin, this);
-
- // End of loop.
- end.link(this);
+ ASSERT(regT0 == returnValueGPR);
+ copyCalleeSavesToVMCalleeSavesBuffer();
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ callOperationNoExceptionCheck(operationThrow, regT0);
+ jumpToExceptionHandler();
}
void JIT::emit_op_push_with_scope(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_push_with_scope);
- stubCall.addArgument(currentInstruction[1].u.operand, regT2);
- stubCall.call();
-}
-
-void JIT::emit_op_pop_scope(Instruction*)
-{
- JITStubCall(this, cti_op_pop_scope).call();
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_push_with_scope);
+ slowPathCall.call();
}
void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int src1 = currentInstruction[2].u.operand;
+ int src2 = currentInstruction[3].u.operand;
emitGetVirtualRegisters(src1, regT0, src2, regT1);
@@ -650,18 +448,18 @@ void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqTy
// Jump slow if either is a double. First test if it's an integer, which is fine, and then test
// if it's a double.
- Jump leftOK = emitJumpIfImmediateInteger(regT0);
- addSlowCase(emitJumpIfImmediateNumber(regT0));
+ Jump leftOK = emitJumpIfInt(regT0);
+ addSlowCase(emitJumpIfNumber(regT0));
leftOK.link(this);
- Jump rightOK = emitJumpIfImmediateInteger(regT1);
- addSlowCase(emitJumpIfImmediateNumber(regT1));
+ Jump rightOK = emitJumpIfInt(regT1);
+ addSlowCase(emitJumpIfNumber(regT1));
rightOK.link(this);
if (type == OpStrictEq)
compare64(Equal, regT1, regT0, regT0);
else
compare64(NotEqual, regT1, regT0, regT0);
- emitTagAsBoolImmediate(regT0);
+ emitTagBool(regT0);
emitPutVirtualRegister(dst);
}
@@ -681,69 +479,101 @@ void JIT::emit_op_to_number(Instruction* currentInstruction)
int srcVReg = currentInstruction[2].u.operand;
emitGetVirtualRegister(srcVReg, regT0);
- addSlowCase(emitJumpIfNotImmediateNumber(regT0));
+ addSlowCase(emitJumpIfNotNumber(regT0));
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
-void JIT::emit_op_push_name_scope(Instruction* currentInstruction)
+void JIT::emit_op_to_string(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_push_name_scope);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[1].u.operand)));
- stubCall.addArgument(currentInstruction[2].u.operand, regT2);
- stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
- stubCall.call();
+ int srcVReg = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(srcVReg, regT0);
+
+ addSlowCase(emitJumpIfNotJSCell(regT0));
+ addSlowCase(branch8(NotEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType)));
+
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
}
void JIT::emit_op_catch(Instruction* currentInstruction)
{
- killLastResultRegister(); // FIXME: Implicitly treat op_catch as a labeled statement, and remove this line of code.
- move(regT0, callFrameRegister);
- peek(regT3, OBJECT_OFFSETOF(struct JITStackFrame, vm) / sizeof(void*));
- load64(Address(regT3, OBJECT_OFFSETOF(VM, exception)), regT0);
- store64(TrustedImm64(JSValue::encode(JSValue())), Address(regT3, OBJECT_OFFSETOF(VM, exception)));
+ restoreCalleeSavesFromVMCalleeSavesBuffer();
+
+ move(TrustedImmPtr(m_vm), regT3);
+ load64(Address(regT3, VM::callFrameForCatchOffset()), callFrameRegister);
+ storePtr(TrustedImmPtr(nullptr), Address(regT3, VM::callFrameForCatchOffset()));
+
+ addPtr(TrustedImm32(stackPointerOffsetFor(codeBlock()) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+
+ callOperationNoExceptionCheck(operationCheckIfExceptionIsUncatchableAndNotifyProfiler);
+ Jump isCatchableException = branchTest32(Zero, returnValueGPR);
+ jumpToExceptionHandler();
+ isCatchableException.link(this);
+
+ move(TrustedImmPtr(m_vm), regT3);
+ load64(Address(regT3, VM::exceptionOffset()), regT0);
+ store64(TrustedImm64(JSValue::encode(JSValue())), Address(regT3, VM::exceptionOffset()));
emitPutVirtualRegister(currentInstruction[1].u.operand);
+
+ load64(Address(regT0, Exception::valueOffset()), regT0);
+ emitPutVirtualRegister(currentInstruction[2].u.operand);
+}
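
In outline, the catch prologue corresponds to the sketch below (the uncatchable-exception re-check is elided to a comment); callFrameForCatch, exception(), and clearException() are simplified stand-ins for the VM fields addressed by the offsets above:

    // Sketch only: re-enter the handler's frame and consume the exception.
    static JSValue catchException(VM& vm, CallFrame*& callFrame)
    {
        // Re-establish the frame the handler belongs to, and clear the stash.
        callFrame = vm.callFrameForCatch;
        vm.callFrameForCatch = nullptr;

        // (The real code first re-checks that the exception is catchable and
        // keeps unwinding if it is not.)
        Exception* exception = vm.exception();
        vm.clearException();
        return exception->value();  // stored into the second operand register
    }
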
+
+void JIT::emit_op_assert(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_assert);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_create_lexical_environment(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_lexical_environment);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_get_parent_scope(Instruction* currentInstruction)
+{
+ int currentScope = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(currentScope, regT0);
+ loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
+ emitStoreCell(currentInstruction[1].u.operand, regT0);
}
void JIT::emit_op_switch_imm(Instruction* currentInstruction)
{
- unsigned tableIndex = currentInstruction[1].u.operand;
+ size_t tableIndex = currentInstruction[1].u.operand;
unsigned defaultOffset = currentInstruction[2].u.operand;
unsigned scrutinee = currentInstruction[3].u.operand;
// create jump table for switch destinations, track this switch statement.
- SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
+ SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
- jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+ jumpTable->ensureCTITable();
- JITStubCall stubCall(this, cti_op_switch_imm);
- stubCall.addArgument(scrutinee, regT2);
- stubCall.addArgument(TrustedImm32(tableIndex));
- stubCall.call();
- jump(regT0);
+ emitGetVirtualRegister(scrutinee, regT0);
+ callOperation(operationSwitchImmWithUnknownKeyType, regT0, tableIndex);
+ jump(returnValueGPR);
}
void JIT::emit_op_switch_char(Instruction* currentInstruction)
{
- unsigned tableIndex = currentInstruction[1].u.operand;
+ size_t tableIndex = currentInstruction[1].u.operand;
unsigned defaultOffset = currentInstruction[2].u.operand;
unsigned scrutinee = currentInstruction[3].u.operand;
// create jump table for switch destinations, track this switch statement.
- SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
+ SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
- jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+ jumpTable->ensureCTITable();
- JITStubCall stubCall(this, cti_op_switch_char);
- stubCall.addArgument(scrutinee, regT2);
- stubCall.addArgument(TrustedImm32(tableIndex));
- stubCall.call();
- jump(regT0);
+ emitGetVirtualRegister(scrutinee, regT0);
+ callOperation(operationSwitchCharWithUnknownKeyType, regT0, tableIndex);
+ jump(returnValueGPR);
}
void JIT::emit_op_switch_string(Instruction* currentInstruction)
{
- unsigned tableIndex = currentInstruction[1].u.operand;
+ size_t tableIndex = currentInstruction[1].u.operand;
unsigned defaultOffset = currentInstruction[2].u.operand;
unsigned scrutinee = currentInstruction[3].u.operand;
@@ -751,53 +581,39 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction)
StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset));
- JITStubCall stubCall(this, cti_op_switch_string);
- stubCall.addArgument(scrutinee, regT2);
- stubCall.addArgument(TrustedImm32(tableIndex));
- stubCall.call();
- jump(regT0);
+ emitGetVirtualRegister(scrutinee, regT0);
+ callOperation(operationSwitchStringWithUnknownKeyType, regT0, tableIndex);
+ jump(returnValueGPR);
}
void JIT::emit_op_throw_static_error(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_throw_static_error);
- if (!m_codeBlock->getConstant(currentInstruction[1].u.operand).isNumber())
- stubCall.addArgument(TrustedImm64(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
- else
- stubCall.addArgument(Imm64(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
- stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
- stubCall.call();
+ move(TrustedImm64(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))), regT0);
+ callOperation(operationThrowStaticError, regT0, currentInstruction[2].u.operand);
}
void JIT::emit_op_debug(Instruction* currentInstruction)
{
-#if ENABLE(DEBUG_WITH_BREAKPOINT)
- UNUSED_PARAM(currentInstruction);
- breakpoint();
-#else
- JITStubCall stubCall(this, cti_op_debug);
- stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
- stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
- stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
- stubCall.addArgument(TrustedImm32(currentInstruction[4].u.operand));
- stubCall.call();
-#endif
+ load32(codeBlock()->debuggerRequestsAddress(), regT0);
+ Jump noDebuggerRequests = branchTest32(Zero, regT0);
+ callOperation(operationDebug, currentInstruction[1].u.operand);
+ noDebuggerRequests.link(this);
}
void JIT::emit_op_eq_null(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int src1 = currentInstruction[2].u.operand;
emitGetVirtualRegister(src1, regT0);
Jump isImmediate = emitJumpIfNotJSCell(regT0);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImm32(0), regT0);
Jump wasNotMasqueradesAsUndefined = jump();
isMasqueradesAsUndefined.link(this);
+ emitLoadStructure(regT0, regT2, regT1);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
comparePtr(Equal, regT0, regT2, regT0);
@@ -811,25 +627,25 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction)
wasNotImmediate.link(this);
wasNotMasqueradesAsUndefined.link(this);
- emitTagAsBoolImmediate(regT0);
+ emitTagBool(regT0);
emitPutVirtualRegister(dst);
}
void JIT::emit_op_neq_null(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int src1 = currentInstruction[2].u.operand;
emitGetVirtualRegister(src1, regT0);
Jump isImmediate = emitJumpIfNotJSCell(regT0);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImm32(1), regT0);
Jump wasNotMasqueradesAsUndefined = jump();
isMasqueradesAsUndefined.link(this);
+ emitLoadStructure(regT0, regT2, regT1);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
comparePtr(NotEqual, regT0, regT2, regT0);
@@ -843,213 +659,173 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
wasNotImmediate.link(this);
wasNotMasqueradesAsUndefined.link(this);
- emitTagAsBoolImmediate(regT0);
+ emitTagBool(regT0);
emitPutVirtualRegister(dst);
}
void JIT::emit_op_enter(Instruction*)
{
- emitEnterOptimizationCheck();
-
// Even though CTI doesn't use them, we initialize our constant
// registers to zap stale pointers, to avoid unnecessarily prolonging
// object lifetime and increasing GC pressure.
size_t count = m_codeBlock->m_numVars;
- for (size_t j = 0; j < count; ++j)
- emitInitRegister(j);
-}
+ for (size_t j = CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters(); j < count; ++j)
+ emitInitRegister(virtualRegisterForLocal(j).offset());
-void JIT::emit_op_create_activation(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- Jump activationCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
- JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
- emitPutVirtualRegister(dst);
- activationCreated.link(this);
-}
+ emitWriteBarrier(m_codeBlock);
-void JIT::emit_op_create_arguments(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- Jump argsCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
- JITStubCall(this, cti_op_create_arguments).call();
- emitPutVirtualRegister(dst);
- emitPutVirtualRegister(unmodifiedArgumentsRegister(dst));
- argsCreated.link(this);
+ emitEnterOptimizationCheck();
}
-void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
+void JIT::emit_op_get_scope(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
-
- store64(TrustedImm64((int64_t)0), Address(callFrameRegister, sizeof(Register) * dst));
+ int dst = currentInstruction[1].u.operand;
+ emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
+ loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT0);
+ emitStoreCell(dst, regT0);
}
-void JIT::emit_op_convert_this(Instruction* currentInstruction)
+void JIT::emit_op_to_this(Instruction* currentInstruction)
{
+ WriteBarrierBase<Structure>* cachedStructure = &currentInstruction[2].u.structure;
emitGetVirtualRegister(currentInstruction[1].u.operand, regT1);
emitJumpSlowCaseIfNotJSCell(regT1);
- if (shouldEmitProfiling()) {
- loadPtr(Address(regT1, JSCell::structureOffset()), regT0);
- emitValueProfilingSite();
- }
- addSlowCase(branchPtr(Equal, Address(regT1, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
-}
-void JIT::emit_op_get_callee(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
- emitValueProfilingSite();
- emitPutVirtualRegister(result);
+ addSlowCase(branch8(NotEqual, Address(regT1, JSCell::typeInfoTypeOffset()), TrustedImm32(FinalObjectType)));
+ loadPtr(cachedStructure, regT2);
+ addSlowCase(branchTestPtr(Zero, regT2));
+ load32(Address(regT2, Structure::structureIDOffset()), regT2);
+ addSlowCase(branch32(NotEqual, Address(regT1, JSCell::structureIDOffset()), regT2));
}
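
Put another way, the op_to_this fast path survives only when every check below passes; a sketch with assumed accessor names (type(), structureID()):

    // Sketch only: conditions under which the fast path above holds.
    static bool toThisFastPathApplies(JSValue thisValue, Structure* cachedStructure)
    {
        if (!thisValue.isCell())
            return false;                  // slow path: not a cell
        JSCell* cell = thisValue.asCell();
        if (cell->type() != FinalObjectType)
            return false;                  // slow path: not a final object
        if (!cachedStructure)
            return false;                  // slow path: nothing cached yet
        // Fast path only if the structure still matches the cached one.
        return cell->structureID() == cachedStructure->structureID();
    }
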
void JIT::emit_op_create_this(Instruction* currentInstruction)
{
int callee = currentInstruction[2].u.operand;
+ WriteBarrierBase<JSCell>* cachedFunction = &currentInstruction[4].u.jsCell;
RegisterID calleeReg = regT0;
+ RegisterID rareDataReg = regT4;
RegisterID resultReg = regT0;
RegisterID allocatorReg = regT1;
RegisterID structureReg = regT2;
+ RegisterID cachedFunctionReg = regT4;
RegisterID scratchReg = regT3;
emitGetVirtualRegister(callee, calleeReg);
- loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
- loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
+ loadPtr(Address(calleeReg, JSFunction::offsetOfRareData()), rareDataReg);
+ addSlowCase(branchTestPtr(Zero, rareDataReg));
+ loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
+ loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
addSlowCase(branchTestPtr(Zero, allocatorReg));
+ loadPtr(cachedFunction, cachedFunctionReg);
+ Jump hasSeenMultipleCallees = branchPtr(Equal, cachedFunctionReg, TrustedImmPtr(JSCell::seenMultipleCalleeObjects()));
+ addSlowCase(branchPtr(NotEqual, calleeReg, cachedFunctionReg));
+ hasSeenMultipleCallees.link(this);
+
emitAllocateJSObject(allocatorReg, structureReg, resultReg, scratchReg);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
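
The slow-case branches registered above map onto the conditions in this sketch; rareData(), objectAllocationProfile(), and seenMultipleCalleeObjects() are used here as illustrative stand-ins:

    // Sketch only: when the op_create_this fast path above applies.
    static bool createThisFastPathApplies(JSFunction* callee, JSCell* cachedFunction)
    {
        FunctionRareData* rareData = callee->rareData();
        if (!rareData)
            return false;                  // slow path: no rare data yet
        if (!rareData->objectAllocationProfile()->allocator())
            return false;                  // slow path: no allocation profile yet
        // The cached callee must match, unless the site already saw many callees.
        return cachedFunction == JSCell::seenMultipleCalleeObjects()
            || cachedFunction == callee;
    }
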
void JIT::emitSlow_op_create_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
+ linkSlowCase(iter); // doesn't have rare data
linkSlowCase(iter); // doesn't have an allocation profile
linkSlowCase(iter); // allocation failed
+ linkSlowCase(iter); // cached function didn't match
- JITStubCall stubCall(this, cti_op_create_this);
- stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_this);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_check_tdz(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ addSlowCase(branchTest64(Zero, regT0));
+}
+
+void JIT::emitSlow_op_check_tdz(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_tdz_error);
+ slowPathCall.call();
}
void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_profile_will_call);
- stubCall.addArgument(currentInstruction[1].u.operand, regT1);
- stubCall.call();
+ Jump profilerDone = branchTestPtr(Zero, AbsoluteAddress(m_vm->enabledProfilerAddress()));
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ callOperation(operationProfileWillCall, regT0);
+ profilerDone.link(this);
}
void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_profile_did_call);
- stubCall.addArgument(currentInstruction[1].u.operand, regT1);
- stubCall.call();
+ Jump profilerDone = branchTestPtr(Zero, AbsoluteAddress(m_vm->enabledProfilerAddress()));
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ callOperation(operationProfileDidCall, regT0);
+ profilerDone.link(this);
}
// Slow cases
-void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_to_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- void* globalThis = m_codeBlock->globalObject()->globalThis();
-
linkSlowCase(iter);
- if (shouldEmitProfiling())
- move(TrustedImm64((JSValue::encode(jsUndefined()))), regT0);
- Jump isNotUndefined = branch64(NotEqual, regT1, TrustedImm64(JSValue::encode(jsUndefined())));
- emitValueProfilingSite();
- move(TrustedImm64(JSValue::encode(JSValue(static_cast<JSCell*>(globalThis)))), regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand, regT0);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_convert_this));
-
linkSlowCase(iter);
- if (shouldEmitProfiling())
- move(TrustedImm64(JSValue::encode(m_vm->stringStructure.get())), regT0);
- isNotUndefined.link(this);
- emitValueProfilingSite();
- JITStubCall stubCall(this, cti_op_convert_this);
- stubCall.addArgument(regT1);
- stubCall.call(currentInstruction[1].u.operand);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_this);
+ slowPathCall.call();
}
void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_to_primitive);
- stubCall.addArgument(regT0);
- stubCall.call(currentInstruction[1].u.operand);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_primitive);
+ slowPathCall.call();
}
void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
- JITStubCall stubCall(this, cti_op_not);
- stubCall.addArgument(regT0);
- stubCall.call(currentInstruction[1].u.operand);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_not);
+ slowPathCall.call();
}
void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jtrue);
- stubCall.addArgument(regT0);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), currentInstruction[2].u.operand); // inverted!
+ callOperation(operationConvertJSValueToBoolean, regT0);
+ emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), currentInstruction[2].u.operand); // inverted!
}
void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jtrue);
- stubCall.addArgument(regT0);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand);
-}
-
-void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_bitxor);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_bitor);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call(currentInstruction[1].u.operand);
+ callOperation(operationConvertJSValueToBoolean, regT0);
+ emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), currentInstruction[2].u.operand);
}
void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_eq);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
- emitTagAsBoolImmediate(regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
+ callOperation(operationCompareEq, regT0, regT1);
+ emitTagBool(returnValueGPR);
+ emitPutVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
}
void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_eq);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
+ callOperation(operationCompareEq, regT0, regT1);
xor32(TrustedImm32(0x1), regT0);
- emitTagAsBoolImmediate(regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
+ emitTagBool(returnValueGPR);
+ emitPutVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
}
void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -1057,10 +833,8 @@ void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseE
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_stricteq);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call(currentInstruction[1].u.operand);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_stricteq);
+ slowPathCall.call();
}
void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -1068,196 +842,55 @@ void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCase
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_nstricteq);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emitSlow_op_check_has_instance(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
- unsigned baseVal = currentInstruction[3].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, baseVal);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_check_has_instance);
- stubCall.addArgument(value, regT2);
- stubCall.addArgument(baseVal, regT2);
- stubCall.call(dst);
-
- emitJumpSlowToHot(jump(), currentInstruction[4].u.operand);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_nstricteq);
+ slowPathCall.call();
}
void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
- unsigned proto = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+ int proto = currentInstruction[3].u.operand;
linkSlowCaseIfNotJSCell(iter, value);
linkSlowCaseIfNotJSCell(iter, proto);
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_instanceof);
- stubCall.addArgument(value, regT2);
- stubCall.addArgument(proto, regT2);
- stubCall.call(dst);
-}
-
-void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++);
-}
-
-void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(op_call_eval, currentInstruction, iter, m_callLinkInfoIndex);
-}
-
-void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(op_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
-}
-
-void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(op_construct, currentInstruction, iter, m_callLinkInfoIndex++);
-}
-
-void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_to_number);
- stubCall.addArgument(regT0);
- stubCall.call(currentInstruction[1].u.operand);
+ emitGetVirtualRegister(value, regT0);
+ emitGetVirtualRegister(proto, regT1);
+ callOperation(operationInstanceOf, dst, regT0, regT1);
}
-void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
+void JIT::emitSlow_op_instanceof_custom(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int dst = currentInstruction[1].u.operand;
- int argumentsRegister = currentInstruction[2].u.operand;
- addSlowCase(branchTest64(NonZero, addressFor(argumentsRegister)));
- emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
- sub32(TrustedImm32(1), regT0);
- emitFastArithReTagImmediate(regT0, regT0);
- emitPutVirtualRegister(dst, regT0);
-}
+ int value = currentInstruction[2].u.operand;
+ int constructor = currentInstruction[3].u.operand;
+ int hasInstanceValue = currentInstruction[4].u.operand;
-void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
linkSlowCase(iter);
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
-
- emitGetVirtualRegister(base, regT0);
- JITStubCall stubCall(this, cti_op_get_by_id_generic);
- stubCall.addArgument(regT0);
- stubCall.addArgument(TrustedImmPtr(ident));
- stubCall.call(dst);
+ emitGetVirtualRegister(value, regT0);
+ emitGetVirtualRegister(constructor, regT1);
+ emitGetVirtualRegister(hasInstanceValue, regT2);
+ callOperation(operationInstanceOfCustom, regT0, regT1, regT2);
+ emitTagBool(returnValueGPR);
+ emitPutVirtualRegister(dst, returnValueGPR);
}
-void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
+void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- int dst = currentInstruction[1].u.operand;
- int argumentsRegister = currentInstruction[2].u.operand;
- int property = currentInstruction[3].u.operand;
- addSlowCase(branchTest64(NonZero, addressFor(argumentsRegister)));
- emitGetVirtualRegister(property, regT1);
- addSlowCase(emitJumpIfNotImmediateInteger(regT1));
- add32(TrustedImm32(1), regT1);
- // regT1 now contains the integer index of the argument we want, including this
- emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT2);
- addSlowCase(branch32(AboveOrEqual, regT1, regT2));
+ linkSlowCase(iter);
- neg32(regT1);
- signExtend32ToPtr(regT1, regT1);
- load64(BaseIndex(callFrameRegister, regT1, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0);
- emitValueProfilingSite();
- emitPutVirtualRegister(dst, regT0);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_number);
+ slowPathCall.call();
}
-void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_to_string(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned arguments = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- linkSlowCase(iter);
- Jump skipArgumentsCreation = jump();
-
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall(this, cti_op_create_arguments).call();
- emitPutVirtualRegister(arguments);
- emitPutVirtualRegister(unmodifiedArgumentsRegister(arguments));
-
- skipArgumentsCreation.link(this);
- JITStubCall stubCall(this, cti_op_get_by_val_generic);
- stubCall.addArgument(arguments, regT2);
- stubCall.addArgument(property, regT2);
- stubCall.callWithValueProfiling(dst);
-}
-
-void JIT::emit_op_put_to_base(Instruction* currentInstruction)
-{
- int base = currentInstruction[1].u.operand;
- int id = currentInstruction[2].u.operand;
- int value = currentInstruction[3].u.operand;
-
- PutToBaseOperation* operation = currentInstruction[4].u.putToBaseOperation;
- switch (operation->m_kind) {
- case PutToBaseOperation::GlobalVariablePutChecked:
- addSlowCase(branchTest8(NonZero, AbsoluteAddress(operation->m_predicatePointer)));
- case PutToBaseOperation::GlobalVariablePut: {
- JSGlobalObject* globalObject = m_codeBlock->globalObject();
- if (operation->m_isDynamic) {
- emitGetVirtualRegister(base, regT0);
- addSlowCase(branchPtr(NotEqual, regT0, TrustedImmPtr(globalObject)));
- }
- emitGetVirtualRegister(value, regT0);
- store64(regT0, operation->m_registerAddress);
- if (Heap::isWriteBarrierEnabled())
- emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
- return;
- }
- case PutToBaseOperation::VariablePut: {
- emitGetVirtualRegisters(base, regT0, value, regT1);
- loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT2);
- store64(regT1, Address(regT2, operation->m_offset * sizeof(Register)));
- if (Heap::isWriteBarrierEnabled())
- emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
- return;
- }
+ linkSlowCase(iter); // Not JSCell.
+ linkSlowCase(iter); // Not JSString.
- case PutToBaseOperation::GlobalPropertyPut: {
- emitGetVirtualRegisters(base, regT0, value, regT1);
- loadPtr(&operation->m_structure, regT2);
- addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), regT2));
- ASSERT(!operation->m_structure || !operation->m_structure->inlineCapacity());
- loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
- load32(&operation->m_offsetInButterfly, regT3);
- signExtend32ToPtr(regT3, regT3);
- store64(regT1, BaseIndex(regT2, regT3, TimesEight));
- if (Heap::isWriteBarrierEnabled())
- emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
- return;
- }
-
- case PutToBaseOperation::Uninitialised:
- case PutToBaseOperation::Readonly:
- case PutToBaseOperation::Generic:
- JITStubCall stubCall(this, cti_op_put_to_base);
-
- stubCall.addArgument(TrustedImm32(base));
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(id)));
- stubCall.addArgument(TrustedImm32(value));
- stubCall.addArgument(TrustedImmPtr(operation));
- stubCall.call();
- return;
- }
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_string);
+ slowPathCall.call();
}
#endif // USE(JSVALUE64)
@@ -1265,13 +898,10 @@ void JIT::emit_op_put_to_base(Instruction* currentInstruction)
void JIT::emit_op_loop_hint(Instruction*)
{
// Emit the JIT optimization check:
- if (canBeOptimized())
+ if (canBeOptimized()) {
addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()),
AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
-
- // Emit the watchdog timer check:
- if (m_vm->watchdog.isEnabled())
- addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->watchdog.timerDidFireAddress())));
+ }
}
void JIT::emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator& iter)
@@ -1281,442 +911,534 @@ void JIT::emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator& i
if (canBeOptimized()) {
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_optimize);
- stubCall.addArgument(TrustedImm32(m_bytecodeOffset));
- stubCall.call();
+ copyCalleeSavesFromFrameOrRegisterToVMCalleeSavesBuffer();
+
+ callOperation(operationOptimize, m_bytecodeOffset);
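+ // operationOptimize returns the code address to OSR-enter the optimized code at,
+ // or null if no optimized entry is available yet and we should stay in the baseline JIT.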
+ Jump noOptimizedEntry = branchTestPtr(Zero, returnValueGPR);
+ if (!ASSERT_DISABLED) {
+ Jump ok = branchPtr(MacroAssembler::Above, returnValueGPR, TrustedImmPtr(bitwise_cast<void*>(static_cast<intptr_t>(1000))));
+ abortWithReason(JITUnreasonableLoopHintJumpTarget);
+ ok.link(this);
+ }
+ jump(returnValueGPR);
+ noOptimizedEntry.link(this);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));
}
+#else
+ UNUSED_PARAM(iter);
#endif
+}
- // Emit the slow path of the watchdog timer check:
- if (m_vm->watchdog.isEnabled()) {
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_handle_watchdog_timer);
- stubCall.call();
+void JIT::emit_op_watchdog(Instruction*)
+{
+ ASSERT(m_vm->watchdog());
+ addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->watchdog()->timerDidFireAddress())));
+}
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));
- }
+void JIT::emitSlow_op_watchdog(Instruction*, Vector<SlowCaseEntry>::iterator& iter)
+{
+ ASSERT(m_vm->watchdog());
+ linkSlowCase(iter);
+ callOperation(operationHandleWatchdogTimer);
+}
+void JIT::emit_op_new_regexp(Instruction* currentInstruction)
+{
+ callOperation(operationNewRegexp, currentInstruction[1].u.operand, m_codeBlock->regexp(currentInstruction[2].u.operand));
}
-void JIT::emit_resolve_operations(ResolveOperations* resolveOperations, const int* baseVR, const int* valueVR)
+void JIT::emitNewFuncCommon(Instruction* currentInstruction)
{
+ Jump lazyJump;
+ int dst = currentInstruction[1].u.operand;
-#if USE(JSVALUE32_64)
- unmap();
+#if USE(JSVALUE64)
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
#else
- killLastResultRegister();
+ emitLoadPayload(currentInstruction[2].u.operand, regT0);
#endif
-
- if (resolveOperations->isEmpty()) {
- addSlowCase(jump());
- return;
+ FunctionExecutable* funcExec = m_codeBlock->functionDecl(currentInstruction[3].u.operand);
+
+ OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(currentInstruction->u.opcode);
+ if (opcodeID == op_new_func)
+ callOperation(operationNewFunction, dst, regT0, funcExec);
+ else {
+ ASSERT(opcodeID == op_new_generator_func);
+ callOperation(operationNewGeneratorFunction, dst, regT0, funcExec);
}
+}
- const RegisterID value = regT0;
-#if USE(JSVALUE32_64)
- const RegisterID valueTag = regT1;
-#endif
- const RegisterID scope = regT2;
- const RegisterID scratch = regT3;
-
- JSGlobalObject* globalObject = m_codeBlock->globalObject();
- ResolveOperation* pc = resolveOperations->data();
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, scope);
- bool setBase = false;
- bool resolvingBase = true;
- while (resolvingBase) {
- switch (pc->m_operation) {
- case ResolveOperation::ReturnGlobalObjectAsBase:
- move(TrustedImmPtr(globalObject), value);
-#if USE(JSVALUE32_64)
- move(TrustedImm32(JSValue::CellTag), valueTag);
-#endif
- emitValueProfilingSite();
- emitStoreCell(*baseVR, value);
- return;
- case ResolveOperation::SetBaseToGlobal:
- RELEASE_ASSERT(baseVR);
- setBase = true;
- move(TrustedImmPtr(globalObject), scratch);
- emitStoreCell(*baseVR, scratch);
- resolvingBase = false;
- ++pc;
- break;
- case ResolveOperation::SetBaseToUndefined: {
- RELEASE_ASSERT(baseVR);
- setBase = true;
+void JIT::emit_op_new_func(Instruction* currentInstruction)
+{
+ emitNewFuncCommon(currentInstruction);
+}
+
+void JIT::emit_op_new_generator_func(Instruction* currentInstruction)
+{
+ emitNewFuncCommon(currentInstruction);
+}
+
+void JIT::emitNewFuncExprCommon(Instruction* currentInstruction)
+{
+ Jump notUndefinedScope;
+ int dst = currentInstruction[1].u.operand;
#if USE(JSVALUE64)
- move(TrustedImm64(JSValue::encode(jsUndefined())), scratch);
- emitPutVirtualRegister(*baseVR, scratch);
-#else
- emitStore(*baseVR, jsUndefined());
-#endif
- resolvingBase = false;
- ++pc;
- break;
- }
- case ResolveOperation::SetBaseToScope:
- RELEASE_ASSERT(baseVR);
- setBase = true;
- emitStoreCell(*baseVR, scope);
- resolvingBase = false;
- ++pc;
- break;
- case ResolveOperation::ReturnScopeAsBase:
- emitStoreCell(*baseVR, scope);
- RELEASE_ASSERT(value == regT0);
- move(scope, value);
-#if USE(JSVALUE32_64)
- move(TrustedImm32(JSValue::CellTag), valueTag);
-#endif
- emitValueProfilingSite();
- return;
- case ResolveOperation::SkipTopScopeNode: {
-#if USE(JSVALUE32_64)
- Jump activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
-#else
- Jump activationNotCreated = branchTest64(Zero, addressFor(m_codeBlock->activationRegister()));
-#endif
- loadPtr(Address(scope, JSScope::offsetOfNext()), scope);
- activationNotCreated.link(this);
- ++pc;
- break;
- }
- case ResolveOperation::CheckForDynamicEntriesBeforeGlobalScope: {
- move(scope, regT3);
- loadPtr(Address(regT3, JSScope::offsetOfNext()), regT1);
- Jump atTopOfScope = branchTestPtr(Zero, regT1);
- Label loopStart = label();
- loadPtr(Address(regT3, JSCell::structureOffset()), regT2);
- Jump isActivation = branchPtr(Equal, regT2, TrustedImmPtr(globalObject->activationStructure()));
- addSlowCase(branchPtr(NotEqual, regT2, TrustedImmPtr(globalObject->nameScopeStructure())));
- isActivation.link(this);
- move(regT1, regT3);
- loadPtr(Address(regT3, JSScope::offsetOfNext()), regT1);
- branchTestPtr(NonZero, regT1, loopStart);
- atTopOfScope.link(this);
- ++pc;
- break;
- }
- case ResolveOperation::SkipScopes: {
- for (int i = 0; i < pc->m_scopesToSkip; i++)
- loadPtr(Address(scope, JSScope::offsetOfNext()), scope);
- ++pc;
- break;
- }
- case ResolveOperation::Fail:
- addSlowCase(jump());
- return;
- default:
- resolvingBase = false;
- }
- }
- if (baseVR && !setBase)
- emitStoreCell(*baseVR, scope);
-
- RELEASE_ASSERT(valueVR);
- ResolveOperation* resolveValueOperation = pc;
- switch (resolveValueOperation->m_operation) {
- case ResolveOperation::GetAndReturnGlobalProperty: {
- // Verify structure.
- move(TrustedImmPtr(globalObject), regT2);
- move(TrustedImmPtr(resolveValueOperation), regT3);
- loadPtr(Address(regT3, OBJECT_OFFSETOF(ResolveOperation, m_structure)), regT1);
- addSlowCase(branchPtr(NotEqual, regT1, Address(regT2, JSCell::structureOffset())));
-
- // Load property.
- load32(Address(regT3, OBJECT_OFFSETOF(ResolveOperation, m_offset)), regT3);
-
- // regT2: GlobalObject
- // regT3: offset
-#if USE(JSVALUE32_64)
- compileGetDirectOffset(regT2, valueTag, value, regT3, KnownNotFinal);
-#else
- compileGetDirectOffset(regT2, value, regT3, regT1, KnownNotFinal);
-#endif
- break;
- }
- case ResolveOperation::GetAndReturnGlobalVarWatchable:
- case ResolveOperation::GetAndReturnGlobalVar: {
-#if USE(JSVALUE32_64)
- load32(reinterpret_cast<char*>(pc->m_registerAddress) + OBJECT_OFFSETOF(JSValue, u.asBits.tag), valueTag);
- load32(reinterpret_cast<char*>(pc->m_registerAddress) + OBJECT_OFFSETOF(JSValue, u.asBits.payload), value);
-#else
- load64(reinterpret_cast<char*>(pc->m_registerAddress), value);
-#endif
- break;
- }
- case ResolveOperation::GetAndReturnScopedVar: {
- loadPtr(Address(scope, JSVariableObject::offsetOfRegisters()), scope);
-#if USE(JSVALUE32_64)
- load32(Address(scope, pc->m_offset * sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), valueTag);
- load32(Address(scope, pc->m_offset * sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+ notUndefinedScope = branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsUndefined())));
+ store64(TrustedImm64(JSValue::encode(jsUndefined())), Address(callFrameRegister, sizeof(Register) * dst));
#else
- load64(Address(scope, pc->m_offset * sizeof(Register)), value);
+ emitLoadPayload(currentInstruction[2].u.operand, regT0);
+ notUndefinedScope = branch32(NotEqual, tagFor(currentInstruction[2].u.operand), TrustedImm32(JSValue::UndefinedTag));
+ emitStore(dst, jsUndefined());
#endif
- break;
- }
- default:
- CRASH();
- return;
+ Jump done = jump();
+ notUndefinedScope.link(this);
+
+ FunctionExecutable* function = m_codeBlock->functionExpr(currentInstruction[3].u.operand);
+ OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(currentInstruction->u.opcode);
+
+ if (opcodeID == op_new_func_exp || opcodeID == op_new_arrow_func_exp)
+ callOperation(operationNewFunction, dst, regT0, function);
+ else {
+ ASSERT(opcodeID == op_new_generator_func_exp);
+ callOperation(operationNewGeneratorFunction, dst, regT0, function);
}
-#if USE(JSVALUE32_64)
- emitStore(*valueVR, valueTag, value);
-#else
- emitPutVirtualRegister(*valueVR, value);
-#endif
- emitValueProfilingSite();
+ done.link(this);
}
-void JIT::emitSlow_link_resolve_operations(ResolveOperations* resolveOperations, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
{
- if (resolveOperations->isEmpty()) {
- linkSlowCase(iter);
- return;
- }
+ emitNewFuncExprCommon(currentInstruction);
+}
- ResolveOperation* pc = resolveOperations->data();
- bool resolvingBase = true;
- while (resolvingBase) {
- switch (pc->m_operation) {
- case ResolveOperation::ReturnGlobalObjectAsBase:
- return;
- case ResolveOperation::SetBaseToGlobal:
- resolvingBase = false;
- ++pc;
- break;
- case ResolveOperation::SetBaseToUndefined: {
- resolvingBase = false;
- ++pc;
- break;
- }
- case ResolveOperation::SetBaseToScope:
- resolvingBase = false;
- ++pc;
- break;
- case ResolveOperation::ReturnScopeAsBase:
- return;
- case ResolveOperation::SkipTopScopeNode: {
- ++pc;
- break;
- }
- case ResolveOperation::SkipScopes:
- ++pc;
- break;
- case ResolveOperation::Fail:
- linkSlowCase(iter);
- return;
- case ResolveOperation::CheckForDynamicEntriesBeforeGlobalScope: {
- linkSlowCase(iter);
- ++pc;
- break;
- }
- default:
- resolvingBase = false;
- }
- }
- ResolveOperation* resolveValueOperation = pc;
- switch (resolveValueOperation->m_operation) {
- case ResolveOperation::GetAndReturnGlobalProperty: {
- linkSlowCase(iter);
- break;
- }
- case ResolveOperation::GetAndReturnGlobalVarWatchable:
- case ResolveOperation::GetAndReturnGlobalVar:
- break;
- case ResolveOperation::GetAndReturnScopedVar:
- break;
- default:
- CRASH();
- return;
- }
+void JIT::emit_op_new_generator_func_exp(Instruction* currentInstruction)
+{
+ emitNewFuncExprCommon(currentInstruction);
+}
+
+void JIT::emit_op_new_arrow_func_exp(Instruction* currentInstruction)
+{
+ emitNewFuncExprCommon(currentInstruction);
+}
+
+void JIT::emit_op_new_array(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int valuesIndex = currentInstruction[2].u.operand;
+ int size = currentInstruction[3].u.operand;
+ addPtr(TrustedImm32(valuesIndex * sizeof(Register)), callFrameRegister, regT0);
+ callOperation(operationNewArrayWithProfile, dst,
+ currentInstruction[4].u.arrayAllocationProfile, regT0, size);
}
-void JIT::emit_op_resolve(Instruction* currentInstruction)
+void JIT::emit_op_new_array_with_size(Instruction* currentInstruction)
{
- ResolveOperations* operations = currentInstruction[3].u.resolveOperations;
int dst = currentInstruction[1].u.operand;
- emit_resolve_operations(operations, 0, &dst);
+ int sizeIndex = currentInstruction[2].u.operand;
+#if USE(JSVALUE64)
+ emitGetVirtualRegister(sizeIndex, regT0);
+ callOperation(operationNewArrayWithSizeAndProfile, dst,
+ currentInstruction[3].u.arrayAllocationProfile, regT0);
+#else
+ emitLoad(sizeIndex, regT1, regT0);
+ callOperation(operationNewArrayWithSizeAndProfile, dst,
+ currentInstruction[3].u.arrayAllocationProfile, regT1, regT0);
+#endif
}
-void JIT::emitSlow_op_resolve(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_new_array_buffer(Instruction* currentInstruction)
{
- ResolveOperations* operations = currentInstruction[3].u.resolveOperations;
- emitSlow_link_resolve_operations(operations, iter);
- JITStubCall stubCall(this, cti_op_resolve);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(TrustedImmPtr(currentInstruction[3].u.resolveOperations));
- stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
+ int dst = currentInstruction[1].u.operand;
+ int valuesIndex = currentInstruction[2].u.operand;
+ int size = currentInstruction[3].u.operand;
+ const JSValue* values = codeBlock()->constantBuffer(valuesIndex);
+ callOperation(operationNewArrayBufferWithProfile, dst, currentInstruction[4].u.arrayAllocationProfile, values, size);
}
-void JIT::emit_op_resolve_base(Instruction* currentInstruction)
+#if USE(JSVALUE64)
+void JIT::emit_op_has_structure_property(Instruction* currentInstruction)
{
- ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
int dst = currentInstruction[1].u.operand;
- emit_resolve_operations(operations, &dst, 0);
+ int base = currentInstruction[2].u.operand;
+ int enumerator = currentInstruction[4].u.operand;
+
+ emitGetVirtualRegister(base, regT0);
+ emitGetVirtualRegister(enumerator, regT1);
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+
+ load32(Address(regT0, JSCell::structureIDOffset()), regT0);
+ addSlowCase(branch32(NotEqual, regT0, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset())));
+
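+ // The structure still matches the one cached when the enumerator was created, so
+ // the property is guaranteed to remain an own structure property; the result is statically true.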
+ move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
+ emitPutVirtualRegister(dst);
}
-void JIT::emitSlow_op_resolve_base(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
- ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
- emitSlow_link_resolve_operations(operations, iter);
- JITStubCall stubCall(this, currentInstruction[3].u.operand ? cti_op_resolve_base_strict_put : cti_op_resolve_base);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(TrustedImmPtr(currentInstruction[4].u.resolveOperations));
- stubCall.addArgument(TrustedImmPtr(currentInstruction[5].u.putToBaseOperation));
- stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
+ Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
+
+ PatchableJump badType;
+
+ // FIXME: Add support for other types like TypedArrays and Arguments.
+ // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
+ JumpList slowCases = emitLoadForArrayMode(currentInstruction, arrayMode, badType);
+ move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
+ Jump done = jump();
+
+ LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
+
+ patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+ patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+
+ patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
+
+ byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
+ m_codeBlock, patchBuffer,
+ ("Baseline has_indexed_property stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
+
+ MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
+ MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationHasIndexedPropertyGeneric));
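+ // Repatch the inline badType jump to enter the new stub, and the slow-path call to
+ // the generic operation, so subsequent misses don't regenerate the stub.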
}
-void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
+void JIT::emit_op_has_indexed_property(Instruction* currentInstruction)
{
- ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
- int base = currentInstruction[1].u.operand;
- int value = currentInstruction[2].u.operand;
- emit_resolve_operations(operations, &base, &value);
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+ ByValInfo* byValInfo = m_codeBlock->addByValInfo();
+
+ emitGetVirtualRegisters(base, regT0, property, regT1);
+
+ // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
+ // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
+ // the number was negative, since m_vectorLength is always less than intmax (because the total
+ // allocation size is always less than 4GB). As such, zero-extending will have been correct (and
+ // extending the value to 64 bits is necessary since it's used in the address calculation). We zero-
+ // extend rather than sign-extend since it makes it easier to re-tag the value in the slow case.
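+ // E.g. an index of -1 zero-extends to 0xFFFFFFFF, which is AboveOrEqual any
+ // possible vector length, so negative indices always take the slow path.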
+ zeroExtend32ToPtr(regT1, regT1);
+
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+ emitArrayProfilingSiteWithCell(regT0, regT2, profile);
+ and32(TrustedImm32(IndexingShapeMask), regT2);
+
+ JITArrayMode mode = chooseArrayMode(profile);
+ PatchableJump badType;
+
+ // FIXME: Add support for other types like TypedArrays and Arguments.
+ // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
+ JumpList slowCases = emitLoadForArrayMode(currentInstruction, mode, badType);
+
+ move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
+
+ addSlowCase(badType);
+ addSlowCase(slowCases);
+
+ Label done = label();
+
+ emitPutVirtualRegister(dst);
+
+ Label nextHotPath = label();
+
+ m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, PatchableJump(), badType, mode, profile, done, nextHotPath));
}
-void JIT::emitSlow_op_resolve_with_base(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_has_indexed_property(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
- emitSlow_link_resolve_operations(operations, iter);
- JITStubCall stubCall(this, cti_op_resolve_with_base);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
- stubCall.addArgument(TrustedImmPtr(currentInstruction[4].u.resolveOperations));
- stubCall.addArgument(TrustedImmPtr(currentInstruction[5].u.putToBaseOperation));
- stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
+ ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
+
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // base array check
+ linkSlowCase(iter); // read barrier
+ linkSlowCase(iter); // vector length check
+ linkSlowCase(iter); // empty value
+
+ Label slowPath = label();
+
+ emitGetVirtualRegister(base, regT0);
+ emitGetVirtualRegister(property, regT1);
+ Call call = callOperation(operationHasIndexedPropertyDefault, dst, regT0, regT1, byValInfo);
+
+ m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
+ m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
+ m_byValInstructionIndex++;
}
-void JIT::emit_op_resolve_with_this(Instruction* currentInstruction)
+void JIT::emit_op_get_direct_pname(Instruction* currentInstruction)
{
- ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
- int base = currentInstruction[1].u.operand;
- int value = currentInstruction[2].u.operand;
- emit_resolve_operations(operations, &base, &value);
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int index = currentInstruction[4].u.operand;
+ int enumerator = currentInstruction[5].u.operand;
+
+ // Check that base is a cell
+ emitGetVirtualRegister(base, regT0);
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+
+ // Check the structure
+ emitGetVirtualRegister(enumerator, regT2);
+ load32(Address(regT0, JSCell::structureIDOffset()), regT1);
+ addSlowCase(branch32(NotEqual, regT1, Address(regT2, JSPropertyNameEnumerator::cachedStructureIDOffset())));
+
+ // Compute the offset
+ emitGetVirtualRegister(index, regT1);
+ // If the index is less than the enumerator's cached inline capacity, this is an inline access
+ Jump outOfLineAccess = branch32(AboveOrEqual, regT1, Address(regT2, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
+ addPtr(TrustedImm32(JSObject::offsetOfInlineStorage()), regT0);
+ signExtend32ToPtr(regT1, regT1);
+ load64(BaseIndex(regT0, regT1, TimesEight), regT0);
+
+ Jump done = jump();
+
+ // Otherwise it's out of line
+ outOfLineAccess.link(this);
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
+ addSlowCase(branchIfNotToSpace(regT0));
+ sub32(Address(regT2, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), regT1);
+ neg32(regT1);
+ signExtend32ToPtr(regT1, regT1);
+ int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
+ load64(BaseIndex(regT0, regT1, TimesEight, offsetOfFirstProperty), regT0);
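+ // Out-of-line slot i (where i = index - inlineCapacity) lives at a negative offset
+ // from the butterfly, hence the negation above before indexing from offsetOfFirstProperty.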
+
+ done.link(this);
+ emitValueProfilingSite();
+ emitPutVirtualRegister(dst, regT0);
}
-void JIT::emitSlow_op_resolve_with_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_get_direct_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
- emitSlow_link_resolve_operations(operations, iter);
- JITStubCall stubCall(this, cti_op_resolve_with_this);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
- stubCall.addArgument(TrustedImmPtr(currentInstruction[4].u.resolveOperations));
- stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
+ int base = currentInstruction[2].u.operand;
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_direct_pname);
+ slowPathCall.call();
}
-void JIT::emitSlow_op_put_to_base(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_enumerator_structure_pname(Instruction* currentInstruction)
{
- int base = currentInstruction[1].u.operand;
- int id = currentInstruction[2].u.operand;
- int value = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int enumerator = currentInstruction[2].u.operand;
+ int index = currentInstruction[3].u.operand;
- PutToBaseOperation* putToBaseOperation = currentInstruction[4].u.putToBaseOperation;
- switch (putToBaseOperation->m_kind) {
- case PutToBaseOperation::VariablePut:
- return;
+ emitGetVirtualRegister(index, regT0);
+ emitGetVirtualRegister(enumerator, regT1);
+ Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endStructurePropertyIndexOffset()));
- case PutToBaseOperation::GlobalVariablePutChecked:
- linkSlowCase(iter);
- case PutToBaseOperation::GlobalVariablePut:
- if (!putToBaseOperation->m_isDynamic)
- return;
- linkSlowCase(iter);
- break;
+ move(TrustedImm64(JSValue::encode(jsNull())), regT0);
- case PutToBaseOperation::Uninitialised:
- case PutToBaseOperation::Readonly:
- case PutToBaseOperation::Generic:
- return;
+ Jump done = jump();
+ inBounds.link(this);
- case PutToBaseOperation::GlobalPropertyPut:
- linkSlowCase(iter);
- break;
+ loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
+ signExtend32ToPtr(regT0, regT0);
+ load64(BaseIndex(regT1, regT0, TimesEight), regT0);
+ done.link(this);
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_enumerator_generic_pname(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int enumerator = currentInstruction[2].u.operand;
+ int index = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegister(index, regT0);
+ emitGetVirtualRegister(enumerator, regT1);
+ Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endGenericPropertyIndexOffset()));
+
+ move(TrustedImm64(JSValue::encode(jsNull())), regT0);
+
+ Jump done = jump();
+ inBounds.link(this);
+
+ loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
+ signExtend32ToPtr(regT0, regT0);
+ load64(BaseIndex(regT1, regT0, TimesEight), regT0);
+
+ done.link(this);
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_profile_type(Instruction* currentInstruction)
+{
+ TypeLocation* cachedTypeLocation = currentInstruction[2].u.location;
+ int valueToProfile = currentInstruction[1].u.operand;
+
+ emitGetVirtualRegister(valueToProfile, regT0);
+
+ JumpList jumpToEnd;
+
+ jumpToEnd.append(branchTest64(Zero, regT0));
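+ // An encoded value of zero is the empty JSValue; there is nothing to profile.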
+
+ // Compile in a predictive type check, if possible, to see if we can skip writing to the log.
+ // These checks are inlined to match the 64-bit JSValue type checks.
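+ // For example, when the last seen type was boolean, masking off the low bit maps
+ // both ValueTrue and ValueFalse to ValueFalse, so a single compare covers both booleans.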
+ if (cachedTypeLocation->m_lastSeenType == TypeUndefined)
+ jumpToEnd.append(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsUndefined()))));
+ else if (cachedTypeLocation->m_lastSeenType == TypeNull)
+ jumpToEnd.append(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNull()))));
+ else if (cachedTypeLocation->m_lastSeenType == TypeBoolean) {
+ move(regT0, regT1);
+ and64(TrustedImm32(~1), regT1);
+ jumpToEnd.append(branch64(Equal, regT1, TrustedImm64(ValueFalse)));
+ } else if (cachedTypeLocation->m_lastSeenType == TypeMachineInt)
+ jumpToEnd.append(emitJumpIfInt(regT0));
+ else if (cachedTypeLocation->m_lastSeenType == TypeNumber)
+ jumpToEnd.append(emitJumpIfNumber(regT0));
+ else if (cachedTypeLocation->m_lastSeenType == TypeString) {
+ Jump isNotCell = emitJumpIfNotJSCell(regT0);
+ jumpToEnd.append(branch8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType)));
+ isNotCell.link(this);
}
- JITStubCall stubCall(this, cti_op_put_to_base);
+ // Load the type profiling log into T2.
+ TypeProfilerLog* cachedTypeProfilerLog = m_vm->typeProfilerLog();
+ move(TrustedImmPtr(cachedTypeProfilerLog), regT2);
+ // Load the next log entry into T1.
+ loadPtr(Address(regT2, TypeProfilerLog::currentLogEntryOffset()), regT1);
+
+ // Store the JSValue into the log entry.
+ store64(regT0, Address(regT1, TypeProfilerLog::LogEntry::valueOffset()));
+
+ // Store the cell's structureID if T0 is a cell; otherwise store 0 in the log entry.
+ Jump notCell = emitJumpIfNotJSCell(regT0);
+ load32(Address(regT0, JSCell::structureIDOffset()), regT0);
+ store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
+ Jump skipIsCell = jump();
+ notCell.link(this);
+ store32(TrustedImm32(0), Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
+ skipIsCell.link(this);
+
+ // Store the typeLocation on the log entry.
+ move(TrustedImmPtr(cachedTypeLocation), regT0);
+ store64(regT0, Address(regT1, TypeProfilerLog::LogEntry::locationOffset()));
+
+ // Increment the current log entry.
+ addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), regT1);
+ store64(regT1, Address(regT2, TypeProfilerLog::currentLogEntryOffset()));
+ Jump skipClearLog = branchPtr(NotEqual, regT1, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr()));
+ // Clear the log if we're at the end of the log.
+ callOperation(operationProcessTypeProfilerLog);
+ skipClearLog.link(this);
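+ // operationProcessTypeProfilerLog drains the log and resets the cursor, so the hot
+ // path above only pays for a pointer compare against logEndPtr.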
- stubCall.addArgument(TrustedImm32(base));
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(id)));
- stubCall.addArgument(TrustedImm32(value));
- stubCall.addArgument(TrustedImmPtr(putToBaseOperation));
- stubCall.call();
+ jumpToEnd.link(this);
}
-void JIT::emit_op_new_regexp(Instruction* currentInstruction)
+#endif // USE(JSVALUE64)
+
+void JIT::emit_op_get_enumerable_length(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_new_regexp);
- stubCall.addArgument(TrustedImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_enumerable_length);
+ slowPathCall.call();
}
-void JIT::emit_op_new_func(Instruction* currentInstruction)
+void JIT::emitSlow_op_has_structure_property(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- Jump lazyJump;
- int dst = currentInstruction[1].u.operand;
- if (currentInstruction[3].u.operand) {
-#if USE(JSVALUE32_64)
- lazyJump = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag));
-#else
- lazyJump = branchTest64(NonZero, addressFor(dst));
-#endif
- }
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_has_structure_property);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_has_generic_property(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_has_generic_property);
+ slowPathCall.call();
+}
- JITStubCall stubCall(this, cti_op_new_func);
- stubCall.addArgument(TrustedImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
- stubCall.call(dst);
+void JIT::emit_op_get_property_enumerator(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_property_enumerator);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_to_index_string(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_index_string);
+ slowPathCall.call();
+}
- if (currentInstruction[3].u.operand) {
-#if USE(JSVALUE32_64)
- unmap();
+void JIT::emit_op_profile_control_flow(Instruction* currentInstruction)
+{
+ BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation;
+#if USE(JSVALUE64)
+ basicBlockLocation->emitExecuteCode(*this);
#else
- killLastResultRegister();
+ basicBlockLocation->emitExecuteCode(*this, regT0);
#endif
- lazyJump.link(this);
- }
}
-void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
+void JIT::emit_op_create_direct_arguments(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_new_func_exp);
- stubCall.addArgument(TrustedImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_direct_arguments);
+ slowPathCall.call();
}
-void JIT::emit_op_new_array(Instruction* currentInstruction)
+void JIT::emit_op_create_scoped_arguments(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_new_array);
- stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
- stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
- stubCall.addArgument(TrustedImmPtr(currentInstruction[4].u.arrayAllocationProfile));
- stubCall.call(currentInstruction[1].u.operand);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_scoped_arguments);
+ slowPathCall.call();
}
-void JIT::emit_op_new_array_with_size(Instruction* currentInstruction)
+void JIT::emit_op_create_out_of_band_arguments(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_new_array_with_size);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_out_of_band_arguments);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_copy_rest(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_copy_rest);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_get_rest_length(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ unsigned numParamsToSkip = currentInstruction[2].u.unsignedValue;
+ load32(payloadFor(JSStack::ArgumentCount), regT0);
+ sub32(TrustedImm32(1), regT0);
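+ // ArgumentCount includes |this|, so subtract one to get the number of actual arguments.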
+ Jump zeroLength = branch32(LessThanOrEqual, regT0, Imm32(numParamsToSkip));
+ sub32(Imm32(numParamsToSkip), regT0);
#if USE(JSVALUE64)
- stubCall.addArgument(currentInstruction[2].u.operand, regT2);
+ boxInt32(regT0, JSValueRegs(regT0));
+#endif
+ Jump done = jump();
+
+ zeroLength.link(this);
+#if USE(JSVALUE64)
+ move(TrustedImm64(JSValue::encode(jsNumber(0))), regT0);
+#else
+ move(TrustedImm32(0), regT0);
+#endif
+
+ done.link(this);
+#if USE(JSVALUE64)
+ emitPutVirtualRegister(dst, regT0);
#else
- stubCall.addArgument(currentInstruction[2].u.operand);
+ move(TrustedImm32(JSValue::Int32Tag), regT1);
+ emitPutVirtualRegister(dst, JSValueRegs(regT1, regT0));
#endif
- stubCall.addArgument(TrustedImmPtr(currentInstruction[3].u.arrayAllocationProfile));
- stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emit_op_new_array_buffer(Instruction* currentInstruction)
+void JIT::emit_op_save(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_save);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_resume(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_new_array_buffer);
- stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
- stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
- stubCall.addArgument(TrustedImmPtr(currentInstruction[4].u.arrayAllocationProfile));
- stubCall.call(currentInstruction[1].u.operand);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_resume);
+ slowPathCall.call();
}
} // namespace JSC