author     Simon Hausmann <simon.hausmann@nokia.com>   2012-03-12 14:11:15 +0100
committer  Simon Hausmann <simon.hausmann@nokia.com>   2012-03-12 14:11:15 +0100
commit     dd91e772430dc294e3bf478c119ef8d43c0a3358 (patch)
tree       6f33ce4d5872a5691e0291eb45bf6ab373a5f567 /Source/JavaScriptCore/llint
parent     ad0d549d4cc13433f77c1ac8f0ab379c83d93f28 (diff)
download   qtwebkit-dd91e772430dc294e3bf478c119ef8d43c0a3358.tar.gz
Imported WebKit commit 3db4eb1820ac8fb03065d7ea73a4d9db1e8fea1a (http://svn.webkit.org/repository/webkit/trunk@110422)
This includes build fixes for the latest qtbase/qtdeclarative as well as the final QML2 API.
Diffstat (limited to 'Source/JavaScriptCore/llint')
-rw-r--r--  Source/JavaScriptCore/llint/LLIntData.cpp                 |   15
-rw-r--r--  Source/JavaScriptCore/llint/LLIntEntrypoints.cpp          |   14
-rw-r--r--  Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h       |   18
-rw-r--r--  Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp     |    4
-rw-r--r--  Source/JavaScriptCore/llint/LLIntSlowPaths.cpp            |  135
-rw-r--r--  Source/JavaScriptCore/llint/LLIntSlowPaths.h              |   34
-rw-r--r--  Source/JavaScriptCore/llint/LLIntThunks.cpp               |    2
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter.asm       | 1789
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter.h         |    4
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm  | 1653
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter64.asm     | 1490
11 files changed, 3427 insertions(+), 1731 deletions(-)
diff --git a/Source/JavaScriptCore/llint/LLIntData.cpp b/Source/JavaScriptCore/llint/LLIntData.cpp
index c0fe78142..983a7d706 100644
--- a/Source/JavaScriptCore/llint/LLIntData.cpp
+++ b/Source/JavaScriptCore/llint/LLIntData.cpp
@@ -72,6 +72,7 @@ void Data::performAssertions(JSGlobalData& globalData)
ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag) == 4);
ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload) == 0);
#endif
+#if USE(JSVALUE32_64)
ASSERT(JSValue::Int32Tag == -1);
ASSERT(JSValue::BooleanTag == -2);
ASSERT(JSValue::NullTag == -3);
@@ -80,12 +81,26 @@ void Data::performAssertions(JSGlobalData& globalData)
ASSERT(JSValue::EmptyValueTag == -6);
ASSERT(JSValue::DeletedValueTag == -7);
ASSERT(JSValue::LowestTag == -7);
+#else
+ ASSERT(TagBitTypeOther == 0x2);
+ ASSERT(TagBitBool == 0x4);
+ ASSERT(TagBitUndefined == 0x8);
+ ASSERT(ValueEmpty == 0x0);
+ ASSERT(ValueFalse == (TagBitTypeOther | TagBitBool));
+ ASSERT(ValueTrue == (TagBitTypeOther | TagBitBool | 1));
+ ASSERT(ValueUndefined == (TagBitTypeOther | TagBitUndefined));
+ ASSERT(ValueNull == TagBitTypeOther);
+#endif
ASSERT(StringType == 5);
ASSERT(ObjectType == 13);
ASSERT(MasqueradesAsUndefined == 1);
ASSERT(ImplementsHasInstance == 2);
ASSERT(ImplementsDefaultHasInstance == 8);
+#if USE(JSVALUE64)
+ ASSERT(&globalData.heap.allocatorForObjectWithoutDestructor(sizeof(JSFinalObject)) - &globalData.heap.firstAllocatorWithoutDestructors() == 1);
+#else
ASSERT(&globalData.heap.allocatorForObjectWithoutDestructor(sizeof(JSFinalObject)) - &globalData.heap.firstAllocatorWithoutDestructors() == 3);
+#endif
ASSERT(FirstConstantRegisterIndex == 0x40000000);
ASSERT(GlobalCode == 0);
ASSERT(EvalCode == 1);
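[Annotation: the new JSVALUE64 assertions in this hunk pin down the 64-bit immediate encoding completely. A minimal standalone C++ sketch, using only the constant names and values asserted above (everything else is illustrative), showing how the asserted values follow from the three tag bits:

    #include <cstdint>

    // Low tag bits of the JSVALUE64 immediate encoding, as asserted above.
    constexpr int64_t TagBitTypeOther = 0x2; // set on all non-pointer, non-number immediates
    constexpr int64_t TagBitBool      = 0x4;
    constexpr int64_t TagBitUndefined = 0x8;

    constexpr int64_t ValueEmpty     = 0x0;
    constexpr int64_t ValueFalse     = TagBitTypeOther | TagBitBool;      // 0x6
    constexpr int64_t ValueTrue      = TagBitTypeOther | TagBitBool | 1;  // 0x7
    constexpr int64_t ValueUndefined = TagBitTypeOther | TagBitUndefined; // 0xa
    constexpr int64_t ValueNull      = TagBitTypeOther;                   // 0x2

    // false and true differ only in the low bit, so negating a boolean or
    // testing "is this either boolean?" needs only a single xor or mask.
    static_assert((ValueFalse ^ 1) == ValueTrue,  "xor 1 negates a boolean");
    static_assert((ValueTrue & ~1) == ValueFalse, "masking bit 0 gives false");
]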
diff --git a/Source/JavaScriptCore/llint/LLIntEntrypoints.cpp b/Source/JavaScriptCore/llint/LLIntEntrypoints.cpp
index f610f4b4c..dd7d9433d 100644
--- a/Source/JavaScriptCore/llint/LLIntEntrypoints.cpp
+++ b/Source/JavaScriptCore/llint/LLIntEntrypoints.cpp
@@ -30,8 +30,10 @@
#include "JITCode.h"
#include "JSGlobalData.h"
+#include "JSObject.h"
#include "LLIntThunks.h"
#include "LowLevelInterpreter.h"
+#include "ScopeChain.h"
namespace JSC { namespace LLInt {
@@ -39,14 +41,14 @@ void getFunctionEntrypoint(JSGlobalData& globalData, CodeSpecializationKind kind
{
if (!globalData.canUseJIT()) {
if (kind == CodeForCall) {
- jitCode = JITCode::HostFunction(MacroAssemblerCodeRef::createSelfManagedCodeRef(MacroAssemblerCodePtr(bitwise_cast<void*>(&llint_function_for_call_prologue))));
- arityCheck = MacroAssemblerCodePtr(bitwise_cast<void*>(&llint_function_for_call_arity_check));
+ jitCode = JITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_function_for_call_prologue), JITCode::InterpreterThunk);
+ arityCheck = MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_call_arity_check);
return;
}
ASSERT(kind == CodeForConstruct);
- jitCode = JITCode::HostFunction(MacroAssemblerCodeRef::createSelfManagedCodeRef(MacroAssemblerCodePtr(bitwise_cast<void*>(&llint_function_for_construct_prologue))));
- arityCheck = MacroAssemblerCodePtr(bitwise_cast<void*>(&llint_function_for_construct_arity_check));
+ jitCode = JITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_function_for_construct_prologue), JITCode::InterpreterThunk);
+ arityCheck = MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_construct_arity_check);
return;
}
@@ -64,7 +66,7 @@ void getFunctionEntrypoint(JSGlobalData& globalData, CodeSpecializationKind kind
void getEvalEntrypoint(JSGlobalData& globalData, JITCode& jitCode)
{
if (!globalData.canUseJIT()) {
- jitCode = JITCode::HostFunction(MacroAssemblerCodeRef::createSelfManagedCodeRef(MacroAssemblerCodePtr(bitwise_cast<void*>(&llint_eval_prologue))));
+ jitCode = JITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_eval_prologue), JITCode::InterpreterThunk);
return;
}
@@ -74,7 +76,7 @@ void getEvalEntrypoint(JSGlobalData& globalData, JITCode& jitCode)
void getProgramEntrypoint(JSGlobalData& globalData, JITCode& jitCode)
{
if (!globalData.canUseJIT()) {
- jitCode = JITCode::HostFunction(MacroAssemblerCodeRef::createSelfManagedCodeRef(MacroAssemblerCodePtr(bitwise_cast<void*>(&llint_program_prologue))));
+ jitCode = JITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_program_prologue), JITCode::InterpreterThunk);
return;
}
diff --git a/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h b/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h
index 9fe86fac4..2539ee9b3 100644
--- a/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h
+++ b/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h
@@ -43,6 +43,18 @@
#define OFFLINE_ASM_ARMv7 0
#endif
+#if CPU(X86_64)
+#define OFFLINE_ASM_X86_64 1
+#else
+#define OFFLINE_ASM_X86_64 0
+#endif
+
+#if USE(JSVALUE64)
+#define OFFLINE_ASM_JSVALUE64 1
+#else
+#define OFFLINE_ASM_JSVALUE64 0
+#endif
+
#if !ASSERT_DISABLED
#define OFFLINE_ASM_ASSERT_ENABLED 1
#else
@@ -73,6 +85,12 @@
#define OFFLINE_ASM_ALWAYS_ALLOCATE_SLOW 0
#endif
+#if ENABLE(VALUE_PROFILER)
+#define OFFLINE_ASM_VALUE_PROFILER 1
+#else
+#define OFFLINE_ASM_VALUE_PROFILER 0
+#endif
+
#if CPU(ARM_THUMB2)
#define OFFLINE_ASM_GLOBAL_LABEL(label) \
".globl " SYMBOL_STRING(label) "\n" \
diff --git a/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp b/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp
index 5b76cd521..f863cb218 100644
--- a/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp
+++ b/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp
@@ -61,6 +61,7 @@ public:
const unsigned* LLIntOffsetsExtractor::dummy()
{
+#if ENABLE(JIT)
// This is a file generated by offlineasm/generate_offsets_extractor.rb, and contains code
// to create a table of offsets, sizes, and a header identifying what combination of
// Platform.h macros we have set. We include it inside of a method on LLIntOffsetsExtractor
@@ -69,6 +70,9 @@ const unsigned* LLIntOffsetsExtractor::dummy()
// compiler to kindly step aside and yield to our best intentions.
#include "LLIntDesiredOffsets.h"
return extractorTable;
+#else
+ return 0;
+#endif
}
} // namespace JSC
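[Annotation: the #if ENABLE(JIT) guard added in this hunk wraps the trick the surrounding comment describes: a generated header defines a table of offsets and sizes inside this method, and the offlineasm build step reads the compiler's actual struct layout back out of the compiled object. A hedged sketch of the idea, with a made-up Example struct standing in for the real generated contents:

    #include <cstddef>

    // Illustration only: the real table is emitted by
    // offlineasm/generate_offsets_extractor.rb and covers the JSC types that
    // the .asm files touch. 'Example' and its fields are hypothetical.
    struct Example {
        void* vm;
        int counter;
    };

    static const unsigned extractorTable[] = {
        static_cast<unsigned>(offsetof(Example, vm)),      // field offsets...
        static_cast<unsigned>(offsetof(Example, counter)),
        static_cast<unsigned>(sizeof(Example)),            // ...and a size
    };

    // Returning the table keeps the compiler from discarding it, so the
    // build tooling can locate the values inside the compiled binary.
    const unsigned* dummy() { return extractorTable; }
]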
diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
index 3203d25d2..b6bb664bc 100644
--- a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
@@ -65,16 +65,7 @@ namespace JSC { namespace LLInt {
#define LLINT_OP_C(index) (exec->r(pc[index].u.operand))
#define LLINT_RETURN_TWO(first, second) do { \
- union { \
- struct { \
- void* a; \
- void* b; \
- } pair; \
- int64_t i; \
- } __rt_u; \
- __rt_u.pair.a = first; \
- __rt_u.pair.b = second; \
- return __rt_u.i; \
+ return encodeResult(first, second); \
} while (false)
#define LLINT_END_IMPL() LLINT_RETURN_TWO(pc, exec)
@@ -114,6 +105,7 @@ namespace JSC { namespace LLInt {
LLINT_END_IMPL(); \
} while (false)
+#if ENABLE(VALUE_PROFILER)
#define LLINT_RETURN_PROFILED(opcode, value) do { \
JSValue __rp_returnValue = (value); \
LLINT_CHECK_EXCEPTION(); \
@@ -122,6 +114,9 @@ namespace JSC { namespace LLInt {
JSValue::encode(__rp_returnValue); \
LLINT_END_IMPL(); \
} while (false)
+#else // ENABLE(VALUE_PROFILER)
+#define LLINT_RETURN_PROFILED(opcode, value) LLINT_RETURN(value)
+#endif // ENABLE(VALUE_PROFILER)
#define LLINT_CALL_END_IMPL(exec, callTarget) LLINT_RETURN_TWO((callTarget), (exec))
@@ -163,7 +158,6 @@ extern "C" SlowPathReturnType llint_trace_operand(ExecState* exec, Instruction*
extern "C" SlowPathReturnType llint_trace_value(ExecState* exec, Instruction* pc, int fromWhere, int operand)
{
- LLINT_BEGIN();
JSValue value = LLINT_OP_C(operand).jsValue();
union {
struct {
@@ -184,14 +178,13 @@ extern "C" SlowPathReturnType llint_trace_value(ExecState* exec, Instruction* pc
u.bits.tag,
u.bits.payload,
value.description());
- LLINT_END();
+ LLINT_END_IMPL();
}
LLINT_SLOW_PATH_DECL(trace_prologue)
{
- LLINT_BEGIN();
dataLog("%p / %p: in prologue.\n", exec->codeBlock(), exec);
- LLINT_END();
+ LLINT_END_IMPL();
}
static void traceFunctionPrologue(ExecState* exec, const char* comment, CodeSpecializationKind kind)
@@ -207,54 +200,52 @@ static void traceFunctionPrologue(ExecState* exec, const char* comment, CodeSpec
LLINT_SLOW_PATH_DECL(trace_prologue_function_for_call)
{
- LLINT_BEGIN();
traceFunctionPrologue(exec, "call prologue", CodeForCall);
- LLINT_END();
+ LLINT_END_IMPL();
}
LLINT_SLOW_PATH_DECL(trace_prologue_function_for_construct)
{
- LLINT_BEGIN();
traceFunctionPrologue(exec, "construct prologue", CodeForConstruct);
- LLINT_END();
+ LLINT_END_IMPL();
}
LLINT_SLOW_PATH_DECL(trace_arityCheck_for_call)
{
- LLINT_BEGIN();
traceFunctionPrologue(exec, "call arity check", CodeForCall);
- LLINT_END();
+ LLINT_END_IMPL();
}
LLINT_SLOW_PATH_DECL(trace_arityCheck_for_construct)
{
- LLINT_BEGIN();
traceFunctionPrologue(exec, "construct arity check", CodeForConstruct);
- LLINT_END();
+ LLINT_END_IMPL();
}
LLINT_SLOW_PATH_DECL(trace)
{
- LLINT_BEGIN();
dataLog("%p / %p: executing bc#%zu, %s, scope %p\n",
exec->codeBlock(),
exec,
static_cast<intptr_t>(pc - exec->codeBlock()->instructions().begin()),
opcodeNames[exec->globalData().interpreter->getOpcodeID(pc[0].u.opcode)],
exec->scopeChain());
- LLINT_END();
+ if (exec->globalData().interpreter->getOpcodeID(pc[0].u.opcode) == op_ret) {
+ dataLog("Will be returning to %p\n", exec->returnPC().value());
+ dataLog("The new cfr will be %p\n", exec->callerFrame());
+ }
+ LLINT_END_IMPL();
}
LLINT_SLOW_PATH_DECL(special_trace)
{
- LLINT_BEGIN();
dataLog("%p / %p: executing special case bc#%zu, op#%u, return PC is %p\n",
exec->codeBlock(),
exec,
static_cast<intptr_t>(pc - exec->codeBlock()->instructions().begin()),
exec->globalData().interpreter->getOpcodeID(pc[0].u.opcode),
exec->returnPC().value());
- LLINT_END();
+ LLINT_END_IMPL();
}
inline bool shouldJIT(ExecState* exec)
@@ -263,6 +254,41 @@ inline bool shouldJIT(ExecState* exec)
return exec->globalData().canUseJIT();
}
+// Returns true if we should try to OSR.
+inline bool jitCompileAndSetHeuristics(CodeBlock* codeBlock, ExecState* exec)
+{
+ if (!codeBlock->checkIfJITThresholdReached()) {
+#if ENABLE(JIT_VERBOSE_OSR)
+ dataLog(" JIT threshold should be lifted.\n");
+#endif
+ return false;
+ }
+
+ CodeBlock::JITCompilationResult result = codeBlock->jitCompile(exec->globalData());
+ switch (result) {
+ case CodeBlock::AlreadyCompiled:
+#if ENABLE(JIT_VERBOSE_OSR)
+ dataLog(" Code was already compiled.\n");
+#endif
+ codeBlock->jitSoon();
+ return true;
+ case CodeBlock::CouldNotCompile:
+#if ENABLE(JIT_VERBOSE_OSR)
+ dataLog(" JIT compilation failed.\n");
+#endif
+ codeBlock->dontJITAnytimeSoon();
+ return false;
+ case CodeBlock::CompiledSuccessfully:
+#if ENABLE(JIT_VERBOSE_OSR)
+ dataLog(" JIT compilation successful.\n");
+#endif
+ codeBlock->jitSoon();
+ return true;
+ }
+ ASSERT_NOT_REACHED();
+ return false;
+}
+
enum EntryKind { Prologue, ArityCheck };
static SlowPathReturnType entryOSR(ExecState* exec, Instruction* pc, CodeBlock* codeBlock, const char *name, EntryKind kind)
{
@@ -274,12 +300,9 @@ static SlowPathReturnType entryOSR(ExecState* exec, Instruction* pc, CodeBlock*
codeBlock->dontJITAnytimeSoon();
LLINT_RETURN_TWO(0, exec);
}
- if (!codeBlock->jitCompile(exec->globalData())) {
-#if ENABLE(JIT_VERBOSE_OSR)
- dataLog(" Code was already compiled.\n");
-#endif
- }
- codeBlock->jitSoon();
+ if (!jitCompileAndSetHeuristics(codeBlock, exec))
+ LLINT_RETURN_TWO(0, exec);
+
if (kind == Prologue)
LLINT_RETURN_TWO(codeBlock->getJITCode().executableAddressAtOffset(0), exec);
ASSERT(kind == ArityCheck);
@@ -324,12 +347,8 @@ LLINT_SLOW_PATH_DECL(loop_osr)
LLINT_RETURN_TWO(0, exec);
}
- if (!codeBlock->jitCompile(exec->globalData())) {
-#if ENABLE(JIT_VERBOSE_OSR)
- dataLog(" Code was already compiled.\n");
-#endif
- }
- codeBlock->jitSoon();
+ if (!jitCompileAndSetHeuristics(codeBlock, exec))
+ LLINT_RETURN_TWO(0, exec);
ASSERT(codeBlock->getJITType() == JITCode::BaselineJIT);
@@ -353,14 +372,9 @@ LLINT_SLOW_PATH_DECL(replace)
dataLog("%p: Entered replace with executeCounter = %d\n", codeBlock, codeBlock->llintExecuteCounter());
#endif
- if (shouldJIT(exec)) {
- if (!codeBlock->jitCompile(exec->globalData())) {
-#if ENABLE(JIT_VERBOSE_OSR)
- dataLog(" Code was already compiled.\n");
-#endif
- }
- codeBlock->jitSoon();
- } else
+ if (shouldJIT(exec))
+ jitCompileAndSetHeuristics(codeBlock, exec);
+ else
codeBlock->dontJITAnytimeSoon();
LLINT_END_IMPL();
}
@@ -664,12 +678,6 @@ LLINT_SLOW_PATH_DECL(slow_path_bitxor)
LLINT_RETURN(jsNumber(LLINT_OP_C(2).jsValue().toInt32(exec) ^ LLINT_OP_C(3).jsValue().toInt32(exec)));
}
-LLINT_SLOW_PATH_DECL(slow_path_bitnot)
-{
- LLINT_BEGIN();
- LLINT_RETURN(jsNumber(~LLINT_OP_C(2).jsValue().toInt32(exec)));
-}
-
LLINT_SLOW_PATH_DECL(slow_path_check_has_instance)
{
LLINT_BEGIN();
@@ -873,8 +881,10 @@ LLINT_SLOW_PATH_DECL(slow_path_get_by_id)
pc[5].u.operand = slot.cachedOffset() * sizeof(JSValue);
}
}
-
+
+#if ENABLE(VALUE_PROFILER)
pc[OPCODE_LENGTH(op_get_by_id) - 1].u.profile->m_buckets[0] = JSValue::encode(result);
+#endif
LLINT_END();
}
@@ -1023,7 +1033,7 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_val)
if (jsArray->canSetIndex(i))
jsArray->setIndex(globalData, i, value);
else
- JSArray::putByIndex(jsArray, exec, i, value);
+ JSArray::putByIndex(jsArray, exec, i, value, exec->codeBlock()->isStrictMode());
LLINT_END();
}
if (isJSByteArray(baseValue)
@@ -1038,7 +1048,7 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_val)
LLINT_END();
}
}
- baseValue.put(exec, i, value);
+ baseValue.putByIndex(exec, i, value, exec->codeBlock()->isStrictMode());
LLINT_END();
}
@@ -1078,7 +1088,9 @@ LLINT_SLOW_PATH_DECL(slow_path_del_by_val)
LLINT_SLOW_PATH_DECL(slow_path_put_by_index)
{
LLINT_BEGIN();
- LLINT_OP_C(1).jsValue().put(exec, pc[2].u.operand, LLINT_OP_C(3).jsValue());
+ JSValue arrayValue = LLINT_OP_C(1).jsValue();
+ ASSERT(isJSArray(arrayValue));
+ asArray(arrayValue)->putDirectIndex(exec, pc[2].u.operand, LLINT_OP_C(3).jsValue(), false);
LLINT_END();
}
@@ -1384,7 +1396,7 @@ LLINT_SLOW_PATH_DECL(slow_path_call_varargs)
execCallee->uncheckedR(RegisterFile::Callee) = calleeAsValue;
execCallee->setCallerFrame(exec);
- exec->uncheckedR(RegisterFile::ArgumentCount).tag() = bitwise_cast<int32_t>(pc + OPCODE_LENGTH(op_call_varargs));
+ exec->setCurrentVPC(pc + OPCODE_LENGTH(op_call_varargs));
return setUpCall(execCallee, pc, CodeForCall, calleeAsValue);
}
@@ -1402,7 +1414,7 @@ LLINT_SLOW_PATH_DECL(slow_path_call_eval)
execCallee->setScopeChain(exec->scopeChain());
execCallee->setReturnPC(bitwise_cast<Instruction*>(&llint_generic_return_point));
execCallee->setCodeBlock(0);
- exec->uncheckedR(RegisterFile::ArgumentCount).tag() = bitwise_cast<int32_t>(pc + OPCODE_LENGTH(op_call_eval));
+ exec->setCurrentVPC(pc + OPCODE_LENGTH(op_call_eval));
if (!isHostFunction(calleeAsValue, globalFuncEval))
return setUpCall(execCallee, pc, CodeForCall, calleeAsValue);
@@ -1553,6 +1565,13 @@ LLINT_SLOW_PATH_DECL(slow_path_profile_did_call)
LLINT_END();
}
+LLINT_SLOW_PATH_DECL(throw_from_native_call)
+{
+ LLINT_BEGIN();
+ ASSERT(globalData.exception);
+ LLINT_END();
+}
+
} } // namespace JSC::LLInt
#endif // ENABLE(LLINT)
diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.h b/Source/JavaScriptCore/llint/LLIntSlowPaths.h
index fe684d306..334070a07 100644
--- a/Source/JavaScriptCore/llint/LLIntSlowPaths.h
+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.h
@@ -38,8 +38,40 @@ struct Instruction;
namespace LLInt {
+#if USE(JSVALUE64)
+struct SlowPathReturnType {
+ void* a;
+ void* b;
+
+ SlowPathReturnType(void* a, void* b)
+ : a(a)
+ , b(b)
+ {
+ }
+};
+
+inline SlowPathReturnType encodeResult(void* a, void* b)
+{
+ return SlowPathReturnType(a, b);
+}
+#else
typedef int64_t SlowPathReturnType;
+inline SlowPathReturnType encodeResult(void* a, void* b)
+{
+ union {
+ struct {
+ void* a;
+ void* b;
+ } pair;
+ int64_t i;
+ } u;
+ u.pair.a = a;
+ u.pair.b = b;
+ return u.i;
+}
+#endif
+
extern "C" SlowPathReturnType llint_trace_operand(ExecState*, Instruction*, int fromWhere, int operand);
extern "C" SlowPathReturnType llint_trace_value(ExecState*, Instruction*, int fromWhere, int operand);
@@ -97,7 +129,6 @@ LLINT_SLOW_PATH_DECL(slow_path_urshift);
LLINT_SLOW_PATH_DECL(slow_path_bitand);
LLINT_SLOW_PATH_DECL(slow_path_bitor);
LLINT_SLOW_PATH_DECL(slow_path_bitxor);
-LLINT_SLOW_PATH_DECL(slow_path_bitnot);
LLINT_SLOW_PATH_DECL(slow_path_check_has_instance);
LLINT_SLOW_PATH_DECL(slow_path_instanceof);
LLINT_SLOW_PATH_DECL(slow_path_typeof);
@@ -162,6 +193,7 @@ LLINT_SLOW_PATH_DECL(slow_path_throw_reference_error);
LLINT_SLOW_PATH_DECL(slow_path_debug);
LLINT_SLOW_PATH_DECL(slow_path_profile_will_call);
LLINT_SLOW_PATH_DECL(slow_path_profile_did_call);
+LLINT_SLOW_PATH_DECL(throw_from_native_call);
} } // namespace JSC::LLInt
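[Annotation: the SlowPathReturnType split above is worth unpacking. Every slow path returns two machine words — the next Instruction* and the (possibly replaced) ExecState* — and on 32-bit targets both are smuggled through one int64_t exactly as encodeResult shows. A small self-contained sketch of that 32-bit round trip; decodeResult is a hypothetical helper added here for illustration, since in the real code the unpacking happens on the assembly side, which reads the two halves out of the return registers:

    #include <cassert>
    #include <cstdint>

    typedef int64_t SlowPathReturnType; // 32-bit flavour, as in the diff

    union PairInt {
        struct { void* a; void* b; } pair; // assumes 32-bit pointers
        int64_t i;
    };

    inline SlowPathReturnType encodeResult(void* a, void* b)
    {
        PairInt u;
        u.pair.a = a;
        u.pair.b = b;
        return u.i;
    }

    // Hypothetical inverse, for demonstration only.
    inline void decodeResult(SlowPathReturnType r, void*& a, void*& b)
    {
        PairInt u;
        u.i = r;
        a = u.pair.a;
        b = u.pair.b;
    }

    int main()
    {
        int pc = 0, frame = 0;
        void *a = nullptr, *b = nullptr;
        decodeResult(encodeResult(&pc, &frame), a, b);
        assert(a == &pc && b == &frame); // both words survive the trip
    }

On a 64-bit target two pointers cannot share one int64_t, which is why the JSVALUE64 branch above returns a two-member struct instead and lets the ABI hand the pair back in two registers.]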
diff --git a/Source/JavaScriptCore/llint/LLIntThunks.cpp b/Source/JavaScriptCore/llint/LLIntThunks.cpp
index ddb0c46c2..b4d026423 100644
--- a/Source/JavaScriptCore/llint/LLIntThunks.cpp
+++ b/Source/JavaScriptCore/llint/LLIntThunks.cpp
@@ -29,8 +29,10 @@
#if ENABLE(LLINT)
#include "JSInterfaceJIT.h"
+#include "JSObject.h"
#include "LinkBuffer.h"
#include "LowLevelInterpreter.h"
+#include "ScopeChain.h"
namespace JSC { namespace LLInt {
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
index a9f83f680..9af91bef2 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
@@ -21,54 +21,8 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
-
-# Crash course on the language that this is written in (which I just call
-# "assembly" even though it's more than that):
-#
-# - Mostly gas-style operand ordering. The last operand tends to be the
-# destination. So "a := b" is written as "mov b, a". But unlike gas,
-# comparisons are in-order, so "if (a < b)" is written as
-# "bilt a, b, ...".
-#
-# - "b" = byte, "h" = 16-bit word, "i" = 32-bit word, "p" = pointer.
-# Currently this is just 32-bit so "i" and "p" are interchangeable
-# except when an op supports one but not the other.
-#
-# - In general, valid operands for macro invocations and instructions are
-# registers (eg "t0"), addresses (eg "4[t0]"), base-index addresses
-# (eg "7[t0, t1, 2]"), absolute addresses (eg "0xa0000000[]"), or labels
-# (eg "_foo" or ".foo"). Macro invocations can also take anonymous
-# macros as operands. Instructions cannot take anonymous macros.
-#
-# - Labels must have names that begin with either "_" or ".". A "." label
-# is local and gets renamed before code gen to minimize namespace
-# pollution. A "_" label is an extern symbol (i.e. ".globl"). The "_"
-# may or may not be removed during code gen depending on whether the asm
-# conventions for C name mangling on the target platform mandate a "_"
-# prefix.
-#
-# - A "macro" is a lambda expression, which may be either anonymous or
-# named. But this has caveats. "macro" can take zero or more arguments,
-# which may be macros or any valid operands, but it can only return
-# code. But you can do Turing-complete things via continuation passing
-# style: "macro foo (a, b) b(a) end foo(foo, foo)". Actually, don't do
-# that, since you'll just crash the assembler.
-#
-# - An "if" is a conditional on settings. Any identifier supplied in the
-# predicate of an "if" is assumed to be a #define that is available
-# during code gen. So you can't use "if" for computation in a macro, but
-# you can use it to select different pieces of code for different
-# platforms.
-#
-# - Arguments to macros follow lexical scoping rather than dynamic scoping.
-# Const's also follow lexical scoping and may override (hide) arguments
-# or other consts. All variables (arguments and constants) can be bound
-# to operands. Additionally, arguments (but not constants) can be bound
-# to macros.
-
-
-# Below we have a bunch of constant declarations. Each constant must have
-# a corresponding ASSERT() in LLIntData.cpp.
+# First come the common protocols that both interpreters use. Note that each
+# of these must have an ASSERT() in LLIntData.cpp
# These declarations must match interpreter/RegisterFile.h.
const CallFrameHeaderSize = 48
@@ -81,10 +35,24 @@ const CodeBlock = -8
const ThisArgumentOffset = -CallFrameHeaderSize - 8
-# Declare some aliases for the registers we will use.
-const PC = t4
+# Some register conventions.
+if JSVALUE64
+ # - Use a pair of registers to represent the PC: one register for the
+ # base of the register file, and one register for the index.
+ # - The PC base (or PB for short) should be stored in the csr. It will
+ # get clobbered on calls to other JS code, but will get saved on calls
+ # to C functions.
+ # - C calls are still given the Instruction* rather than the PC index.
+ # This requires an add before the call, and a sub after.
+ const PC = t4
+ const PB = t6
+ const tagTypeNumber = csr1
+ const tagMask = csr2
+else
+ const PC = t4
+end
-# Offsets needed for reasoning about value representation.
+# Constants for reasoning about value representation.
if BIG_ENDIAN
const TagOffset = 0
const PayloadOffset = 4
@@ -93,16 +61,6 @@ else
const PayloadOffset = 0
end
-# Value representation constants.
-const Int32Tag = -1
-const BooleanTag = -2
-const NullTag = -3
-const UndefinedTag = -4
-const CellTag = -5
-const EmptyValueTag = -6
-const DeletedValueTag = -7
-const LowestTag = DeletedValueTag
-
# Type constants.
const StringType = 5
const ObjectType = 13
@@ -112,9 +70,6 @@ const MasqueradesAsUndefined = 1
const ImplementsHasInstance = 2
const ImplementsDefaultHasInstance = 8
-# Heap allocation constants.
-const JSFinalObjectSizeClassIndex = 3
-
# Bytecode operand constants.
const FirstConstantRegisterIndex = 0x40000000
@@ -126,14 +81,27 @@ const FunctionCode = 2
# The interpreter steals the tag word of the argument count.
const LLIntReturnPC = ArgumentCount + TagOffset
-# This must match wtf/Vector.h.
-const VectorSizeOffset = 0
-const VectorBufferOffset = 4
-
# String flags.
const HashFlags8BitBuffer = 64
-# Utilities
+# Allocation constants
+if JSVALUE64
+ const JSFinalObjectSizeClassIndex = 1
+else
+ const JSFinalObjectSizeClassIndex = 3
+end
+
+# This must match wtf/Vector.h
+if JSVALUE64
+ const VectorSizeOffset = 0
+ const VectorBufferOffset = 8
+else
+ const VectorSizeOffset = 0
+ const VectorBufferOffset = 4
+end
+
+
+# Some common utilities.
macro crash()
storei 0, 0xbbadbeef[]
move 0, t0
@@ -151,7 +119,7 @@ end
macro preserveReturnAddressAfterCall(destinationRegister)
if ARMv7
move lr, destinationRegister
- elsif X86
+ elsif X86 or X86_64
pop destinationRegister
else
error
@@ -161,109 +129,23 @@ end
macro restoreReturnAddressBeforeReturn(sourceRegister)
if ARMv7
move sourceRegister, lr
- elsif X86
+ elsif X86 or X86_64
push sourceRegister
else
error
end
end
-macro dispatch(advance)
- addp advance * 4, PC
- jmp [PC]
-end
-
-macro dispatchBranchWithOffset(pcOffset)
- lshifti 2, pcOffset
- addp pcOffset, PC
- jmp [PC]
-end
-
-macro dispatchBranch(pcOffset)
- loadi pcOffset, t0
- dispatchBranchWithOffset(t0)
-end
-
-macro dispatchAfterCall()
- loadi ArgumentCount + TagOffset[cfr], PC
- jmp [PC]
-end
-
-macro cCall2(function, arg1, arg2)
- if ARMv7
- move arg1, t0
- move arg2, t1
- elsif X86
- poke arg1, 0
- poke arg2, 1
- else
- error
- end
- call function
-end
-
-# This barely works. arg3 and arg4 should probably be immediates.
-macro cCall4(function, arg1, arg2, arg3, arg4)
- if ARMv7
- move arg1, t0
- move arg2, t1
- move arg3, t2
- move arg4, t3
- elsif X86
- poke arg1, 0
- poke arg2, 1
- poke arg3, 2
- poke arg4, 3
- else
- error
- end
- call function
-end
-
-macro callSlowPath(slow_path)
- cCall2(slow_path, cfr, PC)
- move t0, PC
- move t1, cfr
-end
-
-# Debugging operation if you'd like to print an operand in the instruction stream. fromWhere
-# should be an immediate integer - any integer you like; use it to identify the place you're
-# debugging from. operand should likewise be an immediate, and should identify the operand
-# in the instruction stream you'd like to print out.
-macro traceOperand(fromWhere, operand)
- cCall4(_llint_trace_operand, cfr, PC, fromWhere, operand)
- move t0, PC
- move t1, cfr
-end
-
-# Debugging operation if you'd like to print the value of an operand in the instruction
-# stream. Same as traceOperand(), but assumes that the operand is a register, and prints its
-# value.
-macro traceValue(fromWhere, operand)
- cCall4(_llint_trace_value, cfr, PC, fromWhere, operand)
- move t0, PC
- move t1, cfr
-end
-
macro traceExecution()
if EXECUTION_TRACING
callSlowPath(_llint_trace)
end
end
-# Call a slow_path for call opcodes.
-macro callCallSlowPath(advance, slow_path, action)
- addp advance * 4, PC, t0
- storep t0, ArgumentCount + TagOffset[cfr]
- cCall2(slow_path, cfr, PC)
- move t1, cfr
- action(t0)
-end
-
-macro slowPathForCall(advance, slow_path)
+macro slowPathForCall(advance, slowPath)
callCallSlowPath(
advance,
- slow_path,
+ slowPath,
macro (callee)
call callee
dispatchAfterCall()
@@ -273,26 +155,12 @@ end
macro checkSwitchToJIT(increment, action)
if JIT_ENABLED
loadp CodeBlock[cfr], t0
- baddis increment, CodeBlock::m_llintExecuteCounter[t0], .continue
+ baddis increment, CodeBlock::m_llintExecuteCounter + ExecutionCounter::m_counter[t0], .continue
action()
.continue:
end
end
-macro checkSwitchToJITForLoop()
- checkSwitchToJIT(
- 1,
- macro ()
- storei PC, ArgumentCount + TagOffset[cfr]
- cCall2(_llint_loop_osr, cfr, PC)
- move t1, cfr
- btpz t0, .recover
- jmp t0
- .recover:
- loadi ArgumentCount + TagOffset[cfr], PC
- end)
-end
-
macro checkSwitchToJITForEpilogue()
checkSwitchToJIT(
10,
@@ -305,94 +173,6 @@ macro assertNotConstant(index)
assert(macro (ok) bilt index, FirstConstantRegisterIndex, ok end)
end
-# Index, tag, and payload must be different registers. Index is not
-# changed.
-macro loadConstantOrVariable(index, tag, payload)
- bigteq index, FirstConstantRegisterIndex, .constant
- loadi TagOffset[cfr, index, 8], tag
- loadi PayloadOffset[cfr, index, 8], payload
- jmp .done
-.constant:
- loadp CodeBlock[cfr], payload
- loadp CodeBlock::m_constantRegisters + VectorBufferOffset[payload], payload
- # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
- # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
- loadp TagOffset[payload, index, 8], tag
- loadp PayloadOffset[payload, index, 8], payload
-.done:
-end
-
-# Index and payload may be the same register. Index may be clobbered.
-macro loadConstantOrVariable2Reg(index, tag, payload)
- bigteq index, FirstConstantRegisterIndex, .constant
- loadi TagOffset[cfr, index, 8], tag
- loadi PayloadOffset[cfr, index, 8], payload
- jmp .done
-.constant:
- loadp CodeBlock[cfr], tag
- loadp CodeBlock::m_constantRegisters + VectorBufferOffset[tag], tag
- # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
- # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
- lshifti 3, index
- addp index, tag
- loadp PayloadOffset[tag], payload
- loadp TagOffset[tag], tag
-.done:
-end
-
-macro loadConstantOrVariablePayloadTagCustom(index, tagCheck, payload)
- bigteq index, FirstConstantRegisterIndex, .constant
- tagCheck(TagOffset[cfr, index, 8])
- loadi PayloadOffset[cfr, index, 8], payload
- jmp .done
-.constant:
- loadp CodeBlock[cfr], payload
- loadp CodeBlock::m_constantRegisters + VectorBufferOffset[payload], payload
- # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
- # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
- tagCheck(TagOffset[payload, index, 8])
- loadp PayloadOffset[payload, index, 8], payload
-.done:
-end
-
-# Index and payload must be different registers. Index is not mutated. Use
-# this if you know what the tag of the variable should be. Doing the tag
-# test as part of loading the variable reduces register use, but may not
-# be faster than doing loadConstantOrVariable followed by a branch on the
-# tag.
-macro loadConstantOrVariablePayload(index, expectedTag, payload, slow)
- loadConstantOrVariablePayloadTagCustom(
- index,
- macro (actualTag) bineq actualTag, expectedTag, slow end,
- payload)
-end
-
-macro loadConstantOrVariablePayloadUnchecked(index, payload)
- loadConstantOrVariablePayloadTagCustom(
- index,
- macro (actualTag) end,
- payload)
-end
-
-macro writeBarrier(tag, payload)
- # Nothing to do, since we don't have a generational or incremental collector.
-end
-
-macro valueProfile(tag, payload, profile)
- if JIT_ENABLED
- storei tag, ValueProfile::m_buckets + TagOffset[profile]
- storei payload, ValueProfile::m_buckets + PayloadOffset[profile]
- end
-end
-
-
-# Indicate the beginning of LLInt.
-_llint_begin:
- crash()
-
-
-# Entrypoints into the interpreter
-
macro functionForCallCodeBlockGetter(targetRegister)
loadp Callee[cfr], targetRegister
loadp JSFunction::m_executable[targetRegister], targetRegister
@@ -429,7 +209,7 @@ macro prologue(codeBlockGetter, codeBlockSetter, osrSlowPath, traceSlowPath)
end
codeBlockGetter(t1)
if JIT_ENABLED
- baddis 5, CodeBlock::m_llintExecuteCounter[t1], .continue
+ baddis 5, CodeBlock::m_llintExecuteCounter + ExecutionCounter::m_counter[t1], .continue
cCall2(osrSlowPath, cfr, PC)
move t1, cfr
btpz t0, .recover
@@ -443,14 +223,18 @@ macro prologue(codeBlockGetter, codeBlockSetter, osrSlowPath, traceSlowPath)
codeBlockSetter(t1)
# Set up the PC.
- loadp CodeBlock::m_instructions[t1], t0
- loadp CodeBlock::Instructions::m_instructions + VectorBufferOffset[t0], PC
+ if JSVALUE64
+ loadp CodeBlock::m_instructions[t1], PB
+ move 0, PC
+ else
+ loadp CodeBlock::m_instructions[t1], PC
+ end
end
# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
# Must call dispatch(0) after calling this.
macro functionInitialization(profileArgSkip)
- if JIT_ENABLED
+ if VALUE_PROFILER
# Profile the arguments. Unfortunately, we have no choice but to do this. This
# code is pretty horrendous because of the difference in ordering between
# arguments and value profiles, the desire to have a simple loop-down-to-zero
@@ -459,21 +243,27 @@ macro functionInitialization(profileArgSkip)
# optimal way for architectures that have more than five registers available
# for arbitrary use in the interpreter.
loadi CodeBlock::m_numParameters[t1], t0
- addi -profileArgSkip, t0 # Use addi because that's what has the peephole
- assert(macro (ok) bigteq t0, 0, ok end)
- btiz t0, .argumentProfileDone
+ addp -profileArgSkip, t0 # Use addi because that's what has the peephole
+ assert(macro (ok) bpgteq t0, 0, ok end)
+ btpz t0, .argumentProfileDone
loadp CodeBlock::m_argumentValueProfiles + VectorBufferOffset[t1], t3
- muli sizeof ValueProfile, t0, t2 # Aaaaahhhh! Need strength reduction!
- negi t0
- lshifti 3, t0
+ mulp sizeof ValueProfile, t0, t2 # Aaaaahhhh! Need strength reduction!
+ negp t0
+ lshiftp 3, t0
addp t2, t3
.argumentProfileLoop:
- loadi ThisArgumentOffset + TagOffset + 8 - profileArgSkip * 8[cfr, t0], t2
- subp sizeof ValueProfile, t3
- storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + TagOffset[t3]
- loadi ThisArgumentOffset + PayloadOffset + 8 - profileArgSkip * 8[cfr, t0], t2
- storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + PayloadOffset[t3]
- baddinz 8, t0, .argumentProfileLoop
+ if JSVALUE64
+ loadp ThisArgumentOffset + 8 - profileArgSkip * 8[cfr, t0], t2
+ subp sizeof ValueProfile, t3
+ storep t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets[t3]
+ else
+ loadi ThisArgumentOffset + TagOffset + 8 - profileArgSkip * 8[cfr, t0], t2
+ subp sizeof ValueProfile, t3
+ storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + TagOffset[t3]
+ loadi ThisArgumentOffset + PayloadOffset + 8 - profileArgSkip * 8[cfr, t0], t2
+ storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + PayloadOffset[t3]
+ end
+ baddpnz 8, t0, .argumentProfileLoop
.argumentProfileDone:
end
@@ -490,101 +280,6 @@ macro functionInitialization(profileArgSkip)
.stackHeightOK:
end
-# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
-macro functionArityCheck(doneLabel, slow_path)
- loadi PayloadOffset + ArgumentCount[cfr], t0
- biaeq t0, CodeBlock::m_numParameters[t1], doneLabel
- cCall2(slow_path, cfr, PC) # This slow_path has a simple protocol: t0 = 0 => no error, t0 != 0 => error
- move t1, cfr
- btiz t0, .continue
- loadp JITStackFrame::globalData[sp], t1
- loadp JSGlobalData::callFrameForThrow[t1], t0
- jmp JSGlobalData::targetMachinePCForThrow[t1]
-.continue:
- # Reload CodeBlock and PC, since the slow_path clobbered it.
- loadp CodeBlock[cfr], t1
- loadp CodeBlock::m_instructions[t1], t0
- loadp CodeBlock::Instructions::m_instructions + VectorBufferOffset[t0], PC
- jmp doneLabel
-end
-
-_llint_program_prologue:
- prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
- dispatch(0)
-
-
-_llint_eval_prologue:
- prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
- dispatch(0)
-
-
-_llint_function_for_call_prologue:
- prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call, _llint_trace_prologue_function_for_call)
-.functionForCallBegin:
- functionInitialization(0)
- dispatch(0)
-
-
-_llint_function_for_construct_prologue:
- prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct, _llint_trace_prologue_function_for_construct)
-.functionForConstructBegin:
- functionInitialization(1)
- dispatch(0)
-
-
-_llint_function_for_call_arity_check:
- prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call_arityCheck, _llint_trace_arityCheck_for_call)
- functionArityCheck(.functionForCallBegin, _llint_slow_path_call_arityCheck)
-
-
-_llint_function_for_construct_arity_check:
- prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct_arityCheck, _llint_trace_arityCheck_for_construct)
- functionArityCheck(.functionForConstructBegin, _llint_slow_path_construct_arityCheck)
-
-# Instruction implementations
-
-_llint_op_enter:
- traceExecution()
- loadp CodeBlock[cfr], t2
- loadi CodeBlock::m_numVars[t2], t2
- btiz t2, .opEnterDone
- move UndefinedTag, t0
- move 0, t1
-.opEnterLoop:
- subi 1, t2
- storei t0, TagOffset[cfr, t2, 8]
- storei t1, PayloadOffset[cfr, t2, 8]
- btinz t2, .opEnterLoop
-.opEnterDone:
- dispatch(1)
-
-
-_llint_op_create_activation:
- traceExecution()
- loadi 4[PC], t0
- bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opCreateActivationDone
- callSlowPath(_llint_slow_path_create_activation)
-.opCreateActivationDone:
- dispatch(2)
-
-
-_llint_op_init_lazy_reg:
- traceExecution()
- loadi 4[PC], t0
- storei EmptyValueTag, TagOffset[cfr, t0, 8]
- storei 0, PayloadOffset[cfr, t0, 8]
- dispatch(2)
-
-
-_llint_op_create_arguments:
- traceExecution()
- loadi 4[PC], t0
- bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opCreateArgumentsDone
- callSlowPath(_llint_slow_path_create_arguments)
-.opCreateArgumentsDone:
- dispatch(2)
-
-
macro allocateBasicJSObject(sizeClassIndex, classInfoOffset, structure, result, scratch1, scratch2, slowCase)
if ALWAYS_ALLOCATE_SLOW
jmp slowCase
@@ -618,66 +313,62 @@ macro allocateBasicJSObject(sizeClassIndex, classInfoOffset, structure, result,
end
end
-_llint_op_create_this:
- traceExecution()
- loadi 8[PC], t0
- assertNotConstant(t0)
- bineq TagOffset[cfr, t0, 8], CellTag, .opCreateThisSlow
- loadi PayloadOffset[cfr, t0, 8], t0
- loadp JSCell::m_structure[t0], t1
- bbb Structure::m_typeInfo + TypeInfo::m_type[t1], ObjectType, .opCreateThisSlow
- loadp JSObject::m_inheritorID[t0], t2
- btpz t2, .opCreateThisSlow
- allocateBasicJSObject(JSFinalObjectSizeClassIndex, JSGlobalData::jsFinalObjectClassInfo, t2, t0, t1, t3, .opCreateThisSlow)
- loadi 4[PC], t1
- storei CellTag, TagOffset[cfr, t1, 8]
- storei t0, PayloadOffset[cfr, t1, 8]
- dispatch(3)
+macro doReturn()
+ loadp ReturnPC[cfr], t2
+ loadp CallerFrame[cfr], cfr
+ restoreReturnAddressBeforeReturn(t2)
+ ret
+end
-.opCreateThisSlow:
- callSlowPath(_llint_slow_path_create_this)
- dispatch(3)
+# Indicate the beginning of LLInt.
+_llint_begin:
+ crash()
-_llint_op_get_callee:
- traceExecution()
- loadi 4[PC], t0
- loadp PayloadOffset + Callee[cfr], t1
- storei CellTag, TagOffset[cfr, t0, 8]
- storei t1, PayloadOffset[cfr, t0, 8]
- dispatch(2)
+_llint_program_prologue:
+ prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
+ dispatch(0)
-_llint_op_convert_this:
- traceExecution()
- loadi 4[PC], t0
- bineq TagOffset[cfr, t0, 8], CellTag, .opConvertThisSlow
- loadi PayloadOffset[cfr, t0, 8], t0
- loadp JSCell::m_structure[t0], t0
- bbb Structure::m_typeInfo + TypeInfo::m_type[t0], ObjectType, .opConvertThisSlow
- dispatch(2)
-.opConvertThisSlow:
- callSlowPath(_llint_slow_path_convert_this)
- dispatch(2)
+_llint_eval_prologue:
+ prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
+ dispatch(0)
-_llint_op_new_object:
- traceExecution()
- loadp CodeBlock[cfr], t0
- loadp CodeBlock::m_globalObject[t0], t0
- loadp JSGlobalObject::m_emptyObjectStructure[t0], t1
- allocateBasicJSObject(JSFinalObjectSizeClassIndex, JSGlobalData::jsFinalObjectClassInfo, t1, t0, t2, t3, .opNewObjectSlow)
- loadi 4[PC], t1
- storei CellTag, TagOffset[cfr, t1, 8]
- storei t0, PayloadOffset[cfr, t1, 8]
- dispatch(2)
+_llint_function_for_call_prologue:
+ prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call, _llint_trace_prologue_function_for_call)
+.functionForCallBegin:
+ functionInitialization(0)
+ dispatch(0)
+
-.opNewObjectSlow:
- callSlowPath(_llint_slow_path_new_object)
- dispatch(2)
+_llint_function_for_construct_prologue:
+ prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct, _llint_trace_prologue_function_for_construct)
+.functionForConstructBegin:
+ functionInitialization(1)
+ dispatch(0)
+
+
+_llint_function_for_call_arity_check:
+ prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call_arityCheck, _llint_trace_arityCheck_for_call)
+ functionArityCheck(.functionForCallBegin, _llint_slow_path_call_arityCheck)
+
+
+_llint_function_for_construct_arity_check:
+ prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct_arityCheck, _llint_trace_arityCheck_for_construct)
+ functionArityCheck(.functionForConstructBegin, _llint_slow_path_construct_arityCheck)
+
+
+# Value-representation-specific code.
+if JSVALUE64
+ include LowLevelInterpreter64
+else
+ include LowLevelInterpreter32_64
+end
+# Value-representation-agnostic code.
_llint_op_new_array:
traceExecution()
callSlowPath(_llint_slow_path_new_array)
@@ -696,148 +387,6 @@ _llint_op_new_regexp:
dispatch(3)
-_llint_op_mov:
- traceExecution()
- loadi 8[PC], t1
- loadi 4[PC], t0
- loadConstantOrVariable(t1, t2, t3)
- storei t2, TagOffset[cfr, t0, 8]
- storei t3, PayloadOffset[cfr, t0, 8]
- dispatch(3)
-
-
-_llint_op_not:
- traceExecution()
- loadi 8[PC], t0
- loadi 4[PC], t1
- loadConstantOrVariable(t0, t2, t3)
- bineq t2, BooleanTag, .opNotSlow
- xori 1, t3
- storei t2, TagOffset[cfr, t1, 8]
- storei t3, PayloadOffset[cfr, t1, 8]
- dispatch(3)
-
-.opNotSlow:
- callSlowPath(_llint_slow_path_not)
- dispatch(3)
-
-
-_llint_op_eq:
- traceExecution()
- loadi 12[PC], t2
- loadi 8[PC], t0
- loadConstantOrVariable(t2, t3, t1)
- loadConstantOrVariable2Reg(t0, t2, t0)
- bineq t2, t3, .opEqSlow
- bieq t2, CellTag, .opEqSlow
- bib t2, LowestTag, .opEqSlow
- loadi 4[PC], t2
- cieq t0, t1, t0
- storei BooleanTag, TagOffset[cfr, t2, 8]
- storei t0, PayloadOffset[cfr, t2, 8]
- dispatch(4)
-
-.opEqSlow:
- callSlowPath(_llint_slow_path_eq)
- dispatch(4)
-
-
-_llint_op_eq_null:
- traceExecution()
- loadi 8[PC], t0
- loadi 4[PC], t3
- assertNotConstant(t0)
- loadi TagOffset[cfr, t0, 8], t1
- loadi PayloadOffset[cfr, t0, 8], t0
- bineq t1, CellTag, .opEqNullImmediate
- loadp JSCell::m_structure[t0], t1
- tbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, t1
- jmp .opEqNullNotImmediate
-.opEqNullImmediate:
- cieq t1, NullTag, t2
- cieq t1, UndefinedTag, t1
- ori t2, t1
-.opEqNullNotImmediate:
- storei BooleanTag, TagOffset[cfr, t3, 8]
- storei t1, PayloadOffset[cfr, t3, 8]
- dispatch(3)
-
-
-_llint_op_neq:
- traceExecution()
- loadi 12[PC], t2
- loadi 8[PC], t0
- loadConstantOrVariable(t2, t3, t1)
- loadConstantOrVariable2Reg(t0, t2, t0)
- bineq t2, t3, .opNeqSlow
- bieq t2, CellTag, .opNeqSlow
- bib t2, LowestTag, .opNeqSlow
- loadi 4[PC], t2
- cineq t0, t1, t0
- storei BooleanTag, TagOffset[cfr, t2, 8]
- storei t0, PayloadOffset[cfr, t2, 8]
- dispatch(4)
-
-.opNeqSlow:
- callSlowPath(_llint_slow_path_neq)
- dispatch(4)
-
-
-_llint_op_neq_null:
- traceExecution()
- loadi 8[PC], t0
- loadi 4[PC], t3
- assertNotConstant(t0)
- loadi TagOffset[cfr, t0, 8], t1
- loadi PayloadOffset[cfr, t0, 8], t0
- bineq t1, CellTag, .opNeqNullImmediate
- loadp JSCell::m_structure[t0], t1
- tbz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, t1
- jmp .opNeqNullNotImmediate
-.opNeqNullImmediate:
- cineq t1, NullTag, t2
- cineq t1, UndefinedTag, t1
- andi t2, t1
-.opNeqNullNotImmediate:
- storei BooleanTag, TagOffset[cfr, t3, 8]
- storei t1, PayloadOffset[cfr, t3, 8]
- dispatch(3)
-
-
-macro strictEq(equalityOperation, slow_path)
- loadi 12[PC], t2
- loadi 8[PC], t0
- loadConstantOrVariable(t2, t3, t1)
- loadConstantOrVariable2Reg(t0, t2, t0)
- bineq t2, t3, .slow
- bib t2, LowestTag, .slow
- bineq t2, CellTag, .notString
- loadp JSCell::m_structure[t0], t2
- loadp JSCell::m_structure[t1], t3
- bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .notString
- bbeq Structure::m_typeInfo + TypeInfo::m_type[t3], StringType, .slow
-.notString:
- loadi 4[PC], t2
- equalityOperation(t0, t1, t0)
- storei BooleanTag, TagOffset[cfr, t2, 8]
- storei t0, PayloadOffset[cfr, t2, 8]
- dispatch(4)
-
-.slow:
- callSlowPath(slow_path)
- dispatch(4)
-end
-
-_llint_op_stricteq:
- traceExecution()
- strictEq(macro (left, right, result) cieq left, right, result end, _llint_slow_path_stricteq)
-
-
-_llint_op_nstricteq:
- traceExecution()
- strictEq(macro (left, right, result) cineq left, right, result end, _llint_slow_path_nstricteq)
-
-
_llint_op_less:
traceExecution()
callSlowPath(_llint_slow_path_less)
@@ -862,360 +411,12 @@ _llint_op_greatereq:
dispatch(4)
-_llint_op_pre_inc:
- traceExecution()
- loadi 4[PC], t0
- bineq TagOffset[cfr, t0, 8], Int32Tag, .opPreIncSlow
- loadi PayloadOffset[cfr, t0, 8], t1
- baddio 1, t1, .opPreIncSlow
- storei t1, PayloadOffset[cfr, t0, 8]
- dispatch(2)
-
-.opPreIncSlow:
- callSlowPath(_llint_slow_path_pre_inc)
- dispatch(2)
-
-
-_llint_op_pre_dec:
- traceExecution()
- loadi 4[PC], t0
- bineq TagOffset[cfr, t0, 8], Int32Tag, .opPreDecSlow
- loadi PayloadOffset[cfr, t0, 8], t1
- bsubio 1, t1, .opPreDecSlow
- storei t1, PayloadOffset[cfr, t0, 8]
- dispatch(2)
-
-.opPreDecSlow:
- callSlowPath(_llint_slow_path_pre_dec)
- dispatch(2)
-
-
-_llint_op_post_inc:
- traceExecution()
- loadi 8[PC], t0
- loadi 4[PC], t1
- bineq TagOffset[cfr, t0, 8], Int32Tag, .opPostIncSlow
- bieq t0, t1, .opPostIncDone
- loadi PayloadOffset[cfr, t0, 8], t2
- move t2, t3
- baddio 1, t3, .opPostIncSlow
- storei Int32Tag, TagOffset[cfr, t1, 8]
- storei t2, PayloadOffset[cfr, t1, 8]
- storei t3, PayloadOffset[cfr, t0, 8]
-.opPostIncDone:
- dispatch(3)
-
-.opPostIncSlow:
- callSlowPath(_llint_slow_path_post_inc)
- dispatch(3)
-
-
-_llint_op_post_dec:
- traceExecution()
- loadi 8[PC], t0
- loadi 4[PC], t1
- bineq TagOffset[cfr, t0, 8], Int32Tag, .opPostDecSlow
- bieq t0, t1, .opPostDecDone
- loadi PayloadOffset[cfr, t0, 8], t2
- move t2, t3
- bsubio 1, t3, .opPostDecSlow
- storei Int32Tag, TagOffset[cfr, t1, 8]
- storei t2, PayloadOffset[cfr, t1, 8]
- storei t3, PayloadOffset[cfr, t0, 8]
-.opPostDecDone:
- dispatch(3)
-
-.opPostDecSlow:
- callSlowPath(_llint_slow_path_post_dec)
- dispatch(3)
-
-
-_llint_op_to_jsnumber:
- traceExecution()
- loadi 8[PC], t0
- loadi 4[PC], t1
- loadConstantOrVariable(t0, t2, t3)
- bieq t2, Int32Tag, .opToJsnumberIsInt
- biaeq t2, EmptyValueTag, .opToJsnumberSlow
-.opToJsnumberIsInt:
- storei t2, TagOffset[cfr, t1, 8]
- storei t3, PayloadOffset[cfr, t1, 8]
- dispatch(3)
-
-.opToJsnumberSlow:
- callSlowPath(_llint_slow_path_to_jsnumber)
- dispatch(3)
-
-
-_llint_op_negate:
- traceExecution()
- loadi 8[PC], t0
- loadi 4[PC], t3
- loadConstantOrVariable(t0, t1, t2)
- bineq t1, Int32Tag, .opNegateSrcNotInt
- btiz t2, 0x7fffffff, .opNegateSlow
- negi t2
- storei Int32Tag, TagOffset[cfr, t3, 8]
- storei t2, PayloadOffset[cfr, t3, 8]
- dispatch(3)
-.opNegateSrcNotInt:
- bia t1, LowestTag, .opNegateSlow
- xori 0x80000000, t1
- storei t1, TagOffset[cfr, t3, 8]
- storei t2, PayloadOffset[cfr, t3, 8]
- dispatch(3)
-
-.opNegateSlow:
- callSlowPath(_llint_slow_path_negate)
- dispatch(3)
-
-
-macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slow_path)
- loadi 12[PC], t2
- loadi 8[PC], t0
- loadConstantOrVariable(t2, t3, t1)
- loadConstantOrVariable2Reg(t0, t2, t0)
- bineq t2, Int32Tag, .op1NotInt
- bineq t3, Int32Tag, .op2NotInt
- loadi 4[PC], t2
- integerOperationAndStore(t3, t1, t0, .slow, t2)
- dispatch(5)
-
-.op1NotInt:
- # First operand is definitely not an int, the second operand could be anything.
- bia t2, LowestTag, .slow
- bib t3, LowestTag, .op1NotIntOp2Double
- bineq t3, Int32Tag, .slow
- ci2d t1, ft1
- jmp .op1NotIntReady
-.op1NotIntOp2Double:
- fii2d t1, t3, ft1
-.op1NotIntReady:
- loadi 4[PC], t1
- fii2d t0, t2, ft0
- doubleOperation(ft1, ft0)
- stored ft0, [cfr, t1, 8]
- dispatch(5)
-
-.op2NotInt:
- # First operand is definitely an int, the second operand is definitely not.
- loadi 4[PC], t2
- bia t3, LowestTag, .slow
- ci2d t0, ft0
- fii2d t1, t3, ft1
- doubleOperation(ft1, ft0)
- stored ft0, [cfr, t2, 8]
- dispatch(5)
-
-.slow:
- callSlowPath(slow_path)
- dispatch(5)
-end
-
-macro binaryOp(integerOperation, doubleOperation, slow_path)
- binaryOpCustomStore(
- macro (int32Tag, left, right, slow, index)
- integerOperation(left, right, slow)
- storei int32Tag, TagOffset[cfr, index, 8]
- storei right, PayloadOffset[cfr, index, 8]
- end,
- doubleOperation, slow_path)
-end
-
-_llint_op_add:
- traceExecution()
- binaryOp(
- macro (left, right, slow) baddio left, right, slow end,
- macro (left, right) addd left, right end,
- _llint_slow_path_add)
-
-
-_llint_op_mul:
- traceExecution()
- binaryOpCustomStore(
- macro (int32Tag, left, right, slow, index)
- const scratch = int32Tag # We know that we can reuse the int32Tag register since it has a constant.
- move right, scratch
- bmulio left, scratch, slow
- btinz scratch, .done
- bilt left, 0, slow
- bilt right, 0, slow
- .done:
- storei Int32Tag, TagOffset[cfr, index, 8]
- storei scratch, PayloadOffset[cfr, index, 8]
- end,
- macro (left, right) muld left, right end,
- _llint_slow_path_mul)
-
-
-_llint_op_sub:
- traceExecution()
- binaryOp(
- macro (left, right, slow) bsubio left, right, slow end,
- macro (left, right) subd left, right end,
- _llint_slow_path_sub)
-
-
-_llint_op_div:
- traceExecution()
- binaryOpCustomStore(
- macro (int32Tag, left, right, slow, index)
- ci2d left, ft0
- ci2d right, ft1
- divd ft0, ft1
- bcd2i ft1, right, .notInt
- storei int32Tag, TagOffset[cfr, index, 8]
- storei right, PayloadOffset[cfr, index, 8]
- jmp .done
- .notInt:
- stored ft1, [cfr, index, 8]
- .done:
- end,
- macro (left, right) divd left, right end,
- _llint_slow_path_div)
-
-
_llint_op_mod:
traceExecution()
callSlowPath(_llint_slow_path_mod)
dispatch(4)
-macro bitOp(operation, slow_path, advance)
- loadi 12[PC], t2
- loadi 8[PC], t0
- loadConstantOrVariable(t2, t3, t1)
- loadConstantOrVariable2Reg(t0, t2, t0)
- bineq t3, Int32Tag, .slow
- bineq t2, Int32Tag, .slow
- loadi 4[PC], t2
- operation(t1, t0, .slow)
- storei t3, TagOffset[cfr, t2, 8]
- storei t0, PayloadOffset[cfr, t2, 8]
- dispatch(advance)
-
-.slow:
- callSlowPath(slow_path)
- dispatch(advance)
-end
-
-_llint_op_lshift:
- traceExecution()
- bitOp(
- macro (left, right, slow) lshifti left, right end,
- _llint_slow_path_lshift,
- 4)
-
-
-_llint_op_rshift:
- traceExecution()
- bitOp(
- macro (left, right, slow) rshifti left, right end,
- _llint_slow_path_rshift,
- 4)
-
-
-_llint_op_urshift:
- traceExecution()
- bitOp(
- macro (left, right, slow)
- urshifti left, right
- bilt right, 0, slow
- end,
- _llint_slow_path_urshift,
- 4)
-
-
-_llint_op_bitand:
- traceExecution()
- bitOp(
- macro (left, right, slow) andi left, right end,
- _llint_slow_path_bitand,
- 5)
-
-
-_llint_op_bitxor:
- traceExecution()
- bitOp(
- macro (left, right, slow) xori left, right end,
- _llint_slow_path_bitxor,
- 5)
-
-
-_llint_op_bitor:
- traceExecution()
- bitOp(
- macro (left, right, slow) ori left, right end,
- _llint_slow_path_bitor,
- 5)
-
-
-_llint_op_bitnot:
- traceExecution()
- loadi 8[PC], t1
- loadi 4[PC], t0
- loadConstantOrVariable(t1, t2, t3)
- bineq t2, Int32Tag, .opBitnotSlow
- noti t3
- storei t2, TagOffset[cfr, t0, 8]
- storei t3, PayloadOffset[cfr, t0, 8]
- dispatch(3)
-
-.opBitnotSlow:
- callSlowPath(_llint_slow_path_bitnot)
- dispatch(3)
-
-
-_llint_op_check_has_instance:
- traceExecution()
- loadi 4[PC], t1
- loadConstantOrVariablePayload(t1, CellTag, t0, .opCheckHasInstanceSlow)
- loadp JSCell::m_structure[t0], t0
- btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsHasInstance, .opCheckHasInstanceSlow
- dispatch(2)
-
-.opCheckHasInstanceSlow:
- callSlowPath(_llint_slow_path_check_has_instance)
- dispatch(2)
-
-
-_llint_op_instanceof:
- traceExecution()
- # Check that baseVal implements the default HasInstance behavior.
- # FIXME: This should be deprecated.
- loadi 12[PC], t1
- loadConstantOrVariablePayloadUnchecked(t1, t0)
- loadp JSCell::m_structure[t0], t0
- btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsDefaultHasInstance, .opInstanceofSlow
-
- # Actually do the work.
- loadi 16[PC], t0
- loadi 4[PC], t3
- loadConstantOrVariablePayload(t0, CellTag, t1, .opInstanceofSlow)
- loadp JSCell::m_structure[t1], t2
- bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opInstanceofSlow
- loadi 8[PC], t0
- loadConstantOrVariablePayload(t0, CellTag, t2, .opInstanceofSlow)
-
- # Register state: t1 = prototype, t2 = value
- move 1, t0
-.opInstanceofLoop:
- loadp JSCell::m_structure[t2], t2
- loadi Structure::m_prototype + PayloadOffset[t2], t2
- bpeq t2, t1, .opInstanceofDone
- btinz t2, .opInstanceofLoop
-
- move 0, t0
-.opInstanceofDone:
- storei BooleanTag, TagOffset[cfr, t3, 8]
- storei t0, PayloadOffset[cfr, t3, 8]
- dispatch(5)
-
-.opInstanceofSlow:
- callSlowPath(_llint_slow_path_instanceof)
- dispatch(5)
-
-
_llint_op_typeof:
traceExecution()
callSlowPath(_llint_slow_path_typeof)
@@ -1276,156 +477,6 @@ _llint_op_resolve_skip:
dispatch(5)
-macro resolveGlobal(size, slow)
- # Operands are as follows:
- # 4[PC] Destination for the load.
- # 8[PC] Property identifier index in the code block.
- # 12[PC] Structure pointer, initialized to 0 by bytecode generator.
- # 16[PC] Offset in global object, initialized to 0 by bytecode generator.
- loadp CodeBlock[cfr], t0
- loadp CodeBlock::m_globalObject[t0], t0
- loadp JSCell::m_structure[t0], t1
- bpneq t1, 12[PC], slow
- loadi 16[PC], t1
- loadp JSObject::m_propertyStorage[t0], t0
- loadi TagOffset[t0, t1, 8], t2
- loadi PayloadOffset[t0, t1, 8], t3
- loadi 4[PC], t0
- storei t2, TagOffset[cfr, t0, 8]
- storei t3, PayloadOffset[cfr, t0, 8]
- loadi (size - 1) * 4[PC], t0
- valueProfile(t2, t3, t0)
-end
-
-_llint_op_resolve_global:
- traceExecution()
- resolveGlobal(6, .opResolveGlobalSlow)
- dispatch(6)
-
-.opResolveGlobalSlow:
- callSlowPath(_llint_slow_path_resolve_global)
- dispatch(6)
-
-
-# Gives you the scope in t0, while allowing you to optionally perform additional checks on the
-# scopes as they are traversed. scopeCheck() is called with two arguments: the register
-# holding the scope, and a register that can be used for scratch. Note that this does not
-# use t3, so you can hold stuff in t3 if need be.
-macro getScope(deBruijinIndexOperand, scopeCheck)
- loadp ScopeChain + PayloadOffset[cfr], t0
- loadi deBruijinIndexOperand, t2
-
- btiz t2, .done
-
- loadp CodeBlock[cfr], t1
- bineq CodeBlock::m_codeType[t1], FunctionCode, .loop
- btbz CodeBlock::m_needsFullScopeChain[t1], .loop
-
- loadi CodeBlock::m_activationRegister[t1], t1
-
- # Need to conditionally skip over one scope.
- bieq TagOffset[cfr, t1, 8], EmptyValueTag, .noActivation
- scopeCheck(t0, t1)
- loadp ScopeChainNode::next[t0], t0
-.noActivation:
- subi 1, t2
-
- btiz t2, .done
-.loop:
- scopeCheck(t0, t1)
- loadp ScopeChainNode::next[t0], t0
- subi 1, t2
- btinz t2, .loop
-
-.done:
-end
-
-_llint_op_resolve_global_dynamic:
- traceExecution()
- loadp JITStackFrame::globalData[sp], t3
- loadp JSGlobalData::activationStructure[t3], t3
- getScope(
- 20[PC],
- macro (scope, scratch)
- loadp ScopeChainNode::object[scope], scratch
- bpneq JSCell::m_structure[scratch], t3, .opResolveGlobalDynamicSuperSlow
- end)
- resolveGlobal(7, .opResolveGlobalDynamicSlow)
- dispatch(7)
-
-.opResolveGlobalDynamicSuperSlow:
- callSlowPath(_llint_slow_path_resolve_for_resolve_global_dynamic)
- dispatch(7)
-
-.opResolveGlobalDynamicSlow:
- callSlowPath(_llint_slow_path_resolve_global_dynamic)
- dispatch(7)
-
-
-_llint_op_get_scoped_var:
- traceExecution()
- # Operands are as follows:
- # 4[PC] Destination for the load.
- # 8[PC] Index of register in the scope.
- # 12[PC] De Bruijin index.
- getScope(12[PC], macro (scope, scratch) end)
- loadi 4[PC], t1
- loadi 8[PC], t2
- loadp ScopeChainNode::object[t0], t0
- loadp JSVariableObject::m_registers[t0], t0
- loadi TagOffset[t0, t2, 8], t3
- loadi PayloadOffset[t0, t2, 8], t0
- storei t3, TagOffset[cfr, t1, 8]
- storei t0, PayloadOffset[cfr, t1, 8]
- loadi 16[PC], t1
- valueProfile(t3, t0, t1)
- dispatch(5)
-
-
-_llint_op_put_scoped_var:
- traceExecution()
- getScope(8[PC], macro (scope, scratch) end)
- loadi 12[PC], t1
- loadConstantOrVariable(t1, t3, t2)
- loadi 4[PC], t1
- writeBarrier(t3, t2)
- loadp ScopeChainNode::object[t0], t0
- loadp JSVariableObject::m_registers[t0], t0
- storei t3, TagOffset[t0, t1, 8]
- storei t2, PayloadOffset[t0, t1, 8]
- dispatch(4)
-
-
-_llint_op_get_global_var:
- traceExecution()
- loadi 8[PC], t1
- loadi 4[PC], t3
- loadp CodeBlock[cfr], t0
- loadp CodeBlock::m_globalObject[t0], t0
- loadp JSGlobalObject::m_registers[t0], t0
- loadi TagOffset[t0, t1, 8], t2
- loadi PayloadOffset[t0, t1, 8], t1
- storei t2, TagOffset[cfr, t3, 8]
- storei t1, PayloadOffset[cfr, t3, 8]
- loadi 12[PC], t3
- valueProfile(t2, t1, t3)
- dispatch(4)
-
-
-_llint_op_put_global_var:
- traceExecution()
- loadi 8[PC], t1
- loadp CodeBlock[cfr], t0
- loadp CodeBlock::m_globalObject[t0], t0
- loadp JSGlobalObject::m_registers[t0], t0
- loadConstantOrVariable(t1, t2, t3)
- loadi 4[PC], t1
- writeBarrier(t2, t3)
- storei t2, TagOffset[t0, t1, 8]
- storei t3, PayloadOffset[t0, t1, 8]
- dispatch(3)
-
-
_llint_op_resolve_base:
traceExecution()
callSlowPath(_llint_slow_path_resolve_base)
@@ -1450,230 +501,12 @@ _llint_op_resolve_with_this:
dispatch(5)
-_llint_op_get_by_id:
- traceExecution()
- # We only do monomorphic get_by_id caching for now, and we do not modify the
- # opcode. We do, however, allow the cache to change any time it fails, since
- # ping-ponging is free. At best we get lucky and the get_by_id will continue
- # to take fast path on the new cache. At worst we take slow path, which is what
- # we would have been doing anyway.
- loadi 8[PC], t0
- loadi 16[PC], t1
- loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdSlow)
- loadi 20[PC], t2
- loadp JSObject::m_propertyStorage[t3], t0
- bpneq JSCell::m_structure[t3], t1, .opGetByIdSlow
- loadi 4[PC], t1
- loadi TagOffset[t0, t2], t3
- loadi PayloadOffset[t0, t2], t2
- storei t3, TagOffset[cfr, t1, 8]
- storei t2, PayloadOffset[cfr, t1, 8]
- loadi 32[PC], t1
- valueProfile(t3, t2, t1)
- dispatch(9)
-
-.opGetByIdSlow:
- callSlowPath(_llint_slow_path_get_by_id)
- dispatch(9)
-
-
-_llint_op_get_arguments_length:
- traceExecution()
- loadi 8[PC], t0
- loadi 4[PC], t1
- bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opGetArgumentsLengthSlow
- loadi ArgumentCount + PayloadOffset[cfr], t2
- subi 1, t2
- storei Int32Tag, TagOffset[cfr, t1, 8]
- storei t2, PayloadOffset[cfr, t1, 8]
- dispatch(4)
-
-.opGetArgumentsLengthSlow:
- callSlowPath(_llint_slow_path_get_arguments_length)
- dispatch(4)
-
-
-_llint_op_put_by_id:
- traceExecution()
- loadi 4[PC], t3
- loadi 16[PC], t1
- loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
- loadi 12[PC], t2
- loadp JSObject::m_propertyStorage[t0], t3
- bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
- loadi 20[PC], t1
- loadConstantOrVariable2Reg(t2, t0, t2)
- writeBarrier(t0, t2)
- storei t0, TagOffset[t3, t1]
- storei t2, PayloadOffset[t3, t1]
- dispatch(9)
-
-.opPutByIdSlow:
- callSlowPath(_llint_slow_path_put_by_id)
- dispatch(9)
-
-
-macro putByIdTransition(additionalChecks)
- traceExecution()
- loadi 4[PC], t3
- loadi 16[PC], t1
- loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
- loadi 12[PC], t2
- bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
- additionalChecks(t1, t3, .opPutByIdSlow)
- loadi 20[PC], t1
- loadp JSObject::m_propertyStorage[t0], t3
- addp t1, t3
- loadConstantOrVariable2Reg(t2, t1, t2)
- writeBarrier(t1, t2)
- storei t1, TagOffset[t3]
- loadi 24[PC], t1
- storei t2, PayloadOffset[t3]
- storep t1, JSCell::m_structure[t0]
- dispatch(9)
-end
-
-_llint_op_put_by_id_transition_direct:
- putByIdTransition(macro (oldStructure, scratch, slow) end)
-
-
-_llint_op_put_by_id_transition_normal:
- putByIdTransition(
- macro (oldStructure, scratch, slow)
- const protoCell = oldStructure # Reusing the oldStructure register for the proto
-
- loadp 28[PC], scratch
- assert(macro (ok) btpnz scratch, ok end)
- loadp StructureChain::m_vector[scratch], scratch
- assert(macro (ok) btpnz scratch, ok end)
- bieq Structure::m_prototype + TagOffset[oldStructure], NullTag, .done
- .loop:
- loadi Structure::m_prototype + PayloadOffset[oldStructure], protoCell
- loadp JSCell::m_structure[protoCell], oldStructure
- bpneq oldStructure, [scratch], slow
- addp 4, scratch
- bineq Structure::m_prototype + TagOffset[oldStructure], NullTag, .loop
- .done:
- end)
-
-
_llint_op_del_by_id:
traceExecution()
callSlowPath(_llint_slow_path_del_by_id)
dispatch(4)
-_llint_op_get_by_val:
- traceExecution()
- loadp CodeBlock[cfr], t1
- loadi 8[PC], t2
- loadi 12[PC], t3
- loadp CodeBlock::m_globalData[t1], t1
- loadConstantOrVariablePayload(t2, CellTag, t0, .opGetByValSlow)
- loadp JSGlobalData::jsArrayClassInfo[t1], t2
- loadConstantOrVariablePayload(t3, Int32Tag, t1, .opGetByValSlow)
- bpneq [t0], t2, .opGetByValSlow
- loadp JSArray::m_storage[t0], t3
- biaeq t1, JSArray::m_vectorLength[t0], .opGetByValSlow
- loadi 4[PC], t0
- loadi ArrayStorage::m_vector + TagOffset[t3, t1, 8], t2
- loadi ArrayStorage::m_vector + PayloadOffset[t3, t1, 8], t1
- bieq t2, EmptyValueTag, .opGetByValSlow
- storei t2, TagOffset[cfr, t0, 8]
- storei t1, PayloadOffset[cfr, t0, 8]
- loadi 16[PC], t0
- valueProfile(t2, t1, t0)
- dispatch(5)
-
-.opGetByValSlow:
- callSlowPath(_llint_slow_path_get_by_val)
- dispatch(5)
-
-
-_llint_op_get_argument_by_val:
- traceExecution()
- loadi 8[PC], t0
- loadi 12[PC], t1
- bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opGetArgumentByValSlow
- loadConstantOrVariablePayload(t1, Int32Tag, t2, .opGetArgumentByValSlow)
- addi 1, t2
- loadi ArgumentCount + PayloadOffset[cfr], t1
- biaeq t2, t1, .opGetArgumentByValSlow
- negi t2
- loadi 4[PC], t3
- loadi ThisArgumentOffset + TagOffset[cfr, t2, 8], t0
- loadi ThisArgumentOffset + PayloadOffset[cfr, t2, 8], t1
- storei t0, TagOffset[cfr, t3, 8]
- storei t1, PayloadOffset[cfr, t3, 8]
- dispatch(5)
-
-.opGetArgumentByValSlow:
- callSlowPath(_llint_slow_path_get_argument_by_val)
- dispatch(5)
-
-
-_llint_op_get_by_pname:
- traceExecution()
- loadi 12[PC], t0
- loadConstantOrVariablePayload(t0, CellTag, t1, .opGetByPnameSlow)
- loadi 16[PC], t0
- bpneq t1, PayloadOffset[cfr, t0, 8], .opGetByPnameSlow
- loadi 8[PC], t0
- loadConstantOrVariablePayload(t0, CellTag, t2, .opGetByPnameSlow)
- loadi 20[PC], t0
- loadi PayloadOffset[cfr, t0, 8], t3
- loadp JSCell::m_structure[t2], t0
- bpneq t0, JSPropertyNameIterator::m_cachedStructure[t3], .opGetByPnameSlow
- loadi 24[PC], t0
- loadi [cfr, t0, 8], t0
- subi 1, t0
- biaeq t0, JSPropertyNameIterator::m_numCacheableSlots[t3], .opGetByPnameSlow
- loadp JSObject::m_propertyStorage[t2], t2
- loadi TagOffset[t2, t0, 8], t1
- loadi PayloadOffset[t2, t0, 8], t3
- loadi 4[PC], t0
- storei t1, TagOffset[cfr, t0, 8]
- storei t3, PayloadOffset[cfr, t0, 8]
- dispatch(7)
-
-.opGetByPnameSlow:
- callSlowPath(_llint_slow_path_get_by_pname)
- dispatch(7)
-
-
-_llint_op_put_by_val:
- traceExecution()
- loadi 4[PC], t0
- loadConstantOrVariablePayload(t0, CellTag, t1, .opPutByValSlow)
- loadi 8[PC], t0
- loadConstantOrVariablePayload(t0, Int32Tag, t2, .opPutByValSlow)
- loadp CodeBlock[cfr], t0
- loadp CodeBlock::m_globalData[t0], t0
- loadp JSGlobalData::jsArrayClassInfo[t0], t0
- bpneq [t1], t0, .opPutByValSlow
- biaeq t2, JSArray::m_vectorLength[t1], .opPutByValSlow
- loadp JSArray::m_storage[t1], t0
- bieq ArrayStorage::m_vector + TagOffset[t0, t2, 8], EmptyValueTag, .opPutByValEmpty
-.opPutByValStoreResult:
- loadi 12[PC], t3
- loadConstantOrVariable2Reg(t3, t1, t3)
- writeBarrier(t1, t3)
- storei t1, ArrayStorage::m_vector + TagOffset[t0, t2, 8]
- storei t3, ArrayStorage::m_vector + PayloadOffset[t0, t2, 8]
- dispatch(4)
-
-.opPutByValEmpty:
- addi 1, ArrayStorage::m_numValuesInVector[t0]
- bib t2, ArrayStorage::m_length[t0], .opPutByValStoreResult
- addi 1, t2, t1
- storei t1, ArrayStorage::m_length[t0]
- jmp .opPutByValStoreResult
-
-.opPutByValSlow:
- callSlowPath(_llint_slow_path_put_by_val)
- dispatch(4)
-
-
_llint_op_del_by_val:
traceExecution()
callSlowPath(_llint_slow_path_del_by_val)
@@ -1692,33 +525,12 @@ _llint_op_put_getter_setter:
dispatch(5)
-_llint_op_loop:
- nop
-_llint_op_jmp:
- traceExecution()
- dispatchBranch(4[PC])
-
-
_llint_op_jmp_scopes:
traceExecution()
callSlowPath(_llint_slow_path_jmp_scopes)
dispatch(0)
-macro jumpTrueOrFalse(conditionOp, slow)
- loadi 4[PC], t1
- loadConstantOrVariablePayload(t1, BooleanTag, t0, .slow)
- conditionOp(t0, .target)
- dispatch(3)
-
-.target:
- dispatchBranch(8[PC])
-
-.slow:
- callSlowPath(slow)
- dispatch(0)
-end
-
_llint_op_loop_if_true:
nop
_llint_op_jtrue:
@@ -1737,88 +549,6 @@ _llint_op_jfalse:
_llint_slow_path_jfalse)
-macro equalNull(cellHandler, immediateHandler)
- loadi 4[PC], t0
- loadi TagOffset[cfr, t0, 8], t1
- loadi PayloadOffset[cfr, t0, 8], t0
- bineq t1, CellTag, .immediate
- loadp JSCell::m_structure[t0], t2
- cellHandler(Structure::m_typeInfo + TypeInfo::m_flags[t2], .target)
- dispatch(3)
-
-.target:
- dispatchBranch(8[PC])
-
-.immediate:
- ori 1, t1
- immediateHandler(t1, .target)
- dispatch(3)
-end
-
-_llint_op_jeq_null:
- traceExecution()
- equalNull(
- macro (value, target) btbnz value, MasqueradesAsUndefined, target end,
- macro (value, target) bieq value, NullTag, target end)
-
-
-_llint_op_jneq_null:
- traceExecution()
- equalNull(
- macro (value, target) btbz value, MasqueradesAsUndefined, target end,
- macro (value, target) bineq value, NullTag, target end)
-
-
-_llint_op_jneq_ptr:
- traceExecution()
- loadi 4[PC], t0
- loadi 8[PC], t1
- bineq TagOffset[cfr, t0, 8], CellTag, .opJneqPtrBranch
- bpeq PayloadOffset[cfr, t0, 8], t1, .opJneqPtrFallThrough
-.opJneqPtrBranch:
- dispatchBranch(12[PC])
-.opJneqPtrFallThrough:
- dispatch(4)
-
-
-macro compare(integerCompare, doubleCompare, slow_path)
- loadi 4[PC], t2
- loadi 8[PC], t3
- loadConstantOrVariable(t2, t0, t1)
- loadConstantOrVariable2Reg(t3, t2, t3)
- bineq t0, Int32Tag, .op1NotInt
- bineq t2, Int32Tag, .op2NotInt
- integerCompare(t1, t3, .jumpTarget)
- dispatch(4)
-
-.op1NotInt:
- bia t0, LowestTag, .slow
- bib t2, LowestTag, .op1NotIntOp2Double
- bineq t2, Int32Tag, .slow
- ci2d t3, ft1
- jmp .op1NotIntReady
-.op1NotIntOp2Double:
- fii2d t3, t2, ft1
-.op1NotIntReady:
- fii2d t1, t0, ft0
- doubleCompare(ft0, ft1, .jumpTarget)
- dispatch(4)
-
-.op2NotInt:
- ci2d t1, ft0
- bia t2, LowestTag, .slow
- fii2d t3, t2, ft1
- doubleCompare(ft0, ft1, .jumpTarget)
- dispatch(4)
-
-.jumpTarget:
- dispatchBranch(12[PC])
-
-.slow:
- callSlowPath(slow_path)
- dispatch(0)
-end
-
_llint_op_loop_if_less:
nop
_llint_op_jless:
@@ -1897,117 +627,18 @@ _llint_op_loop_hint:
dispatch(1)
-_llint_op_switch_imm:
- traceExecution()
- loadi 12[PC], t2
- loadi 4[PC], t3
- loadConstantOrVariable(t2, t1, t0)
- loadp CodeBlock[cfr], t2
- loadp CodeBlock::m_rareData[t2], t2
- muli sizeof SimpleJumpTable, t3 # FIXME: would be nice to peephole this!
- loadp CodeBlock::RareData::m_immediateSwitchJumpTables + VectorBufferOffset[t2], t2
- addp t3, t2
- bineq t1, Int32Tag, .opSwitchImmNotInt
- subi SimpleJumpTable::min[t2], t0
- biaeq t0, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchImmFallThrough
- loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t3
- loadi [t3, t0, 4], t1
- btiz t1, .opSwitchImmFallThrough
- dispatchBranchWithOffset(t1)
-
-.opSwitchImmNotInt:
- bib t1, LowestTag, .opSwitchImmSlow # Go to slow path if it's a double.
-.opSwitchImmFallThrough:
- dispatchBranch(8[PC])
-
-.opSwitchImmSlow:
- callSlowPath(_llint_slow_path_switch_imm)
- dispatch(0)
-
-
-_llint_op_switch_char:
- traceExecution()
- loadi 12[PC], t2
- loadi 4[PC], t3
- loadConstantOrVariable(t2, t1, t0)
- loadp CodeBlock[cfr], t2
- loadp CodeBlock::m_rareData[t2], t2
- muli sizeof SimpleJumpTable, t3
- loadp CodeBlock::RareData::m_characterSwitchJumpTables + VectorBufferOffset[t2], t2
- addp t3, t2
- bineq t1, CellTag, .opSwitchCharFallThrough
- loadp JSCell::m_structure[t0], t1
- bbneq Structure::m_typeInfo + TypeInfo::m_type[t1], StringType, .opSwitchCharFallThrough
- loadp JSString::m_value[t0], t0
- bineq StringImpl::m_length[t0], 1, .opSwitchCharFallThrough
- loadp StringImpl::m_data8[t0], t1
- btinz StringImpl::m_hashAndFlags[t0], HashFlags8BitBuffer, .opSwitchChar8Bit
- loadh [t1], t0
- jmp .opSwitchCharReady
-.opSwitchChar8Bit:
- loadb [t1], t0
-.opSwitchCharReady:
- subi SimpleJumpTable::min[t2], t0
- biaeq t0, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchCharFallThrough
- loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t2
- loadi [t2, t0, 4], t1
- btiz t1, .opSwitchImmFallThrough
- dispatchBranchWithOffset(t1)
-
-.opSwitchCharFallThrough:
- dispatchBranch(8[PC])
-
-
_llint_op_switch_string:
traceExecution()
callSlowPath(_llint_slow_path_switch_string)
dispatch(0)
-_llint_op_new_func:
- traceExecution()
- btiz 12[PC], .opNewFuncUnchecked
- loadi 4[PC], t1
- bineq TagOffset[cfr, t1, 8], EmptyValueTag, .opNewFuncDone
-.opNewFuncUnchecked:
- callSlowPath(_llint_slow_path_new_func)
-.opNewFuncDone:
- dispatch(4)
-
-
_llint_op_new_func_exp:
traceExecution()
callSlowPath(_llint_slow_path_new_func_exp)
dispatch(3)
-macro doCall(slow_path)
- loadi 4[PC], t0
- loadi 16[PC], t1
- loadp LLIntCallLinkInfo::callee[t1], t2
- loadConstantOrVariablePayload(t0, CellTag, t3, .opCallSlow)
- bineq t3, t2, .opCallSlow
- loadi 12[PC], t3
- addp 24, PC
- lshifti 3, t3
- addp cfr, t3 # t3 contains the new value of cfr
- loadp JSFunction::m_scopeChain[t2], t0
- storei t2, Callee + PayloadOffset[t3]
- storei t0, ScopeChain + PayloadOffset[t3]
- loadi 8 - 24[PC], t2
- storei PC, ArgumentCount + TagOffset[cfr]
- storep cfr, CallerFrame[t3]
- storei t2, ArgumentCount + PayloadOffset[t3]
- storei CellTag, Callee + TagOffset[t3]
- storei CellTag, ScopeChain + TagOffset[t3]
- move t3, cfr
- call LLIntCallLinkInfo::machineCodeTarget[t1]
- dispatchAfterCall()
-
-.opCallSlow:
- slowPathForCall(6, slow_path)
-end
-
_llint_op_call:
traceExecution()
doCall(_llint_slow_path_call)
@@ -2066,97 +697,16 @@ _llint_generic_return_point:
dispatchAfterCall()
-_llint_op_tear_off_activation:
- traceExecution()
- loadi 4[PC], t0
- loadi 8[PC], t1
- bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffActivationCreated
- bieq TagOffset[cfr, t1, 8], EmptyValueTag, .opTearOffActivationNotCreated
-.opTearOffActivationCreated:
- callSlowPath(_llint_slow_path_tear_off_activation)
-.opTearOffActivationNotCreated:
- dispatch(3)
-
-
-_llint_op_tear_off_arguments:
- traceExecution()
- loadi 4[PC], t0
- subi 1, t0 # Get the unmodifiedArgumentsRegister
- bieq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffArgumentsNotCreated
- callSlowPath(_llint_slow_path_tear_off_arguments)
-.opTearOffArgumentsNotCreated:
- dispatch(2)
-
-
-macro doReturn()
- loadp ReturnPC[cfr], t2
- loadp CallerFrame[cfr], cfr
- restoreReturnAddressBeforeReturn(t2)
- ret
-end
-
-_llint_op_ret:
- traceExecution()
- checkSwitchToJITForEpilogue()
- loadi 4[PC], t2
- loadConstantOrVariable(t2, t1, t0)
- doReturn()
-
-
-_llint_op_call_put_result:
- loadi 4[PC], t2
- loadi 8[PC], t3
- storei t1, TagOffset[cfr, t2, 8]
- storei t0, PayloadOffset[cfr, t2, 8]
- valueProfile(t1, t0, t3)
- traceExecution() # Needs to be here because it would clobber t1, t0
- dispatch(3)
-
-
-_llint_op_ret_object_or_this:
- traceExecution()
- checkSwitchToJITForEpilogue()
- loadi 4[PC], t2
- loadConstantOrVariable(t2, t1, t0)
- bineq t1, CellTag, .opRetObjectOrThisNotObject
- loadp JSCell::m_structure[t0], t2
- bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opRetObjectOrThisNotObject
- doReturn()
-
-.opRetObjectOrThisNotObject:
- loadi 8[PC], t2
- loadConstantOrVariable(t2, t1, t0)
- doReturn()
-
-
-_llint_op_method_check:
- traceExecution()
- # We ignore method checks and use normal get_by_id optimizations.
- dispatch(1)
-
-
_llint_op_strcat:
traceExecution()
callSlowPath(_llint_slow_path_strcat)
dispatch(4)
-_llint_op_to_primitive:
+_llint_op_method_check:
traceExecution()
- loadi 8[PC], t2
- loadi 4[PC], t3
- loadConstantOrVariable(t2, t1, t0)
- bineq t1, CellTag, .opToPrimitiveIsImm
- loadp JSCell::m_structure[t0], t2
- bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .opToPrimitiveSlowCase
-.opToPrimitiveIsImm:
- storei t1, TagOffset[cfr, t3, 8]
- storei t0, PayloadOffset[cfr, t3, 8]
- dispatch(3)
-
-.opToPrimitiveSlowCase:
- callSlowPath(_llint_slow_path_to_primitive)
- dispatch(3)
+ # We ignore method checks and use normal get_by_id optimizations.
+ dispatch(1)
_llint_op_get_pnames:
@@ -2165,46 +715,6 @@ _llint_op_get_pnames:
dispatch(0) # The slow_path either advances the PC or jumps us to somewhere else.
-_llint_op_next_pname:
- traceExecution()
- loadi 12[PC], t1
- loadi 16[PC], t2
- loadi PayloadOffset[cfr, t1, 8], t0
- bieq t0, PayloadOffset[cfr, t2, 8], .opNextPnameEnd
- loadi 20[PC], t2
- loadi PayloadOffset[cfr, t2, 8], t2
- loadp JSPropertyNameIterator::m_jsStrings[t2], t3
- loadi [t3, t0, 8], t3
- addi 1, t0
- storei t0, PayloadOffset[cfr, t1, 8]
- loadi 4[PC], t1
- storei CellTag, TagOffset[cfr, t1, 8]
- storei t3, PayloadOffset[cfr, t1, 8]
- loadi 8[PC], t3
- loadi PayloadOffset[cfr, t3, 8], t3
- loadp JSCell::m_structure[t3], t1
- bpneq t1, JSPropertyNameIterator::m_cachedStructure[t2], .opNextPnameSlow
- loadp JSPropertyNameIterator::m_cachedPrototypeChain[t2], t0
- loadp StructureChain::m_vector[t0], t0
- btpz [t0], .opNextPnameTarget
-.opNextPnameCheckPrototypeLoop:
- bieq Structure::m_prototype + TagOffset[t1], NullTag, .opNextPnameSlow
- loadp Structure::m_prototype + PayloadOffset[t1], t2
- loadp JSCell::m_structure[t2], t1
- bpneq t1, [t0], .opNextPnameSlow
- addp 4, t0
- btpnz [t0], .opNextPnameCheckPrototypeLoop
-.opNextPnameTarget:
- dispatchBranch(24[PC])
-
-.opNextPnameEnd:
- dispatch(7)
-
-.opNextPnameSlow:
- callSlowPath(_llint_slow_path_next_pname) # This either keeps the PC where it was (causing us to loop) or sets it to target.
- dispatch(0)
-
-
_llint_op_push_scope:
traceExecution()
callSlowPath(_llint_slow_path_push_scope)
@@ -2223,27 +733,6 @@ _llint_op_push_new_scope:
dispatch(4)
-_llint_op_catch:
- # This is where we end up from the JIT's throw trampoline (because the
- # machine code return address will be set to _llint_op_catch), and from
- # the interpreter's throw trampoline (see _llint_throw_trampoline).
- # The JIT throwing protocol calls for the cfr to be in t0. The throwing
- # code must have known that we were throwing to the interpreter, and have
- # set JSGlobalData::targetInterpreterPCForThrow.
- move t0, cfr
- loadp JITStackFrame::globalData[sp], t3
- loadi JSGlobalData::targetInterpreterPCForThrow[t3], PC
- loadi JSGlobalData::exception + PayloadOffset[t3], t0
- loadi JSGlobalData::exception + TagOffset[t3], t1
- storei 0, JSGlobalData::exception + PayloadOffset[t3]
- storei EmptyValueTag, JSGlobalData::exception + TagOffset[t3]
- loadi 4[PC], t2
- storei t0, PayloadOffset[cfr, t2, 8]
- storei t1, TagOffset[cfr, t2, 8]
- traceExecution() # This needs to be here because we don't want to clobber t0, t1, t2, t3 above.
- dispatch(2)
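
The protocol described in the comment above reduces to a small C++ sketch (hypothetical types standing in for the JSGlobalData fields): the catch handler reads the stashed exception value and resume PC, clears the exception slot, and hands the value back for storage into the destination register.

    struct VMState {                       // stand-in for JSGlobalData's fields
        const int* targetInterpreterPCForThrow;
        int exceptionTag;
        int exceptionPayload;
    };

    struct CatchResult { const int* resumePC; int tag; int payload; };

    CatchResult opCatch(VMState& vm) {
        CatchResult r = { vm.targetInterpreterPCForThrow,
                          vm.exceptionTag, vm.exceptionPayload };
        vm.exceptionTag = -6;              // EmptyValueTag: clear the slot,
        vm.exceptionPayload = 0;           // as the storei pair above does
        return r;                          // caller stores this into 4[PC]'s register
    }

    int main() {
        int pc = 0;
        VMState vm = { &pc, -5 /* CellTag */, 1234 };
        return opCatch(vm).payload == 1234 ? 0 : 1;
    }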
-
-
_llint_op_throw:
traceExecution()
callSlowPath(_llint_slow_path_throw)
@@ -2256,27 +745,6 @@ _llint_op_throw_reference_error:
dispatch(2)
-_llint_op_jsr:
- traceExecution()
- loadi 4[PC], t0
- addi 3 * 4, PC, t1
- storei t1, [cfr, t0, 8]
- dispatchBranch(8[PC])
-
-
-_llint_op_sret:
- traceExecution()
- loadi 4[PC], t0
- loadp [cfr, t0, 8], PC
- dispatch(0)
-
-
-_llint_op_debug:
- traceExecution()
- callSlowPath(_llint_slow_path_debug)
- dispatch(4)
-
-
_llint_op_profile_will_call:
traceExecution()
loadp JITStackFrame::enabledProfilerReference[sp], t0
@@ -2295,29 +763,18 @@ _llint_op_profile_did_call:
dispatch(2)
-_llint_op_end:
+_llint_op_debug:
traceExecution()
- checkSwitchToJITForEpilogue()
- loadi 4[PC], t0
- loadi TagOffset[cfr, t0, 8], t1
- loadi PayloadOffset[cfr, t0, 8], t0
- doReturn()
+ callSlowPath(_llint_slow_path_debug)
+ dispatch(4)
-_llint_throw_from_slow_path_trampoline:
- # When throwing from the interpreter (i.e. throwing from LLIntSlowPaths),
- # the throw target is not necessarily interpreted code, so we come here.
- # This essentially emulates the JIT's throwing protocol.
- loadp JITStackFrame::globalData[sp], t1
- loadp JSGlobalData::callFrameForThrow[t1], t0
- jmp JSGlobalData::targetMachinePCForThrow[t1]
+_llint_native_call_trampoline:
+ nativeCallTrampoline(NativeExecutable::m_function)
-_llint_throw_during_call_trampoline:
- preserveReturnAddressAfterCall(t2)
- loadp JITStackFrame::globalData[sp], t1
- loadp JSGlobalData::callFrameForThrow[t1], t0
- jmp JSGlobalData::targetMachinePCForThrow[t1]
+_llint_native_construct_trampoline:
+ nativeCallTrampoline(NativeExecutable::m_constructor)
# Lastly, make sure that we can link even though we don't support all opcodes.
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.h b/Source/JavaScriptCore/llint/LowLevelInterpreter.h
index e5a54a45d..6383757cf 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.h
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.h
@@ -48,6 +48,10 @@ extern "C" void llint_generic_return_point();
extern "C" void llint_throw_from_slow_path_trampoline();
extern "C" void llint_throw_during_call_trampoline();
+// Native call trampolines
+extern "C" void llint_native_call_trampoline();
+extern "C" void llint_native_construct_trampoline();
+
#endif // ENABLE(LLINT)
#endif // LowLevelInterpreter_h
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
new file mode 100644
index 000000000..46c6226e5
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
@@ -0,0 +1,1653 @@
+# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+
+# Crash course on the language that this is written in (which I just call
+# "assembly" even though it's more than that):
+#
+# - Mostly gas-style operand ordering. The last operand tends to be the
+# destination. So "a := b" is written as "mov b, a". But unlike gas,
+# comparisons are in-order, so "if (a < b)" is written as
+# "bilt a, b, ...".
+#
+# - "b" = byte, "h" = 16-bit word, "i" = 32-bit word, "p" = pointer.
+# Currently this is just 32-bit so "i" and "p" are interchangeable
+# except when an op supports one but not the other.
+#
+# - In general, valid operands for macro invocations and instructions are
+# registers (eg "t0"), addresses (eg "4[t0]"), base-index addresses
+# (eg "7[t0, t1, 2]"), absolute addresses (eg "0xa0000000[]"), or labels
+# (eg "_foo" or ".foo"). Macro invocations can also take anonymous
+# macros as operands. Instructions cannot take anonymous macros.
+#
+# - Labels must have names that begin with either "_" or ".". A "." label
+# is local and gets renamed before code gen to minimize namespace
+# pollution. A "_" label is an extern symbol (i.e. ".globl"). The "_"
+# may or may not be removed during code gen depending on whether the asm
+# conventions for C name mangling on the target platform mandate a "_"
+# prefix.
+#
+# - A "macro" is a lambda expression, which may be either anonymous or
+#   named, with some caveats: a "macro" can take zero or more arguments,
+#   which may be macros or any valid operands, but it can only return
+#   code. You can still do Turing-complete things via continuation passing
+# style: "macro foo (a, b) b(a) end foo(foo, foo)". Actually, don't do
+# that, since you'll just crash the assembler.
+#
+# - An "if" is a conditional on settings. Any identifier supplied in the
+# predicate of an "if" is assumed to be a #define that is available
+# during code gen. So you can't use "if" for computation in a macro, but
+# you can use it to select different pieces of code for different
+# platforms.
+#
+# - Arguments to macros follow lexical scoping rather than dynamic scoping.
+#   Consts also follow lexical scoping and may override (hide) arguments
+# or other consts. All variables (arguments and constants) can be bound
+# to operands. Additionally, arguments (but not constants) can be bound
+# to macros.
+
+
+# Below we have a bunch of constant declarations. Each constant must have
+# a corresponding ASSERT() in LLIntData.cpp.
+
+
+# Value representation constants.
+const Int32Tag = -1
+const BooleanTag = -2
+const NullTag = -3
+const UndefinedTag = -4
+const CellTag = -5
+const EmptyValueTag = -6
+const DeletedValueTag = -7
+const LowestTag = DeletedValueTag
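
To make these tags concrete, here is a minimal, hypothetical C++ sketch of the two-word encoding they describe (payload in the low 32 bits, tag in the high 32 bits, little-endian assumed; the real representation is JSC's JSValue, and the k-prefixed names below are illustrative only):

    #include <cstdint>
    #include <cstdio>

    enum : int32_t {
        kInt32Tag = -1, kBooleanTag = -2, kNullTag = -3, kUndefinedTag = -4,
        kCellTag = -5, kEmptyValueTag = -6, kDeletedValueTag = -7,
        kLowestTag = kDeletedValueTag
    };

    struct Boxed {
        int32_t payload;   // low word (PayloadOffset)
        int32_t tag;       // high word (TagOffset)
    };

    int main() {
        Boxed v = { 42, kInt32Tag };
        if (v.tag == kInt32Tag)
            std::printf("int32 payload: %d\n", v.payload);
        // Compared as unsigned, every tag is >= kLowestTag (0xfffffff9),
        // while the high word of any stored double falls below it (JSC
        // canonicalizes NaNs to keep this true). That is what unsigned
        // checks like "bia tag, LowestTag, .slow" and
        // "bib tag, LowestTag, .isDouble" rely on.
        return 0;
    }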
+
+
+# Utilities
+macro dispatch(advance)
+ addp advance * 4, PC
+ jmp [PC]
+end
+
+macro dispatchBranchWithOffset(pcOffset)
+ lshifti 2, pcOffset
+ addp pcOffset, PC
+ jmp [PC]
+end
+
+macro dispatchBranch(pcOffset)
+ loadi pcOffset, t0
+ dispatchBranchWithOffset(t0)
+end
+
+macro dispatchAfterCall()
+ loadi ArgumentCount + TagOffset[cfr], PC
+ jmp [PC]
+end
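
These dispatch macros are the core of the threaded interpreter: each instruction's first slot holds the address of its handler, PC points at the current opcode slot, and dispatching just advances PC and jumps through it. A toy C++ analogue under those assumptions (hypothetical opcodes; real dispatch is a tail jump rather than a call, and casting between code pointers and intptr_t is platform-dependent):

    #include <cstdint>
    #include <cstdio>

    struct Interp { const intptr_t* PC; };
    using Handler = void (*)(Interp&);

    // dispatch(advance): step over this instruction's opcode and operands,
    // then jump through the opcode slot PC now points at. This mirrors
    // "addp advance * 4, PC; jmp [PC]" with 4-byte instruction words.
    static void dispatch(Interp& in, int advance) {
        in.PC += advance;
        reinterpret_cast<Handler>(in.PC[0])(in);   // a tail jump in the asm
    }

    static void opPrint(Interp& in) {
        std::printf("operand: %ld\n", long(in.PC[1]));  // 4[PC] is operand 1
        dispatch(in, 2);                                // opcode + 1 operand
    }

    static void opEnd(Interp&) {}  // returning unwinds this toy interpreter

    int main() {
        intptr_t program[] = { reinterpret_cast<intptr_t>(&opPrint), 7,
                               reinterpret_cast<intptr_t>(&opEnd) };
        Interp in = { program };
        reinterpret_cast<Handler>(in.PC[0])(in);
        return 0;
    }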
+
+macro cCall2(function, arg1, arg2)
+ if ARMv7
+ move arg1, t0
+ move arg2, t1
+ elsif X86
+ poke arg1, 0
+ poke arg2, 1
+ else
+ error
+ end
+ call function
+end
+
+# This barely works. arg3 and arg4 should probably be immediates.
+macro cCall4(function, arg1, arg2, arg3, arg4)
+ if ARMv7
+ move arg1, t0
+ move arg2, t1
+ move arg3, t2
+ move arg4, t3
+ elsif X86
+ poke arg1, 0
+ poke arg2, 1
+ poke arg3, 2
+ poke arg4, 3
+ else
+ error
+ end
+ call function
+end
+
+macro callSlowPath(slowPath)
+ cCall2(slowPath, cfr, PC)
+ move t0, PC
+ move t1, cfr
+end
+
+# Debugging operation if you'd like to print an operand in the instruction stream. fromWhere
+# should be an immediate integer - any integer you like; use it to identify the place you're
+# debugging from. operand should likewise be an immediate, and should identify the operand
+# in the instruction stream you'd like to print out.
+macro traceOperand(fromWhere, operand)
+ cCall4(_llint_trace_operand, cfr, PC, fromWhere, operand)
+ move t0, PC
+ move t1, cfr
+end
+
+# Debugging operation if you'd like to print the value of an operand in the instruction
+# stream. Same as traceOperand(), but assumes that the operand is a register, and prints its
+# value.
+macro traceValue(fromWhere, operand)
+ cCall4(_llint_trace_value, cfr, PC, fromWhere, operand)
+ move t0, PC
+ move t1, cfr
+end
+
+# Call a slowPath for call opcodes.
+macro callCallSlowPath(advance, slowPath, action)
+ addp advance * 4, PC, t0
+ storep t0, ArgumentCount + TagOffset[cfr]
+ cCall2(slowPath, cfr, PC)
+ move t1, cfr
+ action(t0)
+end
+
+macro checkSwitchToJITForLoop()
+ checkSwitchToJIT(
+ 1,
+ macro ()
+ storei PC, ArgumentCount + TagOffset[cfr]
+ cCall2(_llint_loop_osr, cfr, PC)
+ move t1, cfr
+ btpz t0, .recover
+ jmp t0
+ .recover:
+ loadi ArgumentCount + TagOffset[cfr], PC
+ end)
+end
+
+# Index, tag, and payload must be different registers. Index is not
+# changed.
+macro loadConstantOrVariable(index, tag, payload)
+ bigteq index, FirstConstantRegisterIndex, .constant
+ loadi TagOffset[cfr, index, 8], tag
+ loadi PayloadOffset[cfr, index, 8], payload
+ jmp .done
+.constant:
+ loadp CodeBlock[cfr], payload
+ loadp CodeBlock::m_constantRegisters + VectorBufferOffset[payload], payload
+ # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
+ # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
+ loadp TagOffset[payload, index, 8], tag
+ loadp PayloadOffset[payload, index, 8], payload
+.done:
+end
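
The "bit of evil" is plain 32-bit wraparound: FirstConstantRegisterIndex is 0x40000000, and shifting it left by 3 overflows to zero, so the biased index addresses the constant buffer correctly without an explicit subtraction. A quick check of the arithmetic under that assumption:

    #include <cstdint>
    #include <cassert>

    int main() {
        const uint32_t FirstConstantRegisterIndex = 0x40000000;
        uint32_t index = FirstConstantRegisterIndex + 5;  // the 6th constant
        // (index << 3) wraps modulo 2^32, discarding the 0x40000000 bias:
        assert(uint32_t(index << 3) ==
               uint32_t((index - FirstConstantRegisterIndex) << 3));
        return 0;
    }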
+
+# Index and payload may be the same register. Index may be clobbered.
+macro loadConstantOrVariable2Reg(index, tag, payload)
+ bigteq index, FirstConstantRegisterIndex, .constant
+ loadi TagOffset[cfr, index, 8], tag
+ loadi PayloadOffset[cfr, index, 8], payload
+ jmp .done
+.constant:
+ loadp CodeBlock[cfr], tag
+ loadp CodeBlock::m_constantRegisters + VectorBufferOffset[tag], tag
+ # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
+ # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
+ lshifti 3, index
+ addp index, tag
+ loadp PayloadOffset[tag], payload
+ loadp TagOffset[tag], tag
+.done:
+end
+
+macro loadConstantOrVariablePayloadTagCustom(index, tagCheck, payload)
+ bigteq index, FirstConstantRegisterIndex, .constant
+ tagCheck(TagOffset[cfr, index, 8])
+ loadi PayloadOffset[cfr, index, 8], payload
+ jmp .done
+.constant:
+ loadp CodeBlock[cfr], payload
+ loadp CodeBlock::m_constantRegisters + VectorBufferOffset[payload], payload
+ # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
+ # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
+ tagCheck(TagOffset[payload, index, 8])
+ loadp PayloadOffset[payload, index, 8], payload
+.done:
+end
+
+# Index and payload must be different registers. Index is not mutated. Use
+# this if you know what the tag of the variable should be. Doing the tag
+# test as part of loading the variable reduces register use, but may not
+# be faster than doing loadConstantOrVariable followed by a branch on the
+# tag.
+macro loadConstantOrVariablePayload(index, expectedTag, payload, slow)
+ loadConstantOrVariablePayloadTagCustom(
+ index,
+ macro (actualTag) bineq actualTag, expectedTag, slow end,
+ payload)
+end
+
+macro loadConstantOrVariablePayloadUnchecked(index, payload)
+ loadConstantOrVariablePayloadTagCustom(
+ index,
+ macro (actualTag) end,
+ payload)
+end
+
+macro writeBarrier(tag, payload)
+ # Nothing to do, since we don't have a generational or incremental collector.
+end
+
+macro valueProfile(tag, payload, profile)
+ if VALUE_PROFILER
+ storei tag, ValueProfile::m_buckets + TagOffset[profile]
+ storei payload, ValueProfile::m_buckets + PayloadOffset[profile]
+ end
+end
+
+
+# Entrypoints into the interpreter
+
+# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
+macro functionArityCheck(doneLabel, slow_path)
+ loadi PayloadOffset + ArgumentCount[cfr], t0
+ biaeq t0, CodeBlock::m_numParameters[t1], doneLabel
+ cCall2(slow_path, cfr, PC) # This slow_path has a simple protocol: t0 = 0 => no error, t0 != 0 => error
+ move t1, cfr
+ btiz t0, .continue
+ loadp JITStackFrame::globalData[sp], t1
+ loadp JSGlobalData::callFrameForThrow[t1], t0
+ jmp JSGlobalData::targetMachinePCForThrow[t1]
+.continue:
+ # Reload CodeBlock and PC, since the slow_path clobbered it.
+ loadp CodeBlock[cfr], t1
+ loadp CodeBlock::m_instructions[t1], PC
+ jmp doneLabel
+end
+
+
+# Instruction implementations
+
+_llint_op_enter:
+ traceExecution()
+ loadp CodeBlock[cfr], t2
+ loadi CodeBlock::m_numVars[t2], t2
+ btiz t2, .opEnterDone
+ move UndefinedTag, t0
+ move 0, t1
+.opEnterLoop:
+ subi 1, t2
+ storei t0, TagOffset[cfr, t2, 8]
+ storei t1, PayloadOffset[cfr, t2, 8]
+ btinz t2, .opEnterLoop
+.opEnterDone:
+ dispatch(1)
+
+
+_llint_op_create_activation:
+ traceExecution()
+ loadi 4[PC], t0
+ bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opCreateActivationDone
+ callSlowPath(_llint_slow_path_create_activation)
+.opCreateActivationDone:
+ dispatch(2)
+
+
+_llint_op_init_lazy_reg:
+ traceExecution()
+ loadi 4[PC], t0
+ storei EmptyValueTag, TagOffset[cfr, t0, 8]
+ storei 0, PayloadOffset[cfr, t0, 8]
+ dispatch(2)
+
+
+_llint_op_create_arguments:
+ traceExecution()
+ loadi 4[PC], t0
+ bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opCreateArgumentsDone
+ callSlowPath(_llint_slow_path_create_arguments)
+.opCreateArgumentsDone:
+ dispatch(2)
+
+
+_llint_op_create_this:
+ traceExecution()
+ loadi 8[PC], t0
+ assertNotConstant(t0)
+ bineq TagOffset[cfr, t0, 8], CellTag, .opCreateThisSlow
+ loadi PayloadOffset[cfr, t0, 8], t0
+ loadp JSCell::m_structure[t0], t1
+ bbb Structure::m_typeInfo + TypeInfo::m_type[t1], ObjectType, .opCreateThisSlow
+ loadp JSObject::m_inheritorID[t0], t2
+ btpz t2, .opCreateThisSlow
+ allocateBasicJSObject(JSFinalObjectSizeClassIndex, JSGlobalData::jsFinalObjectClassInfo, t2, t0, t1, t3, .opCreateThisSlow)
+ loadi 4[PC], t1
+ storei CellTag, TagOffset[cfr, t1, 8]
+ storei t0, PayloadOffset[cfr, t1, 8]
+ dispatch(3)
+
+.opCreateThisSlow:
+ callSlowPath(_llint_slow_path_create_this)
+ dispatch(3)
+
+
+_llint_op_get_callee:
+ traceExecution()
+ loadi 4[PC], t0
+ loadp PayloadOffset + Callee[cfr], t1
+ storei CellTag, TagOffset[cfr, t0, 8]
+ storei t1, PayloadOffset[cfr, t0, 8]
+ dispatch(2)
+
+
+_llint_op_convert_this:
+ traceExecution()
+ loadi 4[PC], t0
+ bineq TagOffset[cfr, t0, 8], CellTag, .opConvertThisSlow
+ loadi PayloadOffset[cfr, t0, 8], t0
+ loadp JSCell::m_structure[t0], t0
+ bbb Structure::m_typeInfo + TypeInfo::m_type[t0], ObjectType, .opConvertThisSlow
+ dispatch(2)
+
+.opConvertThisSlow:
+ callSlowPath(_llint_slow_path_convert_this)
+ dispatch(2)
+
+
+_llint_op_new_object:
+ traceExecution()
+ loadp CodeBlock[cfr], t0
+ loadp CodeBlock::m_globalObject[t0], t0
+ loadp JSGlobalObject::m_emptyObjectStructure[t0], t1
+ allocateBasicJSObject(JSFinalObjectSizeClassIndex, JSGlobalData::jsFinalObjectClassInfo, t1, t0, t2, t3, .opNewObjectSlow)
+ loadi 4[PC], t1
+ storei CellTag, TagOffset[cfr, t1, 8]
+ storei t0, PayloadOffset[cfr, t1, 8]
+ dispatch(2)
+
+.opNewObjectSlow:
+ callSlowPath(_llint_slow_path_new_object)
+ dispatch(2)
+
+
+_llint_op_mov:
+ traceExecution()
+ loadi 8[PC], t1
+ loadi 4[PC], t0
+ loadConstantOrVariable(t1, t2, t3)
+ storei t2, TagOffset[cfr, t0, 8]
+ storei t3, PayloadOffset[cfr, t0, 8]
+ dispatch(3)
+
+
+_llint_op_not:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 4[PC], t1
+ loadConstantOrVariable(t0, t2, t3)
+ bineq t2, BooleanTag, .opNotSlow
+ xori 1, t3
+ storei t2, TagOffset[cfr, t1, 8]
+ storei t3, PayloadOffset[cfr, t1, 8]
+ dispatch(3)
+
+.opNotSlow:
+ callSlowPath(_llint_slow_path_not)
+ dispatch(3)
+
+
+_llint_op_eq:
+ traceExecution()
+ loadi 12[PC], t2
+ loadi 8[PC], t0
+ loadConstantOrVariable(t2, t3, t1)
+ loadConstantOrVariable2Reg(t0, t2, t0)
+ bineq t2, t3, .opEqSlow
+ bieq t2, CellTag, .opEqSlow
+ bib t2, LowestTag, .opEqSlow
+ loadi 4[PC], t2
+ cieq t0, t1, t0
+ storei BooleanTag, TagOffset[cfr, t2, 8]
+ storei t0, PayloadOffset[cfr, t2, 8]
+ dispatch(4)
+
+.opEqSlow:
+ callSlowPath(_llint_slow_path_eq)
+ dispatch(4)
+
+
+_llint_op_eq_null:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 4[PC], t3
+ assertNotConstant(t0)
+ loadi TagOffset[cfr, t0, 8], t1
+ loadi PayloadOffset[cfr, t0, 8], t0
+ bineq t1, CellTag, .opEqNullImmediate
+ loadp JSCell::m_structure[t0], t1
+ tbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, t1
+ jmp .opEqNullNotImmediate
+.opEqNullImmediate:
+ cieq t1, NullTag, t2
+ cieq t1, UndefinedTag, t1
+ ori t2, t1
+.opEqNullNotImmediate:
+ storei BooleanTag, TagOffset[cfr, t3, 8]
+ storei t1, PayloadOffset[cfr, t3, 8]
+ dispatch(3)
+
+
+_llint_op_neq:
+ traceExecution()
+ loadi 12[PC], t2
+ loadi 8[PC], t0
+ loadConstantOrVariable(t2, t3, t1)
+ loadConstantOrVariable2Reg(t0, t2, t0)
+ bineq t2, t3, .opNeqSlow
+ bieq t2, CellTag, .opNeqSlow
+ bib t2, LowestTag, .opNeqSlow
+ loadi 4[PC], t2
+ cineq t0, t1, t0
+ storei BooleanTag, TagOffset[cfr, t2, 8]
+ storei t0, PayloadOffset[cfr, t2, 8]
+ dispatch(4)
+
+.opNeqSlow:
+ callSlowPath(_llint_slow_path_neq)
+ dispatch(4)
+
+
+_llint_op_neq_null:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 4[PC], t3
+ assertNotConstant(t0)
+ loadi TagOffset[cfr, t0, 8], t1
+ loadi PayloadOffset[cfr, t0, 8], t0
+ bineq t1, CellTag, .opNeqNullImmediate
+ loadp JSCell::m_structure[t0], t1
+ tbz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, t1
+ jmp .opNeqNullNotImmediate
+.opNeqNullImmediate:
+ cineq t1, NullTag, t2
+ cineq t1, UndefinedTag, t1
+ andi t2, t1
+.opNeqNullNotImmediate:
+ storei BooleanTag, TagOffset[cfr, t3, 8]
+ storei t1, PayloadOffset[cfr, t3, 8]
+ dispatch(3)
+
+
+macro strictEq(equalityOperation, slowPath)
+ loadi 12[PC], t2
+ loadi 8[PC], t0
+ loadConstantOrVariable(t2, t3, t1)
+ loadConstantOrVariable2Reg(t0, t2, t0)
+ bineq t2, t3, .slow
+ bib t2, LowestTag, .slow
+ bineq t2, CellTag, .notString
+ loadp JSCell::m_structure[t0], t2
+ loadp JSCell::m_structure[t1], t3
+ bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .notString
+ bbeq Structure::m_typeInfo + TypeInfo::m_type[t3], StringType, .slow
+.notString:
+ loadi 4[PC], t2
+ equalityOperation(t0, t1, t0)
+ storei BooleanTag, TagOffset[cfr, t2, 8]
+ storei t0, PayloadOffset[cfr, t2, 8]
+ dispatch(4)
+
+.slow:
+ callSlowPath(slowPath)
+ dispatch(4)
+end
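
The string special case in strictEq exists because payload equality is pointer identity, which is the right answer for every cell except strings: two distinct JSString cells can hold equal text. A hypothetical sketch of the fast-path decision:

    enum CellType { kObjectCell, kStringCell };
    struct Cell { CellType type; };

    // Fast-path decision: returns false to mean "take the slow path"
    // (two strings may be equal by content while being distinct cells).
    bool strictEqFast(const Cell* a, const Cell* b, bool& result) {
        if (a->type == kStringCell && b->type == kStringCell)
            return false;
        result = (a == b);   // identity is the correct answer otherwise
        return true;
    }

    int main() {
        Cell x = { kObjectCell }, y = { kObjectCell };
        bool eq = false;
        return strictEqFast(&x, &y, eq) && !eq ? 0 : 1;
    }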
+
+_llint_op_stricteq:
+ traceExecution()
+ strictEq(macro (left, right, result) cieq left, right, result end, _llint_slow_path_stricteq)
+
+
+_llint_op_nstricteq:
+ traceExecution()
+ strictEq(macro (left, right, result) cineq left, right, result end, _llint_slow_path_nstricteq)
+
+
+_llint_op_pre_inc:
+ traceExecution()
+ loadi 4[PC], t0
+ bineq TagOffset[cfr, t0, 8], Int32Tag, .opPreIncSlow
+ loadi PayloadOffset[cfr, t0, 8], t1
+ baddio 1, t1, .opPreIncSlow
+ storei t1, PayloadOffset[cfr, t0, 8]
+ dispatch(2)
+
+.opPreIncSlow:
+ callSlowPath(_llint_slow_path_pre_inc)
+ dispatch(2)
+
+
+_llint_op_pre_dec:
+ traceExecution()
+ loadi 4[PC], t0
+ bineq TagOffset[cfr, t0, 8], Int32Tag, .opPreDecSlow
+ loadi PayloadOffset[cfr, t0, 8], t1
+ bsubio 1, t1, .opPreDecSlow
+ storei t1, PayloadOffset[cfr, t0, 8]
+ dispatch(2)
+
+.opPreDecSlow:
+ callSlowPath(_llint_slow_path_pre_dec)
+ dispatch(2)
+
+
+_llint_op_post_inc:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 4[PC], t1
+ bineq TagOffset[cfr, t0, 8], Int32Tag, .opPostIncSlow
+ bieq t0, t1, .opPostIncDone
+ loadi PayloadOffset[cfr, t0, 8], t2
+ move t2, t3
+ baddio 1, t3, .opPostIncSlow
+ storei Int32Tag, TagOffset[cfr, t1, 8]
+ storei t2, PayloadOffset[cfr, t1, 8]
+ storei t3, PayloadOffset[cfr, t0, 8]
+.opPostIncDone:
+ dispatch(3)
+
+.opPostIncSlow:
+ callSlowPath(_llint_slow_path_post_inc)
+ dispatch(3)
+
+
+_llint_op_post_dec:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 4[PC], t1
+ bineq TagOffset[cfr, t0, 8], Int32Tag, .opPostDecSlow
+ bieq t0, t1, .opPostDecDone
+ loadi PayloadOffset[cfr, t0, 8], t2
+ move t2, t3
+ bsubio 1, t3, .opPostDecSlow
+ storei Int32Tag, TagOffset[cfr, t1, 8]
+ storei t2, PayloadOffset[cfr, t1, 8]
+ storei t3, PayloadOffset[cfr, t0, 8]
+.opPostDecDone:
+ dispatch(3)
+
+.opPostDecSlow:
+ callSlowPath(_llint_slow_path_post_dec)
+ dispatch(3)
+
+
+_llint_op_to_jsnumber:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 4[PC], t1
+ loadConstantOrVariable(t0, t2, t3)
+ bieq t2, Int32Tag, .opToJsnumberIsInt
+ biaeq t2, EmptyValueTag, .opToJsnumberSlow
+.opToJsnumberIsInt:
+ storei t2, TagOffset[cfr, t1, 8]
+ storei t3, PayloadOffset[cfr, t1, 8]
+ dispatch(3)
+
+.opToJsnumberSlow:
+ callSlowPath(_llint_slow_path_to_jsnumber)
+ dispatch(3)
+
+
+_llint_op_negate:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 4[PC], t3
+ loadConstantOrVariable(t0, t1, t2)
+ bineq t1, Int32Tag, .opNegateSrcNotInt
+ btiz t2, 0x7fffffff, .opNegateSlow
+ negi t2
+ storei Int32Tag, TagOffset[cfr, t3, 8]
+ storei t2, PayloadOffset[cfr, t3, 8]
+ dispatch(3)
+.opNegateSrcNotInt:
+ bia t1, LowestTag, .opNegateSlow
+ xori 0x80000000, t1
+ storei t1, TagOffset[cfr, t3, 8]
+ storei t2, PayloadOffset[cfr, t3, 8]
+ dispatch(3)
+
+.opNegateSlow:
+ callSlowPath(_llint_slow_path_negate)
+ dispatch(3)
+
+
+macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
+ loadi 12[PC], t2
+ loadi 8[PC], t0
+ loadConstantOrVariable(t2, t3, t1)
+ loadConstantOrVariable2Reg(t0, t2, t0)
+ bineq t2, Int32Tag, .op1NotInt
+ bineq t3, Int32Tag, .op2NotInt
+ loadi 4[PC], t2
+ integerOperationAndStore(t3, t1, t0, .slow, t2)
+ dispatch(5)
+
+.op1NotInt:
+ # First operand is definitely not an int, the second operand could be anything.
+ bia t2, LowestTag, .slow
+ bib t3, LowestTag, .op1NotIntOp2Double
+ bineq t3, Int32Tag, .slow
+ ci2d t1, ft1
+ jmp .op1NotIntReady
+.op1NotIntOp2Double:
+ fii2d t1, t3, ft1
+.op1NotIntReady:
+ loadi 4[PC], t1
+ fii2d t0, t2, ft0
+ doubleOperation(ft1, ft0)
+ stored ft0, [cfr, t1, 8]
+ dispatch(5)
+
+.op2NotInt:
+ # First operand is definitely an int, the second operand is definitely not.
+ loadi 4[PC], t2
+ bia t3, LowestTag, .slow
+ ci2d t0, ft0
+ fii2d t1, t3, ft1
+ doubleOperation(ft1, ft0)
+ stored ft0, [cfr, t2, 8]
+ dispatch(5)
+
+.slow:
+ callSlowPath(slowPath)
+ dispatch(5)
+end
+
+macro binaryOp(integerOperation, doubleOperation, slowPath)
+ binaryOpCustomStore(
+ macro (int32Tag, left, right, slow, index)
+ integerOperation(left, right, slow)
+ storei int32Tag, TagOffset[cfr, index, 8]
+ storei right, PayloadOffset[cfr, index, 8]
+ end,
+ doubleOperation, slowPath)
+end
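
In C++ terms, binaryOp's shape is: attempt a checked int32 operation first, and fall back to double arithmetic otherwise. A self-contained sketch under a few assumptions (two-word values as above, doubles stored as raw IEEE bits across tag and payload as stored/fii2d do, the GCC/Clang __builtin_add_overflow builtin, and ignoring the non-number tags that the real slow path handles):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    const int32_t kInt32Tag = -1;

    struct Value { int32_t tag; int32_t payload; };

    Value boxInt(int32_t i) { Value v = { kInt32Tag, i }; return v; }

    Value boxDouble(double d) {            // "stored ft0, [cfr, index, 8]"
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);
        Value v = { int32_t(bits >> 32), int32_t(uint32_t(bits)) };
        return v;
    }

    double asNumber(Value v) {
        if (v.tag == kInt32Tag)
            return v.payload;              // ci2d
        uint64_t bits = (uint64_t(uint32_t(v.tag)) << 32) | uint32_t(v.payload);
        double d;
        std::memcpy(&d, &bits, sizeof d);  // fii2d
        return d;
    }

    // op_add's shape: checked int32 add first ("baddio left, right, slow"),
    // then the double path for overflow or non-int tags.
    Value addValues(Value a, Value b) {
        int32_t sum;
        if (a.tag == kInt32Tag && b.tag == kInt32Tag
            && !__builtin_add_overflow(a.payload, b.payload, &sum))
            return boxInt(sum);
        return boxDouble(asNumber(a) + asNumber(b));
    }

    int main() {
        Value v = addValues(boxInt(0x7fffffff), boxInt(1));
        std::printf("%f\n", asNumber(v));  // 2147483648.0 via the double path
        return 0;
    }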
+
+_llint_op_add:
+ traceExecution()
+ binaryOp(
+ macro (left, right, slow) baddio left, right, slow end,
+ macro (left, right) addd left, right end,
+ _llint_slow_path_add)
+
+
+_llint_op_mul:
+ traceExecution()
+ binaryOpCustomStore(
+ macro (int32Tag, left, right, slow, index)
+ const scratch = int32Tag # We can reuse the int32Tag register since it holds a constant.
+ move right, scratch
+ bmulio left, scratch, slow
+ btinz scratch, .done
+ bilt left, 0, slow
+ bilt right, 0, slow
+ .done:
+ storei Int32Tag, TagOffset[cfr, index, 8]
+ storei scratch, PayloadOffset[cfr, index, 8]
+ end,
+ macro (left, right) muld left, right end,
+ _llint_slow_path_mul)
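
The zero-product bailout in op_mul's integer path exists because int32 has no negative zero, while JS requires (-1 * 0) to be -0; a zero result with a negative operand must therefore be redone in double arithmetic. For instance:

    #include <cmath>
    #include <cstdio>

    int main() {
        double d = -1.0 * 0.0;             // JS: -1 * 0 must be -0
        std::printf("%s\n", std::signbit(d) ? "-0" : "+0");  // prints -0
        // An int32 multiply would have produced plain 0, losing the sign.
        return 0;
    }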
+
+
+_llint_op_sub:
+ traceExecution()
+ binaryOp(
+ macro (left, right, slow) bsubio left, right, slow end,
+ macro (left, right) subd left, right end,
+ _llint_slow_path_sub)
+
+
+_llint_op_div:
+ traceExecution()
+ binaryOpCustomStore(
+ macro (int32Tag, left, right, slow, index)
+ ci2d left, ft0
+ ci2d right, ft1
+ divd ft0, ft1
+ bcd2i ft1, right, .notInt
+ storei int32Tag, TagOffset[cfr, index, 8]
+ storei right, PayloadOffset[cfr, index, 8]
+ jmp .done
+ .notInt:
+ stored ft1, [cfr, index, 8]
+ .done:
+ end,
+ macro (left, right) divd left, right end,
+ _llint_slow_path_div)
+
+
+macro bitOp(operation, slowPath, advance)
+ loadi 12[PC], t2
+ loadi 8[PC], t0
+ loadConstantOrVariable(t2, t3, t1)
+ loadConstantOrVariable2Reg(t0, t2, t0)
+ bineq t3, Int32Tag, .slow
+ bineq t2, Int32Tag, .slow
+ loadi 4[PC], t2
+ operation(t1, t0, .slow)
+ storei t3, TagOffset[cfr, t2, 8]
+ storei t0, PayloadOffset[cfr, t2, 8]
+ dispatch(advance)
+
+.slow:
+ callSlowPath(slowPath)
+ dispatch(advance)
+end
+
+_llint_op_lshift:
+ traceExecution()
+ bitOp(
+ macro (left, right, slow) lshifti left, right end,
+ _llint_slow_path_lshift,
+ 4)
+
+
+_llint_op_rshift:
+ traceExecution()
+ bitOp(
+ macro (left, right, slow) rshifti left, right end,
+ _llint_slow_path_rshift,
+ 4)
+
+
+_llint_op_urshift:
+ traceExecution()
+ bitOp(
+ macro (left, right, slow)
+ urshifti left, right
+ bilt right, 0, slow
+ end,
+ _llint_slow_path_urshift,
+ 4)
+
+
+_llint_op_bitand:
+ traceExecution()
+ bitOp(
+ macro (left, right, slow) andi left, right end,
+ _llint_slow_path_bitand,
+ 5)
+
+
+_llint_op_bitxor:
+ traceExecution()
+ bitOp(
+ macro (left, right, slow) xori left, right end,
+ _llint_slow_path_bitxor,
+ 5)
+
+
+_llint_op_bitor:
+ traceExecution()
+ bitOp(
+ macro (left, right, slow) ori left, right end,
+ _llint_slow_path_bitor,
+ 5)
+
+
+_llint_op_check_has_instance:
+ traceExecution()
+ loadi 4[PC], t1
+ loadConstantOrVariablePayload(t1, CellTag, t0, .opCheckHasInstanceSlow)
+ loadp JSCell::m_structure[t0], t0
+ btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsHasInstance, .opCheckHasInstanceSlow
+ dispatch(2)
+
+.opCheckHasInstanceSlow:
+ callSlowPath(_llint_slow_path_check_has_instance)
+ dispatch(2)
+
+
+_llint_op_instanceof:
+ traceExecution()
+ # Check that baseVal implements the default HasInstance behavior.
+ # FIXME: This should be deprecated.
+ loadi 12[PC], t1
+ loadConstantOrVariablePayloadUnchecked(t1, t0)
+ loadp JSCell::m_structure[t0], t0
+ btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsDefaultHasInstance, .opInstanceofSlow
+
+ # Actually do the work.
+ loadi 16[PC], t0
+ loadi 4[PC], t3
+ loadConstantOrVariablePayload(t0, CellTag, t1, .opInstanceofSlow)
+ loadp JSCell::m_structure[t1], t2
+ bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opInstanceofSlow
+ loadi 8[PC], t0
+ loadConstantOrVariablePayload(t0, CellTag, t2, .opInstanceofSlow)
+
+ # Register state: t1 = prototype, t2 = value
+ move 1, t0
+.opInstanceofLoop:
+ loadp JSCell::m_structure[t2], t2
+ loadi Structure::m_prototype + PayloadOffset[t2], t2
+ bpeq t2, t1, .opInstanceofDone
+ btinz t2, .opInstanceofLoop
+
+ move 0, t0
+.opInstanceofDone:
+ storei BooleanTag, TagOffset[cfr, t3, 8]
+ storei t0, PayloadOffset[cfr, t3, 8]
+ dispatch(5)
+
+.opInstanceofSlow:
+ callSlowPath(_llint_slow_path_instanceof)
+ dispatch(5)
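
The loop in op_instanceof is an ordinary prototype-chain walk. A hypothetical C++ rendering (a null prototype pointer standing in for the non-cell chain terminator that "btinz t2, .opInstanceofLoop" tests):

    struct Shape;
    struct CellObj { Shape* shape; };        // a JSCell with its Structure
    struct Shape { CellObj* prototype; };    // Structure::m_prototype payload

    // Hop value -> structure -> prototype until we reach the target
    // prototype (true) or fall off the chain (false).
    bool instanceOf(CellObj* value, CellObj* prototype) {
        for (CellObj* cur = value;;) {
            cur = cur->shape->prototype;
            if (cur == prototype)
                return true;
            if (!cur)
                return false;
        }
    }

    int main() {
        Shape rootShape = { nullptr };
        CellObj proto = { &rootShape };
        Shape childShape = { &proto };
        CellObj obj = { &childShape };
        return instanceOf(&obj, &proto) ? 0 : 1;
    }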
+
+
+macro resolveGlobal(size, slow)
+ # Operands are as follows:
+ # 4[PC] Destination for the load.
+ # 8[PC] Property identifier index in the code block.
+ # 12[PC] Structure pointer, initialized to 0 by bytecode generator.
+ # 16[PC] Offset in global object, initialized to 0 by bytecode generator.
+ loadp CodeBlock[cfr], t0
+ loadp CodeBlock::m_globalObject[t0], t0
+ loadp JSCell::m_structure[t0], t1
+ bpneq t1, 12[PC], slow
+ loadi 16[PC], t1
+ loadp JSObject::m_propertyStorage[t0], t0
+ loadi TagOffset[t0, t1, 8], t2
+ loadi PayloadOffset[t0, t1, 8], t3
+ loadi 4[PC], t0
+ storei t2, TagOffset[cfr, t0, 8]
+ storei t3, PayloadOffset[cfr, t0, 8]
+ loadi (size - 1) * 4[PC], t0
+ valueProfile(t2, t3, t0)
+end
+
+_llint_op_resolve_global:
+ traceExecution()
+ resolveGlobal(6, .opResolveGlobalSlow)
+ dispatch(6)
+
+.opResolveGlobalSlow:
+ callSlowPath(_llint_slow_path_resolve_global)
+ dispatch(6)
+
+
+# Gives you the scope in t0, while allowing you to optionally perform additional checks on the
+# scopes as they are traversed. scopeCheck() is called with two arguments: the register
+# holding the scope, and a register that can be used for scratch. Note that this does not
+# use t3, so you can hold stuff in t3 if need be.
+macro getScope(deBruijinIndexOperand, scopeCheck)
+ loadp ScopeChain + PayloadOffset[cfr], t0
+ loadi deBruijinIndexOperand, t2
+
+ btiz t2, .done
+
+ loadp CodeBlock[cfr], t1
+ bineq CodeBlock::m_codeType[t1], FunctionCode, .loop
+ btbz CodeBlock::m_needsFullScopeChain[t1], .loop
+
+ loadi CodeBlock::m_activationRegister[t1], t1
+
+ # Need to conditionally skip over one scope.
+ bieq TagOffset[cfr, t1, 8], EmptyValueTag, .noActivation
+ scopeCheck(t0, t1)
+ loadp ScopeChainNode::next[t0], t0
+.noActivation:
+ subi 1, t2
+
+ btiz t2, .done
+.loop:
+ scopeCheck(t0, t1)
+ loadp ScopeChainNode::next[t0], t0
+ subi 1, t2
+ btinz t2, .loop
+
+.done:
+end
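
In C++ terms, getScope is a linked-list walk (hypothetical types): follow the chain deBruijnIndex links out from the current scope, except that when the code block may have an activation that has not been created yet, one unit of the index is consumed without a hop, matching the .noActivation path above.

    struct ScopeChainNode { ScopeChainNode* next; };

    ScopeChainNode* getScope(ScopeChainNode* scope, int deBruijnIndex,
                             bool mayHaveActivation, bool activationCreated) {
        if (deBruijnIndex == 0)
            return scope;
        if (mayHaveActivation) {   // FunctionCode && m_needsFullScopeChain
            if (activationCreated)
                scope = scope->next;     // hop over the activation's node
            --deBruijnIndex;             // consumed whether or not we hopped
        }
        while (deBruijnIndex-- > 0)
            scope = scope->next;
        return scope;
    }

    int main() {
        ScopeChainNode c = { nullptr }, b = { &c }, a = { &b };
        return getScope(&a, 2, false, false) == &c ? 0 : 1;
    }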
+
+_llint_op_resolve_global_dynamic:
+ traceExecution()
+ loadp JITStackFrame::globalData[sp], t3
+ loadp JSGlobalData::activationStructure[t3], t3
+ getScope(
+ 20[PC],
+ macro (scope, scratch)
+ loadp ScopeChainNode::object[scope], scratch
+ bpneq JSCell::m_structure[scratch], t3, .opResolveGlobalDynamicSuperSlow
+ end)
+ resolveGlobal(7, .opResolveGlobalDynamicSlow)
+ dispatch(7)
+
+.opResolveGlobalDynamicSuperSlow:
+ callSlowPath(_llint_slow_path_resolve_for_resolve_global_dynamic)
+ dispatch(7)
+
+.opResolveGlobalDynamicSlow:
+ callSlowPath(_llint_slow_path_resolve_global_dynamic)
+ dispatch(7)
+
+
+_llint_op_get_scoped_var:
+ traceExecution()
+ # Operands are as follows:
+ # 4[PC] Destination for the load.
+ # 8[PC] Index of register in the scope.
+ # 12[PC] De Bruijn index.
+ getScope(12[PC], macro (scope, scratch) end)
+ loadi 4[PC], t1
+ loadi 8[PC], t2
+ loadp ScopeChainNode::object[t0], t0
+ loadp JSVariableObject::m_registers[t0], t0
+ loadi TagOffset[t0, t2, 8], t3
+ loadi PayloadOffset[t0, t2, 8], t0
+ storei t3, TagOffset[cfr, t1, 8]
+ storei t0, PayloadOffset[cfr, t1, 8]
+ loadi 16[PC], t1
+ valueProfile(t3, t0, t1)
+ dispatch(5)
+
+
+_llint_op_put_scoped_var:
+ traceExecution()
+ getScope(8[PC], macro (scope, scratch) end)
+ loadi 12[PC], t1
+ loadConstantOrVariable(t1, t3, t2)
+ loadi 4[PC], t1
+ writeBarrier(t3, t2)
+ loadp ScopeChainNode::object[t0], t0
+ loadp JSVariableObject::m_registers[t0], t0
+ storei t3, TagOffset[t0, t1, 8]
+ storei t2, PayloadOffset[t0, t1, 8]
+ dispatch(4)
+
+
+_llint_op_get_global_var:
+ traceExecution()
+ loadi 8[PC], t1
+ loadi 4[PC], t3
+ loadp CodeBlock[cfr], t0
+ loadp CodeBlock::m_globalObject[t0], t0
+ loadp JSGlobalObject::m_registers[t0], t0
+ loadi TagOffset[t0, t1, 8], t2
+ loadi PayloadOffset[t0, t1, 8], t1
+ storei t2, TagOffset[cfr, t3, 8]
+ storei t1, PayloadOffset[cfr, t3, 8]
+ loadi 12[PC], t3
+ valueProfile(t2, t1, t3)
+ dispatch(4)
+
+
+_llint_op_put_global_var:
+ traceExecution()
+ loadi 8[PC], t1
+ loadp CodeBlock[cfr], t0
+ loadp CodeBlock::m_globalObject[t0], t0
+ loadp JSGlobalObject::m_registers[t0], t0
+ loadConstantOrVariable(t1, t2, t3)
+ loadi 4[PC], t1
+ writeBarrier(t2, t3)
+ storei t2, TagOffset[t0, t1, 8]
+ storei t3, PayloadOffset[t0, t1, 8]
+ dispatch(3)
+
+
+_llint_op_get_by_id:
+ traceExecution()
+ # We only do monomorphic get_by_id caching for now, and we do not modify the
+ # opcode. We do, however, allow the cache to change any time it fails, since
+ # ping-ponging is free. At best we get lucky and the get_by_id will continue
+ # to take fast path on the new cache. At worst we take slow path, which is what
+ # we would have been doing anyway.
+ loadi 8[PC], t0
+ loadi 16[PC], t1
+ loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdSlow)
+ loadi 20[PC], t2
+ loadp JSObject::m_propertyStorage[t3], t0
+ bpneq JSCell::m_structure[t3], t1, .opGetByIdSlow
+ loadi 4[PC], t1
+ loadi TagOffset[t0, t2], t3
+ loadi PayloadOffset[t0, t2], t2
+ storei t3, TagOffset[cfr, t1, 8]
+ storei t2, PayloadOffset[cfr, t1, 8]
+ loadi 32[PC], t1
+ valueProfile(t3, t2, t1)
+ dispatch(9)
+
+.opGetByIdSlow:
+ callSlowPath(_llint_slow_path_get_by_id)
+ dispatch(9)
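
The comment above describes a classic monomorphic inline cache: the fast path is one pointer compare plus one indexed load, and any miss defers to the slow path, which may repoint the cache. A hypothetical C++ rendering (not JSC's API; the offset is in bytes, matching the unscaled "TagOffset[t0, t2]" addressing above):

    #include <cstdint>

    struct Structure {};                     // identity stands in for object shape
    struct Value { int32_t tag; int32_t payload; };
    struct Object {
        Structure* structure;                // JSCell::m_structure
        Value* propertyStorage;              // JSObject::m_propertyStorage
    };
    struct GetByIdCache {                    // the 16[PC]/20[PC] operands above
        Structure* expectedStructure;        // null until the slow path fills it
        uint32_t byteOffset;                 // offset into the property storage
    };

    // Fast path: revalidate the cached Structure, then do one indexed load.
    // Returning false means "take _llint_slow_path_get_by_id".
    bool getByIdFast(const Object& base, const GetByIdCache& cache, Value& out) {
        if (base.structure != cache.expectedStructure)
            return false;
        const char* storage = reinterpret_cast<const char*>(base.propertyStorage);
        out = *reinterpret_cast<const Value*>(storage + cache.byteOffset);
        return true;
    }

    int main() {
        Structure shape;
        Value slots[2] = { { 0, 0 }, { -1 /* Int32Tag */, 99 } };
        Object o = { &shape, slots };
        GetByIdCache c = { &shape, uint32_t(sizeof(Value)) };  // second slot
        Value v = { 0, 0 };
        return getByIdFast(o, c, v) && v.payload == 99 ? 0 : 1;
    }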
+
+
+_llint_op_get_arguments_length:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 4[PC], t1
+ bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opGetArgumentsLengthSlow
+ loadi ArgumentCount + PayloadOffset[cfr], t2
+ subi 1, t2
+ storei Int32Tag, TagOffset[cfr, t1, 8]
+ storei t2, PayloadOffset[cfr, t1, 8]
+ dispatch(4)
+
+.opGetArgumentsLengthSlow:
+ callSlowPath(_llint_slow_path_get_arguments_length)
+ dispatch(4)
+
+
+_llint_op_put_by_id:
+ traceExecution()
+ loadi 4[PC], t3
+ loadi 16[PC], t1
+ loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
+ loadi 12[PC], t2
+ loadp JSObject::m_propertyStorage[t0], t3
+ bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
+ loadi 20[PC], t1
+ loadConstantOrVariable2Reg(t2, t0, t2)
+ writeBarrier(t0, t2)
+ storei t0, TagOffset[t3, t1]
+ storei t2, PayloadOffset[t3, t1]
+ dispatch(9)
+
+.opPutByIdSlow:
+ callSlowPath(_llint_slow_path_put_by_id)
+ dispatch(9)
+
+
+macro putByIdTransition(additionalChecks)
+ traceExecution()
+ loadi 4[PC], t3
+ loadi 16[PC], t1
+ loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
+ loadi 12[PC], t2
+ bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
+ additionalChecks(t1, t3, .opPutByIdSlow)
+ loadi 20[PC], t1
+ loadp JSObject::m_propertyStorage[t0], t3
+ addp t1, t3
+ loadConstantOrVariable2Reg(t2, t1, t2)
+ writeBarrier(t1, t2)
+ storei t1, TagOffset[t3]
+ loadi 24[PC], t1
+ storei t2, PayloadOffset[t3]
+ storep t1, JSCell::m_structure[t0]
+ dispatch(9)
+end
+
+_llint_op_put_by_id_transition_direct:
+ putByIdTransition(macro (oldStructure, scratch, slow) end)
+
+
+_llint_op_put_by_id_transition_normal:
+ putByIdTransition(
+ macro (oldStructure, scratch, slow)
+ const protoCell = oldStructure # Reusing the oldStructure register for the proto
+
+ loadp 28[PC], scratch
+ assert(macro (ok) btpnz scratch, ok end)
+ loadp StructureChain::m_vector[scratch], scratch
+ assert(macro (ok) btpnz scratch, ok end)
+ bieq Structure::m_prototype + TagOffset[oldStructure], NullTag, .done
+ .loop:
+ loadi Structure::m_prototype + PayloadOffset[oldStructure], protoCell
+ loadp JSCell::m_structure[protoCell], oldStructure
+ bpneq oldStructure, [scratch], slow
+ addp 4, scratch
+ bineq Structure::m_prototype + TagOffset[oldStructure], NullTag, .loop
+ .done:
+ end)
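
The normal transition's extra check walks the cached StructureChain in lock step with the live prototype chain and bails to the slow path if any structure has changed since the cache was filled. A hypothetical sketch:

    struct ChainShape;
    struct ChainObj { ChainShape* structure; };
    struct ChainShape { ChainObj* prototype; };  // null ends the chain

    // put_by_id_transition_normal's guard: compare each prototype's live
    // Structure against the cached vector entry; any mismatch bails.
    bool chainStillValid(ChainShape* oldStructure, ChainShape* const* cachedVector) {
        for (ChainShape* s = oldStructure; s->prototype;) {
            ChainShape* protoStructure = s->prototype->structure;
            if (protoStructure != *cachedVector++)
                return false;                    // take .opPutByIdSlow
            s = protoStructure;
        }
        return true;
    }

    int main() {
        ChainShape terminal = { nullptr };
        ChainObj proto = { &terminal };
        ChainShape start = { &proto };
        ChainShape* cached[] = { &terminal };
        return chainStillValid(&start, cached) ? 0 : 1;
    }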
+
+
+_llint_op_get_by_val:
+ traceExecution()
+ loadp CodeBlock[cfr], t1
+ loadi 8[PC], t2
+ loadi 12[PC], t3
+ loadp CodeBlock::m_globalData[t1], t1
+ loadConstantOrVariablePayload(t2, CellTag, t0, .opGetByValSlow)
+ loadp JSGlobalData::jsArrayClassInfo[t1], t2
+ loadConstantOrVariablePayload(t3, Int32Tag, t1, .opGetByValSlow)
+ bpneq [t0], t2, .opGetByValSlow
+ loadp JSArray::m_storage[t0], t3
+ biaeq t1, JSArray::m_vectorLength[t0], .opGetByValSlow
+ loadi 4[PC], t0
+ loadi ArrayStorage::m_vector + TagOffset[t3, t1, 8], t2
+ loadi ArrayStorage::m_vector + PayloadOffset[t3, t1, 8], t1
+ bieq t2, EmptyValueTag, .opGetByValSlow
+ storei t2, TagOffset[cfr, t0, 8]
+ storei t1, PayloadOffset[cfr, t0, 8]
+ loadi 16[PC], t0
+ valueProfile(t2, t1, t0)
+ dispatch(5)
+
+.opGetByValSlow:
+ callSlowPath(_llint_slow_path_get_by_val)
+ dispatch(5)
+
+
+_llint_op_get_argument_by_val:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 12[PC], t1
+ bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opGetArgumentByValSlow
+ loadConstantOrVariablePayload(t1, Int32Tag, t2, .opGetArgumentByValSlow)
+ addi 1, t2
+ loadi ArgumentCount + PayloadOffset[cfr], t1
+ biaeq t2, t1, .opGetArgumentByValSlow
+ negi t2
+ loadi 4[PC], t3
+ loadi ThisArgumentOffset + TagOffset[cfr, t2, 8], t0
+ loadi ThisArgumentOffset + PayloadOffset[cfr, t2, 8], t1
+ storei t0, TagOffset[cfr, t3, 8]
+ storei t1, PayloadOffset[cfr, t3, 8]
+ dispatch(5)
+
+.opGetArgumentByValSlow:
+ callSlowPath(_llint_slow_path_get_argument_by_val)
+ dispatch(5)
+
+
+_llint_op_get_by_pname:
+ traceExecution()
+ loadi 12[PC], t0
+ loadConstantOrVariablePayload(t0, CellTag, t1, .opGetByPnameSlow)
+ loadi 16[PC], t0
+ bpneq t1, PayloadOffset[cfr, t0, 8], .opGetByPnameSlow
+ loadi 8[PC], t0
+ loadConstantOrVariablePayload(t0, CellTag, t2, .opGetByPnameSlow)
+ loadi 20[PC], t0
+ loadi PayloadOffset[cfr, t0, 8], t3
+ loadp JSCell::m_structure[t2], t0
+ bpneq t0, JSPropertyNameIterator::m_cachedStructure[t3], .opGetByPnameSlow
+ loadi 24[PC], t0
+ loadi [cfr, t0, 8], t0
+ subi 1, t0
+ biaeq t0, JSPropertyNameIterator::m_numCacheableSlots[t3], .opGetByPnameSlow
+ loadp JSObject::m_propertyStorage[t2], t2
+ loadi TagOffset[t2, t0, 8], t1
+ loadi PayloadOffset[t2, t0, 8], t3
+ loadi 4[PC], t0
+ storei t1, TagOffset[cfr, t0, 8]
+ storei t3, PayloadOffset[cfr, t0, 8]
+ dispatch(7)
+
+.opGetByPnameSlow:
+ callSlowPath(_llint_slow_path_get_by_pname)
+ dispatch(7)
+
+
+_llint_op_put_by_val:
+ traceExecution()
+ loadi 4[PC], t0
+ loadConstantOrVariablePayload(t0, CellTag, t1, .opPutByValSlow)
+ loadi 8[PC], t0
+ loadConstantOrVariablePayload(t0, Int32Tag, t2, .opPutByValSlow)
+ loadp CodeBlock[cfr], t0
+ loadp CodeBlock::m_globalData[t0], t0
+ loadp JSGlobalData::jsArrayClassInfo[t0], t0
+ bpneq [t1], t0, .opPutByValSlow
+ biaeq t2, JSArray::m_vectorLength[t1], .opPutByValSlow
+ loadp JSArray::m_storage[t1], t0
+ bieq ArrayStorage::m_vector + TagOffset[t0, t2, 8], EmptyValueTag, .opPutByValEmpty
+.opPutByValStoreResult:
+ loadi 12[PC], t3
+ loadConstantOrVariable2Reg(t3, t1, t3)
+ writeBarrier(t1, t3)
+ storei t1, ArrayStorage::m_vector + TagOffset[t0, t2, 8]
+ storei t3, ArrayStorage::m_vector + PayloadOffset[t0, t2, 8]
+ dispatch(4)
+
+.opPutByValEmpty:
+ addi 1, ArrayStorage::m_numValuesInVector[t0]
+ bib t2, ArrayStorage::m_length[t0], .opPutByValStoreResult
+ addi 1, t2, t1
+ storei t1, ArrayStorage::m_length[t0]
+ jmp .opPutByValStoreResult
+
+.opPutByValSlow:
+ callSlowPath(_llint_slow_path_put_by_val)
+ dispatch(4)
+
+
+_llint_op_loop:
+ nop
+_llint_op_jmp:
+ traceExecution()
+ dispatchBranch(4[PC])
+
+
+macro jumpTrueOrFalse(conditionOp, slow)
+ loadi 4[PC], t1
+ loadConstantOrVariablePayload(t1, BooleanTag, t0, .slow)
+ conditionOp(t0, .target)
+ dispatch(3)
+
+.target:
+ dispatchBranch(8[PC])
+
+.slow:
+ callSlowPath(slow)
+ dispatch(0)
+end
+
+
+macro equalNull(cellHandler, immediateHandler)
+ loadi 4[PC], t0
+ assertNotConstant(t0)
+ loadi TagOffset[cfr, t0, 8], t1
+ loadi PayloadOffset[cfr, t0, 8], t0
+ bineq t1, CellTag, .immediate
+ loadp JSCell::m_structure[t0], t2
+ cellHandler(Structure::m_typeInfo + TypeInfo::m_flags[t2], .target)
+ dispatch(3)
+
+.target:
+ dispatchBranch(8[PC])
+
+.immediate:
+ ori 1, t1
+ immediateHandler(t1, .target)
+ dispatch(3)
+end
+
+_llint_op_jeq_null:
+ traceExecution()
+ equalNull(
+ macro (value, target) btbnz value, MasqueradesAsUndefined, target end,
+ macro (value, target) bieq value, NullTag, target end)
+
+
+_llint_op_jneq_null:
+ traceExecution()
+ equalNull(
+ macro (value, target) btbz value, MasqueradesAsUndefined, target end,
+ macro (value, target) bineq value, NullTag, target end)
+
+
+_llint_op_jneq_ptr:
+ traceExecution()
+ loadi 4[PC], t0
+ loadi 8[PC], t1
+ bineq TagOffset[cfr, t0, 8], CellTag, .opJneqPtrBranch
+ bpeq PayloadOffset[cfr, t0, 8], t1, .opJneqPtrFallThrough
+.opJneqPtrBranch:
+ dispatchBranch(12[PC])
+.opJneqPtrFallThrough:
+ dispatch(4)
+
+
+macro compare(integerCompare, doubleCompare, slowPath)
+ loadi 4[PC], t2
+ loadi 8[PC], t3
+ loadConstantOrVariable(t2, t0, t1)
+ loadConstantOrVariable2Reg(t3, t2, t3)
+ bineq t0, Int32Tag, .op1NotInt
+ bineq t2, Int32Tag, .op2NotInt
+ integerCompare(t1, t3, .jumpTarget)
+ dispatch(4)
+
+.op1NotInt:
+ bia t0, LowestTag, .slow
+ bib t2, LowestTag, .op1NotIntOp2Double
+ bineq t2, Int32Tag, .slow
+ ci2d t3, ft1
+ jmp .op1NotIntReady
+.op1NotIntOp2Double:
+ fii2d t3, t2, ft1
+.op1NotIntReady:
+ fii2d t1, t0, ft0
+ doubleCompare(ft0, ft1, .jumpTarget)
+ dispatch(4)
+
+.op2NotInt:
+ ci2d t1, ft0
+ bia t2, LowestTag, .slow
+ fii2d t3, t2, ft1
+ doubleCompare(ft0, ft1, .jumpTarget)
+ dispatch(4)
+
+.jumpTarget:
+ dispatchBranch(12[PC])
+
+.slow:
+ callSlowPath(slowPath)
+ dispatch(0)
+end
+
+
+_llint_op_switch_imm:
+ traceExecution()
+ loadi 12[PC], t2
+ loadi 4[PC], t3
+ loadConstantOrVariable(t2, t1, t0)
+ loadp CodeBlock[cfr], t2
+ loadp CodeBlock::m_rareData[t2], t2
+ muli sizeof SimpleJumpTable, t3 # FIXME: would be nice to peephole this!
+ loadp CodeBlock::RareData::m_immediateSwitchJumpTables + VectorBufferOffset[t2], t2
+ addp t3, t2
+ bineq t1, Int32Tag, .opSwitchImmNotInt
+ subi SimpleJumpTable::min[t2], t0
+ biaeq t0, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchImmFallThrough
+ loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t3
+ loadi [t3, t0, 4], t1
+ btiz t1, .opSwitchImmFallThrough
+ dispatchBranchWithOffset(t1)
+
+.opSwitchImmNotInt:
+ bib t1, LowestTag, .opSwitchImmSlow # Go to slow path if it's a double.
+.opSwitchImmFallThrough:
+ dispatchBranch(8[PC])
+
+.opSwitchImmSlow:
+ callSlowPath(_llint_slow_path_switch_imm)
+ dispatch(0)
+
+
+_llint_op_switch_char:
+ traceExecution()
+ loadi 12[PC], t2
+ loadi 4[PC], t3
+ loadConstantOrVariable(t2, t1, t0)
+ loadp CodeBlock[cfr], t2
+ loadp CodeBlock::m_rareData[t2], t2
+ muli sizeof SimpleJumpTable, t3
+ loadp CodeBlock::RareData::m_characterSwitchJumpTables + VectorBufferOffset[t2], t2
+ addp t3, t2
+ bineq t1, CellTag, .opSwitchCharFallThrough
+ loadp JSCell::m_structure[t0], t1
+ bbneq Structure::m_typeInfo + TypeInfo::m_type[t1], StringType, .opSwitchCharFallThrough
+ loadp JSString::m_value[t0], t0
+ bineq StringImpl::m_length[t0], 1, .opSwitchCharFallThrough
+ loadp StringImpl::m_data8[t0], t1
+ btinz StringImpl::m_hashAndFlags[t0], HashFlags8BitBuffer, .opSwitchChar8Bit
+ loadh [t1], t0
+ jmp .opSwitchCharReady
+.opSwitchChar8Bit:
+ loadb [t1], t0
+.opSwitchCharReady:
+ subi SimpleJumpTable::min[t2], t0
+ biaeq t0, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchCharFallThrough
+ loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t2
+ loadi [t2, t0, 4], t1
+ btiz t1, .opSwitchCharFallThrough
+ dispatchBranchWithOffset(t1)
+
+.opSwitchCharFallThrough:
+ dispatchBranch(8[PC])
+
+
+_llint_op_new_func:
+ traceExecution()
+ btiz 12[PC], .opNewFuncUnchecked
+ loadi 4[PC], t1
+ bineq TagOffset[cfr, t1, 8], EmptyValueTag, .opNewFuncDone
+.opNewFuncUnchecked:
+ callSlowPath(_llint_slow_path_new_func)
+.opNewFuncDone:
+ dispatch(4)
+
+
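+# A sketch of what the fast path below does: `lshifti 3, t3` scales the new
+# frame's register offset by 8 bytes per register, so after `addp cfr, t3`,
+# t3 is the new call frame. The stores then fill in the header slots the
+# callee needs -- Callee, ScopeChain, CallerFrame and ArgumentCount -- and the
+# old PC is stashed in ArgumentCount's tag so dispatchAfterCall() can restore
+# it. Since PC has already been advanced by 24 bytes (6 operands), the
+# argument count is re-read at `8 - 24[PC]`.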
+macro doCall(slowPath)
+ loadi 4[PC], t0
+ loadi 16[PC], t1
+ loadp LLIntCallLinkInfo::callee[t1], t2
+ loadConstantOrVariablePayload(t0, CellTag, t3, .opCallSlow)
+ bineq t3, t2, .opCallSlow
+ loadi 12[PC], t3
+ addp 24, PC
+ lshifti 3, t3
+ addp cfr, t3 # t3 contains the new value of cfr
+ loadp JSFunction::m_scopeChain[t2], t0
+ storei t2, Callee + PayloadOffset[t3]
+ storei t0, ScopeChain + PayloadOffset[t3]
+ loadi 8 - 24[PC], t2
+ storei PC, ArgumentCount + TagOffset[cfr]
+ storep cfr, CallerFrame[t3]
+ storei t2, ArgumentCount + PayloadOffset[t3]
+ storei CellTag, Callee + TagOffset[t3]
+ storei CellTag, ScopeChain + TagOffset[t3]
+ move t3, cfr
+ call LLIntCallLinkInfo::machineCodeTarget[t1]
+ dispatchAfterCall()
+
+.opCallSlow:
+ slowPathForCall(6, slowPath)
+end
+
+
+_llint_op_tear_off_activation:
+ traceExecution()
+ loadi 4[PC], t0
+ loadi 8[PC], t1
+ bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffActivationCreated
+ bieq TagOffset[cfr, t1, 8], EmptyValueTag, .opTearOffActivationNotCreated
+.opTearOffActivationCreated:
+ callSlowPath(_llint_slow_path_tear_off_activation)
+.opTearOffActivationNotCreated:
+ dispatch(3)
+
+
+_llint_op_tear_off_arguments:
+ traceExecution()
+ loadi 4[PC], t0
+ subi 1, t0 # Get the unmodifiedArgumentsRegister
+ bieq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffArgumentsNotCreated
+ callSlowPath(_llint_slow_path_tear_off_arguments)
+.opTearOffArgumentsNotCreated:
+ dispatch(2)
+
+
+_llint_op_ret:
+ traceExecution()
+ checkSwitchToJITForEpilogue()
+ loadi 4[PC], t2
+ loadConstantOrVariable(t2, t1, t0)
+ doReturn()
+
+
+_llint_op_call_put_result:
+ loadi 4[PC], t2
+ loadi 8[PC], t3
+ storei t1, TagOffset[cfr, t2, 8]
+ storei t0, PayloadOffset[cfr, t2, 8]
+ valueProfile(t1, t0, t3)
+ traceExecution() # Needs to be here because it would clobber t1, t0
+ dispatch(3)
+
+
+_llint_op_ret_object_or_this:
+ traceExecution()
+ checkSwitchToJITForEpilogue()
+ loadi 4[PC], t2
+ loadConstantOrVariable(t2, t1, t0)
+ bineq t1, CellTag, .opRetObjectOrThisNotObject
+ loadp JSCell::m_structure[t0], t2
+ bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opRetObjectOrThisNotObject
+ doReturn()
+
+.opRetObjectOrThisNotObject:
+ loadi 8[PC], t2
+ loadConstantOrVariable(t2, t1, t0)
+ doReturn()
+
+
+_llint_op_to_primitive:
+ traceExecution()
+ loadi 8[PC], t2
+ loadi 4[PC], t3
+ loadConstantOrVariable(t2, t1, t0)
+ bineq t1, CellTag, .opToPrimitiveIsImm
+ loadp JSCell::m_structure[t0], t2
+ bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .opToPrimitiveSlowCase
+.opToPrimitiveIsImm:
+ storei t1, TagOffset[cfr, t3, 8]
+ storei t0, PayloadOffset[cfr, t3, 8]
+ dispatch(3)
+
+.opToPrimitiveSlowCase:
+ callSlowPath(_llint_slow_path_to_primitive)
+ dispatch(3)
+
+
+_llint_op_next_pname:
+ traceExecution()
+ loadi 12[PC], t1
+ loadi 16[PC], t2
+ loadi PayloadOffset[cfr, t1, 8], t0
+ bieq t0, PayloadOffset[cfr, t2, 8], .opNextPnameEnd
+ loadi 20[PC], t2
+ loadi PayloadOffset[cfr, t2, 8], t2
+ loadp JSPropertyNameIterator::m_jsStrings[t2], t3
+ loadi [t3, t0, 8], t3
+ addi 1, t0
+ storei t0, PayloadOffset[cfr, t1, 8]
+ loadi 4[PC], t1
+ storei CellTag, TagOffset[cfr, t1, 8]
+ storei t3, PayloadOffset[cfr, t1, 8]
+ loadi 8[PC], t3
+ loadi PayloadOffset[cfr, t3, 8], t3
+ loadp JSCell::m_structure[t3], t1
+ bpneq t1, JSPropertyNameIterator::m_cachedStructure[t2], .opNextPnameSlow
+ loadp JSPropertyNameIterator::m_cachedPrototypeChain[t2], t0
+ loadp StructureChain::m_vector[t0], t0
+ btpz [t0], .opNextPnameTarget
+.opNextPnameCheckPrototypeLoop:
+ bieq Structure::m_prototype + TagOffset[t1], NullTag, .opNextPnameSlow
+ loadp Structure::m_prototype + PayloadOffset[t1], t2
+ loadp JSCell::m_structure[t2], t1
+ bpneq t1, [t0], .opNextPnameSlow
+ addp 4, t0
+ btpnz [t0], .opNextPnameCheckPrototypeLoop
+.opNextPnameTarget:
+ dispatchBranch(24[PC])
+
+.opNextPnameEnd:
+ dispatch(7)
+
+.opNextPnameSlow:
+ callSlowPath(_llint_slow_path_next_pname) # This either keeps the PC where it was (causing us to loop) or sets it to target.
+ dispatch(0)
+
+
+_llint_op_catch:
+ # This is where we end up from the JIT's throw trampoline (because the
+ # machine code return address will be set to _llint_op_catch), and from
+ # the interpreter's throw trampoline (see _llint_throw_trampoline).
+ # The JIT throwing protocol calls for the cfr to be in t0. The throwing
+ # code must have known that we were throwing to the interpreter, and have
+ # set JSGlobalData::targetInterpreterPCForThrow.
+ move t0, cfr
+ loadp JITStackFrame::globalData[sp], t3
+ loadi JSGlobalData::targetInterpreterPCForThrow[t3], PC
+ loadi JSGlobalData::exception + PayloadOffset[t3], t0
+ loadi JSGlobalData::exception + TagOffset[t3], t1
+ storei 0, JSGlobalData::exception + PayloadOffset[t3]
+ storei EmptyValueTag, JSGlobalData::exception + TagOffset[t3]
+ loadi 4[PC], t2
+ storei t0, PayloadOffset[cfr, t2, 8]
+ storei t1, TagOffset[cfr, t2, 8]
+ traceExecution() # This needs to be here because we don't want to clobber t0, t1, t2, t3 above.
+ dispatch(2)
+
+
+_llint_op_jsr:
+ traceExecution()
+ loadi 4[PC], t0
+ addi 3 * 4, PC, t1
+ storei t1, [cfr, t0, 8]
+ dispatchBranch(8[PC])
+
+
+_llint_op_sret:
+ traceExecution()
+ loadi 4[PC], t0
+ loadp [cfr, t0, 8], PC
+ dispatch(0)
+
+
+_llint_op_end:
+ traceExecution()
+ checkSwitchToJITForEpilogue()
+ loadi 4[PC], t0
+ assertNotConstant(t0)
+ loadi TagOffset[cfr, t0, 8], t1
+ loadi PayloadOffset[cfr, t0, 8], t0
+ doReturn()
+
+
+_llint_throw_from_slow_path_trampoline:
+    # When we throw from the interpreter (i.e. from LLIntSlowPaths), the
+    # throw target is not necessarily interpreted code, so we come here.
+ # This essentially emulates the JIT's throwing protocol.
+ loadp JITStackFrame::globalData[sp], t1
+ loadp JSGlobalData::callFrameForThrow[t1], t0
+ jmp JSGlobalData::targetMachinePCForThrow[t1]
+
+
+_llint_throw_during_call_trampoline:
+ preserveReturnAddressAfterCall(t2)
+ loadp JITStackFrame::globalData[sp], t1
+ loadp JSGlobalData::callFrameForThrow[t1], t0
+ jmp JSGlobalData::targetMachinePCForThrow[t1]
+
+
+macro nativeCallTrampoline(executableOffsetToFunction)
+ storep 0, CodeBlock[cfr]
+ loadp CallerFrame[cfr], t0
+ loadi ScopeChain + PayloadOffset[t0], t1
+ storei CellTag, ScopeChain + TagOffset[cfr]
+ storei t1, ScopeChain + PayloadOffset[cfr]
+ if X86
+ peek 0, t1
+ storep t1, ReturnPC[cfr]
+ move cfr, t2 # t2 = ecx
+ subp 16 - 4, sp
+ loadi Callee + PayloadOffset[cfr], t1
+ loadp JSFunction::m_executable[t1], t1
+ move t0, cfr
+ call executableOffsetToFunction[t1]
+ addp 16 - 4, sp
+ loadp JITStackFrame::globalData + 4[sp], t3
+ elsif ARMv7
+ move t0, t2
+ preserveReturnAddressAfterCall(t3)
+ storep t3, ReturnPC[cfr]
+ move cfr, t0
+ loadi Callee + PayloadOffset[cfr], t1
+ loadp JSFunction::m_executable[t1], t1
+ move t2, cfr
+ call executableOffsetToFunction[t1]
+ restoreReturnAddressBeforeReturn(t3)
+ loadp JITStackFrame::globalData[sp], t3
+ else
+ error
+ end
+ bineq JSGlobalData::exception + TagOffset[t3], EmptyValueTag, .exception
+ ret
+.exception:
+ preserveReturnAddressAfterCall(t1) # This is really only needed on X86
+ callSlowPath(_llint_throw_from_native_call)
+ jmp _llint_throw_from_slow_path_trampoline
+end
+
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
new file mode 100644
index 000000000..d8c931fa0
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
@@ -0,0 +1,1490 @@
+# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+
+# Some value representation constants.
+const TagBitTypeOther = 0x2
+const TagBitBool = 0x4
+const TagBitUndefined = 0x8
+const ValueEmpty = 0x0
+const ValueFalse = TagBitTypeOther | TagBitBool
+const ValueTrue = TagBitTypeOther | TagBitBool | 1
+const ValueUndefined = TagBitTypeOther | TagBitUndefined
+const ValueNull = TagBitTypeOther
+
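+# Spelled out, the constants above encode the immediates as follows:
+#
+#     ValueFalse     = 0x2 | 0x4     = 0x6
+#     ValueTrue      = 0x2 | 0x4 | 1 = 0x7
+#     ValueUndefined = 0x2 | 0x8     = 0xa
+#     ValueNull      = 0x2           = 0x2
+#
+# Two consequences are used throughout this file: the booleans differ only in
+# bit 0, so `xorp ValueFalse` turns a boolean into 0 or 1 (and any higher bit
+# surviving the xor means "not a boolean"); and undefined differs from null
+# only by TagBitUndefined, so `andp ~TagBitUndefined` folds undefined onto
+# null.
+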
+# Utilities.
+macro dispatch(advance)
+ addp advance, PC
+ jmp [PB, PC, 8]
+end
+
+macro dispatchInt(advance)
+ addi advance, PC
+ jmp [PB, PC, 8]
+end
+
+macro dispatchAfterCall()
+ loadi ArgumentCount + TagOffset[cfr], PC
+ loadp CodeBlock[cfr], PB
+ loadp CodeBlock::m_instructions[PB], PB
+ jmp [PB, PC, 8]
+end
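+
+# Note that in this 64-bit interpreter PC is an index into the instruction
+# stream, not a byte pointer: PB holds the base of the stream, each slot is
+# 8 bytes wide, and `jmp [PB, PC, 8]` jumps through the opcode's slot.
+# dispatch() advances the index with a pointer-width add of a constant;
+# dispatchInt() does a 32-bit add instead, for (possibly negative) branch
+# offsets loaded from the instruction stream.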
+
+macro cCall2(function, arg1, arg2)
+ move arg1, t5
+ move arg2, t4
+ call function
+end
+
+# This barely works. arg3 and arg4 should probably be immediates.
+macro cCall4(function, arg1, arg2, arg3, arg4)
+ move arg1, t5
+ move arg2, t4
+ move arg3, t1
+ move arg4, t2
+ call function
+end
+
+macro prepareStateForCCall()
+ leap [PB, PC, 8], PC
+ move PB, t3
+end
+
+macro restoreStateAfterCCall()
+ move t0, PC
+ move t1, cfr
+ move t3, PB
+ subp PB, PC
+ urshiftp 3, PC
+end
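+
+# The two macros above convert between the two representations of PC that the
+# LLInt uses: C slow paths expect an Instruction*, so prepareStateForCCall()
+# materializes base + index * 8 via `leap [PB, PC, 8], PC`; on return, the
+# Instruction* handed back in t0 is turned back into an index by subtracting
+# PB and shifting the byte offset right by 3 (divide by 8).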
+
+macro callSlowPath(slowPath)
+ prepareStateForCCall()
+ cCall2(slowPath, cfr, PC)
+ restoreStateAfterCCall()
+end
+
+macro traceOperand(fromWhere, operand)
+ prepareStateForCCall()
+ cCall4(_llint_trace_operand, cfr, PC, fromWhere, operand)
+ restoreStateAfterCCall()
+end
+
+macro traceValue(fromWhere, operand)
+ prepareStateForCCall()
+ cCall4(_llint_trace_value, cfr, PC, fromWhere, operand)
+ restoreStateAfterCCall()
+end
+
+# Call a slow path for call opcodes.
+macro callCallSlowPath(advance, slowPath, action)
+ addi advance, PC, t0
+ storei t0, ArgumentCount + TagOffset[cfr]
+ prepareStateForCCall()
+ cCall2(slowPath, cfr, PC)
+ move t1, cfr
+ action(t0)
+end
+
+macro checkSwitchToJITForLoop()
+ checkSwitchToJIT(
+ 1,
+ macro()
+ storei PC, ArgumentCount + TagOffset[cfr]
+ prepareStateForCCall()
+ cCall2(_llint_loop_osr, cfr, PC)
+ move t1, cfr
+ btpz t0, .recover
+ jmp t0
+ .recover:
+            loadi ArgumentCount + TagOffset[cfr], PC
+ end)
+end
+
+# Index and value must be different registers. Index may be clobbered.
+macro loadConstantOrVariable(index, value)
+ bpgteq index, FirstConstantRegisterIndex, .constant
+ loadp [cfr, index, 8], value
+ jmp .done
+.constant:
+ loadp CodeBlock[cfr], value
+ loadp CodeBlock::m_constantRegisters + VectorBufferOffset[value], value
+ subp FirstConstantRegisterIndex, index
+ loadp [value, index, 8], value
+.done:
+end
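+
+# The split above works because the operand encoding distinguishes the two
+# cases: indices below FirstConstantRegisterIndex name registers in the call
+# frame, while an index at or above it names entry
+# (index - FirstConstantRegisterIndex) of CodeBlock::m_constantRegisters,
+# which is why the macro subtracts the bias before indexing the constant
+# buffer.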
+
+macro loadConstantOrVariableInt32(index, value, slow)
+ loadConstantOrVariable(index, value)
+ bpb value, tagTypeNumber, slow
+end
+
+macro loadConstantOrVariableCell(index, value, slow)
+ loadConstantOrVariable(index, value)
+ btpnz value, tagMask, slow
+end
+
+macro writeBarrier(value)
+ # Nothing to do, since we don't have a generational or incremental collector.
+end
+
+macro valueProfile(value, profile)
+ if VALUE_PROFILER
+ storep value, ValueProfile::m_buckets[profile]
+ end
+end
+
+
+# Entrypoints into the interpreter.
+
+# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
+macro functionArityCheck(doneLabel, slow_path)
+ loadi PayloadOffset + ArgumentCount[cfr], t0
+ biaeq t0, CodeBlock::m_numParameters[t1], doneLabel
+ prepareStateForCCall()
+ cCall2(slow_path, cfr, PC) # This slow_path has a simple protocol: t0 = 0 => no error, t0 != 0 => error
+ move t1, cfr
+ btiz t0, .continue
+ loadp JITStackFrame::globalData[sp], t1
+ loadp JSGlobalData::callFrameForThrow[t1], t0
+ jmp JSGlobalData::targetMachinePCForThrow[t1]
+.continue:
+ # Reload CodeBlock and reset PC, since the slow_path clobbered them.
+ loadp CodeBlock[cfr], t1
+ loadp CodeBlock::m_instructions[t1], PB
+ move 0, PC
+ jmp doneLabel
+end
+
+
+# Instruction implementations
+
+_llint_op_enter:
+ traceExecution()
+ loadp CodeBlock[cfr], t2
+ loadi CodeBlock::m_numVars[t2], t2
+ btiz t2, .opEnterDone
+ move ValueUndefined, t0
+.opEnterLoop:
+ subi 1, t2
+ storep t0, [cfr, t2, 8]
+ btinz t2, .opEnterLoop
+.opEnterDone:
+ dispatch(1)
+
+
+_llint_op_create_activation:
+ traceExecution()
+ loadis 8[PB, PC, 8], t0
+ bpneq [cfr, t0, 8], ValueEmpty, .opCreateActivationDone
+ callSlowPath(_llint_slow_path_create_activation)
+.opCreateActivationDone:
+ dispatch(2)
+
+
+_llint_op_init_lazy_reg:
+ traceExecution()
+ loadis 8[PB, PC, 8], t0
+ storep ValueEmpty, [cfr, t0, 8]
+ dispatch(2)
+
+
+_llint_op_create_arguments:
+ traceExecution()
+ loadis 8[PB, PC, 8], t0
+ bpneq [cfr, t0, 8], ValueEmpty, .opCreateArgumentsDone
+ callSlowPath(_llint_slow_path_create_arguments)
+.opCreateArgumentsDone:
+ dispatch(2)
+
+
+_llint_op_create_this:
+ traceExecution()
+ loadis 16[PB, PC, 8], t0
+ assertNotConstant(t0)
+ loadp [cfr, t0, 8], t0
+ btpnz t0, tagMask, .opCreateThisSlow
+ loadp JSCell::m_structure[t0], t1
+ bbb Structure::m_typeInfo + TypeInfo::m_type[t1], ObjectType, .opCreateThisSlow
+ loadp JSObject::m_inheritorID[t0], t2
+ btpz t2, .opCreateThisSlow
+ allocateBasicJSObject(JSFinalObjectSizeClassIndex, JSGlobalData::jsFinalObjectClassInfo, t2, t0, t1, t3, .opCreateThisSlow)
+ loadis 8[PB, PC, 8], t1
+ storep t0, [cfr, t1, 8]
+ dispatch(3)
+
+.opCreateThisSlow:
+ callSlowPath(_llint_slow_path_create_this)
+ dispatch(3)
+
+
+_llint_op_get_callee:
+ traceExecution()
+ loadis 8[PB, PC, 8], t0
+ loadp Callee[cfr], t1
+ storep t1, [cfr, t0, 8]
+ dispatch(2)
+
+
+_llint_op_convert_this:
+ traceExecution()
+ loadis 8[PB, PC, 8], t0
+ loadp [cfr, t0, 8], t0
+ btpnz t0, tagMask, .opConvertThisSlow
+ loadp JSCell::m_structure[t0], t0
+ bbb Structure::m_typeInfo + TypeInfo::m_type[t0], ObjectType, .opConvertThisSlow
+ dispatch(2)
+
+.opConvertThisSlow:
+ callSlowPath(_llint_slow_path_convert_this)
+ dispatch(2)
+
+
+_llint_op_new_object:
+ traceExecution()
+ loadp CodeBlock[cfr], t0
+ loadp CodeBlock::m_globalObject[t0], t0
+ loadp JSGlobalObject::m_emptyObjectStructure[t0], t1
+ allocateBasicJSObject(JSFinalObjectSizeClassIndex, JSGlobalData::jsFinalObjectClassInfo, t1, t0, t2, t3, .opNewObjectSlow)
+ loadis 8[PB, PC, 8], t1
+ storep t0, [cfr, t1, 8]
+ dispatch(2)
+
+.opNewObjectSlow:
+ callSlowPath(_llint_slow_path_new_object)
+ dispatch(2)
+
+
+_llint_op_mov:
+ traceExecution()
+ loadis 16[PB, PC, 8], t1
+ loadis 8[PB, PC, 8], t0
+ loadConstantOrVariable(t1, t2)
+ storep t2, [cfr, t0, 8]
+ dispatch(3)
+
+
+_llint_op_not:
+ traceExecution()
+ loadis 16[PB, PC, 8], t0
+ loadis 8[PB, PC, 8], t1
+ loadConstantOrVariable(t0, t2)
+ xorp ValueFalse, t2
+ btpnz t2, ~1, .opNotSlow
+ xorp ValueTrue, t2
+ storep t2, [cfr, t1, 8]
+ dispatch(3)
+
+.opNotSlow:
+ callSlowPath(_llint_slow_path_not)
+ dispatch(3)
+
+
+macro equalityComparison(integerComparison, slowPath)
+ traceExecution()
+ loadis 24[PB, PC, 8], t0
+ loadis 16[PB, PC, 8], t2
+ loadis 8[PB, PC, 8], t3
+ loadConstantOrVariableInt32(t0, t1, .slow)
+ loadConstantOrVariableInt32(t2, t0, .slow)
+ integerComparison(t0, t1, t0)
+ orp ValueFalse, t0
+ storep t0, [cfr, t3, 8]
+ dispatch(4)
+
+.slow:
+ callSlowPath(slowPath)
+ dispatch(4)
+end
+
+_llint_op_eq:
+ equalityComparison(
+ macro (left, right, result) cieq left, right, result end,
+ _llint_slow_path_eq)
+
+
+_llint_op_neq:
+ equalityComparison(
+ macro (left, right, result) cineq left, right, result end,
+ _llint_slow_path_neq)
+
+
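+# equalNullComparison() leaves 0 or 1 in t0. For cells it tests the
+# MasqueradesAsUndefined flag; for immediates it uses the encoding spelled out
+# at the top of this file, where masking off TagBitUndefined folds undefined
+# onto null:
+#
+#     ValueUndefined & ~TagBitUndefined = 0xa & ~0x8 = 0x2 = ValueNull
+#     ValueNull      & ~TagBitUndefined = 0x2 & ~0x8 = 0x2 = ValueNull
+#
+# so a single cpeq against ValueNull answers "is null or undefined".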
+macro equalNullComparison()
+ loadis 16[PB, PC, 8], t0
+ loadp [cfr, t0, 8], t0
+ btpnz t0, tagMask, .immediate
+ loadp JSCell::m_structure[t0], t2
+ tbnz Structure::m_typeInfo + TypeInfo::m_flags[t2], MasqueradesAsUndefined, t0
+ jmp .done
+.immediate:
+ andp ~TagBitUndefined, t0
+ cpeq t0, ValueNull, t0
+.done:
+end
+
+_llint_op_eq_null:
+ traceExecution()
+ equalNullComparison()
+ loadis 8[PB, PC, 8], t1
+ orp ValueFalse, t0
+ storep t0, [cfr, t1, 8]
+ dispatch(3)
+
+
+_llint_op_neq_null:
+ traceExecution()
+ equalNullComparison()
+ loadis 8[PB, PC, 8], t1
+ xorp ValueTrue, t0
+ storep t0, [cfr, t1, 8]
+ dispatch(3)
+
+
+macro strictEq(equalityOperation, slowPath)
+ traceExecution()
+ loadis 24[PB, PC, 8], t0
+ loadis 16[PB, PC, 8], t2
+ loadConstantOrVariable(t0, t1)
+ loadConstantOrVariable(t2, t0)
+ move t0, t2
+ orp t1, t2
+ btpz t2, tagMask, .slow
+ bpaeq t0, tagTypeNumber, .leftOK
+ btpnz t0, tagTypeNumber, .slow
+.leftOK:
+ bpaeq t1, tagTypeNumber, .rightOK
+ btpnz t1, tagTypeNumber, .slow
+.rightOK:
+ equalityOperation(t0, t1, t0)
+ loadis 8[PB, PC, 8], t1
+ orp ValueFalse, t0
+ storep t0, [cfr, t1, 8]
+ dispatch(4)
+
+.slow:
+ callSlowPath(slowPath)
+ dispatch(4)
+end
+
+_llint_op_stricteq:
+ strictEq(
+ macro (left, right, result) cpeq left, right, result end,
+ _llint_slow_path_stricteq)
+
+
+_llint_op_nstricteq:
+ strictEq(
+ macro (left, right, result) cpneq left, right, result end,
+ _llint_slow_path_nstricteq)
+
+
+macro preOp(arithmeticOperation, slowPath)
+ traceExecution()
+ loadis 8[PB, PC, 8], t0
+ loadp [cfr, t0, 8], t1
+ bpb t1, tagTypeNumber, .slow
+ arithmeticOperation(t1, .slow)
+ orp tagTypeNumber, t1
+ storep t1, [cfr, t0, 8]
+ dispatch(2)
+
+.slow:
+ callSlowPath(slowPath)
+ dispatch(2)
+end
+
+_llint_op_pre_inc:
+ preOp(
+ macro (value, slow) baddio 1, value, slow end,
+ _llint_slow_path_pre_inc)
+
+
+_llint_op_pre_dec:
+ preOp(
+ macro (value, slow) bsubio 1, value, slow end,
+ _llint_slow_path_pre_dec)
+
+
+macro postOp(arithmeticOperation, slowPath)
+ traceExecution()
+ loadis 16[PB, PC, 8], t0
+ loadis 8[PB, PC, 8], t1
+ loadp [cfr, t0, 8], t2
+ bieq t0, t1, .done
+ bpb t2, tagTypeNumber, .slow
+ move t2, t3
+ arithmeticOperation(t3, .slow)
+ orp tagTypeNumber, t3
+ storep t2, [cfr, t1, 8]
+ storep t3, [cfr, t0, 8]
+.done:
+ dispatch(3)
+
+.slow:
+ callSlowPath(slowPath)
+ dispatch(3)
+end
+
+_llint_op_post_inc:
+ postOp(
+ macro (value, slow) baddio 1, value, slow end,
+ _llint_slow_path_post_inc)
+
+
+_llint_op_post_dec:
+ postOp(
+ macro (value, slow) bsubio 1, value, slow end,
+ _llint_slow_path_post_dec)
+
+
+_llint_op_to_jsnumber:
+ traceExecution()
+ loadis 16[PB, PC, 8], t0
+ loadis 8[PB, PC, 8], t1
+ loadConstantOrVariable(t0, t2)
+ bpaeq t2, tagTypeNumber, .opToJsnumberIsImmediate
+ btpz t2, tagTypeNumber, .opToJsnumberSlow
+.opToJsnumberIsImmediate:
+ storep t2, [cfr, t1, 8]
+ dispatch(3)
+
+.opToJsnumberSlow:
+ callSlowPath(_llint_slow_path_to_jsnumber)
+ dispatch(3)
+
+
+_llint_op_negate:
+ traceExecution()
+ loadis 16[PB, PC, 8], t0
+ loadis 8[PB, PC, 8], t1
+ loadConstantOrVariable(t0, t2)
+ bpb t2, tagTypeNumber, .opNegateNotInt
+ btiz t2, 0x7fffffff, .opNegateSlow
+ negi t2
+ orp tagTypeNumber, t2
+ storep t2, [cfr, t1, 8]
+ dispatch(3)
+.opNegateNotInt:
+ btpz t2, tagTypeNumber, .opNegateSlow
+ xorp 0x8000000000000000, t2
+ storep t2, [cfr, t1, 8]
+ dispatch(3)
+
+.opNegateSlow:
+ callSlowPath(_llint_slow_path_negate)
+ dispatch(3)
+
+
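+# A note on the number representation assumed below (the JSVALUE64 scheme;
+# tagTypeNumber is provided by the shared LLInt code and holds
+# 0xffff000000000000): an int32 is boxed as tagTypeNumber | zero-extended
+# value, so anything unsigned-below tagTypeNumber is not an int32, and
+# anything with no tagTypeNumber bits set is not a number at all. A double is
+# boxed as its raw bits plus 2^48; since tagTypeNumber is -2^48 modulo 2^64,
+# `addp tagTypeNumber` unboxes a double before fp2d, and `subp tagTypeNumber`
+# re-boxes after fd2p.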
+macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
+ loadis 24[PB, PC, 8], t0
+ loadis 16[PB, PC, 8], t2
+ loadConstantOrVariable(t0, t1)
+ loadConstantOrVariable(t2, t0)
+ bpb t0, tagTypeNumber, .op1NotInt
+ bpb t1, tagTypeNumber, .op2NotInt
+ loadis 8[PB, PC, 8], t2
+ integerOperationAndStore(t1, t0, .slow, t2)
+ dispatch(5)
+
+.op1NotInt:
+    # First operand is definitely not an int; the second operand could be anything.
+ btpz t0, tagTypeNumber, .slow
+ bpaeq t1, tagTypeNumber, .op1NotIntOp2Int
+ btpz t1, tagTypeNumber, .slow
+ addp tagTypeNumber, t1
+ fp2d t1, ft1
+ jmp .op1NotIntReady
+.op1NotIntOp2Int:
+ ci2d t1, ft1
+.op1NotIntReady:
+ loadis 8[PB, PC, 8], t2
+ addp tagTypeNumber, t0
+ fp2d t0, ft0
+ doubleOperation(ft1, ft0)
+ fd2p ft0, t0
+ subp tagTypeNumber, t0
+ storep t0, [cfr, t2, 8]
+ dispatch(5)
+
+.op2NotInt:
+    # First operand is definitely an int; the second is definitely not.
+ loadis 8[PB, PC, 8], t2
+ btpz t1, tagTypeNumber, .slow
+ ci2d t0, ft0
+ addp tagTypeNumber, t1
+ fp2d t1, ft1
+ doubleOperation(ft1, ft0)
+ fd2p ft0, t0
+ subp tagTypeNumber, t0
+ storep t0, [cfr, t2, 8]
+ dispatch(5)
+
+.slow:
+ callSlowPath(slowPath)
+ dispatch(5)
+end
+
+macro binaryOp(integerOperation, doubleOperation, slowPath)
+ binaryOpCustomStore(
+ macro (left, right, slow, index)
+ integerOperation(left, right, slow)
+ orp tagTypeNumber, right
+ storep right, [cfr, index, 8]
+ end,
+ doubleOperation, slowPath)
+end
+
+_llint_op_add:
+ traceExecution()
+ binaryOp(
+ macro (left, right, slow) baddio left, right, slow end,
+ macro (left, right) addd left, right end,
+ _llint_slow_path_add)
+
+
+_llint_op_mul:
+ traceExecution()
+ binaryOpCustomStore(
+ macro (left, right, slow, index)
+ # Assume t3 is scratchable.
+ move right, t3
+ bmulio left, t3, slow
+ btinz t3, .done
+ bilt left, 0, slow
+ bilt right, 0, slow
+ .done:
+ orp tagTypeNumber, t3
+ storep t3, [cfr, index, 8]
+ end,
+ macro (left, right) muld left, right end,
+ _llint_slow_path_mul)
+
+
+_llint_op_sub:
+ traceExecution()
+ binaryOp(
+ macro (left, right, slow) bsubio left, right, slow end,
+ macro (left, right) subd left, right end,
+ _llint_slow_path_sub)
+
+
+_llint_op_div:
+ traceExecution()
+ binaryOpCustomStore(
+ macro (left, right, slow, index)
+ # Assume t3 is scratchable.
+ btiz left, slow
+ btinz right, .intOK
+ bilt left, 0, slow
+ .intOK:
+ move left, t3
+ move right, t0
+ cdqi
+ idivi t3
+ btinz t1, slow
+ orp tagTypeNumber, t0
+ storep t0, [cfr, index, 8]
+ end,
+ macro (left, right) divd left, right end,
+ _llint_slow_path_div)
+
+
+macro bitOp(operation, slowPath, advance)
+ loadis 24[PB, PC, 8], t0
+ loadis 16[PB, PC, 8], t2
+ loadis 8[PB, PC, 8], t3
+ loadConstantOrVariable(t0, t1)
+ loadConstantOrVariable(t2, t0)
+ bpb t0, tagTypeNumber, .slow
+ bpb t1, tagTypeNumber, .slow
+ operation(t1, t0, .slow)
+ orp tagTypeNumber, t0
+ storep t0, [cfr, t3, 8]
+ dispatch(advance)
+
+.slow:
+ callSlowPath(slowPath)
+ dispatch(advance)
+end
+
+_llint_op_lshift:
+ traceExecution()
+ bitOp(
+ macro (left, right, slow) lshifti left, right end,
+ _llint_slow_path_lshift,
+ 4)
+
+
+_llint_op_rshift:
+ traceExecution()
+ bitOp(
+ macro (left, right, slow) rshifti left, right end,
+ _llint_slow_path_rshift,
+ 4)
+
+
+_llint_op_urshift:
+ traceExecution()
+ bitOp(
+ macro (left, right, slow)
+ urshifti left, right
+ bilt right, 0, slow
+ end,
+ _llint_slow_path_urshift,
+ 4)
+
+
+_llint_op_bitand:
+ traceExecution()
+ bitOp(
+ macro (left, right, slow) andi left, right end,
+ _llint_slow_path_bitand,
+ 5)
+
+
+_llint_op_bitxor:
+ traceExecution()
+ bitOp(
+ macro (left, right, slow) xori left, right end,
+ _llint_slow_path_bitxor,
+ 5)
+
+
+_llint_op_bitor:
+ traceExecution()
+ bitOp(
+ macro (left, right, slow) ori left, right end,
+ _llint_slow_path_bitor,
+ 5)
+
+
+_llint_op_check_has_instance:
+ traceExecution()
+ loadis 8[PB, PC, 8], t1
+ loadConstantOrVariableCell(t1, t0, .opCheckHasInstanceSlow)
+ loadp JSCell::m_structure[t0], t0
+ btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsHasInstance, .opCheckHasInstanceSlow
+ dispatch(2)
+
+.opCheckHasInstanceSlow:
+ callSlowPath(_llint_slow_path_check_has_instance)
+ dispatch(2)
+
+
+_llint_op_instanceof:
+ traceExecution()
+ # Check that baseVal implements the default HasInstance behavior.
+ # FIXME: This should be deprecated.
+ loadis 24[PB, PC, 8], t1
+ loadConstantOrVariable(t1, t0)
+ loadp JSCell::m_structure[t0], t0
+ btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsDefaultHasInstance, .opInstanceofSlow
+
+ # Actually do the work.
+ loadis 32[PB, PC, 8], t0
+ loadis 8[PB, PC, 8], t3
+ loadConstantOrVariableCell(t0, t1, .opInstanceofSlow)
+ loadp JSCell::m_structure[t1], t2
+ bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opInstanceofSlow
+ loadis 16[PB, PC, 8], t0
+ loadConstantOrVariableCell(t0, t2, .opInstanceofSlow)
+
+ # Register state: t1 = prototype, t2 = value
+ move 1, t0
+.opInstanceofLoop:
+ loadp JSCell::m_structure[t2], t2
+ loadp Structure::m_prototype[t2], t2
+ bpeq t2, t1, .opInstanceofDone
+ btpz t2, tagMask, .opInstanceofLoop
+
+ move 0, t0
+.opInstanceofDone:
+ orp ValueFalse, t0
+ storep t0, [cfr, t3, 8]
+ dispatch(5)
+
+.opInstanceofSlow:
+ callSlowPath(_llint_slow_path_instanceof)
+ dispatch(5)
+
+
+macro resolveGlobal(size, slow)
+ # Operands are as follows:
+ # 8[PB, PC, 8] Destination for the load.
+ # 16[PB, PC, 8] Property identifier index in the code block.
+ # 24[PB, PC, 8] Structure pointer, initialized to 0 by bytecode generator.
+ # 32[PB, PC, 8] Offset in global object, initialized to 0 by bytecode generator.
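+    # Both cache words start out zero, so the structure compare below fails
+    # the first time through and we take the slow path, which caches the
+    # global object's structure and the property's offset; thereafter the
+    # load stays on this fast path until the structure changes.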
+ loadp CodeBlock[cfr], t0
+ loadp CodeBlock::m_globalObject[t0], t0
+ loadp JSCell::m_structure[t0], t1
+ bpneq t1, 24[PB, PC, 8], slow
+ loadis 32[PB, PC, 8], t1
+ loadp JSObject::m_propertyStorage[t0], t0
+ loadp [t0, t1, 8], t2
+ loadis 8[PB, PC, 8], t0
+ storep t2, [cfr, t0, 8]
+ loadp (size - 1) * 8[PB, PC, 8], t0
+ valueProfile(t2, t0)
+end
+
+_llint_op_resolve_global:
+ traceExecution()
+ resolveGlobal(6, .opResolveGlobalSlow)
+ dispatch(6)
+
+.opResolveGlobalSlow:
+ callSlowPath(_llint_slow_path_resolve_global)
+ dispatch(6)
+
+
+# Gives you the scope in t0, while allowing you to optionally perform additional checks on the
+# scopes as they are traversed. scopeCheck() is called with two arguments: the register
+# holding the scope, and a register that can be used for scratch. Note that this does not
+# use t3, so you can hold stuff in t3 if need be.
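+# For example, _llint_op_get_scoped_var below walks the chain with an empty
+# check -- getScope(24[PB, PC, 8], macro (scope, scratch) end) -- while
+# _llint_op_resolve_global_dynamic supplies a check that bails to a slow path
+# unless every skipped scope is an activation.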
+macro getScope(deBruijinIndexOperand, scopeCheck)
+ loadp ScopeChain[cfr], t0
+ loadis deBruijinIndexOperand, t2
+
+ btiz t2, .done
+
+ loadp CodeBlock[cfr], t1
+ bineq CodeBlock::m_codeType[t1], FunctionCode, .loop
+ btbz CodeBlock::m_needsFullScopeChain[t1], .loop
+
+ loadis CodeBlock::m_activationRegister[t1], t1
+
+ # Need to conditionally skip over one scope.
+ btpz [cfr, t1, 8], .noActivation
+ scopeCheck(t0, t1)
+ loadp ScopeChainNode::next[t0], t0
+.noActivation:
+ subi 1, t2
+
+ btiz t2, .done
+.loop:
+ scopeCheck(t0, t1)
+ loadp ScopeChainNode::next[t0], t0
+ subi 1, t2
+ btinz t2, .loop
+
+.done:
+end
+
+_llint_op_resolve_global_dynamic:
+ traceExecution()
+ loadp JITStackFrame::globalData[sp], t3
+ loadp JSGlobalData::activationStructure[t3], t3
+ getScope(
+ 40[PB, PC, 8],
+ macro (scope, scratch)
+ loadp ScopeChainNode::object[scope], scratch
+ bpneq JSCell::m_structure[scratch], t3, .opResolveGlobalDynamicSuperSlow
+ end)
+ resolveGlobal(7, .opResolveGlobalDynamicSlow)
+ dispatch(7)
+
+.opResolveGlobalDynamicSuperSlow:
+ callSlowPath(_llint_slow_path_resolve_for_resolve_global_dynamic)
+ dispatch(7)
+
+.opResolveGlobalDynamicSlow:
+ callSlowPath(_llint_slow_path_resolve_global_dynamic)
+ dispatch(7)
+
+
+_llint_op_get_scoped_var:
+ traceExecution()
+ # Operands are as follows:
+ # 8[PB, PC, 8] Destination for the load
+ # 16[PB, PC, 8] Index of register in the scope
+    # 24[PB, PC, 8] De Bruijn index.
+ getScope(24[PB, PC, 8], macro (scope, scratch) end)
+ loadis 8[PB, PC, 8], t1
+ loadis 16[PB, PC, 8], t2
+ loadp ScopeChainNode::object[t0], t0
+ loadp JSVariableObject::m_registers[t0], t0
+ loadp [t0, t2, 8], t3
+ storep t3, [cfr, t1, 8]
+ loadp 32[PB, PC, 8], t1
+ valueProfile(t3, t1)
+ dispatch(5)
+
+
+_llint_op_put_scoped_var:
+ traceExecution()
+ getScope(16[PB, PC, 8], macro (scope, scratch) end)
+ loadis 24[PB, PC, 8], t1
+ loadConstantOrVariable(t1, t3)
+ loadis 8[PB, PC, 8], t1
+ writeBarrier(t3)
+ loadp ScopeChainNode::object[t0], t0
+ loadp JSVariableObject::m_registers[t0], t0
+ storep t3, [t0, t1, 8]
+ dispatch(4)
+
+
+_llint_op_get_global_var:
+ traceExecution()
+ loadis 16[PB, PC, 8], t1
+ loadis 8[PB, PC, 8], t3
+ loadp CodeBlock[cfr], t0
+ loadp CodeBlock::m_globalObject[t0], t0
+ loadp JSGlobalObject::m_registers[t0], t0
+ loadp [t0, t1, 8], t2
+ storep t2, [cfr, t3, 8]
+ loadp 24[PB, PC, 8], t3
+ valueProfile(t2, t3)
+ dispatch(4)
+
+
+_llint_op_put_global_var:
+ traceExecution()
+ loadis 16[PB, PC, 8], t1
+ loadp CodeBlock[cfr], t0
+ loadp CodeBlock::m_globalObject[t0], t0
+ loadp JSGlobalObject::m_registers[t0], t0
+ loadConstantOrVariable(t1, t2)
+ loadis 8[PB, PC, 8], t1
+ writeBarrier(t2)
+ storep t2, [t0, t1, 8]
+ dispatch(3)
+
+
+_llint_op_get_by_id:
+ traceExecution()
+ # We only do monomorphic get_by_id caching for now, and we do not modify the
+    # opcode. We do, however, allow the cache to change any time it fails, since
+ # ping-ponging is free. At best we get lucky and the get_by_id will continue
+ # to take fast path on the new cache. At worst we take slow path, which is what
+ # we would have been doing anyway.
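+    # Concretely, the operands used below are: 16[] the base, 32[] the cached
+    # structure, 40[] the cached storage offset, 8[] the destination, and
+    # 64[] the value profile; a non-cell base or a structure mismatch are the
+    # only fast-path bailouts.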
+ loadis 16[PB, PC, 8], t0
+ loadp 32[PB, PC, 8], t1
+ loadConstantOrVariableCell(t0, t3, .opGetByIdSlow)
+ loadis 40[PB, PC, 8], t2
+ loadp JSObject::m_propertyStorage[t3], t0
+ bpneq JSCell::m_structure[t3], t1, .opGetByIdSlow
+ loadis 8[PB, PC, 8], t1
+ loadp [t0, t2], t3
+ storep t3, [cfr, t1, 8]
+ loadp 64[PB, PC, 8], t1
+ valueProfile(t3, t1)
+ dispatch(9)
+
+.opGetByIdSlow:
+ callSlowPath(_llint_slow_path_get_by_id)
+ dispatch(9)
+
+
+_llint_op_get_arguments_length:
+ traceExecution()
+ loadis 16[PB, PC, 8], t0
+ loadis 8[PB, PC, 8], t1
+ btpnz [cfr, t0, 8], .opGetArgumentsLengthSlow
+ loadi ArgumentCount + PayloadOffset[cfr], t2
+ subi 1, t2
+ orp tagTypeNumber, t2
+ storep t2, [cfr, t1, 8]
+ dispatch(4)
+
+.opGetArgumentsLengthSlow:
+ callSlowPath(_llint_slow_path_get_arguments_length)
+ dispatch(4)
+
+
+_llint_op_put_by_id:
+ traceExecution()
+ loadis 8[PB, PC, 8], t3
+ loadp 32[PB, PC, 8], t1
+ loadConstantOrVariableCell(t3, t0, .opPutByIdSlow)
+ loadis 24[PB, PC, 8], t2
+ loadp JSObject::m_propertyStorage[t0], t3
+ bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
+ loadis 40[PB, PC, 8], t1
+ loadConstantOrVariable(t2, t0)
+ writeBarrier(t0)
+ storep t0, [t3, t1]
+ dispatch(9)
+
+.opPutByIdSlow:
+ callSlowPath(_llint_slow_path_put_by_id)
+ dispatch(9)
+
+
+macro putByIdTransition(additionalChecks)
+ traceExecution()
+ loadis 8[PB, PC, 8], t3
+ loadp 32[PB, PC, 8], t1
+ loadConstantOrVariableCell(t3, t0, .opPutByIdSlow)
+ loadis 24[PB, PC, 8], t2
+ bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
+ additionalChecks(t1, t3, .opPutByIdSlow)
+ loadis 40[PB, PC, 8], t1
+ loadp JSObject::m_propertyStorage[t0], t3
+ addp t1, t3
+ loadConstantOrVariable(t2, t1)
+ writeBarrier(t1)
+ storep t1, [t3]
+ loadp 48[PB, PC, 8], t1
+ storep t1, JSCell::m_structure[t0]
+ dispatch(9)
+end
+
+_llint_op_put_by_id_transition_direct:
+ putByIdTransition(macro (oldStructure, scratch, slow) end)
+
+
+_llint_op_put_by_id_transition_normal:
+ putByIdTransition(
+ macro (oldStructure, scratch, slow)
+ const protoCell = oldStructure # Reusing the oldStructure register for the proto
+ loadp 56[PB, PC, 8], scratch
+ assert(macro (ok) btpnz scratch, ok end)
+ loadp StructureChain::m_vector[scratch], scratch
+ assert(macro (ok) btpnz scratch, ok end)
+ bpeq Structure::m_prototype[oldStructure], ValueNull, .done
+ .loop:
+ loadp Structure::m_prototype[oldStructure], protoCell
+ loadp JSCell::m_structure[protoCell], oldStructure
+ bpneq oldStructure, [scratch], slow
+ addp 8, scratch
+ bpneq Structure::m_prototype[oldStructure], ValueNull, .loop
+ .done:
+ end)
+
+
+_llint_op_get_by_val:
+ traceExecution()
+ loadp CodeBlock[cfr], t1
+ loadis 16[PB, PC, 8], t2
+ loadis 24[PB, PC, 8], t3
+ loadp CodeBlock::m_globalData[t1], t1
+ loadConstantOrVariableCell(t2, t0, .opGetByValSlow)
+ loadp JSGlobalData::jsArrayClassInfo[t1], t2
+ loadConstantOrVariableInt32(t3, t1, .opGetByValSlow)
+ sxi2p t1, t1
+ bpneq [t0], t2, .opGetByValSlow
+ loadp JSArray::m_storage[t0], t3
+ biaeq t1, JSArray::m_vectorLength[t0], .opGetByValSlow
+ loadis 8[PB, PC, 8], t0
+ loadp ArrayStorage::m_vector[t3, t1, 8], t2
+ btpz t2, .opGetByValSlow
+ storep t2, [cfr, t0, 8]
+ loadp 32[PB, PC, 8], t0
+ valueProfile(t2, t0)
+ dispatch(5)
+
+.opGetByValSlow:
+ callSlowPath(_llint_slow_path_get_by_val)
+ dispatch(5)
+
+
+_llint_op_get_argument_by_val:
+ traceExecution()
+ loadis 16[PB, PC, 8], t0
+ loadis 24[PB, PC, 8], t1
+ btpnz [cfr, t0, 8], .opGetArgumentByValSlow
+ loadConstantOrVariableInt32(t1, t2, .opGetArgumentByValSlow)
+ addi 1, t2
+ loadi ArgumentCount + PayloadOffset[cfr], t1
+ biaeq t2, t1, .opGetArgumentByValSlow
+ negi t2
+ sxi2p t2, t2
+ loadis 8[PB, PC, 8], t3
+ loadp ThisArgumentOffset[cfr, t2, 8], t0
+ storep t0, [cfr, t3, 8]
+ dispatch(5)
+
+.opGetArgumentByValSlow:
+ callSlowPath(_llint_slow_path_get_argument_by_val)
+ dispatch(5)
+
+
+_llint_op_get_by_pname:
+ traceExecution()
+ loadis 24[PB, PC, 8], t1
+ loadConstantOrVariable(t1, t0)
+ loadis 32[PB, PC, 8], t1
+ assertNotConstant(t1)
+ bpneq t0, [cfr, t1, 8], .opGetByPnameSlow
+ loadis 16[PB, PC, 8], t2
+ loadis 40[PB, PC, 8], t3
+ loadConstantOrVariableCell(t2, t0, .opGetByPnameSlow)
+ assertNotConstant(t3)
+ loadp [cfr, t3, 8], t1
+ loadp JSCell::m_structure[t0], t2
+ bpneq t2, JSPropertyNameIterator::m_cachedStructure[t1], .opGetByPnameSlow
+ loadis 48[PB, PC, 8], t3
+ loadi PayloadOffset[cfr, t3, 8], t3
+ subi 1, t3
+ biaeq t3, JSPropertyNameIterator::m_numCacheableSlots[t1], .opGetByPnameSlow
+ loadp JSObject::m_propertyStorage[t0], t0
+ loadp [t0, t3, 8], t0
+ loadis 8[PB, PC, 8], t1
+ storep t0, [cfr, t1, 8]
+ dispatch(7)
+
+.opGetByPnameSlow:
+ callSlowPath(_llint_slow_path_get_by_pname)
+ dispatch(7)
+
+
+_llint_op_put_by_val:
+ traceExecution()
+ loadis 8[PB, PC, 8], t0
+ loadConstantOrVariableCell(t0, t1, .opPutByValSlow)
+ loadis 16[PB, PC, 8], t0
+ loadConstantOrVariableInt32(t0, t2, .opPutByValSlow)
+ sxi2p t2, t2
+ loadp CodeBlock[cfr], t0
+ loadp CodeBlock::m_globalData[t0], t0
+ loadp JSGlobalData::jsArrayClassInfo[t0], t0
+ bpneq [t1], t0, .opPutByValSlow
+ biaeq t2, JSArray::m_vectorLength[t1], .opPutByValSlow
+ loadp JSArray::m_storage[t1], t0
+ btpz ArrayStorage::m_vector[t0, t2, 8], .opPutByValEmpty
+.opPutByValStoreResult:
+ loadis 24[PB, PC, 8], t3
+ loadConstantOrVariable(t3, t1)
+ writeBarrier(t1)
+ storep t1, ArrayStorage::m_vector[t0, t2, 8]
+ dispatch(4)
+
+.opPutByValEmpty:
+ addi 1, ArrayStorage::m_numValuesInVector[t0]
+ bib t2, ArrayStorage::m_length[t0], .opPutByValStoreResult
+ addi 1, t2, t1
+ storei t1, ArrayStorage::m_length[t0]
+ jmp .opPutByValStoreResult
+
+.opPutByValSlow:
+ callSlowPath(_llint_slow_path_put_by_val)
+ dispatch(4)
+
+
+_llint_op_loop:
+ nop
+_llint_op_jmp:
+ traceExecution()
+ dispatchInt(8[PB, PC, 8])
+
+
+macro jumpTrueOrFalse(conditionOp, slow)
+ loadis 8[PB, PC, 8], t1
+ loadConstantOrVariable(t1, t0)
+ xorp ValueFalse, t0
+    btpnz t0, ~1, .slow
+ conditionOp(t0, .target)
+ dispatch(3)
+
+.target:
+ dispatchInt(16[PB, PC, 8])
+
+.slow:
+ callSlowPath(slow)
+ dispatch(0)
+end
+
+
+macro equalNull(cellHandler, immediateHandler)
+ loadis 8[PB, PC, 8], t0
+ assertNotConstant(t0)
+ loadp [cfr, t0, 8], t0
+ btpnz t0, tagMask, .immediate
+ loadp JSCell::m_structure[t0], t2
+ cellHandler(Structure::m_typeInfo + TypeInfo::m_flags[t2], .target)
+ dispatch(3)
+
+.target:
+    dispatchInt(16[PB, PC, 8])
+
+.immediate:
+ andp ~TagBitUndefined, t0
+ immediateHandler(t0, .target)
+ dispatch(3)
+end
+
+_llint_op_jeq_null:
+ traceExecution()
+ equalNull(
+ macro (value, target) btbnz value, MasqueradesAsUndefined, target end,
+ macro (value, target) bpeq value, ValueNull, target end)
+
+
+_llint_op_jneq_null:
+ traceExecution()
+ equalNull(
+ macro (value, target) btbz value, MasqueradesAsUndefined, target end,
+ macro (value, target) bpneq value, ValueNull, target end)
+
+
+_llint_op_jneq_ptr:
+ traceExecution()
+ loadis 8[PB, PC, 8], t0
+ loadp 16[PB, PC, 8], t1
+ bpneq t1, [cfr, t0, 8], .opJneqPtrTarget
+ dispatch(4)
+
+.opJneqPtrTarget:
+ dispatchInt(24[PB, PC, 8])
+
+
+macro compare(integerCompare, doubleCompare, slowPath)
+ loadis 8[PB, PC, 8], t2
+ loadis 16[PB, PC, 8], t3
+ loadConstantOrVariable(t2, t0)
+ loadConstantOrVariable(t3, t1)
+ bpb t0, tagTypeNumber, .op1NotInt
+ bpb t1, tagTypeNumber, .op2NotInt
+ integerCompare(t0, t1, .jumpTarget)
+ dispatch(4)
+
+.op1NotInt:
+ btpz t0, tagTypeNumber, .slow
+ bpb t1, tagTypeNumber, .op1NotIntOp2NotInt
+ ci2d t1, ft1
+ jmp .op1NotIntReady
+.op1NotIntOp2NotInt:
+ btpz t1, tagTypeNumber, .slow
+ addp tagTypeNumber, t1
+ fp2d t1, ft1
+.op1NotIntReady:
+ addp tagTypeNumber, t0
+ fp2d t0, ft0
+ doubleCompare(ft0, ft1, .jumpTarget)
+ dispatch(4)
+
+.op2NotInt:
+ ci2d t0, ft0
+ btpz t1, tagTypeNumber, .slow
+ addp tagTypeNumber, t1
+ fp2d t1, ft1
+ doubleCompare(ft0, ft1, .jumpTarget)
+ dispatch(4)
+
+.jumpTarget:
+ dispatchInt(24[PB, PC, 8])
+
+.slow:
+ callSlowPath(slowPath)
+ dispatch(0)
+end
+
+
+_llint_op_switch_imm:
+ traceExecution()
+ loadis 24[PB, PC, 8], t2
+ loadis 8[PB, PC, 8], t3
+ loadConstantOrVariable(t2, t1)
+ loadp CodeBlock[cfr], t2
+ loadp CodeBlock::m_rareData[t2], t2
+ muli sizeof SimpleJumpTable, t3 # FIXME: would be nice to peephole this!
+ loadp CodeBlock::RareData::m_immediateSwitchJumpTables + VectorBufferOffset[t2], t2
+ addp t3, t2
+ bpb t1, tagTypeNumber, .opSwitchImmNotInt
+ subi SimpleJumpTable::min[t2], t1
+ biaeq t1, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchImmFallThrough
+ loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t3
+ loadis [t3, t1, 4], t1
+ btiz t1, .opSwitchImmFallThrough
+ dispatch(t1)
+
+.opSwitchImmNotInt:
+ btpnz t1, tagTypeNumber, .opSwitchImmSlow # Go slow if it's a double.
+.opSwitchImmFallThrough:
+ dispatchInt(16[PB, PC, 8])
+
+.opSwitchImmSlow:
+ callSlowPath(_llint_slow_path_switch_imm)
+ dispatch(0)
+
+
+_llint_op_switch_char:
+ traceExecution()
+ loadis 24[PB, PC, 8], t2
+ loadis 8[PB, PC, 8], t3
+ loadConstantOrVariable(t2, t1)
+ loadp CodeBlock[cfr], t2
+ loadp CodeBlock::m_rareData[t2], t2
+ muli sizeof SimpleJumpTable, t3
+ loadp CodeBlock::RareData::m_characterSwitchJumpTables + VectorBufferOffset[t2], t2
+ addp t3, t2
+ btpnz t1, tagMask, .opSwitchCharFallThrough
+ loadp JSCell::m_structure[t1], t0
+ bbneq Structure::m_typeInfo + TypeInfo::m_type[t0], StringType, .opSwitchCharFallThrough
+ loadp JSString::m_value[t1], t0
+ bineq StringImpl::m_length[t0], 1, .opSwitchCharFallThrough
+ loadp StringImpl::m_data8[t0], t1
+ btinz StringImpl::m_hashAndFlags[t0], HashFlags8BitBuffer, .opSwitchChar8Bit
+ loadh [t1], t0
+ jmp .opSwitchCharReady
+.opSwitchChar8Bit:
+ loadb [t1], t0
+.opSwitchCharReady:
+ subi SimpleJumpTable::min[t2], t0
+ biaeq t0, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchCharFallThrough
+ loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t2
+ loadis [t2, t0, 4], t1
+ btiz t1, .opSwitchCharFallThrough
+ dispatch(t1)
+
+.opSwitchCharFallThrough:
+ dispatchInt(16[PB, PC, 8])
+
+
+_llint_op_new_func:
+ traceExecution()
+ btiz 24[PB, PC, 8], .opNewFuncUnchecked
+ loadis 8[PB, PC, 8], t1
+ btpnz [cfr, t1, 8], .opNewFuncDone
+.opNewFuncUnchecked:
+ callSlowPath(_llint_slow_path_new_func)
+.opNewFuncDone:
+ dispatch(4)
+
+
+macro doCall(slowPath)
+ loadis 8[PB, PC, 8], t0
+ loadp 32[PB, PC, 8], t1
+ loadp LLIntCallLinkInfo::callee[t1], t2
+ loadConstantOrVariable(t0, t3)
+ bpneq t3, t2, .opCallSlow
+ loadis 24[PB, PC, 8], t3
+ addi 6, PC
+ lshifti 3, t3
+ addp cfr, t3
+ loadp JSFunction::m_scopeChain[t2], t0
+ storep t2, Callee[t3]
+ storep t0, ScopeChain[t3]
+ loadis 16 - 48[PB, PC, 8], t2
+ storei PC, ArgumentCount + TagOffset[cfr]
+ storep cfr, CallerFrame[t3]
+ storei t2, ArgumentCount + PayloadOffset[t3]
+ move t3, cfr
+ call LLIntCallLinkInfo::machineCodeTarget[t1]
+ dispatchAfterCall()
+
+.opCallSlow:
+ slowPathForCall(6, slowPath)
+end
+
+
+_llint_op_tear_off_activation:
+ traceExecution()
+ loadis 8[PB, PC, 8], t0
+ loadis 16[PB, PC, 8], t1
+ btpnz [cfr, t0, 8], .opTearOffActivationCreated
+ btpz [cfr, t1, 8], .opTearOffActivationNotCreated
+.opTearOffActivationCreated:
+ callSlowPath(_llint_slow_path_tear_off_activation)
+.opTearOffActivationNotCreated:
+ dispatch(3)
+
+
+_llint_op_tear_off_arguments:
+ traceExecution()
+ loadis 8[PB, PC, 8], t0
+ subi 1, t0 # Get the unmodifiedArgumentsRegister
+ btpz [cfr, t0, 8], .opTearOffArgumentsNotCreated
+ callSlowPath(_llint_slow_path_tear_off_arguments)
+.opTearOffArgumentsNotCreated:
+ dispatch(2)
+
+
+_llint_op_ret:
+ traceExecution()
+ checkSwitchToJITForEpilogue()
+ loadis 8[PB, PC, 8], t2
+ loadConstantOrVariable(t2, t0)
+ doReturn()
+
+
+_llint_op_call_put_result:
+ loadis 8[PB, PC, 8], t2
+ loadp 16[PB, PC, 8], t3
+ storep t0, [cfr, t2, 8]
+ valueProfile(t0, t3)
+ traceExecution()
+ dispatch(3)
+
+
+_llint_op_ret_object_or_this:
+ traceExecution()
+ checkSwitchToJITForEpilogue()
+ loadis 8[PB, PC, 8], t2
+ loadConstantOrVariable(t2, t0)
+ btpnz t0, tagMask, .opRetObjectOrThisNotObject
+ loadp JSCell::m_structure[t0], t2
+ bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opRetObjectOrThisNotObject
+ doReturn()
+
+.opRetObjectOrThisNotObject:
+ loadis 16[PB, PC, 8], t2
+ loadConstantOrVariable(t2, t0)
+ doReturn()
+
+
+_llint_op_to_primitive:
+ traceExecution()
+ loadis 16[PB, PC, 8], t2
+ loadis 8[PB, PC, 8], t3
+ loadConstantOrVariable(t2, t0)
+ btpnz t0, tagMask, .opToPrimitiveIsImm
+ loadp JSCell::m_structure[t0], t2
+ bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .opToPrimitiveSlowCase
+.opToPrimitiveIsImm:
+ storep t0, [cfr, t3, 8]
+ dispatch(3)
+
+.opToPrimitiveSlowCase:
+ callSlowPath(_llint_slow_path_to_primitive)
+ dispatch(3)
+
+
+_llint_op_next_pname:
+ traceExecution()
+ loadis 24[PB, PC, 8], t1
+ loadis 32[PB, PC, 8], t2
+ assertNotConstant(t1)
+ assertNotConstant(t2)
+ loadi PayloadOffset[cfr, t1, 8], t0
+ bieq t0, PayloadOffset[cfr, t2, 8], .opNextPnameEnd
+ loadis 40[PB, PC, 8], t2
+ assertNotConstant(t2)
+ loadp [cfr, t2, 8], t2
+ loadp JSPropertyNameIterator::m_jsStrings[t2], t3
+ loadp [t3, t0, 8], t3
+ addi 1, t0
+ storei t0, PayloadOffset[cfr, t1, 8]
+ loadis 8[PB, PC, 8], t1
+ storep t3, [cfr, t1, 8]
+ loadis 16[PB, PC, 8], t3
+ assertNotConstant(t3)
+ loadp [cfr, t3, 8], t3
+ loadp JSCell::m_structure[t3], t1
+ bpneq t1, JSPropertyNameIterator::m_cachedStructure[t2], .opNextPnameSlow
+ loadp JSPropertyNameIterator::m_cachedPrototypeChain[t2], t0
+ loadp StructureChain::m_vector[t0], t0
+ btpz [t0], .opNextPnameTarget
+.opNextPnameCheckPrototypeLoop:
+ bpeq Structure::m_prototype[t1], ValueNull, .opNextPnameSlow
+ loadp Structure::m_prototype[t1], t2
+ loadp JSCell::m_structure[t2], t1
+ bpneq t1, [t0], .opNextPnameSlow
+ addp 8, t0
+ btpnz [t0], .opNextPnameCheckPrototypeLoop
+.opNextPnameTarget:
+ dispatchInt(48[PB, PC, 8])
+
+.opNextPnameEnd:
+ dispatch(7)
+
+.opNextPnameSlow:
+ callSlowPath(_llint_slow_path_next_pname) # This either keeps the PC where it was (causing us to loop) or sets it to target.
+ dispatch(0)
+
+
+_llint_op_catch:
+ # This is where we end up from the JIT's throw trampoline (because the
+ # machine code return address will be set to _llint_op_catch), and from
+ # the interpreter's throw trampoline (see _llint_throw_trampoline).
+ # The JIT throwing protocol calls for the cfr to be in t0. The throwing
+ # code must have known that we were throwing to the interpreter, and have
+ # set JSGlobalData::targetInterpreterPCForThrow.
+ move t0, cfr
+ loadp CodeBlock[cfr], PB
+ loadp CodeBlock::m_instructions[PB], PB
+ loadp JITStackFrame::globalData[sp], t3
+ loadp JSGlobalData::targetInterpreterPCForThrow[t3], PC
+ subp PB, PC
+ urshiftp 3, PC
+ loadp JSGlobalData::exception[t3], t0
+ storep 0, JSGlobalData::exception[t3]
+ loadis 8[PB, PC, 8], t2
+ storep t0, [cfr, t2, 8]
+ traceExecution()
+ dispatch(2)
+
+
+_llint_op_jsr:
+ traceExecution()
+ loadis 8[PB, PC, 8], t0
+ addi 3, PC, t1
+ storei t1, [cfr, t0, 8]
+ dispatchInt(16[PB, PC, 8])
+
+
+_llint_op_sret:
+ traceExecution()
+ loadis 8[PB, PC, 8], t0
+ loadi [cfr, t0, 8], PC
+ dispatch(0)
+
+
+_llint_op_end:
+ traceExecution()
+ checkSwitchToJITForEpilogue()
+ loadis 8[PB, PC, 8], t0
+ assertNotConstant(t0)
+ loadp [cfr, t0, 8], t0
+ doReturn()
+
+
+_llint_throw_from_slow_path_trampoline:
+    # When we throw from the interpreter (i.e. from LLIntSlowPaths), the
+    # throw target is not necessarily interpreted code, so we come here.
+ # This essentially emulates the JIT's throwing protocol.
+ loadp JITStackFrame::globalData[sp], t1
+ loadp JSGlobalData::callFrameForThrow[t1], t0
+ jmp JSGlobalData::targetMachinePCForThrow[t1]
+
+
+_llint_throw_during_call_trampoline:
+ preserveReturnAddressAfterCall(t2)
+ loadp JITStackFrame::globalData[sp], t1
+ loadp JSGlobalData::callFrameForThrow[t1], t0
+ jmp JSGlobalData::targetMachinePCForThrow[t1]
+
+
+macro nativeCallTrampoline(executableOffsetToFunction)
+ storep 0, CodeBlock[cfr]
+ loadp CallerFrame[cfr], t0
+ loadp ScopeChain[t0], t1
+ storep t1, ScopeChain[cfr]
+ peek 0, t1
+ storep t1, ReturnPC[cfr]
+ move cfr, t5 # t5 = rdi
+ subp 16 - 8, sp
+ loadp Callee[cfr], t4 # t4 = rsi
+ loadp JSFunction::m_executable[t4], t1
+ move t0, cfr # Restore cfr to avoid loading from stack
+ call executableOffsetToFunction[t1]
+ addp 16 - 8, sp
+ loadp JITStackFrame::globalData + 8[sp], t3
+ btpnz JSGlobalData::exception[t3], .exception
+ ret
+.exception:
+ preserveReturnAddressAfterCall(t1)
+ callSlowPath(_llint_throw_from_native_call)
+ jmp _llint_throw_from_slow_path_trampoline
+end
+