path: root/Source/JavaScriptCore/llint
author     Allan Sandfeld Jensen <allan.jensen@digia.com>      2013-09-13 12:51:20 +0200
committer  The Qt Project <gerrit-noreply@qt-project.org>      2013-09-19 20:50:05 +0200
commit     d441d6f39bb846989d95bcf5caf387b42414718d (patch)
tree       e367e64a75991c554930278175d403c072de6bb8 /Source/JavaScriptCore/llint
parent     0060b2994c07842f4c59de64b5e3e430525c4b90 (diff)
download   qtwebkit-d441d6f39bb846989d95bcf5caf387b42414718d.tar.gz
Import Qt5x2 branch of QtWebkit for Qt 5.2
Importing a new snapshot of webkit.

Change-Id: I2d01ad12cdc8af8cb015387641120a9d7ea5f10c
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@digia.com>
Diffstat (limited to 'Source/JavaScriptCore/llint')
-rw-r--r--  Source/JavaScriptCore/llint/LLIntCLoop.h                  |    2
-rw-r--r--  Source/JavaScriptCore/llint/LLIntData.cpp                 |   15
-rw-r--r--  Source/JavaScriptCore/llint/LLIntData.h                   |    8
-rw-r--r--  Source/JavaScriptCore/llint/LLIntEntrypoints.cpp          |   26
-rw-r--r--  Source/JavaScriptCore/llint/LLIntEntrypoints.h            |   16
-rw-r--r--  Source/JavaScriptCore/llint/LLIntExceptions.cpp           |   35
-rw-r--r--  Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h       |   22
-rw-r--r--  Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp     |    2
-rw-r--r--  Source/JavaScriptCore/llint/LLIntSlowPaths.cpp            |  229
-rw-r--r--  Source/JavaScriptCore/llint/LLIntSlowPaths.h              |    7
-rw-r--r--  Source/JavaScriptCore/llint/LLIntThunks.cpp               |   28
-rw-r--r--  Source/JavaScriptCore/llint/LLIntThunks.h                 |   14
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter.asm       |  140
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter.cpp       |   15
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm  |  284
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter64.asm     |  231
16 files changed, 552 insertions, 522 deletions
diff --git a/Source/JavaScriptCore/llint/LLIntCLoop.h b/Source/JavaScriptCore/llint/LLIntCLoop.h
index 3a9c77b6d..231e52f66 100644
--- a/Source/JavaScriptCore/llint/LLIntCLoop.h
+++ b/Source/JavaScriptCore/llint/LLIntCLoop.h
@@ -29,7 +29,7 @@
#if ENABLE(LLINT_C_LOOP)
#include "CodeSpecializationKind.h"
-#include "JSValue.h"
+#include "JSCJSValue.h"
#include "MacroAssemblerCodeRef.h"
#include "Opcode.h"
#include "Register.h"
diff --git a/Source/JavaScriptCore/llint/LLIntData.cpp b/Source/JavaScriptCore/llint/LLIntData.cpp
index 90faff2ee..079bf3cec 100644
--- a/Source/JavaScriptCore/llint/LLIntData.cpp
+++ b/Source/JavaScriptCore/llint/LLIntData.cpp
@@ -62,9 +62,9 @@ void initialize()
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmissing-noreturn"
#endif
-void Data::performAssertions(JSGlobalData& globalData)
+void Data::performAssertions(VM& vm)
{
- UNUSED_PARAM(globalData);
+ UNUSED_PARAM(vm);
// Assertions to match LowLevelInterpreter.asm. If you change any of this code, be
// prepared to change LowLevelInterpreter.asm as well!!
@@ -107,11 +107,6 @@ void Data::performAssertions(JSGlobalData& globalData)
ASSERT(MasqueradesAsUndefined == 1);
ASSERT(ImplementsHasInstance == 2);
ASSERT(ImplementsDefaultHasInstance == 8);
-#if USE(JSVALUE64)
- ASSERT(&globalData.heap.allocatorForObjectWithoutDestructor(JSObject::allocationSize(INLINE_STORAGE_CAPACITY)) - &globalData.heap.firstAllocatorWithoutDestructors() == 1);
-#else
- ASSERT(&globalData.heap.allocatorForObjectWithoutDestructor(JSObject::allocationSize(INLINE_STORAGE_CAPACITY)) - &globalData.heap.firstAllocatorWithoutDestructors() == 3);
-#endif
ASSERT(FirstConstantRegisterIndex == 0x40000000);
ASSERT(GlobalCode == 0);
ASSERT(EvalCode == 1);
@@ -121,11 +116,11 @@ void Data::performAssertions(JSGlobalData& globalData)
#if !ASSERT_DISABLED
Vector<int> testVector;
testVector.resize(42);
- ASSERT(bitwise_cast<size_t*>(&testVector)[0] == 42);
- ASSERT(bitwise_cast<int**>(&testVector)[1] == testVector.begin());
+ ASSERT(bitwise_cast<uint32_t*>(&testVector)[sizeof(void*)/sizeof(uint32_t) + 1] == 42);
+ ASSERT(bitwise_cast<int**>(&testVector)[0] == testVector.begin());
#endif
- ASSERT(StringImpl::s_hashFlag8BitBuffer == 64);
+ ASSERT(StringImpl::s_hashFlag8BitBuffer == 32);
}
#if COMPILER(CLANG)
#pragma clang diagnostic pop
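
Note: the rewritten Vector assertions above, together with the VectorBufferOffset/VectorSizeOffset changes in LowLevelInterpreter.asm further down, hard-code a new WTF::Vector field order: buffer pointer first, then 32-bit capacity and size. A standalone sketch of the same layout check, using a hypothetical mirror struct instead of the real Vector header:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Hypothetical mirror of the field order the new assertions assume for
// WTF::Vector: buffer pointer at offset 0, then 32-bit capacity and size.
struct VectorLayoutMirror {
    void* buffer;       // VectorBufferOffset == 0 in LowLevelInterpreter.asm
    uint32_t capacity;
    uint32_t size;      // VectorSizeOffset == 12 on 64-bit, 8 on 32-bit
};

int main()
{
    // Same index arithmetic as the uint32_t* cast in Data::performAssertions():
    // the size field sits one 32-bit slot past the pointer-sized buffer field.
    assert(offsetof(VectorLayoutMirror, buffer) == 0);
    assert(offsetof(VectorLayoutMirror, size)
           == (sizeof(void*) / sizeof(uint32_t) + 1) * sizeof(uint32_t));
    return 0;
}

If WTF::Vector's fields are ever reordered again, this arithmetic and the offline-asm constants have to change in lockstep, which is what the "This must match wtf/Vector.h" comment in the .asm file is warning about.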
diff --git a/Source/JavaScriptCore/llint/LLIntData.h b/Source/JavaScriptCore/llint/LLIntData.h
index 3b3b6027f..8ed2bceda 100644
--- a/Source/JavaScriptCore/llint/LLIntData.h
+++ b/Source/JavaScriptCore/llint/LLIntData.h
@@ -26,13 +26,13 @@
#ifndef LLIntData_h
#define LLIntData_h
-#include "JSValue.h"
+#include "JSCJSValue.h"
#include "Opcode.h"
#include <wtf/Platform.h>
namespace JSC {
-class JSGlobalData;
+class VM;
struct Instruction;
#if ENABLE(LLINT_C_LOOP)
@@ -47,7 +47,7 @@ namespace LLInt {
class Data {
public:
- static void performAssertions(JSGlobalData&);
+ static void performAssertions(VM&);
private:
static Instruction* s_exceptionInstructions;
@@ -96,7 +96,7 @@ ALWAYS_INLINE void* getCodePtr(OpcodeID id)
class Data {
public:
- static void performAssertions(JSGlobalData&) { }
+ static void performAssertions(VM&) { }
};
#if COMPILER(CLANG)
diff --git a/Source/JavaScriptCore/llint/LLIntEntrypoints.cpp b/Source/JavaScriptCore/llint/LLIntEntrypoints.cpp
index be79134b7..c044568b5 100644
--- a/Source/JavaScriptCore/llint/LLIntEntrypoints.cpp
+++ b/Source/JavaScriptCore/llint/LLIntEntrypoints.cpp
@@ -29,7 +29,7 @@
#if ENABLE(LLINT)
#include "JITCode.h"
-#include "JSGlobalData.h"
+#include "VM.h"
#include "JSObject.h"
#include "LLIntThunks.h"
#include "LowLevelInterpreter.h"
@@ -37,9 +37,9 @@
namespace JSC { namespace LLInt {
-void getFunctionEntrypoint(JSGlobalData& globalData, CodeSpecializationKind kind, JITCode& jitCode, MacroAssemblerCodePtr& arityCheck)
+void getFunctionEntrypoint(VM& vm, CodeSpecializationKind kind, JITCode& jitCode, MacroAssemblerCodePtr& arityCheck)
{
- if (!globalData.canUseJIT()) {
+ if (!vm.canUseJIT()) {
if (kind == CodeForCall) {
jitCode = JITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_function_for_call_prologue), JITCode::InterpreterThunk);
arityCheck = MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_call_arity_check);
@@ -54,36 +54,36 @@ void getFunctionEntrypoint(JSGlobalData& globalData, CodeSpecializationKind kind
#if ENABLE(JIT)
if (kind == CodeForCall) {
- jitCode = JITCode(globalData.getCTIStub(functionForCallEntryThunkGenerator), JITCode::InterpreterThunk);
- arityCheck = globalData.getCTIStub(functionForCallArityCheckThunkGenerator).code();
+ jitCode = JITCode(vm.getCTIStub(functionForCallEntryThunkGenerator), JITCode::InterpreterThunk);
+ arityCheck = vm.getCTIStub(functionForCallArityCheckThunkGenerator).code();
return;
}
ASSERT(kind == CodeForConstruct);
- jitCode = JITCode(globalData.getCTIStub(functionForConstructEntryThunkGenerator), JITCode::InterpreterThunk);
- arityCheck = globalData.getCTIStub(functionForConstructArityCheckThunkGenerator).code();
+ jitCode = JITCode(vm.getCTIStub(functionForConstructEntryThunkGenerator), JITCode::InterpreterThunk);
+ arityCheck = vm.getCTIStub(functionForConstructArityCheckThunkGenerator).code();
#endif // ENABLE(JIT)
}
-void getEvalEntrypoint(JSGlobalData& globalData, JITCode& jitCode)
+void getEvalEntrypoint(VM& vm, JITCode& jitCode)
{
- if (!globalData.canUseJIT()) {
+ if (!vm.canUseJIT()) {
jitCode = JITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_eval_prologue), JITCode::InterpreterThunk);
return;
}
#if ENABLE(JIT)
- jitCode = JITCode(globalData.getCTIStub(evalEntryThunkGenerator), JITCode::InterpreterThunk);
+ jitCode = JITCode(vm.getCTIStub(evalEntryThunkGenerator), JITCode::InterpreterThunk);
#endif
}
-void getProgramEntrypoint(JSGlobalData& globalData, JITCode& jitCode)
+void getProgramEntrypoint(VM& vm, JITCode& jitCode)
{
- if (!globalData.canUseJIT()) {
+ if (!vm.canUseJIT()) {
jitCode = JITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_program_prologue), JITCode::InterpreterThunk);
return;
}
#if ENABLE(JIT)
- jitCode = JITCode(globalData.getCTIStub(programEntryThunkGenerator), JITCode::InterpreterThunk);
+ jitCode = JITCode(vm.getCTIStub(programEntryThunkGenerator), JITCode::InterpreterThunk);
#endif
}
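
Note: after the JSGlobalData-to-VM rename, all three entrypoint helpers in this file keep the same shape: when the VM cannot use the JIT, the returned JITCode points straight at the matching LLInt prologue; otherwise it wraps a small per-VM thunk that jumps into that prologue. A rough sketch of that selection with placeholder types (the real code returns JITCode/MacroAssemblerCodeRef objects):

#include <cassert>

// Placeholder types; only the selection logic mirrors getProgramEntrypoint().
struct VM {
    bool jitAllowed;
    bool canUseJIT() const { return jitAllowed; }
};

enum class Entry { DirectLLIntPrologue, JITThunkIntoLLInt };

Entry chooseProgramEntrypoint(const VM& vm)
{
    if (!vm.canUseJIT())
        return Entry::DirectLLIntPrologue; // createLLIntCodeRef(llint_program_prologue)
    return Entry::JITThunkIntoLLInt;       // vm.getCTIStub(programEntryThunkGenerator)
}

int main()
{
    VM jitless = { false };
    VM withJit = { true };
    assert(chooseProgramEntrypoint(jitless) == Entry::DirectLLIntPrologue);
    assert(chooseProgramEntrypoint(withJit) == Entry::JITThunkIntoLLInt);
    return 0;
}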
diff --git a/Source/JavaScriptCore/llint/LLIntEntrypoints.h b/Source/JavaScriptCore/llint/LLIntEntrypoints.h
index dd7c27798..1cecba0a6 100644
--- a/Source/JavaScriptCore/llint/LLIntEntrypoints.h
+++ b/Source/JavaScriptCore/llint/LLIntEntrypoints.h
@@ -36,25 +36,25 @@ namespace JSC {
class EvalCodeBlock;
class JITCode;
-class JSGlobalData;
+class VM;
class MacroAssemblerCodePtr;
class MacroAssemblerCodeRef;
class ProgramCodeBlock;
namespace LLInt {
-void getFunctionEntrypoint(JSGlobalData&, CodeSpecializationKind, JITCode&, MacroAssemblerCodePtr& arityCheck);
-void getEvalEntrypoint(JSGlobalData&, JITCode&);
-void getProgramEntrypoint(JSGlobalData&, JITCode&);
+void getFunctionEntrypoint(VM&, CodeSpecializationKind, JITCode&, MacroAssemblerCodePtr& arityCheck);
+void getEvalEntrypoint(VM&, JITCode&);
+void getProgramEntrypoint(VM&, JITCode&);
-inline void getEntrypoint(JSGlobalData& globalData, EvalCodeBlock*, JITCode& jitCode)
+inline void getEntrypoint(VM& vm, EvalCodeBlock*, JITCode& jitCode)
{
- getEvalEntrypoint(globalData, jitCode);
+ getEvalEntrypoint(vm, jitCode);
}
-inline void getEntrypoint(JSGlobalData& globalData, ProgramCodeBlock*, JITCode& jitCode)
+inline void getEntrypoint(VM& vm, ProgramCodeBlock*, JITCode& jitCode)
{
- getProgramEntrypoint(globalData, jitCode);
+ getProgramEntrypoint(vm, jitCode);
}
} } // namespace JSC::LLInt
diff --git a/Source/JavaScriptCore/llint/LLIntExceptions.cpp b/Source/JavaScriptCore/llint/LLIntExceptions.cpp
index 17c15aa51..d88c16e7e 100644
--- a/Source/JavaScriptCore/llint/LLIntExceptions.cpp
+++ b/Source/JavaScriptCore/llint/LLIntExceptions.cpp
@@ -34,6 +34,7 @@
#include "JITExceptions.h"
#include "LLIntCommon.h"
#include "LowLevelInterpreter.h"
+#include "Operations.h"
namespace JSC { namespace LLInt {
@@ -47,14 +48,14 @@ static void fixupPCforExceptionIfNeeded(ExecState* exec)
void interpreterThrowInCaller(ExecState* exec, ReturnAddressPtr pc)
{
- JSGlobalData* globalData = &exec->globalData();
- NativeCallFrameTracer tracer(globalData, exec);
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
#if LLINT_SLOW_PATH_TRACING
- dataLog("Throwing exception ", globalData->exception, ".\n");
+ dataLog("Throwing exception ", vm->exception, ".\n");
#endif
fixupPCforExceptionIfNeeded(exec);
genericThrow(
- globalData, exec, globalData->exception,
+ vm, exec, vm->exception,
exec->codeBlock()->bytecodeOffset(exec, pc));
}
@@ -64,29 +65,31 @@ Instruction* returnToThrowForThrownException(ExecState* exec)
return LLInt::exceptionInstructions();
}
+static void doThrow(ExecState* exec, Instruction* pc)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+ fixupPCforExceptionIfNeeded(exec);
+ genericThrow(vm, exec, vm->exception, pc - exec->codeBlock()->instructions().begin());
+}
+
Instruction* returnToThrow(ExecState* exec, Instruction* pc)
{
- JSGlobalData* globalData = &exec->globalData();
- NativeCallFrameTracer tracer(globalData, exec);
#if LLINT_SLOW_PATH_TRACING
- dataLog("Throwing exception ", globalData->exception, " (returnToThrow).\n");
+ VM* vm = &exec->vm();
+ dataLog("Throwing exception ", vm->exception, " (returnToThrow).\n");
#endif
- fixupPCforExceptionIfNeeded(exec);
- genericThrow(globalData, exec, globalData->exception, pc - exec->codeBlock()->instructions().begin());
-
+ doThrow(exec, pc);
return LLInt::exceptionInstructions();
}
void* callToThrow(ExecState* exec, Instruction* pc)
{
- JSGlobalData* globalData = &exec->globalData();
- NativeCallFrameTracer tracer(globalData, exec);
#if LLINT_SLOW_PATH_TRACING
- dataLog("Throwing exception ", globalData->exception, " (callToThrow).\n");
+ VM* vm = &exec->vm();
+ dataLog("Throwing exception ", vm->exception, " (callToThrow).\n");
#endif
- fixupPCforExceptionIfNeeded(exec);
- genericThrow(globalData, exec, globalData->exception, pc - exec->codeBlock()->instructions().begin());
-
+ doThrow(exec, pc);
return LLInt::getCodePtr(llint_throw_during_call_trampoline);
}
diff --git a/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h b/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h
index 157521373..bad62ddf8 100644
--- a/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h
+++ b/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h
@@ -35,10 +35,13 @@
#if ENABLE(LLINT_C_LOOP)
#define OFFLINE_ASM_C_LOOP 1
#define OFFLINE_ASM_X86 0
+#define OFFLINE_ASM_ARM 0
#define OFFLINE_ASM_ARMv7 0
+#define OFFLINE_ASM_ARMv7_TRADITIONAL 0
#define OFFLINE_ASM_X86_64 0
#define OFFLINE_ASM_ARMv7s 0
#define OFFLINE_ASM_MIPS 0
+#define OFFLINE_ASM_SH4 0
#else // !ENABLE(LLINT_C_LOOP)
@@ -62,6 +65,19 @@
#define OFFLINE_ASM_ARMv7 0
#endif
+#if CPU(ARM_TRADITIONAL)
+#if WTF_ARM_ARCH_AT_LEAST(7)
+#define OFFLINE_ASM_ARMv7_TRADITIONAL 1
+#define OFFLINE_ASM_ARM 0
+#else
+#define OFFLINE_ASM_ARM 1
+#define OFFLINE_ASM_ARMv7_TRADITIONAL 0
+#endif
+#else
+#define OFFLINE_ASM_ARMv7_TRADITIONAL 0
+#define OFFLINE_ASM_ARM 0
+#endif
+
#if CPU(X86_64)
#define OFFLINE_ASM_X86_64 1
#else
@@ -74,6 +90,12 @@
#define OFFLINE_ASM_MIPS 0
#endif
+#if CPU(SH4)
+#define OFFLINE_ASM_SH4 1
+#else
+#define OFFLINE_ASM_SH4 0
+#endif
+
#endif // !ENABLE(LLINT_C_LOOP)
#if USE(JSVALUE64)
diff --git a/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp b/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp
index 0087fe5ff..0cd2f68a1 100644
--- a/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp
+++ b/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp
@@ -34,7 +34,7 @@
#include "JSArray.h"
#include "JSCell.h"
#include "JSFunction.h"
-#include "JSGlobalData.h"
+#include "VM.h"
#include "JSGlobalObject.h"
#include "JSObject.h"
#include "JSPropertyNameIterator.h"
diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
index 0bd19d46f..36a43f586 100644
--- a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -38,22 +38,25 @@
#include "JIT.h"
#include "JITDriver.h"
#include "JSActivation.h"
+#include "JSCJSValue.h"
#include "JSGlobalObjectFunctions.h"
#include "JSNameScope.h"
#include "JSPropertyNameIterator.h"
#include "JSString.h"
-#include "JSValue.h"
#include "JSWithScope.h"
#include "LLIntCommon.h"
#include "LLIntExceptions.h"
#include "LowLevelInterpreter.h"
+#include "ObjectConstructor.h"
#include "Operations.h"
+#include "StructureRareDataInlines.h"
+#include <wtf/StringPrintStream.h>
namespace JSC { namespace LLInt {
#define LLINT_BEGIN_NO_SET_PC() \
- JSGlobalData& globalData = exec->globalData(); \
- NativeCallFrameTracer tracer(&globalData, exec)
+ VM& vm = exec->vm(); \
+ NativeCallFrameTracer tracer(&vm, exec)
#ifndef NDEBUG
#define LLINT_SET_PC_FOR_STUBS() do { \
@@ -80,13 +83,13 @@ namespace JSC { namespace LLInt {
#define LLINT_END_IMPL() LLINT_RETURN_TWO(pc, exec)
#define LLINT_THROW(exceptionToThrow) do { \
- globalData.exception = (exceptionToThrow); \
+ vm.exception = (exceptionToThrow); \
pc = returnToThrow(exec, pc); \
LLINT_END_IMPL(); \
} while (false)
#define LLINT_CHECK_EXCEPTION() do { \
- if (UNLIKELY(globalData.exception)) { \
+ if (UNLIKELY(vm.exception)) { \
pc = returnToThrow(exec, pc); \
LLINT_END_IMPL(); \
} \
@@ -140,14 +143,14 @@ namespace JSC { namespace LLInt {
#define LLINT_CALL_THROW(exec, pc, exceptionToThrow) do { \
ExecState* __ct_exec = (exec); \
Instruction* __ct_pc = (pc); \
- globalData.exception = (exceptionToThrow); \
+ vm.exception = (exceptionToThrow); \
LLINT_CALL_END_IMPL(__ct_exec, callToThrow(__ct_exec, __ct_pc)); \
} while (false)
#define LLINT_CALL_CHECK_EXCEPTION(exec, pc) do { \
ExecState* __cce_exec = (exec); \
Instruction* __cce_pc = (pc); \
- if (UNLIKELY(globalData.exception)) \
+ if (UNLIKELY(vm.exception)) \
LLINT_CALL_END_IMPL(__cce_exec, callToThrow(__cce_exec, __cce_pc)); \
} while (false)
@@ -166,7 +169,7 @@ extern "C" SlowPathReturnType llint_trace_operand(ExecState* exec, Instruction*
exec->codeBlock(),
exec,
static_cast<intptr_t>(pc - exec->codeBlock()->instructions().begin()),
- exec->globalData().interpreter->getOpcodeID(pc[0].u.opcode),
+ exec->vm().interpreter->getOpcodeID(pc[0].u.opcode),
fromWhere,
operand,
pc[operand].u.operand);
@@ -184,17 +187,18 @@ extern "C" SlowPathReturnType llint_trace_value(ExecState* exec, Instruction* pc
EncodedJSValue asValue;
} u;
u.asValue = JSValue::encode(value);
- dataLogF("%p / %p: executing bc#%zu, op#%u: Trace(%d): %d: %d: %08x:%08x: %s\n",
- exec->codeBlock(),
- exec,
- static_cast<intptr_t>(pc - exec->codeBlock()->instructions().begin()),
- exec->globalData().interpreter->getOpcodeID(pc[0].u.opcode),
- fromWhere,
- operand,
- pc[operand].u.operand,
- u.bits.tag,
- u.bits.payload,
- value.description());
+ dataLogF(
+ "%p / %p: executing bc#%zu, op#%u: Trace(%d): %d: %d: %08x:%08x: %s\n",
+ exec->codeBlock(),
+ exec,
+ static_cast<intptr_t>(pc - exec->codeBlock()->instructions().begin()),
+ exec->vm().interpreter->getOpcodeID(pc[0].u.opcode),
+ fromWhere,
+ operand,
+ pc[operand].u.operand,
+ u.bits.tag,
+ u.bits.payload,
+ toCString(value).data());
LLINT_END_IMPL();
}
@@ -245,9 +249,9 @@ LLINT_SLOW_PATH_DECL(trace)
exec->codeBlock(),
exec,
static_cast<intptr_t>(pc - exec->codeBlock()->instructions().begin()),
- opcodeNames[exec->globalData().interpreter->getOpcodeID(pc[0].u.opcode)],
+ opcodeNames[exec->vm().interpreter->getOpcodeID(pc[0].u.opcode)],
exec->scope());
- if (exec->globalData().interpreter->getOpcodeID(pc[0].u.opcode) == op_ret) {
+ if (exec->vm().interpreter->getOpcodeID(pc[0].u.opcode) == op_ret) {
dataLogF("Will be returning to %p\n", exec->returnPC().value());
dataLogF("The new cfr will be %p\n", exec->callerFrame());
}
@@ -260,7 +264,7 @@ LLINT_SLOW_PATH_DECL(special_trace)
exec->codeBlock(),
exec,
static_cast<intptr_t>(pc - exec->codeBlock()->instructions().begin()),
- exec->globalData().interpreter->getOpcodeID(pc[0].u.opcode),
+ exec->vm().interpreter->getOpcodeID(pc[0].u.opcode),
exec->returnPC().value());
LLINT_END_IMPL();
}
@@ -269,7 +273,7 @@ LLINT_SLOW_PATH_DECL(special_trace)
inline bool shouldJIT(ExecState* exec)
{
// You can modify this to turn off JITting without rebuilding the world.
- return exec->globalData().canUseJIT();
+ return exec->vm().canUseJIT();
}
// Returns true if we should try to OSR.
@@ -305,7 +309,7 @@ inline bool jitCompileAndSetHeuristics(CodeBlock* codeBlock, ExecState* exec)
codeBlock->jitSoon();
return true;
}
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
return false;
}
@@ -313,8 +317,7 @@ enum EntryKind { Prologue, ArityCheck };
static SlowPathReturnType entryOSR(ExecState* exec, Instruction*, CodeBlock* codeBlock, const char *name, EntryKind kind)
{
#if ENABLE(JIT_VERBOSE_OSR)
- dataLogF("%p: Entered %s with executeCounter = %s\n", codeBlock, name,
- codeBlock->llintExecuteCounter().status());
+ dataLog(*codeBlock, ": Entered ", name, " with executeCounter = ", codeBlock->llintExecuteCounter(), "\n");
#else
UNUSED_PARAM(name);
#endif
@@ -362,8 +365,7 @@ LLINT_SLOW_PATH_DECL(loop_osr)
CodeBlock* codeBlock = exec->codeBlock();
#if ENABLE(JIT_VERBOSE_OSR)
- dataLogF("%p: Entered loop_osr with executeCounter = %s\n", codeBlock,
- codeBlock->llintExecuteCounter().status());
+ dataLog(*codeBlock, ": Entered loop_osr with executeCounter = ", codeBlock->llintExecuteCounter(), "\n");
#endif
if (!shouldJIT(exec)) {
@@ -378,7 +380,7 @@ LLINT_SLOW_PATH_DECL(loop_osr)
Vector<BytecodeAndMachineOffset> map;
codeBlock->jitCodeMap()->decode(map);
- BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(map.begin(), map.size(), pc - codeBlock->instructions().begin());
+ BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(map, map.size(), pc - codeBlock->instructions().begin(), BytecodeAndMachineOffset::getBytecodeIndex);
ASSERT(mapping);
ASSERT(mapping->m_bytecodeIndex == static_cast<unsigned>(pc - codeBlock->instructions().begin()));
@@ -393,8 +395,7 @@ LLINT_SLOW_PATH_DECL(replace)
CodeBlock* codeBlock = exec->codeBlock();
#if ENABLE(JIT_VERBOSE_OSR)
- dataLogF("%p: Entered replace with executeCounter = %s\n", codeBlock,
- codeBlock->llintExecuteCounter().status());
+ dataLog(*codeBlock, ": Entered replace with executeCounter = ", codeBlock->llintExecuteCounter(), "\n");
#endif
if (shouldJIT(exec))
@@ -413,13 +414,13 @@ LLINT_SLOW_PATH_DECL(stack_check)
dataLogF("CodeBlock = %p.\n", exec->codeBlock());
dataLogF("Num callee registers = %u.\n", exec->codeBlock()->m_numCalleeRegisters);
dataLogF("Num vars = %u.\n", exec->codeBlock()->m_numVars);
- dataLogF("Current end is at %p.\n", exec->globalData().interpreter->stack().end());
+ dataLogF("Current end is at %p.\n", exec->vm().interpreter->stack().end());
#endif
- ASSERT(&exec->registers()[exec->codeBlock()->m_numCalleeRegisters] > exec->globalData().interpreter->stack().end());
- if (UNLIKELY(!globalData.interpreter->stack().grow(&exec->registers()[exec->codeBlock()->m_numCalleeRegisters]))) {
+ ASSERT(&exec->registers()[exec->codeBlock()->m_numCalleeRegisters] > exec->vm().interpreter->stack().end());
+ if (UNLIKELY(!vm.interpreter->stack().grow(&exec->registers()[exec->codeBlock()->m_numCalleeRegisters]))) {
ReturnAddressPtr returnPC = exec->returnPC();
exec = exec->callerFrame();
- globalData.exception = createStackOverflowError(exec);
+ vm.exception = createStackOverflowError(exec);
interpreterThrowInCaller(exec, returnPC);
pc = returnToThrowForThrownException(exec);
}
@@ -429,11 +430,11 @@ LLINT_SLOW_PATH_DECL(stack_check)
LLINT_SLOW_PATH_DECL(slow_path_call_arityCheck)
{
LLINT_BEGIN();
- ExecState* newExec = CommonSlowPaths::arityCheckFor(exec, &globalData.interpreter->stack(), CodeForCall);
+ ExecState* newExec = CommonSlowPaths::arityCheckFor(exec, &vm.interpreter->stack(), CodeForCall);
if (!newExec) {
ReturnAddressPtr returnPC = exec->returnPC();
exec = exec->callerFrame();
- globalData.exception = createStackOverflowError(exec);
+ vm.exception = createStackOverflowError(exec);
interpreterThrowInCaller(exec, returnPC);
LLINT_RETURN_TWO(bitwise_cast<void*>(static_cast<uintptr_t>(1)), exec);
}
@@ -443,11 +444,11 @@ LLINT_SLOW_PATH_DECL(slow_path_call_arityCheck)
LLINT_SLOW_PATH_DECL(slow_path_construct_arityCheck)
{
LLINT_BEGIN();
- ExecState* newExec = CommonSlowPaths::arityCheckFor(exec, &globalData.interpreter->stack(), CodeForConstruct);
+ ExecState* newExec = CommonSlowPaths::arityCheckFor(exec, &vm.interpreter->stack(), CodeForConstruct);
if (!newExec) {
ReturnAddressPtr returnPC = exec->returnPC();
exec = exec->callerFrame();
- globalData.exception = createStackOverflowError(exec);
+ vm.exception = createStackOverflowError(exec);
interpreterThrowInCaller(exec, returnPC);
LLINT_RETURN_TWO(bitwise_cast<void*>(static_cast<uintptr_t>(1)), exec);
}
@@ -460,7 +461,7 @@ LLINT_SLOW_PATH_DECL(slow_path_create_activation)
#if LLINT_SLOW_PATH_TRACING
dataLogF("Creating an activation, exec = %p!\n", exec);
#endif
- JSActivation* activation = JSActivation::create(globalData, exec, exec->codeBlock());
+ JSActivation* activation = JSActivation::create(vm, exec, exec->codeBlock());
exec->setScope(activation);
LLINT_RETURN(JSValue(activation));
}
@@ -468,7 +469,7 @@ LLINT_SLOW_PATH_DECL(slow_path_create_activation)
LLINT_SLOW_PATH_DECL(slow_path_create_arguments)
{
LLINT_BEGIN();
- JSValue arguments = JSValue(Arguments::create(globalData, exec));
+ JSValue arguments = JSValue(Arguments::create(vm, exec));
LLINT_CHECK_EXCEPTION();
exec->uncheckedR(pc[1].u.operand) = arguments;
exec->uncheckedR(unmodifiedArgumentsRegister(pc[1].u.operand)) = arguments;
@@ -484,8 +485,9 @@ LLINT_SLOW_PATH_DECL(slow_path_create_this)
ConstructData constructData;
ASSERT(constructor->methodTable()->getConstructData(constructor, constructData) == ConstructTypeJS);
#endif
-
- Structure* structure = constructor->cachedInheritorID(exec);
+
+ size_t inlineCapacity = pc[3].u.operand;
+ Structure* structure = constructor->allocationProfile(exec, inlineCapacity)->structure();
LLINT_RETURN(constructEmptyObject(exec, structure));
}
@@ -504,7 +506,7 @@ LLINT_SLOW_PATH_DECL(slow_path_convert_this)
LLINT_SLOW_PATH_DECL(slow_path_new_object)
{
LLINT_BEGIN();
- LLINT_RETURN(constructEmptyObject(exec));
+ LLINT_RETURN(constructEmptyObject(exec, pc[3].u.objectAllocationProfile->structure()));
}
LLINT_SLOW_PATH_DECL(slow_path_new_array)
@@ -531,7 +533,7 @@ LLINT_SLOW_PATH_DECL(slow_path_new_regexp)
RegExp* regExp = exec->codeBlock()->regexp(pc[2].u.operand);
if (!regExp->isValid())
LLINT_THROW(createSyntaxError(exec, "Invalid flag supplied to RegExp constructor."));
- LLINT_RETURN(RegExpObject::create(globalData, exec->lexicalGlobalObject(), exec->lexicalGlobalObject()->regExpStructure(), regExp));
+ LLINT_RETURN(RegExpObject::create(vm, exec->lexicalGlobalObject(), exec->lexicalGlobalObject()->regExpStructure(), regExp));
}
LLINT_SLOW_PATH_DECL(slow_path_not)
@@ -600,23 +602,7 @@ LLINT_SLOW_PATH_DECL(slow_path_pre_dec)
LLINT_RETURN(jsNumber(LLINT_OP(1).jsValue().toNumber(exec) - 1));
}
-LLINT_SLOW_PATH_DECL(slow_path_post_inc)
-{
- LLINT_BEGIN();
- double result = LLINT_OP(2).jsValue().toNumber(exec);
- LLINT_OP(2) = jsNumber(result + 1);
- LLINT_RETURN(jsNumber(result));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_post_dec)
-{
- LLINT_BEGIN();
- double result = LLINT_OP(2).jsValue().toNumber(exec);
- LLINT_OP(2) = jsNumber(result - 1);
- LLINT_RETURN(jsNumber(result));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_to_jsnumber)
+LLINT_SLOW_PATH_DECL(slow_path_to_number)
{
LLINT_BEGIN();
LLINT_RETURN(jsNumber(LLINT_OP_C(2).jsValue().toNumber(exec)));
@@ -745,7 +731,7 @@ LLINT_SLOW_PATH_DECL(slow_path_check_has_instance)
LLINT_RETURN(jsBoolean(baseObject->methodTable()->customHasInstance(baseObject, exec, value)));
}
}
- LLINT_THROW(createInvalidParamError(exec, "instanceof", baseVal));
+ LLINT_THROW(createInvalidParameterError(exec, "instanceof", baseVal));
}
LLINT_SLOW_PATH_DECL(slow_path_instanceof)
@@ -785,10 +771,12 @@ LLINT_SLOW_PATH_DECL(slow_path_resolve)
{
LLINT_BEGIN();
Identifier ident = exec->codeBlock()->identifier(pc[2].u.operand);
- ResolveOperations* operations = exec->codeBlock()->resolveOperations(pc[3].u.operand);
+ ResolveOperations* operations = pc[3].u.resolveOperations;
JSValue result = JSScope::resolve(exec, ident, operations);
ASSERT(operations->size());
- ASSERT(operations == exec->codeBlock()->resolveOperations(pc[3].u.operand));
+ if (operations->isEmpty())
+ LLINT_RETURN_PROFILED(op_resolve, result);
+
switch (operations->data()[0].m_operation) {
case ResolveOperation::GetAndReturnGlobalProperty:
pc[0].u.opcode = LLInt::getOpcode(llint_op_resolve_global_property);
@@ -818,7 +806,7 @@ LLINT_SLOW_PATH_DECL(slow_path_resolve)
LLINT_SLOW_PATH_DECL(slow_path_put_to_base)
{
LLINT_BEGIN();
- PutToBaseOperation* operation = exec->codeBlock()->putToBaseOperation(pc[4].u.operand);
+ PutToBaseOperation* operation = pc[4].u.putToBaseOperation;
JSScope::resolvePut(exec, LLINT_OP_C(1).jsValue(), exec->codeBlock()->identifier(pc[2].u.operand), LLINT_OP_C(3).jsValue(), operation);
switch (operation->m_kind) {
case PutToBaseOperation::VariablePut:
@@ -835,15 +823,21 @@ LLINT_SLOW_PATH_DECL(slow_path_resolve_base)
{
LLINT_BEGIN();
Identifier& ident = exec->codeBlock()->identifier(pc[2].u.operand);
- ResolveOperations* operations = exec->codeBlock()->resolveOperations(pc[4].u.operand);
+ ResolveOperations* operations = pc[4].u.resolveOperations;
JSValue result;
if (pc[3].u.operand) {
- result = JSScope::resolveBase(exec, ident, true, operations, exec->codeBlock()->putToBaseOperation(pc[5].u.operand));
+ result = JSScope::resolveBase(exec, ident, true, operations, pc[5].u.putToBaseOperation);
if (!result)
- LLINT_THROW(globalData.exception);
+ LLINT_THROW(vm.exception);
} else
- result = JSScope::resolveBase(exec, ident, false, operations, exec->codeBlock()->putToBaseOperation(pc[5].u.operand));
+ result = JSScope::resolveBase(exec, ident, false, operations, pc[5].u.putToBaseOperation);
+
ASSERT(operations->size());
+ if (operations->isEmpty()) {
+ LLINT_PROFILE_VALUE(op_resolve_base, result);
+ LLINT_RETURN(result);
+ }
+
switch (operations->data()[0].m_operation) {
case ResolveOperation::ReturnGlobalObjectAsBase:
pc[0].u.opcode = LLInt::getOpcode(llint_op_resolve_base_to_global);
@@ -864,22 +858,11 @@ LLINT_SLOW_PATH_DECL(slow_path_resolve_base)
LLINT_RETURN(result);
}
-LLINT_SLOW_PATH_DECL(slow_path_ensure_property_exists)
-{
- LLINT_BEGIN();
- JSObject* object = asObject(LLINT_OP(1).jsValue());
- PropertySlot slot(object);
- Identifier& ident = exec->codeBlock()->identifier(pc[2].u.operand);
- if (!object->getPropertySlot(exec, ident, slot))
- LLINT_THROW(createErrorForInvalidGlobalAssignment(exec, ident.string()));
- LLINT_END();
-}
-
LLINT_SLOW_PATH_DECL(slow_path_resolve_with_base)
{
LLINT_BEGIN();
- ResolveOperations* operations = exec->codeBlock()->resolveOperations(pc[4].u.operand);
- JSValue result = JSScope::resolveWithBase(exec, exec->codeBlock()->identifier(pc[3].u.operand), &LLINT_OP(1), operations, exec->codeBlock()->putToBaseOperation(pc[5].u.operand));
+ ResolveOperations* operations = pc[4].u.resolveOperations;
+ JSValue result = JSScope::resolveWithBase(exec, exec->codeBlock()->identifier(pc[3].u.operand), &LLINT_OP(1), operations, pc[5].u.putToBaseOperation);
LLINT_CHECK_EXCEPTION();
LLINT_OP(2) = result;
LLINT_PROFILE_VALUE(op_resolve_with_base, result);
@@ -889,7 +872,7 @@ LLINT_SLOW_PATH_DECL(slow_path_resolve_with_base)
LLINT_SLOW_PATH_DECL(slow_path_resolve_with_this)
{
LLINT_BEGIN();
- ResolveOperations* operations = exec->codeBlock()->resolveOperations(pc[4].u.operand);
+ ResolveOperations* operations = pc[4].u.resolveOperations;
JSValue result = JSScope::resolveWithThis(exec, exec->codeBlock()->identifier(pc[3].u.operand), &LLINT_OP(1), operations);
LLINT_CHECK_EXCEPTION();
LLINT_OP(2) = result;
@@ -929,7 +912,7 @@ LLINT_SLOW_PATH_DECL(slow_path_get_by_id)
if (!structure->isUncacheableDictionary()
&& !structure->typeInfo().prohibitsPropertyCaching()) {
pc[4].u.structure.set(
- globalData, codeBlock->ownerExecutable(), structure);
+ vm, codeBlock->ownerExecutable(), structure);
if (isInlineOffset(slot.cachedOffset())) {
pc[0].u.opcode = LLInt::getOpcode(llint_op_get_by_id);
pc[5].u.operand = offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + JSObject::offsetOfInlineStorage();
@@ -976,7 +959,7 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_id)
JSValue baseValue = LLINT_OP_C(1).jsValue();
PutPropertySlot slot(codeBlock->isStrictMode());
if (pc[8].u.operand)
- asObject(baseValue)->putDirect(globalData, ident, LLINT_OP_C(3).jsValue(), slot);
+ asObject(baseValue)->putDirect(vm, ident, LLINT_OP_C(3).jsValue(), slot);
else
baseValue.put(exec, ident, LLINT_OP_C(3).jsValue(), slot);
LLINT_CHECK_EXCEPTION();
@@ -1003,17 +986,17 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_id)
if (normalizePrototypeChain(exec, baseCell) != InvalidPrototypeChain) {
ASSERT(structure->previousID()->isObject());
pc[4].u.structure.set(
- globalData, codeBlock->ownerExecutable(), structure->previousID());
+ vm, codeBlock->ownerExecutable(), structure->previousID());
if (isInlineOffset(slot.cachedOffset()))
pc[5].u.operand = offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + JSObject::offsetOfInlineStorage();
else
pc[5].u.operand = offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue);
pc[6].u.structure.set(
- globalData, codeBlock->ownerExecutable(), structure);
+ vm, codeBlock->ownerExecutable(), structure);
StructureChain* chain = structure->prototypeChain(exec);
ASSERT(chain);
pc[7].u.structureChain.set(
- globalData, codeBlock->ownerExecutable(), chain);
+ vm, codeBlock->ownerExecutable(), chain);
if (pc[8].u.operand) {
if (isInlineOffset(slot.cachedOffset()))
@@ -1030,7 +1013,7 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_id)
}
} else {
pc[4].u.structure.set(
- globalData, codeBlock->ownerExecutable(), structure);
+ vm, codeBlock->ownerExecutable(), structure);
if (isInlineOffset(slot.cachedOffset())) {
pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id);
pc[5].u.operand = offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + JSObject::offsetOfInlineStorage();
@@ -1090,7 +1073,7 @@ LLINT_SLOW_PATH_DECL(slow_path_get_argument_by_val)
LLINT_BEGIN();
JSValue arguments = LLINT_OP(2).jsValue();
if (!arguments) {
- arguments = Arguments::create(globalData, exec);
+ arguments = Arguments::create(vm, exec);
LLINT_CHECK_EXCEPTION();
LLINT_OP(2) = arguments;
exec->uncheckedR(unmodifiedArgumentsRegister(pc[2].u.operand)) = arguments;
@@ -1118,7 +1101,7 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_val)
if (baseValue.isObject()) {
JSObject* object = asObject(baseValue);
if (object->canSetIndexQuickly(i))
- object->setIndexQuickly(globalData, i, value);
+ object->setIndexQuickly(vm, i, value);
else
object->methodTable()->putByIndex(object, exec, i, value, exec->codeBlock()->isStrictMode());
LLINT_END();
@@ -1193,9 +1176,9 @@ LLINT_SLOW_PATH_DECL(slow_path_put_getter_setter)
ASSERT(getter.isObject() || setter.isObject());
if (!getter.isUndefined())
- accessor->setGetter(globalData, asObject(getter));
+ accessor->setGetter(vm, asObject(getter));
if (!setter.isUndefined())
- accessor->setSetter(globalData, asObject(setter));
+ accessor->setSetter(vm, asObject(setter));
baseObj->putDirectAccessor(
exec,
exec->codeBlock()->identifier(pc[2].u.operand),
@@ -1203,18 +1186,6 @@ LLINT_SLOW_PATH_DECL(slow_path_put_getter_setter)
LLINT_END();
}
-LLINT_SLOW_PATH_DECL(slow_path_jmp_scopes)
-{
- LLINT_BEGIN();
- unsigned count = pc[1].u.operand;
- JSScope* tmp = exec->scope();
- while (count--)
- tmp = tmp->next();
- exec->setScope(tmp);
- pc += pc[2].u.operand;
- LLINT_END();
-}
-
LLINT_SLOW_PATH_DECL(slow_path_jtrue)
{
LLINT_BEGIN();
@@ -1345,7 +1316,7 @@ LLINT_SLOW_PATH_DECL(slow_path_new_func_exp)
static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc, JSValue callee, CodeSpecializationKind kind)
{
ExecState* exec = execCallee->callerFrame();
- JSGlobalData& globalData = exec->globalData();
+ VM& vm = exec->vm();
execCallee->setScope(exec->scope());
execCallee->setCodeBlock(0);
@@ -1358,9 +1329,9 @@ static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc,
ASSERT(callType != CallTypeJS);
if (callType == CallTypeHost) {
- NativeCallFrameTracer tracer(&globalData, execCallee);
+ NativeCallFrameTracer tracer(&vm, execCallee);
execCallee->setCallee(asObject(callee));
- globalData.hostCallReturnValue = JSValue::decode(callData.native.function(execCallee));
+ vm.hostCallReturnValue = JSValue::decode(callData.native.function(execCallee));
LLINT_CALL_RETURN(execCallee, pc, LLInt::getCodePtr(getHostCallReturnValue));
}
@@ -1381,9 +1352,9 @@ static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc,
ASSERT(constructType != ConstructTypeJS);
if (constructType == ConstructTypeHost) {
- NativeCallFrameTracer tracer(&globalData, execCallee);
+ NativeCallFrameTracer tracer(&vm, execCallee);
execCallee->setCallee(asObject(callee));
- globalData.hostCallReturnValue = JSValue::decode(constructData.native.function(execCallee));
+ vm.hostCallReturnValue = JSValue::decode(constructData.native.function(execCallee));
LLINT_CALL_RETURN(execCallee, pc, LLInt::getCodePtr(getHostCallReturnValue));
}
@@ -1408,7 +1379,7 @@ inline SlowPathReturnType setUpCall(ExecState* execCallee, Instruction* pc, Code
JSFunction* callee = jsCast<JSFunction*>(calleeAsFunctionCell);
JSScope* scope = callee->scopeUnchecked();
- JSGlobalData& globalData = *scope->globalData();
+ VM& vm = *scope->vm();
execCallee->setScope(scope);
ExecutableBase* executable = callee->executable();
@@ -1429,12 +1400,12 @@ inline SlowPathReturnType setUpCall(ExecState* execCallee, Instruction* pc, Code
codePtr = functionExecutable->jsCodeEntryFor(kind);
}
- if (callLinkInfo) {
+ if (!LLINT_ALWAYS_ACCESS_SLOW && callLinkInfo) {
if (callLinkInfo->isOnList())
callLinkInfo->remove();
ExecState* execCaller = execCallee->callerFrame();
- callLinkInfo->callee.set(globalData, execCaller->codeBlock()->ownerExecutable(), callee);
- callLinkInfo->lastSeenCallee.set(globalData, execCaller->codeBlock()->ownerExecutable(), callee);
+ callLinkInfo->callee.set(vm, execCaller->codeBlock()->ownerExecutable(), callee);
+ callLinkInfo->lastSeenCallee.set(vm, execCaller->codeBlock()->ownerExecutable(), callee);
callLinkInfo->machineCodeTarget = codePtr;
if (codeBlock)
codeBlock->linkIncomingCall(callLinkInfo);
@@ -1486,7 +1457,7 @@ LLINT_SLOW_PATH_DECL(slow_path_call_varargs)
JSValue calleeAsValue = LLINT_OP_C(1).jsValue();
ExecState* execCallee = loadVarargs(
- exec, &globalData.interpreter->stack(),
+ exec, &vm.interpreter->stack(),
LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue(), pc[4].u.operand);
LLINT_CALL_CHECK_EXCEPTION(exec, pc);
@@ -1515,7 +1486,7 @@ LLINT_SLOW_PATH_DECL(slow_path_call_eval)
if (!isHostFunction(calleeAsValue, globalFuncEval))
return setUpCall(execCallee, pc, CodeForCall, calleeAsValue);
- globalData.hostCallReturnValue = eval(execCallee);
+ vm.hostCallReturnValue = eval(execCallee);
LLINT_CALL_RETURN(execCallee, pc, LLInt::getCodePtr(getHostCallReturnValue));
}
@@ -1523,7 +1494,7 @@ LLINT_SLOW_PATH_DECL(slow_path_tear_off_activation)
{
LLINT_BEGIN();
ASSERT(exec->codeBlock()->needsFullScopeChain());
- jsCast<JSActivation*>(LLINT_OP(1).jsValue())->tearOff(globalData);
+ jsCast<JSActivation*>(LLINT_OP(1).jsValue())->tearOff(vm);
LLINT_END();
}
@@ -1625,9 +1596,17 @@ LLINT_SLOW_PATH_DECL(slow_path_throw_static_error)
{
LLINT_BEGIN();
if (pc[2].u.operand)
- LLINT_THROW(createReferenceError(exec, LLINT_OP_C(1).jsValue().toString(exec)->value(exec)));
+ LLINT_THROW(createReferenceError(exec, errorDescriptionForValue(exec, LLINT_OP_C(1).jsValue())->value(exec)));
else
- LLINT_THROW(createTypeError(exec, LLINT_OP_C(1).jsValue().toString(exec)->value(exec)));
+ LLINT_THROW(createTypeError(exec, errorDescriptionForValue(exec, LLINT_OP_C(1).jsValue())->value(exec)));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_handle_watchdog_timer)
+{
+ LLINT_BEGIN_NO_SET_PC();
+ if (UNLIKELY(vm.watchdog.didFire(exec)))
+ LLINT_THROW(createTerminatedExecutionException(&vm));
+ LLINT_RETURN_TWO(0, exec);
}
LLINT_SLOW_PATH_DECL(slow_path_debug)
@@ -1638,7 +1617,7 @@ LLINT_SLOW_PATH_DECL(slow_path_debug)
int lastLine = pc[3].u.operand;
int column = pc[4].u.operand;
- globalData.interpreter->debug(exec, static_cast<DebugHookID>(debugHookID), firstLine, lastLine, column);
+ vm.interpreter->debug(exec, static_cast<DebugHookID>(debugHookID), firstLine, lastLine, column);
LLINT_END();
}
@@ -1646,7 +1625,7 @@ LLINT_SLOW_PATH_DECL(slow_path_debug)
LLINT_SLOW_PATH_DECL(slow_path_profile_will_call)
{
LLINT_BEGIN();
- if (Profiler* profiler = globalData.enabledProfiler())
+ if (LegacyProfiler* profiler = vm.enabledProfiler())
profiler->willExecute(exec, LLINT_OP(1).jsValue());
LLINT_END();
}
@@ -1654,7 +1633,7 @@ LLINT_SLOW_PATH_DECL(slow_path_profile_will_call)
LLINT_SLOW_PATH_DECL(slow_path_profile_did_call)
{
LLINT_BEGIN();
- if (Profiler* profiler = globalData.enabledProfiler())
+ if (LegacyProfiler* profiler = vm.enabledProfiler())
profiler->didExecute(exec, LLINT_OP(1).jsValue());
LLINT_END();
}
@@ -1662,7 +1641,7 @@ LLINT_SLOW_PATH_DECL(slow_path_profile_did_call)
LLINT_SLOW_PATH_DECL(throw_from_native_call)
{
LLINT_BEGIN();
- ASSERT(globalData.exception);
+ ASSERT(vm.exception);
LLINT_END();
}
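
Note: the new slow_path_handle_watchdog_timer, paired with the op_loop_hint change in LowLevelInterpreter.asm below, implements cooperative interruption: loop back-edges poll a flag that a timer sets, and the slow path turns a fired watchdog into a terminated-execution exception. A rough standalone sketch of that pattern, not the actual JSC Watchdog API:

#include <atomic>
#include <cstdio>

// Hypothetical stand-in for JSC's Watchdog: a timer (not shown) sets
// timerDidFire; the interpreter polls it cheaply on every loop back-edge.
struct Watchdog {
    std::atomic<bool> timerDidFire{false};
};

// Fast path, analogous to op_loop_hint checking Watchdog::m_timerDidFire.
inline bool needsSlowPath(const Watchdog& w)
{
    return w.timerDidFire.load(std::memory_order_relaxed);
}

// Slow path, analogous to slow_path_handle_watchdog_timer: decide whether the
// script really exceeded its limit and, if so, signal termination.
bool handleWatchdog(Watchdog& w, bool limitExceeded)
{
    w.timerDidFire.store(false, std::memory_order_relaxed);
    if (limitExceeded) {
        std::puts("terminating script execution");
        return true;   // caller unwinds, like LLINT_THROW(createTerminatedExecutionException(&vm))
    }
    return false;      // false alarm: continue the loop
}

int main()
{
    Watchdog w;
    w.timerDidFire.store(true);
    if (needsSlowPath(w))
        handleWatchdog(w, /*limitExceeded=*/true);
    return 0;
}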
diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.h b/Source/JavaScriptCore/llint/LLIntSlowPaths.h
index 99fbaccfa..dbf68b2f9 100644
--- a/Source/JavaScriptCore/llint/LLIntSlowPaths.h
+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.h
@@ -135,9 +135,7 @@ LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_greater);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_greatereq);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_pre_inc);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_pre_dec);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_post_inc);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_post_dec);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_to_jsnumber);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_to_number);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_negate);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_add);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_mul);
@@ -159,7 +157,6 @@ LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_in);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_resolve);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_to_base);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_resolve_base);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_ensure_property_exists);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_resolve_with_base);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_resolve_with_this);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_init_global_const_check);
@@ -174,7 +171,6 @@ LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_by_val);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_del_by_val);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_by_index);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_getter_setter);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_jmp_scopes);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_jtrue);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_jfalse);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_jless);
@@ -205,6 +201,7 @@ LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_pop_scope);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_push_name_scope);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_throw);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_throw_static_error);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_handle_watchdog_timer);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_debug);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_profile_will_call);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_profile_did_call);
diff --git a/Source/JavaScriptCore/llint/LLIntThunks.cpp b/Source/JavaScriptCore/llint/LLIntThunks.cpp
index ef19c766d..fe57aa374 100644
--- a/Source/JavaScriptCore/llint/LLIntThunks.cpp
+++ b/Source/JavaScriptCore/llint/LLIntThunks.cpp
@@ -38,7 +38,7 @@ namespace JSC { namespace LLInt {
#if !ENABLE(LLINT_C_LOOP)
-static MacroAssemblerCodeRef generateThunkWithJumpTo(JSGlobalData* globalData, void (*target)(), const char *thunkKind)
+static MacroAssemblerCodeRef generateThunkWithJumpTo(VM* vm, void (*target)(), const char *thunkKind)
{
JSInterfaceJIT jit;
@@ -46,38 +46,38 @@ static MacroAssemblerCodeRef generateThunkWithJumpTo(JSGlobalData* globalData, v
jit.move(JSInterfaceJIT::TrustedImmPtr(bitwise_cast<void*>(target)), JSInterfaceJIT::regT0);
jit.jump(JSInterfaceJIT::regT0);
- LinkBuffer patchBuffer(*globalData, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("LLInt %s prologue thunk", thunkKind));
}
-MacroAssemblerCodeRef functionForCallEntryThunkGenerator(JSGlobalData* globalData)
+MacroAssemblerCodeRef functionForCallEntryThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(globalData, llint_function_for_call_prologue, "function for call");
+ return generateThunkWithJumpTo(vm, llint_function_for_call_prologue, "function for call");
}
-MacroAssemblerCodeRef functionForConstructEntryThunkGenerator(JSGlobalData* globalData)
+MacroAssemblerCodeRef functionForConstructEntryThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(globalData, llint_function_for_construct_prologue, "function for construct");
+ return generateThunkWithJumpTo(vm, llint_function_for_construct_prologue, "function for construct");
}
-MacroAssemblerCodeRef functionForCallArityCheckThunkGenerator(JSGlobalData* globalData)
+MacroAssemblerCodeRef functionForCallArityCheckThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(globalData, llint_function_for_call_arity_check, "function for call with arity check");
+ return generateThunkWithJumpTo(vm, llint_function_for_call_arity_check, "function for call with arity check");
}
-MacroAssemblerCodeRef functionForConstructArityCheckThunkGenerator(JSGlobalData* globalData)
+MacroAssemblerCodeRef functionForConstructArityCheckThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(globalData, llint_function_for_construct_arity_check, "function for construct with arity check");
+ return generateThunkWithJumpTo(vm, llint_function_for_construct_arity_check, "function for construct with arity check");
}
-MacroAssemblerCodeRef evalEntryThunkGenerator(JSGlobalData* globalData)
+MacroAssemblerCodeRef evalEntryThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(globalData, llint_eval_prologue, "eval");
+ return generateThunkWithJumpTo(vm, llint_eval_prologue, "eval");
}
-MacroAssemblerCodeRef programEntryThunkGenerator(JSGlobalData* globalData)
+MacroAssemblerCodeRef programEntryThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(globalData, llint_program_prologue, "program");
+ return generateThunkWithJumpTo(vm, llint_program_prologue, "program");
}
#endif // !ENABLE(LLINT_C_LOOP)
diff --git a/Source/JavaScriptCore/llint/LLIntThunks.h b/Source/JavaScriptCore/llint/LLIntThunks.h
index ee119e0b9..b46cc00e7 100644
--- a/Source/JavaScriptCore/llint/LLIntThunks.h
+++ b/Source/JavaScriptCore/llint/LLIntThunks.h
@@ -34,16 +34,16 @@
namespace JSC {
-class JSGlobalData;
+class VM;
namespace LLInt {
-MacroAssemblerCodeRef functionForCallEntryThunkGenerator(JSGlobalData*);
-MacroAssemblerCodeRef functionForConstructEntryThunkGenerator(JSGlobalData*);
-MacroAssemblerCodeRef functionForCallArityCheckThunkGenerator(JSGlobalData*);
-MacroAssemblerCodeRef functionForConstructArityCheckThunkGenerator(JSGlobalData*);
-MacroAssemblerCodeRef evalEntryThunkGenerator(JSGlobalData*);
-MacroAssemblerCodeRef programEntryThunkGenerator(JSGlobalData*);
+MacroAssemblerCodeRef functionForCallEntryThunkGenerator(VM*);
+MacroAssemblerCodeRef functionForConstructEntryThunkGenerator(VM*);
+MacroAssemblerCodeRef functionForCallArityCheckThunkGenerator(VM*);
+MacroAssemblerCodeRef functionForConstructArityCheckThunkGenerator(VM*);
+MacroAssemblerCodeRef evalEntryThunkGenerator(VM*);
+MacroAssemblerCodeRef programEntryThunkGenerator(VM*);
} } // namespace JSC::LLInt
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
index 9de48f1f6..85917a512 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
@@ -118,7 +118,7 @@ const FunctionCode = 2
const LLIntReturnPC = ArgumentCount + TagOffset
# String flags.
-const HashFlags8BitBuffer = 64
+const HashFlags8BitBuffer = 32
# Copied from PropertyOffset.h
const firstOutOfLineOffset = 100
@@ -154,11 +154,11 @@ else
end
# This must match wtf/Vector.h
-const VectorSizeOffset = 0
+const VectorBufferOffset = 0
if JSVALUE64
- const VectorBufferOffset = 8
+ const VectorSizeOffset = 12
else
- const VectorBufferOffset = 4
+ const VectorSizeOffset = 8
end
@@ -182,9 +182,11 @@ macro assert(assertion)
end
macro preserveReturnAddressAfterCall(destinationRegister)
- if C_LOOP or ARMv7 or MIPS
+ if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS
# In C_LOOP case, we're only preserving the bytecode vPC.
move lr, destinationRegister
+ elsif SH4
+ stspr destinationRegister
elsif X86 or X86_64
pop destinationRegister
else
@@ -193,9 +195,11 @@ macro preserveReturnAddressAfterCall(destinationRegister)
end
macro restoreReturnAddressBeforeReturn(sourceRegister)
- if C_LOOP or ARMv7 or MIPS
+ if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS
# In C_LOOP case, we're only restoring the bytecode vPC.
move sourceRegister, lr
+ elsif SH4
+ ldspr sourceRegister
elsif X86 or X86_64
push sourceRegister
else
@@ -358,8 +362,8 @@ macro functionInitialization(profileArgSkip)
# Check stack height.
loadi CodeBlock::m_numCalleeRegisters[t1], t0
- loadp CodeBlock::m_globalData[t1], t2
- loadp JSGlobalData::interpreter[t2], t2 # FIXME: Can get to the JSStack from the JITStackFrame
+ loadp CodeBlock::m_vm[t1], t2
+ loadp VM::interpreter[t2], t2 # FIXME: Can get to the JSStack from the JITStackFrame
lshifti 3, t0
addp t0, cfr, t0
bpaeq Interpreter::m_stack + JSStack::m_end[t2], t0, .stackHeightOK
@@ -369,32 +373,21 @@ macro functionInitialization(profileArgSkip)
.stackHeightOK:
end
-macro allocateBasicJSObject(sizeClassIndex, structure, result, scratch1, scratch2, slowCase)
+macro allocateJSObject(allocator, structure, result, scratch1, slowCase)
if ALWAYS_ALLOCATE_SLOW
jmp slowCase
else
- const offsetOfMySizeClass =
- JSGlobalData::heap +
- Heap::m_objectSpace +
- MarkedSpace::m_normalSpace +
- MarkedSpace::Subspace::preciseAllocators +
- sizeClassIndex * sizeof MarkedAllocator
-
const offsetOfFirstFreeCell =
MarkedAllocator::m_freeList +
MarkedBlock::FreeList::head
- # FIXME: we can get the global data in one load from the stack.
- loadp CodeBlock[cfr], scratch1
- loadp CodeBlock::m_globalData[scratch1], scratch1
-
# Get the object from the free list.
- loadp offsetOfMySizeClass + offsetOfFirstFreeCell[scratch1], result
+ loadp offsetOfFirstFreeCell[allocator], result
btpz result, slowCase
# Remove the object from the free list.
- loadp [result], scratch2
- storep scratch2, offsetOfMySizeClass + offsetOfFirstFreeCell[scratch1]
+ loadp [result], scratch1
+ storep scratch1, offsetOfFirstFreeCell[allocator]
# Initialize the object.
storep structure, JSCell::m_structure[result]
@@ -536,11 +529,8 @@ _llint_op_in:
dispatch(4)
macro getPutToBaseOperationField(scratch, scratch1, fieldOffset, fieldGetter)
- loadisFromInstruction(4, scratch)
- mulp sizeof PutToBaseOperation, scratch, scratch
- loadp CodeBlock[cfr], scratch1
- loadp VectorBufferOffset + CodeBlock::m_putToBaseOperations[scratch1], scratch1
- fieldGetter(fieldOffset[scratch1, scratch, 1])
+ loadpFromInstruction(4, scratch)
+ fieldGetter(fieldOffset[scratch])
end
macro moveJSValueFromRegisterWithoutProfiling(value, destBuffer, destOffsetReg)
@@ -588,12 +578,9 @@ _llint_op_put_to_base:
callSlowPath(_llint_slow_path_put_to_base)
dispatch(5)
-macro getResolveOperation(resolveOperationIndex, dest, scratch)
- loadisFromInstruction(resolveOperationIndex, dest)
- mulp sizeof ResolveOperations, dest, dest
- loadp CodeBlock[cfr], scratch
- loadp VectorBufferOffset + CodeBlock::m_resolveOperations[scratch], scratch
- loadp VectorBufferOffset[scratch, dest, 1], dest
+macro getResolveOperation(resolveOperationIndex, dest)
+ loadpFromInstruction(resolveOperationIndex, dest)
+ loadp VectorBufferOffset[dest], dest
end
macro getScope(loadInitialScope, scopeCount, dest, scratch)
@@ -656,7 +643,7 @@ end
_llint_op_resolve_global_property:
traceExecution()
- getResolveOperation(3, t0, t1)
+ getResolveOperation(3, t0)
loadp CodeBlock[cfr], t1
loadp CodeBlock::m_globalObject[t1], t1
loadp ResolveOperation::m_structure[t0], t2
@@ -675,7 +662,7 @@ _llint_op_resolve_global_property:
_llint_op_resolve_global_var:
traceExecution()
- getResolveOperation(3, t0, t1)
+ getResolveOperation(3, t0)
loadp ResolveOperation::m_registerAddress[t0], t0
loadisFromInstruction(1, t1)
moveJSValueFromSlot(t0, cfr, t1, 4, t3)
@@ -697,13 +684,13 @@ end
_llint_op_resolve_scoped_var:
traceExecution()
- getResolveOperation(3, t0, t1)
+ getResolveOperation(3, t0)
resolveScopedVarBody(t0)
dispatch(5)
_llint_op_resolve_scoped_var_on_top_scope:
traceExecution()
- getResolveOperation(3, t0, t1)
+ getResolveOperation(3, t0)
# Load destination index
loadisFromInstruction(1, t3)
@@ -720,7 +707,7 @@ _llint_op_resolve_scoped_var_on_top_scope:
_llint_op_resolve_scoped_var_with_top_scope_check:
traceExecution()
- getResolveOperation(3, t0, t1)
+ getResolveOperation(3, t0)
# First ResolveOperation tells us what register to check
loadis ResolveOperation::m_activationRegister[t0], t1
@@ -747,7 +734,7 @@ _llint_op_resolve_scoped_var_with_top_scope_check:
_llint_op_resolve:
.llint_op_resolve_local:
traceExecution()
- getResolveOperation(3, t0, t1)
+ getResolveOperation(3, t0)
btpz t0, .noInstructions
loadis ResolveOperation::m_operation[t0], t1
bineq t1, ResolveOperationSkipScopes, .notSkipScopes
@@ -783,7 +770,7 @@ _llint_op_resolve_base_to_global_dynamic:
_llint_op_resolve_base_to_scope:
traceExecution()
- getResolveOperation(4, t0, t1)
+ getResolveOperation(4, t0)
# First ResolveOperation is to skip scope chain nodes
getScope(macro(dest)
loadp ScopeChain + PayloadOffset[cfr], dest
@@ -800,7 +787,7 @@ _llint_op_resolve_base_to_scope:
_llint_op_resolve_base_to_scope_with_top_scope_check:
traceExecution()
- getResolveOperation(4, t0, t1)
+ getResolveOperation(4, t0)
# First ResolveOperation tells us what register to check
loadis ResolveOperation::m_activationRegister[t0], t1
@@ -831,14 +818,9 @@ _llint_op_resolve_base:
callSlowPath(_llint_slow_path_resolve_base)
dispatch(7)
-_llint_op_ensure_property_exists:
- traceExecution()
- callSlowPath(_llint_slow_path_ensure_property_exists)
- dispatch(3)
-
macro interpretResolveWithBase(opcodeLength, slowPath)
traceExecution()
- getResolveOperation(4, t0, t1)
+ getResolveOperation(4, t0)
btpz t0, .slowPath
loadp ScopeChain[cfr], t3
@@ -990,19 +972,6 @@ _llint_op_put_getter_setter:
dispatch(5)
-_llint_op_jmp_scopes:
- traceExecution()
- callSlowPath(_llint_slow_path_jmp_scopes)
- dispatch(0)
-
-
-_llint_op_loop_if_true:
- traceExecution()
- jumpTrueOrFalse(
- macro (value, target) btinz value, target end,
- _llint_slow_path_jtrue)
-
-
_llint_op_jtrue:
traceExecution()
jumpTrueOrFalse(
@@ -1010,13 +979,6 @@ _llint_op_jtrue:
_llint_slow_path_jtrue)
-_llint_op_loop_if_false:
- traceExecution()
- jumpTrueOrFalse(
- macro (value, target) btiz value, target end,
- _llint_slow_path_jfalse)
-
-
_llint_op_jfalse:
traceExecution()
jumpTrueOrFalse(
@@ -1024,14 +986,6 @@ _llint_op_jfalse:
_llint_slow_path_jfalse)
-_llint_op_loop_if_less:
- traceExecution()
- compare(
- macro (left, right, target) bilt left, right, target end,
- macro (left, right, target) bdlt left, right, target end,
- _llint_slow_path_jless)
-
-
_llint_op_jless:
traceExecution()
compare(
@@ -1048,14 +1002,6 @@ _llint_op_jnless:
_llint_slow_path_jnless)
-_llint_op_loop_if_greater:
- traceExecution()
- compare(
- macro (left, right, target) bigt left, right, target end,
- macro (left, right, target) bdgt left, right, target end,
- _llint_slow_path_jgreater)
-
-
_llint_op_jgreater:
traceExecution()
compare(
@@ -1072,14 +1018,6 @@ _llint_op_jngreater:
_llint_slow_path_jngreater)
-_llint_op_loop_if_lesseq:
- traceExecution()
- compare(
- macro (left, right, target) bilteq left, right, target end,
- macro (left, right, target) bdlteq left, right, target end,
- _llint_slow_path_jlesseq)
-
-
_llint_op_jlesseq:
traceExecution()
compare(
@@ -1096,14 +1034,6 @@ _llint_op_jnlesseq:
_llint_slow_path_jnlesseq)
-_llint_op_loop_if_greatereq:
- traceExecution()
- compare(
- macro (left, right, target) bigteq left, right, target end,
- macro (left, right, target) bdgteq left, right, target end,
- _llint_slow_path_jgreatereq)
-
-
_llint_op_jgreatereq:
traceExecution()
compare(
@@ -1122,9 +1052,17 @@ _llint_op_jngreatereq:
_llint_op_loop_hint:
traceExecution()
+ loadp JITStackFrame::vm[sp], t1
+ loadb VM::watchdog+Watchdog::m_timerDidFire[t1], t0
+ btbnz t0, .handleWatchdogTimer
+.afterWatchdogTimerCheck:
checkSwitchToJITForLoop()
dispatch(1)
-
+.handleWatchdogTimer:
+ callWatchdogTimerHandler(.throwHandler)
+ jmp .afterWatchdogTimerCheck
+.throwHandler:
+ jmp _llint_throw_from_slow_path_trampoline
_llint_op_switch_string:
traceExecution()
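
Note: the allocateJSObject macro above now takes the allocator from the caller (for example from a JSFunction's ObjectAllocationProfile in op_create_this) and reduces the fast path to a free-list pop: load the head, take the slow case if it is null, otherwise unlink the cell and store the Structure into its header. A minimal C++ sketch of that fast path, with hypothetical stand-ins for MarkedAllocator and JSCell:

#include <cstdio>

// Hypothetical stand-ins for MarkedAllocator's free list and a cell header.
struct FreeCell { FreeCell* next; };

struct Allocator {
    FreeCell* freeListHead;             // MarkedAllocator::m_freeList.head
};

struct Structure;                       // opaque here

struct CellHeader { Structure* structure; };

// Fast path of allocateJSObject: returns nullptr when the caller must take
// the slow case (ALWAYS_ALLOCATE_SLOW, empty free list, ...).
inline CellHeader* tryAllocate(Allocator& allocator, Structure* structure)
{
    FreeCell* cell = allocator.freeListHead;   // loadp offsetOfFirstFreeCell[allocator], result
    if (!cell)
        return nullptr;                        // btpz result, slowCase
    allocator.freeListHead = cell->next;       // storep scratch1, offsetOfFirstFreeCell[allocator]
    CellHeader* object = reinterpret_cast<CellHeader*>(cell);
    object->structure = structure;             // storep structure, JSCell::m_structure[result]
    return object;
}

int main()
{
    FreeCell cells[2] = { { &cells[1] }, { nullptr } };
    Allocator allocator = { &cells[0] };
    CellHeader* object = tryAllocate(allocator, nullptr);
    std::printf("allocated %p, next head %p\n",
                static_cast<void*>(object), static_cast<void*>(allocator.freeListHead));
    return 0;
}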
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp
index b2ce2483e..a616ce9f0 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp
@@ -35,6 +35,7 @@
#include "CodeBlock.h"
#include "LLIntCLoop.h"
#include "LLIntSlowPaths.h"
+#include "Operations.h"
#include "VMInspector.h"
#include <wtf/Assertions.h>
#include <wtf/MathExtras.h>
@@ -264,7 +265,7 @@ JSValue CLoop::execute(CallFrame* callFrame, OpcodeID bootstrapOpcodeId,
return JSValue();
}
- ASSERT(callFrame->globalData().topCallFrame == callFrame);
+ ASSERT(callFrame->vm().topCallFrame == callFrame);
// Define the pseudo registers used by the LLINT C Loop backend:
ASSERT(sizeof(CLoopRegister) == sizeof(intptr_t));
@@ -307,12 +308,10 @@ JSValue CLoop::execute(CallFrame* callFrame, OpcodeID bootstrapOpcodeId,
CLoopRegister rRetVPC;
CLoopDoubleRegister d0, d1;
-#if COMPILER(MSVC)
// Keep the compiler happy. We don't really need this, but the compiler
// will complain. This makes the warning go away.
t0.i = 0;
t1.i = 0;
-#endif
// Instantiate the pseudo JIT stack frame used by the LLINT C Loop backend:
JITStackFrame jitStackFrame;
@@ -320,10 +319,10 @@ JSValue CLoop::execute(CallFrame* callFrame, OpcodeID bootstrapOpcodeId,
// The llint expects the native stack pointer, sp, to be pointing to the
// jitStackFrame (which is the simulation of the native stack frame):
JITStackFrame* const sp = &jitStackFrame;
- sp->globalData = &callFrame->globalData();
+ sp->vm = &callFrame->vm();
- // Set up an alias for the globalData ptr in the JITStackFrame:
- JSGlobalData* &globalData = sp->globalData;
+ // Set up an alias for the vm ptr in the JITStackFrame:
+ VM* &vm = sp->vm;
CodeBlock* codeBlock = callFrame->codeBlock();
Instruction* vPC;
@@ -424,7 +423,7 @@ JSValue CLoop::execute(CallFrame* callFrame, OpcodeID bootstrapOpcodeId,
callFrame = callFrame->callerFrame();
// The part in getHostCallReturnValueWithExecState():
- JSValue result = globalData->hostCallReturnValue;
+ JSValue result = vm->hostCallReturnValue;
#if USE(JSVALUE32_64)
t1.i = result.tag();
t0.i = result.payload();
@@ -436,7 +435,7 @@ JSValue CLoop::execute(CallFrame* callFrame, OpcodeID bootstrapOpcodeId,
OFFLINE_ASM_GLUE_LABEL(ctiOpThrowNotCaught)
{
- return globalData->exception;
+ return vm->exception;
}
#if !ENABLE(COMPUTED_GOTO_OPCODES)
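
Besides the JSGlobalData-to-VM rename, the C loop keeps the VM pointer inside the simulated JITStackFrame and then binds a reference to that slot, so the field the generated asm reads via JITStackFrame::vm[sp] and the pointer the C++ code dereferences are always the same object. A reduced sketch of that aliasing idiom; the struct and field names below are stand-ins, not the real JSC types:

    #include <cstdio>

    struct FakeVM { int hostCallReturnValue = 0; };

    // Stand-in for JITStackFrame: the asm backend addresses this struct by field
    // offset, the C++ backend reaches the same slot through a reference alias.
    struct FakeJITStackFrame { FakeVM* vm = nullptr; };

    int main()
    {
        FakeVM theVM;
        FakeJITStackFrame frame;
        FakeJITStackFrame* const sp = &frame;

        sp->vm = &theVM;          // what the diff writes as sp->vm = &callFrame->vm()
        FakeVM*& vm = sp->vm;     // alias: whatever the slot points at, "vm" follows

        theVM.hostCallReturnValue = 42;
        std::printf("%d\n", vm->hostCallReturnValue);   // read back through the alias
    }
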
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
index 9a17985bc..b9b457034 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
@@ -105,7 +105,7 @@ macro dispatchAfterCall()
end
macro cCall2(function, arg1, arg2)
- if ARMv7
+ if ARM or ARMv7 or ARMv7_TRADITIONAL
move arg1, t0
move arg2, t1
call function
@@ -113,7 +113,7 @@ macro cCall2(function, arg1, arg2)
poke arg1, 0
poke arg2, 1
call function
- elsif MIPS
+ elsif MIPS or SH4
move arg1, a0
move arg2, a1
call function
@@ -126,7 +126,7 @@ end
# This barely works. arg3 and arg4 should probably be immediates.
macro cCall4(function, arg1, arg2, arg3, arg4)
- if ARMv7
+ if ARM or ARMv7 or ARMv7_TRADITIONAL
move arg1, t0
move arg2, t1
move arg3, t2
@@ -138,7 +138,7 @@ macro cCall4(function, arg1, arg2, arg3, arg4)
poke arg3, 2
poke arg4, 3
call function
- elsif MIPS
+ elsif MIPS or SH4
move arg1, a0
move arg2, a1
move arg3, a2
@@ -185,6 +185,14 @@ macro callCallSlowPath(advance, slowPath, action)
action(t0)
end
+macro callWatchdogTimerHandler(throwHandler)
+ storei PC, ArgumentCount + TagOffset[cfr]
+ cCall2(_llint_slow_path_handle_watchdog_timer, cfr, PC)
+ move t1, cfr
+ btpnz t0, throwHandler
+ loadi ArgumentCount + TagOffset[cfr], PC
+end
+
macro checkSwitchToJITForLoop()
checkSwitchToJIT(
1,
@@ -302,9 +310,9 @@ macro functionArityCheck(doneLabel, slow_path)
cCall2(slow_path, cfr, PC) # This slow_path has a simple protocol: t0 = 0 => no error, t0 != 0 => error
move t1, cfr
btiz t0, .continue
- loadp JITStackFrame::globalData[sp], t1
- loadp JSGlobalData::callFrameForThrow[t1], t0
- jmp JSGlobalData::targetMachinePCForThrow[t1]
+ loadp JITStackFrame::vm[sp], t1
+ loadp VM::callFrameForThrow[t1], t0
+ jmp VM::targetMachinePCForThrow[t1]
.continue:
# Reload CodeBlock and PC, since the slow_path clobbered it.
loadp CodeBlock[cfr], t1
@@ -361,17 +369,18 @@ _llint_op_create_this:
traceExecution()
loadi 8[PC], t0
loadp PayloadOffset[cfr, t0, 8], t0
- loadp JSFunction::m_cachedInheritorID[t0], t2
- btpz t2, .opCreateThisSlow
- allocateBasicJSObject(JSFinalObjectSizeClassIndex, t2, t0, t1, t3, .opCreateThisSlow)
+ loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_allocator[t0], t1
+ loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_structure[t0], t2
+ btpz t1, .opCreateThisSlow
+ allocateJSObject(t1, t2, t0, t3, .opCreateThisSlow)
loadi 4[PC], t1
storei CellTag, TagOffset[cfr, t1, 8]
storei t0, PayloadOffset[cfr, t1, 8]
- dispatch(3)
+ dispatch(4)
.opCreateThisSlow:
callSlowPath(_llint_slow_path_create_this)
- dispatch(3)
+ dispatch(4)
_llint_op_get_callee:
@@ -403,18 +412,18 @@ _llint_op_convert_this:
_llint_op_new_object:
traceExecution()
- loadp CodeBlock[cfr], t0
- loadp CodeBlock::m_globalObject[t0], t0
- loadp JSGlobalObject::m_emptyObjectStructure[t0], t1
- allocateBasicJSObject(JSFinalObjectSizeClassIndex, t1, t0, t2, t3, .opNewObjectSlow)
+ loadpFromInstruction(3, t0)
+ loadp ObjectAllocationProfile::m_allocator[t0], t1
+ loadp ObjectAllocationProfile::m_structure[t0], t2
+ allocateJSObject(t1, t2, t0, t3, .opNewObjectSlow)
loadi 4[PC], t1
storei CellTag, TagOffset[cfr, t1, 8]
storei t0, PayloadOffset[cfr, t1, 8]
- dispatch(2)
+ dispatch(4)
.opNewObjectSlow:
callSlowPath(_llint_slow_path_new_object)
- dispatch(2)
+ dispatch(4)
_llint_op_mov:
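
op_create_this and op_new_object now pull an allocator and a Structure out of an ObjectAllocationProfile instead of going through JSFunction::m_cachedInheritorID or the global object's empty-object structure; op_create_this bails to the slow path while the profile is still unprimed (null allocator), and the wider dispatches (3 to 4, 2 to 4) reflect the extra profile operand. A rough C++ sketch of that fast/slow split, with simplified types standing in for the allocator and Structure:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    struct FakeStructure { int inlineCapacity; };

    // Stand-in for a GC allocator entry point.
    void* basicAllocate(std::size_t size) { return std::malloc(size); }

    // Stand-in for ObjectAllocationProfile: a pre-selected allocator (null until
    // the profile has been primed) plus the Structure new objects should get.
    struct FakeAllocationProfile {
        void* (*allocator)(std::size_t) = nullptr;
        FakeStructure* structure = nullptr;
        std::size_t allocationSize = 0;
    };

    void* createThis(FakeAllocationProfile& profile)
    {
        // Mirrors the asm: btpz on the allocator sends us to .opCreateThisSlow.
        if (!profile.allocator)
            return nullptr;
        void* object = profile.allocator(profile.allocationSize);
        // A real VM would now install profile.structure in the object's header.
        return object;
    }

    int main()
    {
        FakeStructure structure { 6 };
        FakeAllocationProfile profile { basicAllocate, &structure, 64 };
        void* object = createThis(profile);
        std::printf("allocated: %p\n", object);
        std::free(object);
    }
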
@@ -571,88 +580,48 @@ _llint_op_nstricteq:
strictEq(macro (left, right, result) cineq left, right, result end, _llint_slow_path_nstricteq)
-_llint_op_pre_inc:
+_llint_op_inc:
traceExecution()
loadi 4[PC], t0
- bineq TagOffset[cfr, t0, 8], Int32Tag, .opPreIncSlow
+ bineq TagOffset[cfr, t0, 8], Int32Tag, .opIncSlow
loadi PayloadOffset[cfr, t0, 8], t1
- baddio 1, t1, .opPreIncSlow
+ baddio 1, t1, .opIncSlow
storei t1, PayloadOffset[cfr, t0, 8]
dispatch(2)
-.opPreIncSlow:
+.opIncSlow:
callSlowPath(_llint_slow_path_pre_inc)
dispatch(2)
-_llint_op_pre_dec:
+_llint_op_dec:
traceExecution()
loadi 4[PC], t0
- bineq TagOffset[cfr, t0, 8], Int32Tag, .opPreDecSlow
+ bineq TagOffset[cfr, t0, 8], Int32Tag, .opDecSlow
loadi PayloadOffset[cfr, t0, 8], t1
- bsubio 1, t1, .opPreDecSlow
+ bsubio 1, t1, .opDecSlow
storei t1, PayloadOffset[cfr, t0, 8]
dispatch(2)
-.opPreDecSlow:
+.opDecSlow:
callSlowPath(_llint_slow_path_pre_dec)
dispatch(2)
-_llint_op_post_inc:
- traceExecution()
- loadi 8[PC], t0
- loadi 4[PC], t1
- bineq TagOffset[cfr, t0, 8], Int32Tag, .opPostIncSlow
- bieq t0, t1, .opPostIncDone
- loadi PayloadOffset[cfr, t0, 8], t2
- move t2, t3
- baddio 1, t3, .opPostIncSlow
- storei Int32Tag, TagOffset[cfr, t1, 8]
- storei t2, PayloadOffset[cfr, t1, 8]
- storei t3, PayloadOffset[cfr, t0, 8]
-.opPostIncDone:
- dispatch(3)
-
-.opPostIncSlow:
- callSlowPath(_llint_slow_path_post_inc)
- dispatch(3)
-
-
-_llint_op_post_dec:
- traceExecution()
- loadi 8[PC], t0
- loadi 4[PC], t1
- bineq TagOffset[cfr, t0, 8], Int32Tag, .opPostDecSlow
- bieq t0, t1, .opPostDecDone
- loadi PayloadOffset[cfr, t0, 8], t2
- move t2, t3
- bsubio 1, t3, .opPostDecSlow
- storei Int32Tag, TagOffset[cfr, t1, 8]
- storei t2, PayloadOffset[cfr, t1, 8]
- storei t3, PayloadOffset[cfr, t0, 8]
-.opPostDecDone:
- dispatch(3)
-
-.opPostDecSlow:
- callSlowPath(_llint_slow_path_post_dec)
- dispatch(3)
-
-
-_llint_op_to_jsnumber:
+_llint_op_to_number:
traceExecution()
loadi 8[PC], t0
loadi 4[PC], t1
loadConstantOrVariable(t0, t2, t3)
- bieq t2, Int32Tag, .opToJsnumberIsInt
- biaeq t2, EmptyValueTag, .opToJsnumberSlow
-.opToJsnumberIsInt:
+ bieq t2, Int32Tag, .opToNumberIsInt
+ biaeq t2, LowestTag, .opToNumberSlow
+.opToNumberIsInt:
storei t2, TagOffset[cfr, t1, 8]
storei t3, PayloadOffset[cfr, t1, 8]
dispatch(3)
-.opToJsnumberSlow:
- callSlowPath(_llint_slow_path_to_jsnumber)
+.opToNumberSlow:
+ callSlowPath(_llint_slow_path_to_number)
dispatch(3)
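
The rewritten op_to_number fast path illustrates the JSVALUE32_64 encoding: a value is already numeric when its tag equals Int32Tag, or when the tag is numerically below LowestTag (every such bit pattern is the high word of a double); switching the bound from EmptyValueTag to LowestTag tightens that test. A small sketch of the check; the tag constants here are illustrative, not copied from the headers:

    #include <cstdint>
    #include <cstdio>

    // JSVALUE32_64-style tags (illustrative): all non-double tags sit at the top
    // of the 32-bit range, so "tag < LowestTag" means "high word of an IEEE double".
    constexpr uint32_t Int32Tag  = 0xffffffff;
    constexpr uint32_t LowestTag = 0xfffffff9;   // placeholder for the real lowest tag

    bool isAlreadyNumber(uint32_t tag)
    {
        if (tag == Int32Tag)
            return true;          // .opToNumberIsInt
        if (tag >= LowestTag)
            return false;         // .opToNumberSlow: boolean, cell, undefined, ...
        return true;              // anything else is the top half of a double
    }

    int main()
    {
        std::printf("%d %d %d\n",
            isAlreadyNumber(Int32Tag),     // 1
            isAlreadyNumber(0x3ff00000),   // 1: high word of the double 1.0
            isAlreadyNumber(LowestTag));   // 0
    }
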
@@ -1076,7 +1045,7 @@ _llint_op_get_array_length:
loadi 4[PC], t1
loadp 32[PC], t2
loadp JSObject::m_butterfly[t3], t0
- loadi -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0], t0
+ loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0
bilt t0, 0, .opGetArrayLengthSlow
valueProfile(Int32Tag, t0, t2)
storep t0, PayloadOffset[cfr, t1, 8]
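
Every butterfly length access in this patch changes from IndexingHeader::m_publicLength / m_vectorLength to IndexingHeader::u.lengths.publicLength / vectorLength: the two 32-bit lengths now live in a union inside the header that sits immediately before the butterfly, which is why the asm keeps subtracting sizeof IndexingHeader from the butterfly pointer. A schematic of that layout; the field names follow the diff, everything else is simplified:

    #include <cstdint>
    #include <cstdio>

    // Schematic IndexingHeader: the array lengths share the eight bytes in front
    // of the butterfly with other per-object payloads, hence the union.
    struct SchematicIndexingHeader {
        union {
            struct {
                uint32_t publicLength;   // what array.length reports
                uint32_t vectorLength;   // allocated slots in the butterfly
            } lengths;
            uint64_t raw;                // stand-in for the union's other members
        } u;
    };

    int main()
    {
        SchematicIndexingHeader header {};
        header.u.lengths.publicLength = 3;
        header.u.lengths.vectorLength = 8;
        // The asm reads these at a negative offset from the butterfly pointer:
        //   loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0
        std::printf("%u of %u slots used\n",
            header.u.lengths.publicLength, header.u.lengths.vectorLength);
    }
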
@@ -1211,14 +1180,14 @@ _llint_op_get_by_val:
bineq t2, ContiguousShape, .opGetByValNotContiguous
.opGetByValIsContiguous:
- biaeq t1, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t3], .opGetByValSlow
+ biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t3], .opGetByValOutOfBounds
loadi TagOffset[t3, t1, 8], t2
loadi PayloadOffset[t3, t1, 8], t1
jmp .opGetByValDone
.opGetByValNotContiguous:
bineq t2, DoubleShape, .opGetByValNotDouble
- biaeq t1, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t3], .opGetByValSlow
+ biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t3], .opGetByValOutOfBounds
loadd [t3, t1, 8], ft0
bdnequn ft0, ft0, .opGetByValSlow
# FIXME: This could be massively optimized.
@@ -1229,13 +1198,13 @@ _llint_op_get_by_val:
.opGetByValNotDouble:
subi ArrayStorageShape, t2
bia t2, SlowPutArrayStorageShape - ArrayStorageShape, .opGetByValSlow
- biaeq t1, -sizeof IndexingHeader + IndexingHeader::m_vectorLength[t3], .opGetByValSlow
+ biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t3], .opGetByValOutOfBounds
loadi ArrayStorage::m_vector + TagOffset[t3, t1, 8], t2
loadi ArrayStorage::m_vector + PayloadOffset[t3, t1, 8], t1
.opGetByValDone:
loadi 4[PC], t0
- bieq t2, EmptyValueTag, .opGetByValSlow
+ bieq t2, EmptyValueTag, .opGetByValOutOfBounds
.opGetByValNotEmpty:
storei t2, TagOffset[cfr, t0, 8]
storei t1, PayloadOffset[cfr, t0, 8]
@@ -1243,6 +1212,11 @@ _llint_op_get_by_val:
valueProfile(t2, t1, t0)
dispatch(6)
+.opGetByValOutOfBounds:
+ if VALUE_PROFILER
+ loadpFromInstruction(4, t0)
+ storeb 1, ArrayProfile::m_outOfBounds[t0]
+ end
.opGetByValSlow:
callSlowPath(_llint_slow_path_get_by_val)
dispatch(6)
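
op_get_by_val (and, below, op_put_by_val) now separates out-of-bounds accesses from the generic slow case: before falling through to the slow path it sets a byte in the instruction's ArrayProfile, so a later tier can compile a bounds-aware version of the access. A compact sketch of that record-then-fall-through pattern; the profile type is illustrative:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Stand-in for ArrayProfile: one sticky byte per access site.
    struct FakeArrayProfile {
        unsigned char outOfBounds = 0;      // akin to ArrayProfile::m_outOfBounds
    };

    int getByValSlow(const std::vector<int>& array, std::size_t index)
    {
        return index < array.size() ? array[index] : 0;   // generic fallback
    }

    int getByVal(const std::vector<int>& array, std::size_t index, FakeArrayProfile& profile)
    {
        if (index < array.size())
            return array[index];            // contiguous fast path
        profile.outOfBounds = 1;            // .opGetByValOutOfBounds: remember it...
        return getByValSlow(array, index);  // ...then fall through to the slow path
    }

    int main()
    {
        std::vector<int> array { 10, 20, 30 };
        FakeArrayProfile profile;
        getByVal(array, 7, profile);
        std::printf("saw out-of-bounds: %d\n", profile.outOfBounds);
    }
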
@@ -1306,20 +1280,20 @@ _llint_op_get_by_pname:
macro contiguousPutByVal(storeCallback)
- biaeq t3, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0], .outOfBounds
+ biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .outOfBounds
.storeResult:
loadi 12[PC], t2
storeCallback(t2, t1, t0, t3)
dispatch(5)
.outOfBounds:
- biaeq t3, -sizeof IndexingHeader + IndexingHeader::m_vectorLength[t0], .opPutByValSlow
+ biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
if VALUE_PROFILER
loadp 16[PC], t2
storeb 1, ArrayProfile::m_mayStoreToHole[t2]
end
addi 1, t3, t2
- storei t2, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0]
+ storei t2, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]
jmp .storeResult
end
@@ -1373,7 +1347,7 @@ _llint_op_put_by_val:
.opPutByValNotContiguous:
bineq t2, ArrayStorageShape, .opPutByValSlow
- biaeq t3, -sizeof IndexingHeader + IndexingHeader::m_vectorLength[t0], .opPutByValSlow
+ biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
bieq ArrayStorage::m_vector + TagOffset[t0, t3, 8], EmptyValueTag, .opPutByValArrayStorageEmpty
.opPutByValArrayStorageStoreResult:
loadi 12[PC], t2
@@ -1389,21 +1363,21 @@ _llint_op_put_by_val:
storeb 1, ArrayProfile::m_mayStoreToHole[t1]
end
addi 1, ArrayStorage::m_numValuesInVector[t0]
- bib t3, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0], .opPutByValArrayStorageStoreResult
+ bib t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .opPutByValArrayStorageStoreResult
addi 1, t3, t1
- storei t1, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0]
+ storei t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]
jmp .opPutByValArrayStorageStoreResult
+.opPutByValOutOfBounds:
+ if VALUE_PROFILER
+ loadpFromInstruction(4, t0)
+ storeb 1, ArrayProfile::m_outOfBounds[t0]
+ end
.opPutByValSlow:
callSlowPath(_llint_slow_path_put_by_val)
dispatch(5)
-_llint_op_loop:
- traceExecution()
- dispatchBranch(4[PC])
-
-
_llint_op_jmp:
traceExecution()
dispatchBranch(4[PC])
@@ -1475,7 +1449,7 @@ _llint_op_jneq_ptr:
loadp CodeBlock[cfr], t2
loadp CodeBlock::m_globalObject[t2], t2
bineq TagOffset[cfr, t0, 8], CellTag, .opJneqPtrBranch
- loadp JSGlobalObject::m_specialPointers[t2, t1, 8], t1
+ loadp JSGlobalObject::m_specialPointers[t2, t1, 4], t1
bpeq PayloadOffset[cfr, t0, 8], t1, .opJneqPtrFallThrough
.opJneqPtrBranch:
dispatchBranch(12[PC])
@@ -1755,14 +1729,14 @@ _llint_op_catch:
# the interpreter's throw trampoline (see _llint_throw_trampoline).
# The JIT throwing protocol calls for the cfr to be in t0. The throwing
# code must have known that we were throwing to the interpreter, and have
- # set JSGlobalData::targetInterpreterPCForThrow.
+ # set VM::targetInterpreterPCForThrow.
move t0, cfr
- loadp JITStackFrame::globalData[sp], t3
- loadi JSGlobalData::targetInterpreterPCForThrow[t3], PC
- loadi JSGlobalData::exception + PayloadOffset[t3], t0
- loadi JSGlobalData::exception + TagOffset[t3], t1
- storei 0, JSGlobalData::exception + PayloadOffset[t3]
- storei EmptyValueTag, JSGlobalData::exception + TagOffset[t3]
+ loadp JITStackFrame::vm[sp], t3
+ loadi VM::targetInterpreterPCForThrow[t3], PC
+ loadi VM::exception + PayloadOffset[t3], t0
+ loadi VM::exception + TagOffset[t3], t1
+ storei 0, VM::exception + PayloadOffset[t3]
+ storei EmptyValueTag, VM::exception + TagOffset[t3]
loadi 4[PC], t2
storei t0, PayloadOffset[cfr, t2, 8]
storei t1, TagOffset[cfr, t2, 8]
@@ -1770,6 +1744,71 @@ _llint_op_catch:
dispatch(2)
+# Gives you the scope in t0, while allowing you to optionally perform additional checks on the
+# scopes as they are traversed. scopeCheck() is called with two arguments: the register
+# holding the scope, and a register that can be used for scratch. Note that this does not
+# use t3, so you can hold stuff in t3 if need be.
+macro getDeBruijnScope(deBruijinIndexOperand, scopeCheck)
+ loadp ScopeChain + PayloadOffset[cfr], t0
+ loadi deBruijinIndexOperand, t2
+
+ btiz t2, .done
+
+ loadp CodeBlock[cfr], t1
+ bineq CodeBlock::m_codeType[t1], FunctionCode, .loop
+ btbz CodeBlock::m_needsActivation[t1], .loop
+
+ loadi CodeBlock::m_activationRegister[t1], t1
+
+ # Need to conditionally skip over one scope.
+ bieq TagOffset[cfr, t1, 8], EmptyValueTag, .noActivation
+ scopeCheck(t0, t1)
+ loadp JSScope::m_next[t0], t0
+.noActivation:
+ subi 1, t2
+
+ btiz t2, .done
+.loop:
+ scopeCheck(t0, t1)
+ loadp JSScope::m_next[t0], t0
+ subi 1, t2
+ btinz t2, .loop
+
+.done:
+
+end
+
+_llint_op_get_scoped_var:
+ traceExecution()
+ # Operands are as follows:
+ # 4[PC] Destination for the load.
+ # 8[PC] Index of register in the scope.
+    # 12[PC]     De Bruijn index.
+ getDeBruijnScope(12[PC], macro (scope, scratch) end)
+ loadi 4[PC], t1
+ loadi 8[PC], t2
+ loadp JSVariableObject::m_registers[t0], t0
+ loadi TagOffset[t0, t2, 8], t3
+ loadi PayloadOffset[t0, t2, 8], t0
+ storei t3, TagOffset[cfr, t1, 8]
+ storei t0, PayloadOffset[cfr, t1, 8]
+ loadi 16[PC], t1
+ valueProfile(t3, t0, t1)
+ dispatch(5)
+
+
+_llint_op_put_scoped_var:
+ traceExecution()
+ getDeBruijnScope(8[PC], macro (scope, scratch) end)
+ loadi 12[PC], t1
+ loadConstantOrVariable(t1, t3, t2)
+ loadi 4[PC], t1
+ writeBarrier(t3, t2)
+ loadp JSVariableObject::m_registers[t0], t0
+ storei t3, TagOffset[t0, t1, 8]
+ storei t2, PayloadOffset[t0, t1, 8]
+ dispatch(4)
+
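
The new getDeBruijnScope macro resolves a statically known scope depth by following JSScope::m_next the given number of times, with one wrinkle: in function code that needs an activation, one level of the index is always consumed for the activation slot, but the chain is only advanced past it when the activation register has actually been filled in. The same walk written out in C++, with simplified stand-ins for JSScope and CodeBlock:

    #include <cstdio>

    struct FakeScope {
        const char* name;
        FakeScope* next;            // analogous to JSScope::m_next
    };

    struct FakeCodeBlock {
        bool isFunctionCode;
        bool needsActivation;
        bool activationCreated;     // "is the activation register non-empty?"
    };

    FakeScope* getDeBruijnScope(FakeScope* scope, const FakeCodeBlock& codeBlock, int depth)
    {
        if (!depth)
            return scope;
        // Conditionally skip one scope: an activation that was declared but never
        // created is not in the chain, yet its index level must still be consumed.
        if (codeBlock.isFunctionCode && codeBlock.needsActivation) {
            if (codeBlock.activationCreated)
                scope = scope->next;
            if (!--depth)
                return scope;
        }
        while (depth--)
            scope = scope->next;
        return scope;
    }

    int main()
    {
        FakeScope global { "global", nullptr };
        FakeScope activation { "activation", &global };
        FakeCodeBlock codeBlock { true, true, true };
        // Depth 1 resolves past the created activation to the enclosing scope.
        std::printf("%s\n", getDeBruijnScope(&activation, codeBlock, 1)->name);
    }
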
_llint_op_end:
traceExecution()
checkSwitchToJITForEpilogue()
@@ -1784,16 +1823,16 @@ _llint_throw_from_slow_path_trampoline:
# When throwing from the interpreter (i.e. throwing from LLIntSlowPaths), so
# the throw target is not necessarily interpreted code, we come to here.
# This essentially emulates the JIT's throwing protocol.
- loadp JITStackFrame::globalData[sp], t1
- loadp JSGlobalData::callFrameForThrow[t1], t0
- jmp JSGlobalData::targetMachinePCForThrow[t1]
+ loadp JITStackFrame::vm[sp], t1
+ loadp VM::callFrameForThrow[t1], t0
+ jmp VM::targetMachinePCForThrow[t1]
_llint_throw_during_call_trampoline:
preserveReturnAddressAfterCall(t2)
- loadp JITStackFrame::globalData[sp], t1
- loadp JSGlobalData::callFrameForThrow[t1], t0
- jmp JSGlobalData::targetMachinePCForThrow[t1]
+ loadp JITStackFrame::vm[sp], t1
+ loadp VM::callFrameForThrow[t1], t0
+ jmp VM::targetMachinePCForThrow[t1]
macro nativeCallTrampoline(executableOffsetToFunction)
@@ -1803,8 +1842,8 @@ macro nativeCallTrampoline(executableOffsetToFunction)
storei CellTag, ScopeChain + TagOffset[cfr]
storei t1, ScopeChain + PayloadOffset[cfr]
if X86
- loadp JITStackFrame::globalData + 4[sp], t3 # Additional offset for return address
- storep cfr, JSGlobalData::topCallFrame[t3]
+ loadp JITStackFrame::vm + 4[sp], t3 # Additional offset for return address
+ storep cfr, VM::topCallFrame[t3]
peek 0, t1
storep t1, ReturnPC[cfr]
move cfr, t2 # t2 = ecx
@@ -1814,10 +1853,10 @@ macro nativeCallTrampoline(executableOffsetToFunction)
move t0, cfr
call executableOffsetToFunction[t1]
addp 16 - 4, sp
- loadp JITStackFrame::globalData + 4[sp], t3
- elsif ARMv7
- loadp JITStackFrame::globalData[sp], t3
- storep cfr, JSGlobalData::topCallFrame[t3]
+ loadp JITStackFrame::vm + 4[sp], t3
+ elsif ARM or ARMv7 or ARMv7_TRADITIONAL
+ loadp JITStackFrame::vm[sp], t3
+ storep cfr, VM::topCallFrame[t3]
move t0, t2
preserveReturnAddressAfterCall(t3)
storep t3, ReturnPC[cfr]
@@ -1827,10 +1866,10 @@ macro nativeCallTrampoline(executableOffsetToFunction)
move t2, cfr
call executableOffsetToFunction[t1]
restoreReturnAddressBeforeReturn(t3)
- loadp JITStackFrame::globalData[sp], t3
+ loadp JITStackFrame::vm[sp], t3
elsif MIPS
- loadp JITStackFrame::globalData[sp], t3
- storep cfr, JSGlobalData::topCallFrame[t3]
+ loadp JITStackFrame::vm[sp], t3
+ storep cfr, VM::topCallFrame[t3]
move t0, t2
preserveReturnAddressAfterCall(t3)
storep t3, ReturnPC[cfr]
@@ -1841,10 +1880,23 @@ macro nativeCallTrampoline(executableOffsetToFunction)
move t0, a0
call executableOffsetToFunction[t1]
restoreReturnAddressBeforeReturn(t3)
- loadp JITStackFrame::globalData[sp], t3
+ loadp JITStackFrame::vm[sp], t3
+ elsif SH4
+ loadp JITStackFrame::vm[sp], t3
+ storep cfr, VM::topCallFrame[t3]
+ move t0, t2
+ preserveReturnAddressAfterCall(t3)
+ storep t3, ReturnPC[cfr]
+ move cfr, t0
+ loadi Callee + PayloadOffset[cfr], t1
+ loadp JSFunction::m_executable[t1], t1
+ move t2, cfr
+ call executableOffsetToFunction[t1]
+ restoreReturnAddressBeforeReturn(t3)
+ loadp JITStackFrame::vm[sp], t3
elsif C_LOOP
- loadp JITStackFrame::globalData[sp], t3
- storep cfr, JSGlobalData::topCallFrame[t3]
+ loadp JITStackFrame::vm[sp], t3
+ storep cfr, VM::topCallFrame[t3]
move t0, t2
preserveReturnAddressAfterCall(t3)
storep t3, ReturnPC[cfr]
@@ -1854,11 +1906,11 @@ macro nativeCallTrampoline(executableOffsetToFunction)
move t2, cfr
cloopCallNative executableOffsetToFunction[t1]
restoreReturnAddressBeforeReturn(t3)
- loadp JITStackFrame::globalData[sp], t3
+ loadp JITStackFrame::vm[sp], t3
else
error
end
- bineq JSGlobalData::exception + TagOffset[t3], EmptyValueTag, .exception
+ bineq VM::exception + TagOffset[t3], EmptyValueTag, .exception
ret
.exception:
preserveReturnAddressAfterCall(t1) # This is really only needed on X86
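
Every per-architecture branch of nativeCallTrampoline follows the same shape: publish the call frame in VM::topCallFrame, call the host function through the executable, then branch to the exception path if VM::exception is no longer empty. A condensed C++ view of that contract; the types and the host-function signature are simplified stand-ins:

    #include <cstdio>

    struct FakeCallFrame;

    // Stand-in VM: the trampoline publishes the frame before the call and checks
    // the exception slot right after it.
    struct FakeVM {
        FakeCallFrame* topCallFrame = nullptr;
        const char* exception = nullptr;       // null plays the role of EmptyValueTag
    };

    struct FakeCallFrame { FakeVM* vm; };

    using HostFunction = void (*)(FakeCallFrame*);

    bool callHostFunction(FakeCallFrame* frame, HostFunction function)
    {
        FakeVM* vm = frame->vm;
        vm->topCallFrame = frame;              // storep cfr, VM::topCallFrame[t3]
        function(frame);                       // call executableOffsetToFunction[t1]
        if (vm->exception)                     // bineq VM::exception ..., .exception
            return false;                      // take the throw-from-native path
        return true;                           // plain ret
    }

    int main()
    {
        FakeVM vm;
        FakeCallFrame frame { &vm };
        HostFunction throwing = [](FakeCallFrame* f) { f->vm->exception = "TypeError"; };
        std::printf("ok: %d\n", callHostFunction(&frame, throwing));
    }
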
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
index ed6799ef3..741963573 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
@@ -126,6 +126,16 @@ macro callCallSlowPath(advance, slowPath, action)
action(t0)
end
+macro callWatchdogTimerHandler(throwHandler)
+ storei PC, ArgumentCount + TagOffset[cfr]
+ prepareStateForCCall()
+ cCall2(_llint_slow_path_handle_watchdog_timer, cfr, PC)
+ move t1, cfr
+ btpnz t0, throwHandler
+ move t3, PB
+ loadi ArgumentCount + TagOffset[cfr], PC
+end
+
macro checkSwitchToJITForLoop()
checkSwitchToJIT(
1,
@@ -186,9 +196,9 @@ macro functionArityCheck(doneLabel, slow_path)
cCall2(slow_path, cfr, PC) # This slow_path has a simple protocol: t0 = 0 => no error, t0 != 0 => error
move t1, cfr
btiz t0, .continue
- loadp JITStackFrame::globalData[sp], t1
- loadp JSGlobalData::callFrameForThrow[t1], t0
- jmp JSGlobalData::targetMachinePCForThrow[t1]
+ loadp JITStackFrame::vm[sp], t1
+ loadp VM::callFrameForThrow[t1], t0
+ jmp VM::targetMachinePCForThrow[t1]
.continue:
# Reload CodeBlock and reset PC, since the slow_path clobbered them.
loadp CodeBlock[cfr], t1
@@ -243,16 +253,17 @@ _llint_op_create_this:
traceExecution()
loadisFromInstruction(2, t0)
loadp [cfr, t0, 8], t0
- loadp JSFunction::m_cachedInheritorID[t0], t2
- btpz t2, .opCreateThisSlow
- allocateBasicJSObject(JSFinalObjectSizeClassIndex, t2, t0, t1, t3, .opCreateThisSlow)
+ loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_allocator[t0], t1
+ loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_structure[t0], t2
+ btpz t1, .opCreateThisSlow
+ allocateJSObject(t1, t2, t0, t3, .opCreateThisSlow)
loadisFromInstruction(1, t1)
storeq t0, [cfr, t1, 8]
- dispatch(3)
+ dispatch(4)
.opCreateThisSlow:
callSlowPath(_llint_slow_path_create_this)
- dispatch(3)
+ dispatch(4)
_llint_op_get_callee:
@@ -283,17 +294,17 @@ _llint_op_convert_this:
_llint_op_new_object:
traceExecution()
- loadp CodeBlock[cfr], t0
- loadp CodeBlock::m_globalObject[t0], t0
- loadp JSGlobalObject::m_emptyObjectStructure[t0], t1
- allocateBasicJSObject(JSFinalObjectSizeClassIndex, t1, t0, t2, t3, .opNewObjectSlow)
+ loadpFromInstruction(3, t0)
+ loadp ObjectAllocationProfile::m_allocator[t0], t1
+ loadp ObjectAllocationProfile::m_structure[t0], t2
+ allocateJSObject(t1, t2, t0, t3, .opNewObjectSlow)
loadisFromInstruction(1, t1)
storeq t0, [cfr, t1, 8]
- dispatch(2)
+ dispatch(4)
.opNewObjectSlow:
callSlowPath(_llint_slow_path_new_object)
- dispatch(2)
+ dispatch(4)
_llint_op_mov:
@@ -440,63 +451,31 @@ macro preOp(arithmeticOperation, slowPath)
dispatch(2)
end
-_llint_op_pre_inc:
+_llint_op_inc:
preOp(
macro (value, slow) baddio 1, value, slow end,
_llint_slow_path_pre_inc)
-_llint_op_pre_dec:
+_llint_op_dec:
preOp(
macro (value, slow) bsubio 1, value, slow end,
_llint_slow_path_pre_dec)
-macro postOp(arithmeticOperation, slowPath)
- traceExecution()
- loadisFromInstruction(2, t0)
- loadisFromInstruction(1, t1)
- loadq [cfr, t0, 8], t2
- bieq t0, t1, .done
- bqb t2, tagTypeNumber, .slow
- move t2, t3
- arithmeticOperation(t3, .slow)
- orq tagTypeNumber, t3
- storeq t2, [cfr, t1, 8]
- storeq t3, [cfr, t0, 8]
-.done:
- dispatch(3)
-
-.slow:
- callSlowPath(slowPath)
- dispatch(3)
-end
-
-_llint_op_post_inc:
- postOp(
- macro (value, slow) baddio 1, value, slow end,
- _llint_slow_path_post_inc)
-
-
-_llint_op_post_dec:
- postOp(
- macro (value, slow) bsubio 1, value, slow end,
- _llint_slow_path_post_dec)
-
-
-_llint_op_to_jsnumber:
+_llint_op_to_number:
traceExecution()
loadisFromInstruction(2, t0)
loadisFromInstruction(1, t1)
loadConstantOrVariable(t0, t2)
- bqaeq t2, tagTypeNumber, .opToJsnumberIsImmediate
- btqz t2, tagTypeNumber, .opToJsnumberSlow
-.opToJsnumberIsImmediate:
+ bqaeq t2, tagTypeNumber, .opToNumberIsImmediate
+ btqz t2, tagTypeNumber, .opToNumberSlow
+.opToNumberIsImmediate:
storeq t2, [cfr, t1, 8]
dispatch(3)
-.opToJsnumberSlow:
- callSlowPath(_llint_slow_path_to_jsnumber)
+.opToNumberSlow:
+ callSlowPath(_llint_slow_path_to_number)
dispatch(3)
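
The 64-bit op_to_number relies on the JSVALUE64 encoding instead of a tag word: int32 immediates carry all of the tagTypeNumber bits, doubles (stored with a 2^48 offset) carry some of them, and values with none of those bits set are cells or other non-numbers. A tiny sketch of the two tests; the constants follow the usual convention but should be read as illustrative:

    #include <cstdint>
    #include <cstdio>

    // JSVALUE64-style tagging (illustrative): the top 16 bits distinguish int32
    // immediates, offset-encoded doubles, and pointer-like values.
    constexpr uint64_t tagTypeNumber = 0xffff000000000000ull;

    bool isAlreadyNumber(uint64_t bits)
    {
        if (bits >= tagTypeNumber)
            return true;           // int32 immediate: all tag bits set
        if (!(bits & tagTypeNumber))
            return false;          // no tag bits: cell or other non-number
        return true;               // some tag bits: an offset-encoded double
    }

    int main()
    {
        uint64_t smallInt = tagTypeNumber | 7;                               // the int 7
        uint64_t aDouble  = 0x3ff0000000000000ull + 0x0001000000000000ull;   // 1.0, offset-encoded
        uint64_t aCell    = 0x00007f0000001230ull;                           // pointer-like bits
        std::printf("%d %d %d\n",
            isAlreadyNumber(smallInt), isAlreadyNumber(aDouble), isAlreadyNumber(aCell));
    }
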
@@ -907,7 +886,7 @@ _llint_op_get_array_length:
loadisFromInstruction(1, t1)
loadpFromInstruction(8, t2)
loadp JSObject::m_butterfly[t3], t0
- loadi -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0], t0
+ loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0
bilt t0, 0, .opGetArrayLengthSlow
orq tagTypeNumber, t0
valueProfile(t0, t2)
@@ -1040,18 +1019,18 @@ _llint_op_get_by_val:
bineq t2, ContiguousShape, .opGetByValNotContiguous
.opGetByValIsContiguous:
- biaeq t1, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t3], .opGetByValSlow
+ biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t3], .opGetByValOutOfBounds
loadisFromInstruction(1, t0)
loadq [t3, t1, 8], t2
- btqz t2, .opGetByValSlow
+ btqz t2, .opGetByValOutOfBounds
jmp .opGetByValDone
.opGetByValNotContiguous:
bineq t2, DoubleShape, .opGetByValNotDouble
- biaeq t1, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t3], .opGetByValSlow
+ biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t3], .opGetByValOutOfBounds
loadis 8[PB, PC, 8], t0
loadd [t3, t1, 8], ft0
- bdnequn ft0, ft0, .opGetByValSlow
+ bdnequn ft0, ft0, .opGetByValOutOfBounds
fd2q ft0, t2
subq tagTypeNumber, t2
jmp .opGetByValDone
@@ -1059,10 +1038,10 @@ _llint_op_get_by_val:
.opGetByValNotDouble:
subi ArrayStorageShape, t2
bia t2, SlowPutArrayStorageShape - ArrayStorageShape, .opGetByValSlow
- biaeq t1, -sizeof IndexingHeader + IndexingHeader::m_vectorLength[t3], .opGetByValSlow
+ biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t3], .opGetByValOutOfBounds
loadisFromInstruction(1, t0)
loadq ArrayStorage::m_vector[t3, t1, 8], t2
- btqz t2, .opGetByValSlow
+ btqz t2, .opGetByValOutOfBounds
.opGetByValDone:
storeq t2, [cfr, t0, 8]
@@ -1070,6 +1049,11 @@ _llint_op_get_by_val:
valueProfile(t2, t0)
dispatch(6)
+.opGetByValOutOfBounds:
+ if VALUE_PROFILER
+ loadpFromInstruction(4, t0)
+ storeb 1, ArrayProfile::m_outOfBounds[t0]
+ end
.opGetByValSlow:
callSlowPath(_llint_slow_path_get_by_val)
dispatch(6)
@@ -1089,7 +1073,7 @@ _llint_op_get_argument_by_val:
negi t2
sxi2q t2, t2
loadisFromInstruction(1, t3)
- loadpFromInstruction(4, t1)
+ loadpFromInstruction(5, t1)
loadq ThisArgumentOffset[cfr, t2, 8], t0
storeq t0, [cfr, t3, 8]
valueProfile(t0, t1)
@@ -1133,20 +1117,20 @@ _llint_op_get_by_pname:
macro contiguousPutByVal(storeCallback)
- biaeq t3, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0], .outOfBounds
+ biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .outOfBounds
.storeResult:
loadisFromInstruction(3, t2)
storeCallback(t2, t1, [t0, t3, 8])
dispatch(5)
.outOfBounds:
- biaeq t3, -sizeof IndexingHeader + IndexingHeader::m_vectorLength[t0], .opPutByValSlow
+ biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
if VALUE_PROFILER
loadp 32[PB, PC, 8], t2
storeb 1, ArrayProfile::m_mayStoreToHole[t2]
end
addi 1, t3, t2
- storei t2, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0]
+ storei t2, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]
jmp .storeResult
end
@@ -1197,7 +1181,7 @@ _llint_op_put_by_val:
.opPutByValNotContiguous:
bineq t2, ArrayStorageShape, .opPutByValSlow
- biaeq t3, -sizeof IndexingHeader + IndexingHeader::m_vectorLength[t0], .opPutByValSlow
+ biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
btqz ArrayStorage::m_vector[t0, t3, 8], .opPutByValArrayStorageEmpty
.opPutByValArrayStorageStoreResult:
loadisFromInstruction(3, t2)
@@ -1212,21 +1196,21 @@ _llint_op_put_by_val:
storeb 1, ArrayProfile::m_mayStoreToHole[t1]
end
addi 1, ArrayStorage::m_numValuesInVector[t0]
- bib t3, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0], .opPutByValArrayStorageStoreResult
+ bib t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .opPutByValArrayStorageStoreResult
addi 1, t3, t1
- storei t1, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0]
+ storei t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]
jmp .opPutByValArrayStorageStoreResult
+.opPutByValOutOfBounds:
+ if VALUE_PROFILER
+ loadpFromInstruction(4, t0)
+ storeb 1, ArrayProfile::m_outOfBounds[t0]
+ end
.opPutByValSlow:
callSlowPath(_llint_slow_path_put_by_val)
dispatch(5)
-_llint_op_loop:
- traceExecution()
- dispatchIntIndirect(1)
-
-
_llint_op_jmp:
traceExecution()
dispatchIntIndirect(1)
@@ -1581,16 +1565,16 @@ _llint_op_catch:
# the interpreter's throw trampoline (see _llint_throw_trampoline).
# The JIT throwing protocol calls for the cfr to be in t0. The throwing
# code must have known that we were throwing to the interpreter, and have
- # set JSGlobalData::targetInterpreterPCForThrow.
+ # set VM::targetInterpreterPCForThrow.
move t0, cfr
loadp CodeBlock[cfr], PB
loadp CodeBlock::m_instructions[PB], PB
- loadp JITStackFrame::globalData[sp], t3
- loadp JSGlobalData::targetInterpreterPCForThrow[t3], PC
+ loadp JITStackFrame::vm[sp], t3
+ loadp VM::targetInterpreterPCForThrow[t3], PC
subp PB, PC
rshiftp 3, PC
- loadq JSGlobalData::exception[t3], t0
- storeq 0, JSGlobalData::exception[t3]
+ loadq VM::exception[t3], t0
+ storeq 0, VM::exception[t3]
loadisFromInstruction(1, t2)
storeq t0, [cfr, t2, 8]
traceExecution()
@@ -1610,23 +1594,84 @@ _llint_throw_from_slow_path_trampoline:
# When throwing from the interpreter (i.e. throwing from LLIntSlowPaths), so
# the throw target is not necessarily interpreted code, we come to here.
# This essentially emulates the JIT's throwing protocol.
- loadp JITStackFrame::globalData[sp], t1
- loadp JSGlobalData::callFrameForThrow[t1], t0
- jmp JSGlobalData::targetMachinePCForThrow[t1]
+ loadp JITStackFrame::vm[sp], t1
+ loadp VM::callFrameForThrow[t1], t0
+ jmp VM::targetMachinePCForThrow[t1]
_llint_throw_during_call_trampoline:
preserveReturnAddressAfterCall(t2)
- loadp JITStackFrame::globalData[sp], t1
- loadp JSGlobalData::callFrameForThrow[t1], t0
- jmp JSGlobalData::targetMachinePCForThrow[t1]
+ loadp JITStackFrame::vm[sp], t1
+ loadp VM::callFrameForThrow[t1], t0
+ jmp VM::targetMachinePCForThrow[t1]
+
+# Gives you the scope in t0, while allowing you to optionally perform additional checks on the
+# scopes as they are traversed. scopeCheck() is called with two arguments: the register
+# holding the scope, and a register that can be used for scratch. Note that this does not
+# use t3, so you can hold stuff in t3 if need be.
+macro getDeBruijnScope(deBruijinIndexOperand, scopeCheck)
+ loadp ScopeChain[cfr], t0
+ loadis deBruijinIndexOperand, t2
+
+ btiz t2, .done
+ loadp CodeBlock[cfr], t1
+ bineq CodeBlock::m_codeType[t1], FunctionCode, .loop
+ btbz CodeBlock::m_needsActivation[t1], .loop
+
+ loadis CodeBlock::m_activationRegister[t1], t1
+
+ # Need to conditionally skip over one scope.
+ btpz [cfr, t1, 8], .noActivation
+ scopeCheck(t0, t1)
+ loadp JSScope::m_next[t0], t0
+.noActivation:
+ subi 1, t2
+
+ btiz t2, .done
+.loop:
+ scopeCheck(t0, t1)
+ loadp JSScope::m_next[t0], t0
+ subi 1, t2
+ btinz t2, .loop
+
+.done:
+end
+
+_llint_op_get_scoped_var:
+ traceExecution()
+ # Operands are as follows:
+ # pc[1]: Destination for the load
+ # pc[2]: Index of register in the scope
+    # 24[PB, PC, 8] De Bruijn index.
+ getDeBruijnScope(24[PB, PC, 8], macro (scope, scratch) end)
+ loadisFromInstruction(1, t1)
+ loadisFromInstruction(2, t2)
+
+ loadp JSVariableObject::m_registers[t0], t0
+ loadp [t0, t2, 8], t3
+ storep t3, [cfr, t1, 8]
+ loadp 32[PB, PC, 8], t1
+ valueProfile(t3, t1)
+ dispatch(5)
+
+
+_llint_op_put_scoped_var:
+ traceExecution()
+ getDeBruijnScope(16[PB, PC, 8], macro (scope, scratch) end)
+ loadis 24[PB, PC, 8], t1
+ loadConstantOrVariable(t1, t3)
+ loadis 8[PB, PC, 8], t1
+ writeBarrier(t3)
+ loadp JSVariableObject::m_registers[t0], t0
+ storep t3, [t0, t1, 8]
+ dispatch(4)
macro nativeCallTrampoline(executableOffsetToFunction)
storep 0, CodeBlock[cfr]
if X86_64
- loadp JITStackFrame::globalData + 8[sp], t0
- storep cfr, JSGlobalData::topCallFrame[t0]
+ loadp JITStackFrame::vm + 8[sp], t0
+ storep cfr, VM::topCallFrame[t0]
loadp CallerFrame[cfr], t0
loadq ScopeChain[t0], t1
storeq t1, ScopeChain[cfr]
@@ -1639,15 +1684,15 @@ macro nativeCallTrampoline(executableOffsetToFunction)
move t0, cfr # Restore cfr to avoid loading from stack
call executableOffsetToFunction[t1]
addp 16 - 8, sp
- loadp JITStackFrame::globalData + 8[sp], t3
+ loadp JITStackFrame::vm + 8[sp], t3
elsif C_LOOP
loadp CallerFrame[cfr], t0
loadp ScopeChain[t0], t1
storep t1, ScopeChain[cfr]
- loadp JITStackFrame::globalData[sp], t3
- storep cfr, JSGlobalData::topCallFrame[t3]
+ loadp JITStackFrame::vm[sp], t3
+ storep cfr, VM::topCallFrame[t3]
move t0, t2
preserveReturnAddressAfterCall(t3)
@@ -1659,20 +1704,20 @@ macro nativeCallTrampoline(executableOffsetToFunction)
cloopCallNative executableOffsetToFunction[t1]
restoreReturnAddressBeforeReturn(t3)
- loadp JITStackFrame::globalData[sp], t3
+ loadp JITStackFrame::vm[sp], t3
else
error
end
- btqnz JSGlobalData::exception[t3], .exception
+ btqnz VM::exception[t3], .exception
ret
.exception:
preserveReturnAddressAfterCall(t1)
loadi ArgumentCount + TagOffset[cfr], PC
loadp CodeBlock[cfr], PB
loadp CodeBlock::m_instructions[PB], PB
- loadp JITStackFrame::globalData[sp], t0
- storep cfr, JSGlobalData::topCallFrame[t0]
+ loadp JITStackFrame::vm[sp], t0
+ storep cfr, VM::topCallFrame[t0]
callSlowPath(_llint_throw_from_native_call)
jmp _llint_throw_from_slow_path_trampoline
end