author     Simon Hausmann <simon.hausmann@digia.com>  2012-11-22 09:09:45 +0100
committer  Simon Hausmann <simon.hausmann@digia.com>  2012-11-22 09:10:13 +0100
commit     470286ecfe79d59df14944e5b5d34630fc739391 (patch)
tree       43983212872e06cebefd2ae474418fa2908ca54c  /Source/JavaScriptCore/jit
parent     23037105e948c2065da5a937d3a2396b0ff45c1e (diff)
download   qtwebkit-470286ecfe79d59df14944e5b5d34630fc739391.tar.gz
Imported WebKit commit e89504fa9195b2063b2530961d4b73dd08de3242 (http://svn.webkit.org/repository/webkit/trunk@135485)
Change-Id: I03774e5ac79721c13ffa30d152537a74d0b12e66
Reviewed-by: Simon Hausmann <simon.hausmann@digia.com>
Diffstat (limited to 'Source/JavaScriptCore/jit')
-rw-r--r--  Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp     62
-rw-r--r--  Source/JavaScriptCore/jit/ClosureCallStubRoutine.h       66
-rw-r--r--  Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp       4
-rw-r--r--  Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h         5
-rw-r--r--  Source/JavaScriptCore/jit/HostCallReturnValue.cpp         2
-rw-r--r--  Source/JavaScriptCore/jit/JIT.cpp                        37
-rw-r--r--  Source/JavaScriptCore/jit/JIT.h                          21
-rw-r--r--  Source/JavaScriptCore/jit/JITArithmetic.cpp              12
-rw-r--r--  Source/JavaScriptCore/jit/JITArithmetic32_64.cpp          2
-rw-r--r--  Source/JavaScriptCore/jit/JITCall.cpp                     2
-rw-r--r--  Source/JavaScriptCore/jit/JITCall32_64.cpp                2
-rw-r--r--  Source/JavaScriptCore/jit/JITDisassembler.cpp           101
-rw-r--r--  Source/JavaScriptCore/jit/JITDisassembler.h              78
-rw-r--r--  Source/JavaScriptCore/jit/JITExceptions.cpp               2
-rw-r--r--  Source/JavaScriptCore/jit/JITInlines.h (renamed from Source/JavaScriptCore/jit/JITInlineMethods.h)  26
-rw-r--r--  Source/JavaScriptCore/jit/JITOpcodes.cpp                 18
-rw-r--r--  Source/JavaScriptCore/jit/JITOpcodes32_64.cpp            14
-rw-r--r--  Source/JavaScriptCore/jit/JITPropertyAccess.cpp          90
-rw-r--r--  Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp     73
-rw-r--r--  Source/JavaScriptCore/jit/JITStubRoutine.h                8
-rw-r--r--  Source/JavaScriptCore/jit/JITStubs.cpp                   39
-rw-r--r--  Source/JavaScriptCore/jit/JITStubs.h                      2
-rw-r--r--  Source/JavaScriptCore/jit/JumpReplacementWatchpoint.cpp   2
23 files changed, 596 insertions, 72 deletions
diff --git a/Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp b/Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp
new file mode 100644
index 000000000..73704aa03
--- /dev/null
+++ b/Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ClosureCallStubRoutine.h"
+
+#if ENABLE(JIT)
+
+#include "Executable.h"
+#include "Heap.h"
+#include "JSGlobalData.h"
+#include "SlotVisitor.h"
+#include "Structure.h"
+
+namespace JSC {
+
+ClosureCallStubRoutine::ClosureCallStubRoutine(
+ const MacroAssemblerCodeRef& code, JSGlobalData& globalData, const JSCell* owner,
+ Structure* structure, ExecutableBase* executable, const CodeOrigin& codeOrigin)
+ : GCAwareJITStubRoutine(code, globalData, true)
+ , m_structure(globalData, owner, structure)
+ , m_executable(globalData, owner, executable)
+ , m_codeOrigin(codeOrigin)
+{
+}
+
+ClosureCallStubRoutine::~ClosureCallStubRoutine()
+{
+}
+
+void ClosureCallStubRoutine::markRequiredObjectsInternal(SlotVisitor& visitor)
+{
+ visitor.append(&m_structure);
+ visitor.append(&m_executable);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
diff --git a/Source/JavaScriptCore/jit/ClosureCallStubRoutine.h b/Source/JavaScriptCore/jit/ClosureCallStubRoutine.h
new file mode 100644
index 000000000..3fd020691
--- /dev/null
+++ b/Source/JavaScriptCore/jit/ClosureCallStubRoutine.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ClosureCallStubRoutine_h
+#define ClosureCallStubRoutine_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(JIT)
+
+#include "CodeOrigin.h"
+#include "GCAwareJITStubRoutine.h"
+
+namespace JSC {
+
+class ClosureCallStubRoutine : public GCAwareJITStubRoutine {
+public:
+ ClosureCallStubRoutine(
+ const MacroAssemblerCodeRef&, JSGlobalData&, const JSCell* owner,
+ Structure*, ExecutableBase*, const CodeOrigin&);
+
+ virtual ~ClosureCallStubRoutine();
+
+ Structure* structure() const { return m_structure.get(); }
+ ExecutableBase* executable() const { return m_executable.get(); }
+ const CodeOrigin& codeOrigin() const { return m_codeOrigin; }
+
+protected:
+ virtual void markRequiredObjectsInternal(SlotVisitor&);
+
+private:
+ WriteBarrier<Structure> m_structure;
+ WriteBarrier<ExecutableBase> m_executable;
+ // This allows us to figure out who a call is linked to by searching through
+ // stub routines.
+ CodeOrigin m_codeOrigin;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // ClosureCallStubRoutine_h
+
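The m_codeOrigin member exists, per the comment above, so call-unlinking code can figure out which call site a closure-call stub belongs to by searching the live stub routines. A minimal sketch (not part of this commit) of such a lookup, using only the accessors declared above; the surrounding collection, the helper name, and the bytecodeIndex-only comparison (which ignores inlining) are illustrative assumptions:

    // Hypothetical helper: find the closure-call stub linked at a given bytecode index.
    static ClosureCallStubRoutine* findClosureCallStub(
        const Vector<GCAwareJITStubRoutine*>& routines, unsigned bytecodeIndex)
    {
        for (size_t i = 0; i < routines.size(); ++i) {
            GCAwareJITStubRoutine* routine = routines[i];
            if (!routine->isClosureCall())
                continue;
            ClosureCallStubRoutine* stub = static_cast<ClosureCallStubRoutine*>(routine);
            if (stub->codeOrigin().bytecodeIndex == bytecodeIndex)
                return stub;
        }
        return 0;
    }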
diff --git a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp
index 0f0eb14b7..521e49751 100644
--- a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp
+++ b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp
@@ -30,17 +30,17 @@
#include "Heap.h"
#include "JSGlobalData.h"
-
#include "SlotVisitor.h"
#include "Structure.h"
namespace JSC {
GCAwareJITStubRoutine::GCAwareJITStubRoutine(
- const MacroAssemblerCodeRef& code, JSGlobalData& globalData)
+ const MacroAssemblerCodeRef& code, JSGlobalData& globalData, bool isClosureCall)
: JITStubRoutine(code)
, m_mayBeExecuting(false)
, m_isJettisoned(false)
+ , m_isClosureCall(isClosureCall)
{
globalData.heap.m_jitStubRoutines.add(this);
}
diff --git a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h
index 59bc76beb..e5ce281e8 100644
--- a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h
+++ b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h
@@ -54,7 +54,7 @@ class JITStubRoutineSet;
// list which does not get reclaimed all at once).
class GCAwareJITStubRoutine : public JITStubRoutine {
public:
- GCAwareJITStubRoutine(const MacroAssemblerCodeRef&, JSGlobalData&);
+ GCAwareJITStubRoutine(const MacroAssemblerCodeRef&, JSGlobalData&, bool isClosureCall = false);
virtual ~GCAwareJITStubRoutine();
void markRequiredObjects(SlotVisitor& visitor)
@@ -64,6 +64,8 @@ public:
void deleteFromGC();
+ bool isClosureCall() const { return m_isClosureCall; }
+
protected:
virtual void observeZeroRefCount();
@@ -74,6 +76,7 @@ private:
bool m_mayBeExecuting;
bool m_isJettisoned;
+ bool m_isClosureCall;
};
// Use this if you want to mark one additional object during GC if your stub
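The new isClosureCall() flag lets code walking the heap's registered stub routines tell closure-call stubs apart without RTTI; the default argument keeps existing constructor callers unchanged. A sketch of the marking side, assuming the visitor is handed the set the constructor registered with (the loop and parameter names are illustrative):

    // Sketch: marking pass over GC-aware stub routines.
    static void markStubRoutines(SlotVisitor& visitor, const Vector<GCAwareJITStubRoutine*>& routines)
    {
        for (size_t i = 0; i < routines.size(); ++i) {
            GCAwareJITStubRoutine* routine = routines[i];
            // markRequiredObjects() dispatches to the virtual markRequiredObjectsInternal()
            // hook, which for ClosureCallStubRoutine appends m_structure and m_executable.
            routine->markRequiredObjects(visitor);
        }
    }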
diff --git a/Source/JavaScriptCore/jit/HostCallReturnValue.cpp b/Source/JavaScriptCore/jit/HostCallReturnValue.cpp
index c4d2e6ad9..967c499b9 100644
--- a/Source/JavaScriptCore/jit/HostCallReturnValue.cpp
+++ b/Source/JavaScriptCore/jit/HostCallReturnValue.cpp
@@ -29,7 +29,7 @@
#include "CallFrame.h"
#include <wtf/InlineASM.h>
#include "JSObject.h"
-#include "JSValueInlineMethods.h"
+#include "JSValueInlines.h"
namespace JSC {
diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp
index 3102c7693..cccf33bf6 100644
--- a/Source/JavaScriptCore/jit/JIT.cpp
+++ b/Source/JavaScriptCore/jit/JIT.cpp
@@ -38,7 +38,7 @@ JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse
#include <wtf/CryptographicallyRandomNumber.h>
#include "DFGNode.h" // for DFG_SUCCESS_STATS
#include "Interpreter.h"
-#include "JITInlineMethods.h"
+#include "JITInlines.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
@@ -212,6 +212,8 @@ void JIT::privateCompileMainPass()
m_callLinkInfoIndex = 0;
for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
+ if (m_disassembler)
+ m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);
@@ -228,7 +230,7 @@ void JIT::privateCompileMainPass()
m_labels[m_bytecodeOffset] = label();
#if ENABLE(JIT_VERBOSE)
- dataLog("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
+ dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif
switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
@@ -251,6 +253,7 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_call_varargs)
DEFINE_OP(op_catch)
DEFINE_OP(op_construct)
+ DEFINE_OP(op_get_callee)
DEFINE_OP(op_create_this)
DEFINE_OP(op_convert_this)
DEFINE_OP(op_init_lazy_reg)
@@ -448,8 +451,11 @@ void JIT::privateCompileSlowCases()
#endif
#if ENABLE(JIT_VERBOSE)
- dataLog("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
+ dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif
+
+ if (m_disassembler)
+ m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());
switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
DEFINE_SLOWCASE_OP(op_add)
@@ -624,6 +630,12 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
break;
}
#endif
+
+ if (Options::showDisassembly())
+ m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock));
+
+ if (m_disassembler)
+ m_disassembler->setStartOfCode(label());
// Just add a little bit of randomness to the codegen
if (m_randomGenerator.getUint32() & 1)
@@ -683,6 +695,9 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
privateCompileMainPass();
privateCompileLinkPass();
privateCompileSlowCases();
+
+ if (m_disassembler)
+ m_disassembler->setEndOfSlowPath(label());
Label arityCheck;
if (m_codeBlock->codeType() == FunctionCode) {
@@ -712,6 +727,9 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
}
ASSERT(m_jmpTable.isEmpty());
+
+ if (m_disassembler)
+ m_disassembler->setEndOfCode(label());
LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock, effort);
if (patchBuffer.didFailToAllocate())
@@ -780,10 +798,11 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
info.callType = m_callStructureStubCompilationInfo[i].callType;
- info.bytecodeIndex = m_callStructureStubCompilationInfo[i].bytecodeIndex;
+ info.codeOrigin = CodeOrigin(m_callStructureStubCompilationInfo[i].bytecodeIndex);
info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
+ info.calleeGPR = regT0;
}
#if ENABLE(DFG_JIT) || ENABLE(LLINT)
@@ -803,11 +822,11 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
if (m_codeBlock->codeType() == FunctionCode && functionEntryArityCheck)
*functionEntryArityCheck = patchBuffer.locationOf(arityCheck);
+
+ if (m_disassembler)
+ m_disassembler->dump(patchBuffer);
- CodeRef result = FINALIZE_CODE(
- patchBuffer,
- ("Baseline JIT code for CodeBlock %p, instruction count = %u",
- m_codeBlock, m_codeBlock->instructionCount()));
+ CodeRef result = patchBuffer.finalizeCodeWithoutDisassembly();
m_globalData->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
static_cast<double>(result.size()) /
@@ -816,7 +835,7 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
#if ENABLE(JIT_VERBOSE)
- dataLog("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
+ dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif
return JITCode(result, JITCode::BaselineJIT);
diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h
index dcf87d352..c0d60add1 100644
--- a/Source/JavaScriptCore/jit/JIT.h
+++ b/Source/JavaScriptCore/jit/JIT.h
@@ -46,6 +46,7 @@
#include "CodeBlock.h"
#include "CompactJITCodeMap.h"
#include "Interpreter.h"
+#include "JITDisassembler.h"
#include "JSInterfaceJIT.h"
#include "Opcode.h"
#include "Profiler.h"
@@ -474,7 +475,9 @@ namespace JSC {
// Property is int-checked and zero extended. Base is cell checked.
// Structure is already profiled. Returns the slow cases. Fall-through
// case contains result in regT0, and it is not yet profiled.
- JumpList emitContiguousGetByVal(Instruction*, PatchableJump& badType);
+ JumpList emitInt32GetByVal(Instruction* instruction, PatchableJump& badType) { return emitContiguousGetByVal(instruction, badType, Int32Shape); }
+ JumpList emitDoubleGetByVal(Instruction*, PatchableJump& badType);
+ JumpList emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape);
JumpList emitArrayStorageGetByVal(Instruction*, PatchableJump& badType);
JumpList emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize, TypedArraySignedness);
JumpList emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize);
@@ -483,7 +486,19 @@ namespace JSC {
// The value to store is not yet loaded. Property is int-checked and
// zero-extended. Base is cell checked. Structure is already profiled.
// returns the slow cases.
- JumpList emitContiguousPutByVal(Instruction*, PatchableJump& badType);
+ JumpList emitInt32PutByVal(Instruction* currentInstruction, PatchableJump& badType)
+ {
+ return emitGenericContiguousPutByVal(currentInstruction, badType, Int32Shape);
+ }
+ JumpList emitDoublePutByVal(Instruction* currentInstruction, PatchableJump& badType)
+ {
+ return emitGenericContiguousPutByVal(currentInstruction, badType, DoubleShape);
+ }
+ JumpList emitContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType)
+ {
+ return emitGenericContiguousPutByVal(currentInstruction, badType);
+ }
+ JumpList emitGenericContiguousPutByVal(Instruction*, PatchableJump& badType, IndexingType indexingShape = ContiguousShape);
JumpList emitArrayStoragePutByVal(Instruction*, PatchableJump& badType);
JumpList emitIntTypedArrayPutByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize, TypedArraySignedness, TypedArrayRounding);
JumpList emitFloatTypedArrayPutByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize);
@@ -632,6 +647,7 @@ namespace JSC {
void emit_op_call_put_result(Instruction*);
void emit_op_catch(Instruction*);
void emit_op_construct(Instruction*);
+ void emit_op_get_callee(Instruction*);
void emit_op_create_this(Instruction*);
void emit_op_convert_this(Instruction*);
void emit_op_create_arguments(Instruction*);
@@ -930,6 +946,7 @@ namespace JSC {
int m_uninterruptedConstantSequenceBegin;
#endif
#endif
+ OwnPtr<JITDisassembler> m_disassembler;
WeakRandom m_randomGenerator;
static CodeRef stringGetByValStubGenerator(JSGlobalData*);
diff --git a/Source/JavaScriptCore/jit/JITArithmetic.cpp b/Source/JavaScriptCore/jit/JITArithmetic.cpp
index 21d59bc33..bcb3dd74a 100644
--- a/Source/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/Source/JavaScriptCore/jit/JITArithmetic.cpp
@@ -29,7 +29,7 @@
#include "JIT.h"
#include "CodeBlock.h"
-#include "JITInlineMethods.h"
+#include "JITInlines.h"
#include "JITStubCall.h"
#include "JITStubs.h"
#include "JSArray.h"
@@ -1090,18 +1090,20 @@ void JIT::emit_op_div(Instruction* currentInstruction)
// access). So if we are DFG compiling anything in the program, we want this code to
// ensure that it produces integers whenever possible.
- // FIXME: This will fail to convert to integer if the result is zero. We should
- // distinguish between positive zero and negative zero here.
-
JumpList notInteger;
branchConvertDoubleToInt32(fpRegT0, regT0, notInteger, fpRegT1);
// If we've got an integer, we might as well make that the result of the division.
emitFastArithReTagImmediate(regT0, regT0);
Jump isInteger = jump();
notInteger.link(this);
- add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset)->m_counter));
moveDoubleTo64(fpRegT0, regT0);
+ Jump doubleZero = branchTest64(Zero, regT0);
+ add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset)->m_counter));
sub64(tagTypeNumberRegister, regT0);
+ Jump trueDouble = jump();
+ doubleZero.link(this);
+ move(tagTypeNumberRegister, regT0);
+ trueDouble.link(this);
isInteger.link(this);
#else
// Double result.
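The op_div change above resolves the removed FIXME: a quotient whose raw 64-bit pattern is all zeros is +0.0 and gets retagged as the integer 0 (tagTypeNumberRegister alone encodes boxed int 0), while -0.0 has its sign bit set, stays boxed as a double, and is now the only zero that bumps the special fast-case profile. A standalone illustration of the bit patterns involved (not JSC code):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main()
    {
        double positiveZero = 0.0;
        double negativeZero = -0.0;
        uint64_t bits;

        std::memcpy(&bits, &positiveZero, sizeof(bits));
        std::printf("+0.0 bits: 0x%016llx\n", (unsigned long long)bits); // 0x0000000000000000

        std::memcpy(&bits, &negativeZero, sizeof(bits));
        std::printf("-0.0 bits: 0x%016llx\n", (unsigned long long)bits); // 0x8000000000000000
        return 0;
    }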
diff --git a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
index 62a359eeb..960d06091 100644
--- a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
@@ -30,7 +30,7 @@
#include "JIT.h"
#include "CodeBlock.h"
-#include "JITInlineMethods.h"
+#include "JITInlines.h"
#include "JITStubCall.h"
#include "JITStubs.h"
#include "JSArray.h"
diff --git a/Source/JavaScriptCore/jit/JITCall.cpp b/Source/JavaScriptCore/jit/JITCall.cpp
index 074bf7f97..006c5b741 100644
--- a/Source/JavaScriptCore/jit/JITCall.cpp
+++ b/Source/JavaScriptCore/jit/JITCall.cpp
@@ -31,7 +31,7 @@
#include "Arguments.h"
#include "CodeBlock.h"
-#include "JITInlineMethods.h"
+#include "JITInlines.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
diff --git a/Source/JavaScriptCore/jit/JITCall32_64.cpp b/Source/JavaScriptCore/jit/JITCall32_64.cpp
index ad827cdf9..ecd5cf126 100644
--- a/Source/JavaScriptCore/jit/JITCall32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITCall32_64.cpp
@@ -32,7 +32,7 @@
#include "Arguments.h"
#include "CodeBlock.h"
#include "Interpreter.h"
-#include "JITInlineMethods.h"
+#include "JITInlines.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
diff --git a/Source/JavaScriptCore/jit/JITDisassembler.cpp b/Source/JavaScriptCore/jit/JITDisassembler.cpp
new file mode 100644
index 000000000..35b939913
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITDisassembler.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITDisassembler.h"
+
+#if ENABLE(JIT)
+
+#include "CodeBlock.h"
+#include "JIT.h"
+
+namespace JSC {
+
+JITDisassembler::JITDisassembler(CodeBlock *codeBlock)
+ : m_codeBlock(codeBlock)
+ , m_labelForBytecodeIndexInMainPath(codeBlock->instructionCount())
+ , m_labelForBytecodeIndexInSlowPath(codeBlock->instructionCount())
+{
+}
+
+JITDisassembler::~JITDisassembler()
+{
+}
+
+void JITDisassembler::dump(LinkBuffer& linkBuffer)
+{
+ dataLogF("Baseline JIT code for CodeBlock %p, instruction count = %u:\n", m_codeBlock, m_codeBlock->instructionCount());
+ dataLogF(" Code at [%p, %p):\n", linkBuffer.debugAddress(), static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize());
+ dumpDisassembly(linkBuffer, m_startOfCode, m_labelForBytecodeIndexInMainPath[0]);
+
+ MacroAssembler::Label firstSlowLabel;
+ for (unsigned i = 0; i < m_labelForBytecodeIndexInSlowPath.size(); ++i) {
+ if (m_labelForBytecodeIndexInSlowPath[i].isSet()) {
+ firstSlowLabel = m_labelForBytecodeIndexInSlowPath[i];
+ break;
+ }
+ }
+ dumpForInstructions(linkBuffer, " ", m_labelForBytecodeIndexInMainPath, firstSlowLabel.isSet() ? firstSlowLabel : m_endOfSlowPath);
+ dataLogF(" (End Of Main Path)\n");
+ dumpForInstructions(linkBuffer, " (S) ", m_labelForBytecodeIndexInSlowPath, m_endOfSlowPath);
+ dataLogF(" (End Of Slow Path)\n");
+
+ dumpDisassembly(linkBuffer, m_endOfSlowPath, m_endOfCode);
+}
+
+void JITDisassembler::dumpForInstructions(LinkBuffer& linkBuffer, const char* prefix, Vector<MacroAssembler::Label>& labels, MacroAssembler::Label endLabel)
+{
+ for (unsigned i = 0 ; i < labels.size();) {
+ if (!labels[i].isSet()) {
+ i++;
+ continue;
+ }
+ dataLogF("%s", prefix);
+ m_codeBlock->dump(i);
+ for (unsigned nextIndex = i + 1; ; nextIndex++) {
+ if (nextIndex >= labels.size()) {
+ dumpDisassembly(linkBuffer, labels[i], endLabel);
+ return;
+ }
+ if (labels[nextIndex].isSet()) {
+ dumpDisassembly(linkBuffer, labels[i], labels[nextIndex]);
+ i = nextIndex;
+ break;
+ }
+ }
+ }
+}
+
+void JITDisassembler::dumpDisassembly(LinkBuffer& linkBuffer, MacroAssembler::Label from, MacroAssembler::Label to)
+{
+ CodeLocationLabel fromLocation = linkBuffer.locationOf(from);
+ CodeLocationLabel toLocation = linkBuffer.locationOf(to);
+ disassemble(fromLocation, bitwise_cast<uintptr_t>(toLocation.executableAddress()) - bitwise_cast<uintptr_t>(fromLocation.executableAddress()), " ", WTF::dataFile());
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
diff --git a/Source/JavaScriptCore/jit/JITDisassembler.h b/Source/JavaScriptCore/jit/JITDisassembler.h
new file mode 100644
index 000000000..f8e917d98
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITDisassembler.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITDisassembler_h
+#define JITDisassembler_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(JIT)
+
+#include "LinkBuffer.h"
+#include "MacroAssembler.h"
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+class CodeBlock;
+
+class JITDisassembler {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ JITDisassembler(CodeBlock*);
+ ~JITDisassembler();
+
+ void setStartOfCode(MacroAssembler::Label label) { m_startOfCode = label; }
+ void setForBytecodeMainPath(unsigned bytecodeIndex, MacroAssembler::Label label)
+ {
+ m_labelForBytecodeIndexInMainPath[bytecodeIndex] = label;
+ }
+ void setForBytecodeSlowPath(unsigned bytecodeIndex, MacroAssembler::Label label)
+ {
+ m_labelForBytecodeIndexInSlowPath[bytecodeIndex] = label;
+ }
+ void setEndOfSlowPath(MacroAssembler::Label label) { m_endOfSlowPath = label; }
+ void setEndOfCode(MacroAssembler::Label label) { m_endOfCode = label; }
+
+ void dump(LinkBuffer&);
+
+private:
+ void dumpForInstructions(LinkBuffer&, const char* prefix, Vector<MacroAssembler::Label>& labels, MacroAssembler::Label endLabel);
+ void dumpDisassembly(LinkBuffer&, MacroAssembler::Label from, MacroAssembler::Label to);
+
+ CodeBlock* m_codeBlock;
+ MacroAssembler::Label m_startOfCode;
+ Vector<MacroAssembler::Label> m_labelForBytecodeIndexInMainPath;
+ Vector<MacroAssembler::Label> m_labelForBytecodeIndexInSlowPath;
+ MacroAssembler::Label m_endOfSlowPath;
+ MacroAssembler::Label m_endOfCode;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // JITDisassembler_h
+
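Putting the header together with the JIT.cpp hooks added earlier in this commit, the intended call sequence looks roughly like the condensed sketch below (the surrounding privateCompile() logic is elided):

    // Condensed from the JIT::privateCompile() changes in this commit.
    if (Options::showDisassembly())
        m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock));

    if (m_disassembler)
        m_disassembler->setStartOfCode(label());
    // Main pass: before emitting each bytecode,
    //     m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
    // Slow cases: m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());
    if (m_disassembler)
        m_disassembler->setEndOfSlowPath(label());
    // ... arity check and remaining epilogue code ...
    if (m_disassembler)
        m_disassembler->setEndOfCode(label());

    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock, effort);
    if (m_disassembler)
        m_disassembler->dump(patchBuffer);
    CodeRef result = patchBuffer.finalizeCodeWithoutDisassembly();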
diff --git a/Source/JavaScriptCore/jit/JITExceptions.cpp b/Source/JavaScriptCore/jit/JITExceptions.cpp
index f6cec24bd..aeb869474 100644
--- a/Source/JavaScriptCore/jit/JITExceptions.cpp
+++ b/Source/JavaScriptCore/jit/JITExceptions.cpp
@@ -39,7 +39,7 @@ namespace JSC {
ExceptionHandler genericThrow(JSGlobalData* globalData, ExecState* callFrame, JSValue exceptionValue, unsigned vPCIndex)
{
ASSERT(exceptionValue);
-
+
globalData->exception = JSValue();
HandlerInfo* handler = globalData->interpreter->throwException(callFrame, exceptionValue, vPCIndex); // This may update callFrame & exceptionValue!
globalData->exception = exceptionValue;
diff --git a/Source/JavaScriptCore/jit/JITInlineMethods.h b/Source/JavaScriptCore/jit/JITInlines.h
index 410bdf710..e6f95b94c 100644
--- a/Source/JavaScriptCore/jit/JITInlineMethods.h
+++ b/Source/JavaScriptCore/jit/JITInlines.h
@@ -23,8 +23,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef JITInlineMethods_h
-#define JITInlineMethods_h
+#ifndef JITInlines_h
+#define JITInlines_h
#if ENABLE(JIT)
@@ -528,12 +528,12 @@ inline void JIT::emitArrayProfileStoreToHoleSpecialCase(ArrayProfile* arrayProfi
#endif
}
-static inline bool arrayProfileSaw(ArrayProfile* profile, IndexingType capability)
+static inline bool arrayProfileSaw(ArrayModes arrayModes, IndexingType capability)
{
#if ENABLE(VALUE_PROFILER)
- return !!(profile->observedArrayModes() & (asArrayModes(NonArray | capability) | asArrayModes(ArrayClass | capability)));
+ return arrayModesInclude(arrayModes, capability);
#else
- UNUSED_PARAM(profile);
+ UNUSED_PARAM(arrayModes);
UNUSED_PARAM(capability);
return false;
#endif
@@ -541,9 +541,20 @@ static inline bool arrayProfileSaw(ArrayProfile* profile, IndexingType capabilit
inline JITArrayMode JIT::chooseArrayMode(ArrayProfile* profile)
{
- if (arrayProfileSaw(profile, ArrayStorageShape))
+#if ENABLE(VALUE_PROFILER)
+ profile->computeUpdatedPrediction(m_codeBlock);
+ ArrayModes arrayModes = profile->observedArrayModes();
+ if (arrayProfileSaw(arrayModes, DoubleShape))
+ return JITDouble;
+ if (arrayProfileSaw(arrayModes, Int32Shape))
+ return JITInt32;
+ if (arrayProfileSaw(arrayModes, ArrayStorageShape))
return JITArrayStorage;
return JITContiguous;
+#else
+ UNUSED_PARAM(profile);
+ return JITContiguous;
+#endif
}
#if USE(JSVALUE32_64)
@@ -998,4 +1009,5 @@ ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
#endif // ENABLE(JIT)
-#endif
+#endif // JITInlines_h
+
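arrayProfileSaw() now takes the profile's ArrayModes bitmask directly and defers to arrayModesInclude(), so chooseArrayMode() can test several shapes against one snapshot of the profile. The real helper lives in ArrayProfile.h, not in this diff; a sketch of what it presumably reduces to, based on the expression the removed inline used:

    static inline bool arrayModesIncludeSketch(ArrayModes arrayModes, IndexingType shape)
    {
        // Same test the old code performed inline: the profile observed either a
        // non-array or an array with the given indexing shape.
        return !!(arrayModes & (asArrayModes(NonArray | shape) | asArrayModes(ArrayClass | shape)));
    }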
diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp
index 4fb9d8cd5..9f0ce3a77 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp
@@ -29,9 +29,9 @@
#include "JIT.h"
#include "Arguments.h"
-#include "CopiedSpaceInlineMethods.h"
+#include "CopiedSpaceInlines.h"
#include "Heap.h"
-#include "JITInlineMethods.h"
+#include "JITInlines.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSCell.h"
@@ -1210,9 +1210,18 @@ void JIT::emit_op_convert_this(Instruction* currentInstruction)
addSlowCase(branchPtr(Equal, Address(regT1, JSCell::structureOffset()), TrustedImmPtr(m_globalData->stringStructure.get())));
}
-void JIT::emit_op_create_this(Instruction* currentInstruction)
+void JIT::emit_op_get_callee(Instruction* currentInstruction)
{
+ unsigned result = currentInstruction[1].u.operand;
emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
+ emitValueProfilingSite();
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emit_op_create_this(Instruction* currentInstruction)
+{
+ int callee = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(callee, regT0);
loadPtr(Address(regT0, JSFunction::offsetOfCachedInheritorID()), regT2);
addSlowCase(branchTestPtr(Zero, regT2));
@@ -1952,6 +1961,7 @@ void JIT::emit_op_new_array(Instruction* currentInstruction)
JITStubCall stubCall(this, cti_op_new_array);
stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
+ stubCall.addArgument(TrustedImmPtr(currentInstruction[4].u.arrayAllocationProfile));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -1963,6 +1973,7 @@ void JIT::emit_op_new_array_with_size(Instruction* currentInstruction)
#else
stubCall.addArgument(currentInstruction[2].u.operand);
#endif
+ stubCall.addArgument(TrustedImmPtr(currentInstruction[3].u.arrayAllocationProfile));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -1971,6 +1982,7 @@ void JIT::emit_op_new_array_buffer(Instruction* currentInstruction)
JITStubCall stubCall(this, cti_op_new_array_buffer);
stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
+ stubCall.addArgument(TrustedImmPtr(currentInstruction[4].u.arrayAllocationProfile));
stubCall.call(currentInstruction[1].u.operand);
}
diff --git a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
index 9c5d260ab..13daf962a 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -30,7 +30,7 @@
#if USE(JSVALUE32_64)
#include "JIT.h"
-#include "JITInlineMethods.h"
+#include "JITInlines.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSCell.h"
@@ -1467,9 +1467,19 @@ void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
emitStore(dst, JSValue());
}
-void JIT::emit_op_create_this(Instruction* currentInstruction)
+void JIT::emit_op_get_callee(Instruction* currentInstruction)
{
+ int dst = currentInstruction[1].u.operand;
emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
+ move(TrustedImm32(JSValue::CellTag), regT1);
+ emitValueProfilingSite();
+ emitStore(dst, regT1, regT0);
+}
+
+void JIT::emit_op_create_this(Instruction* currentInstruction)
+{
+ int callee = currentInstruction[2].u.operand;
+ emitLoadPayload(callee, regT0);
loadPtr(Address(regT0, JSFunction::offsetOfCachedInheritorID()), regT2);
addSlowCase(branchTestPtr(Zero, regT2));
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
index 6362598f4..57a5685eb 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -32,7 +32,7 @@
#include "GCAwareJITStubRoutine.h"
#include "GetterSetter.h"
#include "Interpreter.h"
-#include "JITInlineMethods.h"
+#include "JITInlines.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
@@ -98,7 +98,7 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
unsigned base = currentInstruction[2].u.operand;
unsigned property = currentInstruction[3].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
-
+
emitGetVirtualRegisters(base, regT0, property, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
@@ -120,6 +120,12 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
JITArrayMode mode = chooseArrayMode(profile);
switch (mode) {
+ case JITInt32:
+ slowCases = emitInt32GetByVal(currentInstruction, badType);
+ break;
+ case JITDouble:
+ slowCases = emitDoubleGetByVal(currentInstruction, badType);
+ break;
case JITContiguous:
slowCases = emitContiguousGetByVal(currentInstruction, badType);
break;
@@ -148,11 +154,26 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
}
-JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType)
+JIT::JumpList JIT::emitDoubleGetByVal(Instruction*, PatchableJump& badType)
{
JumpList slowCases;
- badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ContiguousShape));
+ badType = patchableBranch32(NotEqual, regT2, TrustedImm32(DoubleShape));
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
+ slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
+ loadDouble(BaseIndex(regT2, regT1, TimesEight), fpRegT0);
+ slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
+ moveDoubleTo64(fpRegT0, regT0);
+ sub64(tagTypeNumberRegister, regT0);
+
+ return slowCases;
+}
+
+JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape)
+{
+ JumpList slowCases;
+
+ badType = patchableBranch32(NotEqual, regT2, TrustedImm32(expectedShape));
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
load64(BaseIndex(regT2, regT1, TimesEight), regT0);
@@ -304,6 +325,12 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
JITArrayMode mode = chooseArrayMode(profile);
switch (mode) {
+ case JITInt32:
+ slowCases = emitInt32PutByVal(currentInstruction, badType);
+ break;
+ case JITDouble:
+ slowCases = emitDoublePutByVal(currentInstruction, badType);
+ break;
case JITContiguous:
slowCases = emitContiguousPutByVal(currentInstruction, badType);
break;
@@ -325,24 +352,48 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
emitWriteBarrier(regT0, regT3, regT1, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
}
-JIT::JumpList JIT::emitContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType)
+JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType, IndexingType indexingShape)
{
unsigned value = currentInstruction[3].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
- badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ContiguousShape));
+ JumpList slowCases;
+
+ badType = patchableBranch32(NotEqual, regT2, TrustedImm32(indexingShape));
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
Jump outOfBounds = branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength()));
Label storeResult = label();
emitGetVirtualRegister(value, regT3);
- store64(regT3, BaseIndex(regT2, regT1, TimesEight));
+ switch (indexingShape) {
+ case Int32Shape:
+ slowCases.append(emitJumpIfNotImmediateInteger(regT3));
+ store64(regT3, BaseIndex(regT2, regT1, TimesEight));
+ break;
+ case DoubleShape: {
+ Jump notInt = emitJumpIfNotImmediateInteger(regT3);
+ convertInt32ToDouble(regT3, fpRegT0);
+ Jump ready = jump();
+ notInt.link(this);
+ add64(tagTypeNumberRegister, regT3);
+ move64ToDouble(regT3, fpRegT0);
+ slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
+ ready.link(this);
+ storeDouble(fpRegT0, BaseIndex(regT2, regT1, TimesEight));
+ break;
+ }
+ case ContiguousShape:
+ store64(regT3, BaseIndex(regT2, regT1, TimesEight));
+ break;
+ default:
+ CRASH();
+ break;
+ }
Jump done = jump();
outOfBounds.link(this);
- JumpList slowCases;
slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfVectorLength())));
emitArrayProfileStoreToHoleSpecialCase(profile);
@@ -394,12 +445,23 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
unsigned base = currentInstruction[1].u.operand;
unsigned property = currentInstruction[2].u.operand;
unsigned value = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
linkSlowCase(iter); // property int32 check
linkSlowCaseIfNotJSCell(iter, base); // base cell check
linkSlowCase(iter); // base not array check
linkSlowCase(iter); // out of bounds
+ JITArrayMode mode = chooseArrayMode(profile);
+ switch (mode) {
+ case JITInt32:
+ case JITDouble:
+ linkSlowCase(iter); // value type check
+ break;
+ default:
+ break;
+ }
+
Label slowPath = label();
JITStubCall stubPutByValCall(this, cti_op_put_by_val);
@@ -1312,6 +1374,12 @@ void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd
JumpList slowCases;
switch (arrayMode) {
+ case JITInt32:
+ slowCases = emitInt32GetByVal(currentInstruction, badType);
+ break;
+ case JITDouble:
+ slowCases = emitDoubleGetByVal(currentInstruction, badType);
+ break;
case JITContiguous:
slowCases = emitContiguousGetByVal(currentInstruction, badType);
break;
@@ -1375,6 +1443,12 @@ void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd
JumpList slowCases;
switch (arrayMode) {
+ case JITInt32:
+ slowCases = emitInt32PutByVal(currentInstruction, badType);
+ break;
+ case JITDouble:
+ slowCases = emitDoublePutByVal(currentInstruction, badType);
+ break;
case JITContiguous:
slowCases = emitContiguousPutByVal(currentInstruction, badType);
break;
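Both new double paths guard with a DoubleNotEqualOrUnordered self-compare before trusting a loaded or stored value: a NaN never compares equal to itself, and (as far as I recall JSC's double storage) holes in a DoubleShape butterfly are represented by a NaN pattern, so NaN values must fall back to the slow path. A standalone reminder of the self-compare trick (not JSC code):

    #include <cassert>
    #include <cmath>

    int main()
    {
        double hole = std::nan("");  // stand-in for a NaN found in double storage
        double value = 1.5;
        assert(hole != hole);        // unordered with itself: bail to the slow case
        assert(!(value != value));   // ordinary doubles pass the check
        return 0;
    }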
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
index 939766f04..be146a402 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
@@ -32,7 +32,7 @@
#include "CodeBlock.h"
#include "GCAwareJITStubRoutine.h"
#include "Interpreter.h"
-#include "JITInlineMethods.h"
+#include "JITInlines.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
@@ -153,6 +153,12 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
JITArrayMode mode = chooseArrayMode(profile);
switch (mode) {
+ case JITInt32:
+ slowCases = emitInt32GetByVal(currentInstruction, badType);
+ break;
+ case JITDouble:
+ slowCases = emitDoubleGetByVal(currentInstruction, badType);
+ break;
case JITContiguous:
slowCases = emitContiguousGetByVal(currentInstruction, badType);
break;
@@ -181,11 +187,11 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
}
-JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType)
+JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape)
{
JumpList slowCases;
- badType = patchableBranch32(NotEqual, regT1, TrustedImm32(ContiguousShape));
+ badType = patchableBranch32(NotEqual, regT1, TrustedImm32(expectedShape));
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfPublicLength())));
@@ -197,6 +203,22 @@ JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType)
return slowCases;
}
+JIT::JumpList JIT::emitDoubleGetByVal(Instruction*, PatchableJump& badType)
+{
+ JumpList slowCases;
+
+ badType = patchableBranch32(NotEqual, regT1, TrustedImm32(DoubleShape));
+
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
+ slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfPublicLength())));
+
+ loadDouble(BaseIndex(regT3, regT2, TimesEight), fpRegT0);
+ slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
+ moveDoubleToInts(fpRegT0, regT0, regT1);
+
+ return slowCases;
+}
+
JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType)
{
JumpList slowCases;
@@ -270,6 +292,12 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
JITArrayMode mode = chooseArrayMode(profile);
switch (mode) {
+ case JITInt32:
+ slowCases = emitInt32PutByVal(currentInstruction, badType);
+ break;
+ case JITDouble:
+ slowCases = emitDoublePutByVal(currentInstruction, badType);
+ break;
case JITContiguous:
slowCases = emitContiguousPutByVal(currentInstruction, badType);
break;
@@ -289,7 +317,7 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
}
-JIT::JumpList JIT::emitContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType)
+JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType, IndexingType indexingShape)
{
unsigned value = currentInstruction[3].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
@@ -303,8 +331,30 @@ JIT::JumpList JIT::emitContiguousPutByVal(Instruction* currentInstruction, Patch
Label storeResult = label();
emitLoad(value, regT1, regT0);
- store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
- store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ switch (indexingShape) {
+ case Int32Shape:
+ slowCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ // Fall through.
+ case ContiguousShape:
+ store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ break;
+ case DoubleShape: {
+ Jump notInt = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
+ convertInt32ToDouble(regT0, fpRegT0);
+ Jump ready = jump();
+ notInt.link(this);
+ moveIntsToDouble(regT0, regT1, fpRegT0, fpRegT1);
+ slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
+ ready.link(this);
+ storeDouble(fpRegT0, BaseIndex(regT3, regT2, TimesEight));
+ break;
+ }
+ default:
+ CRASH();
+ break;
+ }
+
Jump done = jump();
outOfBounds.link(this);
@@ -364,12 +414,23 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
unsigned base = currentInstruction[1].u.operand;
unsigned property = currentInstruction[2].u.operand;
unsigned value = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
linkSlowCase(iter); // property int32 check
linkSlowCaseIfNotJSCell(iter, base); // base cell check
linkSlowCase(iter); // base not array check
linkSlowCase(iter); // out of bounds
+ JITArrayMode mode = chooseArrayMode(profile);
+ switch (mode) {
+ case JITInt32:
+ case JITDouble:
+ linkSlowCase(iter); // value type check
+ break;
+ default:
+ break;
+ }
+
Label slowPath = label();
JITStubCall stubPutByValCall(this, cti_op_put_by_val);
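On the 32_64 value representation the double paths work with a tag/payload register pair instead of one 64-bit register: moveIntsToDouble() reassembles the two words into an FPR, the tag word being (in this representation) the high half of the raw IEEE-754 bits. A standalone illustration of that reassembly; the helper name here is invented, not a JSC API:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Invented helper mirroring what the macro assembler's moveIntsToDouble() amounts to:
    // payload supplies the low 32 bits of the double, the tag word the high 32 bits.
    static double intsToDouble(uint32_t payload, uint32_t tag)
    {
        uint64_t bits = (static_cast<uint64_t>(tag) << 32) | payload;
        double result;
        std::memcpy(&result, &bits, sizeof(result));
        return result;
    }

    int main()
    {
        // 1.0 is 0x3ff0000000000000: high word 0x3ff00000, low word 0x00000000.
        assert(intsToDouble(0x00000000u, 0x3ff00000u) == 1.0);
        return 0;
    }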
diff --git a/Source/JavaScriptCore/jit/JITStubRoutine.h b/Source/JavaScriptCore/jit/JITStubRoutine.h
index a46fcfd1a..020ef6907 100644
--- a/Source/JavaScriptCore/jit/JITStubRoutine.h
+++ b/Source/JavaScriptCore/jit/JITStubRoutine.h
@@ -150,11 +150,11 @@ protected:
};
// Helper for the creation of simple stub routines that need no help from the GC.
-#define FINALIZE_CODE_FOR_STUB(patchBuffer, dataLogArguments) \
- (adoptRef(new JITStubRoutine(FINALIZE_CODE((patchBuffer), dataLogArguments))))
+#define FINALIZE_CODE_FOR_STUB(patchBuffer, dataLogFArguments) \
+ (adoptRef(new JITStubRoutine(FINALIZE_CODE((patchBuffer), dataLogFArguments))))
-#define FINALIZE_CODE_FOR_DFG_STUB(patchBuffer, dataLogArguments) \
- (adoptRef(new JITStubRoutine(FINALIZE_DFG_CODE((patchBuffer), dataLogArguments))))
+#define FINALIZE_CODE_FOR_DFG_STUB(patchBuffer, dataLogFArguments) \
+ (adoptRef(new JITStubRoutine(FINALIZE_DFG_CODE((patchBuffer), dataLogFArguments))))
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/JITStubs.cpp b/Source/JavaScriptCore/jit/JITStubs.cpp
index 5ddb98dee..760ffd429 100644
--- a/Source/JavaScriptCore/jit/JITStubs.cpp
+++ b/Source/JavaScriptCore/jit/JITStubs.cpp
@@ -1808,12 +1808,12 @@ DEFINE_STUB_FUNCTION(void, optimize)
unsigned bytecodeIndex = stackFrame.args[0].int32();
#if ENABLE(JIT_VERBOSE_OSR)
- dataLog("%p: Entered optimize with bytecodeIndex = %u, executeCounter = %s, reoptimizationRetryCounter = %u, optimizationDelayCounter = %u, exitCounter = ", codeBlock, bytecodeIndex, codeBlock->jitExecuteCounter().status(), codeBlock->reoptimizationRetryCounter(), codeBlock->optimizationDelayCounter());
+ dataLogF("%p: Entered optimize with bytecodeIndex = %u, executeCounter = %s, reoptimizationRetryCounter = %u, optimizationDelayCounter = %u, exitCounter = ", codeBlock, bytecodeIndex, codeBlock->jitExecuteCounter().status(), codeBlock->reoptimizationRetryCounter(), codeBlock->optimizationDelayCounter());
if (codeBlock->hasOptimizedReplacement())
- dataLog("%u", codeBlock->replacement()->osrExitCounter());
+ dataLogF("%u", codeBlock->replacement()->osrExitCounter());
else
- dataLog("N/A");
- dataLog("\n");
+ dataLogF("N/A");
+ dataLogF("\n");
#endif
if (!codeBlock->checkIfOptimizationThresholdReached()) {
@@ -1823,7 +1823,7 @@ DEFINE_STUB_FUNCTION(void, optimize)
if (codeBlock->hasOptimizedReplacement()) {
#if ENABLE(JIT_VERBOSE_OSR)
- dataLog("Considering OSR into %p(%p).\n", codeBlock, codeBlock->replacement());
+ dataLogF("Considering OSR into %p(%p).\n", codeBlock, codeBlock->replacement());
#endif
// If we have an optimized replacement, then it must be the case that we entered
// cti_optimize from a loop. That's because is there's an optimized replacement,
@@ -1840,7 +1840,7 @@ DEFINE_STUB_FUNCTION(void, optimize)
// additional checking anyway, to reduce the amount of recompilation thrashing.
if (codeBlock->replacement()->shouldReoptimizeFromLoopNow()) {
#if ENABLE(JIT_VERBOSE_OSR)
- dataLog("Triggering reoptimization of %p(%p) (in loop).\n", codeBlock, codeBlock->replacement());
+ dataLogF("Triggering reoptimization of %p(%p) (in loop).\n", codeBlock, codeBlock->replacement());
#endif
codeBlock->reoptimize();
return;
@@ -1848,7 +1848,7 @@ DEFINE_STUB_FUNCTION(void, optimize)
} else {
if (!codeBlock->shouldOptimizeNow()) {
#if ENABLE(JIT_VERBOSE_OSR)
- dataLog("Delaying optimization for %p (in loop) because of insufficient profiling.\n", codeBlock);
+ dataLogF("Delaying optimization for %p (in loop) because of insufficient profiling.\n", codeBlock);
#endif
return;
}
@@ -1857,14 +1857,14 @@ DEFINE_STUB_FUNCTION(void, optimize)
JSObject* error = codeBlock->compileOptimized(callFrame, scope, bytecodeIndex);
#if ENABLE(JIT_VERBOSE_OSR)
if (error)
- dataLog("WARNING: optimized compilation failed.\n");
+ dataLogF("WARNING: optimized compilation failed.\n");
#else
UNUSED_PARAM(error);
#endif
if (codeBlock->replacement() == codeBlock) {
#if ENABLE(JIT_VERBOSE_OSR)
- dataLog("Optimizing %p failed.\n", codeBlock);
+ dataLogF("Optimizing %p failed.\n", codeBlock);
#endif
ASSERT(codeBlock->getJITType() == JITCode::BaselineJIT);
@@ -1877,8 +1877,13 @@ DEFINE_STUB_FUNCTION(void, optimize)
ASSERT(optimizedCodeBlock->getJITType() == JITCode::DFGJIT);
if (void* address = DFG::prepareOSREntry(callFrame, optimizedCodeBlock, bytecodeIndex)) {
+ if (Options::showDFGDisassembly()) {
+ dataLogF(
+ "Performing OSR from code block %p to code block %p, address %p to %p.\n",
+ codeBlock, optimizedCodeBlock, (STUB_RETURN_ADDRESS).value(), address);
+ }
#if ENABLE(JIT_VERBOSE_OSR)
- dataLog("Optimizing %p succeeded, performing OSR after a delay of %u.\n", codeBlock, codeBlock->optimizationDelayCounter());
+ dataLogF("Optimizing %p succeeded, performing OSR after a delay of %u.\n", codeBlock, codeBlock->optimizationDelayCounter());
#endif
codeBlock->optimizeSoon();
@@ -1887,7 +1892,7 @@ DEFINE_STUB_FUNCTION(void, optimize)
}
#if ENABLE(JIT_VERBOSE_OSR)
- dataLog("Optimizing %p succeeded, OSR failed, after a delay of %u.\n", codeBlock, codeBlock->optimizationDelayCounter());
+ dataLogF("Optimizing %p succeeded, OSR failed, after a delay of %u.\n", codeBlock, codeBlock->optimizationDelayCounter());
#endif
// Count the OSR failure as a speculation failure. If this happens a lot, then
@@ -1895,7 +1900,7 @@ DEFINE_STUB_FUNCTION(void, optimize)
optimizedCodeBlock->countOSRExit();
#if ENABLE(JIT_VERBOSE_OSR)
- dataLog("Encountered OSR failure into %p(%p).\n", codeBlock, codeBlock->replacement());
+ dataLogF("Encountered OSR failure into %p(%p).\n", codeBlock, codeBlock->replacement());
#endif
// We are a lot more conservative about triggering reoptimization after OSR failure than
@@ -1908,7 +1913,7 @@ DEFINE_STUB_FUNCTION(void, optimize)
// reoptimization trigger.
if (optimizedCodeBlock->shouldReoptimizeNow()) {
#if ENABLE(JIT_VERBOSE_OSR)
- dataLog("Triggering reoptimization of %p(%p) (after OSR fail).\n", codeBlock, codeBlock->replacement());
+ dataLogF("Triggering reoptimization of %p(%p) (after OSR fail).\n", codeBlock, codeBlock->replacement());
#endif
codeBlock->reoptimize();
return;
@@ -2228,21 +2233,21 @@ DEFINE_STUB_FUNCTION(JSObject*, op_new_array)
{
STUB_INIT_STACK_FRAME(stackFrame);
- return constructArray(stackFrame.callFrame, reinterpret_cast<JSValue*>(&stackFrame.callFrame->registers()[stackFrame.args[0].int32()]), stackFrame.args[1].int32());
+ return constructArray(stackFrame.callFrame, stackFrame.args[2].arrayAllocationProfile(), reinterpret_cast<JSValue*>(&stackFrame.callFrame->registers()[stackFrame.args[0].int32()]), stackFrame.args[1].int32());
}
DEFINE_STUB_FUNCTION(JSObject*, op_new_array_with_size)
{
STUB_INIT_STACK_FRAME(stackFrame);
- return constructArrayWithSizeQuirk(stackFrame.callFrame, stackFrame.callFrame->lexicalGlobalObject(), stackFrame.args[0].jsValue());
+ return constructArrayWithSizeQuirk(stackFrame.callFrame, stackFrame.args[1].arrayAllocationProfile(), stackFrame.callFrame->lexicalGlobalObject(), stackFrame.args[0].jsValue());
}
DEFINE_STUB_FUNCTION(JSObject*, op_new_array_buffer)
{
STUB_INIT_STACK_FRAME(stackFrame);
- return constructArray(stackFrame.callFrame, stackFrame.callFrame->codeBlock()->constantBuffer(stackFrame.args[0].int32()), stackFrame.args[1].int32());
+ return constructArray(stackFrame.callFrame, stackFrame.args[2].arrayAllocationProfile(), stackFrame.callFrame->codeBlock()->constantBuffer(stackFrame.args[0].int32()), stackFrame.args[1].int32());
}
DEFINE_STUB_FUNCTION(void, op_init_global_const_check)
@@ -2470,7 +2475,7 @@ DEFINE_STUB_FUNCTION(void, op_put_by_val)
JSValue baseValue = stackFrame.args[0].jsValue();
JSValue subscript = stackFrame.args[1].jsValue();
JSValue value = stackFrame.args[2].jsValue();
-
+
if (baseValue.isObject() && subscript.isInt32()) {
// See if it's worth optimizing at all.
JSObject* object = asObject(baseValue);
diff --git a/Source/JavaScriptCore/jit/JITStubs.h b/Source/JavaScriptCore/jit/JITStubs.h
index 5761236b1..3bf13bbdf 100644
--- a/Source/JavaScriptCore/jit/JITStubs.h
+++ b/Source/JavaScriptCore/jit/JITStubs.h
@@ -45,6 +45,7 @@ namespace JSC {
struct StructureStubInfo;
+ class ArrayAllocationProfile;
class CodeBlock;
class ExecutablePool;
class FunctionExecutable;
@@ -85,6 +86,7 @@ namespace JSC {
ReturnAddressPtr returnAddress() { return ReturnAddressPtr(asPointer); }
ResolveOperations* resolveOperations() { return static_cast<ResolveOperations*>(asPointer); }
PutToBaseOperation* putToBaseOperation() { return static_cast<PutToBaseOperation*>(asPointer); }
+ ArrayAllocationProfile* arrayAllocationProfile() { return static_cast<ArrayAllocationProfile*>(asPointer); }
};
struct TrampolineStructure {
diff --git a/Source/JavaScriptCore/jit/JumpReplacementWatchpoint.cpp b/Source/JavaScriptCore/jit/JumpReplacementWatchpoint.cpp
index 26eae57be..13270d4d3 100644
--- a/Source/JavaScriptCore/jit/JumpReplacementWatchpoint.cpp
+++ b/Source/JavaScriptCore/jit/JumpReplacementWatchpoint.cpp
@@ -47,7 +47,7 @@ void JumpReplacementWatchpoint::fireInternal()
void* source = bitwise_cast<void*>(m_source);
void* destination = bitwise_cast<void*>(m_destination);
if (Options::showDisassembly())
- dataLog("Firing jump replacement watchpoint from %p, to %p.\n", source, destination);
+ dataLogF("Firing jump replacement watchpoint from %p, to %p.\n", source, destination);
MacroAssembler::replaceWithJump(CodeLocationLabel(source), CodeLocationLabel(destination));
if (isOnList())
remove();