author     Simon Hausmann <simon.hausmann@nokia.com>  2012-07-11 13:45:28 +0200
committer  Simon Hausmann <simon.hausmann@nokia.com>  2012-07-11 13:45:28 +0200
commit     d6a599dbc9d824a462b2b206316e102bf8136446 (patch)
tree       ecb257a5e55b2239d74b90fdad62fccd661cf286 /Source/JavaScriptCore/jit
parent     3ccc3a85f09a83557b391aae380d3bf5f81a2911 (diff)
download   qtwebkit-d6a599dbc9d824a462b2b206316e102bf8136446.tar.gz
Imported WebKit commit 8ff1f22783a32de82fee915abd55bd1b298f2644 (http://svn.webkit.org/repository/webkit/trunk@122325)
New snapshot that should work with the latest Qt build system changes
Diffstat (limited to 'Source/JavaScriptCore/jit')
-rw-r--r--  Source/JavaScriptCore/jit/ExecutableAllocator.cpp             2
-rw-r--r--  Source/JavaScriptCore/jit/ExecutableAllocator.h              14
-rw-r--r--  Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp  16
-rw-r--r--  Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp          127
-rw-r--r--  Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h            124
-rw-r--r--  Source/JavaScriptCore/jit/JIT.cpp                             20
-rw-r--r--  Source/JavaScriptCore/jit/JIT.h                               79
-rw-r--r--  Source/JavaScriptCore/jit/JITInlineMethods.h                  27
-rw-r--r--  Source/JavaScriptCore/jit/JITOpcodes.cpp                      44
-rw-r--r--  Source/JavaScriptCore/jit/JITOpcodes32_64.cpp                  8
-rw-r--r--  Source/JavaScriptCore/jit/JITPropertyAccess.cpp              205
-rw-r--r--  Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp         209
-rw-r--r--  Source/JavaScriptCore/jit/JITStubRoutine.cpp                  48
-rw-r--r--  Source/JavaScriptCore/jit/JITStubRoutine.h                   161
-rw-r--r--  Source/JavaScriptCore/jit/JITStubs.cpp                        75
-rw-r--r--  Source/JavaScriptCore/jit/JITStubs.h                           4
-rw-r--r--  Source/JavaScriptCore/jit/JSInterfaceJIT.h                    32
-rw-r--r--  Source/JavaScriptCore/jit/SpecializedThunkJIT.h                6
-rw-r--r--  Source/JavaScriptCore/jit/ThunkGenerators.cpp                 22
19 files changed, 883 insertions, 340 deletions
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
index 79399196e..e9bb66ce7 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
+++ b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
@@ -55,7 +55,7 @@ namespace JSC {
class DemandExecutableAllocator : public MetaAllocator {
public:
DemandExecutableAllocator()
- : MetaAllocator(32) // round up all allocations to 32 bytes
+ : MetaAllocator(jitAllocationGranule)
{
MutexLocker lock(allocatorsMutex());
allocators().add(this);
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.h b/Source/JavaScriptCore/jit/ExecutableAllocator.h
index 8cd5cba07..85779e6a8 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocator.h
+++ b/Source/JavaScriptCore/jit/ExecutableAllocator.h
@@ -77,6 +77,8 @@ namespace JSC {
class JSGlobalData;
void releaseExecutableMemory(JSGlobalData&);
+static const unsigned jitAllocationGranule = 32;
+
inline size_t roundUpAllocationSize(size_t request, size_t granularity)
{
if ((std::numeric_limits<size_t>::max() - granularity) <= request)
@@ -101,6 +103,18 @@ typedef WTF::MetaAllocatorHandle ExecutableMemoryHandle;
class DemandExecutableAllocator;
#endif
+#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
+#if CPU(ARM)
+static const size_t fixedExecutableMemoryPoolSize = 16 * 1024 * 1024;
+#elif CPU(X86_64)
+static const size_t fixedExecutableMemoryPoolSize = 1024 * 1024 * 1024;
+#else
+static const size_t fixedExecutableMemoryPoolSize = 32 * 1024 * 1024;
+#endif
+
+extern uintptr_t startOfFixedExecutableMemoryPool;
+#endif
+
class ExecutableAllocator {
enum ProtectionSetting { Writable, Executable };
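[Editor's note: for orientation, the 32-byte granule means every allocation request is rounded up to the next multiple of jitAllocationGranule, after an overflow guard like the one in roundUpAllocationSize above. A minimal standalone sketch of that behavior; the helper below is illustrative, not the WebKit function itself:]

    #include <cstddef>
    #include <cstdio>
    #include <limits>

    static const size_t granule = 32; // same value as jitAllocationGranule

    static size_t roundUpToGranule(size_t request)
    {
        // Overflow guard first, mirroring roundUpAllocationSize above.
        if (request >= std::numeric_limits<size_t>::max() - granule)
            return 0; // the real code treats this as a fatal error
        return (request + granule - 1) & ~(granule - 1);
    }

    int main()
    {
        printf("%zu\n", roundUpToGranule(1));  // 32
        printf("%zu\n", roundUpToGranule(32)); // 32
        printf("%zu\n", roundUpToGranule(33)); // 64
        return 0;
    }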
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
index 2e08f1205..ad3343d11 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
+++ b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
@@ -45,27 +45,23 @@ using namespace WTF;
namespace JSC {
-#if CPU(ARM)
-static const size_t fixedPoolSize = 16 * 1024 * 1024;
-#elif CPU(X86_64)
-static const size_t fixedPoolSize = 1024 * 1024 * 1024;
-#else
-static const size_t fixedPoolSize = 32 * 1024 * 1024;
-#endif
+uintptr_t startOfFixedExecutableMemoryPool;
class FixedVMPoolExecutableAllocator : public MetaAllocator {
public:
FixedVMPoolExecutableAllocator()
- : MetaAllocator(32) // round up all allocations to 32 bytes
+ : MetaAllocator(jitAllocationGranule) // round up all allocations to 32 bytes
{
- m_reservation = PageReservation::reserveWithGuardPages(fixedPoolSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
+ m_reservation = PageReservation::reserveWithGuardPages(fixedExecutableMemoryPoolSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
#if !(ENABLE(CLASSIC_INTERPRETER) || ENABLE(LLINT))
if (!m_reservation)
CRASH();
#endif
if (m_reservation) {
- ASSERT(m_reservation.size() == fixedPoolSize);
+ ASSERT(m_reservation.size() == fixedExecutableMemoryPoolSize);
addFreshFreeSpace(m_reservation.base(), m_reservation.size());
+
+ startOfFixedExecutableMemoryPool = reinterpret_cast<uintptr_t>(m_reservation.base());
}
}
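[Editor's note: publishing the pool base as startOfFixedExecutableMemoryPool, next to the fixedExecutableMemoryPoolSize constant declared above, makes it cheap to test whether an arbitrary code address lies inside the fixed pool — presumably what the new JITStubRoutine.h in the diffstat consumes. A sketch of such a range check, with an illustrative name:]

    // Illustrative helper; assumes the two symbols declared above.
    static inline bool isInFixedExecutablePool(uintptr_t address)
    {
        // Unsigned subtraction folds the lower- and upper-bound checks into one.
        return address - startOfFixedExecutableMemoryPool < fixedExecutableMemoryPoolSize;
    }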
diff --git a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp
new file mode 100644
index 000000000..7ea61178c
--- /dev/null
+++ b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "GCAwareJITStubRoutine.h"
+
+#if ENABLE(JIT)
+
+#include "Heap.h"
+#include "JSGlobalData.h"
+#include "ScopeChain.h"
+#include "SlotVisitor.h"
+#include "Structure.h"
+
+namespace JSC {
+
+GCAwareJITStubRoutine::GCAwareJITStubRoutine(
+ const MacroAssemblerCodeRef& code, JSGlobalData& globalData)
+ : JITStubRoutine(code)
+ , m_mayBeExecuting(false)
+ , m_isJettisoned(false)
+{
+ globalData.heap.m_jitStubRoutines.add(this);
+}
+
+GCAwareJITStubRoutine::~GCAwareJITStubRoutine() { }
+
+void GCAwareJITStubRoutine::observeZeroRefCount()
+{
+ if (m_isJettisoned) {
+ // This case is needed for when the system shuts down. It may be that
+ // the JIT stub routine set gets deleted before we get around to deleting
+ // this guy. In that case the GC informs us that we're jettisoned already
+ // and that we should delete ourselves as soon as the ref count reaches
+ // zero.
+ delete this;
+ return;
+ }
+
+ ASSERT(!m_refCount);
+
+ m_isJettisoned = true;
+}
+
+void GCAwareJITStubRoutine::deleteFromGC()
+{
+ ASSERT(m_isJettisoned);
+ ASSERT(!m_refCount);
+ ASSERT(!m_mayBeExecuting);
+
+ delete this;
+}
+
+void GCAwareJITStubRoutine::markRequiredObjectsInternal(SlotVisitor&)
+{
+}
+
+MarkingGCAwareJITStubRoutineWithOneObject::MarkingGCAwareJITStubRoutineWithOneObject(
+ const MacroAssemblerCodeRef& code, JSGlobalData& globalData, const JSCell* owner,
+ JSCell* object)
+ : GCAwareJITStubRoutine(code, globalData)
+ , m_object(globalData, owner, object)
+{
+}
+
+MarkingGCAwareJITStubRoutineWithOneObject::~MarkingGCAwareJITStubRoutineWithOneObject()
+{
+}
+
+void MarkingGCAwareJITStubRoutineWithOneObject::markRequiredObjectsInternal(SlotVisitor& visitor)
+{
+ visitor.append(&m_object);
+}
+
+PassRefPtr<JITStubRoutine> createJITStubRoutine(
+ const MacroAssemblerCodeRef& code,
+ JSGlobalData& globalData,
+ const JSCell*,
+ bool makesCalls)
+{
+ if (!makesCalls)
+ return adoptRef(new JITStubRoutine(code));
+
+ return static_pointer_cast<JITStubRoutine>(
+ adoptRef(new GCAwareJITStubRoutine(code, globalData)));
+}
+
+PassRefPtr<JITStubRoutine> createJITStubRoutine(
+ const MacroAssemblerCodeRef& code,
+ JSGlobalData& globalData,
+ const JSCell* owner,
+ bool makesCalls,
+ JSCell* object)
+{
+ if (!makesCalls)
+ return adoptRef(new JITStubRoutine(code));
+
+ return static_pointer_cast<JITStubRoutine>(
+ adoptRef(new MarkingGCAwareJITStubRoutineWithOneObject(code, globalData, owner, object)));
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
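[Editor's note: the protocol implemented above is two-phase. Dropping the last reference only jettisons the routine, because its machine code may still be on some thread's stack; the GC performs the actual delete once stack scanning proves otherwise, except in the shutdown case handled in observeZeroRefCount, where deletion is immediate. A reduced, illustrative model of that life cycle, with the GC abstracted to direct calls:]

    // Reduced model of GCAwareJITStubRoutine's deletion protocol (illustrative).
    struct TwoPhaseStub {
        unsigned refCount;
        bool mayBeExecuting;
        bool isJettisoned;

        TwoPhaseStub() : refCount(1), mayBeExecuting(false), isJettisoned(false) { }

        void deref()
        {
            if (--refCount)
                return;
            if (isJettisoned) {
                // Shutdown path: the GC already disowned us, so self-delete.
                delete this;
                return;
            }
            // Phase 1: no owners left, but the code may still be running.
            isJettisoned = true;
        }

        // Phase 2: invoked by the GC once no stack frame points into the code.
        void deleteFromGC()
        {
            // The real class asserts isJettisoned && !refCount && !mayBeExecuting.
            delete this;
        }
    };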
diff --git a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h
new file mode 100644
index 000000000..59bc76beb
--- /dev/null
+++ b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GCAwareJITStubRoutine_h
+#define GCAwareJITStubRoutine_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(JIT)
+
+#include "JITStubRoutine.h"
+#include "JSObject.h"
+#include "JSString.h"
+#include "WriteBarrier.h"
+#include <wtf/RefCounted.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+class JITStubRoutineSet;
+
+// Use this stub routine if you know that your code might be on stack when
+// either GC or other kinds of stub deletion happen. Basicaly, if your stub
+// routine makes calls (either to JS code or to C++ code) then you should
+// assume that it's possible for that JS or C++ code to do something that
+// causes the system to try to delete your routine. Using this routine type
+// ensures that the actual deletion is delayed until the GC proves that the
+// routine is no longer running. You can also subclass this routine if you
+// want to mark additional objects during GC in those cases where the
+// routine is known to be executing, or if you want to force this routine to
+// keep other routines alive (for example due to the use of a slow-path
+// list which does not get reclaimed all at once).
+class GCAwareJITStubRoutine : public JITStubRoutine {
+public:
+ GCAwareJITStubRoutine(const MacroAssemblerCodeRef&, JSGlobalData&);
+ virtual ~GCAwareJITStubRoutine();
+
+ void markRequiredObjects(SlotVisitor& visitor)
+ {
+ markRequiredObjectsInternal(visitor);
+ }
+
+ void deleteFromGC();
+
+protected:
+ virtual void observeZeroRefCount();
+
+ virtual void markRequiredObjectsInternal(SlotVisitor&);
+
+private:
+ friend class JITStubRoutineSet;
+
+ bool m_mayBeExecuting;
+ bool m_isJettisoned;
+};
+
+// Use this if you want to mark one additional object during GC if your stub
+// routine is known to be executing.
+class MarkingGCAwareJITStubRoutineWithOneObject : public GCAwareJITStubRoutine {
+public:
+ MarkingGCAwareJITStubRoutineWithOneObject(
+ const MacroAssemblerCodeRef&, JSGlobalData&, const JSCell* owner, JSCell*);
+ virtual ~MarkingGCAwareJITStubRoutineWithOneObject();
+
+protected:
+ virtual void markRequiredObjectsInternal(SlotVisitor&);
+
+private:
+ WriteBarrier<JSCell> m_object;
+};
+
+// Helper for easily creating a GC-aware JIT stub routine. For the varargs,
+// pass zero or more JSCell*'s. This will either create a JITStubRoutine, a
+// GCAwareJITStubRoutine, or a MarkingGCAwareJITStubRoutineWithOneObject as
+// appropriate. Generally you only need to pass pointers that will be used
+// after the first call to C++ or JS.
+//
+// PassRefPtr<JITStubRoutine> createJITStubRoutine(
+// const MacroAssemblerCodeRef& code,
+// JSGlobalData& globalData,
+// const JSCell* owner,
+// bool makesCalls,
+// ...);
+//
+// Note that we don't actually use C-style varargs because that leads to
+// strange type-related problems. For example it would preclude us from using
+// our custom of passing '0' as NULL pointer. Besides, when I did try to write
+// this function using varargs, I ended up with more code than this simple
+// way.
+
+PassRefPtr<JITStubRoutine> createJITStubRoutine(
+ const MacroAssemblerCodeRef&, JSGlobalData&, const JSCell* owner, bool makesCalls);
+PassRefPtr<JITStubRoutine> createJITStubRoutine(
+ const MacroAssemblerCodeRef&, JSGlobalData&, const JSCell* owner, bool makesCalls,
+ JSCell*);
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // GCAwareJITStubRoutine_h
+
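[Editor's note: for a sense of the call-site shape, the factory returns the cheapest safe representation, so callers just describe what the stub does. A hypothetical caller of the second overload — patchBuffer, globalData, codeBlock, and structure stand in for whatever the real compiler has in scope; compare the actual use in JITPropertyAccess.cpp further below:]

    // Hypothetical call site for the second overload declared above.
    stubInfo->stubRoutine = createJITStubRoutine(
        FINALIZE_CODE(patchBuffer, ("example stub")),
        globalData,                   // JSGlobalData& that owns the heap
        codeBlock->ownerExecutable(), // owner cell for the write barrier
        true,                         // makesCalls: deletion must be GC-delayed
        structure);                   // extra cell kept marked while executing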
diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp
index e1e034b19..285355f1b 100644
--- a/Source/JavaScriptCore/jit/JIT.cpp
+++ b/Source/JavaScriptCore/jit/JIT.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -99,7 +99,7 @@ void JIT::emitOptimizationCheck(OptimizationCheckKind kind)
if (!canBeOptimized())
return;
- Jump skipOptimize = branchAdd32(Signed, TrustedImm32(kind == LoopOptimizationCheck ? Options::executionCounterIncrementForLoop : Options::executionCounterIncrementForReturn), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter()));
+ Jump skipOptimize = branchAdd32(Signed, TrustedImm32(kind == LoopOptimizationCheck ? Options::executionCounterIncrementForLoop() : Options::executionCounterIncrementForReturn()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter()));
JITStubCall stubCall(this, cti_optimize);
stubCall.addArgument(TrustedImm32(m_bytecodeOffset));
if (kind == EnterOptimizationCheck)
@@ -255,6 +255,7 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_create_activation)
DEFINE_OP(op_eq)
DEFINE_OP(op_eq_null)
+ case op_get_by_id_out_of_line:
DEFINE_OP(op_get_by_id)
DEFINE_OP(op_get_arguments_length)
DEFINE_OP(op_get_by_val)
@@ -319,8 +320,11 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_profile_will_call)
DEFINE_OP(op_push_new_scope)
DEFINE_OP(op_push_scope)
+ case op_put_by_id_out_of_line:
case op_put_by_id_transition_direct:
case op_put_by_id_transition_normal:
+ case op_put_by_id_transition_direct_out_of_line:
+ case op_put_by_id_transition_normal_out_of_line:
DEFINE_OP(op_put_by_id)
DEFINE_OP(op_put_by_index)
DEFINE_OP(op_put_by_val)
@@ -441,6 +445,7 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_create_this)
DEFINE_SLOWCASE_OP(op_div)
DEFINE_SLOWCASE_OP(op_eq)
+ case op_get_by_id_out_of_line:
DEFINE_SLOWCASE_OP(op_get_by_id)
DEFINE_SLOWCASE_OP(op_get_arguments_length)
DEFINE_SLOWCASE_OP(op_get_by_val)
@@ -472,16 +477,17 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_neq)
DEFINE_SLOWCASE_OP(op_new_array)
DEFINE_SLOWCASE_OP(op_new_object)
- DEFINE_SLOWCASE_OP(op_new_func)
- DEFINE_SLOWCASE_OP(op_new_func_exp)
DEFINE_SLOWCASE_OP(op_not)
DEFINE_SLOWCASE_OP(op_nstricteq)
DEFINE_SLOWCASE_OP(op_post_dec)
DEFINE_SLOWCASE_OP(op_post_inc)
DEFINE_SLOWCASE_OP(op_pre_dec)
DEFINE_SLOWCASE_OP(op_pre_inc)
+ case op_put_by_id_out_of_line:
case op_put_by_id_transition_direct:
case op_put_by_id_transition_normal:
+ case op_put_by_id_transition_direct_out_of_line:
+ case op_put_by_id_transition_normal_out_of_line:
DEFINE_SLOWCASE_OP(op_put_by_id)
DEFINE_SLOWCASE_OP(op_put_by_val)
DEFINE_SLOWCASE_OP(op_put_global_var_check);
@@ -539,6 +545,7 @@ ALWAYS_INLINE void PropertyStubCompilationInfo::copyToStubInfo(StructureStubInfo
CodeLocationLabel hotPathBeginLocation = linkBuffer.locationOf(hotPathBegin);
info.patch.baseline.u.get.structureToCompare = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getStructureToCompare));
info.patch.baseline.u.get.structureCheck = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getStructureCheck));
+ info.patch.baseline.u.get.propertyStorageLoad = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(propertyStorageLoad));
#if USE(JSVALUE64)
info.patch.baseline.u.get.displacementLabel = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel));
#else
@@ -552,6 +559,7 @@ ALWAYS_INLINE void PropertyStubCompilationInfo::copyToStubInfo(StructureStubInfo
case PutById:
CodeLocationLabel hotPathBeginLocation = linkBuffer.locationOf(hotPathBegin);
info.patch.baseline.u.put.structureToCompare = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putStructureToCompare));
+ info.patch.baseline.u.put.propertyStorageLoad = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(propertyStorageLoad));
#if USE(JSVALUE64)
info.patch.baseline.u.put.displacementLabel = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel));
#else
@@ -763,7 +771,9 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
*functionEntryArityCheck = patchBuffer.locationOf(arityCheck);
CodeRef result = FINALIZE_CODE(
- patchBuffer, ("Baseline JIT code for CodeBlock %p", m_codeBlock));
+ patchBuffer,
+ ("Baseline JIT code for CodeBlock %p, instruction count = %u",
+ m_codeBlock, m_codeBlock->instructionCount()));
m_globalData->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
static_cast<double>(result.size()) /
diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h
index 6d4c578c0..987c4a163 100644
--- a/Source/JavaScriptCore/jit/JIT.h
+++ b/Source/JavaScriptCore/jit/JIT.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -157,6 +157,7 @@ namespace JSC {
MacroAssembler::Label hotPathBegin;
MacroAssembler::DataLabelPtr getStructureToCompare;
MacroAssembler::PatchableJump getStructureCheck;
+ MacroAssembler::ConvertibleLoadLabel propertyStorageLoad;
#if USE(JSVALUE64)
MacroAssembler::DataLabelCompact getDisplacementLabel;
#else
@@ -185,17 +186,24 @@ namespace JSC {
#endif
- PropertyStubCompilationInfo(PropertyStubGetById_T, unsigned bytecodeIndex, MacroAssembler::Label hotPathBegin,
+ PropertyStubCompilationInfo(
+ PropertyStubGetById_T, unsigned bytecodeIndex, MacroAssembler::Label hotPathBegin,
+ MacroAssembler::DataLabelPtr structureToCompare,
+ MacroAssembler::PatchableJump structureCheck,
+ MacroAssembler::ConvertibleLoadLabel propertyStorageLoad,
#if USE(JSVALUE64)
- MacroAssembler::DataLabelPtr structureToCompare, MacroAssembler::PatchableJump structureCheck, MacroAssembler::DataLabelCompact displacementLabel, MacroAssembler::Label putResult)
+ MacroAssembler::DataLabelCompact displacementLabel,
#else
- MacroAssembler::DataLabelPtr structureToCompare, MacroAssembler::PatchableJump structureCheck, MacroAssembler::DataLabelCompact displacementLabel1, MacroAssembler::DataLabelCompact displacementLabel2, MacroAssembler::Label putResult)
+ MacroAssembler::DataLabelCompact displacementLabel1,
+ MacroAssembler::DataLabelCompact displacementLabel2,
#endif
+ MacroAssembler::Label putResult)
: m_type(GetById)
, bytecodeIndex(bytecodeIndex)
, hotPathBegin(hotPathBegin)
, getStructureToCompare(structureToCompare)
, getStructureCheck(structureCheck)
+ , propertyStorageLoad(propertyStorageLoad)
#if USE(JSVALUE64)
, getDisplacementLabel(displacementLabel)
#else
@@ -206,15 +214,21 @@ namespace JSC {
{
}
- PropertyStubCompilationInfo(PropertyStubPutById_T, unsigned bytecodeIndex, MacroAssembler::Label hotPathBegin,
+ PropertyStubCompilationInfo(
+ PropertyStubPutById_T, unsigned bytecodeIndex, MacroAssembler::Label hotPathBegin,
+ MacroAssembler::DataLabelPtr structureToCompare,
+ MacroAssembler::ConvertibleLoadLabel propertyStorageLoad,
#if USE(JSVALUE64)
- MacroAssembler::DataLabelPtr structureToCompare, MacroAssembler::DataLabel32 displacementLabel)
+ MacroAssembler::DataLabel32 displacementLabel
#else
- MacroAssembler::DataLabelPtr structureToCompare, MacroAssembler::DataLabel32 displacementLabel1, MacroAssembler::DataLabel32 displacementLabel2)
+ MacroAssembler::DataLabel32 displacementLabel1,
+ MacroAssembler::DataLabel32 displacementLabel2
#endif
+ )
: m_type(PutById)
, bytecodeIndex(bytecodeIndex)
, hotPathBegin(hotPathBegin)
+ , propertyStorageLoad(propertyStorageLoad)
, putStructureToCompare(structureToCompare)
#if USE(JSVALUE64)
, putDisplacementLabel(displacementLabel)
@@ -295,40 +309,40 @@ namespace JSC {
return JIT(globalData, codeBlock).privateCompile(functionEntryArityCheck, effort);
}
- static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress)
+ static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress)
{
JIT jit(globalData, codeBlock);
jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, ident, slot, cachedOffset, returnAddress, callFrame);
}
- static void compileGetByIdSelfList(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+ static void compileGetByIdSelfList(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
{
JIT jit(globalData, codeBlock);
jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
jit.privateCompileGetByIdSelfList(stubInfo, polymorphicStructures, currentIndex, structure, ident, slot, cachedOffset);
}
- static void compileGetByIdProtoList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+ static void compileGetByIdProtoList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
{
JIT jit(globalData, codeBlock);
jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
jit.privateCompileGetByIdProtoList(stubInfo, prototypeStructureList, currentIndex, structure, prototypeStructure, ident, slot, cachedOffset, callFrame);
}
- static void compileGetByIdChainList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+ static void compileGetByIdChainList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
{
JIT jit(globalData, codeBlock);
jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
jit.privateCompileGetByIdChainList(stubInfo, prototypeStructureList, currentIndex, structure, chain, count, ident, slot, cachedOffset, callFrame);
}
- static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress)
+ static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress)
{
JIT jit(globalData, codeBlock);
jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, ident, slot, cachedOffset, returnAddress, callFrame);
}
- static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
+ static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, PropertyOffset cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
JIT jit(globalData, codeBlock);
jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
@@ -358,9 +372,9 @@ namespace JSC {
static void resetPatchGetById(RepatchBuffer&, StructureStubInfo*);
static void resetPatchPutById(RepatchBuffer&, StructureStubInfo*);
- static void patchGetByIdSelf(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
- static void patchPutByIdReplace(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct);
- static void patchMethodCallProto(JSGlobalData&, CodeBlock* codeblock, MethodCallLinkInfo&, StructureStubInfo&, JSObject*, Structure*, JSObject*, ReturnAddressPtr);
+ static void patchGetByIdSelf(CodeBlock*, StructureStubInfo*, Structure*, PropertyOffset cachedOffset, ReturnAddressPtr);
+ static void patchPutByIdReplace(CodeBlock*, StructureStubInfo*, Structure*, PropertyOffset cachedOffset, ReturnAddressPtr, bool direct);
+ static void patchMethodCallProto(JSGlobalData&, CodeBlock*, MethodCallLinkInfo&, StructureStubInfo&, JSObject*, Structure*, JSObject*, ReturnAddressPtr);
static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ReturnAddressPtr returnAddress)
{
@@ -377,12 +391,12 @@ namespace JSC {
void privateCompileLinkPass();
void privateCompileSlowCases();
JITCode privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffort);
- void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
- void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, const Identifier&, const PropertySlot&, size_t cachedOffset);
- void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, CallFrame* callFrame);
- void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain* chain, size_t count, const Identifier&, const PropertySlot&, size_t cachedOffset, CallFrame* callFrame);
- void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
- void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ReturnAddressPtr returnAddress, bool direct);
+ void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, ReturnAddressPtr, CallFrame*);
+ void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset);
+ void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, CallFrame*);
+ void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, CallFrame*);
+ void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, ReturnAddressPtr, CallFrame*);
+ void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, PropertyOffset cachedOffset, StructureChain*, ReturnAddressPtr, bool direct);
PassRefPtr<ExecutableMemoryHandle> privateCompileCTIMachineTrampolines(JSGlobalData*, TrampolineStructure*);
Label privateCompileCTINativeCall(JSGlobalData*, bool isConstruct = false);
@@ -423,7 +437,6 @@ namespace JSC {
template<typename ClassType, bool destructor, typename StructureType> void emitAllocateBasicJSObject(StructureType, RegisterID result, RegisterID storagePtr);
void emitAllocateBasicStorage(size_t, RegisterID result, RegisterID storagePtr);
template<typename T> void emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID storagePtr);
- void emitAllocateJSFunction(FunctionExecutable*, RegisterID scopeChain, RegisterID result, RegisterID storagePtr);
void emitAllocateJSArray(unsigned valuesRegister, unsigned length, RegisterID cellResult, RegisterID storageResult, RegisterID storagePtr);
#if ENABLE(VALUE_PROFILER)
@@ -437,6 +450,8 @@ namespace JSC {
void emitValueProfilingSite() { }
#endif
+ enum FinalObjectMode { MayBeFinal, KnownNotFinal };
+
#if USE(JSVALUE32_64)
bool getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant);
@@ -469,10 +484,10 @@ namespace JSC {
void compileGetByIdHotPath();
void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false);
- void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset);
- void compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset);
- void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset);
- void compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, size_t cachedOffset);
+ void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset);
+ void compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset);
+ void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset, FinalObjectMode = MayBeFinal);
+ void compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, PropertyOffset cachedOffset);
// Arithmetic opcode helpers
void emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
@@ -548,10 +563,10 @@ namespace JSC {
void compileGetByIdHotPath(int baseVReg, Identifier*);
void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false);
- void compileGetDirectOffset(RegisterID base, RegisterID result, size_t cachedOffset);
- void compileGetDirectOffset(JSObject* base, RegisterID result, size_t cachedOffset);
- void compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch);
- void compilePutDirectOffset(RegisterID base, RegisterID value, size_t cachedOffset);
+ void compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset);
+ void compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset);
+ void compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch, FinalObjectMode = MayBeFinal);
+ void compilePutDirectOffset(RegisterID base, RegisterID value, PropertyOffset cachedOffset);
#endif // USE(JSVALUE32_64)
@@ -750,8 +765,6 @@ namespace JSC {
void emitSlow_op_to_jsnumber(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_urshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_new_func(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_new_func_exp(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_new_array(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitRightShift(Instruction*, bool isUnsigned);
diff --git a/Source/JavaScriptCore/jit/JITInlineMethods.h b/Source/JavaScriptCore/jit/JITInlineMethods.h
index 40985ac90..d1cee7ef7 100644
--- a/Source/JavaScriptCore/jit/JITInlineMethods.h
+++ b/Source/JavaScriptCore/jit/JITInlineMethods.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -429,8 +429,7 @@ template <typename ClassType, bool destructor, typename StructureType> inline vo
storePtr(TrustedImmPtr(0), Address(result, JSObject::offsetOfInheritorID()));
// initialize the object's property storage pointer
- addPtr(TrustedImm32(sizeof(JSObject)), result, storagePtr);
- storePtr(storagePtr, Address(result, ClassType::offsetOfPropertyStorage()));
+ storePtr(TrustedImmPtr(0), Address(result, ClassType::offsetOfOutOfLineStorage()));
}
template <typename T> inline void JIT::emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID scratch)
@@ -438,28 +437,6 @@ template <typename T> inline void JIT::emitAllocateJSFinalObject(T structure, Re
emitAllocateBasicJSObject<JSFinalObject, false, T>(structure, result, scratch);
}
-inline void JIT::emitAllocateJSFunction(FunctionExecutable* executable, RegisterID scopeChain, RegisterID result, RegisterID storagePtr)
-{
- emitAllocateBasicJSObject<JSFunction, true>(TrustedImmPtr(m_codeBlock->globalObject()->namedFunctionStructure()), result, storagePtr);
-
- // store the function's scope chain
- storePtr(scopeChain, Address(result, JSFunction::offsetOfScopeChain()));
-
- // store the function's executable member
- storePtr(TrustedImmPtr(executable), Address(result, JSFunction::offsetOfExecutable()));
-
- // clear the function's inheritorID
- storePtr(TrustedImmPtr(0), Address(result, JSFunction::offsetOfCachedInheritorID()));
-
- // store the function's name
- ASSERT(executable->nameValue());
- int functionNameOffset = sizeof(JSValue) * m_codeBlock->globalObject()->functionNameOffset();
- storePtr(TrustedImmPtr(executable->nameValue()), Address(regT1, functionNameOffset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
-#if USE(JSVALUE32_64)
- store32(TrustedImm32(JSValue::CellTag), Address(regT1, functionNameOffset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
-#endif
-}
-
inline void JIT::emitAllocateBasicStorage(size_t size, RegisterID result, RegisterID storagePtr)
{
CopiedAllocator* allocator = &m_globalData->heap.storageAllocator();
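[Editor's note: the replaced initialization reflects the new property-storage model in this snapshot: a JSObject carries a fixed inline slab of property slots plus a pointer to an optional out-of-line array, and a freshly allocated object starts with that pointer null, rather than pointing just past the cell as before. A self-contained sketch of the layout, assuming an inline capacity of 6 slots; the real constant and types live in JSObject.h:]

    #include <cstdint>

    typedef uint64_t EncodedJSValue;                 // stand-in for the real type
    static const unsigned inlineStorageCapacity = 6; // assumed value

    // Illustrative layout only; the real JSObject is defined elsewhere.
    struct ObjectSketch {
        void* structure;                  // cell header, simplified
        EncodedJSValue* outOfLineStorage; // null until growth forces a heap array
        EncodedJSValue inlineStorage[inlineStorageCapacity];
    };

    // emitAllocateBasicJSObject now does the moral equivalent of:
    //     object->outOfLineStorage = 0;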
diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp
index 2e448dd52..c0af6f9e9 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2012 Apple Inc. All rights reserved.
* Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -701,9 +701,8 @@ void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool)
// Load cached property
// Assume that the global object always uses external storage.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_propertyStorage)), regT0);
load32(Address(regT2, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), regT1);
- loadPtr(BaseIndex(regT0, regT1, ScalePtr), regT0);
+ compileGetDirectOffset(regT0, regT0, regT1, regT0, KnownNotFinal);
emitValueProfilingSite();
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
@@ -1618,11 +1617,9 @@ void JIT::emit_op_new_func(Instruction* currentInstruction)
#endif
}
- FunctionExecutable* executable = m_codeBlock->functionDecl(currentInstruction[2].u.operand);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
- emitAllocateJSFunction(executable, regT2, regT0, regT1);
-
- emitStoreCell(dst, regT0);
+ JITStubCall stubCall(this, cti_op_new_func);
+ stubCall.addArgument(TrustedImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
+ stubCall.call(dst);
if (currentInstruction[3].u.operand) {
#if USE(JSVALUE32_64)
@@ -1634,44 +1631,13 @@ void JIT::emit_op_new_func(Instruction* currentInstruction)
}
}
-void JIT::emitSlow_op_new_func(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_new_func);
- stubCall.addArgument(TrustedImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
{
- FunctionExecutable* executable = m_codeBlock->functionExpr(currentInstruction[2].u.operand);
-
- // We only inline the allocation of a anonymous function expressions
- // If we want to be able to allocate a named function expression, we would
- // need to be able to do inline allocation of a JSStaticScopeObject.
- if (executable->name().isNull()) {
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
- emitAllocateJSFunction(executable, regT2, regT0, regT1);
- emitStoreCell(currentInstruction[1].u.operand, regT0);
- return;
- }
-
JITStubCall stubCall(this, cti_op_new_func_exp);
stubCall.addArgument(TrustedImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emitSlow_op_new_func_exp(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- FunctionExecutable* executable = m_codeBlock->functionExpr(currentInstruction[2].u.operand);
- if (!executable->name().isNull())
- return;
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_new_func_exp);
- stubCall.addArgument(TrustedImmPtr(executable));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
void JIT::emit_op_new_array(Instruction* currentInstruction)
{
int length = currentInstruction[3].u.operand;
diff --git a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
index 4f8589557..095ea57d3 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -794,16 +794,14 @@ void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool dynamic)
// Verify structure.
- move(TrustedImmPtr(globalObject), regT0);
+ move(TrustedImmPtr(globalObject), regT2);
move(TrustedImmPtr(resolveInfoAddress), regT3);
loadPtr(Address(regT3, OBJECT_OFFSETOF(GlobalResolveInfo, structure)), regT1);
- addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, JSCell::structureOffset())));
+ addSlowCase(branchPtr(NotEqual, regT1, Address(regT2, JSCell::structureOffset())));
// Load property.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_propertyStorage)), regT2);
load32(Address(regT3, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), regT3);
- load32(BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
- load32(BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
+ compileGetDirectOffset(regT2, regT1, regT0, regT3, KnownNotFinal);
emitValueProfilingSite();
emitStore(dst, regT1, regT0);
map(m_bytecodeOffset + (dynamic ? OPCODE_LENGTH(op_resolve_global_dynamic) : OPCODE_LENGTH(op_resolve_global)), dst, regT1, regT0);
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
index 7478f9184..466cff7db 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -29,6 +29,7 @@
#include "JIT.h"
#include "CodeBlock.h"
+#include "GCAwareJITStubRoutine.h"
#include "GetterSetter.h"
#include "Interpreter.h"
#include "JITInlineMethods.h"
@@ -151,10 +152,26 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
emitValueProfilingSite();
}
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch)
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch, FinalObjectMode finalObjectMode)
{
- loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), scratch);
- loadPtr(BaseIndex(scratch, offset, ScalePtr, 0), result);
+ ASSERT(sizeof(JSValue) == 8);
+
+ if (finalObjectMode == MayBeFinal) {
+ Jump isInline = branch32(LessThan, offset, TrustedImm32(inlineStorageCapacity));
+ loadPtr(Address(base, JSObject::offsetOfOutOfLineStorage()), scratch);
+ Jump done = jump();
+ isInline.link(this);
+ addPtr(TrustedImm32(JSObject::offsetOfInlineStorage() + inlineStorageCapacity * sizeof(EncodedJSValue)), base, scratch);
+ done.link(this);
+ } else {
+#if !ASSERT_DISABLED
+ Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(inlineStorageCapacity));
+ breakpoint();
+ isOutOfLine.link(this);
+#endif
+ loadPtr(Address(base, JSObject::offsetOfOutOfLineStorage()), scratch);
+ }
+ loadPtr(BaseIndex(scratch, offset, ScalePtr, -inlineStorageCapacity * static_cast<ptrdiff_t>(sizeof(JSValue))), result);
}
void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
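[Editor's note: the MayBeFinal path above is worth unpacking. Both branches feed one shared BaseIndex load whose displacement is -inlineStorageCapacity * sizeof(JSValue); the inline branch pre-biases the base pointer up by inlineStorageCapacity slots so that displacement cancels, while out-of-line offsets, which start at inlineStorageCapacity, land at index 0 of the heap array. The equivalent pointer arithmetic in plain C++, reusing the illustrative layout sketched earlier:]

    #include <cstdint>

    typedef uint64_t EncodedJSValue;            // stand-in for the real type
    static const int inlineStorageCapacity = 6; // assumed value

    struct ObjectSketch {
        void* structure;
        EncodedJSValue* outOfLineStorage;
        EncodedJSValue inlineStorage[inlineStorageCapacity];
    };

    // Plain-C++ equivalent of compileGetDirectOffset's MayBeFinal path.
    static EncodedJSValue loadDirect(ObjectSketch* base, int offset)
    {
        EncodedJSValue* storage;
        if (offset < inlineStorageCapacity)
            storage = base->inlineStorage + inlineStorageCapacity; // bias up
        else
            storage = base->outOfLineStorage;
        // Shared negative displacement: cancels the bias for inline offsets
        // and rebases out-of-line offsets to start at array index 0.
        return storage[offset - inlineStorageCapacity];
    }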
@@ -177,6 +194,7 @@ void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
load32(addressFor(i), regT3);
sub32(TrustedImm32(1), regT3);
addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
+ add32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_offsetBase)), regT3);
compileGetDirectOffset(regT0, regT0, regT3, regT1);
emitPutVirtualRegister(dst, regT0);
@@ -283,7 +301,8 @@ void JIT::emit_op_del_by_id(Instruction* currentInstruction)
void JIT::emit_op_method_check(Instruction* currentInstruction)
{
// Assert that the following instruction is a get_by_id.
- ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
+ ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id
+ || m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id_out_of_line);
currentInstruction += OPCODE_LENGTH(op_method_check);
unsigned resultVReg = currentInstruction[1].u.operand;
@@ -373,14 +392,14 @@ void JIT::compileGetByIdHotPath(int baseVReg, Identifier*)
PatchableJump structureCheck = patchableBranchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
addSlowCase(structureCheck);
- loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT0);
+ ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::offsetOfOutOfLineStorage()), regT0);
DataLabelCompact displacementLabel = loadPtrWithCompactAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
Label putResult(this);
END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
- m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubGetById, m_bytecodeOffset, hotPathBegin, structureToCompare, structureCheck, displacementLabel, putResult));
+ m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubGetById, m_bytecodeOffset, hotPathBegin, structureToCompare, structureCheck, propertyStorageLoad, displacementLabel, putResult));
}
void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -440,14 +459,14 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
DataLabelPtr structureToCompare;
addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
- loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT2);
+ ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::offsetOfOutOfLineStorage()), regT2);
DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT2, patchPutByIdDefaultOffset));
END_UNINTERRUPTED_SEQUENCE(sequencePutById);
emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
- m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubPutById, m_bytecodeOffset, hotPathBegin, structureToCompare, displacementLabel));
+ m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubPutById, m_bytecodeOffset, hotPathBegin, structureToCompare, propertyStorageLoad, displacementLabel));
}
void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -471,28 +490,41 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase
// Compile a store into an object's property storage. May overwrite the
// value in objectReg.
-void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, size_t cachedOffset)
+void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, PropertyOffset cachedOffset)
{
- int offset = cachedOffset * sizeof(JSValue);
- loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), base);
- storePtr(value, Address(base, offset));
+ if (isInlineOffset(cachedOffset)) {
+ storePtr(value, Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)));
+ return;
+ }
+
+ loadPtr(Address(base, JSObject::offsetOfOutOfLineStorage()), base);
+ storePtr(value, Address(base, sizeof(JSValue) * offsetInOutOfLineStorage(cachedOffset)));
}
// Compile a load from an object's property storage. May overwrite base.
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, size_t cachedOffset)
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset)
{
- int offset = cachedOffset * sizeof(JSValue);
- loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), result);
- loadPtr(Address(result, offset), result);
+ if (isInlineOffset(cachedOffset)) {
+ loadPtr(Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)), result);
+ return;
+ }
+
+ loadPtr(Address(base, JSObject::offsetOfOutOfLineStorage()), result);
+ loadPtr(Address(result, sizeof(JSValue) * offsetInOutOfLineStorage(cachedOffset)), result);
}
-void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, size_t cachedOffset)
+void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset)
{
- loadPtr(base->addressOfPropertyStorage(), result);
- loadPtr(Address(result, cachedOffset * sizeof(WriteBarrier<Unknown>)), result);
+ if (isInlineOffset(cachedOffset)) {
+ loadPtr(base->locationForOffset(cachedOffset), result);
+ return;
+ }
+
+ loadPtr(base->addressOfOutOfLineStorage(), result);
+ loadPtr(Address(result, offsetInOutOfLineStorage(cachedOffset) * sizeof(WriteBarrier<Unknown>)), result);
}
-void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
+void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, PropertyOffset cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
JumpList failureCases;
// Check eax is an object of the right Structure.
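[Editor's note: the PropertyOffset helpers appearing in these compile*DirectOffset rewrites partition a single integer offset space: values below the inline capacity index the inline slab directly, and everything at or above it is rebased into the out-of-line array. A self-contained sketch of that mapping, again assuming a capacity of 6:]

    typedef int PropertyOffset;
    static const PropertyOffset sketchInlineCapacity = 6; // assumed value

    // Illustrative counterparts of isInlineOffset / offsetInInlineStorage /
    // offsetInOutOfLineStorage as used in the code above.
    static bool isInlineOffsetSketch(PropertyOffset o) { return o < sketchInlineCapacity; }
    static PropertyOffset offsetInInlineStorageSketch(PropertyOffset o) { return o; }
    static PropertyOffset offsetInOutOfLineStorageSketch(PropertyOffset o) { return o - sketchInlineCapacity; }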
@@ -522,7 +554,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
#endif
// emit a call only if storage realloc is needed
- bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
+ bool willNeedStorageRealloc = oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity();
if (willNeedStorageRealloc) {
// This trampoline was called like a JIT stub; before we can call again we need to
// remove the return address from the stack, to prevent the stack from becoming misaligned.
@@ -532,7 +564,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
stubCall.skipArgument(); // base
stubCall.skipArgument(); // ident
stubCall.skipArgument(); // value
- stubCall.addArgument(TrustedImm32(oldStructure->propertyStorageCapacity()));
+ stubCall.addArgument(TrustedImm32(oldStructure->outOfLineCapacity()));
stubCall.addArgument(TrustedImmPtr(newStructure));
stubCall.call(regT0);
emitGetJITStubArg(2, regT1);
@@ -564,15 +596,20 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
}
- stubInfo->stubRoutine = FINALIZE_CODE(
- patchBuffer,
- ("Baseline put_by_id transition for CodeBlock %p, return point %p",
- m_codeBlock, returnAddress.value()));
+ stubInfo->stubRoutine = createJITStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline put_by_id transition for CodeBlock %p, return point %p",
+ m_codeBlock, returnAddress.value())),
+ *m_globalData,
+ m_codeBlock->ownerExecutable(),
+ willNeedStorageRealloc,
+ newStructure);
RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine.code()));
+ repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine->code().code()));
}
-void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress)
{
RepatchBuffer repatchBuffer(codeBlock);
@@ -580,14 +617,13 @@ void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, St
// Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
- int offset = sizeof(JSValue) * cachedOffset;
-
// Patch the offset into the property map to load from, then patch the Structure to look for.
repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), structure);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel), offset);
+ repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.get.propertyStorageLoad), isOutOfLineOffset(cachedOffset));
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel), offsetRelativeToPatchedStorage(cachedOffset));
}
-void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
+void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, bool direct)
{
RepatchBuffer repatchBuffer(codeBlock);
@@ -595,11 +631,10 @@ void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo,
// Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
- int offset = sizeof(JSValue) * cachedOffset;
-
// Patch the offset into the property map to load from, then patch the Structure to look for.
repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), structure);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel), offset);
+ repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.put.propertyStorageLoad), isOutOfLineOffset(cachedOffset));
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel), offsetRelativeToPatchedStorage(cachedOffset));
}
void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
@@ -628,7 +663,7 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
// Track the stub we have created so that it will be deleted later.
- stubInfo->stubRoutine = FINALIZE_CODE(
+ stubInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
patchBuffer,
("Basline JIT get_by_id array length stub for CodeBlock %p, return point %p",
m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
@@ -637,13 +672,13 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine->code().code()));
// We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}
-void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
// The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
// referencing the prototype object - let's speculatively load its table nice and early!)
@@ -695,22 +730,26 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
}
}
// Track the stub we have created so that it will be deleted later.
- stubInfo->stubRoutine = FINALIZE_CODE(
- patchBuffer,
- ("Baseline JIT get_by_id proto stub for CodeBlock %p, return point %p",
- m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
- stubInfo->patch.baseline.u.get.putResult).executableAddress()));
+ stubInfo->stubRoutine = createJITStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline JIT get_by_id proto stub for CodeBlock %p, return point %p",
+ m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
+ stubInfo->patch.baseline.u.get.putResult).executableAddress())),
+ *m_globalData,
+ m_codeBlock->ownerExecutable(),
+ needsStubLink);
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine->code().code()));
// We don't want to patch more than once - in future go to cti_op_get_by_id_proto_list.
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
-void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
{
Jump failureCase = checkStructure(regT0, structure);
bool needsStubLink = false;
@@ -747,7 +786,7 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
}
// Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructures->list[currentIndex - 1].stubRoutine.code());
+ CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(polymorphicStructures->list[currentIndex - 1].stubRoutine));
if (!lastProtoBegin)
lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
@@ -756,21 +795,25 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
// On success, return to the hot path code, at the point where it will perform the store to dest for us.
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
- MacroAssemblerCodeRef stubCode = FINALIZE_CODE(
- patchBuffer,
- ("Baseline JIT get_by_id list stub for CodeBlock %p, return point %p",
- m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
- stubInfo->patch.baseline.u.get.putResult).executableAddress()));
+ RefPtr<JITStubRoutine> stubCode = createJITStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline JIT get_by_id list stub for CodeBlock %p, return point %p",
+ m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
+ stubInfo->patch.baseline.u.get.putResult).executableAddress())),
+ *m_globalData,
+ m_codeBlock->ownerExecutable(),
+ needsStubLink);
polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubCode, structure, isDirect);
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode.code()));
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode->code().code()));
}
-void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
+void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, CallFrame* callFrame)
{
// The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
// referencing the prototype object - let's speculatively load its table nice and early!)
@@ -819,27 +862,31 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
}
// Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());
+ CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures->list[currentIndex - 1].stubRoutine));
patchBuffer.link(failureCases1, lastProtoBegin);
patchBuffer.link(failureCases2, lastProtoBegin);
// On success, return to the hot path code, at the point where it will perform the store to dest for us.
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
- MacroAssemblerCodeRef stubCode = FINALIZE_CODE(
- patchBuffer,
- ("Baseline JIT get_by_id proto list stub for CodeBlock %p, return point %p",
- m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
- stubInfo->patch.baseline.u.get.putResult).executableAddress()));
+ RefPtr<JITStubRoutine> stubCode = createJITStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline JIT get_by_id proto list stub for CodeBlock %p, return point %p",
+ m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
+ stubInfo->patch.baseline.u.get.putResult).executableAddress())),
+ *m_globalData,
+ m_codeBlock->ownerExecutable(),
+ needsStubLink);
prototypeStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubCode, structure, prototypeStructure, isDirect);
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode.code()));
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode->code().code()));
}
-void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
+void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, CallFrame* callFrame)
{
ASSERT(count);
JumpList bucketsOfFail;
@@ -892,18 +939,22 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
}
// Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());
+ CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures->list[currentIndex - 1].stubRoutine));
patchBuffer.link(bucketsOfFail, lastProtoBegin);
// On success, return to the hot path code, at the point where it will perform the store to dest for us.
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
- CodeRef stubRoutine = FINALIZE_CODE(
- patchBuffer,
- ("Baseline JIT get_by_id chain list stub for CodeBlock %p, return point %p",
- m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
- stubInfo->patch.baseline.u.get.putResult).executableAddress()));
+ RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline JIT get_by_id chain list stub for CodeBlock %p, return point %p",
+ m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
+ stubInfo->patch.baseline.u.get.putResult).executableAddress())),
+ *m_globalData,
+ m_codeBlock->ownerExecutable(),
+ needsStubLink);
// Track the stub we have created so that it will be deleted later.
prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain, isDirect);
@@ -911,10 +962,10 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));
}
-void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
ASSERT(count);
@@ -970,17 +1021,21 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
// Track the stub we have created so that it will be deleted later.
- CodeRef stubRoutine = FINALIZE_CODE(
- patchBuffer,
- ("Baseline JIT get_by_id chain stub for CodeBlock %p, return point %p",
- m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
- stubInfo->patch.baseline.u.get.putResult).executableAddress()));
+ RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline JIT get_by_id chain stub for CodeBlock %p, return point %p",
+ m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
+ stubInfo->patch.baseline.u.get.putResult).executableAddress())),
+ *m_globalData,
+ m_codeBlock->ownerExecutable(),
+ needsStubLink);
stubInfo->stubRoutine = stubRoutine;
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));
// We don't want to patch more than once - in future go to cti_op_get_by_id_proto_list.
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
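
The repatching above leans on a handful of PropertyOffset helpers that this patch introduces elsewhere. As a rough sketch of the convention the call sites assume (inline slots first, then out-of-line slots; the capacity value here is illustrative, not JSC's), isInlineOffset(), isOutOfLineOffset() and indexRelativeToBase() can be modeled as below, with offsetRelativeToPatchedStorage() presumably being this index scaled by sizeof(JSValue):

    #include <cassert>

    // Sketch only: an assumed PropertyOffset encoding, not the real JSC definitions.
    typedef int PropertyOffset;
    static const int inlineStorageCapacity = 6; // illustrative capacity

    static inline bool isInlineOffset(PropertyOffset offset) { return offset < inlineStorageCapacity; }
    static inline bool isOutOfLineOffset(PropertyOffset offset) { return !isInlineOffset(offset); }

    // Index relative to whichever storage base the patched load leaves in the
    // register: the inline storage for inline offsets, the out-of-line storage
    // for the rest.
    static inline int indexRelativeToBase(PropertyOffset offset)
    {
        return isInlineOffset(offset) ? offset : offset - inlineStorageCapacity;
    }

    int main()
    {
        assert(isInlineOffset(0) && isOutOfLineOffset(inlineStorageCapacity));
        assert(indexRelativeToBase(inlineStorageCapacity + 3) == 3);
        return 0;
    }
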
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
index a44c576c5..84996d9f0 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
@@ -30,6 +30,7 @@
#include "JIT.h"
#include "CodeBlock.h"
+#include "GCAwareJITStubRoutine.h"
#include "Interpreter.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
@@ -93,7 +94,8 @@ void JIT::emit_op_del_by_id(Instruction* currentInstruction)
void JIT::emit_op_method_check(Instruction* currentInstruction)
{
// Assert that the following instruction is a get_by_id.
- ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
+ ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id
+ || m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id_out_of_line);
currentInstruction += OPCODE_LENGTH(op_method_check);
@@ -333,7 +335,7 @@ void JIT::compileGetByIdHotPath()
PatchableJump structureCheck = patchableBranchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
addSlowCase(structureCheck);
- loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT2);
+ ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::offsetOfOutOfLineStorage()), regT2);
DataLabelCompact displacementLabel1 = loadPtrWithCompactAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
DataLabelCompact displacementLabel2 = loadPtrWithCompactAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
@@ -341,7 +343,7 @@ void JIT::compileGetByIdHotPath()
END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
- m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubGetById, m_bytecodeOffset, hotPathBegin, structureToCompare, structureCheck, displacementLabel1, displacementLabel2, putResult));
+ m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubGetById, m_bytecodeOffset, hotPathBegin, structureToCompare, structureCheck, propertyStorageLoad, displacementLabel1, displacementLabel2, putResult));
}
void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -399,7 +401,7 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
DataLabelPtr structureToCompare;
addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
- loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT1);
+ ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::offsetOfOutOfLineStorage()), regT1);
DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT1, patchPutByIdDefaultOffset)); // payload
DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT1, patchPutByIdDefaultOffset)); // tag
@@ -407,7 +409,7 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
emitWriteBarrier(regT0, regT2, regT1, regT2, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
- m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubPutById, m_bytecodeOffset, hotPathBegin, structureToCompare, displacementLabel1, displacementLabel2));
+ m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubPutById, m_bytecodeOffset, hotPathBegin, structureToCompare, propertyStorageLoad, displacementLabel1, displacementLabel2));
}
void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -430,30 +432,41 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase
}
// Compile a store into an object's property storage. May overwrite base.
-void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, size_t cachedOffset)
+void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, PropertyOffset cachedOffset)
{
- int offset = cachedOffset;
- loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), base);
- emitStore(offset, valueTag, valuePayload, base);
+ if (isOutOfLineOffset(cachedOffset))
+ loadPtr(Address(base, JSObject::offsetOfOutOfLineStorage()), base);
+ emitStore(indexRelativeToBase(cachedOffset), valueTag, valuePayload, base);
}
// Compile a load from an object's property storage. May overwrite base.
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset)
{
- int offset = cachedOffset;
+ if (isInlineOffset(cachedOffset)) {
+ emitLoad(indexRelativeToBase(cachedOffset), resultTag, resultPayload, base);
+ return;
+ }
+
RegisterID temp = resultPayload;
- loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), temp);
- emitLoad(offset, resultTag, resultPayload, temp);
+ loadPtr(Address(base, JSObject::offsetOfOutOfLineStorage()), temp);
+ emitLoad(indexRelativeToBase(cachedOffset), resultTag, resultPayload, temp);
}
-void JIT::compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
+void JIT::compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset)
{
- loadPtr(base->addressOfPropertyStorage(), resultTag);
- load32(Address(resultTag, cachedOffset * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
- load32(Address(resultTag, cachedOffset * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
+ if (isInlineOffset(cachedOffset)) {
+ move(TrustedImmPtr(base->locationForOffset(cachedOffset)), resultTag);
+ load32(Address(resultTag, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
+ load32(Address(resultTag, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
+ return;
+ }
+
+ loadPtr(base->addressOfOutOfLineStorage(), resultTag);
+ load32(Address(resultTag, offsetInOutOfLineStorage(cachedOffset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
+ load32(Address(resultTag, offsetInOutOfLineStorage(cachedOffset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
}
-void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
+void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, PropertyOffset cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
// The code below assumes that regT0 contains the basePayload and regT1 contains the baseTag. Restore them from the stack.
#if CPU(MIPS) || CPU(SH4) || CPU(ARM)
@@ -489,7 +502,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
// Reallocate property storage if needed.
Call callTarget;
- bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
+ bool willNeedStorageRealloc = oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity();
if (willNeedStorageRealloc) {
// This trampoline was called like a JIT stub; before we can call again we need to
// remove the return address from the stack, to prevent the stack from becoming misaligned.
@@ -499,7 +512,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
stubCall.skipArgument(); // base
stubCall.skipArgument(); // ident
stubCall.skipArgument(); // value
- stubCall.addArgument(TrustedImm32(oldStructure->propertyStorageCapacity()));
+ stubCall.addArgument(TrustedImm32(oldStructure->outOfLineCapacity()));
stubCall.addArgument(TrustedImmPtr(newStructure));
stubCall.call(regT0);
@@ -545,15 +558,20 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
}
- stubInfo->stubRoutine = FINALIZE_CODE(
- patchBuffer,
- ("Baseline put_by_id transition stub for CodeBlock %p, return point %p",
- m_codeBlock, returnAddress.value()));
+ stubInfo->stubRoutine = createJITStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline put_by_id transition stub for CodeBlock %p, return point %p",
+ m_codeBlock, returnAddress.value())),
+ *m_globalData,
+ m_codeBlock->ownerExecutable(),
+ willNeedStorageRealloc,
+ newStructure);
RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine.code()));
+ repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine->code().code()));
}
-void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress)
{
RepatchBuffer repatchBuffer(codeBlock);
@@ -561,15 +579,14 @@ void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, St
// Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
- int offset = sizeof(JSValue) * cachedOffset;
-
// Patch the offset into the property map to load from, then patch the Structure to look for.
repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), structure);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
+ repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.get.propertyStorageLoad), isOutOfLineOffset(cachedOffset));
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel1), offsetRelativeToPatchedStorage(cachedOffset) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel2), offsetRelativeToPatchedStorage(cachedOffset) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
}
-void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
+void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, bool direct)
{
RepatchBuffer repatchBuffer(codeBlock);
@@ -577,12 +594,11 @@ void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo,
// Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
- int offset = sizeof(JSValue) * cachedOffset;
-
// Patch the offset into the property map to load from, then patch the Structure to look for.
repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), structure);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
+ repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.put.propertyStorageLoad), isOutOfLineOffset(cachedOffset));
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel1), offsetRelativeToPatchedStorage(cachedOffset) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel2), offsetRelativeToPatchedStorage(cachedOffset) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
}
void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
@@ -614,7 +630,7 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
// Track the stub we have created so that it will be deleted later.
- stubInfo->stubRoutine = FINALIZE_CODE(
+ stubInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
patchBuffer,
("Baseline get_by_id array length stub for CodeBlock %p, return point %p",
m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
@@ -623,13 +639,13 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine->code().code()));
// We don't want to patch more than once - in future go to cti_op_get_by_id_array_fail.
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}
-void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
// regT0 holds a JSCell*
@@ -684,23 +700,27 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
}
// Track the stub we have created so that it will be deleted later.
- stubInfo->stubRoutine = FINALIZE_CODE(
- patchBuffer,
- ("Baseline get_by_id proto stub for CodeBlock %p, return point %p",
- m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
- stubInfo->patch.baseline.u.get.putResult).executableAddress()));
+ stubInfo->stubRoutine = createJITStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline get_by_id proto stub for CodeBlock %p, return point %p",
+ m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
+ stubInfo->patch.baseline.u.get.putResult).executableAddress())),
+ *m_globalData,
+ m_codeBlock->ownerExecutable(),
+ needsStubLink);
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine->code().code()));
// We don't want to patch more than once - in future go to cti_op_get_by_id_proto_list.
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
-void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
{
// regT0 holds a JSCell*
Jump failureCase = checkStructure(regT0, structure);
@@ -737,7 +757,7 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
}
}
// Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructures->list[currentIndex - 1].stubRoutine.code());
+ CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(polymorphicStructures->list[currentIndex - 1].stubRoutine));
if (!lastProtoBegin)
lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
@@ -746,21 +766,25 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
// On success, return to the hot path code, at the point where it will perform the store to dest for us.
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
- MacroAssemblerCodeRef stubRoutine = FINALIZE_CODE(
- patchBuffer,
- ("Baseline get_by_id self list stub for CodeBlock %p, return point %p",
- m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
- stubInfo->patch.baseline.u.get.putResult).executableAddress()));
+ RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline get_by_id self list stub for CodeBlock %p, return point %p",
+ m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
+ stubInfo->patch.baseline.u.get.putResult).executableAddress())),
+ *m_globalData,
+ m_codeBlock->ownerExecutable(),
+ needsStubLink);
polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubRoutine, structure, isDirect);
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));
}
-void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
+void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, CallFrame* callFrame)
{
// regT0 holds a JSCell*
@@ -808,28 +832,32 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
}
}
// Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());
+ CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures->list[currentIndex - 1].stubRoutine));
patchBuffer.link(failureCases1, lastProtoBegin);
patchBuffer.link(failureCases2, lastProtoBegin);
// On success, return to the hot path code, at the point where it will perform the store to dest for us.
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
- MacroAssemblerCodeRef stubRoutine = FINALIZE_CODE(
- patchBuffer,
- ("Baseline get_by_id proto list stub for CodeBlock %p, return point %p",
- m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
- stubInfo->patch.baseline.u.get.putResult).executableAddress()));
+ RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline get_by_id proto list stub for CodeBlock %p, return point %p",
+ m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
+ stubInfo->patch.baseline.u.get.putResult).executableAddress())),
+ *m_globalData,
+ m_codeBlock->ownerExecutable(),
+ needsStubLink);
prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, prototypeStructure, isDirect);
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));
}
-void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
+void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, CallFrame* callFrame)
{
// regT0 holds a JSCell*
ASSERT(count);
@@ -882,18 +910,22 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
}
}
// Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());
+ CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures->list[currentIndex - 1].stubRoutine));
patchBuffer.link(bucketsOfFail, lastProtoBegin);
// On success, return to the hot path code, at the point where it will perform the store to dest for us.
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
- MacroAssemblerCodeRef stubRoutine = FINALIZE_CODE(
- patchBuffer,
- ("Baseline get_by_id chain list stub for CodeBlock %p, return point %p",
- m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
- stubInfo->patch.baseline.u.get.putResult).executableAddress()));
+ RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline get_by_id chain list stub for CodeBlock %p, return point %p",
+ m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
+ stubInfo->patch.baseline.u.get.putResult).executableAddress())),
+ *m_globalData,
+ m_codeBlock->ownerExecutable(),
+ needsStubLink);
// Track the stub we have created so that it will be deleted later.
prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain, isDirect);
@@ -901,10 +933,10 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));
}
-void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
// regT0 holds a JSCell*
ASSERT(count);
@@ -959,29 +991,47 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
// Track the stub we have created so that it will be deleted later.
- MacroAssemblerCodeRef stubRoutine = FINALIZE_CODE(
- patchBuffer,
- ("Baseline get_by_id chain stub for CodeBlock %p, return point %p",
- m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
- stubInfo->patch.baseline.u.get.putResult).executableAddress()));
+ RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline get_by_id chain stub for CodeBlock %p, return point %p",
+ m_codeBlock, stubInfo->hotPathBegin.labelAtOffset(
+ stubInfo->patch.baseline.u.get.putResult).executableAddress())),
+ *m_globalData,
+ m_codeBlock->ownerExecutable(),
+ needsStubLink);
stubInfo->stubRoutine = stubRoutine;
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));
// We don't want to patch more than once - in future go to cti_op_get_by_id_proto_list.
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset)
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset, FinalObjectMode finalObjectMode)
{
ASSERT(sizeof(JSValue) == 8);
- loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), base);
- loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
- loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
+ if (finalObjectMode == MayBeFinal) {
+ Jump isInline = branch32(LessThan, offset, TrustedImm32(inlineStorageCapacity));
+ loadPtr(Address(base, JSObject::offsetOfOutOfLineStorage()), base);
+ Jump done = jump();
+ isInline.link(this);
+ addPtr(TrustedImmPtr(JSObject::offsetOfInlineStorage() + inlineStorageCapacity * sizeof(EncodedJSValue)), base);
+ done.link(this);
+ } else {
+#if !ASSERT_DISABLED
+ Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(inlineStorageCapacity));
+ breakpoint();
+ isOutOfLine.link(this);
+#endif
+ loadPtr(Address(base, JSObject::offsetOfOutOfLineStorage()), base);
+ }
+ load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) - inlineStorageCapacity * sizeof(EncodedJSValue)), resultPayload);
+ load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) - inlineStorageCapacity * sizeof(EncodedJSValue)), resultTag);
}
void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
@@ -1006,6 +1056,7 @@ void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
load32(addressFor(i), regT3);
sub32(TrustedImm32(1), regT3);
addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
+ add32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_offsetBase)), regT3);
compileGetDirectOffset(regT2, regT1, regT0, regT3);
emitStore(dst, regT1, regT0);
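
The shared "- inlineStorageCapacity * sizeof(EncodedJSValue)" bias in the two BaseIndex loads above is what lets the inline and out-of-line paths converge on one load sequence: the inline branch pre-adds offsetOfInlineStorage() plus the full inline area to the base register, so the bias cancels back out. A standalone check of that address algebra (all concrete values below are assumed for illustration):

    #include <cassert>
    #include <cstdint>

    static const intptr_t slotSize = 8;       // sizeof(EncodedJSValue)
    static const intptr_t inlineCapacity = 6; // assumed example value

    // The biased load the JIT emits: base + offset*8 - inlineCapacity*8.
    static intptr_t biasedLoadAddress(intptr_t base, intptr_t offset)
    {
        return base + offset * slotSize - inlineCapacity * slotSize;
    }

    int main()
    {
        intptr_t inlineStorage = 0x1000;    // object + offsetOfInlineStorage(), assumed
        intptr_t outOfLineStorage = 0x2000; // separately allocated, assumed

        // Inline path: the JIT adds the whole inline area to base first.
        intptr_t offset = 3; // < inlineCapacity
        assert(biasedLoadAddress(inlineStorage + inlineCapacity * slotSize, offset)
            == inlineStorage + offset * slotSize);

        // Out-of-line path: base is the out-of-line storage pointer itself.
        offset = inlineCapacity + 2;
        assert(biasedLoadAddress(outOfLineStorage, offset)
            == outOfLineStorage + (offset - inlineCapacity) * slotSize);
        return 0;
    }
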
diff --git a/Source/JavaScriptCore/jit/JITStubRoutine.cpp b/Source/JavaScriptCore/jit/JITStubRoutine.cpp
new file mode 100644
index 000000000..951665318
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITStubRoutine.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITStubRoutine.h"
+
+#if ENABLE(JIT)
+
+#include "JSObject.h"
+#include "ScopeChain.h"
+#include "SlotVisitor.h"
+
+namespace JSC {
+
+JITStubRoutine::~JITStubRoutine() { }
+
+void JITStubRoutine::observeZeroRefCount()
+{
+ ASSERT(!m_refCount);
+ delete this;
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
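
The default observeZeroRefCount() above simply deletes the routine; the GC-aware subclasses in GCAwareJITStubRoutine.h override it to keep code alive until it can no longer be running. The createJITStubRoutine() calls threaded through the property-access patches above fit the same split. A hedged sketch of what such a factory looks like - the argument names follow the call sites, but the bodies and the GCAwareJITStubRoutine constructor signature are guesses, not the real GCAwareJITStubRoutine.h code:

    // Sketch only, not the real factory.
    PassRefPtr<JITStubRoutine> createJITStubRoutine(
        const MacroAssemblerCodeRef& code, JSGlobalData& globalData,
        const JSCell* /*owner*/, bool makesCalls)
    {
        // A routine that makes no calls cannot be on the stack when a GC or a
        // stub reset happens, so plain deref()-driven deletion is safe.
        if (!makesCalls)
            return adoptRef(new JITStubRoutine(code));

        // Otherwise the routine may be live on the stack; hand it to the
        // GC-aware subclass, which defers reclamation (assumed constructor).
        return adoptRef(new GCAwareJITStubRoutine(code, globalData));
    }
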
diff --git a/Source/JavaScriptCore/jit/JITStubRoutine.h b/Source/JavaScriptCore/jit/JITStubRoutine.h
new file mode 100644
index 000000000..4400589ff
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITStubRoutine.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITStubRoutine_h
+#define JITStubRoutine_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(JIT)
+
+#include "ExecutableAllocator.h"
+#include "MacroAssemblerCodeRef.h"
+#include <wtf/RefCounted.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+class JITStubRoutineSet;
+
+// This is the base class for JIT stub routines, and also the class you want
+// to instantiate directly if you have a routine that does not need any
+// help from the GC. If in doubt, use one of the other stub routines. But
+// if you know for sure that the stub routine cannot be on the stack while
+// someone triggers a stub routine reset, then using this will speed up
+// memory reclamation. One case where a stub routine satisfies this
+// condition is when it makes no calls to either C++ or JS code. In
+// such a routine you know that it cannot be on the stack when anything
+// interesting happens.
+// See GCAwareJITStubRoutine.h for the other stub routines.
+class JITStubRoutine {
+ WTF_MAKE_NONCOPYABLE(JITStubRoutine);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ JITStubRoutine(const MacroAssemblerCodeRef& code)
+ : m_code(code)
+ , m_refCount(1)
+ {
+ }
+
+ // Use this if you want to pass a CodePtr to someone who insists on taking
+ // a RefPtr<JITStubRoutine>.
+ static PassRefPtr<JITStubRoutine> createSelfManagedRoutine(
+ MacroAssemblerCodePtr rawCodePointer)
+ {
+ return adoptRef(new JITStubRoutine(MacroAssemblerCodeRef::createSelfManagedCodeRef(rawCodePointer)));
+ }
+
+ virtual ~JITStubRoutine();
+
+ // MacroAssemblerCodeRef is copyable, but at the cost of reference
+ // counting churn. Returning a reference is a good way of reducing
+ // the churn.
+ const MacroAssemblerCodeRef& code() const { return m_code; }
+
+ static MacroAssemblerCodePtr asCodePtr(PassRefPtr<JITStubRoutine> stubRoutine)
+ {
+ if (!stubRoutine)
+ return MacroAssemblerCodePtr();
+
+ MacroAssemblerCodePtr result = stubRoutine->code().code();
+ ASSERT(!!result);
+ return result;
+ }
+
+ void ref()
+ {
+ m_refCount++;
+ }
+
+ void deref()
+ {
+ if (--m_refCount)
+ return;
+ observeZeroRefCount();
+ }
+
+ // Helpers for the GC to determine how to deal with marking JIT stub
+ // routines.
+ uintptr_t startAddress() const { return m_code.executableMemory()->startAsInteger(); }
+ uintptr_t endAddress() const { return m_code.executableMemory()->endAsInteger(); }
+ static uintptr_t addressStep() { return jitAllocationGranule; }
+
+ static bool canPerformRangeFilter()
+ {
+#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
+ return true;
+#else
+ return false;
+#endif
+ }
+ static uintptr_t filteringStartAddress()
+ {
+#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
+ return startOfFixedExecutableMemoryPool;
+#else
+ UNREACHABLE_FOR_PLATFORM();
+ return 0;
+#endif
+ }
+ static size_t filteringExtentSize()
+ {
+#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
+ return fixedExecutableMemoryPoolSize;
+#else
+ UNREACHABLE_FOR_PLATFORM();
+ return 0;
+#endif
+ }
+ static bool passesFilter(uintptr_t address)
+ {
+ if (!canPerformRangeFilter()) {
+ // Just check that the address doesn't use any special values that would make
+ // our hashtables upset.
+ return address >= jitAllocationGranule && address != std::numeric_limits<uintptr_t>::max();
+ }
+
+ if (address - filteringStartAddress() >= filteringExtentSize())
+ return false;
+
+ return true;
+ }
+
+protected:
+ virtual void observeZeroRefCount();
+
+ MacroAssemblerCodeRef m_code;
+ unsigned m_refCount;
+};
+
+// Helper for the creation of simple stub routines that need no help from the GC.
+#define FINALIZE_CODE_FOR_STUB(patchBuffer, dataLogArguments) \
+ (adoptRef(new JITStubRoutine(FINALIZE_CODE((patchBuffer), dataLogArguments))))
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // JITStubRoutine_h
+
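
The single comparison in passesFilter() above works because the subtraction is unsigned: an address below filteringStartAddress() wraps around to a huge value and fails the same less-than-size test that rejects addresses past the end of the pool. A standalone demonstration (pool base and size are assumed values):

    #include <cassert>
    #include <cstdint>

    // Sketch: one unsigned compare rejects both sides of the range.
    static bool inPool(uintptr_t address, uintptr_t start, size_t size)
    {
        return address - start < size; // wraps for address < start
    }

    int main()
    {
        const uintptr_t start = 0x40000000; // assumed pool base
        const size_t size = 16 * 1024 * 1024;
        assert(inPool(start, start, size));
        assert(inPool(start + size - 1, start, size));
        assert(!inPool(start - 1, start, size));    // below: wraps to a huge value
        assert(!inPool(start + size, start, size)); // one past the end
        return 0;
    }
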
diff --git a/Source/JavaScriptCore/jit/JITStubs.cpp b/Source/JavaScriptCore/jit/JITStubs.cpp
index 6b8082886..2273f0f38 100644
--- a/Source/JavaScriptCore/jit/JITStubs.cpp
+++ b/Source/JavaScriptCore/jit/JITStubs.cpp
@@ -622,38 +622,46 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
#elif COMPILER(GCC) && CPU(ARM_TRADITIONAL)
asm (
+".text" "\n"
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
HIDE_SYMBOL(ctiTrampoline) "\n"
+INLINE_ARM_FUNCTION(ctiTrampoline)
SYMBOL_STRING(ctiTrampoline) ":" "\n"
"stmdb sp!, {r1-r3}" "\n"
- "stmdb sp!, {r4-r8, lr}" "\n"
+ "stmdb sp!, {r4-r6, r8-r11, lr}" "\n"
"sub sp, sp, #" STRINGIZE_VALUE_OF(PRESERVEDR4_OFFSET) "\n"
- "mov r4, r2" "\n"
- "mov r5, #512" "\n"
+ "mov r5, r2" "\n"
+ "mov r6, #512" "\n"
// r0 contains the code
- "mov lr, pc" "\n"
- "mov pc, r0" "\n"
+ "blx r0" "\n"
"add sp, sp, #" STRINGIZE_VALUE_OF(PRESERVEDR4_OFFSET) "\n"
- "ldmia sp!, {r4-r8, lr}" "\n"
+ "ldmia sp!, {r4-r6, r8-r11, lr}" "\n"
"add sp, sp, #12" "\n"
- "mov pc, lr" "\n"
+ "bx lr" "\n"
+".globl " SYMBOL_STRING(ctiTrampolineEnd) "\n"
+HIDE_SYMBOL(ctiTrampolineEnd) "\n"
+SYMBOL_STRING(ctiTrampolineEnd) ":" "\n"
);
asm (
+".text" "\n"
".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
+INLINE_ARM_FUNCTION(ctiVMThrowTrampoline)
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"mov r0, sp" "\n"
"bl " SYMBOL_STRING(cti_vm_throw) "\n"
// Both have the same return sequence
+".text" "\n"
".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
+INLINE_ARM_FUNCTION(ctiOpThrowNotCaught)
SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
"add sp, sp, #" STRINGIZE_VALUE_OF(PRESERVEDR4_OFFSET) "\n"
- "ldmia sp!, {r4-r8, lr}" "\n"
+ "ldmia sp!, {r4-r6, r8-r11, lr}" "\n"
"add sp, sp, #12" "\n"
- "mov pc, lr" "\n"
+ "bx lr" "\n"
);
#elif COMPILER(RVCT) && CPU(ARM_THUMB2)
@@ -954,7 +962,7 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
return;
}
- size_t offset = slot.cachedOffset();
+ PropertyOffset offset = slot.cachedOffset();
size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase(), propertyName, offset);
if (!count) {
stubInfo->accessType = access_get_by_id_generic;
@@ -1156,11 +1164,12 @@ template<typename T> static T throwExceptionFromOpCall(JITStackFrame& jitStackFr
}; \
asm ( \
".globl " SYMBOL_STRING(cti_##op) "\n" \
+ INLINE_ARM_FUNCTION(cti_##op) \
SYMBOL_STRING(cti_##op) ":" "\n" \
"str lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
"bl " SYMBOL_STRING(JITStubThunked_##op) "\n" \
"ldr lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
- "mov pc, lr" "\n" \
+ "bx lr" "\n" \
); \
rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
@@ -1486,13 +1495,16 @@ DEFINE_STUB_FUNCTION(JSObject*, op_put_by_id_transition_realloc)
JSValue baseValue = stackFrame.args[0].jsValue();
int32_t oldSize = stackFrame.args[3].int32();
Structure* newStructure = stackFrame.args[4].structure();
- int32_t newSize = newStructure->propertyStorageCapacity();
+ int32_t newSize = newStructure->outOfLineCapacity();
+
+ ASSERT(oldSize >= 0);
+ ASSERT(newSize > oldSize);
ASSERT(baseValue.isObject());
JSObject* base = asObject(baseValue);
JSGlobalData& globalData = *stackFrame.globalData;
- PropertyStorage newStorage = base->growPropertyStorage(globalData, oldSize, newSize);
- base->setPropertyStorage(globalData, newStorage, newStructure);
+ PropertyStorage newStorage = base->growOutOfLineStorage(globalData, oldSize, newSize);
+ base->setOutOfLineStorage(globalData, newStorage, newStructure);
return base;
}
@@ -1710,7 +1722,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail)
if (stubInfo->accessType == access_get_by_id_self) {
ASSERT(!stubInfo->stubRoutine);
- polymorphicStructureList = new PolymorphicAccessStructureList(callFrame->globalData(), codeBlock->ownerExecutable(), MacroAssemblerCodeRef(), stubInfo->u.getByIdSelf.baseObjectStructure.get(), true);
+ polymorphicStructureList = new PolymorphicAccessStructureList(callFrame->globalData(), codeBlock->ownerExecutable(), 0, stubInfo->u.getByIdSelf.baseObjectStructure.get(), true);
stubInfo->initGetByIdSelfList(polymorphicStructureList, 1);
} else {
polymorphicStructureList = stubInfo->u.getByIdSelfList.structureList;
@@ -1736,12 +1748,12 @@ static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(JSG
switch (stubInfo->accessType) {
case access_get_by_id_proto:
prototypeStructureList = new PolymorphicAccessStructureList(globalData, owner, stubInfo->stubRoutine, stubInfo->u.getByIdProto.baseObjectStructure.get(), stubInfo->u.getByIdProto.prototypeStructure.get(), true);
- stubInfo->stubRoutine = MacroAssemblerCodeRef();
+ stubInfo->stubRoutine.clear();
stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
break;
case access_get_by_id_chain:
prototypeStructureList = new PolymorphicAccessStructureList(globalData, owner, stubInfo->stubRoutine, stubInfo->u.getByIdChain.baseObjectStructure.get(), stubInfo->u.getByIdChain.chain.get(), true);
- stubInfo->stubRoutine = MacroAssemblerCodeRef();
+ stubInfo->stubRoutine.clear();
stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
break;
case access_get_by_id_proto_list:
@@ -1814,7 +1826,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
ASSERT(slot.slotBase().isObject());
JSObject* slotBaseObject = asObject(slot.slotBase());
- size_t offset = slot.cachedOffset();
+ PropertyOffset offset = slot.cachedOffset();
if (slot.slotBase() == baseValue)
ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
@@ -1928,7 +1940,12 @@ DEFINE_STUB_FUNCTION(void, optimize)
unsigned bytecodeIndex = stackFrame.args[0].int32();
#if ENABLE(JIT_VERBOSE_OSR)
- dataLog("%p: Entered optimize with bytecodeIndex = %u, executeCounter = %s, reoptimizationRetryCounter = %u, optimizationDelayCounter = %u\n", codeBlock, bytecodeIndex, codeBlock->jitExecuteCounter().status(), codeBlock->reoptimizationRetryCounter(), codeBlock->optimizationDelayCounter());
+ dataLog("%p: Entered optimize with bytecodeIndex = %u, executeCounter = %s, reoptimizationRetryCounter = %u, optimizationDelayCounter = %u, exitCounter = ", codeBlock, bytecodeIndex, codeBlock->jitExecuteCounter().status(), codeBlock->reoptimizationRetryCounter(), codeBlock->optimizationDelayCounter());
+ if (codeBlock->hasOptimizedReplacement())
+ dataLog("%u", codeBlock->replacement()->osrExitCounter());
+ else
+ dataLog("N/A");
+ dataLog("\n");
#endif
if (!codeBlock->checkIfOptimizationThresholdReached()) {
@@ -1938,8 +1955,21 @@ DEFINE_STUB_FUNCTION(void, optimize)
if (codeBlock->hasOptimizedReplacement()) {
#if ENABLE(JIT_VERBOSE_OSR)
- dataLog("Considering OSR into %p(%p) with success/fail %u/%u.\n", codeBlock, codeBlock->replacement(), codeBlock->replacement()->speculativeSuccessCounter(), codeBlock->replacement()->speculativeFailCounter());
+ dataLog("Considering OSR into %p(%p).\n", codeBlock, codeBlock->replacement());
#endif
+ // If we have an optimized replacement, then it must be the case that we entered
+ // cti_optimize from a loop. That's because if there's an optimized replacement,
+ // then all calls to this function will be relinked to the replacement and so
+ // the prologue OSR will never fire.
+
+ // This is an interesting threshold check. Consider that a function OSR exits
+ // in the middle of a loop, while having a relatively low exit count. The exit
+ // will reset the execution counter to some target threshold, meaning that this
+ // code won't be reached until that loop heats up for >=1000 executions. But then
+ // we do a second check here, to see if we should either reoptimize, or just
+ // attempt OSR entry. Hence it might even be correct for
+ // shouldReoptimizeFromLoopNow() to always return true. But we make it do some
+ // additional checking anyway, to reduce the amount of recompilation thrashing.
if (codeBlock->replacement()->shouldReoptimizeFromLoopNow()) {
#if ENABLE(JIT_VERBOSE_OSR)
dataLog("Triggering reoptimization of %p(%p) (in loop).\n", codeBlock, codeBlock->replacement());
@@ -1985,7 +2015,6 @@ DEFINE_STUB_FUNCTION(void, optimize)
#endif
codeBlock->optimizeSoon();
- optimizedCodeBlock->countSpeculationSuccess();
STUB_SET_RETURN_ADDRESS(address);
return;
}
@@ -1996,10 +2025,10 @@ DEFINE_STUB_FUNCTION(void, optimize)
// Count the OSR failure as a speculation failure. If this happens a lot, then
// reoptimize.
- optimizedCodeBlock->countSpeculationFailure();
+ optimizedCodeBlock->countOSRExit();
#if ENABLE(JIT_VERBOSE_OSR)
- dataLog("Encountered OSR failure into %p(%p) with success/fail %u/%u.\n", codeBlock, codeBlock->replacement(), codeBlock->replacement()->speculativeSuccessCounter(), codeBlock->replacement()->speculativeFailCounter());
+ dataLog("Encountered OSR failure into %p(%p).\n", codeBlock, codeBlock->replacement());
#endif
// We are a lot more conservative about triggering reoptimization after OSR failure than
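
The renamed countOSRExit() above feeds the exitCounter printed by the verbose log and the shouldReoptimizeFromLoopNow() check earlier in this function. A toy model of that policy, with an invented threshold - JSC's real heuristic also weighs reoptimization retries and recompilation cost:

    #include <cstdio>

    // Toy model only: a failed OSR entry bumps the optimized block's exit
    // counter; crossing an assumed threshold flips the decision from "retry
    // OSR entry" to "reoptimize".
    struct ReplacementModel {
        unsigned osrExitCounter;
        ReplacementModel() : osrExitCounter(0) { }
        void countOSRExit() { osrExitCounter++; }
        bool shouldReoptimizeFromLoopNow() const { return osrExitCounter >= 100; }
    };

    int main()
    {
        ReplacementModel replacement;
        while (!replacement.shouldReoptimizeFromLoopNow())
            replacement.countOSRExit(); // each OSR failure is counted
        printf("reoptimize after %u exits\n", replacement.osrExitCounter);
        return 0;
    }
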
diff --git a/Source/JavaScriptCore/jit/JITStubs.h b/Source/JavaScriptCore/jit/JITStubs.h
index d2bc15e64..22a1dd773 100644
--- a/Source/JavaScriptCore/jit/JITStubs.h
+++ b/Source/JavaScriptCore/jit/JITStubs.h
@@ -190,8 +190,10 @@ namespace JSC {
void* preservedR4;
void* preservedR5;
void* preservedR6;
- void* preservedR7;
void* preservedR8;
+ void* preservedR9;
+ void* preservedR10;
+ void* preservedR11;
void* preservedLink;
RegisterFile* registerFile;
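
The reshuffled preserved-register slots above mirror the new ctiTrampoline prologue, stmdb sp!, {r4-r6, r8-r11, lr}: STMDB stores the lowest-numbered register at the lowest address, so the struct fields must appear in ascending register order, ending with lr. A compile-time sanity sketch - the field list is copied from the diff, the pointer-width slot size is an assumption:

    #include <cstddef>

    // Sketch mirroring the ARM_TRADITIONAL preserved area after this patch.
    struct PreservedArea {
        void* preservedR4;
        void* preservedR5;
        void* preservedR6;
        void* preservedR8;
        void* preservedR9;
        void* preservedR10;
        void* preservedR11;
        void* preservedLink; // lr, stored at the highest address
    };

    int main()
    {
        // Eight registers pushed, one slot each.
        return sizeof(PreservedArea) == 8 * sizeof(void*) ? 0 : 1;
    }
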
diff --git a/Source/JavaScriptCore/jit/JSInterfaceJIT.h b/Source/JavaScriptCore/jit/JSInterfaceJIT.h
index 05d1ce5ad..6b7dd2184 100644
--- a/Source/JavaScriptCore/jit/JSInterfaceJIT.h
+++ b/Source/JavaScriptCore/jit/JSInterfaceJIT.h
@@ -94,7 +94,7 @@ namespace JSC {
static const FPRegisterID fpRegT1 = X86Registers::xmm1;
static const FPRegisterID fpRegT2 = X86Registers::xmm2;
static const FPRegisterID fpRegT3 = X86Registers::xmm3;
-#elif CPU(ARM_THUMB2)
+#elif CPU(ARM)
static const RegisterID returnValueRegister = ARMRegisters::r0;
static const RegisterID cachedResultRegister = ARMRegisters::r0;
static const RegisterID firstArgumentRegister = ARMRegisters::r0;
@@ -107,35 +107,11 @@ namespace JSC {
static const RegisterID regT1 = ARMRegisters::r1;
static const RegisterID regT2 = ARMRegisters::r2;
static const RegisterID regT3 = ARMRegisters::r4;
-
+
+ // Update ctiTrampoline in JITStubs.cpp if these values are changed!
static const RegisterID callFrameRegister = ARMRegisters::r5;
static const RegisterID timeoutCheckRegister = ARMRegisters::r6;
-
- static const FPRegisterID fpRegT0 = ARMRegisters::d0;
- static const FPRegisterID fpRegT1 = ARMRegisters::d1;
- static const FPRegisterID fpRegT2 = ARMRegisters::d2;
- static const FPRegisterID fpRegT3 = ARMRegisters::d3;
-#elif CPU(ARM_TRADITIONAL)
- static const RegisterID returnValueRegister = ARMRegisters::r0;
- static const RegisterID cachedResultRegister = ARMRegisters::r0;
- static const RegisterID firstArgumentRegister = ARMRegisters::r0;
-
- static const RegisterID timeoutCheckRegister = ARMRegisters::r5;
- static const RegisterID callFrameRegister = ARMRegisters::r4;
-
- static const RegisterID regT0 = ARMRegisters::r0;
- static const RegisterID regT1 = ARMRegisters::r1;
- static const RegisterID regT2 = ARMRegisters::r2;
- // Callee preserved
- static const RegisterID regT3 = ARMRegisters::r7;
-
- static const RegisterID regS0 = ARMRegisters::S0;
- // Callee preserved
- static const RegisterID regS1 = ARMRegisters::S1;
-
- static const RegisterID regStackPtr = ARMRegisters::sp;
- static const RegisterID regLink = ARMRegisters::lr;
-
+
static const FPRegisterID fpRegT0 = ARMRegisters::d0;
static const FPRegisterID fpRegT1 = ARMRegisters::d1;
static const FPRegisterID fpRegT2 = ARMRegisters::d2;
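[Editor's note: the new comment warns that ctiTrampoline hard-codes these register choices. Below is a hypothetical compile-time guard for that coupling; JSC itself has no such check and relies on the comment alone, and the enum here is a stub, not ARMRegisters.]

    // Hypothetical guard: fail the build if someone moves callFrame/timeout
    // off r5/r6 without also updating the trampoline.
    namespace toy {
    enum RegisterID { r0, r1, r2, r3, r4, r5, r6 };

    static const RegisterID callFrameRegister = r5;
    static const RegisterID timeoutCheckRegister = r6;

    // ctiTrampoline (JITStubs.cpp) hard-codes r5/r6; keep these in sync.
    static_assert(callFrameRegister == r5, "ctiTrampoline assumes callFrame in r5");
    static_assert(timeoutCheckRegister == r6, "ctiTrampoline assumes timeout in r6");
    }

    int main() { return 0; }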
diff --git a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
index c98e57d12..e17b45d94 100644
--- a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
+++ b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
@@ -37,9 +37,7 @@ namespace JSC {
class SpecializedThunkJIT : public JSInterfaceJIT {
public:
static const int ThisArgument = -1;
- SpecializedThunkJIT(int expectedArgCount, JSGlobalData* globalData)
- : m_expectedArgCount(expectedArgCount)
- , m_globalData(globalData)
+ SpecializedThunkJIT(int expectedArgCount)
{
// Check that we have the expected number of arguments
m_failures.append(branch32(NotEqual, payloadFor(RegisterFile::ArgumentCount), TrustedImm32(expectedArgCount + 1)));
@@ -166,8 +164,6 @@ namespace JSC {
#endif
}
- int m_expectedArgCount;
- JSGlobalData* m_globalData;
MacroAssembler::JumpList m_failures;
Vector<std::pair<Call, FunctionPtr> > m_calls;
};
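[Editor's note: with the unused m_expectedArgCount and m_globalData members gone, callers construct the thunk JIT with just the arity. A toy reduction of the same simplification; the real class emits a branch32 guard, which is stubbed here as a recorded check.]

    #include <vector>
    #include <cstdio>

    // Toy reduction of the change above: the class keeps only what it
    // actually uses, and the arity check happens in the constructor.
    class ToyThunkJIT {
    public:
        explicit ToyThunkJIT(int expectedArgCount)
        {
            // Stand-in for: m_failures.append(branch32(NotEqual, ...,
            //     TrustedImm32(expectedArgCount + 1)));
            m_failureChecks.push_back(expectedArgCount + 1);
        }

        size_t failureCheckCount() const { return m_failureChecks.size(); }

    private:
        std::vector<int> m_failureChecks;
    };

    int main()
    {
        ToyThunkJIT jit(1); // mirrors "SpecializedThunkJIT jit(1)" in the diff
        std::printf("%zu argument-count check(s) emitted\n", jit.failureCheckCount());
        return 0;
    }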
diff --git a/Source/JavaScriptCore/jit/ThunkGenerators.cpp b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
index c440b5157..c6431c22d 100644
--- a/Source/JavaScriptCore/jit/ThunkGenerators.cpp
+++ b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
@@ -75,7 +75,7 @@ static void charToString(SpecializedThunkJIT& jit, JSGlobalData* globalData, Mac
MacroAssemblerCodeRef charCodeAtThunkGenerator(JSGlobalData* globalData)
{
- SpecializedThunkJIT jit(1, globalData);
+ SpecializedThunkJIT jit(1);
stringCharLoad(jit);
jit.returnInt32(SpecializedThunkJIT::regT0);
return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall(), "charCodeAt");
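[Editor's note: every generator changed below follows the same shape: construct with only the arity, then supply globalData at finalize time. A skeletal version of that pattern with the JSC types stubbed out; the real finalize() returns a MacroAssemblerCodeRef and also takes a native-call fallback, as above.]

    struct GlobalDataStub {};

    struct ThunkJITStub {
        explicit ThunkJITStub(int /*expectedArgCount*/) {}
        bool finalize(GlobalDataStub&, const char* /*name*/) { return true; }
    };

    static bool exampleThunkGenerator(GlobalDataStub* globalData)
    {
        ThunkJITStub jit(1);          // arity only; no globalData at construction
        // ... emit the fast path here ...
        return jit.finalize(*globalData, "example"); // globalData supplied here
    }

    int main()
    {
        GlobalDataStub globalData;
        return exampleThunkGenerator(&globalData) ? 0 : 1;
    }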
@@ -83,7 +83,7 @@ MacroAssemblerCodeRef charCodeAtThunkGenerator(JSGlobalData* globalData)
MacroAssemblerCodeRef charAtThunkGenerator(JSGlobalData* globalData)
{
- SpecializedThunkJIT jit(1, globalData);
+ SpecializedThunkJIT jit(1);
stringCharLoad(jit);
charToString(jit, globalData, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
jit.returnJSCell(SpecializedThunkJIT::regT0);
@@ -92,7 +92,7 @@ MacroAssemblerCodeRef charAtThunkGenerator(JSGlobalData* globalData)
MacroAssemblerCodeRef fromCharCodeThunkGenerator(JSGlobalData* globalData)
{
- SpecializedThunkJIT jit(1, globalData);
+ SpecializedThunkJIT jit(1);
// load char code
jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
charToString(jit, globalData, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
@@ -102,7 +102,7 @@ MacroAssemblerCodeRef fromCharCodeThunkGenerator(JSGlobalData* globalData)
MacroAssemblerCodeRef sqrtThunkGenerator(JSGlobalData* globalData)
{
- SpecializedThunkJIT jit(1, globalData);
+ SpecializedThunkJIT jit(1);
if (!jit.supportsFloatingPointSqrt())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
@@ -182,7 +182,7 @@ static const double halfConstant = 0.5;
MacroAssemblerCodeRef floorThunkGenerator(JSGlobalData* globalData)
{
- SpecializedThunkJIT jit(1, globalData);
+ SpecializedThunkJIT jit(1);
MacroAssembler::Jump nonIntJump;
if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
@@ -214,7 +214,7 @@ MacroAssemblerCodeRef floorThunkGenerator(JSGlobalData* globalData)
MacroAssemblerCodeRef ceilThunkGenerator(JSGlobalData* globalData)
{
- SpecializedThunkJIT jit(1, globalData);
+ SpecializedThunkJIT jit(1);
if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
MacroAssembler::Jump nonIntJump;
@@ -233,7 +233,7 @@ MacroAssemblerCodeRef ceilThunkGenerator(JSGlobalData* globalData)
MacroAssemblerCodeRef roundThunkGenerator(JSGlobalData* globalData)
{
- SpecializedThunkJIT jit(1, globalData);
+ SpecializedThunkJIT jit(1);
if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
MacroAssembler::Jump nonIntJump;
@@ -269,7 +269,7 @@ MacroAssemblerCodeRef expThunkGenerator(JSGlobalData* globalData)
{
if (!UnaryDoubleOpWrapper(exp))
return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
- SpecializedThunkJIT jit(1, globalData);
+ SpecializedThunkJIT jit(1);
if (!jit.supportsFloatingPoint())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
@@ -282,7 +282,7 @@ MacroAssemblerCodeRef logThunkGenerator(JSGlobalData* globalData)
{
if (!UnaryDoubleOpWrapper(log))
return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
- SpecializedThunkJIT jit(1, globalData);
+ SpecializedThunkJIT jit(1);
if (!jit.supportsFloatingPoint())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
@@ -293,7 +293,7 @@ MacroAssemblerCodeRef logThunkGenerator(JSGlobalData* globalData)
MacroAssemblerCodeRef absThunkGenerator(JSGlobalData* globalData)
{
- SpecializedThunkJIT jit(1, globalData);
+ SpecializedThunkJIT jit(1);
if (!jit.supportsFloatingPointAbs())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
MacroAssembler::Jump nonIntJump;
@@ -313,7 +313,7 @@ MacroAssemblerCodeRef absThunkGenerator(JSGlobalData* globalData)
MacroAssemblerCodeRef powThunkGenerator(JSGlobalData* globalData)
{
- SpecializedThunkJIT jit(2, globalData);
+ SpecializedThunkJIT jit(2);
if (!jit.supportsFloatingPoint())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());