path: root/Source/JavaScriptCore/jit
Diffstat (limited to 'Source/JavaScriptCore/jit')
-rw-r--r--  Source/JavaScriptCore/jit/CompactJITCodeMap.h | 298
-rw-r--r--  Source/JavaScriptCore/jit/ExecutableAllocator.cpp | 180
-rw-r--r--  Source/JavaScriptCore/jit/ExecutableAllocator.h | 243
-rw-r--r--  Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp | 144
-rw-r--r--  Source/JavaScriptCore/jit/JIT.cpp | 741
-rw-r--r--  Source/JavaScriptCore/jit/JIT.h | 1175
-rw-r--r--  Source/JavaScriptCore/jit/JITArithmetic.cpp | 1139
-rw-r--r--  Source/JavaScriptCore/jit/JITArithmetic32_64.cpp | 1277
-rw-r--r--  Source/JavaScriptCore/jit/JITCall.cpp | 213
-rw-r--r--  Source/JavaScriptCore/jit/JITCall32_64.cpp | 299
-rw-r--r--  Source/JavaScriptCore/jit/JITCode.h | 167
-rw-r--r--  Source/JavaScriptCore/jit/JITInlineMethods.h | 987
-rw-r--r--  Source/JavaScriptCore/jit/JITOpcodes.cpp | 1660
-rw-r--r--  Source/JavaScriptCore/jit/JITOpcodes32_64.cpp | 1733
-rw-r--r--  Source/JavaScriptCore/jit/JITPropertyAccess.cpp | 1179
-rw-r--r--  Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp | 1133
-rw-r--r--  Source/JavaScriptCore/jit/JITStubCall.h | 263
-rw-r--r--  Source/JavaScriptCore/jit/JITStubs.cpp | 3748
-rw-r--r--  Source/JavaScriptCore/jit/JITStubs.h | 451
-rw-r--r--  Source/JavaScriptCore/jit/JITWriteBarrier.h | 147
-rw-r--r--  Source/JavaScriptCore/jit/JSInterfaceJIT.h | 358
-rw-r--r--  Source/JavaScriptCore/jit/SpecializedThunkJIT.h | 179
-rw-r--r--  Source/JavaScriptCore/jit/ThunkGenerators.cpp | 350
-rw-r--r--  Source/JavaScriptCore/jit/ThunkGenerators.h | 51
24 files changed, 18115 insertions, 0 deletions
diff --git a/Source/JavaScriptCore/jit/CompactJITCodeMap.h b/Source/JavaScriptCore/jit/CompactJITCodeMap.h
new file mode 100644
index 000000000..5b92a8961
--- /dev/null
+++ b/Source/JavaScriptCore/jit/CompactJITCodeMap.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CompactJITCodeMap_h
+#define CompactJITCodeMap_h
+
+#include <wtf/Assertions.h>
+#include <wtf/FastAllocBase.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/OwnPtr.h>
+#include <wtf/PassOwnPtr.h>
+#include <wtf/UnusedParam.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+// Gives you a compressed map between bytecode indices and machine code
+// entry points. The compression simply tries to use either 1, 2, or 4 bytes for
+// any given offset. The largest offset that can be stored is 2^30.
+
+// Example use:
+//
+// CompactJITCodeMap::Encoder encoder;
+// encoder.append(a, b);
+// encoder.append(c, d); // preconditions: c >= a, d >= b
+// OwnPtr<CompactJITCodeMap> map = encoder.finish();
+//
+// At some later time:
+//
+// Vector<BytecodeAndMachineOffset> decoded;
+// map->decode(decoded);
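+//
+// Encoding sketch (informative): each appended pair is stored as two deltas
+// from the previous entry, and each delta uses a variable-length prefix:
+// values <= 127 take one byte, values <= 16383 take two bytes with the top
+// bit of the first byte set, and anything larger takes four bytes with the
+// top two bits set. For example, a delta of 300 (0x12C) is written as the
+// two bytes 0x81 0x2C and decoded back as (0x01 << 8) | 0x2C.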
+
+struct BytecodeAndMachineOffset {
+ BytecodeAndMachineOffset() { }
+
+ BytecodeAndMachineOffset(unsigned bytecodeIndex, unsigned machineCodeOffset)
+ : m_bytecodeIndex(bytecodeIndex)
+ , m_machineCodeOffset(machineCodeOffset)
+ {
+ }
+
+ unsigned m_bytecodeIndex;
+ unsigned m_machineCodeOffset;
+
+ static inline unsigned getBytecodeIndex(BytecodeAndMachineOffset* mapping)
+ {
+ return mapping->m_bytecodeIndex;
+ }
+
+ static inline unsigned getMachineCodeOffset(BytecodeAndMachineOffset* mapping)
+ {
+ return mapping->m_machineCodeOffset;
+ }
+};
+
+class CompactJITCodeMap {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ ~CompactJITCodeMap()
+ {
+ if (m_buffer)
+ fastFree(m_buffer);
+ }
+
+ unsigned numberOfEntries() const
+ {
+ return m_numberOfEntries;
+ }
+
+ void decode(Vector<BytecodeAndMachineOffset>& result) const;
+
+private:
+ CompactJITCodeMap(uint8_t* buffer, unsigned size, unsigned numberOfEntries)
+ : m_buffer(buffer)
+#if !ASSERT_DISABLED
+ , m_size(size)
+#endif
+ , m_numberOfEntries(numberOfEntries)
+ {
+ UNUSED_PARAM(size);
+ }
+
+ uint8_t at(unsigned index) const
+ {
+ ASSERT(index < m_size);
+ return m_buffer[index];
+ }
+
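+ // Decodes one variable-length number at 'index' and advances 'index' past it.
+ // A clear top bit means a 1-byte value, the top bit set with bit 6 clear means
+ // a 2-byte value, and both top bits set mean a 4-byte value.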
+ unsigned decodeNumber(unsigned& index) const
+ {
+ uint8_t headValue = at(index++);
+ if (!(headValue & 128))
+ return headValue;
+ if (!(headValue & 64))
+ return (static_cast<unsigned>(headValue & ~128) << 8) | at(index++);
+ unsigned second = at(index++);
+ unsigned third = at(index++);
+ unsigned fourth = at(index++);
+ return (static_cast<unsigned>(headValue & ~(128 + 64)) << 24) | (second << 16) | (third << 8) | fourth;
+ }
+
+ uint8_t* m_buffer;
+#if !ASSERT_DISABLED
+ unsigned m_size;
+#endif
+ unsigned m_numberOfEntries;
+
+public:
+ class Encoder {
+ WTF_MAKE_NONCOPYABLE(Encoder);
+ public:
+ Encoder();
+ ~Encoder();
+
+ void ensureCapacityFor(unsigned numberOfEntriesToAdd);
+ void append(unsigned bytecodeIndex, unsigned machineCodeOffset);
+ PassOwnPtr<CompactJITCodeMap> finish();
+
+ private:
+ void appendByte(uint8_t value);
+ void encodeNumber(uint32_t value);
+
+ uint8_t* m_buffer;
+ unsigned m_size;
+ unsigned m_capacity;
+ unsigned m_numberOfEntries;
+
+ unsigned m_previousBytecodeIndex;
+ unsigned m_previousMachineCodeOffset;
+ };
+
+ class Decoder {
+ WTF_MAKE_NONCOPYABLE(Decoder);
+ public:
+ Decoder(const CompactJITCodeMap*);
+
+ unsigned numberOfEntriesRemaining() const;
+ void read(unsigned& bytecodeIndex, unsigned& machineCodeOffset);
+
+ private:
+ const CompactJITCodeMap* m_jitCodeMap;
+ unsigned m_previousBytecodeIndex;
+ unsigned m_previousMachineCodeOffset;
+ unsigned m_numberOfEntriesRemaining;
+ unsigned m_bufferIndex;
+ };
+
+private:
+ friend class Encoder;
+ friend class Decoder;
+};
+
+inline void CompactJITCodeMap::decode(Vector<BytecodeAndMachineOffset>& result) const
+{
+ Decoder decoder(this);
+ result.resize(decoder.numberOfEntriesRemaining());
+ for (unsigned i = 0; i < result.size(); ++i)
+ decoder.read(result[i].m_bytecodeIndex, result[i].m_machineCodeOffset);
+
+ ASSERT(!decoder.numberOfEntriesRemaining());
+}
+
+inline CompactJITCodeMap::Encoder::Encoder()
+ : m_buffer(0)
+ , m_size(0)
+ , m_capacity(0)
+ , m_numberOfEntries(0)
+ , m_previousBytecodeIndex(0)
+ , m_previousMachineCodeOffset(0)
+{
+}
+
+inline CompactJITCodeMap::Encoder::~Encoder()
+{
+ if (m_buffer)
+ fastFree(m_buffer);
+}
+
+inline void CompactJITCodeMap::Encoder::append(unsigned bytecodeIndex, unsigned machineCodeOffset)
+{
+ ASSERT(bytecodeIndex >= m_previousBytecodeIndex);
+ ASSERT(machineCodeOffset >= m_previousMachineCodeOffset);
+ ensureCapacityFor(1);
+ encodeNumber(bytecodeIndex - m_previousBytecodeIndex);
+ encodeNumber(machineCodeOffset - m_previousMachineCodeOffset);
+ m_previousBytecodeIndex = bytecodeIndex;
+ m_previousMachineCodeOffset = machineCodeOffset;
+ m_numberOfEntries++;
+}
+
+inline PassOwnPtr<CompactJITCodeMap> CompactJITCodeMap::Encoder::finish()
+{
+ m_capacity = m_size;
+ m_buffer = static_cast<uint8_t*>(fastRealloc(m_buffer, m_capacity));
+ OwnPtr<CompactJITCodeMap> result = adoptPtr(new CompactJITCodeMap(m_buffer, m_size, m_numberOfEntries));
+ m_buffer = 0;
+ m_size = 0;
+ m_capacity = 0;
+ m_numberOfEntries = 0;
+ m_previousBytecodeIndex = 0;
+ m_previousMachineCodeOffset = 0;
+ return result.release();
+}
+
+inline void CompactJITCodeMap::Encoder::appendByte(uint8_t value)
+{
+ ASSERT(m_size + 1 <= m_capacity);
+ m_buffer[m_size++] = value;
+}
+
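+// Emits 'value' using the 1-, 2- or 4-byte scheme described at the top of this
+// file; the two top bits of the first byte record which form was chosen.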
+inline void CompactJITCodeMap::Encoder::encodeNumber(uint32_t value)
+{
+ ASSERT(m_size + 4 <= m_capacity);
+ ASSERT(value < (1 << 30));
+ if (value <= 127) {
+ uint8_t headValue = static_cast<uint8_t>(value);
+ ASSERT(!(headValue & 128));
+ appendByte(headValue);
+ } else if (value <= 16383) {
+ uint8_t headValue = static_cast<uint8_t>(value >> 8);
+ ASSERT(!(headValue & 128));
+ ASSERT(!(headValue & 64));
+ appendByte(headValue | 128);
+ appendByte(static_cast<uint8_t>(value));
+ } else {
+ uint8_t headValue = static_cast<uint8_t>(value >> 24);
+ ASSERT(!(headValue & 128));
+ ASSERT(!(headValue & 64));
+ appendByte(headValue | 128 | 64);
+ appendByte(static_cast<uint8_t>(value >> 16));
+ appendByte(static_cast<uint8_t>(value >> 8));
+ appendByte(static_cast<uint8_t>(value));
+ }
+}
+
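+// Each appended entry writes at most two numbers of up to four bytes each,
+// hence the 2 * 4 bytes reserved per entry below.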
+inline void CompactJITCodeMap::Encoder::ensureCapacityFor(unsigned numberOfEntriesToAdd)
+{
+ unsigned capacityNeeded = m_size + numberOfEntriesToAdd * 2 * 4;
+ if (capacityNeeded > m_capacity) {
+ m_capacity = capacityNeeded * 2;
+ m_buffer = static_cast<uint8_t*>(fastRealloc(m_buffer, m_capacity));
+ }
+}
+
+inline CompactJITCodeMap::Decoder::Decoder(const CompactJITCodeMap* jitCodeMap)
+ : m_jitCodeMap(jitCodeMap)
+ , m_previousBytecodeIndex(0)
+ , m_previousMachineCodeOffset(0)
+ , m_numberOfEntriesRemaining(jitCodeMap->m_numberOfEntries)
+ , m_bufferIndex(0)
+{
+}
+
+inline unsigned CompactJITCodeMap::Decoder::numberOfEntriesRemaining() const
+{
+ ASSERT(m_numberOfEntriesRemaining || m_bufferIndex == m_jitCodeMap->m_size);
+ return m_numberOfEntriesRemaining;
+}
+
+inline void CompactJITCodeMap::Decoder::read(unsigned& bytecodeIndex, unsigned& machineCodeOffset)
+{
+ ASSERT(numberOfEntriesRemaining());
+
+ m_previousBytecodeIndex += m_jitCodeMap->decodeNumber(m_bufferIndex);
+ m_previousMachineCodeOffset += m_jitCodeMap->decodeNumber(m_bufferIndex);
+ bytecodeIndex = m_previousBytecodeIndex;
+ machineCodeOffset = m_previousMachineCodeOffset;
+ m_numberOfEntriesRemaining--;
+}
+
+} // namespace JSC
+
+#endif // CompactJITCodeMap_h
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
new file mode 100644
index 000000000..82c149d0e
--- /dev/null
+++ b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include "ExecutableAllocator.h"
+
+#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+#include <wtf/MetaAllocator.h>
+#include <wtf/PageReservation.h>
+#include <wtf/VMTags.h>
+#endif
+
+#if ENABLE(ASSEMBLER)
+
+using namespace WTF;
+
+namespace JSC {
+
+#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+
+class DemandExecutableAllocator : public MetaAllocator {
+public:
+ DemandExecutableAllocator()
+ : MetaAllocator(32) // round up all allocations to 32 bytes
+ {
+ // Don't preallocate any memory here.
+ }
+
+ virtual ~DemandExecutableAllocator()
+ {
+ for (unsigned i = 0; i < reservations.size(); ++i)
+ reservations.at(i).deallocate();
+ }
+
+protected:
+ virtual void* allocateNewSpace(size_t& numPages)
+ {
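+ // Round the requested size up to a whole multiple of JIT_ALLOCATOR_LARGE_ALLOC_SIZE,
+ // then express the result back in pages.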
+ size_t newNumPages = (((numPages * pageSize() + JIT_ALLOCATOR_LARGE_ALLOC_SIZE - 1) / JIT_ALLOCATOR_LARGE_ALLOC_SIZE * JIT_ALLOCATOR_LARGE_ALLOC_SIZE) + pageSize() - 1) / pageSize();
+
+ ASSERT(newNumPages >= numPages);
+
+ numPages = newNumPages;
+
+ PageReservation reservation = PageReservation::reserve(numPages * pageSize(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
+ if (!reservation)
+ CRASH();
+
+ reservations.append(reservation);
+
+ return reservation.base();
+ }
+
+ virtual void notifyNeedPage(void* page)
+ {
+ OSAllocator::commit(page, pageSize(), EXECUTABLE_POOL_WRITABLE, true);
+ }
+
+ virtual void notifyPageIsFree(void* page)
+ {
+ OSAllocator::decommit(page, pageSize());
+ }
+
+private:
+ Vector<PageReservation, 16> reservations;
+};
+
+static DemandExecutableAllocator* allocator;
+
+void ExecutableAllocator::initializeAllocator()
+{
+ ASSERT(!allocator);
+ allocator = new DemandExecutableAllocator();
+}
+
+ExecutableAllocator::ExecutableAllocator(JSGlobalData&)
+{
+ ASSERT(allocator);
+}
+
+bool ExecutableAllocator::isValid() const
+{
+ return true;
+}
+
+bool ExecutableAllocator::underMemoryPressure()
+{
+ return false;
+}
+
+PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(JSGlobalData&, size_t sizeInBytes)
+{
+ RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes);
+ if (!result)
+ CRASH();
+ return result.release();
+}
+
+size_t ExecutableAllocator::committedByteCount()
+{
+ return allocator->bytesCommitted();
+}
+
+#if ENABLE(META_ALLOCATOR_PROFILE)
+void ExecutableAllocator::dumpProfile()
+{
+ allocator->dumpProfile();
+}
+#endif
+
+#endif // ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+
+#if OS(WINDOWS)
+#error "ASSEMBLER_WX_EXCLUSIVE not yet supported on this platform."
+#endif
+
+void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSetting setting)
+{
+ size_t pageSize = WTF::pageSize();
+
+ // Calculate the start of the page containing this region,
+ // and account for this extra memory within size.
+ intptr_t startPtr = reinterpret_cast<intptr_t>(start);
+ intptr_t pageStartPtr = startPtr & ~(pageSize - 1);
+ void* pageStart = reinterpret_cast<void*>(pageStartPtr);
+ size += (startPtr - pageStartPtr);
+
+ // Round size up
+ size += (pageSize - 1);
+ size &= ~(pageSize - 1);
+
+ mprotect(pageStart, size, (setting == Writable) ? PROTECTION_FLAGS_RW : PROTECTION_FLAGS_RX);
+}
+
+#endif
+
+#if CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT)
+
+__asm void ExecutableAllocator::cacheFlush(void* code, size_t size)
+{
+ ARM
+ push {r7}
+ add r1, r1, r0
+ mov r7, #0xf0000
+ add r7, r7, #0x2
+ mov r2, #0x0
+ svc #0x0
+ pop {r7}
+ bx lr
+}
+
+#endif
+
+}
+
+#endif // ENABLE(ASSEMBLER)
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.h b/Source/JavaScriptCore/jit/ExecutableAllocator.h
new file mode 100644
index 000000000..876bda62e
--- /dev/null
+++ b/Source/JavaScriptCore/jit/ExecutableAllocator.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ExecutableAllocator_h
+#define ExecutableAllocator_h
+#include <stddef.h> // for ptrdiff_t
+#include <limits>
+#include <wtf/Assertions.h>
+#include <wtf/MetaAllocatorHandle.h>
+#include <wtf/PageAllocation.h>
+#include <wtf/PassRefPtr.h>
+#include <wtf/RefCounted.h>
+#include <wtf/UnusedParam.h>
+#include <wtf/Vector.h>
+
+#if OS(IOS)
+#include <libkern/OSCacheControl.h>
+#endif
+
+#if OS(IOS) || OS(QNX)
+#include <sys/mman.h>
+#endif
+
+#if CPU(MIPS) && OS(LINUX)
+#include <sys/cachectl.h>
+#endif
+
+#if CPU(SH4) && OS(LINUX)
+#include <asm/cachectl.h>
+#include <asm/unistd.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#endif
+
+#if OS(WINCE)
+// From pkfuncs.h (private header file from the Platform Builder)
+#define CACHE_SYNC_ALL 0x07F
+extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags);
+#endif
+
+#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (pageSize() * 4)
+
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
+#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
+#define EXECUTABLE_POOL_WRITABLE false
+#else
+#define EXECUTABLE_POOL_WRITABLE true
+#endif
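+
+// With ASSEMBLER_WX_EXCLUSIVE the executable pool is reserved non-writable, and
+// makeWritable()/makeExecutable() use reprotectRegion() to flip pages between RW and RX.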
+
+namespace JSC {
+
+class JSGlobalData;
+void releaseExecutableMemory(JSGlobalData&);
+
+inline size_t roundUpAllocationSize(size_t request, size_t granularity)
+{
+ if ((std::numeric_limits<size_t>::max() - granularity) <= request)
+ CRASH(); // Allocation is too large
+
+ // Round up to the next multiple of the granularity
+ size_t size = request + (granularity - 1);
+ size = size & ~(granularity - 1);
+ ASSERT(size >= request);
+ return size;
+}
+
+}
+
+#if ENABLE(JIT) && ENABLE(ASSEMBLER)
+
+namespace JSC {
+
+typedef WTF::MetaAllocatorHandle ExecutableMemoryHandle;
+
+class ExecutableAllocator {
+ enum ProtectionSetting { Writable, Executable };
+
+public:
+ ExecutableAllocator(JSGlobalData&);
+
+ static void initializeAllocator();
+
+ bool isValid() const;
+
+ static bool underMemoryPressure();
+
+#if ENABLE(META_ALLOCATOR_PROFILE)
+ static void dumpProfile();
+#else
+ static void dumpProfile() { }
+#endif
+
+ PassRefPtr<ExecutableMemoryHandle> allocate(JSGlobalData&, size_t sizeInBytes);
+
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+ static void makeWritable(void* start, size_t size)
+ {
+ reprotectRegion(start, size, Writable);
+ }
+
+ static void makeExecutable(void* start, size_t size)
+ {
+ reprotectRegion(start, size, Executable);
+ }
+#else
+ static void makeWritable(void*, size_t) {}
+ static void makeExecutable(void*, size_t) {}
+#endif
+
+
+#if CPU(X86) || CPU(X86_64)
+ static void cacheFlush(void*, size_t)
+ {
+ }
+#elif CPU(MIPS)
+ static void cacheFlush(void* code, size_t size)
+ {
+#if GCC_VERSION_AT_LEAST(4, 3, 0)
+#if WTF_MIPS_ISA_REV(2) && !GCC_VERSION_AT_LEAST(4, 4, 3)
+ int lineSize;
+ asm("rdhwr %0, $1" : "=r" (lineSize));
+ //
+ // Modify "start" and "end" to avoid GCC 4.3.0-4.4.2 bug in
+ // mips_expand_synci_loop that may execute synci one more time.
+ // "start" points to the first byte of the cache line.
+ // "end" points to the last byte of the line before the last cache line.
+ // Because size is always a multiple of 4, it is safe to set
+ // "end" to the last byte.
+ //
+ intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
+ intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
+ __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
+#else
+ intptr_t end = reinterpret_cast<intptr_t>(code) + size;
+ __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
+#endif
+#else
+ _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
+#endif
+ }
+#elif CPU(ARM_THUMB2) && OS(IOS)
+ static void cacheFlush(void* code, size_t size)
+ {
+ sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
+ }
+#elif CPU(ARM_THUMB2) && OS(LINUX)
+ static void cacheFlush(void* code, size_t size)
+ {
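+ // Invokes the Linux __ARM_NR_cacheflush private syscall: movw/movt load r7 with
+ // 0x000f0002, r0/r1 delimit the range to flush, and r2 = 0 is the flags argument.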
+ asm volatile (
+ "push {r7}\n"
+ "mov r0, %0\n"
+ "mov r1, %1\n"
+ "movw r7, #0x2\n"
+ "movt r7, #0xf\n"
+ "movs r2, #0x0\n"
+ "svc 0x0\n"
+ "pop {r7}\n"
+ :
+ : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
+ : "r0", "r1", "r2");
+ }
+#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT)
+ static __asm void cacheFlush(void* code, size_t size);
+#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(GCC)
+ static void cacheFlush(void* code, size_t size)
+ {
+ asm volatile (
+ "push {r7}\n"
+ "mov r0, %0\n"
+ "mov r1, %1\n"
+ "mov r7, #0xf0000\n"
+ "add r7, r7, #0x2\n"
+ "mov r2, #0x0\n"
+ "svc 0x0\n"
+ "pop {r7}\n"
+ :
+ : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
+ : "r0", "r1", "r2");
+ }
+#elif OS(WINCE)
+ static void cacheFlush(void* code, size_t size)
+ {
+ CacheRangeFlush(code, size, CACHE_SYNC_ALL);
+ }
+#elif CPU(SH4) && OS(LINUX)
+ static void cacheFlush(void* code, size_t size)
+ {
+#ifdef CACHEFLUSH_D_L2
+ syscall(__NR_cacheflush, reinterpret_cast<unsigned>(code), size, CACHEFLUSH_D_WB | CACHEFLUSH_I | CACHEFLUSH_D_L2);
+#else
+ syscall(__NR_cacheflush, reinterpret_cast<unsigned>(code), size, CACHEFLUSH_D_WB | CACHEFLUSH_I);
+#endif
+ }
+#elif OS(QNX)
+ static void cacheFlush(void* code, size_t size)
+ {
+#if !ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+ msync(code, size, MS_INVALIDATE_ICACHE);
+#else
+ UNUSED_PARAM(code);
+ UNUSED_PARAM(size);
+#endif
+ }
+#else
+ #error "The cacheFlush support is missing on this platform."
+#endif
+ static size_t committedByteCount();
+
+private:
+
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+ static void reprotectRegion(void*, size_t, ProtectionSetting);
+#endif
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT) && ENABLE(ASSEMBLER)
+
+#endif // !defined(ExecutableAllocator_h)
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
new file mode 100644
index 000000000..3771c74a9
--- /dev/null
+++ b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include "ExecutableAllocator.h"
+
+#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
+
+#include <errno.h>
+
+#include <sys/mman.h>
+#include <unistd.h>
+#include <wtf/MetaAllocator.h>
+#include <wtf/PageReservation.h>
+#include <wtf/VMTags.h>
+
+#if OS(LINUX)
+#include <stdio.h>
+#endif
+
+using namespace WTF;
+
+namespace JSC {
+
+#if CPU(ARM)
+static const size_t fixedPoolSize = 16 * 1024 * 1024;
+#elif CPU(X86_64)
+static const size_t fixedPoolSize = 1024 * 1024 * 1024;
+#else
+static const size_t fixedPoolSize = 32 * 1024 * 1024;
+#endif
+
+class FixedVMPoolExecutableAllocator : public MetaAllocator {
+public:
+ FixedVMPoolExecutableAllocator()
+ : MetaAllocator(32) // round up all allocations to 32 bytes
+ {
+ m_reservation = PageReservation::reserveWithGuardPages(fixedPoolSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
+#if !ENABLE(INTERPRETER)
+ if (!m_reservation)
+ CRASH();
+#endif
+ if (m_reservation) {
+ ASSERT(m_reservation.size() == fixedPoolSize);
+ addFreshFreeSpace(m_reservation.base(), m_reservation.size());
+ }
+ }
+
+protected:
+ virtual void* allocateNewSpace(size_t&)
+ {
+ // We're operating in a fixed pool, so new allocation is always prohibited.
+ return 0;
+ }
+
+ virtual void notifyNeedPage(void* page)
+ {
+ m_reservation.commit(page, pageSize());
+ }
+
+ virtual void notifyPageIsFree(void* page)
+ {
+ m_reservation.decommit(page, pageSize());
+ }
+
+private:
+ PageReservation m_reservation;
+};
+
+static FixedVMPoolExecutableAllocator* allocator;
+
+void ExecutableAllocator::initializeAllocator()
+{
+ ASSERT(!allocator);
+ allocator = new FixedVMPoolExecutableAllocator();
+}
+
+ExecutableAllocator::ExecutableAllocator(JSGlobalData&)
+{
+ ASSERT(allocator);
+}
+
+bool ExecutableAllocator::isValid() const
+{
+ return !!allocator->bytesReserved();
+}
+
+bool ExecutableAllocator::underMemoryPressure()
+{
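+ // Report memory pressure once more than half of the fixed reservation has been allocated.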
+ MetaAllocator::Statistics statistics = allocator->currentStatistics();
+ return statistics.bytesAllocated > statistics.bytesReserved / 2;
+}
+
+PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(JSGlobalData& globalData, size_t sizeInBytes)
+{
+ RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes);
+ if (!result) {
+ releaseExecutableMemory(globalData);
+ result = allocator->allocate(sizeInBytes);
+ if (!result)
+ CRASH();
+ }
+ return result.release();
+}
+
+size_t ExecutableAllocator::committedByteCount()
+{
+ return allocator->bytesCommitted();
+}
+
+#if ENABLE(META_ALLOCATOR_PROFILE)
+void ExecutableAllocator::dumpProfile()
+{
+ allocator->dumpProfile();
+}
+#endif
+
+}
+
+
+#endif // ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp
new file mode 100644
index 000000000..4a6e3fb3d
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JIT.cpp
@@ -0,0 +1,741 @@
+/*
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#include "JIT.h"
+
+// This probably does not belong here; adding here for now as a quick Windows build fix.
+#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
+#include "MacroAssembler.h"
+JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
+#endif
+
+#include "CodeBlock.h"
+#include "CryptographicallyRandomNumber.h"
+#include "DFGNode.h" // for DFG_SUCCESS_STATS
+#include "Interpreter.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "LinkBuffer.h"
+#include "RepatchBuffer.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+using namespace std;
+
+namespace JSC {
+
+void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
+{
+ RepatchBuffer repatchBuffer(codeblock);
+ repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
+}
+
+void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
+{
+ RepatchBuffer repatchBuffer(codeblock);
+ repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
+}
+
+void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
+{
+ RepatchBuffer repatchBuffer(codeblock);
+ repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
+}
+
+JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
+ : m_interpreter(globalData->interpreter)
+ , m_globalData(globalData)
+ , m_codeBlock(codeBlock)
+ , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
+ , m_bytecodeOffset((unsigned)-1)
+#if USE(JSVALUE32_64)
+ , m_jumpTargetIndex(0)
+ , m_mappedBytecodeOffset((unsigned)-1)
+ , m_mappedVirtualRegisterIndex(RegisterFile::ReturnPC)
+ , m_mappedTag((RegisterID)-1)
+ , m_mappedPayload((RegisterID)-1)
+#else
+ , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
+ , m_jumpTargetsPosition(0)
+#endif
+#if USE(OS_RANDOMNESS)
+ , m_randomGenerator(cryptographicallyRandomNumber())
+#else
+ , m_randomGenerator(static_cast<unsigned>(randomNumber() * 0xFFFFFFF))
+#endif
+{
+}
+
+#if ENABLE(DFG_JIT)
+void JIT::emitOptimizationCheck(OptimizationCheckKind kind)
+{
+ if (!shouldEmitProfiling())
+ return;
+
+ Jump skipOptimize = branchAdd32(Signed, TrustedImm32(kind == LoopOptimizationCheck ? Options::executionCounterIncrementForLoop : Options::executionCounterIncrementForReturn), AbsoluteAddress(m_codeBlock->addressOfExecuteCounter()));
+ JITStubCall stubCall(this, kind == LoopOptimizationCheck ? cti_optimize_from_loop : cti_optimize_from_ret);
+ if (kind == LoopOptimizationCheck)
+ stubCall.addArgument(Imm32(m_bytecodeOffset));
+ stubCall.call();
+ skipOptimize.link(this);
+}
+#endif
+
+#if CPU(X86)
+void JIT::emitTimeoutCheck()
+{
+ Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), AbsoluteAddress(&m_globalData->m_timeoutCount));
+ JITStubCall stubCall(this, cti_timeout_check);
+ stubCall.addArgument(regT1, regT0); // save last result registers.
+ stubCall.call(regT0);
+ store32(regT0, &m_globalData->m_timeoutCount);
+ stubCall.getArgument(0, regT1, regT0); // reload last result registers.
+ skipTimeout.link(this);
+}
+#elif USE(JSVALUE32_64)
+void JIT::emitTimeoutCheck()
+{
+ Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), timeoutCheckRegister);
+ JITStubCall stubCall(this, cti_timeout_check);
+ stubCall.addArgument(regT1, regT0); // save last result registers.
+ stubCall.call(timeoutCheckRegister);
+ stubCall.getArgument(0, regT1, regT0); // reload last result registers.
+ skipTimeout.link(this);
+}
+#else
+void JIT::emitTimeoutCheck()
+{
+ Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), timeoutCheckRegister);
+ JITStubCall(this, cti_timeout_check).call(timeoutCheckRegister);
+ skipTimeout.link(this);
+
+ killLastResultRegister();
+}
+#endif
+
+#define NEXT_OPCODE(name) \
+ m_bytecodeOffset += OPCODE_LENGTH(name); \
+ break;
+
+#if USE(JSVALUE32_64)
+#define DEFINE_BINARY_OP(name) \
+ case name: { \
+ JITStubCall stubCall(this, cti_##name); \
+ stubCall.addArgument(currentInstruction[2].u.operand); \
+ stubCall.addArgument(currentInstruction[3].u.operand); \
+ stubCall.call(currentInstruction[1].u.operand); \
+ NEXT_OPCODE(name); \
+ }
+
+#define DEFINE_UNARY_OP(name) \
+ case name: { \
+ JITStubCall stubCall(this, cti_##name); \
+ stubCall.addArgument(currentInstruction[2].u.operand); \
+ stubCall.call(currentInstruction[1].u.operand); \
+ NEXT_OPCODE(name); \
+ }
+
+#else // USE(JSVALUE32_64)
+
+#define DEFINE_BINARY_OP(name) \
+ case name: { \
+ JITStubCall stubCall(this, cti_##name); \
+ stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2); \
+ stubCall.call(currentInstruction[1].u.operand); \
+ NEXT_OPCODE(name); \
+ }
+
+#define DEFINE_UNARY_OP(name) \
+ case name: { \
+ JITStubCall stubCall(this, cti_##name); \
+ stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
+ stubCall.call(currentInstruction[1].u.operand); \
+ NEXT_OPCODE(name); \
+ }
+#endif // USE(JSVALUE32_64)
+
+#define DEFINE_OP(name) \
+ case name: { \
+ emit_##name(currentInstruction); \
+ NEXT_OPCODE(name); \
+ }
+
+#define DEFINE_SLOWCASE_OP(name) \
+ case name: { \
+ emitSlow_##name(currentInstruction, iter); \
+ NEXT_OPCODE(name); \
+ }
+
+void JIT::privateCompileMainPass()
+{
+ Instruction* instructionsBegin = m_codeBlock->instructions().begin();
+ unsigned instructionCount = m_codeBlock->instructions().size();
+
+ m_globalResolveInfoIndex = 0;
+ m_callLinkInfoIndex = 0;
+
+ for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
+ Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
+ ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);
+
+#if ENABLE(OPCODE_SAMPLING)
+ if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
+ sampleInstruction(currentInstruction);
+#endif
+
+#if USE(JSVALUE64)
+ if (atJumpTarget())
+ killLastResultRegister();
+#endif
+
+ m_labels[m_bytecodeOffset] = label();
+
+#if ENABLE(JIT_VERBOSE)
+ printf("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
+#endif
+
+ switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
+ DEFINE_BINARY_OP(op_del_by_val)
+ DEFINE_BINARY_OP(op_in)
+ DEFINE_BINARY_OP(op_less)
+ DEFINE_BINARY_OP(op_lesseq)
+ DEFINE_BINARY_OP(op_greater)
+ DEFINE_BINARY_OP(op_greatereq)
+ DEFINE_UNARY_OP(op_is_boolean)
+ DEFINE_UNARY_OP(op_is_function)
+ DEFINE_UNARY_OP(op_is_number)
+ DEFINE_UNARY_OP(op_is_object)
+ DEFINE_UNARY_OP(op_is_string)
+ DEFINE_UNARY_OP(op_is_undefined)
+#if USE(JSVALUE64)
+ DEFINE_UNARY_OP(op_negate)
+#endif
+ DEFINE_UNARY_OP(op_typeof)
+
+ DEFINE_OP(op_add)
+ DEFINE_OP(op_bitand)
+ DEFINE_OP(op_bitnot)
+ DEFINE_OP(op_bitor)
+ DEFINE_OP(op_bitxor)
+ DEFINE_OP(op_call)
+ DEFINE_OP(op_call_eval)
+ DEFINE_OP(op_call_varargs)
+ DEFINE_OP(op_catch)
+ DEFINE_OP(op_construct)
+ DEFINE_OP(op_get_callee)
+ DEFINE_OP(op_create_this)
+ DEFINE_OP(op_convert_this)
+ DEFINE_OP(op_init_lazy_reg)
+ DEFINE_OP(op_create_arguments)
+ DEFINE_OP(op_debug)
+ DEFINE_OP(op_del_by_id)
+ DEFINE_OP(op_div)
+ DEFINE_OP(op_end)
+ DEFINE_OP(op_enter)
+ DEFINE_OP(op_create_activation)
+ DEFINE_OP(op_eq)
+ DEFINE_OP(op_eq_null)
+ DEFINE_OP(op_get_by_id)
+ DEFINE_OP(op_get_arguments_length)
+ DEFINE_OP(op_get_by_val)
+ DEFINE_OP(op_get_argument_by_val)
+ DEFINE_OP(op_get_by_pname)
+ DEFINE_OP(op_get_global_var)
+ DEFINE_OP(op_get_pnames)
+ DEFINE_OP(op_get_scoped_var)
+ DEFINE_OP(op_check_has_instance)
+ DEFINE_OP(op_instanceof)
+ DEFINE_OP(op_jeq_null)
+ DEFINE_OP(op_jfalse)
+ DEFINE_OP(op_jmp)
+ DEFINE_OP(op_jmp_scopes)
+ DEFINE_OP(op_jneq_null)
+ DEFINE_OP(op_jneq_ptr)
+ DEFINE_OP(op_jless)
+ DEFINE_OP(op_jlesseq)
+ DEFINE_OP(op_jgreater)
+ DEFINE_OP(op_jgreatereq)
+ DEFINE_OP(op_jnless)
+ DEFINE_OP(op_jnlesseq)
+ DEFINE_OP(op_jngreater)
+ DEFINE_OP(op_jngreatereq)
+ DEFINE_OP(op_jsr)
+ DEFINE_OP(op_jtrue)
+ DEFINE_OP(op_loop)
+ DEFINE_OP(op_loop_hint)
+ DEFINE_OP(op_loop_if_less)
+ DEFINE_OP(op_loop_if_lesseq)
+ DEFINE_OP(op_loop_if_greater)
+ DEFINE_OP(op_loop_if_greatereq)
+ DEFINE_OP(op_loop_if_true)
+ DEFINE_OP(op_loop_if_false)
+ DEFINE_OP(op_lshift)
+ DEFINE_OP(op_method_check)
+ DEFINE_OP(op_mod)
+ DEFINE_OP(op_mov)
+ DEFINE_OP(op_mul)
+#if USE(JSVALUE32_64)
+ DEFINE_OP(op_negate)
+#endif
+ DEFINE_OP(op_neq)
+ DEFINE_OP(op_neq_null)
+ DEFINE_OP(op_new_array)
+ DEFINE_OP(op_new_array_buffer)
+ DEFINE_OP(op_new_func)
+ DEFINE_OP(op_new_func_exp)
+ DEFINE_OP(op_new_object)
+ DEFINE_OP(op_new_regexp)
+ DEFINE_OP(op_next_pname)
+ DEFINE_OP(op_not)
+ DEFINE_OP(op_nstricteq)
+ DEFINE_OP(op_pop_scope)
+ DEFINE_OP(op_post_dec)
+ DEFINE_OP(op_post_inc)
+ DEFINE_OP(op_pre_dec)
+ DEFINE_OP(op_pre_inc)
+ DEFINE_OP(op_profile_did_call)
+ DEFINE_OP(op_profile_will_call)
+ DEFINE_OP(op_push_new_scope)
+ DEFINE_OP(op_push_scope)
+ DEFINE_OP(op_put_by_id)
+ DEFINE_OP(op_put_by_index)
+ DEFINE_OP(op_put_by_val)
+ DEFINE_OP(op_put_getter)
+ DEFINE_OP(op_put_global_var)
+ DEFINE_OP(op_put_scoped_var)
+ DEFINE_OP(op_put_setter)
+ DEFINE_OP(op_resolve)
+ DEFINE_OP(op_resolve_base)
+ DEFINE_OP(op_ensure_property_exists)
+ DEFINE_OP(op_resolve_global)
+ DEFINE_OP(op_resolve_global_dynamic)
+ DEFINE_OP(op_resolve_skip)
+ DEFINE_OP(op_resolve_with_base)
+ DEFINE_OP(op_resolve_with_this)
+ DEFINE_OP(op_ret)
+ DEFINE_OP(op_call_put_result)
+ DEFINE_OP(op_ret_object_or_this)
+ DEFINE_OP(op_rshift)
+ DEFINE_OP(op_urshift)
+ DEFINE_OP(op_sret)
+ DEFINE_OP(op_strcat)
+ DEFINE_OP(op_stricteq)
+ DEFINE_OP(op_sub)
+ DEFINE_OP(op_switch_char)
+ DEFINE_OP(op_switch_imm)
+ DEFINE_OP(op_switch_string)
+ DEFINE_OP(op_tear_off_activation)
+ DEFINE_OP(op_tear_off_arguments)
+ DEFINE_OP(op_throw)
+ DEFINE_OP(op_throw_reference_error)
+ DEFINE_OP(op_to_jsnumber)
+ DEFINE_OP(op_to_primitive)
+
+ case op_get_array_length:
+ case op_get_by_id_chain:
+ case op_get_by_id_generic:
+ case op_get_by_id_proto:
+ case op_get_by_id_self:
+ case op_get_by_id_getter_chain:
+ case op_get_by_id_getter_proto:
+ case op_get_by_id_getter_self:
+ case op_get_by_id_custom_chain:
+ case op_get_by_id_custom_proto:
+ case op_get_by_id_custom_self:
+ case op_get_string_length:
+ case op_put_by_id_generic:
+ case op_put_by_id_replace:
+ case op_put_by_id_transition:
+ ASSERT_NOT_REACHED();
+ }
+ }
+
+ ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
+
+#ifndef NDEBUG
+ // Reset this, in order to guard its use with ASSERTs.
+ m_bytecodeOffset = (unsigned)-1;
+#endif
+}
+
+void JIT::privateCompileLinkPass()
+{
+ unsigned jmpTableCount = m_jmpTable.size();
+ for (unsigned i = 0; i < jmpTableCount; ++i)
+ m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
+ m_jmpTable.clear();
+}
+
+void JIT::privateCompileSlowCases()
+{
+ Instruction* instructionsBegin = m_codeBlock->instructions().begin();
+
+ m_propertyAccessInstructionIndex = 0;
+ m_globalResolveInfoIndex = 0;
+ m_callLinkInfoIndex = 0;
+
+#if !ASSERT_DISABLED && ENABLE(VALUE_PROFILER)
+ // Use this to assert that slow-path code associates new profiling sites with existing
+ // ValueProfiles rather than creating new ones. This ensures that for a given instruction
+ // (say, get_by_id) we get combined statistics for both the fast-path executions of that
+ // instructions and the slow-path executions. Furthermore, if the slow-path code created
+ // new ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
+ // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
+ unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();
+#endif
+
+ for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
+#if USE(JSVALUE64)
+ killLastResultRegister();
+#endif
+
+ m_bytecodeOffset = iter->to;
+#ifndef NDEBUG
+ unsigned firstTo = m_bytecodeOffset;
+#endif
+ Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
+
+#if ENABLE(VALUE_PROFILER)
+ RareCaseProfile* rareCaseProfile = 0;
+ if (m_canBeOptimized)
+ rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);
+#endif
+
+#if ENABLE(JIT_VERBOSE)
+ printf("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
+#endif
+
+ switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
+ DEFINE_SLOWCASE_OP(op_add)
+ DEFINE_SLOWCASE_OP(op_bitand)
+ DEFINE_SLOWCASE_OP(op_bitnot)
+ DEFINE_SLOWCASE_OP(op_bitor)
+ DEFINE_SLOWCASE_OP(op_bitxor)
+ DEFINE_SLOWCASE_OP(op_call)
+ DEFINE_SLOWCASE_OP(op_call_eval)
+ DEFINE_SLOWCASE_OP(op_call_varargs)
+ DEFINE_SLOWCASE_OP(op_construct)
+ DEFINE_SLOWCASE_OP(op_convert_this)
+ DEFINE_SLOWCASE_OP(op_create_this)
+ DEFINE_SLOWCASE_OP(op_div)
+ DEFINE_SLOWCASE_OP(op_eq)
+ DEFINE_SLOWCASE_OP(op_get_by_id)
+ DEFINE_SLOWCASE_OP(op_get_arguments_length)
+ DEFINE_SLOWCASE_OP(op_get_by_val)
+ DEFINE_SLOWCASE_OP(op_get_argument_by_val)
+ DEFINE_SLOWCASE_OP(op_get_by_pname)
+ DEFINE_SLOWCASE_OP(op_check_has_instance)
+ DEFINE_SLOWCASE_OP(op_instanceof)
+ DEFINE_SLOWCASE_OP(op_jfalse)
+ DEFINE_SLOWCASE_OP(op_jless)
+ DEFINE_SLOWCASE_OP(op_jlesseq)
+ DEFINE_SLOWCASE_OP(op_jgreater)
+ DEFINE_SLOWCASE_OP(op_jgreatereq)
+ DEFINE_SLOWCASE_OP(op_jnless)
+ DEFINE_SLOWCASE_OP(op_jnlesseq)
+ DEFINE_SLOWCASE_OP(op_jngreater)
+ DEFINE_SLOWCASE_OP(op_jngreatereq)
+ DEFINE_SLOWCASE_OP(op_jtrue)
+ DEFINE_SLOWCASE_OP(op_loop_if_less)
+ DEFINE_SLOWCASE_OP(op_loop_if_lesseq)
+ DEFINE_SLOWCASE_OP(op_loop_if_greater)
+ DEFINE_SLOWCASE_OP(op_loop_if_greatereq)
+ DEFINE_SLOWCASE_OP(op_loop_if_true)
+ DEFINE_SLOWCASE_OP(op_loop_if_false)
+ DEFINE_SLOWCASE_OP(op_lshift)
+ DEFINE_SLOWCASE_OP(op_method_check)
+ DEFINE_SLOWCASE_OP(op_mod)
+ DEFINE_SLOWCASE_OP(op_mul)
+#if USE(JSVALUE32_64)
+ DEFINE_SLOWCASE_OP(op_negate)
+#endif
+ DEFINE_SLOWCASE_OP(op_neq)
+ DEFINE_SLOWCASE_OP(op_new_object)
+ DEFINE_SLOWCASE_OP(op_new_func)
+ DEFINE_SLOWCASE_OP(op_new_func_exp)
+ DEFINE_SLOWCASE_OP(op_not)
+ DEFINE_SLOWCASE_OP(op_nstricteq)
+ DEFINE_SLOWCASE_OP(op_post_dec)
+ DEFINE_SLOWCASE_OP(op_post_inc)
+ DEFINE_SLOWCASE_OP(op_pre_dec)
+ DEFINE_SLOWCASE_OP(op_pre_inc)
+ DEFINE_SLOWCASE_OP(op_put_by_id)
+ DEFINE_SLOWCASE_OP(op_put_by_val)
+ DEFINE_SLOWCASE_OP(op_resolve_global)
+ DEFINE_SLOWCASE_OP(op_resolve_global_dynamic)
+ DEFINE_SLOWCASE_OP(op_rshift)
+ DEFINE_SLOWCASE_OP(op_urshift)
+ DEFINE_SLOWCASE_OP(op_stricteq)
+ DEFINE_SLOWCASE_OP(op_sub)
+ DEFINE_SLOWCASE_OP(op_to_jsnumber)
+ DEFINE_SLOWCASE_OP(op_to_primitive)
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
+ ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");
+
+#if ENABLE(VALUE_PROFILER)
+ if (m_canBeOptimized)
+ add32(Imm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));
+#endif
+
+ emitJumpSlowToHot(jump(), 0);
+ }
+
+ ASSERT(m_propertyAccessInstructionIndex == m_propertyAccessCompilationInfo.size());
+ ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
+#if ENABLE(VALUE_PROFILER)
+ ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());
+#endif
+
+#ifndef NDEBUG
+ // Reset this, in order to guard its use with ASSERTs.
+ m_bytecodeOffset = (unsigned)-1;
+#endif
+}
+
+JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
+{
+#if ENABLE(VALUE_PROFILER)
+ m_canBeOptimized = m_codeBlock->canCompileWithDFG();
+#endif
+
+ // Just add a little bit of randomness to the codegen
+ if (m_randomGenerator.getUint32() & 1)
+ nop();
+
+ preserveReturnAddressAfterCall(regT2);
+ emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
+ emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
+
+ Label beginLabel(this);
+
+ sampleCodeBlock(m_codeBlock);
+#if ENABLE(OPCODE_SAMPLING)
+ sampleInstruction(m_codeBlock->instructions().begin());
+#endif
+
+ Jump registerFileCheck;
+ if (m_codeBlock->codeType() == FunctionCode) {
+#if ENABLE(DFG_JIT)
+#if DFG_ENABLE(SUCCESS_STATS)
+ static SamplingCounter counter("originalJIT");
+ emitCount(counter);
+#endif
+#endif
+
+#if ENABLE(VALUE_PROFILER)
+ ASSERT(m_bytecodeOffset == (unsigned)-1);
+ if (shouldEmitProfiling()) {
+ m_codeBlock->setArgumentValueProfileSize(m_codeBlock->m_numParameters);
+ for (int argument = 0; argument < m_codeBlock->m_numParameters; ++argument) {
+ // If this is a constructor, then we want to put in a dummy profiling site (to
+ // keep things consistent) but we don't actually want to record the dummy value.
+ if (m_codeBlock->m_isConstructor && !argument)
+ continue;
+ int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
+#if USE(JSVALUE64)
+ loadPtr(Address(callFrameRegister, offset), regT0);
+#elif USE(JSVALUE32_64)
+ load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+ load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+#endif
+ emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
+ }
+ }
+#endif
+
+ addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
+ registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT1);
+ }
+
+ Label functionBody = label();
+
+#if ENABLE(VALUE_PROFILER)
+ if (m_canBeOptimized)
+ add32(Imm32(1), AbsoluteAddress(&m_codeBlock->m_executionEntryCount));
+#endif
+
+ privateCompileMainPass();
+ privateCompileLinkPass();
+ privateCompileSlowCases();
+
+ Label arityCheck;
+ if (m_codeBlock->codeType() == FunctionCode) {
+ registerFileCheck.link(this);
+ m_bytecodeOffset = 0;
+ JITStubCall(this, cti_register_file_check).call();
+#ifndef NDEBUG
+ m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
+#endif
+ jump(functionBody);
+
+ arityCheck = label();
+ preserveReturnAddressAfterCall(regT2);
+ emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
+ emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
+
+ load32(payloadFor(RegisterFile::ArgumentCount), regT1);
+ branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);
+
+ JITStubCall(this, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck).call(callFrameRegister);
+
+ jump(beginLabel);
+ }
+
+ ASSERT(m_jmpTable.isEmpty());
+
+ LinkBuffer patchBuffer(*m_globalData, this);
+
+ // Translate vPC offsets into addresses in JIT generated code, for switch tables.
+ for (unsigned i = 0; i < m_switches.size(); ++i) {
+ SwitchRecord record = m_switches[i];
+ unsigned bytecodeOffset = record.bytecodeOffset;
+
+ if (record.type != SwitchRecord::String) {
+ ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
+ ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());
+
+ record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);
+
+ for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
+ unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
+ record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
+ }
+ } else {
+ ASSERT(record.type == SwitchRecord::String);
+
+ record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);
+
+ StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
+ for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
+ unsigned offset = it->second.branchOffset;
+ it->second.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
+ }
+ }
+ }
+
+ for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
+ HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
+ handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
+ }
+
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+
+ if (m_codeBlock->needsCallReturnIndices()) {
+ m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size());
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter)
+ m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeOffset));
+ }
+
+ // Link absolute addresses for jsr
+ for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
+ patchBuffer.patch(iter->storeLocation, patchBuffer.locationOf(iter->target).executableAddress());
+
+ m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccessCompilationInfo.size());
+ for (unsigned i = 0; i < m_propertyAccessCompilationInfo.size(); ++i) {
+ StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
+ ASSERT(m_propertyAccessCompilationInfo[i].bytecodeIndex != std::numeric_limits<unsigned>::max());
+ info.bytecodeIndex = m_propertyAccessCompilationInfo[i].bytecodeIndex;
+ info.callReturnLocation = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].callReturnLocation);
+ info.hotPathBegin = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].hotPathBegin);
+ }
+ m_codeBlock->setNumberOfCallLinkInfos(m_callStructureStubCompilationInfo.size());
+ for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
+ CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
+ info.callType = m_callStructureStubCompilationInfo[i].callType;
+ info.bytecodeIndex = m_callStructureStubCompilationInfo[i].bytecodeIndex;
+ info.callReturnLocation = CodeLocationLabel(patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation));
+ info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
+ info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
+ }
+ unsigned methodCallCount = m_methodCallCompilationInfo.size();
+ m_codeBlock->addMethodCallLinkInfos(methodCallCount);
+ for (unsigned i = 0; i < methodCallCount; ++i) {
+ MethodCallLinkInfo& info = m_codeBlock->methodCallLinkInfo(i);
+ info.bytecodeIndex = m_methodCallCompilationInfo[i].bytecodeIndex;
+ info.cachedStructure.setLocation(patchBuffer.locationOf(m_methodCallCompilationInfo[i].structureToCompare));
+ info.callReturnLocation = m_codeBlock->structureStubInfo(m_methodCallCompilationInfo[i].propertyAccessIndex).callReturnLocation;
+ }
+
+#if ENABLE(DFG_JIT)
+ if (m_canBeOptimized) {
+ CompactJITCodeMap::Encoder jitCodeMapEncoder;
+ for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
+ if (m_labels[bytecodeOffset].isSet())
+ jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
+ }
+ m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());
+ }
+#endif
+
+ if (m_codeBlock->codeType() == FunctionCode && functionEntryArityCheck)
+ *functionEntryArityCheck = patchBuffer.locationOf(arityCheck);
+
+ CodeRef result = patchBuffer.finalizeCode();
+
+#if ENABLE(JIT_VERBOSE)
+ printf("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
+#endif
+
+ return JITCode(result, JITCode::BaselineJIT);
+}
+
+void JIT::linkFor(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, JSGlobalData* globalData, CodeSpecializationKind kind)
+{
+ RepatchBuffer repatchBuffer(callerCodeBlock);
+
+ ASSERT(!callLinkInfo->isLinked());
+ callLinkInfo->callee.set(*globalData, callLinkInfo->hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
+ callLinkInfo->lastSeenCallee.set(*globalData, callerCodeBlock->ownerExecutable(), callee);
+ repatchBuffer.relink(callLinkInfo->hotPathOther, code);
+
+ if (calleeCodeBlock)
+ calleeCodeBlock->linkIncomingCall(callLinkInfo);
+
+ // Patch the slow path so we do not continue to try to link.
+ if (kind == CodeForCall) {
+ repatchBuffer.relink(CodeLocationNearCall(callLinkInfo->callReturnLocation), globalData->jitStubs->ctiVirtualCall());
+ return;
+ }
+
+ ASSERT(kind == CodeForConstruct);
+ repatchBuffer.relink(CodeLocationNearCall(callLinkInfo->callReturnLocation), globalData->jitStubs->ctiVirtualConstruct());
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h
new file mode 100644
index 000000000..750b9d818
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JIT.h
@@ -0,0 +1,1175 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JIT_h
+#define JIT_h
+
+#if ENABLE(JIT)
+
+// Verbose logging of code generation
+#define ENABLE_JIT_VERBOSE 0
+// Verbose logging for OSR-related code.
+#define ENABLE_JIT_VERBOSE_OSR 0
+
+// We've run into some problems where changing the size of the class JIT leads to
+// performance fluctuations. Try forcing alignment in an attempt to stabilize this.
+#if COMPILER(GCC)
+#define JIT_CLASS_ALIGNMENT __attribute__ ((aligned (32)))
+#else
+#define JIT_CLASS_ALIGNMENT
+#endif
+
+#define ASSERT_JIT_OFFSET_UNUSED(variable, actual, expected) ASSERT_WITH_MESSAGE_UNUSED(variable, actual == expected, "JIT Offset \"%s\" should be %d, not %d.\n", #expected, static_cast<int>(expected), static_cast<int>(actual));
+#define ASSERT_JIT_OFFSET(actual, expected) ASSERT_WITH_MESSAGE(actual == expected, "JIT Offset \"%s\" should be %d, not %d.\n", #expected, static_cast<int>(expected), static_cast<int>(actual));
+
+#include "CodeBlock.h"
+#include "CompactJITCodeMap.h"
+#include "Interpreter.h"
+#include "JSInterfaceJIT.h"
+#include "Opcode.h"
+#include "Profiler.h"
+#include <bytecode/SamplingTool.h>
+
+namespace JSC {
+
+ class CodeBlock;
+ class FunctionExecutable;
+ class JIT;
+ class JSPropertyNameIterator;
+ class Interpreter;
+ class Register;
+ class RegisterFile;
+ class ScopeChainNode;
+ class StructureChain;
+
+ struct CallLinkInfo;
+ struct Instruction;
+ struct OperandTypes;
+ struct PolymorphicAccessStructureList;
+ struct SimpleJumpTable;
+ struct StringJumpTable;
+ struct StructureStubInfo;
+
+ struct CallRecord {
+ MacroAssembler::Call from;
+ unsigned bytecodeOffset;
+ void* to;
+
+ CallRecord()
+ {
+ }
+
+ CallRecord(MacroAssembler::Call from, unsigned bytecodeOffset, void* to = 0)
+ : from(from)
+ , bytecodeOffset(bytecodeOffset)
+ , to(to)
+ {
+ }
+ };
+
+ struct JumpTable {
+ MacroAssembler::Jump from;
+ unsigned toBytecodeOffset;
+
+ JumpTable(MacroAssembler::Jump f, unsigned t)
+ : from(f)
+ , toBytecodeOffset(t)
+ {
+ }
+ };
+
+ struct SlowCaseEntry {
+ MacroAssembler::Jump from;
+ unsigned to;
+ unsigned hint;
+
+ SlowCaseEntry(MacroAssembler::Jump f, unsigned t, unsigned h = 0)
+ : from(f)
+ , to(t)
+ , hint(h)
+ {
+ }
+ };
+
+ struct SwitchRecord {
+ enum Type {
+ Immediate,
+ Character,
+ String
+ };
+
+ Type type;
+
+ union {
+ SimpleJumpTable* simpleJumpTable;
+ StringJumpTable* stringJumpTable;
+ } jumpTable;
+
+ unsigned bytecodeOffset;
+ unsigned defaultOffset;
+
+ SwitchRecord(SimpleJumpTable* jumpTable, unsigned bytecodeOffset, unsigned defaultOffset, Type type)
+ : type(type)
+ , bytecodeOffset(bytecodeOffset)
+ , defaultOffset(defaultOffset)
+ {
+ this->jumpTable.simpleJumpTable = jumpTable;
+ }
+
+ SwitchRecord(StringJumpTable* jumpTable, unsigned bytecodeOffset, unsigned defaultOffset)
+ : type(String)
+ , bytecodeOffset(bytecodeOffset)
+ , defaultOffset(defaultOffset)
+ {
+ this->jumpTable.stringJumpTable = jumpTable;
+ }
+ };
+
+ struct PropertyStubCompilationInfo {
+ unsigned bytecodeIndex;
+ MacroAssembler::Call callReturnLocation;
+ MacroAssembler::Label hotPathBegin;
+
+#if !ASSERT_DISABLED
+ PropertyStubCompilationInfo()
+ : bytecodeIndex(std::numeric_limits<unsigned>::max())
+ {
+ }
+#endif
+ };
+
+ struct StructureStubCompilationInfo {
+ MacroAssembler::DataLabelPtr hotPathBegin;
+ MacroAssembler::Call hotPathOther;
+ MacroAssembler::Call callReturnLocation;
+ CallLinkInfo::CallType callType;
+ unsigned bytecodeIndex;
+ };
+
+ struct MethodCallCompilationInfo {
+ MethodCallCompilationInfo(unsigned bytecodeIndex, unsigned propertyAccessIndex)
+ : bytecodeIndex(bytecodeIndex)
+ , propertyAccessIndex(propertyAccessIndex)
+ {
+ }
+
+ unsigned bytecodeIndex;
+ MacroAssembler::DataLabelPtr structureToCompare;
+ unsigned propertyAccessIndex;
+ };
+
+ // Near calls can only be patched to other JIT code; regular calls can be patched to JIT code or relinked to stub functions.
+ void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
+ void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
+ void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction);
+
+ class JIT : private JSInterfaceJIT {
+ friend class JITStubCall;
+
+ using MacroAssembler::Jump;
+ using MacroAssembler::JumpList;
+ using MacroAssembler::Label;
+
+ static const int patchGetByIdDefaultStructure = -1;
+ static const int patchGetByIdDefaultOffset = 0;
+ // Magic number - the initial offset must not be representable as a signed 8-bit value, or the
+ // X86Assembler will compress the displacement, and we may not be able to fit a patched offset.
+ static const int patchPutByIdDefaultOffset = 256;
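+ // (A signed 8-bit displacement only covers -128..127, so seeding the offset with 256 forces the
+ // assembler to emit a full 32-bit displacement, leaving room to patch in any real offset later.)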
+
+ public:
+ static JITCode compile(JSGlobalData* globalData, CodeBlock* codeBlock, CodePtr* functionEntryArityCheck = 0)
+ {
+ return JIT(globalData, codeBlock).privateCompile(functionEntryArityCheck);
+ }
+
+ static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, ident, slot, cachedOffset, returnAddress, callFrame);
+ }
+
+ static void compileGetByIdSelfList(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompileGetByIdSelfList(stubInfo, polymorphicStructures, currentIndex, structure, ident, slot, cachedOffset);
+ }
+ static void compileGetByIdProtoList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompileGetByIdProtoList(stubInfo, prototypeStructureList, currentIndex, structure, prototypeStructure, ident, slot, cachedOffset, callFrame);
+ }
+ static void compileGetByIdChainList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompileGetByIdChainList(stubInfo, prototypeStructureList, currentIndex, structure, chain, count, ident, slot, cachedOffset, callFrame);
+ }
+
+ static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, ident, slot, cachedOffset, returnAddress, callFrame);
+ }
+
+ static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress, direct);
+ }
+
+ static PassRefPtr<ExecutableMemoryHandle> compileCTIMachineTrampolines(JSGlobalData* globalData, TrampolineStructure* trampolines)
+ {
+ if (!globalData->canUseJIT())
+ return 0;
+ JIT jit(globalData, 0);
+ return jit.privateCompileCTIMachineTrampolines(globalData, trampolines);
+ }
+
+ static CodeRef compileCTINativeCall(JSGlobalData* globalData, NativeFunction func)
+ {
+ if (!globalData->canUseJIT())
+ return CodeRef();
+ JIT jit(globalData, 0);
+ return jit.privateCompileCTINativeCall(globalData, func);
+ }
+
+ static void resetPatchGetById(RepatchBuffer&, StructureStubInfo*);
+ static void resetPatchPutById(RepatchBuffer&, StructureStubInfo*);
+ static void patchGetByIdSelf(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
+ static void patchPutByIdReplace(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct);
+ static void patchMethodCallProto(JSGlobalData&, CodeBlock* codeblock, MethodCallLinkInfo&, JSObject*, Structure*, JSObject*, ReturnAddressPtr);
+
+ static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ReturnAddressPtr returnAddress)
+ {
+ JIT jit(globalData, codeBlock);
+ return jit.privateCompilePatchGetArrayLength(returnAddress);
+ }
+
+ static void linkFor(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, JSGlobalData*, CodeSpecializationKind);
+
+ private:
+ struct JSRInfo {
+ DataLabelPtr storeLocation;
+ Label target;
+
+ JSRInfo(DataLabelPtr storeLocation, Label targetLocation)
+ : storeLocation(storeLocation)
+ , target(targetLocation)
+ {
+ }
+ };
+
+ JIT(JSGlobalData*, CodeBlock* = 0);
+
+ void privateCompileMainPass();
+ void privateCompileLinkPass();
+ void privateCompileSlowCases();
+ JITCode privateCompile(CodePtr* functionEntryArityCheck);
+ void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
+ void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, const Identifier&, const PropertySlot&, size_t cachedOffset);
+ void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, CallFrame* callFrame);
+ void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain* chain, size_t count, const Identifier&, const PropertySlot&, size_t cachedOffset, CallFrame* callFrame);
+ void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
+ void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ReturnAddressPtr returnAddress, bool direct);
+
+ PassRefPtr<ExecutableMemoryHandle> privateCompileCTIMachineTrampolines(JSGlobalData*, TrampolineStructure*);
+ Label privateCompileCTINativeCall(JSGlobalData*, bool isConstruct = false);
+ CodeRef privateCompileCTINativeCall(JSGlobalData*, NativeFunction);
+ void privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress);
+
+ static bool isDirectPutById(StructureStubInfo*);
+
+ void addSlowCase(Jump);
+ void addSlowCase(JumpList);
+ void addSlowCase();
+ void addJump(Jump, int);
+ void emitJumpSlowToHot(Jump, int);
+
+ void compileOpCall(OpcodeID, Instruction*, unsigned callLinkInfoIndex);
+ void compileOpCallSlowCase(OpcodeID, Instruction*, Vector<SlowCaseEntry>::iterator&, unsigned callLinkInfoIndex);
+ void compileLoadVarargs(Instruction*);
+ void compileCallEval();
+ void compileCallEvalSlowCase(Vector<SlowCaseEntry>::iterator&);
+
+ enum CompileOpStrictEqType { OpStrictEq, OpNStrictEq };
+ void compileOpStrictEq(Instruction* instruction, CompileOpStrictEqType type);
+ bool isOperandConstantImmediateDouble(unsigned src);
+
+ void emitLoadDouble(int index, FPRegisterID value);
+ void emitLoadInt32ToDouble(int index, FPRegisterID value);
+ Jump emitJumpIfNotObject(RegisterID structureReg);
+ Jump emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType);
+
+ void testPrototype(JSValue, JumpList& failureCases);
+
+ enum WriteBarrierMode { UnconditionalWriteBarrier, ShouldFilterImmediates };
+ // The value register passed to the write barrier is used before any of the scratch registers,
+ // so it may safely be the same as either of the scratch registers.
+ void emitWriteBarrier(RegisterID owner, RegisterID valueTag, RegisterID scratch, RegisterID scratch2, WriteBarrierMode, WriteBarrierUseKind);
+ void emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode, WriteBarrierUseKind);
+
+ template<typename ClassType, typename StructureType> void emitAllocateBasicJSObject(StructureType, RegisterID result, RegisterID storagePtr);
+ template<typename T> void emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID storagePtr);
+ void emitAllocateJSFunction(FunctionExecutable*, RegisterID scopeChain, RegisterID result, RegisterID storagePtr);
+
+ enum ValueProfilingSiteKind { FirstProfilingSite, SubsequentProfilingSite };
+#if ENABLE(VALUE_PROFILER)
+ // This assumes that the value to profile is in regT0 and that regT3 is available for
+ // scratch.
+ void emitValueProfilingSite(ValueProfile*);
+ void emitValueProfilingSite(ValueProfilingSiteKind);
+#else
+ void emitValueProfilingSite(ValueProfilingSiteKind) { }
+#endif
+
+#if USE(JSVALUE32_64)
+ bool getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant);
+
+ void emitLoadTag(int index, RegisterID tag);
+ void emitLoadPayload(int index, RegisterID payload);
+
+ void emitLoad(const JSValue& v, RegisterID tag, RegisterID payload);
+ void emitLoad(int index, RegisterID tag, RegisterID payload, RegisterID base = callFrameRegister);
+ void emitLoad2(int index1, RegisterID tag1, RegisterID payload1, int index2, RegisterID tag2, RegisterID payload2);
+
+ void emitStore(int index, RegisterID tag, RegisterID payload, RegisterID base = callFrameRegister);
+ void emitStore(int index, const JSValue constant, RegisterID base = callFrameRegister);
+ void emitStoreInt32(int index, RegisterID payload, bool indexIsInt32 = false);
+ void emitStoreInt32(int index, TrustedImm32 payload, bool indexIsInt32 = false);
+ void emitStoreAndMapInt32(int index, RegisterID tag, RegisterID payload, bool indexIsInt32, size_t opcodeLength);
+ void emitStoreCell(int index, RegisterID payload, bool indexIsCell = false);
+ void emitStoreBool(int index, RegisterID payload, bool indexIsBool = false);
+ void emitStoreDouble(int index, FPRegisterID value);
+
+ bool isLabeled(unsigned bytecodeOffset);
+ void map(unsigned bytecodeOffset, int virtualRegisterIndex, RegisterID tag, RegisterID payload);
+ void unmap(RegisterID);
+ void unmap();
+ bool isMapped(int virtualRegisterIndex);
+ bool getMappedPayload(int virtualRegisterIndex, RegisterID& payload);
+ bool getMappedTag(int virtualRegisterIndex, RegisterID& tag);
+
+ void emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex);
+ void emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterID tag);
+
+ void compileGetByIdHotPath();
+ void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false);
+ void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset);
+ void compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset);
+ void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset);
+ void compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, size_t cachedOffset);
+
+ // Arithmetic opcode helpers
+ void emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
+ void emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
+ void emitBinaryDoubleOp(OpcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters = true, bool op2IsInRegisters = true);
+
+#if CPU(X86)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 7;
+ static const int patchOffsetPutByIdPropertyMapOffset1 = 22;
+ static const int patchOffsetPutByIdPropertyMapOffset2 = 28;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 7;
+ static const int patchOffsetGetByIdBranchToSlowCase = 13;
+ static const int patchOffsetGetByIdPropertyMapOffset1 = 19;
+ static const int patchOffsetGetByIdPropertyMapOffset2 = 22;
+ static const int patchOffsetGetByIdPutResult = 22;
+#if ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 37;
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 33;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 6;
+
+ static const int patchOffsetMethodCheckProtoObj = 11;
+ static const int patchOffsetMethodCheckProtoStruct = 18;
+ static const int patchOffsetMethodCheckPutFunction = 29;
+#elif CPU(ARM_TRADITIONAL)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 4;
+ static const int patchOffsetPutByIdPropertyMapOffset1 = 20;
+ static const int patchOffsetPutByIdPropertyMapOffset2 = 28;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 4;
+ static const int patchOffsetGetByIdBranchToSlowCase = 16;
+ static const int patchOffsetGetByIdPropertyMapOffset1 = 20;
+ static const int patchOffsetGetByIdPropertyMapOffset2 = 28;
+ static const int patchOffsetGetByIdPutResult = 36;
+#if ENABLE(OPCODE_SAMPLING)
+ #error "OPCODE_SAMPLING is not yet supported"
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 40;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 12;
+
+ static const int patchOffsetMethodCheckProtoObj = 12;
+ static const int patchOffsetMethodCheckProtoStruct = 20;
+ static const int patchOffsetMethodCheckPutFunction = 32;
+
+ // sequenceOpCall
+ static const int sequenceOpCallInstructionSpace = 12;
+ static const int sequenceOpCallConstantSpace = 2;
+ // sequenceMethodCheck
+ static const int sequenceMethodCheckInstructionSpace = 40;
+ static const int sequenceMethodCheckConstantSpace = 6;
+ // sequenceGetByIdHotPath
+ static const int sequenceGetByIdHotPathInstructionSpace = 36;
+ static const int sequenceGetByIdHotPathConstantSpace = 4;
+ // sequenceGetByIdSlowCase
+ static const int sequenceGetByIdSlowCaseInstructionSpace = 56;
+ static const int sequenceGetByIdSlowCaseConstantSpace = 3;
+ // sequencePutById
+ static const int sequencePutByIdInstructionSpace = 36;
+ static const int sequencePutByIdConstantSpace = 4;
+#elif CPU(ARM_THUMB2)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 10;
+ static const int patchOffsetPutByIdPropertyMapOffset1 = 36;
+ static const int patchOffsetPutByIdPropertyMapOffset2 = 48;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 10;
+ static const int patchOffsetGetByIdBranchToSlowCase = 26;
+ static const int patchOffsetGetByIdPropertyMapOffset1 = 28;
+ static const int patchOffsetGetByIdPropertyMapOffset2 = 30;
+ static const int patchOffsetGetByIdPutResult = 32;
+#if ENABLE(OPCODE_SAMPLING)
+ #error "OPCODE_SAMPLING is not yet supported"
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 40;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 16;
+
+ static const int patchOffsetMethodCheckProtoObj = 24;
+ static const int patchOffsetMethodCheckProtoStruct = 34;
+ static const int patchOffsetMethodCheckPutFunction = 58;
+
+ // sequenceOpCall
+ static const int sequenceOpCallInstructionSpace = 12;
+ static const int sequenceOpCallConstantSpace = 2;
+ // sequenceMethodCheck
+ static const int sequenceMethodCheckInstructionSpace = 40;
+ static const int sequenceMethodCheckConstantSpace = 6;
+ // sequenceGetByIdHotPath
+ static const int sequenceGetByIdHotPathInstructionSpace = 36;
+ static const int sequenceGetByIdHotPathConstantSpace = 4;
+ // sequenceGetByIdSlowCase
+ static const int sequenceGetByIdSlowCaseInstructionSpace = 40;
+ static const int sequenceGetByIdSlowCaseConstantSpace = 2;
+ // sequencePutById
+ static const int sequencePutByIdInstructionSpace = 36;
+ static const int sequencePutByIdConstantSpace = 4;
+#elif CPU(MIPS)
+#if WTF_MIPS_ISA(1)
+ static const int patchOffsetPutByIdStructure = 16;
+ static const int patchOffsetPutByIdPropertyMapOffset1 = 56;
+ static const int patchOffsetPutByIdPropertyMapOffset2 = 72;
+ static const int patchOffsetGetByIdStructure = 16;
+ static const int patchOffsetGetByIdBranchToSlowCase = 48;
+ static const int patchOffsetGetByIdPropertyMapOffset1 = 56;
+ static const int patchOffsetGetByIdPropertyMapOffset2 = 76;
+ static const int patchOffsetGetByIdPutResult = 96;
+#if ENABLE(OPCODE_SAMPLING)
+ #error "OPCODE_SAMPLING is not yet supported"
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 56;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 32;
+ static const int patchOffsetMethodCheckProtoObj = 32;
+ static const int patchOffsetMethodCheckProtoStruct = 56;
+ static const int patchOffsetMethodCheckPutFunction = 88;
+#else // WTF_MIPS_ISA(1)
+ static const int patchOffsetPutByIdStructure = 12;
+ static const int patchOffsetPutByIdPropertyMapOffset1 = 48;
+ static const int patchOffsetPutByIdPropertyMapOffset2 = 64;
+ static const int patchOffsetGetByIdStructure = 12;
+ static const int patchOffsetGetByIdBranchToSlowCase = 44;
+ static const int patchOffsetGetByIdPropertyMapOffset1 = 48;
+ static const int patchOffsetGetByIdPropertyMapOffset2 = 64;
+ static const int patchOffsetGetByIdPutResult = 80;
+#if ENABLE(OPCODE_SAMPLING)
+ #error "OPCODE_SAMPLING is not yet supported"
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 56;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 32;
+ static const int patchOffsetMethodCheckProtoObj = 32;
+ static const int patchOffsetMethodCheckProtoStruct = 52;
+ static const int patchOffsetMethodCheckPutFunction = 84;
+#endif
+#elif CPU(SH4)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetGetByIdStructure = 6;
+ static const int patchOffsetPutByIdPropertyMapOffset = 24;
+ static const int patchOffsetPutByIdStructure = 6;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdBranchToSlowCase = 10;
+ static const int patchOffsetGetByIdPropertyMapOffset = 24;
+ static const int patchOffsetGetByIdPutResult = 24;
+
+ // sequenceOpCall
+ static const int sequenceOpCallInstructionSpace = 12;
+ static const int sequenceOpCallConstantSpace = 2;
+ // sequenceMethodCheck
+ static const int sequenceMethodCheckInstructionSpace = 40;
+ static const int sequenceMethodCheckConstantSpace = 6;
+ // sequenceGetByIdHotPath
+ static const int sequenceGetByIdHotPathInstructionSpace = 36;
+ static const int sequenceGetByIdHotPathConstantSpace = 5;
+ // sequenceGetByIdSlowCase
+ static const int sequenceGetByIdSlowCaseInstructionSpace = 30;
+ static const int sequenceGetByIdSlowCaseConstantSpace = 3;
+ // sequencePutById
+ static const int sequencePutByIdInstructionSpace = 36;
+ static const int sequencePutByIdConstantSpace = 5;
+
+ static const int patchOffsetGetByIdPropertyMapOffset1 = 20;
+ static const int patchOffsetGetByIdPropertyMapOffset2 = 22;
+
+ static const int patchOffsetPutByIdPropertyMapOffset1 = 20;
+ static const int patchOffsetPutByIdPropertyMapOffset2 = 26;
+
+#if ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 0; // FIXME
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 26;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 4;
+
+ static const int patchOffsetMethodCheckProtoObj = 12;
+ static const int patchOffsetMethodCheckProtoStruct = 20;
+ static const int patchOffsetMethodCheckPutFunction = 32;
+#else
+#error "JSVALUE32_64 not supported on this platform."
+#endif
+
+#else // USE(JSVALUE32_64)
+ void emitGetVirtualRegister(int src, RegisterID dst);
+ void emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2);
+ void emitPutVirtualRegister(unsigned dst, RegisterID from = regT0);
+ void emitStoreCell(unsigned dst, RegisterID payload, bool /* only used in JSValue32_64 */ = false)
+ {
+ emitPutVirtualRegister(dst, payload);
+ }
+
+ int32_t getConstantOperandImmediateInt(unsigned src);
+
+ void killLastResultRegister();
+
+ Jump emitJumpIfJSCell(RegisterID);
+ Jump emitJumpIfBothJSCells(RegisterID, RegisterID, RegisterID);
+ void emitJumpSlowCaseIfJSCell(RegisterID);
+ Jump emitJumpIfNotJSCell(RegisterID);
+ void emitJumpSlowCaseIfNotJSCell(RegisterID);
+ void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg);
+#if USE(JSVALUE32_64)
+ JIT::Jump emitJumpIfImmediateNumber(RegisterID reg)
+ {
+ return emitJumpIfImmediateInteger(reg);
+ }
+
+ JIT::Jump emitJumpIfNotImmediateNumber(RegisterID reg)
+ {
+ return emitJumpIfNotImmediateInteger(reg);
+ }
+#endif
+ Jump emitJumpIfImmediateInteger(RegisterID);
+ Jump emitJumpIfNotImmediateInteger(RegisterID);
+ Jump emitJumpIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
+ void emitJumpSlowCaseIfNotImmediateInteger(RegisterID);
+ void emitJumpSlowCaseIfNotImmediateNumber(RegisterID);
+ void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
+
+#if USE(JSVALUE32_64)
+ void emitFastArithDeTagImmediate(RegisterID);
+ Jump emitFastArithDeTagImmediateJumpIfZero(RegisterID);
+#endif
+ void emitFastArithReTagImmediate(RegisterID src, RegisterID dest);
+ void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest);
+
+ void emitTagAsBoolImmediate(RegisterID reg);
+ void compileBinaryArithOp(OpcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
+#if USE(JSVALUE64)
+ void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase);
+#else
+ void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes);
+#endif
+
+ void compileGetByIdHotPath(int baseVReg, Identifier*);
+ void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false);
+ void compileGetDirectOffset(RegisterID base, RegisterID result, size_t cachedOffset);
+ void compileGetDirectOffset(JSObject* base, RegisterID result, size_t cachedOffset);
+ void compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch);
+ void compilePutDirectOffset(RegisterID base, RegisterID value, size_t cachedOffset);
+
+#if CPU(X86_64)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 10;
+ static const int patchOffsetPutByIdPropertyMapOffset = 31;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 10;
+ static const int patchOffsetGetByIdBranchToSlowCase = 20;
+ static const int patchOffsetGetByIdPropertyMapOffset = 28;
+ static const int patchOffsetGetByIdPutResult = 28;
+#if ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 64;
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 54;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 9;
+
+ static const int patchOffsetMethodCheckProtoObj = 20;
+ static const int patchOffsetMethodCheckProtoStruct = 30;
+ static const int patchOffsetMethodCheckPutFunction = 50;
+#elif CPU(X86)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 7;
+ static const int patchOffsetPutByIdPropertyMapOffset = 22;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 7;
+ static const int patchOffsetGetByIdBranchToSlowCase = 13;
+ static const int patchOffsetGetByIdPropertyMapOffset = 22;
+ static const int patchOffsetGetByIdPutResult = 22;
+#if ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 33;
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 23;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 6;
+
+ static const int patchOffsetMethodCheckProtoObj = 11;
+ static const int patchOffsetMethodCheckProtoStruct = 18;
+ static const int patchOffsetMethodCheckPutFunction = 29;
+#elif CPU(ARM_THUMB2)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 10;
+ static const int patchOffsetPutByIdPropertyMapOffset = 46;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 10;
+ static const int patchOffsetGetByIdBranchToSlowCase = 26;
+ static const int patchOffsetGetByIdPropertyMapOffset = 46;
+ static const int patchOffsetGetByIdPutResult = 50;
+#if ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 0; // FIXME
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 28;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 16;
+
+ static const int patchOffsetMethodCheckProtoObj = 24;
+ static const int patchOffsetMethodCheckProtoStruct = 34;
+ static const int patchOffsetMethodCheckPutFunction = 58;
+#elif CPU(ARM_TRADITIONAL)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 4;
+ static const int patchOffsetPutByIdPropertyMapOffset = 20;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 4;
+ static const int patchOffsetGetByIdBranchToSlowCase = 16;
+ static const int patchOffsetGetByIdPropertyMapOffset = 20;
+ static const int patchOffsetGetByIdPutResult = 28;
+#if ENABLE(OPCODE_SAMPLING)
+ #error "OPCODE_SAMPLING is not yet supported"
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 28;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 12;
+
+ static const int patchOffsetMethodCheckProtoObj = 12;
+ static const int patchOffsetMethodCheckProtoStruct = 20;
+ static const int patchOffsetMethodCheckPutFunction = 32;
+
+ // sequenceOpCall
+ static const int sequenceOpCallInstructionSpace = 12;
+ static const int sequenceOpCallConstantSpace = 2;
+ // sequenceMethodCheck
+ static const int sequenceMethodCheckInstructionSpace = 40;
+ static const int sequenceMethodCheckConstantSpace = 6;
+ // sequenceGetByIdHotPath
+ static const int sequenceGetByIdHotPathInstructionSpace = 28;
+ static const int sequenceGetByIdHotPathConstantSpace = 3;
+ // sequenceGetByIdSlowCase
+ static const int sequenceGetByIdSlowCaseInstructionSpace = 32;
+ static const int sequenceGetByIdSlowCaseConstantSpace = 2;
+ // sequencePutById
+ static const int sequencePutByIdInstructionSpace = 28;
+ static const int sequencePutByIdConstantSpace = 3;
+#elif CPU(MIPS)
+#if WTF_MIPS_ISA(1)
+ static const int patchOffsetPutByIdStructure = 16;
+ static const int patchOffsetPutByIdPropertyMapOffset = 68;
+ static const int patchOffsetGetByIdStructure = 16;
+ static const int patchOffsetGetByIdBranchToSlowCase = 48;
+ static const int patchOffsetGetByIdPropertyMapOffset = 68;
+ static const int patchOffsetGetByIdPutResult = 88;
+#if ENABLE(OPCODE_SAMPLING)
+ #error "OPCODE_SAMPLING is not yet supported"
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 40;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 32;
+ static const int patchOffsetMethodCheckProtoObj = 32;
+ static const int patchOffsetMethodCheckProtoStruct = 56;
+ static const int patchOffsetMethodCheckPutFunction = 88;
+#else // WTF_MIPS_ISA(1)
+ static const int patchOffsetPutByIdStructure = 12;
+ static const int patchOffsetPutByIdPropertyMapOffset = 60;
+ static const int patchOffsetGetByIdStructure = 12;
+ static const int patchOffsetGetByIdBranchToSlowCase = 44;
+ static const int patchOffsetGetByIdPropertyMapOffset = 60;
+ static const int patchOffsetGetByIdPutResult = 76;
+#if ENABLE(OPCODE_SAMPLING)
+ #error "OPCODE_SAMPLING is not yet supported"
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 40;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 32;
+ static const int patchOffsetMethodCheckProtoObj = 32;
+ static const int patchOffsetMethodCheckProtoStruct = 52;
+ static const int patchOffsetMethodCheckPutFunction = 84;
+#endif
+#endif
+#endif // USE(JSVALUE32_64)
+
+#if (defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL)
+#define BEGIN_UNINTERRUPTED_SEQUENCE(name) do { beginUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace); } while (false)
+#define END_UNINTERRUPTED_SEQUENCE_FOR_PUT(name, dst) do { endUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace, dst); } while (false)
+#define END_UNINTERRUPTED_SEQUENCE(name) END_UNINTERRUPTED_SEQUENCE_FOR_PUT(name, 0)
+
+ void beginUninterruptedSequence(int, int);
+ void endUninterruptedSequence(int, int, int);
+
+#else
+#define BEGIN_UNINTERRUPTED_SEQUENCE(name) do { beginUninterruptedSequence(); } while (false)
+#define END_UNINTERRUPTED_SEQUENCE(name) do { endUninterruptedSequence(); } while (false)
+#define END_UNINTERRUPTED_SEQUENCE_FOR_PUT(name, dst) do { endUninterruptedSequence(); } while (false)
+#endif
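+ // Illustrative usage (a hedged sketch of how the emitters pair these macros, not new functionality):
+ // code whose hard-coded patch offsets must stay valid is bracketed like so:
+ //     BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+ //     ... emit the instructions the patchOffset* constants describe ...
+ //     END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+ // which, on constant-pool assemblers, prevents a pool flush from landing inside the sequence.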
+
+ void emit_compareAndJump(OpcodeID, unsigned op1, unsigned op2, unsigned target, RelationalCondition);
+ void emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, DoubleCondition, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION), bool invert, Vector<SlowCaseEntry>::iterator&);
+
+ void emit_op_add(Instruction*);
+ void emit_op_bitand(Instruction*);
+ void emit_op_bitnot(Instruction*);
+ void emit_op_bitor(Instruction*);
+ void emit_op_bitxor(Instruction*);
+ void emit_op_call(Instruction*);
+ void emit_op_call_eval(Instruction*);
+ void emit_op_call_varargs(Instruction*);
+ void emit_op_call_put_result(Instruction*);
+ void emit_op_catch(Instruction*);
+ void emit_op_construct(Instruction*);
+ void emit_op_get_callee(Instruction*);
+ void emit_op_create_this(Instruction*);
+ void emit_op_convert_this(Instruction*);
+ void emit_op_create_arguments(Instruction*);
+ void emit_op_debug(Instruction*);
+ void emit_op_del_by_id(Instruction*);
+ void emit_op_div(Instruction*);
+ void emit_op_end(Instruction*);
+ void emit_op_enter(Instruction*);
+ void emit_op_create_activation(Instruction*);
+ void emit_op_eq(Instruction*);
+ void emit_op_eq_null(Instruction*);
+ void emit_op_get_by_id(Instruction*);
+ void emit_op_get_arguments_length(Instruction*);
+ void emit_op_get_by_val(Instruction*);
+ void emit_op_get_argument_by_val(Instruction*);
+ void emit_op_get_by_pname(Instruction*);
+ void emit_op_get_global_var(Instruction*);
+ void emit_op_get_scoped_var(Instruction*);
+ void emit_op_init_lazy_reg(Instruction*);
+ void emit_op_check_has_instance(Instruction*);
+ void emit_op_instanceof(Instruction*);
+ void emit_op_jeq_null(Instruction*);
+ void emit_op_jfalse(Instruction*);
+ void emit_op_jmp(Instruction*);
+ void emit_op_jmp_scopes(Instruction*);
+ void emit_op_jneq_null(Instruction*);
+ void emit_op_jneq_ptr(Instruction*);
+ void emit_op_jless(Instruction*);
+ void emit_op_jlesseq(Instruction*);
+ void emit_op_jgreater(Instruction*);
+ void emit_op_jgreatereq(Instruction*);
+ void emit_op_jnless(Instruction*);
+ void emit_op_jnlesseq(Instruction*);
+ void emit_op_jngreater(Instruction*);
+ void emit_op_jngreatereq(Instruction*);
+ void emit_op_jsr(Instruction*);
+ void emit_op_jtrue(Instruction*);
+ void emit_op_loop(Instruction*);
+ void emit_op_loop_hint(Instruction*);
+ void emit_op_loop_if_less(Instruction*);
+ void emit_op_loop_if_lesseq(Instruction*);
+ void emit_op_loop_if_greater(Instruction*);
+ void emit_op_loop_if_greatereq(Instruction*);
+ void emit_op_loop_if_true(Instruction*);
+ void emit_op_loop_if_false(Instruction*);
+ void emit_op_lshift(Instruction*);
+ void emit_op_method_check(Instruction*);
+ void emit_op_mod(Instruction*);
+ void emit_op_mov(Instruction*);
+ void emit_op_mul(Instruction*);
+ void emit_op_negate(Instruction*);
+ void emit_op_neq(Instruction*);
+ void emit_op_neq_null(Instruction*);
+ void emit_op_new_array(Instruction*);
+ void emit_op_new_array_buffer(Instruction*);
+ void emit_op_new_func(Instruction*);
+ void emit_op_new_func_exp(Instruction*);
+ void emit_op_new_object(Instruction*);
+ void emit_op_new_regexp(Instruction*);
+ void emit_op_get_pnames(Instruction*);
+ void emit_op_next_pname(Instruction*);
+ void emit_op_not(Instruction*);
+ void emit_op_nstricteq(Instruction*);
+ void emit_op_pop_scope(Instruction*);
+ void emit_op_post_dec(Instruction*);
+ void emit_op_post_inc(Instruction*);
+ void emit_op_pre_dec(Instruction*);
+ void emit_op_pre_inc(Instruction*);
+ void emit_op_profile_did_call(Instruction*);
+ void emit_op_profile_will_call(Instruction*);
+ void emit_op_push_new_scope(Instruction*);
+ void emit_op_push_scope(Instruction*);
+ void emit_op_put_by_id(Instruction*);
+ void emit_op_put_by_index(Instruction*);
+ void emit_op_put_by_val(Instruction*);
+ void emit_op_put_getter(Instruction*);
+ void emit_op_put_global_var(Instruction*);
+ void emit_op_put_scoped_var(Instruction*);
+ void emit_op_put_setter(Instruction*);
+ void emit_op_resolve(Instruction*);
+ void emit_op_resolve_base(Instruction*);
+ void emit_op_ensure_property_exists(Instruction*);
+ void emit_op_resolve_global(Instruction*, bool dynamic = false);
+ void emit_op_resolve_global_dynamic(Instruction*);
+ void emit_op_resolve_skip(Instruction*);
+ void emit_op_resolve_with_base(Instruction*);
+ void emit_op_resolve_with_this(Instruction*);
+ void emit_op_ret(Instruction*);
+ void emit_op_ret_object_or_this(Instruction*);
+ void emit_op_rshift(Instruction*);
+ void emit_op_sret(Instruction*);
+ void emit_op_strcat(Instruction*);
+ void emit_op_stricteq(Instruction*);
+ void emit_op_sub(Instruction*);
+ void emit_op_switch_char(Instruction*);
+ void emit_op_switch_imm(Instruction*);
+ void emit_op_switch_string(Instruction*);
+ void emit_op_tear_off_activation(Instruction*);
+ void emit_op_tear_off_arguments(Instruction*);
+ void emit_op_throw(Instruction*);
+ void emit_op_throw_reference_error(Instruction*);
+ void emit_op_to_jsnumber(Instruction*);
+ void emit_op_to_primitive(Instruction*);
+ void emit_op_unexpected_load(Instruction*);
+ void emit_op_urshift(Instruction*);
+#if ENABLE(JIT_USE_SOFT_MODULO)
+ void softModulo();
+#endif
+
+ void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitand(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitnot(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitor(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitxor(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_call_eval(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_construct(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_convert_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_create_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_div(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_eq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_arguments_length(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_argument_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_by_pname(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_check_has_instance(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_instanceof(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jfalse(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jless(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jgreater(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jgreatereq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jnless(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jnlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jngreater(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jngreatereq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_loop_if_less(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_loop_if_lesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_loop_if_greater(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_loop_if_greatereq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_loop_if_true(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_loop_if_false(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_lshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_negate(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_neq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_new_object(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_not(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_nstricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_post_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_post_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_pre_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_pre_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_resolve_global(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_resolve_global_dynamic(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_rshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_to_jsnumber(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_urshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_new_func(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_new_func_exp(Instruction*, Vector<SlowCaseEntry>::iterator&);
+
+ void emitRightShift(Instruction*, bool isUnsigned);
+ void emitRightShiftSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator&, bool isUnsigned);
+
+ /* This function is deprecated. */
+ void emitGetJITStubArg(unsigned argumentNumber, RegisterID dst);
+
+ void emitInitRegister(unsigned dst);
+
+ void emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry);
+ void emitPutCellToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry);
+ void emitPutIntToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry);
+ void emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry);
+ void emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
+ void emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
+
+ JSValue getConstantOperand(unsigned src);
+ bool isOperandConstantImmediateInt(unsigned src);
+ bool isOperandConstantImmediateChar(unsigned src);
+
+ bool atJumpTarget();
+
+ Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter)
+ {
+ return iter++->from;
+ }
+ void linkSlowCase(Vector<SlowCaseEntry>::iterator& iter)
+ {
+ iter->from.link(this);
+ ++iter;
+ }
+ void linkDummySlowCase(Vector<SlowCaseEntry>::iterator& iter)
+ {
+ ASSERT(!iter->from.isSet());
+ ++iter;
+ }
+ void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, int virtualRegisterIndex);
+
+ Jump checkStructure(RegisterID reg, Structure* structure);
+
+ void restoreArgumentReference();
+ void restoreArgumentReferenceForTrampoline();
+ void updateTopCallFrame();
+
+ Call emitNakedCall(CodePtr function = CodePtr());
+
+ void preserveReturnAddressAfterCall(RegisterID);
+ void restoreReturnAddressBeforeReturn(RegisterID);
+ void restoreReturnAddressBeforeReturn(Address);
+
+ // Loads the character value of a single character string into dst.
+ void emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures);
+
+ enum OptimizationCheckKind { LoopOptimizationCheck, RetOptimizationCheck };
+#if ENABLE(DFG_JIT)
+ void emitOptimizationCheck(OptimizationCheckKind);
+#else
+ void emitOptimizationCheck(OptimizationCheckKind) { }
+#endif
+
+ void emitTimeoutCheck();
+#ifndef NDEBUG
+ void printBytecodeOperandTypes(unsigned src1, unsigned src2);
+#endif
+
+#if ENABLE(SAMPLING_FLAGS)
+ void setSamplingFlag(int32_t);
+ void clearSamplingFlag(int32_t);
+#endif
+
+#if ENABLE(SAMPLING_COUNTERS)
+ void emitCount(AbstractSamplingCounter&, int32_t = 1);
+#endif
+
+#if ENABLE(OPCODE_SAMPLING)
+ void sampleInstruction(Instruction*, bool = false);
+#endif
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+ void sampleCodeBlock(CodeBlock*);
+#else
+ void sampleCodeBlock(CodeBlock*) {}
+#endif
+
+#if ENABLE(DFG_JIT)
+ bool canBeOptimized() { return m_canBeOptimized; }
+ bool shouldEmitProfiling() { return m_canBeOptimized; }
+#else
+ bool canBeOptimized() { return false; }
+ // Enables use of the value profiler with tiered compilation turned off,
+ // in which case all code gets profiled.
+ bool shouldEmitProfiling() { return true; }
+#endif
+
+ Interpreter* m_interpreter;
+ JSGlobalData* m_globalData;
+ CodeBlock* m_codeBlock;
+
+ Vector<CallRecord> m_calls;
+ Vector<Label> m_labels;
+ Vector<PropertyStubCompilationInfo> m_propertyAccessCompilationInfo;
+ Vector<StructureStubCompilationInfo> m_callStructureStubCompilationInfo;
+ Vector<MethodCallCompilationInfo> m_methodCallCompilationInfo;
+ Vector<JumpTable> m_jmpTable;
+
+ unsigned m_bytecodeOffset;
+ Vector<JSRInfo> m_jsrSites;
+ Vector<SlowCaseEntry> m_slowCases;
+ Vector<SwitchRecord> m_switches;
+
+ unsigned m_propertyAccessInstructionIndex;
+ unsigned m_globalResolveInfoIndex;
+ unsigned m_callLinkInfoIndex;
+
+#if USE(JSVALUE32_64)
+ unsigned m_jumpTargetIndex;
+ unsigned m_mappedBytecodeOffset;
+ int m_mappedVirtualRegisterIndex;
+ RegisterID m_mappedTag;
+ RegisterID m_mappedPayload;
+#else
+ int m_lastResultBytecodeRegister;
+#endif
+ unsigned m_jumpTargetsPosition;
+
+#ifndef NDEBUG
+#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+ Label m_uninterruptedInstructionSequenceBegin;
+ int m_uninterruptedConstantSequenceBegin;
+#endif
+#endif
+ WeakRandom m_randomGenerator;
+ static CodeRef stringGetByValStubGenerator(JSGlobalData*);
+
+#if ENABLE(VALUE_PROFILER)
+ bool m_canBeOptimized;
+#endif
+ } JIT_CLASS_ALIGNMENT;
+
+ inline void JIT::emit_op_loop(Instruction* currentInstruction)
+ {
+ emitTimeoutCheck();
+ emit_op_jmp(currentInstruction);
+ }
+
+ inline void JIT::emit_op_loop_hint(Instruction*)
+ {
+ emitOptimizationCheck(LoopOptimizationCheck);
+ }
+
+ inline void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
+ {
+ emitTimeoutCheck();
+ emit_op_jtrue(currentInstruction);
+ }
+
+ inline void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+ {
+ emitSlow_op_jtrue(currentInstruction, iter);
+ }
+
+ inline void JIT::emit_op_loop_if_false(Instruction* currentInstruction)
+ {
+ emitTimeoutCheck();
+ emit_op_jfalse(currentInstruction);
+ }
+
+ inline void JIT::emitSlow_op_loop_if_false(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+ {
+ emitSlow_op_jfalse(currentInstruction, iter);
+ }
+
+ inline void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
+ {
+ emitTimeoutCheck();
+ emit_op_jless(currentInstruction);
+ }
+
+ inline void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+ {
+ emitSlow_op_jless(currentInstruction, iter);
+ }
+
+ inline void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
+ {
+ emitTimeoutCheck();
+ emit_op_jlesseq(currentInstruction);
+ }
+
+ inline void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+ {
+ emitSlow_op_jlesseq(currentInstruction, iter);
+ }
+
+ inline void JIT::emit_op_loop_if_greater(Instruction* currentInstruction)
+ {
+ emitTimeoutCheck();
+ emit_op_jgreater(currentInstruction);
+ }
+
+ inline void JIT::emitSlow_op_loop_if_greater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+ {
+ emitSlow_op_jgreater(currentInstruction, iter);
+ }
+
+ inline void JIT::emit_op_loop_if_greatereq(Instruction* currentInstruction)
+ {
+ emitTimeoutCheck();
+ emit_op_jgreatereq(currentInstruction);
+ }
+
+ inline void JIT::emitSlow_op_loop_if_greatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+ {
+ emitSlow_op_jgreatereq(currentInstruction, iter);
+ }
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // JIT_h
diff --git a/Source/JavaScriptCore/jit/JITArithmetic.cpp b/Source/JavaScriptCore/jit/JITArithmetic.cpp
new file mode 100644
index 000000000..362cc6241
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITArithmetic.cpp
@@ -0,0 +1,1139 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#include "JIT.h"
+
+#include "CodeBlock.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JITStubs.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "Interpreter.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+void JIT::emit_op_jless(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emit_compareAndJump(op_jless, op1, op2, target, LessThan);
+}
+
+void JIT::emit_op_jlesseq(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emit_compareAndJump(op_jlesseq, op1, op2, target, LessThanOrEqual);
+}
+
+void JIT::emit_op_jgreater(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emit_compareAndJump(op_jgreater, op1, op2, target, GreaterThan);
+}
+
+void JIT::emit_op_jgreatereq(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emit_compareAndJump(op_jgreatereq, op1, op2, target, GreaterThanOrEqual);
+}
+
+void JIT::emit_op_jnless(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emit_compareAndJump(op_jnless, op1, op2, target, GreaterThanOrEqual);
+}
+
+void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emit_compareAndJump(op_jnlesseq, op1, op2, target, GreaterThan);
+}
+
+void JIT::emit_op_jngreater(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emit_compareAndJump(op_jngreater, op1, op2, target, LessThanOrEqual);
+}
+
+void JIT::emit_op_jngreatereq(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emit_compareAndJump(op_jngreatereq, op1, op2, target, LessThan);
+}
+
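+// Note: the op_jn* variants above branch when the comparison does *not* hold, so they hand
+// emit_compareAndJump the inverted integer condition (op_jnless uses GreaterThanOrEqual, and so on).
+// Their slow cases below reuse the plain stub (e.g. cti_op_jless) with the invert flag set, and use
+// the matching "...OrUnordered" double condition so a NaN operand still takes the branch, as required
+// by the semantics of !(a < b).
+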
+void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emit_compareAndJumpSlow(op1, op2, target, DoubleLessThan, cti_op_jless, false, iter);
+}
+
+void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqual, cti_op_jlesseq, false, iter);
+}
+
+void JIT::emitSlow_op_jgreater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThan, cti_op_jgreater, false, iter);
+}
+
+void JIT::emitSlow_op_jgreatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqual, cti_op_jgreatereq, false, iter);
+}
+
+void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqualOrUnordered, cti_op_jless, true, iter);
+}
+
+void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrUnordered, cti_op_jlesseq, true, iter);
+}
+
+void JIT::emitSlow_op_jngreater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqualOrUnordered, cti_op_jgreater, true, iter);
+}
+
+void JIT::emitSlow_op_jngreatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrUnordered, cti_op_jgreatereq, true, iter);
+}
+
+#if USE(JSVALUE64)
+
+void JIT::emit_op_lshift(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
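+ // Fast path: both operands must already be immediate ints; untag them,
+ // shift, then retag the result.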
+ emitGetVirtualRegisters(op1, regT0, op2, regT2);
+ // FIXME: would we be better off using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT2);
+ emitFastArithImmToInt(regT0);
+ emitFastArithImmToInt(regT2);
+ lshift32(regT2, regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ UNUSED_PARAM(op1);
+ UNUSED_PARAM(op2);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_lshift);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT2);
+ stubCall.call(result);
+}
+
+void JIT::emit_op_rshift(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2)) {
+ // isOperandConstantImmediateInt(op2) => 1 SlowCase
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ // Mask with 0x1f as per ecma-262 11.7.2 step 7.
+ rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
+ } else {
+ emitGetVirtualRegisters(op1, regT0, op2, regT2);
+ if (supportsFloatingPointTruncate()) {
+ Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
+ // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
+ addSlowCase(emitJumpIfNotImmediateNumber(regT0));
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+ addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
+ lhsIsInt.link(this);
+ emitJumpSlowCaseIfNotImmediateInteger(regT2);
+ } else {
+ // !supportsFloatingPoint() => 2 SlowCases
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT2);
+ }
+ emitFastArithImmToInt(regT2);
+ rshift32(regT2, regT0);
+ }
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_rshift);
+
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(op2, regT2);
+ } else {
+ if (supportsFloatingPointTruncate()) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ // We're reloading op1 to regT0 as we can no longer guarantee that
+ // we have not munged the operand. It may have already been shifted
+ // correctly, but it still will not have been tagged.
+ stubCall.addArgument(op1, regT0);
+ stubCall.addArgument(regT2);
+ } else {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT2);
+ }
+ }
+
+ stubCall.call(result);
+}
+
+void JIT::emit_op_urshift(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ // Slow case of urshift makes assumptions about what registers hold the
+ // shift arguments, so any changes must be updated there as well.
+ if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitFastArithImmToInt(regT0);
+ int shift = getConstantOperand(op2).asInt32();
+ if (shift)
+ urshift32(Imm32(shift & 0x1f), regT0);
+ // unsigned shift < 0 or shift = k*2^32 is (essentially) a toUint
+ // conversion, which can result in a value we cannot represent
+ // as an immediate int.
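+ // For example, (-1) >>> 0 evaluates to 4294967295, which exceeds
+ // INT32_MAX, so a result with the sign bit set takes the slow case.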
+ if (shift < 0 || !(shift & 31))
+ addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(dst, regT0);
+ return;
+ }
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ if (!isOperandConstantImmediateInt(op1))
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ emitFastArithImmToInt(regT0);
+ emitFastArithImmToInt(regT1);
+ urshift32(regT1, regT0);
+ addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(dst, regT0);
+}
+
+void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ int shift = getConstantOperand(op2).asInt32();
+ // op1 = regT0
+ linkSlowCase(iter); // int32 check
+ if (supportsFloatingPointTruncate()) {
+ JumpList failures;
+ failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+ failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
+ if (shift)
+ urshift32(Imm32(shift & 0x1f), regT0);
+ if (shift < 0 || !(shift & 31))
+ failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(dst, regT0);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
+ failures.link(this);
+ }
+ if (shift < 0 || !(shift & 31))
+ linkSlowCase(iter); // failed to box in hot path
+ } else {
+ // op1 = regT0
+ // op2 = regT1
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // int32 check -- op1 is not an int
+ if (supportsFloatingPointTruncate()) {
+ JumpList failures;
+ failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+ failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
+ failures.append(emitJumpIfNotImmediateInteger(regT1)); // op2 is not an int
+ emitFastArithImmToInt(regT1);
+ urshift32(regT1, regT0);
+ failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(dst, regT0);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
+ failures.link(this);
+ }
+ }
+
+ linkSlowCase(iter); // int32 check - op2 is not an int
+ linkSlowCase(iter); // Can't represent unsigned result as an immediate
+ }
+
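+ // Reload both operands from their virtual registers; the fast path may
+ // have already untagged or shifted the values in regT0/regT1.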
+ JITStubCall stubCall(this, cti_op_urshift);
+ stubCall.addArgument(op1, regT0);
+ stubCall.addArgument(op2, regT1);
+ stubCall.call(dst);
+}
+
+void JIT::emit_compareAndJump(OpcodeID, unsigned op1, unsigned op2, unsigned target, RelationalCondition condition)
+{
+ // We generate inline code for the following cases in the fast path:
+ // - int immediate to constant int immediate
+ // - constant int immediate to int immediate
+ // - int immediate to int immediate
+
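+ // If one operand is a constant single-character string, compare the other
+ // operand's first character against it directly; anything that is not a
+ // cell, or whose character cannot be loaded directly, takes the slow case.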
+ if (isOperandConstantImmediateChar(op1)) {
+ emitGetVirtualRegister(op2, regT0);
+ addSlowCase(emitJumpIfNotJSCell(regT0));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateChar(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ addSlowCase(emitJumpIfNotJSCell(regT0));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ int32_t op2imm = getConstantOperandImmediateInt(op2);
+ addJump(branch32(condition, regT0, Imm32(op2imm)), target);
+ } else if (isOperandConstantImmediateInt(op1)) {
+ emitGetVirtualRegister(op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ int32_t op1imm = getConstantOperandImmediateInt(op1);
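+ // op1 is the constant and ends up on the right-hand side of the branch,
+ // so the condition is commuted to compensate.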
+ addJump(branch32(commute(condition), regT1, Imm32(op1imm)), target);
+ } else {
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+
+ addJump(branch32(condition, regT0, regT1), target);
+ }
+}
+
+void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, DoubleCondition condition, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION), bool invert, Vector<SlowCaseEntry>::iterator& iter)
+{
+ COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jlesseq), OPCODE_LENGTH_op_jlesseq_equals_op_jless);
+ COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jnless), OPCODE_LENGTH_op_jnless_equals_op_jless);
+ COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jnlesseq), OPCODE_LENGTH_op_jnlesseq_equals_op_jless);
+ COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jgreater), OPCODE_LENGTH_op_jgreater_equals_op_jless);
+ COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jgreatereq), OPCODE_LENGTH_op_jgreatereq_equals_op_jless);
+ COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jngreater), OPCODE_LENGTH_op_jngreater_equals_op_jless);
+ COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jngreatereq), OPCODE_LENGTH_op_jngreatereq_equals_op_jless);
+
+ // We generate inline code for the following cases in the slow path:
+ // - floating-point number to constant int immediate
+ // - constant int immediate to floating-point number
+ // - floating-point number to floating-point number.
+ if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
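+ // Link the cell check and the character-load failures recorded by the
+ // constant-character fast path, then fall back to the stub.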
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, stub);
+ stubCall.addArgument(op1, regT0);
+ stubCall.addArgument(op2, regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
+ return;
+ }
+
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+
+ int32_t op2imm = getConstantOperand(op2).asInt32();
+
+ move(Imm32(op2imm), regT1);
+ convertInt32ToDouble(regT1, fpRegT1);
+
+ emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));
+
+ fail1.link(this);
+ }
+
+ JITStubCall stubCall(this, stub);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
+
+ } else if (isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT1, fpRegT1);
+
+ int32_t op1imm = getConstantOperand(op1).asInt32();
+
+ move(Imm32(op1imm), regT0);
+ convertInt32ToDouble(regT0, fpRegT0);
+
+ emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));
+
+ fail1.link(this);
+ }
+
+ JITStubCall stubCall(this, stub);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
+ } else {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
+ Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
+ Jump fail3 = emitJumpIfImmediateInteger(regT1);
+ addPtr(tagTypeNumberRegister, regT0);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT0, fpRegT0);
+ movePtrToDouble(regT1, fpRegT1);
+
+ emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));
+
+ fail1.link(this);
+ fail2.link(this);
+ fail3.link(this);
+ }
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, stub);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
+ }
+}
+
+void JIT::emit_op_bitand(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op1)) {
+ emitGetVirtualRegister(op2, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ int32_t imm = getConstantOperandImmediateInt(op1);
+ andPtr(Imm32(imm), regT0);
+ if (imm >= 0)
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ int32_t imm = getConstantOperandImmediateInt(op2);
+ andPtr(Imm32(imm), regT0);
+ if (imm >= 0)
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+ } else {
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ andPtr(regT1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ }
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter);
+ if (isOperandConstantImmediateInt(op1)) {
+ JITStubCall stubCall(this, cti_op_bitand);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(regT0);
+ stubCall.call(result);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ JITStubCall stubCall(this, cti_op_bitand);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+ } else {
+ JITStubCall stubCall(this, cti_op_bitand);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(regT1);
+ stubCall.call(result);
+ }
+}
+
+void JIT::emit_op_post_inc(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(srcDst, regT0);
+ move(regT0, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT1));
+ emitFastArithIntToImmNoCheck(regT1, regT1);
+ emitPutVirtualRegister(srcDst, regT1);
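+ // regT0 still holds the original (pre-increment) value, which is the
+ // result of a post-increment.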
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_post_inc);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(Imm32(srcDst));
+ stubCall.call(result);
+}
+
+void JIT::emit_op_post_dec(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(srcDst, regT0);
+ move(regT0, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT1));
+ emitFastArithIntToImmNoCheck(regT1, regT1);
+ emitPutVirtualRegister(srcDst, regT1);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_post_dec);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(Imm32(srcDst));
+ stubCall.call(result);
+}
+
+void JIT::emit_op_pre_inc(Instruction* currentInstruction)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ emitGetVirtualRegister(srcDst, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+ emitPutVirtualRegister(srcDst);
+}
+
+void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ Jump notImm = getSlowCase(iter);
+ linkSlowCase(iter);
+ emitGetVirtualRegister(srcDst, regT0);
+ notImm.link(this);
+ JITStubCall stubCall(this, cti_op_pre_inc);
+ stubCall.addArgument(regT0);
+ stubCall.call(srcDst);
+}
+
+void JIT::emit_op_pre_dec(Instruction* currentInstruction)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ emitGetVirtualRegister(srcDst, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+ emitPutVirtualRegister(srcDst);
+}
+
+void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ Jump notImm = getSlowCase(iter);
+ linkSlowCase(iter);
+ emitGetVirtualRegister(srcDst, regT0);
+ notImm.link(this);
+ JITStubCall stubCall(this, cti_op_pre_dec);
+ stubCall.addArgument(regT0);
+ stubCall.call(srcDst);
+}
+
+/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
+
+#if CPU(X86) || CPU(X86_64) || CPU(MIPS)
+
+void JIT::emit_op_mod(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+#if CPU(X86) || CPU(X86_64)
+ // Make sure registers are correct for x86 IDIV instructions.
+ ASSERT(regT0 == X86Registers::eax);
+ ASSERT(regT1 == X86Registers::edx);
+ ASSERT(regT2 == X86Registers::ecx);
+#endif
+
+ emitGetVirtualRegisters(op1, regT0, op2, regT2);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT2);
+
+ addSlowCase(branchPtr(Equal, regT2, TrustedImmPtr(JSValue::encode(jsNumber(0)))));
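+ // On x86, cdq sign-extends eax (regT0) into edx:eax, and idivl leaves the
+ // quotient in eax and the remainder in edx (regT1); the remainder is
+ // re-tagged into regT0 below as the result of op_mod.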
+ m_assembler.cdq();
+ m_assembler.idivl_r(regT2);
+ emitFastArithReTagImmediate(regT1, regT0);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+
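+ // Two int32 checks plus the divide-by-zero check.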
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_mod);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT2);
+ stubCall.call(result);
+}
+
+#else // CPU(X86) || CPU(X86_64) || CPU(MIPS)
+
+void JIT::emit_op_mod(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_mod);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+}
+
+void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+#if ENABLE(JIT_USE_SOFT_MODULO)
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_mod);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+#else
+ ASSERT_NOT_REACHED();
+#endif
+}
+
+#endif // CPU(X86) || CPU(X86_64) || CPU(MIPS)
+
+/* ------------------------------ END: OP_MOD ------------------------------ */
+
+/* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
+
+void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
+{
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+#if ENABLE(VALUE_PROFILER)
+ RareCaseProfile* profile = m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
+#endif
+ if (opcodeID == op_add)
+ addSlowCase(branchAdd32(Overflow, regT1, regT0));
+ else if (opcodeID == op_sub)
+ addSlowCase(branchSub32(Overflow, regT1, regT0));
+ else {
+ ASSERT(opcodeID == op_mul);
+#if ENABLE(VALUE_PROFILER)
+ if (m_canBeOptimized) {
+ // We want to be able to measure if this is taking the slow case just
+ // because of negative zero. If this produces positive zero, then we
+ // don't want the slow case to be taken because that will throw off
+ // speculative compilation.
+ move(regT0, regT2);
+ addSlowCase(branchMul32(Overflow, regT1, regT2));
+ JumpList done;
+ done.append(branchTest32(NonZero, regT2));
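+ // The product is zero: it is negative zero only if one of the operands
+ // was negative, so check the signs of both operands.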
+ Jump negativeZero = branch32(LessThan, regT0, Imm32(0));
+ done.append(branch32(GreaterThanOrEqual, regT1, Imm32(0)));
+ negativeZero.link(this);
+ // We only get here if we have a genuine negative zero. Record this,
+ // so that the speculative JIT knows that we failed speculation
+ // because of a negative zero.
+ add32(Imm32(1), AbsoluteAddress(&profile->m_counter));
+ addSlowCase(jump());
+ done.link(this);
+ move(regT2, regT0);
+ } else {
+ addSlowCase(branchMul32(Overflow, regT1, regT0));
+ addSlowCase(branchTest32(Zero, regT0));
+ }
+#else
+ addSlowCase(branchMul32(Overflow, regT1, regT0));
+ addSlowCase(branchTest32(Zero, regT0));
+#endif
+ }
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+}
+
+void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned op2, OperandTypes types, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase)
+{
+ // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
+ COMPILE_ASSERT(((TagTypeNumber + DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);
+
+ Jump notImm1;
+ Jump notImm2;
+ if (op1HasImmediateIntFastCase) {
+ notImm2 = getSlowCase(iter);
+ } else if (op2HasImmediateIntFastCase) {
+ notImm1 = getSlowCase(iter);
+ } else {
+ notImm1 = getSlowCase(iter);
+ notImm2 = getSlowCase(iter);
+ }
+
+ linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
+ if (opcodeID == op_mul && !op1HasImmediateIntFastCase && !op2HasImmediateIntFastCase) // op_mul has an extra slow case to handle 0 * negative number.
+ linkSlowCase(iter);
+ emitGetVirtualRegister(op1, regT0);
+
+ Label stubFunctionCall(this);
+ JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
+ if (op1HasImmediateIntFastCase || op2HasImmediateIntFastCase) {
+ emitGetVirtualRegister(op1, regT0);
+ emitGetVirtualRegister(op2, regT1);
+ }
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(result);
+ Jump end = jump();
+
+ if (op1HasImmediateIntFastCase) {
+ notImm2.link(this);
+ if (!types.second().definitelyIsNumber())
+ emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
+ emitGetVirtualRegister(op1, regT1);
+ convertInt32ToDouble(regT1, fpRegT1);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT2);
+ } else if (op2HasImmediateIntFastCase) {
+ notImm1.link(this);
+ if (!types.first().definitelyIsNumber())
+ emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
+ emitGetVirtualRegister(op2, regT1);
+ convertInt32ToDouble(regT1, fpRegT1);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT2);
+ } else {
+ // If we get here, regT0 (eax) is not an int32; regT1 (edx) has not been checked yet.
+ notImm1.link(this);
+ if (!types.first().definitelyIsNumber())
+ emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
+ if (!types.second().definitelyIsNumber())
+ emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT1);
+ Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
+ convertInt32ToDouble(regT1, fpRegT2);
+ Jump op2wasInteger = jump();
+
+ // If we get here, regT0 (eax) IS an int32; regT1 (edx) is not.
+ notImm2.link(this);
+ if (!types.second().definitelyIsNumber())
+ emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
+ convertInt32ToDouble(regT0, fpRegT1);
+ op2isDouble.link(this);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT1, fpRegT2);
+ op2wasInteger.link(this);
+ }
+
+ if (opcodeID == op_add)
+ addDouble(fpRegT2, fpRegT1);
+ else if (opcodeID == op_sub)
+ subDouble(fpRegT2, fpRegT1);
+ else if (opcodeID == op_mul)
+ mulDouble(fpRegT2, fpRegT1);
+ else {
+ ASSERT(opcodeID == op_div);
+ divDouble(fpRegT2, fpRegT1);
+ }
+ moveDoubleToPtr(fpRegT1, regT0);
+ subPtr(tagTypeNumberRegister, regT0);
+ emitPutVirtualRegister(result, regT0);
+
+ end.link(this);
+}
+
+void JIT::emit_op_add(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
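+ // When either operand is statically known not to be a number, the add
+ // always goes to the stub. The argument-less addSlowCase records a
+ // placeholder entry so that emitSlow_op_add, which is always generated,
+ // has an entry to discharge via linkDummySlowCase.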
+ addSlowCase();
+ JITStubCall stubCall(this, cti_op_add);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+ return;
+ }
+
+ if (isOperandConstantImmediateInt(op1)) {
+ emitGetVirtualRegister(op2, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), regT0));
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), regT0));
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+ } else
+ compileBinaryArithOp(op_add, result, op1, op2, types);
+
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
+ linkDummySlowCase(iter);
+ return;
+ }
+
+ bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1);
+ bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2);
+ compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
+}
+
+void JIT::emit_op_mul(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ // For now, only plant a fast int case if the constant operand is greater than zero.
+ int32_t value;
+ if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
+#if ENABLE(VALUE_PROFILER)
+ // Add a special fast case profile because the DFG JIT will expect one.
+ m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
+#endif
+ emitGetVirtualRegister(op2, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
+ emitFastArithReTagImmediate(regT0, regT0);
+ } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
+#if ENABLE(VALUE_PROFILER)
+ // Add a special fast case profile because the DFG JIT will expect one.
+ m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
+#endif
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
+ emitFastArithReTagImmediate(regT0, regT0);
+ } else
+ compileBinaryArithOp(op_mul, result, op1, op2, types);
+
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1) && getConstantOperandImmediateInt(op1) > 0;
+ bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2) && getConstantOperandImmediateInt(op2) > 0;
+ compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
+}
+
+void JIT::emit_op_div(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (isOperandConstantImmediateDouble(op1)) {
+ emitGetVirtualRegister(op1, regT0);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+ } else if (isOperandConstantImmediateInt(op1)) {
+ emitLoadInt32ToDouble(op1, fpRegT0);
+ } else {
+ emitGetVirtualRegister(op1, regT0);
+ if (!types.first().definitelyIsNumber())
+ emitJumpSlowCaseIfNotImmediateNumber(regT0);
+ Jump notInt = emitJumpIfNotImmediateInteger(regT0);
+ convertInt32ToDouble(regT0, fpRegT0);
+ Jump skipDoubleLoad = jump();
+ notInt.link(this);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+ skipDoubleLoad.link(this);
+ }
+
+ if (isOperandConstantImmediateDouble(op2)) {
+ emitGetVirtualRegister(op2, regT1);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT1, fpRegT1);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitLoadInt32ToDouble(op2, fpRegT1);
+ } else {
+ emitGetVirtualRegister(op2, regT1);
+ if (!types.second().definitelyIsNumber())
+ emitJumpSlowCaseIfNotImmediateNumber(regT1);
+ Jump notInt = emitJumpIfNotImmediateInteger(regT1);
+ convertInt32ToDouble(regT1, fpRegT1);
+ Jump skipDoubleLoad = jump();
+ notInt.link(this);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT1, fpRegT1);
+ skipDoubleLoad.link(this);
+ }
+ divDouble(fpRegT1, fpRegT0);
+
+#if ENABLE(VALUE_PROFILER)
+ // Is the result actually an integer? The DFG JIT would really like to know. If it's
+ // not an integer, we increment a count. If this count, together with the slow case
+ // counter, is below threshold then the DFG JIT will compile this division with a
+ // speculation that the remainder is zero.
+
+ // As well, there are cases where a double result here would cause an important field
+ // in the heap to sometimes have doubles in it, resulting in double predictions getting
+ // propagated to a use site where it might cause damage (such as the index to an array
+ // access). So if we are DFG compiling anything in the program, we want this code to
+ // ensure that it produces integers whenever possible.
+
+ // FIXME: This will fail to convert to integer if the result is zero. We should
+ // distinguish between positive zero and negative zero here.
+
+ JumpList notInteger;
+ branchConvertDoubleToInt32(fpRegT0, regT0, notInteger, fpRegT1);
+ // If we've got an integer, we might as well make that the result of the division.
+ emitFastArithReTagImmediate(regT0, regT0);
+ Jump isInteger = jump();
+ notInteger.link(this);
+ add32(Imm32(1), AbsoluteAddress(&m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset)->m_counter));
+ moveDoubleToPtr(fpRegT0, regT0);
+ subPtr(tagTypeNumberRegister, regT0);
+ isInteger.link(this);
+#else
+ // Double result.
+ moveDoubleToPtr(fpRegT0, regT0);
+ subPtr(tagTypeNumberRegister, regT0);
+#endif
+
+ emitPutVirtualRegister(dst, regT0);
+}
+
+void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+ if (types.first().definitelyIsNumber() && types.second().definitelyIsNumber()) {
+#ifndef NDEBUG
+ breakpoint();
+#endif
+ return;
+ }
+ if (!isOperandConstantImmediateDouble(op1) && !isOperandConstantImmediateInt(op1)) {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter);
+ }
+ if (!isOperandConstantImmediateDouble(op2) && !isOperandConstantImmediateInt(op2)) {
+ if (!types.second().definitelyIsNumber())
+ linkSlowCase(iter);
+ }
+ // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
+ JITStubCall stubCall(this, cti_op_div);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+}
+
+void JIT::emit_op_sub(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ compileBinaryArithOp(op_sub, result, op1, op2, types);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types, false, false);
+}
+
+/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */
+
+#endif // USE(JSVALUE64)
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
new file mode 100644
index 000000000..4916261fe
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
@@ -0,0 +1,1277 @@
+/*
+* Copyright (C) 2008 Apple Inc. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+* 1. Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* 2. Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in the
+* documentation and/or other materials provided with the distribution.
+*
+* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#if USE(JSVALUE32_64)
+#include "JIT.h"
+
+#include "CodeBlock.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JITStubs.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "Interpreter.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+void JIT::emit_op_negate(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump srcNotInt = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
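+ // Payloads of 0 and 0x80000000 take the slow case: negating 0 produces -0
+ // (a double), and negating INT32_MIN overflows.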
+ addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
+ neg32(regT0);
+ emitStoreInt32(dst, regT0, (dst == src));
+
+ Jump end = jump();
+
+ srcNotInt.link(this);
+ addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
+
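+ // For a double, regT1 holds the high word of the IEEE-754 value, so
+ // flipping its top bit negates the number.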
+ xor32(TrustedImm32(1 << 31), regT1);
+ store32(regT1, tagFor(dst));
+ if (dst != src)
+ store32(regT0, payloadFor(dst));
+
+ end.link(this);
+}
+
+void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // 0x7fffffff check
+ linkSlowCase(iter); // double check
+
+ JITStubCall stubCall(this, cti_op_negate);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
+void JIT::emit_compareAndJump(OpcodeID opcode, unsigned op1, unsigned op2, unsigned target, RelationalCondition condition)
+{
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ // Constant single-character string comparison.
+ if (isOperandConstantImmediateChar(op1)) {
+ emitLoad(op2, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateChar(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateInt(op1)) {
+ emitLoad(op2, regT3, regT2);
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ addJump(branch32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32())), target);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addJump(branch32(condition, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
+ } else {
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ addJump(branch32(condition, regT0, regT2), target);
+ }
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double comparison.
+ emitBinaryDoubleOp(opcode, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
+ end.link(this);
+}
+
+void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, DoubleCondition, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION), bool invert, Vector<SlowCaseEntry>::iterator& iter)
+{
+ if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ } else {
+ if (!supportsFloatingPoint()) {
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // double check
+ linkSlowCase(iter); // int32 check
+ }
+ if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // double check
+ }
+ }
+ JITStubCall stubCall(this, stub);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
+}
+
+// LeftShift (<<)
+
+void JIT::emit_op_lshift(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
+ emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_lshift));
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ if (!isOperandConstantImmediateInt(op1))
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ lshift32(regT2, regT0);
+ emitStoreAndMapInt32(dst, regT1, regT0, dst == op1 || dst == op2, OPCODE_LENGTH(op_lshift));
+}
+
+void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_lshift);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// RightShift (>>) and UnsignedRightShift (>>>) helper
+
+void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ // Slow case of rshift makes assumptions about what registers hold the
+ // shift arguments, so any changes must be updated there as well.
+ if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ int shift = getConstantOperand(op2).asInt32() & 0x1f;
+ if (shift) {
+ if (isUnsigned)
+ urshift32(Imm32(shift), regT0);
+ else
+ rshift32(Imm32(shift), regT0);
+ } else if (isUnsigned) // unsigned right shift by zero is essentially a toUint conversion; the result may not fit in an int32
+ addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
+ emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_rshift));
+ } else {
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ if (!isOperandConstantImmediateInt(op1))
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ if (isUnsigned) {
+ urshift32(regT2, regT0);
+ addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
+ } else
+ rshift32(regT2, regT0);
+ emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_rshift));
+ }
+}
+
+void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool isUnsigned)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ int shift = getConstantOperand(op2).asInt32() & 0x1f;
+ // op1 = regT1:regT0
+ linkSlowCase(iter); // int32 check
+ if (supportsFloatingPointTruncate()) {
+ JumpList failures;
+ failures.append(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
+ emitLoadDouble(op1, fpRegT0);
+ failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
+ if (shift) {
+ if (isUnsigned)
+ urshift32(Imm32(shift), regT0);
+ else
+ rshift32(Imm32(shift), regT0);
+ } else if (isUnsigned) // unsigned right shift by zero is essentially a toUint conversion; the result may not fit in an int32
+ failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
+ move(TrustedImm32(JSValue::Int32Tag), regT1);
+ emitStoreInt32(dst, regT0, false);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
+ failures.link(this);
+ }
+ if (isUnsigned && !shift)
+ linkSlowCase(iter); // failed to box in hot path
+ } else {
+ // op1 = regT1:regT0
+ // op2 = regT3:regT2
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // int32 check -- op1 is not an int
+ if (supportsFloatingPointTruncate()) {
+ JumpList failures;
+ failures.append(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag))); // op1 is not a double
+ emitLoadDouble(op1, fpRegT0);
+ failures.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag))); // op2 is not an int
+ failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
+ if (isUnsigned) {
+ urshift32(regT2, regT0);
+ failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
+ } else
+ rshift32(regT2, regT0);
+ move(TrustedImm32(JSValue::Int32Tag), regT1);
+ emitStoreInt32(dst, regT0, false);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
+ failures.link(this);
+ }
+ }
+
+ linkSlowCase(iter); // int32 check - op2 is not an int
+ if (isUnsigned)
+ linkSlowCase(iter); // Can't represent unsigned result as an immediate
+ }
+
+ JITStubCall stubCall(this, isUnsigned ? cti_op_urshift : cti_op_rshift);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// RightShift (>>)
+
+void JIT::emit_op_rshift(Instruction* currentInstruction)
+{
+ emitRightShift(currentInstruction, false);
+}
+
+void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ emitRightShiftSlowCase(currentInstruction, iter, false);
+}
+
+// UnsignedRightShift (>>>)
+
+void JIT::emit_op_urshift(Instruction* currentInstruction)
+{
+ emitRightShift(currentInstruction, true);
+}
+
+void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ emitRightShiftSlowCase(currentInstruction, iter, true);
+}
+
+// BitAnd (&)
+
+void JIT::emit_op_bitand(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ unsigned op;
+ int32_t constant;
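+ // When either operand is a constant int32, 'constant' receives its value
+ // and 'op' the remaining operand, which is the one loaded below.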
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitLoad(op, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ and32(Imm32(constant), regT0);
+ emitStoreAndMapInt32(dst, regT1, regT0, dst == op, OPCODE_LENGTH(op_bitand));
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ and32(regT2, regT0);
+ emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitand));
+}
+
+void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitand);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// BitOr (|)
+
+void JIT::emit_op_bitor(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitLoad(op, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ or32(Imm32(constant), regT0);
+ emitStoreAndMapInt32(dst, regT1, regT0, op == dst, OPCODE_LENGTH(op_bitor));
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ or32(regT2, regT0);
+ emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitor));
+}
+
+void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitor);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// BitXor (^)
+
+void JIT::emit_op_bitxor(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitLoad(op, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ xor32(Imm32(constant), regT0);
+ emitStoreAndMapInt32(dst, regT1, regT0, op == dst, OPCODE_LENGTH(op_bitxor));
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ xor32(regT2, regT0);
+ emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitxor));
+}
+
+void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitxor);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// BitNot (~)
+
+void JIT::emit_op_bitnot(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+
+ not32(regT0);
+ emitStoreAndMapInt32(dst, regT1, regT0, dst == src, OPCODE_LENGTH(op_bitnot));
+}
+
+void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitnot);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
+// PostInc (i++)
+
+void JIT::emit_op_post_inc(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+
+ if (dst == srcDst) // x = x++ is a noop for ints.
+ return;
+
+ move(regT0, regT2);
+ addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT2));
+ emitStoreInt32(srcDst, regT2, true);
+
+ emitStoreAndMapInt32(dst, regT1, regT0, false, OPCODE_LENGTH(op_post_inc));
+}
+
+void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ if (dst != srcDst)
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_post_inc);
+ stubCall.addArgument(srcDst);
+ stubCall.addArgument(Imm32(srcDst));
+ stubCall.call(dst);
+}
+
+// PostDec (i--)
+
+void JIT::emit_op_post_dec(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+
+ if (dst == srcDst) // x = x-- is a noop for ints.
+ return;
+
+ move(regT0, regT2);
+ addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT2));
+ emitStoreInt32(srcDst, regT2, true);
+
+ emitStoreAndMapInt32(dst, regT1, regT0, false, OPCODE_LENGTH(op_post_dec));
+}
+
+void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ if (dst != srcDst)
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_post_dec);
+ stubCall.addArgument(srcDst);
+ stubCall.addArgument(TrustedImm32(srcDst));
+ stubCall.call(dst);
+}
+
+// PreInc (++i)
+
+void JIT::emit_op_pre_inc(Instruction* currentInstruction)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
+ emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_pre_inc));
+}
+
+void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_pre_inc);
+ stubCall.addArgument(srcDst);
+ stubCall.call(srcDst);
+}
+
+// PreDec (--i)
+
+void JIT::emit_op_pre_dec(Instruction* currentInstruction)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
+ emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_pre_dec));
+}
+
+void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_pre_dec);
+ stubCall.addArgument(srcDst);
+ stubCall.call(srcDst);
+}
+
+// Addition (+)
+
+void JIT::emit_op_add(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
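+ // As in the JSVALUE64 path, a dummy slow case is recorded here so that
+ // emitSlow_op_add can discharge it via linkDummySlowCase.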
+ addSlowCase();
+ JITStubCall stubCall(this, cti_op_add);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+ return;
+ }
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+
+ // Int32 case.
+ addSlowCase(branchAdd32(Overflow, regT2, regT0));
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double case.
+ emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
+{
+ // Int32 case.
+ emitLoad(op, regT1, regT0);
+ Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
+ addSlowCase(branchAdd32(Overflow, Imm32(constant), regT0));
+ emitStoreInt32(dst, regT0, (op == dst));
+
+ // Double case.
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32);
+ return;
+ }
+ Jump end = jump();
+
+ notInt32.link(this);
+ if (!opType.definitelyIsNumber())
+ addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
+ move(Imm32(constant), regT2);
+ convertInt32ToDouble(regT2, fpRegT0);
+ emitLoadDouble(op, fpRegT1);
+ addDouble(fpRegT1, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+
+ end.link(this);
+}
+
+void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
+ linkDummySlowCase(iter);
+ return;
+ }
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint())
+ linkSlowCase(iter); // non-sse case
+ else {
+ ResultType opType = op == op1 ? types.first() : types.second();
+ if (!opType.definitelyIsNumber())
+ linkSlowCase(iter); // double check
+ }
+ } else {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_add);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// Subtraction (-)
+
+void JIT::emit_op_sub(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+
+ // Int32 case.
+ addSlowCase(branchSub32(Overflow, regT2, regT0));
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double case.
+ emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
+{
+ // Int32 case.
+ emitLoad(op, regT1, regT0);
+ Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
+ addSlowCase(branchSub32(Overflow, Imm32(constant), regT0));
+ emitStoreInt32(dst, regT0, (op == dst));
+
+ // Double case.
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32);
+ return;
+ }
+ Jump end = jump();
+
+ notInt32.link(this);
+ if (!opType.definitelyIsNumber())
+ addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
+ move(Imm32(constant), regT2);
+ convertInt32ToDouble(regT2, fpRegT0);
+ emitLoadDouble(op, fpRegT1);
+ subDouble(fpRegT0, fpRegT1);
+ emitStoreDouble(dst, fpRegT1);
+
+ end.link(this);
+}
+
+void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
+ linkSlowCase(iter); // int32 or double check
+ } else {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_sub);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
+{
+ JumpList end;
+
+ if (!notInt32Op1.empty()) {
+ // Double case 1: Op1 is not int32; Op2 is unknown.
+ notInt32Op1.link(this);
+
+ ASSERT(op1IsInRegisters);
+
+ // Verify Op1 is double.
+ if (!types.first().definitelyIsNumber())
+ addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
+
+ if (!op2IsInRegisters)
+ emitLoad(op2, regT3, regT2);
+
+ Jump doubleOp2 = branch32(Below, regT3, TrustedImm32(JSValue::LowestTag));
+
+ if (!types.second().definitelyIsNumber())
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+
+ convertInt32ToDouble(regT2, fpRegT0);
+ Jump doTheMath = jump();
+
+ // Load Op2 as double into double register.
+ doubleOp2.link(this);
+ emitLoadDouble(op2, fpRegT0);
+
+ // Do the math.
+ doTheMath.link(this);
+ switch (opcodeID) {
+ case op_mul:
+ emitLoadDouble(op1, fpRegT2);
+ mulDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_add:
+ emitLoadDouble(op1, fpRegT2);
+ addDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_sub:
+ emitLoadDouble(op1, fpRegT1);
+ subDouble(fpRegT0, fpRegT1);
+ emitStoreDouble(dst, fpRegT1);
+ break;
+ case op_div: {
+ emitLoadDouble(op1, fpRegT1);
+ divDouble(fpRegT0, fpRegT1);
+
+#if ENABLE(VALUE_PROFILER)
+ // Is the result actually an integer? The DFG JIT would really like to know. If it's
+ // not an integer, we increment a count. If this count, together with the slow case
+ // counter, is below threshold then the DFG JIT will compile this division with a
+ // speculation that the remainder is zero.
+
+ // As well, there are cases where a double result here would cause an important field
+ // in the heap to sometimes have doubles in it, resulting in double predictions getting
+ // propagated to a use site where it might cause damage (such as the index to an array
+ // access). So if we are DFG compiling anything in the program, we want this code to
+ // ensure that it produces integers whenever possible.
+
+ // FIXME: This will fail to convert to integer if the result is zero. We should
+ // distinguish between positive zero and negative zero here.
+
+ JumpList notInteger;
+ branchConvertDoubleToInt32(fpRegT1, regT2, notInteger, fpRegT0);
+ // If we've got an integer, we might as well make that the result of the division.
+ emitStoreInt32(dst, regT2);
+ Jump isInteger = jump();
+ notInteger.link(this);
+ add32(Imm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
+ emitStoreDouble(dst, fpRegT1);
+ isInteger.link(this);
+#else
+ emitStoreDouble(dst, fpRegT1);
+#endif
+ break;
+ }
+ case op_jless:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
+ break;
+ case op_jlesseq:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThanOrEqual, fpRegT2, fpRegT0), dst);
+ break;
+ case op_jgreater:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleGreaterThan, fpRegT2, fpRegT0), dst);
+ break;
+ case op_jgreatereq:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT2, fpRegT0), dst);
+ break;
+ case op_jnless:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
+ break;
+ case op_jnlesseq:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), dst);
+ break;
+ case op_jngreater:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
+ break;
+ case op_jngreatereq:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT0, fpRegT2), dst);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ if (!notInt32Op2.empty())
+ end.append(jump());
+ }
+
+ if (!notInt32Op2.empty()) {
+ // Double case 2: Op1 is int32; Op2 is not int32.
+ notInt32Op2.link(this);
+
+ ASSERT(op2IsInRegisters);
+
+ if (!op1IsInRegisters)
+ emitLoadPayload(op1, regT0);
+
+ convertInt32ToDouble(regT0, fpRegT0);
+
+ // Verify op2 is double.
+ if (!types.second().definitelyIsNumber())
+ addSlowCase(branch32(Above, regT3, TrustedImm32(JSValue::LowestTag)));
+
+ // Do the math.
+ switch (opcodeID) {
+ case op_mul:
+ emitLoadDouble(op2, fpRegT2);
+ mulDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_add:
+ emitLoadDouble(op2, fpRegT2);
+ addDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_sub:
+ emitLoadDouble(op2, fpRegT2);
+ subDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_div: {
+ emitLoadDouble(op2, fpRegT2);
+ divDouble(fpRegT2, fpRegT0);
+#if ENABLE(VALUE_PROFILER)
+ // Is the result actually an integer? The DFG JIT would really like to know. If it's
+ // not an integer, we increment a count. If this count, together with the slow case
+ // counter, is below threshold then the DFG JIT will compile this division with a
+ // speculation that the remainder is zero.
+
+ // As well, there are cases where a double result here would cause an important field
+ // in the heap to sometimes have doubles in it, resulting in double predictions getting
+ // propagated to a use site where it might cause damage (such as the index to an array
+ // access). So if we are DFG compiling anything in the program, we want this code to
+ // ensure that it produces integers whenever possible.
+
+ // FIXME: This will fail to convert to integer if the result is zero. We should
+ // distinguish between positive zero and negative zero here.
+
+ JumpList notInteger;
+ branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
+ // If we've got an integer, we might as well make that the result of the division.
+ emitStoreInt32(dst, regT2);
+ Jump isInteger = jump();
+ notInteger.link(this);
+ add32(Imm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
+ emitStoreDouble(dst, fpRegT0);
+ isInteger.link(this);
+#else
+ emitStoreDouble(dst, fpRegT0);
+#endif
+ break;
+ }
+ case op_jless:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
+ break;
+ case op_jlesseq:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT1), dst);
+ break;
+ case op_jgreater:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleGreaterThan, fpRegT0, fpRegT1), dst);
+ break;
+ case op_jgreatereq:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT0, fpRegT1), dst);
+ break;
+ case op_jnless:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
+ break;
+ case op_jnlesseq:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), dst);
+ break;
+ case op_jngreater:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
+ break;
+ case op_jngreatereq:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT1, fpRegT0), dst);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ }
+
+ end.link(this);
+}
+
+// Multiplication (*)
+
+void JIT::emit_op_mul(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+#if ENABLE(VALUE_PROFILER)
+ m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
+#endif
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+
+ // Int32 case.
+ move(regT0, regT3);
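+ // Keep the original lhs payload in regT3: branchMul32 overwrites regT0, and the slow path
+ // needs both operand payloads (regT2/regT3) to distinguish negative zero from +0.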
+ addSlowCase(branchMul32(Overflow, regT2, regT0));
+ addSlowCase(branchTest32(Zero, regT0));
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double case.
+ emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ Jump overflow = getSlowCase(iter); // overflow check
+ linkSlowCase(iter); // zero result check
+
+ Jump negZero = branchOr32(Signed, regT2, regT3);
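+ // Neither operand was negative, so a zero product is a genuine +0: store int32 zero and rejoin the fast path.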
+ emitStoreInt32(dst, TrustedImm32(0), (op1 == dst || op2 == dst));
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));
+
+ negZero.link(this);
+#if ENABLE(VALUE_PROFILER)
+ // We only get here if we have a genuine negative zero. Record this,
+ // so that the speculative JIT knows that we failed speculation
+ // because of a negative zero.
+ add32(Imm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
+#endif
+ overflow.link(this);
+
+ if (!supportsFloatingPoint()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ }
+
+ if (supportsFloatingPoint()) {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+
+ Label jitStubCall(this);
+ JITStubCall stubCall(this, cti_op_mul);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// Division (/)
+
+void JIT::emit_op_div(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+#if ENABLE(VALUE_PROFILER)
+ m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
+#endif
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(jump());
+ return;
+ }
+
+ // Int32 divide.
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ JumpList end;
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+
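+ // Both operands are int32, but the quotient may not be, so perform the division in double arithmetic.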
+ convertInt32ToDouble(regT0, fpRegT0);
+ convertInt32ToDouble(regT2, fpRegT1);
+ divDouble(fpRegT1, fpRegT0);
+#if ENABLE(VALUE_PROFILER)
+ // Is the result actually an integer? The DFG JIT would really like to know. If it's
+ // not an integer, we increment a count. If this count, together with the slow case
+ // counter, is below threshold then the DFG JIT will compile this division with a
+ // speculation that the remainder is zero.
+
+ // As well, there are cases where a double result here would cause an important field
+ // in the heap to sometimes have doubles in it, resulting in double predictions getting
+ // propagated to a use site where it might cause damage (such as the index to an array
+ // access). So if we are DFG compiling anything in the program, we want this code to
+ // ensure that it produces integers whenever possible.
+
+ // FIXME: This will fail to convert to integer if the result is zero. We should
+ // distinguish between positive zero and negative zero here.
+
+ JumpList notInteger;
+ branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
+ // If we've got an integer, we might as well make that the result of the division.
+ emitStoreInt32(dst, regT2);
+ end.append(jump());
+ notInteger.link(this);
+ add32(Imm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
+ emitStoreDouble(dst, fpRegT0);
+#else
+ emitStoreDouble(dst, fpRegT0);
+#endif
+ end.append(jump());
+
+ // Double divide.
+ emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!supportsFloatingPoint())
+ linkSlowCase(iter);
+ else {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_div);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// Mod (%)
+
+/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
+
+void JIT::emit_op_mod(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+#if ENABLE(JIT_USE_SOFT_MODULO)
+
+#if CPU(X86) || CPU(X86_64)
+ // Make sure registers are correct for x86 IDIV instructions.
+ ASSERT(regT0 == X86Registers::eax);
+ ASSERT(regT1 == X86Registers::edx);
+ ASSERT(regT2 == X86Registers::ecx);
+ ASSERT(regT3 == X86Registers::ebx);
+#endif
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+
+ addSlowCase(branch32(Equal, regT2, TrustedImm32(0)));
+
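+ // All three slow cases (non-int32 lhs, non-int32 rhs, zero divisor) fall back to cti_op_mod;
+ // otherwise the soft modulo thunk computes the remainder, stored below as an int32 from regT0.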
+ emitNakedCall(m_globalData->jitStubs->ctiSoftModulo());
+
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+#else
+ JITStubCall stubCall(this, cti_op_mod);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+#endif
+}
+
+void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+#if ENABLE(JIT_USE_SOFT_MODULO)
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_mod);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(result);
+#else
+ UNUSED_PARAM(currentInstruction);
+ UNUSED_PARAM(iter);
+ ASSERT_NOT_REACHED();
+#endif
+}
+
+/* ------------------------------ END: OP_MOD ------------------------------ */
+
+} // namespace JSC
+
+#endif // USE(JSVALUE32_64)
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITCall.cpp b/Source/JavaScriptCore/jit/JITCall.cpp
new file mode 100644
index 000000000..465f1add8
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITCall.cpp
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#if USE(JSVALUE64)
+#include "JIT.h"
+
+#include "Arguments.h"
+#include "CodeBlock.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "Interpreter.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+void JIT::emit_op_call_put_result(Instruction* instruction)
+{
+ int dst = instruction[1].u.operand;
+ emitValueProfilingSite(FirstProfilingSite);
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::compileLoadVarargs(Instruction* instruction)
+{
+ int thisValue = instruction[2].u.operand;
+ int arguments = instruction[3].u.operand;
+ int firstFreeRegister = instruction[4].u.operand;
+
+ killLastResultRegister();
+
+ JumpList slowCase;
+ JumpList end;
+ if (m_codeBlock->usesArguments() && arguments == m_codeBlock->argumentsRegister()) {
+ emitGetVirtualRegister(arguments, regT0);
+ slowCase.append(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(JSValue()))));
+
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+ slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
+ // regT0: argumentCountIncludingThis
+
+ move(regT0, regT1);
+ add32(TrustedImm32(firstFreeRegister + RegisterFile::CallFrameHeaderSize), regT1);
+ lshift32(TrustedImm32(3), regT1);
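+ // Scale the register count by sizeof(Register) (8 bytes) to turn it into a byte offset.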
+ addPtr(callFrameRegister, regT1);
+ // regT1: newCallFrame
+
+ slowCase.append(branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT1));
+
+ // Initialize ArgumentCount.
+ emitFastArithReTagImmediate(regT0, regT2);
+ storePtr(regT2, Address(regT1, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
+
+ // Initialize 'this'.
+ emitGetVirtualRegister(thisValue, regT2);
+ storePtr(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
+
+ // Copy arguments.
+ neg32(regT0);
+ signExtend32ToPtr(regT0, regT0);
+ end.append(branchAddPtr(Zero, Imm32(1), regT0));
+ // regT0: -argumentCount
+
+ Label copyLoop = label();
+ loadPtr(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2);
+ storePtr(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
+ branchAddPtr(NonZero, Imm32(1), regT0).linkTo(copyLoop, this);
+
+ end.append(jump());
+ }
+
+ if (m_codeBlock->usesArguments() && arguments == m_codeBlock->argumentsRegister())
+ slowCase.link(this);
+
+ JITStubCall stubCall(this, cti_op_load_varargs);
+ stubCall.addArgument(thisValue, regT0);
+ stubCall.addArgument(arguments, regT0);
+ stubCall.addArgument(Imm32(firstFreeRegister));
+ stubCall.call(regT1);
+
+ if (m_codeBlock->usesArguments() && arguments == m_codeBlock->argumentsRegister())
+ end.link(this);
+}
+
+void JIT::compileCallEval()
+{
+ JITStubCall stubCall(this, cti_op_call_eval); // Initializes ScopeChain; ReturnPC; CodeBlock.
+ stubCall.call();
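+ // The stub returns the empty JSValue when it could not handle the call as eval; in that case the
+ // slow path below performs an ordinary virtual call on the frame's Callee slot.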
+ addSlowCase(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(JSValue()))));
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileCallEvalSlowCase(Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT0);
+ emitNakedCall(m_globalData->jitStubs->ctiVirtualCall());
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
+{
+ int callee = instruction[1].u.operand;
+
+ /* Caller always:
+ - Updates callFrameRegister to callee callFrame.
+ - Initializes ArgumentCount; CallerFrame; Callee.
+
+ For a JS call:
+ - Caller initializes ScopeChain.
+ - Callee initializes ReturnPC; CodeBlock.
+ - Callee restores callFrameRegister before return.
+
+ For a non-JS call:
+ - Caller initializes ScopeChain; ReturnPC; CodeBlock.
+ - Caller restores callFrameRegister after return.
+ */
+
+ if (opcodeID == op_call_varargs)
+ compileLoadVarargs(instruction);
+ else {
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+
+ addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT1);
+ store32(TrustedImm32(argCount), Address(regT1, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
+ } // regT1 holds newCallFrame with ArgumentCount initialized.
+ emitGetVirtualRegister(callee, regT0); // regT0 holds callee.
+
+ storePtr(callFrameRegister, Address(regT1, RegisterFile::CallerFrame * static_cast<int>(sizeof(Register))));
+ storePtr(regT0, Address(regT1, RegisterFile::Callee * static_cast<int>(sizeof(Register))));
+ move(regT1, callFrameRegister);
+
+ if (opcodeID == op_call_eval) {
+ compileCallEval();
+ return;
+ }
+
+ DataLabelPtr addressOfLinkedFunctionCheck;
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
+ Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(JSValue::encode(JSValue())));
+ END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
+ addSlowCase(slowCase);
+
+ ASSERT_JIT_OFFSET(differenceBetween(addressOfLinkedFunctionCheck, slowCase), patchOffsetOpCallCompareToJump);
+ ASSERT(m_callStructureStubCompilationInfo.size() == callLinkInfoIndex);
+ m_callStructureStubCompilationInfo.append(StructureStubCompilationInfo());
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callType = CallLinkInfo::callTypeFor(opcodeID);
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset;
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT1);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction*, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex)
+{
+ if (opcodeID == op_call_eval) {
+ compileCallEvalSlowCase(iter);
+ return;
+ }
+
+ linkSlowCase(iter);
+
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstructLink() : m_globalData->jitStubs->ctiVirtualCallLink());
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+} // namespace JSC
+
+#endif // USE(JSVALUE64)
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITCall32_64.cpp b/Source/JavaScriptCore/jit/JITCall32_64.cpp
new file mode 100644
index 000000000..01c9db094
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITCall32_64.cpp
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#if USE(JSVALUE32_64)
+#include "JIT.h"
+
+#include "Arguments.h"
+#include "CodeBlock.h"
+#include "Interpreter.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+void JIT::emit_op_call_put_result(Instruction* instruction)
+{
+ int dst = instruction[1].u.operand;
+ emitValueProfilingSite(FirstProfilingSite);
+ emitStore(dst, regT1, regT0);
+}
+
+void JIT::emit_op_ret(Instruction* currentInstruction)
+{
+ emitOptimizationCheck(RetOptimizationCheck);
+
+ unsigned dst = currentInstruction[1].u.operand;
+
+ emitLoad(dst, regT1, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
+}
+
+void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
+{
+ emitOptimizationCheck(RetOptimizationCheck);
+
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned thisReg = currentInstruction[2].u.operand;
+
+ emitLoad(result, regT1, regT0);
+ Jump notJSCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ Jump notObject = emitJumpIfNotObject(regT2);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
+
+ notJSCell.link(this);
+ notObject.link(this);
+ emitLoad(thisReg, regT1, regT0);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
+}
+
+void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++);
+}
+
+void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_call_eval, currentInstruction, iter, m_callLinkInfoIndex);
+}
+
+void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
+}
+
+void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_construct, currentInstruction, iter, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_call(Instruction* currentInstruction)
+{
+ compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_call_eval(Instruction* currentInstruction)
+{
+ compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex);
+}
+
+void JIT::emit_op_call_varargs(Instruction* currentInstruction)
+{
+ compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_construct(Instruction* currentInstruction)
+{
+ compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::compileLoadVarargs(Instruction* instruction)
+{
+ int thisValue = instruction[2].u.operand;
+ int arguments = instruction[3].u.operand;
+ int firstFreeRegister = instruction[4].u.operand;
+
+ JumpList slowCase;
+ JumpList end;
+ if (m_codeBlock->usesArguments() && arguments == m_codeBlock->argumentsRegister()) {
+ emitLoadTag(arguments, regT1);
+ slowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)));
+
+ load32(payloadFor(RegisterFile::ArgumentCount), regT2);
+ slowCase.append(branch32(Above, regT2, TrustedImm32(Arguments::MaxArguments + 1)));
+ // regT2: argumentCountIncludingThis
+
+ move(regT2, regT3);
+ add32(TrustedImm32(firstFreeRegister + RegisterFile::CallFrameHeaderSize), regT3);
+ lshift32(TrustedImm32(3), regT3);
+ addPtr(callFrameRegister, regT3);
+ // regT3: newCallFrame
+
+ slowCase.append(branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT3));
+
+ // Initialize ArgumentCount.
+ store32(regT2, payloadFor(RegisterFile::ArgumentCount, regT3));
+
+ // Initialize 'this'.
+ emitLoad(thisValue, regT1, regT0);
+ store32(regT0, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
+ store32(regT1, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
+
+ // Copy arguments.
+ neg32(regT2);
+ end.append(branchAdd32(Zero, Imm32(1), regT2));
+ // regT2: -argumentCount;
+
+ Label copyLoop = label();
+ load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT0);
+ load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT1);
+ store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
+ store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
+ branchAdd32(NonZero, Imm32(1), regT2).linkTo(copyLoop, this);
+
+ end.append(jump());
+ }
+
+ if (m_codeBlock->usesArguments() && arguments == m_codeBlock->argumentsRegister())
+ slowCase.link(this);
+
+ JITStubCall stubCall(this, cti_op_load_varargs);
+ stubCall.addArgument(thisValue);
+ stubCall.addArgument(arguments);
+ stubCall.addArgument(Imm32(firstFreeRegister));
+ stubCall.call(regT3);
+
+ if (m_codeBlock->usesArguments() && arguments == m_codeBlock->argumentsRegister())
+ end.link(this);
+}
+
+void JIT::compileCallEval()
+{
+ JITStubCall stubCall(this, cti_op_call_eval); // Initializes ScopeChain; ReturnPC; CodeBlock.
+ stubCall.call();
+ addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileCallEvalSlowCase(Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+
+ emitLoad(RegisterFile::Callee, regT1, regT0);
+ emitNakedCall(m_globalData->jitStubs->ctiVirtualCall());
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
+{
+ int callee = instruction[1].u.operand;
+
+ /* Caller always:
+ - Updates callFrameRegister to callee callFrame.
+ - Initializes ArgumentCount; CallerFrame; Callee.
+
+ For a JS call:
+ - Caller initializes ScopeChain.
+ - Callee initializes ReturnPC; CodeBlock.
+ - Callee restores callFrameRegister before return.
+
+ For a non-JS call:
+ - Caller initializes ScopeChain; ReturnPC; CodeBlock.
+ - Caller restores callFrameRegister after return.
+ */
+
+ if (opcodeID == op_call_varargs)
+ compileLoadVarargs(instruction);
+ else {
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+
+ addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT3);
+
+ store32(TrustedImm32(argCount), payloadFor(RegisterFile::ArgumentCount, regT3));
+ } // regT3 holds newCallFrame with ArgumentCount initialized.
+ emitLoad(callee, regT1, regT0); // regT1, regT0 holds callee.
+
+ storePtr(callFrameRegister, Address(regT3, RegisterFile::CallerFrame * static_cast<int>(sizeof(Register))));
+ emitStore(RegisterFile::Callee, regT1, regT0, regT3);
+ move(regT3, callFrameRegister);
+
+ if (opcodeID == op_call_eval) {
+ compileCallEval();
+ return;
+ }
+
+ DataLabelPtr addressOfLinkedFunctionCheck;
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
+ Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
+ END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
+
+ addSlowCase(slowCase);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+
+ ASSERT_JIT_OFFSET(differenceBetween(addressOfLinkedFunctionCheck, slowCase), patchOffsetOpCallCompareToJump);
+ ASSERT(m_callStructureStubCompilationInfo.size() == callLinkInfoIndex);
+ m_callStructureStubCompilationInfo.append(StructureStubCompilationInfo());
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callType = CallLinkInfo::callTypeFor(opcodeID);
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset;
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT1);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction*, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex)
+{
+ if (opcodeID == op_call_eval) {
+ compileCallEvalSlowCase(iter);
+ return;
+ }
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstructLink() : m_globalData->jitStubs->ctiVirtualCallLink());
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+} // namespace JSC
+
+#endif // USE(JSVALUE32_64)
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITCode.h b/Source/JavaScriptCore/jit/JITCode.h
new file mode 100644
index 000000000..f63c4a1a8
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITCode.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITCode_h
+#define JITCode_h
+
+#if ENABLE(JIT)
+#include "CallFrame.h"
+#include "JSValue.h"
+#include "MacroAssemblerCodeRef.h"
+#include "Profiler.h"
+#endif
+
+namespace JSC {
+
+#if ENABLE(JIT)
+ class JSGlobalData;
+ class RegisterFile;
+#endif
+
+ class JITCode {
+#if ENABLE(JIT)
+ typedef MacroAssemblerCodeRef CodeRef;
+ typedef MacroAssemblerCodePtr CodePtr;
+#else
+ JITCode() { }
+#endif
+ public:
+ enum JITType { HostCallThunk, BaselineJIT, DFGJIT };
+
+ static JITType bottomTierJIT()
+ {
+ return BaselineJIT;
+ }
+
+ static JITType topTierJIT()
+ {
+ return DFGJIT;
+ }
+
+ static JITType nextTierJIT(JITType jitType)
+ {
+ ASSERT_UNUSED(jitType, jitType == BaselineJIT || jitType == DFGJIT);
+ return DFGJIT;
+ }
+
+#if ENABLE(JIT)
+ JITCode()
+ {
+ }
+
+ JITCode(const CodeRef ref, JITType jitType)
+ : m_ref(ref)
+ , m_jitType(jitType)
+ {
+ }
+
+ bool operator !() const
+ {
+ return !m_ref;
+ }
+
+ CodePtr addressForCall()
+ {
+ return m_ref.code();
+ }
+
+ void* executableAddressAtOffset(size_t offset) const
+ {
+ ASSERT(offset < size());
+ return reinterpret_cast<char*>(m_ref.code().executableAddress()) + offset;
+ }
+
+ void* dataAddressAtOffset(size_t offset) const
+ {
+ ASSERT(offset < size());
+ return reinterpret_cast<char*>(m_ref.code().dataLocation()) + offset;
+ }
+
+ // This function returns the offset in bytes of 'pointerIntoCode' into
+ // this block of code. The pointer provided must be a pointer into this
+ // block of code. It is ASSERTed that no code block exceeds 4GB in size.
+ unsigned offsetOf(void* pointerIntoCode)
+ {
+ intptr_t result = reinterpret_cast<intptr_t>(pointerIntoCode) - reinterpret_cast<intptr_t>(m_ref.code().executableAddress());
+ ASSERT(static_cast<intptr_t>(static_cast<unsigned>(result)) == result);
+ return static_cast<unsigned>(result);
+ }
+
+ // Execute the code!
+ inline JSValue execute(RegisterFile* registerFile, CallFrame* callFrame, JSGlobalData* globalData)
+ {
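+ // Enter the generated code through ctiTrampoline; if an exception is pending afterwards,
+ // discard the result and return null.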
+ JSValue result = JSValue::decode(ctiTrampoline(m_ref.code().executableAddress(), registerFile, callFrame, 0, Profiler::enabledProfilerReference(), globalData));
+ return globalData->exception ? jsNull() : result;
+ }
+
+ void* start() const
+ {
+ return m_ref.code().dataLocation();
+ }
+
+ size_t size() const
+ {
+ ASSERT(m_ref.code().executableAddress());
+ return m_ref.size();
+ }
+
+ ExecutableMemoryHandle* getExecutableMemory()
+ {
+ return m_ref.executableMemory();
+ }
+
+ JITType jitType()
+ {
+ return m_jitType;
+ }
+
+ // Host functions are a bit special; they have an m_code pointer but they
+ // do not individually ref the executable pool containing the trampoline.
+ static JITCode HostFunction(CodeRef code)
+ {
+ return JITCode(code, HostCallThunk);
+ }
+
+ void clear()
+ {
+ m_ref.~CodeRef();
+ new (NotNull, &m_ref) CodeRef();
+ }
+
+ private:
+ JITCode(PassRefPtr<ExecutableMemoryHandle> executableMemory, JITType jitType)
+ : m_ref(executableMemory)
+ , m_jitType(jitType)
+ {
+ }
+
+ CodeRef m_ref;
+ JITType m_jitType;
+#endif // ENABLE(JIT)
+ };
+
+} // namespace JSC
+
+#endif
diff --git a/Source/JavaScriptCore/jit/JITInlineMethods.h b/Source/JavaScriptCore/jit/JITInlineMethods.h
new file mode 100644
index 000000000..5540ffab3
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITInlineMethods.h
@@ -0,0 +1,987 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITInlineMethods_h
+#define JITInlineMethods_h
+
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+/* Deprecated: Please use JITStubCall instead. */
+
+ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
+{
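+ // Stub arguments are passed on the stack: each JSValue occupies sizeof(JSValue) / sizeof(void*)
+ // machine words starting at JITSTACKFRAME_ARGS_INDEX, so peek the requested slot into dst.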
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
+ peek(dst, argumentStackOffset);
+}
+
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
+{
+ return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
+}
+
+ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
+{
+ ASSERT(m_codeBlock->isConstantRegisterIndex(src));
+ return m_codeBlock->getConstant(src);
+}
+
+ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
+{
+ storePtr(from, payloadFor(entry, callFrameRegister));
+}
+
+ALWAYS_INLINE void JIT::emitPutCellToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
+{
+#if USE(JSVALUE32_64)
+ store32(TrustedImm32(JSValue::CellTag), tagFor(entry, callFrameRegister));
+#endif
+ storePtr(from, payloadFor(entry, callFrameRegister));
+}
+
+ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
+{
+ store32(TrustedImm32(Int32Tag), intTagFor(entry, callFrameRegister));
+ store32(from, intPayloadFor(entry, callFrameRegister));
+}
+
+ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
+{
+ storePtr(TrustedImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
+}
+
+ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
+{
+ loadPtr(Address(from, entry * sizeof(Register)), to);
+#if USE(JSVALUE64)
+ killLastResultRegister();
+#endif
+}
+
+ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
+{
+ failures.append(branchPtr(NotEqual, Address(src, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info)));
+ failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), TrustedImm32(1)));
+ loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
+ failures.append(branchTest32(Zero, dst));
+ loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplFlagsOffset()), regT1);
+ loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplDataOffset()), dst);
+
+ JumpList is16Bit;
+ JumpList cont8Bit;
+ is16Bit.append(branchTest32(Zero, regT1, TrustedImm32(ThunkHelpers::stringImpl8BitFlag())));
+ load8(MacroAssembler::Address(dst, 0), dst);
+ cont8Bit.append(jump());
+ is16Bit.link(this);
+ load16(MacroAssembler::Address(dst, 0), dst);
+ cont8Bit.link(this);
+}
+
+ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
+{
+ load32(Address(from, entry * sizeof(Register)), to);
+#if USE(JSVALUE64)
+ killLastResultRegister();
+#endif
+}
+
+ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
+{
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+
+ Call nakedCall = nearCall();
+ m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
+ return nakedCall;
+}
+
+ALWAYS_INLINE bool JIT::atJumpTarget()
+{
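+ // Advance m_jumpTargetsPosition past targets behind the current bytecode offset and report
+ // whether the current offset is itself a jump target.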
+ while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeOffset) {
+ if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeOffset)
+ return true;
+ ++m_jumpTargetsPosition;
+ }
+ return false;
+}
+
+#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+
+ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
+{
+ JSInterfaceJIT::beginUninterruptedSequence();
+#if CPU(ARM_TRADITIONAL)
+#ifndef NDEBUG
+ // Ensure the label after the sequence can also fit
+ insnSpace += sizeof(ARMWord);
+ constSpace += sizeof(uint64_t);
+#endif
+
+ ensureSpace(insnSpace, constSpace);
+
+#elif CPU(SH4)
+#ifndef NDEBUG
+ insnSpace += sizeof(SH4Word);
+ constSpace += sizeof(uint64_t);
+#endif
+
+ m_assembler.ensureSpace(insnSpace + m_assembler.maxInstructionSize + 2, constSpace + 8);
+#endif
+
+#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+#ifndef NDEBUG
+ m_uninterruptedInstructionSequenceBegin = label();
+ m_uninterruptedConstantSequenceBegin = sizeOfConstantPool();
+#endif
+#endif
+}
+
+ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace, int dst)
+{
+ UNUSED_PARAM(dst);
+#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+ /* There are several cases where the uninterrupted sequence is larger than the
+ * maximum offset required for patching the same sequence. E.g.: if the last
+ * macroassembler instruction in an uninterrupted sequence is a stub call, it
+ * emits store instruction(s) which should not be included in the calculation
+ * of the length of the uninterrupted sequence. So insnSpace and constSpace
+ * should be treated as upper limits rather than hard limits.
+ */
+#if CPU(SH4)
+ if ((dst > 15) || (dst < -16)) {
+ insnSpace += 8;
+ constSpace += 2;
+ }
+
+ if (((dst >= -16) && (dst < 0)) || ((dst > 7) && (dst <= 15)))
+ insnSpace += 8;
+#endif
+ ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) <= insnSpace);
+ ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin <= constSpace);
+#endif
+ JSInterfaceJIT::endUninterruptedSequence();
+}
+
+#endif
+
+#if CPU(ARM)
+
+ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
+{
+ move(linkRegister, reg);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
+{
+ move(reg, linkRegister);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
+{
+ loadPtr(address, linkRegister);
+}
+#elif CPU(SH4)
+
+ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
+{
+ m_assembler.stspr(reg);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
+{
+ m_assembler.ldspr(reg);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
+{
+ loadPtrLinkReg(address);
+}
+
+#elif CPU(MIPS)
+
+ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
+{
+ move(returnAddressRegister, reg);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
+{
+ move(reg, returnAddressRegister);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
+{
+ loadPtr(address, returnAddressRegister);
+}
+
+#else // CPU(X86) || CPU(X86_64)
+
+ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
+{
+ pop(reg);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
+{
+ push(reg);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
+{
+ push(address);
+}
+
+#endif
+
+ALWAYS_INLINE void JIT::restoreArgumentReference()
+{
+ move(stackPointerRegister, firstArgumentRegister);
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+}
+
+ALWAYS_INLINE void JIT::updateTopCallFrame()
+{
+ storePtr(callFrameRegister, &m_globalData->topCallFrame);
+}
+
+ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
+{
+#if CPU(X86)
+ // Within a trampoline the return address will be on the stack at this point.
+ addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
+#elif CPU(ARM)
+ move(stackPointerRegister, firstArgumentRegister);
+#elif CPU(SH4)
+ move(stackPointerRegister, firstArgumentRegister);
+#endif
+ // In the trampoline on x86-64, the first argument register is not overwritten.
+}
+
+ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
+{
+ return branchPtr(NotEqual, Address(reg, JSCell::structureOffset()), TrustedImmPtr(structure));
+}
+
+ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
+{
+ if (!m_codeBlock->isKnownNotImmediate(vReg))
+ linkSlowCase(iter);
+}
+
+ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
+{
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+
+ m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
+}
+
+ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
+{
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+
+ const JumpList::JumpVector& jumpVector = jumpList.jumps();
+ size_t size = jumpVector.size();
+ for (size_t i = 0; i < size; ++i)
+ m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeOffset));
+}
+
+ALWAYS_INLINE void JIT::addSlowCase()
+{
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+
+ Jump emptyJump; // Doing it this way to make Windows happy.
+ m_slowCases.append(SlowCaseEntry(emptyJump, m_bytecodeOffset));
+}
+
+ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
+{
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+
+ m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
+{
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+
+ jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotObject(RegisterID structureReg)
+{
+ return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType type)
+{
+ loadPtr(Address(baseReg, JSCell::structureOffset()), scratchReg);
+ return branch8(NotEqual, Address(scratchReg, Structure::typeInfoTypeOffset()), TrustedImm32(type));
+}
+
+#if ENABLE(SAMPLING_FLAGS)
+ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
+{
+ ASSERT(flag >= 1);
+ ASSERT(flag <= 32);
+ or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags()));
+}
+
+ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
+{
+ ASSERT(flag >= 1);
+ ASSERT(flag <= 32);
+ and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags()));
+}
+#endif
+
+#if ENABLE(SAMPLING_COUNTERS)
+ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, int32_t count)
+{
+ add64(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter()));
+}
+#endif
+
+#if ENABLE(OPCODE_SAMPLING)
+#if CPU(X86_64)
+ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
+{
+ move(TrustedImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
+ storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
+}
+#else
+ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
+{
+ storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
+}
+#endif
+#endif
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+#if CPU(X86_64)
+ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
+{
+ move(TrustedImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
+ storePtr(TrustedImmPtr(codeBlock), X86Registers::ecx);
+}
+#else
+ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
+{
+ storePtr(TrustedImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
+}
+#endif
+#endif
+
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
+{
+ return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
+}
+
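+// Inline allocation fast path: pop the first free cell off the size class's
+// free list and initialize its header (structure, classInfo, inheritor ID and
+// property storage). If the free list is empty we take the slow case instead.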
+template <typename ClassType, typename StructureType> inline void JIT::emitAllocateBasicJSObject(StructureType structure, RegisterID result, RegisterID storagePtr)
+{
+ MarkedSpace::SizeClass* sizeClass = &m_globalData->heap.sizeClassForObject(sizeof(ClassType));
+ loadPtr(&sizeClass->firstFreeCell, result);
+ addSlowCase(branchTestPtr(Zero, result));
+
+ // remove the object from the free list
+ loadPtr(Address(result), storagePtr);
+ storePtr(storagePtr, &sizeClass->firstFreeCell);
+
+ // initialize the object's structure
+ storePtr(structure, Address(result, JSCell::structureOffset()));
+
+ // initialize the object's classInfo pointer
+ storePtr(TrustedImmPtr(&ClassType::s_info), Address(result, JSCell::classInfoOffset()));
+
+ // initialize the inheritor ID
+ storePtr(TrustedImmPtr(0), Address(result, JSObject::offsetOfInheritorID()));
+
+ // initialize the object's property storage pointer
+ addPtr(TrustedImm32(sizeof(JSObject)), result, storagePtr);
+ storePtr(storagePtr, Address(result, ClassType::offsetOfPropertyStorage()));
+}
+
+template <typename T> inline void JIT::emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID scratch)
+{
+ emitAllocateBasicJSObject<JSFinalObject>(structure, result, scratch);
+}
+
+inline void JIT::emitAllocateJSFunction(FunctionExecutable* executable, RegisterID scopeChain, RegisterID result, RegisterID storagePtr)
+{
+ emitAllocateBasicJSObject<JSFunction>(TrustedImmPtr(m_codeBlock->globalObject()->namedFunctionStructure()), result, storagePtr);
+
+ // store the function's scope chain
+ storePtr(scopeChain, Address(result, JSFunction::offsetOfScopeChain()));
+
+ // store the function's executable member
+ storePtr(TrustedImmPtr(executable), Address(result, JSFunction::offsetOfExecutable()));
+
+ // store the function's name
+ ASSERT(executable->nameValue());
+ int functionNameOffset = sizeof(JSValue) * m_codeBlock->globalObject()->functionNameOffset();
+ storePtr(TrustedImmPtr(executable->nameValue()), Address(regT1, functionNameOffset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+#if USE(JSVALUE32_64)
+ store32(TrustedImm32(JSValue::CellTag), Address(regT1, functionNameOffset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+#endif
+}
+
+#if ENABLE(VALUE_PROFILER)
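+// Records the JSValue currently in regT0 (plus the tag in regT1 on
+// JSVALUE32_64) into the given ValueProfile. With a single bucket the store is
+// direct; otherwise a bucket is chosen pseudo-randomly by bumping
+// bucketCounterRegister by 1 or 3 and masking with bucketIndexMask.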
+inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
+{
+ ASSERT(shouldEmitProfiling());
+ ASSERT(valueProfile);
+
+ const RegisterID value = regT0;
+#if USE(JSVALUE32_64)
+ const RegisterID valueTag = regT1;
+#endif
+ const RegisterID scratch = regT3;
+
+ if (ValueProfile::numberOfBuckets == 1) {
+ // We're in a simple configuration: only one bucket, so we can just do a direct
+ // store.
+#if USE(JSVALUE64)
+ storePtr(value, valueProfile->m_buckets);
+#else
+ EncodedValueDescriptor* descriptor = bitwise_cast<EncodedValueDescriptor*>(valueProfile->m_buckets);
+ store32(value, &descriptor->asBits.payload);
+ store32(valueTag, &descriptor->asBits.tag);
+#endif
+ return;
+ }
+
+ if (m_randomGenerator.getUint32() & 1)
+ add32(Imm32(1), bucketCounterRegister);
+ else
+ add32(Imm32(3), bucketCounterRegister);
+ and32(Imm32(ValueProfile::bucketIndexMask), bucketCounterRegister);
+ move(ImmPtr(valueProfile->m_buckets), scratch);
+#if USE(JSVALUE64)
+ storePtr(value, BaseIndex(scratch, bucketCounterRegister, TimesEight));
+#elif USE(JSVALUE32_64)
+ store32(value, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ store32(valueTag, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+#endif
+}
+
+inline void JIT::emitValueProfilingSite(ValueProfilingSiteKind siteKind)
+{
+ if (!shouldEmitProfiling())
+ return;
+
+ ValueProfile* valueProfile;
+ if (siteKind == FirstProfilingSite)
+ valueProfile = m_codeBlock->addValueProfile(m_bytecodeOffset);
+ else {
+ ASSERT(siteKind == SubsequentProfilingSite);
+ valueProfile = m_codeBlock->valueProfileForBytecodeOffset(m_bytecodeOffset);
+ }
+
+ emitValueProfilingSite(valueProfile);
+}
+#endif
+
+#if USE(JSVALUE32_64)
+
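+// With the JSVALUE32_64 representation a value is split into a 32-bit tag and
+// a 32-bit payload. The emitLoad*/emitStore* helpers below move those halves
+// between the register file and hardware registers, consulting the tag/payload
+// mapping cache (see map()/unmap() further down) to skip loads whose result is
+// already held in a register.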
+inline void JIT::emitLoadTag(int index, RegisterID tag)
+{
+ RegisterID mappedTag;
+ if (getMappedTag(index, mappedTag)) {
+ move(mappedTag, tag);
+ unmap(tag);
+ return;
+ }
+
+ if (m_codeBlock->isConstantRegisterIndex(index)) {
+ move(Imm32(getConstantOperand(index).tag()), tag);
+ unmap(tag);
+ return;
+ }
+
+ load32(tagFor(index), tag);
+ unmap(tag);
+}
+
+inline void JIT::emitLoadPayload(int index, RegisterID payload)
+{
+ RegisterID mappedPayload;
+ if (getMappedPayload(index, mappedPayload)) {
+ move(mappedPayload, payload);
+ unmap(payload);
+ return;
+ }
+
+ if (m_codeBlock->isConstantRegisterIndex(index)) {
+ move(Imm32(getConstantOperand(index).payload()), payload);
+ unmap(payload);
+ return;
+ }
+
+ load32(payloadFor(index), payload);
+ unmap(payload);
+}
+
+inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
+{
+ move(Imm32(v.payload()), payload);
+ move(Imm32(v.tag()), tag);
+}
+
+inline void JIT::emitLoad(int index, RegisterID tag, RegisterID payload, RegisterID base)
+{
+ ASSERT(tag != payload);
+
+ if (base == callFrameRegister) {
+ ASSERT(payload != base);
+ emitLoadPayload(index, payload);
+ emitLoadTag(index, tag);
+ return;
+ }
+
+ if (payload == base) { // avoid stomping base
+ load32(tagFor(index, base), tag);
+ load32(payloadFor(index, base), payload);
+ return;
+ }
+
+ load32(payloadFor(index, base), payload);
+ load32(tagFor(index, base), tag);
+}
+
+inline void JIT::emitLoad2(int index1, RegisterID tag1, RegisterID payload1, int index2, RegisterID tag2, RegisterID payload2)
+{
+ if (isMapped(index1)) {
+ emitLoad(index1, tag1, payload1);
+ emitLoad(index2, tag2, payload2);
+ return;
+ }
+ emitLoad(index2, tag2, payload2);
+ emitLoad(index1, tag1, payload1);
+}
+
+inline void JIT::emitLoadDouble(int index, FPRegisterID value)
+{
+ if (m_codeBlock->isConstantRegisterIndex(index)) {
+ WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
+ loadDouble(&inConstantPool, value);
+ } else
+ loadDouble(addressFor(index), value);
+}
+
+inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
+{
+ if (m_codeBlock->isConstantRegisterIndex(index)) {
+ WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
+ char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
+ convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
+ } else
+ convertInt32ToDouble(payloadFor(index), value);
+}
+
+inline void JIT::emitStore(int index, RegisterID tag, RegisterID payload, RegisterID base)
+{
+ store32(payload, payloadFor(index, base));
+ store32(tag, tagFor(index, base));
+}
+
+inline void JIT::emitStoreInt32(int index, RegisterID payload, bool indexIsInt32)
+{
+ store32(payload, payloadFor(index, callFrameRegister));
+ if (!indexIsInt32)
+ store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
+}
+
+inline void JIT::emitStoreAndMapInt32(int index, RegisterID tag, RegisterID payload, bool indexIsInt32, size_t opcodeLength)
+{
+ emitStoreInt32(index, payload, indexIsInt32);
+ map(m_bytecodeOffset + opcodeLength, index, tag, payload);
+}
+
+inline void JIT::emitStoreInt32(int index, TrustedImm32 payload, bool indexIsInt32)
+{
+ store32(payload, payloadFor(index, callFrameRegister));
+ if (!indexIsInt32)
+ store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
+}
+
+inline void JIT::emitStoreCell(int index, RegisterID payload, bool indexIsCell)
+{
+ store32(payload, payloadFor(index, callFrameRegister));
+ if (!indexIsCell)
+ store32(TrustedImm32(JSValue::CellTag), tagFor(index, callFrameRegister));
+}
+
+inline void JIT::emitStoreBool(int index, RegisterID payload, bool indexIsBool)
+{
+ store32(payload, payloadFor(index, callFrameRegister));
+ if (!indexIsBool)
+ store32(TrustedImm32(JSValue::BooleanTag), tagFor(index, callFrameRegister));
+}
+
+inline void JIT::emitStoreDouble(int index, FPRegisterID value)
+{
+ storeDouble(value, addressFor(index));
+}
+
+inline void JIT::emitStore(int index, const JSValue constant, RegisterID base)
+{
+ store32(Imm32(constant.payload()), payloadFor(index, base));
+ store32(Imm32(constant.tag()), tagFor(index, base));
+}
+
+ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
+{
+ emitStore(dst, jsUndefined());
+}
+
+inline bool JIT::isLabeled(unsigned bytecodeOffset)
+{
+ for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
+ unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
+ if (jumpTarget == bytecodeOffset)
+ return true;
+ if (jumpTarget > bytecodeOffset)
+ return false;
+ }
+ return false;
+}
+
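+// The mapping cache remembers which virtual register's tag and payload are
+// currently held in hardware registers, keyed by bytecode offset. map() is a
+// no-op at jump targets, since control flow can merge there with different
+// register contents.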
+inline void JIT::map(unsigned bytecodeOffset, int virtualRegisterIndex, RegisterID tag, RegisterID payload)
+{
+ if (isLabeled(bytecodeOffset))
+ return;
+
+ m_mappedBytecodeOffset = bytecodeOffset;
+ m_mappedVirtualRegisterIndex = virtualRegisterIndex;
+ m_mappedTag = tag;
+ m_mappedPayload = payload;
+}
+
+inline void JIT::unmap(RegisterID registerID)
+{
+ if (m_mappedTag == registerID)
+ m_mappedTag = (RegisterID)-1;
+ else if (m_mappedPayload == registerID)
+ m_mappedPayload = (RegisterID)-1;
+}
+
+inline void JIT::unmap()
+{
+ m_mappedBytecodeOffset = (unsigned)-1;
+ m_mappedVirtualRegisterIndex = RegisterFile::ReturnPC;
+ m_mappedTag = (RegisterID)-1;
+ m_mappedPayload = (RegisterID)-1;
+}
+
+inline bool JIT::isMapped(int virtualRegisterIndex)
+{
+ if (m_mappedBytecodeOffset != m_bytecodeOffset)
+ return false;
+ if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
+ return false;
+ return true;
+}
+
+inline bool JIT::getMappedPayload(int virtualRegisterIndex, RegisterID& payload)
+{
+ if (m_mappedBytecodeOffset != m_bytecodeOffset)
+ return false;
+ if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
+ return false;
+ if (m_mappedPayload == (RegisterID)-1)
+ return false;
+ payload = m_mappedPayload;
+ return true;
+}
+
+inline bool JIT::getMappedTag(int virtualRegisterIndex, RegisterID& tag)
+{
+ if (m_mappedBytecodeOffset != m_bytecodeOffset)
+ return false;
+ if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
+ return false;
+ if (m_mappedTag == (RegisterID)-1)
+ return false;
+ tag = m_mappedTag;
+ return true;
+}
+
+inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex)
+{
+ if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
+ if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
+ addSlowCase(jump());
+ else
+ addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex));
+ }
+}
+
+inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterID tag)
+{
+ if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
+ if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
+ addSlowCase(jump());
+ else
+ addSlowCase(branch32(NotEqual, tag, TrustedImm32(JSValue::CellTag)));
+ }
+}
+
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
+{
+ return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
+}
+
+ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant)
+{
+ if (isOperandConstantImmediateInt(op1)) {
+ constant = getConstantOperand(op1).asInt32();
+ op = op2;
+ return true;
+ }
+
+ if (isOperandConstantImmediateInt(op2)) {
+ constant = getConstantOperand(op2).asInt32();
+ op = op1;
+ return true;
+ }
+
+ return false;
+}
+
+#else // USE(JSVALUE32_64)
+
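+// On JSVALUE64 the JIT tracks the last result: m_lastResultBytecodeRegister
+// names the virtual register whose value is currently in cachedResultRegister,
+// letting emitGetVirtualRegister skip a load. killLastResultRegister()
+// invalidates that knowledge.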
+ALWAYS_INLINE void JIT::killLastResultRegister()
+{
+ m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
+}
+
+// emitGetVirtualRegister loads a virtual register from the stack frame's register array into a hardware register.
+ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
+{
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+
+ // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
+ if (m_codeBlock->isConstantRegisterIndex(src)) {
+ JSValue value = m_codeBlock->getConstant(src);
+ move(ImmPtr(JSValue::encode(value)), dst);
+ killLastResultRegister();
+ return;
+ }
+
+ if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src) && !atJumpTarget()) {
+ // The argument we want is already stored in eax
+ if (dst != cachedResultRegister)
+ move(cachedResultRegister, dst);
+ killLastResultRegister();
+ return;
+ }
+
+ loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
+ killLastResultRegister();
+}
+
+ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
+{
+ if (src2 == m_lastResultBytecodeRegister) {
+ emitGetVirtualRegister(src2, dst2);
+ emitGetVirtualRegister(src1, dst1);
+ } else {
+ emitGetVirtualRegister(src1, dst1);
+ emitGetVirtualRegister(src2, dst2);
+ }
+}
+
+ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
+{
+ return getConstantOperand(src).asInt32();
+}
+
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
+{
+ return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
+}
+
+ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
+{
+ storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
+ m_lastResultBytecodeRegister = (from == cachedResultRegister) ? static_cast<int>(dst) : std::numeric_limits<int>::max();
+}
+
+ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
+{
+ storePtr(TrustedImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
+{
+#if USE(JSVALUE64)
+ return branchTestPtr(Zero, reg, tagMaskRegister);
+#else
+ return branchTest32(Zero, reg, TrustedImm32(TagMask));
+#endif
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
+{
+ move(reg1, scratch);
+ orPtr(reg2, scratch);
+ return emitJumpIfJSCell(scratch);
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
+{
+ addSlowCase(emitJumpIfJSCell(reg));
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
+{
+#if USE(JSVALUE64)
+ return branchTestPtr(NonZero, reg, tagMaskRegister);
+#else
+ return branchTest32(NonZero, reg, TrustedImm32(TagMask));
+#endif
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
+{
+ addSlowCase(emitJumpIfNotJSCell(reg));
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
+{
+ if (!m_codeBlock->isKnownNotImmediate(vReg))
+ emitJumpSlowCaseIfNotJSCell(reg);
+}
+
+#if USE(JSVALUE64)
+
+inline void JIT::emitLoadDouble(int index, FPRegisterID value)
+{
+ if (m_codeBlock->isConstantRegisterIndex(index)) {
+ WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
+ loadDouble(&inConstantPool, value);
+ } else
+ loadDouble(addressFor(index), value);
+}
+
+inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
+{
+ if (m_codeBlock->isConstantRegisterIndex(index)) {
+ ASSERT(isOperandConstantImmediateInt(index));
+ convertInt32ToDouble(Imm32(getConstantOperand(index).asInt32()), value);
+ } else
+ convertInt32ToDouble(addressFor(index), value);
+}
+#endif
+
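+// With the JSVALUE64 encoding, immediate integers carry the full tagTypeNumber
+// bit pattern in their high bits, so comparing against tagTypeNumberRegister
+// (AboveOrEqual / Below) is enough to tell them apart from doubles and cells.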
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
+{
+#if USE(JSVALUE64)
+ return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
+#else
+ return branchTest32(NonZero, reg, TrustedImm32(TagTypeNumber));
+#endif
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
+{
+#if USE(JSVALUE64)
+ return branchPtr(Below, reg, tagTypeNumberRegister);
+#else
+ return branchTest32(Zero, reg, TrustedImm32(TagTypeNumber));
+#endif
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
+{
+ move(reg1, scratch);
+ andPtr(reg2, scratch);
+ return emitJumpIfNotImmediateInteger(scratch);
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
+{
+ addSlowCase(emitJumpIfNotImmediateInteger(reg));
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
+{
+ addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
+{
+ addSlowCase(emitJumpIfNotImmediateNumber(reg));
+}
+
+#if USE(JSVALUE32_64)
+ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
+{
+ subPtr(TrustedImm32(TagTypeNumber), reg);
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
+{
+ return branchSubPtr(Zero, TrustedImm32(TagTypeNumber), reg);
+}
+#endif
+
+ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
+{
+#if USE(JSVALUE64)
+ emitFastArithIntToImmNoCheck(src, dest);
+#else
+ if (src != dest)
+ move(src, dest);
+ addPtr(TrustedImm32(TagTypeNumber), dest);
+#endif
+}
+
+// operand is int32_t, must have been zero-extended if register is 64-bit.
+ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
+{
+#if USE(JSVALUE64)
+ if (src != dest)
+ move(src, dest);
+ orPtr(tagTypeNumberRegister, dest);
+#else
+ signExtend32ToPtr(src, dest);
+ addPtr(dest, dest);
+ emitFastArithReTagImmediate(dest, dest);
+#endif
+}
+
+ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
+{
+ or32(TrustedImm32(static_cast<int32_t>(ValueFalse)), reg);
+}
+
+#endif // USE(JSVALUE32_64)
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif
diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp
new file mode 100644
index 000000000..f5be279a6
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp
@@ -0,0 +1,1660 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#if ENABLE(JIT)
+#include "JIT.h"
+
+#include "Arguments.h"
+#include "Heap.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSCell.h"
+#include "JSFunction.h"
+#include "JSPropertyNameIterator.h"
+#include "LinkBuffer.h"
+
+namespace JSC {
+
+#if USE(JSVALUE64)
+
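+// Builds the shared trampolines used by all JIT code on this platform: the
+// string length fast path, the virtual call/construct (and lazy-link)
+// trampolines, and the native call/construct thunks. Their entry points are
+// recorded in the TrampolineStructure and the backing executable memory is
+// returned to the caller.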
+PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGlobalData* globalData, TrampolineStructure *trampolines)
+{
+ // (2) The second function provides fast property access for string length
+ Label stringLengthBegin = align();
+
+ // Check that regT0 is a string
+ Jump string_failureCases1 = emitJumpIfNotJSCell(regT0);
+ Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info));
+
+ // Checks out okay! - get the length from the UString.
+ load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT0);
+
+ Jump string_failureCases3 = branch32(LessThan, regT0, TrustedImm32(0));
+
+ // regT0 contains a 64-bit value (positive, zero-extended), so we don't need to sign extend here.
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+
+ ret();
+
+ // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
+ COMPILE_ASSERT(sizeof(CodeType) == 4, CodeTypeEnumMustBe32Bit);
+
+ JumpList callSlowCase;
+ JumpList constructSlowCase;
+
+ // VirtualCallLink Trampoline
+ // regT0 holds callee; callFrame is moved and partially initialized.
+ Label virtualCallLinkBegin = align();
+ callSlowCase.append(emitJumpIfNotJSCell(regT0));
+ callSlowCase.append(emitJumpIfNotType(regT0, regT1, JSFunctionType));
+
+ // Finish canonical initialization before JS function call.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT1);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ // Also initialize ReturnPC for use by lazy linking and exceptions.
+ preserveReturnAddressAfterCall(regT3);
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ restoreArgumentReference();
+ Call callLazyLinkCall = call();
+ restoreReturnAddressBeforeReturn(regT3);
+ jump(regT0);
+
+ // VirtualConstructLink Trampoline
+ // regT0 holds callee; callFrame is moved and partially initialized.
+ Label virtualConstructLinkBegin = align();
+ constructSlowCase.append(emitJumpIfNotJSCell(regT0));
+ constructSlowCase.append(emitJumpIfNotType(regT0, regT1, JSFunctionType));
+
+ // Finish canonical initialization before JS function call.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT1);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ // Also initialize ReturnPC for use by lazy linking and exceptions.
+ preserveReturnAddressAfterCall(regT3);
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ restoreArgumentReference();
+ Call callLazyLinkConstruct = call();
+ restoreReturnAddressBeforeReturn(regT3);
+ jump(regT0);
+
+ // VirtualCall Trampoline
+ // regT0 holds callee; regT2 will hold the FunctionExecutable.
+ Label virtualCallBegin = align();
+ callSlowCase.append(emitJumpIfNotJSCell(regT0));
+ callSlowCase.append(emitJumpIfNotType(regT0, regT1, JSFunctionType));
+
+ // Finish canonical initialization before JS function call.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT1);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ Jump hasCodeBlock1 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), TrustedImm32(0));
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+ Call callCompileCall = call();
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ hasCodeBlock1.link(this);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForCallWithArityCheck)), regT0);
+ jump(regT0);
+
+ // VirtualConstruct Trampoline
+ // regT0 holds callee; regT2 will hold the FunctionExecutable.
+ Label virtualConstructBegin = align();
+ constructSlowCase.append(emitJumpIfNotJSCell(regT0));
+ constructSlowCase.append(emitJumpIfNotType(regT0, regT1, JSFunctionType));
+
+ // Finish canonical initialization before JS function call.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT1);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ Jump hasCodeBlock2 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), TrustedImm32(0));
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+ Call callCompileConstruct = call();
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ hasCodeBlock2.link(this);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForConstructWithArityCheck)), regT0);
+ jump(regT0);
+
+ callSlowCase.link(this);
+ // Finish canonical initialization before JS function call.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2, regT2);
+ emitPutCellToCallFrameHeader(regT2, RegisterFile::ScopeChain);
+
+ // Also initialize ReturnPC and CodeBlock, like a JS function would.
+ preserveReturnAddressAfterCall(regT3);
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+ emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+
+ storePtr(callFrameRegister, &m_globalData->topCallFrame);
+ restoreArgumentReference();
+ Call callCallNotJSFunction = call();
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ restoreReturnAddressBeforeReturn(regT3);
+ ret();
+
+ constructSlowCase.link(this);
+ // Finish canonical initialization before JS function call.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2, regT2);
+ emitPutCellToCallFrameHeader(regT2, RegisterFile::ScopeChain);
+
+ // Also initialize ReturnPC and CodeBlock, like a JS function would.
+ preserveReturnAddressAfterCall(regT3);
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+ emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+
+ storePtr(callFrameRegister, &m_globalData->topCallFrame);
+ restoreArgumentReference();
+ Call callConstructNotJSFunction = call();
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ restoreReturnAddressBeforeReturn(regT3);
+ ret();
+
+ // NativeCall Trampoline
+ Label nativeCallThunk = privateCompileCTINativeCall(globalData);
+ Label nativeConstructThunk = privateCompileCTINativeCall(globalData, true);
+
+ Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
+ Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
+ Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
+
+ // All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
+ LinkBuffer patchBuffer(*m_globalData, this);
+
+ patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
+ patchBuffer.link(callLazyLinkConstruct, FunctionPtr(cti_vm_lazyLinkConstruct));
+ patchBuffer.link(callCompileCall, FunctionPtr(cti_op_call_jitCompile));
+ patchBuffer.link(callCompileConstruct, FunctionPtr(cti_op_construct_jitCompile));
+ patchBuffer.link(callCallNotJSFunction, FunctionPtr(cti_op_call_NotJSFunction));
+ patchBuffer.link(callConstructNotJSFunction, FunctionPtr(cti_op_construct_NotJSConstruct));
+
+ CodeRef finalCode = patchBuffer.finalizeCode();
+ RefPtr<ExecutableMemoryHandle> executableMemory = finalCode.executableMemory();
+
+ trampolines->ctiVirtualCallLink = patchBuffer.trampolineAt(virtualCallLinkBegin);
+ trampolines->ctiVirtualConstructLink = patchBuffer.trampolineAt(virtualConstructLinkBegin);
+ trampolines->ctiVirtualCall = patchBuffer.trampolineAt(virtualCallBegin);
+ trampolines->ctiVirtualConstruct = patchBuffer.trampolineAt(virtualConstructBegin);
+ trampolines->ctiNativeCall = patchBuffer.trampolineAt(nativeCallThunk);
+ trampolines->ctiNativeConstruct = patchBuffer.trampolineAt(nativeConstructThunk);
+ trampolines->ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin);
+
+ return executableMemory.release();
+}
+
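+// Emits the thunk used to call a host (native) function: it copies the
+// caller's scope chain into the callee frame, marshals the ExecState* argument
+// according to the platform calling convention (X86_64, ARM or MIPS), makes
+// the call, and then checks globalData->exception, diverting to
+// ctiVMThrowTrampoline if an exception is pending.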
+JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isConstruct)
+{
+ int executableOffsetToFunction = isConstruct ? OBJECT_OFFSETOF(NativeExecutable, m_constructor) : OBJECT_OFFSETOF(NativeExecutable, m_function);
+
+ Label nativeCallThunk = align();
+
+ emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+
+#if CPU(X86_64)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ peek(regT1);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);
+
+ // Calling convention: f(edi, esi, edx, ecx, ...);
+ // Host function signature: f(ExecState*);
+ move(callFrameRegister, X86Registers::edi);
+
+ subPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::esi);
+ loadPtr(Address(X86Registers::esi, OBJECT_OFFSETOF(JSFunction, m_executable)), X86Registers::r9);
+ move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ call(Address(X86Registers::r9, executableOffsetToFunction));
+
+ addPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister);
+
+#elif CPU(ARM)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ preserveReturnAddressAfterCall(regT3); // Callee preserved
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ // Calling convention: f(r0 == regT0, r1 == regT1, ...);
+ // Host function signature: f(ExecState*);
+ move(callFrameRegister, ARMRegisters::r0);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, ARMRegisters::r1);
+ move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ call(Address(regT2, executableOffsetToFunction));
+
+ restoreReturnAddressBeforeReturn(regT3);
+
+#elif CPU(MIPS)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ preserveReturnAddressAfterCall(regT3); // Callee preserved
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ // Calling convention: f(a0, a1, a2, a3);
+ // Host function signature: f(ExecState*);
+
+ // Allocate 16 bytes of stack space (8-byte aligned):
+ // 16 (unused) bytes for the 4 argument slots.
+ subPtr(TrustedImm32(16), stackPointerRegister);
+
+ // Setup arg0
+ move(callFrameRegister, MIPSRegisters::a0);
+
+ // Call
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, MIPSRegisters::a2);
+ loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ call(Address(regT2, executableOffsetToFunction));
+
+ // Restore stack space
+ addPtr(TrustedImm32(16), stackPointerRegister);
+
+ restoreReturnAddressBeforeReturn(regT3);
+
+#else
+#error "JIT not supported on this platform."
+ UNUSED_PARAM(executableOffsetToFunction);
+ breakpoint();
+#endif
+
+ // Check for an exception
+ loadPtr(&(globalData->exception), regT2);
+ Jump exceptionHandler = branchTestPtr(NonZero, regT2);
+
+ // Return.
+ ret();
+
+ // Handle an exception
+ exceptionHandler.link(this);
+
+ // Grab the return address.
+ preserveReturnAddressAfterCall(regT1);
+
+ move(TrustedImmPtr(&globalData->exceptionLocation), regT2);
+ storePtr(regT1, regT2);
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+
+ storePtr(callFrameRegister, &m_globalData->topCallFrame);
+ // Set the return address.
+ move(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
+ restoreReturnAddressBeforeReturn(regT1);
+
+ ret();
+
+ return nativeCallThunk;
+}
+
+JIT::CodeRef JIT::privateCompileCTINativeCall(JSGlobalData* globalData, NativeFunction)
+{
+ return CodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+}
+
+void JIT::emit_op_mov(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ if (canBeOptimized()) {
+ // Use the simpler approach, since the DFG assumes that the last result register
+ // is always set to the destination on every operation.
+ emitGetVirtualRegister(src, regT0);
+ emitPutVirtualRegister(dst);
+ } else {
+ if (m_codeBlock->isConstantRegisterIndex(src)) {
+ storePtr(ImmPtr(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
+ if (dst == m_lastResultBytecodeRegister)
+ killLastResultRegister();
+ } else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) {
+ // If either the src or dst is the cached register, go through
+ // get/put registers to make sure we track this correctly.
+ emitGetVirtualRegister(src, regT0);
+ emitPutVirtualRegister(dst);
+ } else {
+ // Perform the copy via regT1; do not disturb any mapping in regT0.
+ loadPtr(Address(callFrameRegister, src * sizeof(Register)), regT1);
+ storePtr(regT1, Address(callFrameRegister, dst * sizeof(Register)));
+ }
+ }
+}
+
+void JIT::emit_op_end(Instruction* currentInstruction)
+{
+ ASSERT(returnValueRegister != callFrameRegister);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
+ restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
+ ret();
+}
+
+void JIT::emit_op_jmp(Instruction* currentInstruction)
+{
+ unsigned target = currentInstruction[1].u.operand;
+ addJump(jump(), target);
+}
+
+void JIT::emit_op_new_object(Instruction* currentInstruction)
+{
+ emitAllocateJSFinalObject(ImmPtr(m_codeBlock->globalObject()->emptyObjectStructure()), regT0, regT1);
+
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_new_object(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
+{
+ unsigned baseVal = currentInstruction[1].u.operand;
+
+ emitGetVirtualRegister(baseVal, regT0);
+
+ // Check that baseVal is a cell.
+ emitJumpSlowCaseIfNotJSCell(regT0, baseVal);
+
+ // Check that baseVal 'ImplementsHasInstance'.
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
+ addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsHasInstance)));
+}
+
+void JIT::emit_op_instanceof(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned value = currentInstruction[2].u.operand;
+ unsigned baseVal = currentInstruction[3].u.operand;
+ unsigned proto = currentInstruction[4].u.operand;
+
+ // Load the operands value, baseVal, and proto into regT2, regT0, and regT1 respectively.
+ // We use regT0 for baseVal since we will be done with it first, and can then reuse it for the result.
+ emitGetVirtualRegister(value, regT2);
+ emitGetVirtualRegister(baseVal, regT0);
+ emitGetVirtualRegister(proto, regT1);
+
+ // Check that value and proto are cells. baseVal must be a cell - this is checked by op_check_has_instance.
+ emitJumpSlowCaseIfNotJSCell(regT2, value);
+ emitJumpSlowCaseIfNotJSCell(regT1, proto);
+
+ // Check that prototype is an object
+ loadPtr(Address(regT1, JSCell::structureOffset()), regT3);
+ addSlowCase(emitJumpIfNotObject(regT3));
+
+ // FIXME: this check is only needed because the JSC API allows HasInstance to be overridden; we should deprecate this.
+ // Check that baseVal 'ImplementsDefaultHasInstance'.
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
+ addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance)));
+
+ // Optimistically load the result true, and start looping.
+ // Initially, regT1 still contains proto and regT2 still contains value.
+ // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
+ move(TrustedImmPtr(JSValue::encode(jsBoolean(true))), regT0);
+ Label loop(this);
+
+ // Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
+ // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
+ loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
+ loadPtr(Address(regT2, Structure::prototypeOffset()), regT2);
+ Jump isInstance = branchPtr(Equal, regT2, regT1);
+ emitJumpIfJSCell(regT2).linkTo(loop, this);
+
+ // We get here either by dropping out of the loop, or if value was not an Object. Result is false.
+ move(TrustedImmPtr(JSValue::encode(jsBoolean(false))), regT0);
+
+ // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
+ isInstance.link(this);
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_call(Instruction* currentInstruction)
+{
+ compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_call_eval(Instruction* currentInstruction)
+{
+ compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex);
+}
+
+void JIT::emit_op_call_varargs(Instruction* currentInstruction)
+{
+ compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_construct(Instruction* currentInstruction)
+{
+ compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
+{
+ unsigned activation = currentInstruction[1].u.operand;
+ unsigned arguments = currentInstruction[2].u.operand;
+ Jump activationCreated = branchTestPtr(NonZero, addressFor(activation));
+ Jump argumentsNotCreated = branchTestPtr(Zero, addressFor(arguments));
+ activationCreated.link(this);
+ JITStubCall stubCall(this, cti_op_tear_off_activation);
+ stubCall.addArgument(activation, regT2);
+ stubCall.addArgument(unmodifiedArgumentsRegister(arguments), regT2);
+ stubCall.call();
+ argumentsNotCreated.link(this);
+}
+
+void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ Jump argsNotCreated = branchTestPtr(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(dst))));
+ JITStubCall stubCall(this, cti_op_tear_off_arguments);
+ stubCall.addArgument(unmodifiedArgumentsRegister(dst), regT2);
+ stubCall.call();
+ argsNotCreated.link(this);
+}
+
+void JIT::emit_op_ret(Instruction* currentInstruction)
+{
+ emitOptimizationCheck(RetOptimizationCheck);
+
+ ASSERT(callFrameRegister != regT1);
+ ASSERT(regT1 != returnValueRegister);
+ ASSERT(returnValueRegister != callFrameRegister);
+
+ // Return the result in %eax.
+ emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
+
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+
+ // Restore our caller's "r".
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ // Return.
+ restoreReturnAddressBeforeReturn(regT1);
+ ret();
+}
+
+void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
+{
+ emitOptimizationCheck(RetOptimizationCheck);
+
+ ASSERT(callFrameRegister != regT1);
+ ASSERT(regT1 != returnValueRegister);
+ ASSERT(returnValueRegister != callFrameRegister);
+
+ // Return the result in %eax.
+ emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
+ Jump notJSCell = emitJumpIfNotJSCell(returnValueRegister);
+ loadPtr(Address(returnValueRegister, JSCell::structureOffset()), regT2);
+ Jump notObject = emitJumpIfNotObject(regT2);
+
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+
+ // Restore our caller's "r".
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ // Return.
+ restoreReturnAddressBeforeReturn(regT1);
+ ret();
+
+ // Return 'this' in %eax.
+ notJSCell.link(this);
+ notObject.link(this);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, returnValueRegister);
+
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+
+ // Restore our caller's "r".
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ // Return.
+ restoreReturnAddressBeforeReturn(regT1);
+ ret();
+}
+
+void JIT::emit_op_resolve(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.callWithValueProfiling(currentInstruction[1].u.operand, FirstProfilingSite);
+}
+
+void JIT::emit_op_to_primitive(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src, regT0);
+
+ Jump isImm = emitJumpIfNotJSCell(regT0);
+ addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info)));
+ isImm.link(this);
+
+ if (dst != src)
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_strcat(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_strcat);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_base(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, currentInstruction[3].u.operand ? cti_op_resolve_base_strict_put : cti_op_resolve_base);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.callWithValueProfiling(currentInstruction[1].u.operand, FirstProfilingSite);
+}
+
+void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_ensure_property_exists);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve_skip);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.callWithValueProfiling(currentInstruction[1].u.operand, FirstProfilingSite);
+}
+
+void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool)
+{
+ // Fast case
+ void* globalObject = m_codeBlock->globalObject();
+ unsigned currentIndex = m_globalResolveInfoIndex++;
+ GlobalResolveInfo* resolveInfoAddress = &(m_codeBlock->globalResolveInfo(currentIndex));
+
+ // Check Structure of global object
+ move(TrustedImmPtr(globalObject), regT0);
+ move(TrustedImmPtr(resolveInfoAddress), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(GlobalResolveInfo, structure)), regT1);
+ addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, JSCell::structureOffset()))); // Structures don't match
+
+ // Load cached property
+ // Assume that the global object always uses external storage.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_propertyStorage)), regT0);
+ load32(Address(regT2, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), regT1);
+ loadPtr(BaseIndex(regT0, regT1, ScalePtr), regT0);
+ emitValueProfilingSite(FirstProfilingSite);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
+
+ unsigned currentIndex = m_globalResolveInfoIndex++;
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_resolve_global);
+ stubCall.addArgument(TrustedImmPtr(ident));
+ stubCall.addArgument(Imm32(currentIndex));
+ stubCall.addArgument(regT0);
+ stubCall.callWithValueProfiling(dst, SubsequentProfilingSite);
+}
+
+void JIT::emit_op_not(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+
+ // Invert against JSValue(false); if the value was tagged as a boolean, then all bits will be
+ // clear other than the low bit (which will be 0 or 1 for false or true inputs respectively).
+ // Then invert against JSValue(true), which will add the tag back in, and flip the low bit.
+ xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
+ addSlowCase(branchTestPtr(NonZero, regT0, TrustedImm32(static_cast<int32_t>(~1))));
+ xorPtr(TrustedImm32(static_cast<int32_t>(ValueTrue)), regT0);
+
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
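+// op_jfalse fast path: take the jump if the operand is the integer 0 or the
+// boolean false. Any other immediate integer is truthy and falls through;
+// values that are neither integers nor booleans go to the slow case.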
+void JIT::emit_op_jfalse(Instruction* currentInstruction)
+{
+ unsigned target = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
+ addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsNumber(0)))), target);
+ Jump isNonZero = emitJumpIfImmediateInteger(regT0);
+
+ addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(false)))), target);
+ addSlowCase(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(true)))));
+
+ isNonZero.link(this);
+}
+
+void JIT::emit_op_jeq_null(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src, regT0);
+ Jump isImmediate = emitJumpIfNotJSCell(regT0);
+
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ addJump(branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
+ Jump wasNotImmediate = jump();
+
+ // Now handle the immediate cases - undefined & null
+ isImmediate.link(this);
+ andPtr(TrustedImm32(~TagBitUndefined), regT0);
+ addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsNull()))), target);
+
+ wasNotImmediate.link(this);
+}
+
+void JIT::emit_op_jneq_null(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src, regT0);
+ Jump isImmediate = emitJumpIfNotJSCell(regT0);
+
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ addJump(branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
+ Jump wasNotImmediate = jump();
+
+ // Now handle the immediate cases - undefined & null
+ isImmediate.link(this);
+ andPtr(TrustedImm32(~TagBitUndefined), regT0);
+ addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(jsNull()))), target);
+
+ wasNotImmediate.link(this);
+}
+
+void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ JSCell* ptr = currentInstruction[2].u.jsCell.get();
+ unsigned target = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegister(src, regT0);
+ addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(JSValue(ptr)))), target);
+}
+
+void JIT::emit_op_jsr(Instruction* currentInstruction)
+{
+ int retAddrDst = currentInstruction[1].u.operand;
+ int target = currentInstruction[2].u.operand;
+ DataLabelPtr storeLocation = storePtrWithPatch(TrustedImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
+ addJump(jump(), target);
+ m_jsrSites.append(JSRInfo(storeLocation, label()));
+ killLastResultRegister();
+}
+
+void JIT::emit_op_sret(Instruction* currentInstruction)
+{
+ jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
+ killLastResultRegister();
+}
+
+void JIT::emit_op_eq(Instruction* currentInstruction)
+{
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ compare32(Equal, regT1, regT0, regT0);
+ emitTagAsBoolImmediate(regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_bitnot(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ not32(regT0);
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve_with_base);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.callWithValueProfiling(currentInstruction[2].u.operand, FirstProfilingSite);
+}
+
+void JIT::emit_op_resolve_with_this(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve_with_this);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.callWithValueProfiling(currentInstruction[2].u.operand, FirstProfilingSite);
+}
+
+void JIT::emit_op_jtrue(Instruction* currentInstruction)
+{
+ unsigned target = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
+ Jump isZero = branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsNumber(0))));
+ addJump(emitJumpIfImmediateInteger(regT0), target);
+
+ addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(true)))), target);
+ addSlowCase(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(false)))));
+
+ isZero.link(this);
+}
+
+void JIT::emit_op_neq(Instruction* currentInstruction)
+{
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ compare32(NotEqual, regT1, regT0, regT0);
+ emitTagAsBoolImmediate(regT0);
+
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_bitxor(Instruction* currentInstruction)
+{
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ xorPtr(regT1, regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_bitor(Instruction* currentInstruction)
+{
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ orPtr(regT1, regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_throw(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_throw);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.call();
+ ASSERT(regT0 == returnValueRegister);
+#ifndef NDEBUG
+ // cti_op_throw always changes its return address,
+ // this point in the code should never be reached.
+ breakpoint();
+#endif
+}
+
+void JIT::emit_op_get_pnames(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int i = currentInstruction[3].u.operand;
+ int size = currentInstruction[4].u.operand;
+ int breakTarget = currentInstruction[5].u.operand;
+
+ JumpList isNotObject;
+
+ emitGetVirtualRegister(base, regT0);
+ if (!m_codeBlock->isKnownNotImmediate(base))
+ isNotObject.append(emitJumpIfNotJSCell(regT0));
+ if (base != m_codeBlock->thisRegister() || m_codeBlock->isStrictMode()) {
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ isNotObject.append(emitJumpIfNotObject(regT2));
+ }
+
+ // We could inline the case where you have a valid cache, but
+ // this call doesn't seem to be hot.
+ Label isObject(this);
+ JITStubCall getPnamesStubCall(this, cti_op_get_pnames);
+ getPnamesStubCall.addArgument(regT0);
+ getPnamesStubCall.call(dst);
+ load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
+ storePtr(tagTypeNumberRegister, payloadFor(i));
+ store32(TrustedImm32(Int32Tag), intTagFor(size));
+ store32(regT3, intPayloadFor(size));
+ Jump end = jump();
+
+ isNotObject.link(this);
+ move(regT0, regT1);
+ and32(TrustedImm32(~TagBitUndefined), regT1);
+ addJump(branch32(Equal, regT1, TrustedImm32(ValueNull)), breakTarget);
+
+ JITStubCall toObjectStubCall(this, cti_to_object);
+ toObjectStubCall.addArgument(regT0);
+ toObjectStubCall.call(base);
+ jump().linkTo(isObject, this);
+
+ end.link(this);
+}
+
+void JIT::emit_op_next_pname(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int i = currentInstruction[3].u.operand;
+ int size = currentInstruction[4].u.operand;
+ int it = currentInstruction[5].u.operand;
+ int target = currentInstruction[6].u.operand;
+
+ JumpList callHasProperty;
+
+ Label begin(this);
+ load32(intPayloadFor(i), regT0);
+ Jump end = branch32(Equal, regT0, intPayloadFor(size));
+
+ // Grab key @ i
+ loadPtr(addressFor(it), regT1);
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
+
+ loadPtr(BaseIndex(regT2, regT0, TimesEight), regT2);
+
+ emitPutVirtualRegister(dst, regT2);
+
+ // Increment i
+ add32(TrustedImm32(1), regT0);
+ store32(regT0, intPayloadFor(i));
+
+ // Verify that i is valid:
+ emitGetVirtualRegister(base, regT0);
+
+ // Test base's structure
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))));
+
+ // Test base's prototype chain
+ loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3);
+ loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
+ addJump(branchTestPtr(Zero, Address(regT3)), target);
+
+ Label checkPrototype(this);
+ loadPtr(Address(regT2, Structure::prototypeOffset()), regT2);
+ callHasProperty.append(emitJumpIfNotJSCell(regT2));
+ loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
+ callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
+ addPtr(TrustedImm32(sizeof(Structure*)), regT3);
+ branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
+
+ // Continue loop.
+ addJump(jump(), target);
+
+ // Slow case: Ask the object if i is valid.
+ callHasProperty.link(this);
+ emitGetVirtualRegister(dst, regT1);
+ JITStubCall stubCall(this, cti_has_property);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+
+ // Test for valid key.
+ addJump(branchTest32(NonZero, regT0), target);
+ jump().linkTo(begin, this);
+
+ // End of loop.
+ end.link(this);
+}
+
+void JIT::emit_op_push_scope(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_push_scope);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_pop_scope(Instruction*)
+{
+ JITStubCall(this, cti_op_pop_scope).call();
+}
+
+void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegisters(src1, regT0, src2, regT1);
+
+ // Jump to a slow case if either operand is a number, or if both are JSCell*s.
+ move(regT0, regT2);
+ orPtr(regT1, regT2);
+ addSlowCase(emitJumpIfJSCell(regT2));
+ addSlowCase(emitJumpIfImmediateNumber(regT2));
+
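+ // Here neither operand is a number and at most one is a cell, so strict
+ // (in)equality is decided by comparing the low 32 bits of the encodings.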
+ if (type == OpStrictEq)
+ compare32(Equal, regT1, regT0, regT0);
+ else
+ compare32(NotEqual, regT1, regT0, regT0);
+ emitTagAsBoolImmediate(regT0);
+
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_stricteq(Instruction* currentInstruction)
+{
+ compileOpStrictEq(currentInstruction, OpStrictEq);
+}
+
+void JIT::emit_op_nstricteq(Instruction* currentInstruction)
+{
+ compileOpStrictEq(currentInstruction, OpNStrictEq);
+}
+
+void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
+{
+ int srcVReg = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(srcVReg, regT0);
+
+ Jump wasImmediate = emitJumpIfImmediateInteger(regT0);
+
+ emitJumpSlowCaseIfNotJSCell(regT0, srcVReg);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ addSlowCase(branch8(NotEqual, Address(regT2, Structure::typeInfoTypeOffset()), TrustedImm32(NumberType)));
+
+ wasImmediate.link(this);
+
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_push_new_scope);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_catch(Instruction* currentInstruction)
+{
+ killLastResultRegister(); // FIXME: Implicitly treat op_catch as a labeled statement, and remove this line of code.
+ move(regT0, callFrameRegister);
+ peek(regT3, OBJECT_OFFSETOF(struct JITStackFrame, globalData) / sizeof(void*));
+ loadPtr(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception)), regT0);
+ storePtr(TrustedImmPtr(JSValue::encode(JSValue())), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception)));
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_jmp_scopes);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.call();
+ addJump(jump(), currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_switch_imm(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+ JITStubCall stubCall(this, cti_op_switch_imm);
+ stubCall.addArgument(scrutinee, regT2);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_switch_char(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+ JITStubCall stubCall(this, cti_op_switch_char);
+ stubCall.addArgument(scrutinee, regT2);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_switch_string(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset));
+
+ JITStubCall stubCall(this, cti_op_switch_string);
+ stubCall.addArgument(scrutinee, regT2);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_throw_reference_error(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_throw_reference_error);
+ stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
+ stubCall.call();
+}
+
+void JIT::emit_op_debug(Instruction* currentInstruction)
+{
+#if ENABLE(DEBUG_WITH_BREAKPOINT)
+ UNUSED_PARAM(currentInstruction);
+ breakpoint();
+#else
+ JITStubCall stubCall(this, cti_op_debug);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call();
+#endif
+}
+
+void JIT::emit_op_eq_null(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+
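+ // A cell equals null only if its structure is marked MasqueradesAsUndefined;
+ // an immediate equals null if, with the undefined tag bit masked off, it
+ // encodes null.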
+ emitGetVirtualRegister(src1, regT0);
+ Jump isImmediate = emitJumpIfNotJSCell(regT0);
+
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ test8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined), regT0);
+
+ Jump wasNotImmediate = jump();
+
+ isImmediate.link(this);
+
+ andPtr(TrustedImm32(~TagBitUndefined), regT0);
+ comparePtr(Equal, regT0, TrustedImm32(ValueNull), regT0);
+
+ wasNotImmediate.link(this);
+
+ emitTagAsBoolImmediate(regT0);
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_neq_null(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src1, regT0);
+ Jump isImmediate = emitJumpIfNotJSCell(regT0);
+
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ test8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined), regT0);
+
+ Jump wasNotImmediate = jump();
+
+ isImmediate.link(this);
+
+ andPtr(TrustedImm32(~TagBitUndefined), regT0);
+ comparePtr(NotEqual, regT0, TrustedImm32(ValueNull), regT0);
+
+ wasNotImmediate.link(this);
+
+ emitTagAsBoolImmediate(regT0);
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_enter(Instruction*)
+{
+ // Even though CTI doesn't use them, we initialize our constant
+ // registers to zap stale pointers, to avoid unnecessarily prolonging
+ // object lifetime and increasing GC pressure.
+ size_t count = m_codeBlock->m_numVars;
+ for (size_t j = 0; j < count; ++j)
+ emitInitRegister(j);
+
+}
+
+void JIT::emit_op_create_activation(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ Jump activationCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
+ JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
+ emitPutVirtualRegister(dst);
+ activationCreated.link(this);
+}
+
+void JIT::emit_op_create_arguments(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
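+ // Lazily create the arguments object: only call the stub if the arguments
+ // register is still empty, then store the result into both the register and
+ // its unmodified shadow.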
+ Jump argsCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
+ JITStubCall(this, cti_op_create_arguments).call();
+ emitPutVirtualRegister(dst);
+ emitPutVirtualRegister(unmodifiedArgumentsRegister(dst));
+ argsCreated.link(this);
+}
+
+void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ storePtr(TrustedImmPtr(0), Address(callFrameRegister, sizeof(Register) * dst));
+}
+
+void JIT::emit_op_convert_this(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
+ emitJumpSlowCaseIfNotJSCell(regT0);
+ addSlowCase(branchPtr(Equal, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info)));
+}
+
+void JIT::emit_op_get_callee(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT0);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emit_op_create_this(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT2);
+ emitJumpSlowCaseIfNotJSCell(regT2, currentInstruction[2].u.operand);
+ loadPtr(Address(regT2, JSCell::structureOffset()), regT1);
+ addSlowCase(emitJumpIfNotObject(regT1));
+
+ // now we know that the prototype is an object, but we don't know if it's got an
+ // inheritor ID
+
+ loadPtr(Address(regT2, JSObject::offsetOfInheritorID()), regT2);
+ addSlowCase(branchTestPtr(Zero, regT2));
+
+ // now regT2 contains the inheritorID, which is the structure that the newly
+ // allocated object will have.
+
+ emitAllocateJSFinalObject(regT2, regT0, regT1);
+
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_create_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCaseIfNotJSCell(iter, currentInstruction[2].u.operand); // not a cell
+ linkSlowCase(iter); // not an object
+ linkSlowCase(iter); // doesn't have an inheritor ID
+ linkSlowCase(iter); // allocation failed
+ JITStubCall stubCall(this, cti_op_create_this);
+ stubCall.addArgument(currentInstruction[2].u.operand, regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
+{
+ peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
+ Jump noProfiler = branchTestPtr(Zero, Address(regT1));
+
+ JITStubCall stubCall(this, cti_op_profile_will_call);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT1);
+ stubCall.call();
+ noProfiler.link(this);
+
+}
+
+void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
+{
+ peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
+ Jump noProfiler = branchTestPtr(Zero, Address(regT1));
+
+ JITStubCall stubCall(this, cti_op_profile_did_call);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT1);
+ stubCall.call();
+ noProfiler.link(this);
+}
+
+
+// Slow cases
+
+void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ void* globalThis = m_codeBlock->globalObject()->globalScopeChain()->globalThis.get();
+
+ linkSlowCase(iter);
+ Jump isNotUndefined = branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(jsUndefined())));
+ move(TrustedImmPtr(globalThis), regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand, regT0);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_convert_this));
+
+ isNotUndefined.link(this);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_convert_this);
+ stubCall.addArgument(regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_to_primitive);
+ stubCall.addArgument(regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
+ JITStubCall stubCall(this, cti_op_not);
+ stubCall.addArgument(regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jtrue);
+ stubCall.addArgument(regT0);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), currentInstruction[2].u.operand); // inverted!
+}
+
+void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_bitnot);
+ stubCall.addArgument(regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jtrue);
+ stubCall.addArgument(regT0);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand);
+}
+
+void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_bitxor);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_bitor);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_eq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitTagAsBoolImmediate(regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_eq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ xor32(TrustedImm32(0x1), regT0);
+ emitTagAsBoolImmediate(regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_stricteq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_nstricteq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_check_has_instance(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned baseVal = currentInstruction[1].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, baseVal);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_check_has_instance);
+ stubCall.addArgument(baseVal, regT2);
+ stubCall.call();
+}
+
+void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned value = currentInstruction[2].u.operand;
+ unsigned baseVal = currentInstruction[3].u.operand;
+ unsigned proto = currentInstruction[4].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, value);
+ linkSlowCaseIfNotJSCell(iter, proto);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_instanceof);
+ stubCall.addArgument(value, regT2);
+ stubCall.addArgument(baseVal, regT2);
+ stubCall.addArgument(proto, regT2);
+ stubCall.call(dst);
+}
+
+void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++);
+}
+
+void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_call_eval, currentInstruction, iter, m_callLinkInfoIndex);
+}
+
+void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
+}
+
+void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_construct, currentInstruction, iter, m_callLinkInfoIndex++);
+}
+
+void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCaseIfNotJSCell(iter, currentInstruction[2].u.operand);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_to_jsnumber);
+ stubCall.addArgument(regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int argumentsRegister = currentInstruction[2].u.operand;
+ addSlowCase(branchTestPtr(NonZero, addressFor(argumentsRegister)));
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+ sub32(TrustedImm32(1), regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(dst, regT0);
+}
+
+void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ emitGetVirtualRegister(base, regT0);
+ JITStubCall stubCall(this, cti_op_get_by_id_generic);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(ident));
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int argumentsRegister = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
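+ // Fast path: only valid while the arguments object has not been materialized.
+ // Bias the index by one to account for 'this', bounds-check it against the
+ // call frame's ArgumentCount, then read the argument directly off the frame.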
+ addSlowCase(branchTestPtr(NonZero, addressFor(argumentsRegister)));
+ emitGetVirtualRegister(property, regT1);
+ addSlowCase(emitJumpIfNotImmediateInteger(regT1));
+ add32(TrustedImm32(1), regT1);
+ // regT1 now contains the integer index of the argument we want, including this
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT2);
+ addSlowCase(branch32(AboveOrEqual, regT1, regT2));
+
+ neg32(regT1);
+ signExtend32ToPtr(regT1, regT1);
+ loadPtr(BaseIndex(callFrameRegister, regT1, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0);
+ emitPutVirtualRegister(dst, regT0);
+}
+
+void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned arguments = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
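+ // First slow case: the arguments object already exists, so skip creation and
+ // go straight to a generic get_by_val. The remaining slow cases (non-integer
+ // or out-of-range index) must create the arguments object first.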
+ linkSlowCase(iter);
+ Jump skipArgumentsCreation = jump();
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall(this, cti_op_create_arguments).call();
+ emitPutVirtualRegister(arguments);
+ emitPutVirtualRegister(unmodifiedArgumentsRegister(arguments));
+
+ skipArgumentsCreation.link(this);
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(arguments, regT2);
+ stubCall.addArgument(property, regT2);
+ stubCall.call(dst);
+}
+
+#endif // USE(JSVALUE64)
+
+void JIT::emit_op_resolve_global_dynamic(Instruction* currentInstruction)
+{
+ int skip = currentInstruction[5].u.operand;
+
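+ // Walk 'skip' nodes up the scope chain, checking at each step that the node is
+ // still a plain activation by comparing structures; any mismatch goes to the
+ // slow path. If the whole chain checks out, this reduces to op_resolve_global.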
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
+
+ bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
+ addSlowCase(checkStructure(regT1, m_globalData->activationStructure.get()));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
+ activationNotCreated.link(this);
+ }
+ while (skip--) {
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
+ addSlowCase(checkStructure(regT1, m_globalData->activationStructure.get()));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
+ }
+ emit_op_resolve_global(currentInstruction, true);
+}
+
+void JIT::emitSlow_op_resolve_global_dynamic(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
+ int skip = currentInstruction[5].u.operand;
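+ // The first 'skip' slow cases come from the scope chain structure checks and
+ // require a full cti_op_resolve; the final slow case, linked below, is the
+ // resolve_global cache miss.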
+ while (skip--)
+ linkSlowCase(iter);
+ JITStubCall resolveStubCall(this, cti_op_resolve);
+ resolveStubCall.addArgument(TrustedImmPtr(ident));
+ resolveStubCall.call(dst);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_resolve_global_dynamic));
+
+ unsigned currentIndex = m_globalResolveInfoIndex++;
+
+ linkSlowCase(iter); // We managed to skip all the nodes in the scope chain, but the cache missed.
+ JITStubCall stubCall(this, cti_op_resolve_global);
+ stubCall.addArgument(TrustedImmPtr(ident));
+ stubCall.addArgument(Imm32(currentIndex));
+ stubCall.addArgument(regT0);
+ stubCall.callWithValueProfiling(dst, SubsequentProfilingSite); // The first profiling site is in emit_op_resolve_global
+}
+
+void JIT::emit_op_new_regexp(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_new_regexp);
+ stubCall.addArgument(TrustedImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_new_func(Instruction* currentInstruction)
+{
+ Jump lazyJump;
+ int dst = currentInstruction[1].u.operand;
+ if (currentInstruction[3].u.operand) {
+#if USE(JSVALUE32_64)
+ lazyJump = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag));
+#else
+ lazyJump = branchTestPtr(NonZero, addressFor(dst));
+#endif
+ }
+
+ FunctionExecutable* executable = m_codeBlock->functionDecl(currentInstruction[2].u.operand);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
+ emitAllocateJSFunction(executable, regT2, regT0, regT1);
+
+ emitStoreCell(dst, regT0);
+
+ if (currentInstruction[3].u.operand) {
+#if USE(JSVALUE32_64)
+ unmap();
+#else
+ killLastResultRegister();
+#endif
+ lazyJump.link(this);
+ }
+}
+
+void JIT::emitSlow_op_new_func(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_new_func);
+ stubCall.addArgument(TrustedImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
+{
+ FunctionExecutable* executable = m_codeBlock->functionExpr(currentInstruction[2].u.operand);
+
+ // We only inline the allocation of anonymous function expressions.
+ // If we want to be able to allocate a named function expression, we would
+ // need to be able to do inline allocation of a JSStaticScopeObject.
+ if (executable->name().isNull()) {
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
+ emitAllocateJSFunction(executable, regT2, regT0, regT1);
+ emitStoreCell(currentInstruction[1].u.operand, regT0);
+ return;
+ }
+
+ JITStubCall stubCall(this, cti_op_new_func_exp);
+ stubCall.addArgument(TrustedImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_new_func_exp(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ FunctionExecutable* executable = m_codeBlock->functionExpr(currentInstruction[2].u.operand);
+ if (!executable->name().isNull())
+ return;
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_new_func_exp);
+ stubCall.addArgument(TrustedImmPtr(executable));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_new_array(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_new_array);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_new_array_buffer(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_new_array_buffer);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
new file mode 100644
index 000000000..9d1cbce8d
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -0,0 +1,1733 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#if USE(JSVALUE32_64)
+#include "JIT.h"
+
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSCell.h"
+#include "JSFunction.h"
+#include "JSPropertyNameIterator.h"
+#include "LinkBuffer.h"
+
+namespace JSC {
+
+PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGlobalData* globalData, TrampolineStructure *trampolines)
+{
+#if ENABLE(JIT_USE_SOFT_MODULO)
+ Label softModBegin = align();
+ softModulo();
+#endif
+ // (1) This function provides fast property access for string length
+ Label stringLengthBegin = align();
+
+ // regT0 holds payload, regT1 holds tag
+
+ Jump string_failureCases1 = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+ Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info));
+
+ // Checks out okay! - get the length from the UString.
+ load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT2);
+
+ Jump string_failureCases3 = branch32(Above, regT2, TrustedImm32(INT_MAX));
+ move(regT2, regT0);
+ move(TrustedImm32(JSValue::Int32Tag), regT1);
+
+ ret();
+
+ JumpList callSlowCase;
+ JumpList constructSlowCase;
+
+ // VirtualCallLink Trampoline
+ // regT1, regT0 holds callee; callFrame is moved and partially initialized.
+ Label virtualCallLinkBegin = align();
+ callSlowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ callSlowCase.append(emitJumpIfNotType(regT0, regT1, JSFunctionType));
+
+ // Finish canonical initialization before JS function call.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT1);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ // Also initialize ReturnPC for use by lazy linking and exceptions.
+ preserveReturnAddressAfterCall(regT3);
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ restoreArgumentReference();
+ Call callLazyLinkCall = call();
+ restoreReturnAddressBeforeReturn(regT3);
+ jump(regT0);
+
+ // VirtualConstructLink Trampoline
+ // regT1, regT0 holds callee; callFrame is moved and partially initialized.
+ Label virtualConstructLinkBegin = align();
+ constructSlowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ constructSlowCase.append(emitJumpIfNotType(regT0, regT1, JSFunctionType));
+
+ // Finish canonical initialization before JS function call.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT1);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ // Also initialize ReturnPC for use by lazy linking and exceptions.
+ preserveReturnAddressAfterCall(regT3);
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ restoreArgumentReference();
+ Call callLazyLinkConstruct = call();
+ restoreReturnAddressBeforeReturn(regT3);
+ jump(regT0);
+
+ // VirtualCall Trampoline
+ // regT1, regT0 holds callee; regT2 will hold the FunctionExecutable.
+ Label virtualCallBegin = align();
+ callSlowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ callSlowCase.append(emitJumpIfNotType(regT0, regT1, JSFunctionType));
+
+ // Finish canonical initialization before JS function call.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT1);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
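+ // A negative m_numParametersForCall means there is no compiled code for call
+ // yet, so drop into cti_op_call_jitCompile before jumping to the arity check
+ // entry point.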
+ Jump hasCodeBlock1 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), TrustedImm32(0));
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+ Call callCompileCall = call();
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ hasCodeBlock1.link(this);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForCallWithArityCheck)), regT0);
+ jump(regT0);
+
+ // VirtualConstruct Trampoline
+ // regT1, regT0 holds callee; regT2 will hold the FunctionExecutable.
+ Label virtualConstructBegin = align();
+ constructSlowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ constructSlowCase.append(emitJumpIfNotType(regT0, regT1, JSFunctionType));
+
+ // Finish canonical initialization before JS function call.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT1);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ Jump hasCodeBlock2 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), TrustedImm32(0));
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+ Call callCompileConstruct = call();
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ hasCodeBlock2.link(this);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForConstructWithArityCheck)), regT0);
+ jump(regT0);
+
+ callSlowCase.link(this);
+ // Finish canonical initialization before JS function call.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2, regT2);
+ emitPutCellToCallFrameHeader(regT2, RegisterFile::ScopeChain);
+
+ // Also initialize ReturnPC and CodeBlock, like a JS function would.
+ preserveReturnAddressAfterCall(regT3);
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+ emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+
+ storePtr(callFrameRegister, &m_globalData->topCallFrame);
+ restoreArgumentReference();
+ Call callCallNotJSFunction = call();
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ restoreReturnAddressBeforeReturn(regT3);
+ ret();
+
+ constructSlowCase.link(this);
+ // Finish canonical initialization before JS function call.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2, regT2);
+ emitPutCellToCallFrameHeader(regT2, RegisterFile::ScopeChain);
+
+ // Also initialize ReturnPC and CodeBlock, like a JS function would.
+ preserveReturnAddressAfterCall(regT3);
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+ emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+
+ storePtr(callFrameRegister, &m_globalData->topCallFrame);
+ restoreArgumentReference();
+ Call callConstructNotJSFunction = call();
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ restoreReturnAddressBeforeReturn(regT3);
+ ret();
+
+ // NativeCall Trampoline
+ Label nativeCallThunk = privateCompileCTINativeCall(globalData);
+ Label nativeConstructThunk = privateCompileCTINativeCall(globalData, true);
+
+ Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
+ Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
+ Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
+
+ // All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
+ LinkBuffer patchBuffer(*m_globalData, this);
+
+ patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
+ patchBuffer.link(callLazyLinkConstruct, FunctionPtr(cti_vm_lazyLinkConstruct));
+ patchBuffer.link(callCompileCall, FunctionPtr(cti_op_call_jitCompile));
+ patchBuffer.link(callCompileConstruct, FunctionPtr(cti_op_construct_jitCompile));
+ patchBuffer.link(callCallNotJSFunction, FunctionPtr(cti_op_call_NotJSFunction));
+ patchBuffer.link(callConstructNotJSFunction, FunctionPtr(cti_op_construct_NotJSConstruct));
+
+ CodeRef finalCode = patchBuffer.finalizeCode();
+ RefPtr<ExecutableMemoryHandle> executableMemory = finalCode.executableMemory();
+
+ trampolines->ctiVirtualCallLink = patchBuffer.trampolineAt(virtualCallLinkBegin);
+ trampolines->ctiVirtualConstructLink = patchBuffer.trampolineAt(virtualConstructLinkBegin);
+ trampolines->ctiVirtualCall = patchBuffer.trampolineAt(virtualCallBegin);
+ trampolines->ctiVirtualConstruct = patchBuffer.trampolineAt(virtualConstructBegin);
+ trampolines->ctiNativeCall = patchBuffer.trampolineAt(nativeCallThunk);
+ trampolines->ctiNativeConstruct = patchBuffer.trampolineAt(nativeConstructThunk);
+ trampolines->ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin);
+
+#if ENABLE(JIT_USE_SOFT_MODULO)
+ trampolines->ctiSoftModulo = patchBuffer.trampolineAt(softModBegin);
+#endif
+
+ return executableMemory.release();
+}
+
+JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isConstruct)
+{
+ int executableOffsetToFunction = isConstruct ? OBJECT_OFFSETOF(NativeExecutable, m_constructor) : OBJECT_OFFSETOF(NativeExecutable, m_function);
+
+ Label nativeCallThunk = align();
+
+ emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+
+#if CPU(X86)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ peek(regT1);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);
+
+ // Calling convention: f(ecx, edx, ...);
+ // Host function signature: f(ExecState*);
+ move(callFrameRegister, X86Registers::ecx);
+
+ subPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
+
+ // call the function
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT1);
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT1);
+ move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ call(Address(regT1, executableOffsetToFunction));
+
+ addPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister);
+
+#elif CPU(ARM)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ preserveReturnAddressAfterCall(regT3); // Callee preserved
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ // Calling convention: f(r0 == regT0, r1 == regT1, ...);
+ // Host function signature: f(ExecState*);
+ move(callFrameRegister, ARMRegisters::r0);
+
+ // call the function
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, ARMRegisters::r1);
+ move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ call(Address(regT2, executableOffsetToFunction));
+
+ restoreReturnAddressBeforeReturn(regT3);
+#elif CPU(SH4)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ preserveReturnAddressAfterCall(regT3); // Callee preserved
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ // Calling convention: f(r0 == regT4, r1 == regT5, ...);
+ // Host function signature: f(ExecState*);
+ move(callFrameRegister, regT4);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT5);
+ move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ loadPtr(Address(regT5, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ call(Address(regT2, executableOffsetToFunction), regT0);
+ restoreReturnAddressBeforeReturn(regT3);
+#elif CPU(MIPS)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ preserveReturnAddressAfterCall(regT3); // Callee preserved
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ // Calling convention: f(a0, a1, a2, a3);
+ // Host function signature: f(ExecState*);
+
+ // Allocate stack space for 16 bytes (8-byte aligned)
+ // 16 bytes (unused) for 4 arguments
+ subPtr(TrustedImm32(16), stackPointerRegister);
+
+ // Setup arg0
+ move(callFrameRegister, MIPSRegisters::a0);
+
+ // Call
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, MIPSRegisters::a2);
+ loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ call(Address(regT2, executableOffsetToFunction));
+
+ // Restore stack space
+ addPtr(TrustedImm32(16), stackPointerRegister);
+
+ restoreReturnAddressBeforeReturn(regT3);
+
+#else
+#error "JIT not supported on this platform."
+ UNUSED_PARAM(executableOffsetToFunction);
+ breakpoint();
+#endif // CPU(X86)
+
+ // Check for an exception
+ Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(&globalData->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
+
+ // Return.
+ ret();
+
+ // Handle an exception
+ sawException.link(this);
+
+ // Grab the return address.
+ preserveReturnAddressAfterCall(regT1);
+
+ move(TrustedImmPtr(&globalData->exceptionLocation), regT2);
+ storePtr(regT1, regT2);
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+
+ // Set the return address.
+ move(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
+ restoreReturnAddressBeforeReturn(regT1);
+
+ ret();
+
+ return nativeCallThunk;
+}
+
+JIT::CodeRef JIT::privateCompileCTINativeCall(JSGlobalData* globalData, NativeFunction func)
+{
+ Call nativeCall;
+
+ emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+
+#if CPU(X86)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ peek(regT1);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);
+
+ // Calling convention: f(ecx, edx, ...);
+ // Host function signature: f(ExecState*);
+ move(callFrameRegister, X86Registers::ecx);
+
+ subPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
+
+ move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+
+ // call the function
+ nativeCall = call();
+
+ addPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister);
+
+#elif CPU(ARM)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ preserveReturnAddressAfterCall(regT3); // Callee preserved
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ // Calling convention: f(r0 == regT0, r1 == regT1, ...);
+ // Host function signature: f(ExecState*);
+ move(callFrameRegister, ARMRegisters::r0);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, ARMRegisters::r1);
+ move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ // call the function
+ nativeCall = call();
+
+ restoreReturnAddressBeforeReturn(regT3);
+
+#elif CPU(MIPS)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ preserveReturnAddressAfterCall(regT3); // Callee preserved
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ // Calling convention: f(a0, a1, a2, a3);
+ // Host function signature: f(ExecState*);
+
+ // Allocate stack space for 16 bytes (8-byte aligned)
+ // 16 bytes (unused) for 4 arguments
+ subPtr(TrustedImm32(16), stackPointerRegister);
+
+ // Setup arg0
+ move(callFrameRegister, MIPSRegisters::a0);
+
+ // Call
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, MIPSRegisters::a2);
+ loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+
+ // call the function
+ nativeCall = call();
+
+ // Restore stack space
+ addPtr(TrustedImm32(16), stackPointerRegister);
+
+ restoreReturnAddressBeforeReturn(regT3);
+#elif CPU(SH4)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ preserveReturnAddressAfterCall(regT3); // Callee preserved
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ // Calling convention: f(r0 == regT4, r1 == regT5, ...);
+ // Host function signature: f(ExecState*);
+ move(callFrameRegister, regT4);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT5);
+ move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ loadPtr(Address(regT5, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ // call the function
+ nativeCall = call();
+
+ restoreReturnAddressBeforeReturn(regT3);
+#else
+#error "JIT not supported on this platform."
+ breakpoint();
+#endif // CPU(X86)
+
+ // Check for an exception
+ Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(&globalData->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
+
+ // Return.
+ ret();
+
+ // Handle an exception
+ sawException.link(this);
+
+ // Grab the return address.
+ preserveReturnAddressAfterCall(regT1);
+
+ move(TrustedImmPtr(&globalData->exceptionLocation), regT2);
+ storePtr(regT1, regT2);
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+
+ // Set the return address.
+ move(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
+ restoreReturnAddressBeforeReturn(regT1);
+
+ ret();
+
+ // All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
+ LinkBuffer patchBuffer(*m_globalData, this);
+
+ patchBuffer.link(nativeCall, FunctionPtr(func));
+ return patchBuffer.finalizeCode();
+}
+
+void JIT::emit_op_mov(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ if (m_codeBlock->isConstantRegisterIndex(src))
+ emitStore(dst, getConstantOperand(src));
+ else {
+ emitLoad(src, regT1, regT0);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_mov), dst, regT1, regT0);
+ }
+}
+
+void JIT::emit_op_end(Instruction* currentInstruction)
+{
+ ASSERT(returnValueRegister != callFrameRegister);
+ emitLoad(currentInstruction[1].u.operand, regT1, regT0);
+ restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
+ ret();
+}
+
+void JIT::emit_op_jmp(Instruction* currentInstruction)
+{
+ unsigned target = currentInstruction[1].u.operand;
+ addJump(jump(), target);
+}
+
+void JIT::emit_op_new_object(Instruction* currentInstruction)
+{
+ emitAllocateJSFinalObject(ImmPtr(m_codeBlock->globalObject()->emptyObjectStructure()), regT0, regT1);
+
+ emitStoreCell(currentInstruction[1].u.operand, regT0);
+}
+
+void JIT::emitSlow_op_new_object(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
+{
+ unsigned baseVal = currentInstruction[1].u.operand;
+
+ emitLoadPayload(baseVal, regT0);
+
+ // Check that baseVal is a cell.
+ emitJumpSlowCaseIfNotJSCell(baseVal);
+
+ // Check that baseVal 'ImplementsHasInstance'.
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
+ addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsHasInstance)));
+}
+
+void JIT::emit_op_instanceof(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned value = currentInstruction[2].u.operand;
+ unsigned baseVal = currentInstruction[3].u.operand;
+ unsigned proto = currentInstruction[4].u.operand;
+
+ // Load the operands into registers.
+ // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
+ emitLoadPayload(value, regT2);
+ emitLoadPayload(baseVal, regT0);
+ emitLoadPayload(proto, regT1);
+
+ // Check that value and proto are cells. baseVal must be a cell - this is checked by op_check_has_instance.
+ emitJumpSlowCaseIfNotJSCell(value);
+ emitJumpSlowCaseIfNotJSCell(proto);
+
+ // Check that prototype is an object
+ loadPtr(Address(regT1, JSCell::structureOffset()), regT3);
+ addSlowCase(emitJumpIfNotObject(regT3));
+
+ // FIXME: this check is only needed because the JSC API allows HasInstance to be overridden; we should deprecate this.
+ // Check that baseVal 'ImplementsDefaultHasInstance'.
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
+ addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance)));
+
+ // Optimistically load the result true, and start looping.
+ // Initially, regT1 still contains proto and regT2 still contains value.
+ // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
+ move(TrustedImm32(1), regT0);
+ Label loop(this);
+
+ // Load the prototype of the cell in regT2. If this is equal to regT1 - WIN!
+ // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
+ loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
+ load32(Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
+ Jump isInstance = branchPtr(Equal, regT2, regT1);
+ branchTest32(NonZero, regT2).linkTo(loop, this);
+
+ // We get here either by dropping out of the loop, or if value was not an Object. Result is false.
+ move(TrustedImm32(0), regT0);
+
+ // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
+ isInstance.link(this);
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emitSlow_op_check_has_instance(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned baseVal = currentInstruction[1].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, baseVal);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_check_has_instance);
+ stubCall.addArgument(baseVal);
+ stubCall.call();
+}
+
+void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned value = currentInstruction[2].u.operand;
+ unsigned baseVal = currentInstruction[3].u.operand;
+ unsigned proto = currentInstruction[4].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, value);
+ linkSlowCaseIfNotJSCell(iter, proto);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_instanceof);
+ stubCall.addArgument(value);
+ stubCall.addArgument(baseVal);
+ stubCall.addArgument(proto);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
+{
+ unsigned activation = currentInstruction[1].u.operand;
+ unsigned arguments = currentInstruction[2].u.operand;
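+ // The stub is skipped only when neither the activation nor the arguments
+ // object has been created for this frame.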
+ Jump activationCreated = branch32(NotEqual, tagFor(activation), TrustedImm32(JSValue::EmptyValueTag));
+ Jump argumentsNotCreated = branch32(Equal, tagFor(arguments), TrustedImm32(JSValue::EmptyValueTag));
+ activationCreated.link(this);
+ JITStubCall stubCall(this, cti_op_tear_off_activation);
+ stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.addArgument(unmodifiedArgumentsRegister(currentInstruction[2].u.operand));
+ stubCall.call();
+ argumentsNotCreated.link(this);
+}
+
+void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+
+ Jump argsNotCreated = branch32(Equal, tagFor(unmodifiedArgumentsRegister(dst)), TrustedImm32(JSValue::EmptyValueTag));
+ JITStubCall stubCall(this, cti_op_tear_off_arguments);
+ stubCall.addArgument(unmodifiedArgumentsRegister(dst));
+ stubCall.call();
+ argsNotCreated.link(this);
+}
+
+void JIT::emit_op_resolve(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.callWithValueProfiling(currentInstruction[1].u.operand, FirstProfilingSite);
+}
+
+void JIT::emit_op_to_primitive(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump isImm = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+ addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info)));
+ isImm.link(this);
+
+ if (dst != src)
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_to_primitive), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_to_primitive);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_strcat(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_strcat);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_base(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, currentInstruction[3].u.operand ? cti_op_resolve_base_strict_put : cti_op_resolve_base);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.callWithValueProfiling(currentInstruction[1].u.operand, FirstProfilingSite);
+}
+
+void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_ensure_property_exists);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve_skip);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.callWithValueProfiling(currentInstruction[1].u.operand, FirstProfilingSite);
+}
+
+void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool dynamic)
+{
+ // FIXME: Optimize to use patching instead of so many memory accesses.
+
+ unsigned dst = currentInstruction[1].u.operand;
+ void* globalObject = m_codeBlock->globalObject();
+
+ unsigned currentIndex = m_globalResolveInfoIndex++;
+ GlobalResolveInfo* resolveInfoAddress = &m_codeBlock->globalResolveInfo(currentIndex);
+
+ // Verify structure.
+ move(TrustedImmPtr(globalObject), regT0);
+ move(TrustedImmPtr(resolveInfoAddress), regT3);
+ loadPtr(Address(regT3, OBJECT_OFFSETOF(GlobalResolveInfo, structure)), regT1);
+ addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, JSCell::structureOffset())));
+
+ // Load property.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_propertyStorage)), regT2);
+ load32(Address(regT3, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), regT3);
+ load32(BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
+ load32(BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
+ emitValueProfilingSite(FirstProfilingSite);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + (dynamic ? OPCODE_LENGTH(op_resolve_global_dynamic) : OPCODE_LENGTH(op_resolve_global)), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
+
+ unsigned currentIndex = m_globalResolveInfoIndex++;
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_resolve_global);
+ stubCall.addArgument(TrustedImmPtr(ident));
+ stubCall.addArgument(Imm32(currentIndex));
+ stubCall.callWithValueProfiling(dst, SubsequentProfilingSite);
+}
+
+void JIT::emit_op_not(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoadTag(src, regT0);
+
+ emitLoad(src, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::BooleanTag)));
+ xor32(TrustedImm32(1), regT0);
+
+ emitStoreBool(dst, regT0, (dst == src));
+}
+
+void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_not);
+ stubCall.addArgument(src);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_jfalse(Instruction* currentInstruction)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(cond, regT1, regT0);
+
+ ASSERT((JSValue::BooleanTag + 1 == JSValue::Int32Tag) && !(JSValue::Int32Tag + 1));
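+    // The ASSERT above guarantees boolean and int32 are the two largest tag values, so the unsigned
+    // comparison below sends every other tag to the slow case; for both fast tags a zero payload is falsy.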
+ addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::BooleanTag)));
+ addJump(branchTest32(Zero, regT0), target);
+}
+
+void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+ // regT1 contains the tag from the hot path.
+ Jump notNumber = branch32(Above, regT1, Imm32(JSValue::LowestTag));
+
+ emitLoadDouble(cond, fpRegT0);
+ emitJumpSlowToHot(branchDoubleZeroOrNaN(fpRegT0, fpRegT1), target);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jfalse));
+
+ notNumber.link(this);
+ }
+
+ JITStubCall stubCall(this, cti_op_jtrue);
+ stubCall.addArgument(cond);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target); // Inverted.
+}
+
+void JIT::emit_op_jtrue(Instruction* currentInstruction)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(cond, regT1, regT0);
+
+ ASSERT((JSValue::BooleanTag + 1 == JSValue::Int32Tag) && !(JSValue::Int32Tag + 1));
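+    // As in op_jfalse: only the boolean and int32 tags pass the unsigned comparison below, and for both
+    // a non-zero payload is truthy.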
+ addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::BooleanTag)));
+ addJump(branchTest32(NonZero, regT0), target);
+}
+
+void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+ // regT1 contains the tag from the hot path.
+ Jump notNumber = branch32(Above, regT1, Imm32(JSValue::LowestTag));
+
+ emitLoadDouble(cond, fpRegT0);
+ emitJumpSlowToHot(branchDoubleNonZero(fpRegT0, fpRegT1), target);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jtrue));
+
+ notNumber.link(this);
+ }
+
+ JITStubCall stubCall(this, cti_op_jtrue);
+ stubCall.addArgument(cond);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
+}
+
+void JIT::emit_op_jeq_null(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ addJump(branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
+
+ Jump wasNotImmediate = jump();
+
+ // Now handle the immediate cases - undefined & null
+ isImmediate.link(this);
+
+ ASSERT((JSValue::UndefinedTag + 1 == JSValue::NullTag) && (JSValue::NullTag & 0x1));
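+    // Or-ing in the low bit maps UndefinedTag onto NullTag (and leaves NullTag unchanged), so a single
+    // compare against NullTag catches both undefined and null.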
+ or32(TrustedImm32(1), regT1);
+ addJump(branch32(Equal, regT1, TrustedImm32(JSValue::NullTag)), target);
+
+ wasNotImmediate.link(this);
+}
+
+void JIT::emit_op_jneq_null(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ addJump(branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
+
+ Jump wasNotImmediate = jump();
+
+ // Now handle the immediate cases - undefined & null
+ isImmediate.link(this);
+
+ ASSERT((JSValue::UndefinedTag + 1 == JSValue::NullTag) && (JSValue::NullTag & 0x1));
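+    // As above: or-ing in the low bit folds UndefinedTag into NullTag, so a single compare against
+    // NullTag covers both undefined and null.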
+ or32(TrustedImm32(1), regT1);
+ addJump(branch32(NotEqual, regT1, TrustedImm32(JSValue::NullTag)), target);
+
+ wasNotImmediate.link(this);
+}
+
+void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ JSCell* ptr = currentInstruction[2].u.jsCell.get();
+ unsigned target = currentInstruction[3].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ addJump(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)), target);
+ addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(ptr)), target);
+}
+
+void JIT::emit_op_jsr(Instruction* currentInstruction)
+{
+ int retAddrDst = currentInstruction[1].u.operand;
+ int target = currentInstruction[2].u.operand;
+ DataLabelPtr storeLocation = storePtrWithPatch(TrustedImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
+ addJump(jump(), target);
+ m_jsrSites.append(JSRInfo(storeLocation, label()));
+}
+
+void JIT::emit_op_sret(Instruction* currentInstruction)
+{
+ jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
+}
+
+void JIT::emit_op_eq(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
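+    // Slow cases: the tags differ, both operands are cells (possibly strings), or the tags are doubles.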
+ addSlowCase(branch32(NotEqual, regT1, regT3));
+ addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::CellTag)));
+ addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag)));
+
+ compare32(Equal, regT0, regT2, regT0);
+
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ JumpList storeResult;
+ JumpList genericCase;
+
+ genericCase.append(getSlowCase(iter)); // tags not equal
+
+ linkSlowCase(iter); // tags equal and JSCell
+ genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info)));
+ genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info)));
+
+ // String case.
+ JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
+ stubCallEqStrings.addArgument(regT0);
+ stubCallEqStrings.addArgument(regT2);
+ stubCallEqStrings.call();
+ storeResult.append(jump());
+
+ // Generic case.
+ genericCase.append(getSlowCase(iter)); // doubles
+ genericCase.link(this);
+ JITStubCall stubCallEq(this, cti_op_eq);
+ stubCallEq.addArgument(op1);
+ stubCallEq.addArgument(op2);
+ stubCallEq.call(regT0);
+
+ storeResult.link(this);
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emit_op_neq(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, regT3));
+ addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::CellTag)));
+ addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag)));
+
+ compare32(NotEqual, regT0, regT2, regT0);
+
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ JumpList storeResult;
+ JumpList genericCase;
+
+ genericCase.append(getSlowCase(iter)); // tags not equal
+
+ linkSlowCase(iter); // tags equal and JSCell
+ genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info)));
+ genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info)));
+
+ // String case.
+ JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
+ stubCallEqStrings.addArgument(regT0);
+ stubCallEqStrings.addArgument(regT2);
+ stubCallEqStrings.call(regT0);
+ storeResult.append(jump());
+
+ // Generic case.
+ genericCase.append(getSlowCase(iter)); // doubles
+ genericCase.link(this);
+ JITStubCall stubCallEq(this, cti_op_eq);
+ stubCallEq.addArgument(regT1, regT0);
+ stubCallEq.addArgument(regT3, regT2);
+ stubCallEq.call(regT0);
+
+ storeResult.link(this);
+ xor32(TrustedImm32(0x1), regT0);
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
+
+ // Bail if the tags differ, or are double.
+ addSlowCase(branch32(NotEqual, regT1, regT3));
+ addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag)));
+
+ // Jump to a slow case if both are strings.
+ Jump notCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+ Jump firstNotString = branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info));
+ addSlowCase(branchPtr(Equal, Address(regT2, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info)));
+ notCell.link(this);
+ firstNotString.link(this);
+
+ // Simply compare the payloads.
+ if (type == OpStrictEq)
+ compare32(Equal, regT0, regT2, regT0);
+ else
+ compare32(NotEqual, regT0, regT2, regT0);
+
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emit_op_stricteq(Instruction* currentInstruction)
+{
+ compileOpStrictEq(currentInstruction, OpStrictEq);
+}
+
+void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_stricteq);
+ stubCall.addArgument(src1);
+ stubCall.addArgument(src2);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_nstricteq(Instruction* currentInstruction)
+{
+ compileOpStrictEq(currentInstruction, OpNStrictEq);
+}
+
+void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_nstricteq);
+ stubCall.addArgument(src1);
+ stubCall.addArgument(src2);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_eq_null(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+ test8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined), regT1);
+
+ Jump wasNotImmediate = jump();
+
+ isImmediate.link(this);
+
+ compare32(Equal, regT1, TrustedImm32(JSValue::NullTag), regT2);
+ compare32(Equal, regT1, TrustedImm32(JSValue::UndefinedTag), regT1);
+ or32(regT2, regT1);
+
+ wasNotImmediate.link(this);
+
+ emitStoreBool(dst, regT1);
+}
+
+void JIT::emit_op_neq_null(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+ test8(Zero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined), regT1);
+
+ Jump wasNotImmediate = jump();
+
+ isImmediate.link(this);
+
+ compare32(NotEqual, regT1, TrustedImm32(JSValue::NullTag), regT2);
+ compare32(NotEqual, regT1, TrustedImm32(JSValue::UndefinedTag), regT1);
+ and32(regT2, regT1);
+
+ wasNotImmediate.link(this);
+
+ emitStoreBool(dst, regT1);
+}
+
+void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve_with_base);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.callWithValueProfiling(currentInstruction[2].u.operand, FirstProfilingSite);
+}
+
+void JIT::emit_op_resolve_with_this(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve_with_this);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.callWithValueProfiling(currentInstruction[2].u.operand, FirstProfilingSite);
+}
+
+void JIT::emit_op_throw(Instruction* currentInstruction)
+{
+ unsigned exception = currentInstruction[1].u.operand;
+ JITStubCall stubCall(this, cti_op_throw);
+ stubCall.addArgument(exception);
+ stubCall.call();
+
+#ifndef NDEBUG
+    // cti_op_throw always changes its return address,
+    // so this point in the code should never be reached.
+ breakpoint();
+#endif
+}
+
+void JIT::emit_op_get_pnames(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int i = currentInstruction[3].u.operand;
+ int size = currentInstruction[4].u.operand;
+ int breakTarget = currentInstruction[5].u.operand;
+
+ JumpList isNotObject;
+
+ emitLoad(base, regT1, regT0);
+ if (!m_codeBlock->isKnownNotImmediate(base))
+ isNotObject.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ if (base != m_codeBlock->thisRegister() || m_codeBlock->isStrictMode()) {
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ isNotObject.append(emitJumpIfNotObject(regT2));
+ }
+
+ // We could inline the case where you have a valid cache, but
+ // this call doesn't seem to be hot.
+ Label isObject(this);
+ JITStubCall getPnamesStubCall(this, cti_op_get_pnames);
+ getPnamesStubCall.addArgument(regT0);
+ getPnamesStubCall.call(dst);
+ load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
+ store32(TrustedImm32(Int32Tag), intTagFor(i));
+ store32(TrustedImm32(0), intPayloadFor(i));
+ store32(TrustedImm32(Int32Tag), intTagFor(size));
+ store32(regT3, payloadFor(size));
+ Jump end = jump();
+
+ isNotObject.link(this);
+ addJump(branch32(Equal, regT1, TrustedImm32(JSValue::NullTag)), breakTarget);
+ addJump(branch32(Equal, regT1, TrustedImm32(JSValue::UndefinedTag)), breakTarget);
+ JITStubCall toObjectStubCall(this, cti_to_object);
+ toObjectStubCall.addArgument(regT1, regT0);
+ toObjectStubCall.call(base);
+ jump().linkTo(isObject, this);
+
+ end.link(this);
+}
+
+void JIT::emit_op_next_pname(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int i = currentInstruction[3].u.operand;
+ int size = currentInstruction[4].u.operand;
+ int it = currentInstruction[5].u.operand;
+ int target = currentInstruction[6].u.operand;
+
+ JumpList callHasProperty;
+
+ Label begin(this);
+ load32(intPayloadFor(i), regT0);
+ Jump end = branch32(Equal, regT0, intPayloadFor(size));
+
+ // Grab key @ i
+ loadPtr(payloadFor(it), regT1);
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
+ load32(BaseIndex(regT2, regT0, TimesEight), regT2);
+ store32(TrustedImm32(JSValue::CellTag), tagFor(dst));
+ store32(regT2, payloadFor(dst));
+
+ // Increment i
+ add32(TrustedImm32(1), regT0);
+ store32(regT0, intPayloadFor(i));
+
+ // Verify that i is valid:
+ loadPtr(payloadFor(base), regT0);
+
+ // Test base's structure
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))));
+
+ // Test base's prototype chain
+ loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3);
+ loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
+ addJump(branchTestPtr(Zero, Address(regT3)), target);
+
+ Label checkPrototype(this);
+ callHasProperty.append(branch32(Equal, Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::NullTag)));
+ loadPtr(Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
+ loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
+ callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
+ addPtr(TrustedImm32(sizeof(Structure*)), regT3);
+ branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
+
+ // Continue loop.
+ addJump(jump(), target);
+
+ // Slow case: Ask the object if i is valid.
+ callHasProperty.link(this);
+ loadPtr(addressFor(dst), regT1);
+ JITStubCall stubCall(this, cti_has_property);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+
+ // Test for valid key.
+ addJump(branchTest32(NonZero, regT0), target);
+ jump().linkTo(begin, this);
+
+ // End of loop.
+ end.link(this);
+}
+
+void JIT::emit_op_push_scope(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_push_scope);
+ stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_pop_scope(Instruction*)
+{
+ JITStubCall(this, cti_op_pop_scope).call();
+}
+
+void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump isInt32 = branch32(Equal, regT1, TrustedImm32(JSValue::Int32Tag));
+ addSlowCase(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)));
+ isInt32.link(this);
+
+ if (src != dst)
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_to_jsnumber), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_to_jsnumber);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_push_new_scope);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(currentInstruction[3].u.operand);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_catch(Instruction* currentInstruction)
+{
+ // cti_op_throw returns the callFrame for the handler.
+ move(regT0, callFrameRegister);
+
+ // Now store the exception returned by cti_op_throw.
+ loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(struct JITStackFrame, globalData)), regT3);
+ load32(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+ load32(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+ store32(TrustedImm32(JSValue().payload()), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ store32(TrustedImm32(JSValue().tag()), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+
+ unsigned exception = currentInstruction[1].u.operand;
+ emitStore(exception, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_catch), exception, regT1, regT0);
+}
+
+void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_jmp_scopes);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.call();
+ addJump(jump(), currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_switch_imm(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+ JITStubCall stubCall(this, cti_op_switch_imm);
+ stubCall.addArgument(scrutinee);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_switch_char(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+ JITStubCall stubCall(this, cti_op_switch_char);
+ stubCall.addArgument(scrutinee);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_switch_string(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset));
+
+ JITStubCall stubCall(this, cti_op_switch_string);
+ stubCall.addArgument(scrutinee);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_throw_reference_error(Instruction* currentInstruction)
+{
+ unsigned message = currentInstruction[1].u.operand;
+
+ JITStubCall stubCall(this, cti_op_throw_reference_error);
+ stubCall.addArgument(m_codeBlock->getConstant(message));
+ stubCall.call();
+}
+
+void JIT::emit_op_debug(Instruction* currentInstruction)
+{
+#if ENABLE(DEBUG_WITH_BREAKPOINT)
+ UNUSED_PARAM(currentInstruction);
+ breakpoint();
+#else
+ JITStubCall stubCall(this, cti_op_debug);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call();
+#endif
+}
+
+void JIT::emit_op_enter(Instruction*)
+{
+ // Even though JIT code doesn't use them, we initialize our constant
+ // registers to zap stale pointers, to avoid unnecessarily prolonging
+ // object lifetime and increasing GC pressure.
+ for (int i = 0; i < m_codeBlock->m_numVars; ++i)
+ emitStore(i, jsUndefined());
+}
+
+void JIT::emit_op_create_activation(Instruction* currentInstruction)
+{
+ unsigned activation = currentInstruction[1].u.operand;
+
+ Jump activationCreated = branch32(NotEqual, tagFor(activation), TrustedImm32(JSValue::EmptyValueTag));
+ JITStubCall(this, cti_op_push_activation).call(activation);
+ activationCreated.link(this);
+}
+
+void JIT::emit_op_create_arguments(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
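+    // The arguments object is created lazily; a non-empty tag means it already exists, so skip creation.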
+ Jump argsCreated = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag));
+
+ JITStubCall(this, cti_op_create_arguments).call();
+ emitStore(dst, regT1, regT0);
+ emitStore(unmodifiedArgumentsRegister(dst), regT1, regT0);
+
+ argsCreated.link(this);
+}
+
+void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ emitStore(dst, JSValue());
+}
+
+void JIT::emit_op_get_callee(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT0);
+ emitStoreCell(dst, regT0);
+}
+
+void JIT::emit_op_create_this(Instruction* currentInstruction)
+{
+ emitLoad(currentInstruction[2].u.operand, regT1, regT0);
+ emitJumpSlowCaseIfNotJSCell(currentInstruction[2].u.operand, regT1);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+ addSlowCase(emitJumpIfNotObject(regT1));
+
+ // now we know that the prototype is an object, but we don't know if it's got an
+ // inheritor ID
+
+ loadPtr(Address(regT0, JSObject::offsetOfInheritorID()), regT2);
+ addSlowCase(branchTestPtr(Zero, regT2));
+
+ // now regT2 contains the inheritorID, which is the structure that the newly
+ // allocated object will have.
+
+ emitAllocateJSFinalObject(regT2, regT0, regT1);
+
+ emitStoreCell(currentInstruction[1].u.operand, regT0);
+}
+
+void JIT::emitSlow_op_create_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCaseIfNotJSCell(iter, currentInstruction[2].u.operand); // not a cell
+ linkSlowCase(iter); // not an object
+ linkSlowCase(iter); // doesn't have an inheritor ID
+ linkSlowCase(iter); // allocation failed
+ unsigned protoRegister = currentInstruction[2].u.operand;
+ emitLoad(protoRegister, regT1, regT0);
+ JITStubCall stubCall(this, cti_op_create_this);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_convert_this(Instruction* currentInstruction)
+{
+ unsigned thisRegister = currentInstruction[1].u.operand;
+
+ emitLoad(thisRegister, regT1, regT0);
+
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ addSlowCase(branchPtr(Equal, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info)));
+
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_convert_this), thisRegister, regT1, regT0);
+}
+
+void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ void* globalThis = m_codeBlock->globalObject()->globalScopeChain()->globalThis.get();
+ unsigned thisRegister = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter);
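+    // An undefined |this| maps directly to the cached global 'this' object; anything else (including
+    // null and strings) falls through to the cti_op_convert_this stub.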
+ Jump isNotUndefined = branch32(NotEqual, regT1, TrustedImm32(JSValue::UndefinedTag));
+ move(TrustedImmPtr(globalThis), regT0);
+ move(TrustedImm32(JSValue::CellTag), regT1);
+ emitStore(thisRegister, regT1, regT0);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_convert_this));
+
+ isNotUndefined.link(this);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_convert_this);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(thisRegister);
+}
+
+void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
+{
+ peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
+ Jump noProfiler = branchTestPtr(Zero, Address(regT2));
+
+ JITStubCall stubCall(this, cti_op_profile_will_call);
+ stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.call();
+ noProfiler.link(this);
+}
+
+void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
+{
+ peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
+ Jump noProfiler = branchTestPtr(Zero, Address(regT2));
+
+ JITStubCall stubCall(this, cti_op_profile_did_call);
+ stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.call();
+ noProfiler.link(this);
+}
+
+void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int argumentsRegister = currentInstruction[2].u.operand;
+ addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), TrustedImm32(JSValue::EmptyValueTag)));
+ load32(payloadFor(RegisterFile::ArgumentCount), regT0);
+ sub32(TrustedImm32(1), regT0);
+ emitStoreInt32(dst, regT0);
+}
+
+void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int ident = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_get_by_id_generic);
+ stubCall.addArgument(base);
+ stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int argumentsRegister = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
+ addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), TrustedImm32(JSValue::EmptyValueTag)));
+ emitLoad(property, regT1, regT2);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ add32(TrustedImm32(1), regT2);
+ // regT2 now contains the integer index of the argument we want, including this
+ load32(payloadFor(RegisterFile::ArgumentCount), regT3);
+ addSlowCase(branch32(AboveOrEqual, regT2, regT3));
+
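+    // Negate the (this-inclusive) index and address the argument relative to the 'this' slot in the call frame.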
+ neg32(regT2);
+ loadPtr(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0);
+ loadPtr(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT1);
+ emitStore(dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned arguments = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter);
+ Jump skipArgumentsCreation = jump();
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall(this, cti_op_create_arguments).call();
+ emitStore(arguments, regT1, regT0);
+ emitStore(unmodifiedArgumentsRegister(arguments), regT1, regT0);
+
+ skipArgumentsCreation.link(this);
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(arguments);
+ stubCall.addArgument(property);
+ stubCall.call(dst);
+}
+
+#if ENABLE(JIT_USE_SOFT_MODULO)
+void JIT::softModulo()
+{
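+    // Software helper for op_mod: on entry the dividend is in regT0 and the divisor in regT2;
+    // the remainder is returned in regT0.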
+ move(regT2, regT3);
+ move(regT0, regT2);
+ move(TrustedImm32(0), regT1);
+ JumpList exitBranch;
+
+    // Record whether the operands are negative, for the remainder fix-up below
+ Jump positiveRegT3 = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
+ neg32(regT3);
+ xor32(TrustedImm32(1), regT1);
+ positiveRegT3.link(this);
+
+ Jump positiveRegT2 = branch32(GreaterThanOrEqual, regT2, TrustedImm32(0));
+ neg32(regT2);
+ xor32(TrustedImm32(2), regT1);
+ positiveRegT2.link(this);
+
+    // Save the condition for a negative remainder
+ push(regT1);
+
+ exitBranch.append(branch32(LessThan, regT2, regT3));
+
+ // Power of two fast case
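+    // If the divisor is a power of two, the remainder is just dividend & (divisor - 1).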
+ move(regT3, regT0);
+ sub32(TrustedImm32(1), regT0);
+ Jump notPowerOfTwo = branchTest32(NonZero, regT0, regT3);
+ and32(regT0, regT2);
+ exitBranch.append(jump());
+
+ notPowerOfTwo.link(this);
+
+#if CPU(X86) || CPU(X86_64)
+ move(regT2, regT0);
+ m_assembler.cdq();
+ m_assembler.idivl_r(regT3);
+ move(regT1, regT2);
+#elif CPU(MIPS)
+ m_assembler.div(regT2, regT3);
+ m_assembler.mfhi(regT2);
+#else
+ countLeadingZeros32(regT2, regT0);
+ countLeadingZeros32(regT3, regT1);
+ sub32(regT0, regT1);
+
+ Jump useFullTable = branch32(Equal, regT1, TrustedImm32(31));
+
+ neg32(regT1);
+ add32(TrustedImm32(31), regT1);
+
+ int elementSizeByShift = -1;
+#if CPU(ARM)
+ elementSizeByShift = 3;
+#else
+#error "JIT_USE_SOFT_MODULO not yet supported on this platform."
+#endif
+ relativeTableJump(regT1, elementSizeByShift);
+
+ useFullTable.link(this);
+    // Modulo table: at each step conditionally subtract (divisor << i), peeling one quotient bit per iteration.
+ for (int i = 31; i > 0; --i) {
+#if CPU(ARM_TRADITIONAL)
+ m_assembler.cmp_r(regT2, m_assembler.lsl(regT3, i));
+ m_assembler.sub_r(regT2, regT2, m_assembler.lsl(regT3, i), ARMAssembler::CS);
+#elif CPU(ARM_THUMB2)
+ ShiftTypeAndAmount shift(SRType_LSL, i);
+ m_assembler.sub_S(regT1, regT2, regT3, shift);
+ m_assembler.it(ARMv7Assembler::ConditionCS);
+ m_assembler.mov(regT2, regT1);
+#else
+#error "JIT_USE_SOFT_MODULO not yet supported on this platform."
+#endif
+ }
+
+ Jump lower = branch32(Below, regT2, regT3);
+ sub32(regT3, regT2);
+ lower.link(this);
+#endif
+
+ exitBranch.link(this);
+
+    // Check for a negative remainder
+ pop(regT1);
+ Jump positiveResult = branch32(Equal, regT1, TrustedImm32(0));
+ neg32(regT2);
+ positiveResult.link(this);
+
+ move(regT2, regT0);
+ ret();
+}
+#endif // ENABLE(JIT_USE_SOFT_MODULO)
+
+} // namespace JSC
+
+#endif // USE(JSVALUE32_64)
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
new file mode 100644
index 000000000..48951e879
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -0,0 +1,1179 @@
+/*
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#include "JIT.h"
+
+#include "CodeBlock.h"
+#include "GetterSetter.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "JSPropertyNameIterator.h"
+#include "Interpreter.h"
+#include "LinkBuffer.h"
+#include "RepatchBuffer.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+#if USE(JSVALUE64)
+
+JIT::CodeRef JIT::stringGetByValStubGenerator(JSGlobalData* globalData)
+{
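+    // Fast path for indexing into a JSString: returns the single-character string for the index in regT1
+    // of the string in regT0, or 0 in regT0 when the fast path cannot handle the access.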
+ JSInterfaceJIT jit;
+ JumpList failures;
+ failures.append(jit.branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info)));
+
+ // Load string length to regT2, and start the process of loading the data pointer into regT0
+ jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
+ jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
+ failures.append(jit.branchTest32(Zero, regT0));
+
+ // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
+ failures.append(jit.branch32(AboveOrEqual, regT1, regT2));
+
+ // Load the character
+ JumpList is16Bit;
+ JumpList cont8Bit;
+ // Load the string flags
+ jit.loadPtr(Address(regT0, ThunkHelpers::stringImplFlagsOffset()), regT2);
+ jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
+ is16Bit.append(jit.branchTest32(Zero, regT2, TrustedImm32(ThunkHelpers::stringImpl8BitFlag())));
+ jit.load8(BaseIndex(regT0, regT1, TimesOne, 0), regT0);
+ cont8Bit.append(jit.jump());
+ is16Bit.link(&jit);
+ jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);
+ cont8Bit.link(&jit);
+
+ failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
+ jit.move(TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
+ jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
+ jit.ret();
+
+ failures.link(&jit);
+ jit.move(TrustedImm32(0), regT0);
+ jit.ret();
+
+ LinkBuffer patchBuffer(*globalData, &jit);
+ return patchBuffer.finalizeCode();
+}
+
+void JIT::emit_op_get_by_val(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegisters(base, regT0, property, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+
+ // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
+    // We check the value as if it were a uint32 against the m_vectorLength - which will always fail if
+    // the number was negative, since m_vectorLength is always less than INT_MAX (because the total allocation
+    // size is always less than 4GB). As such, zero-extending will have been correct (and extending the value
+    // to 64 bits is necessary since it's used in the address calculation). We zero-extend rather than sign-
+    // extend since it makes it easier to re-tag the value in the slow case.
+ zeroExtend32ToPtr(regT1, regT1);
+
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+ addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSArray::s_info)));
+
+ loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
+ addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, JSArray::vectorLengthOffset())));
+
+ loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
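+    // An empty (zero) slot marks a hole in the array; take the slow case so the prototype chain is consulted.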
+ addSlowCase(branchTestPtr(Zero, regT0));
+
+ emitValueProfilingSite(FirstProfilingSite);
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ Jump nonCell = jump();
+ linkSlowCase(iter); // base array check
+ Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info));
+ emitNakedCall(CodeLocationLabel(m_globalData->getCTIStub(stringGetByValStubGenerator).code()));
+ Jump failed = branchTestPtr(Zero, regT0);
+ emitPutVirtualRegister(dst, regT0);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
+ failed.link(this);
+ notString.link(this);
+ nonCell.link(this);
+
+ linkSlowCase(iter); // vector length check
+ linkSlowCase(iter); // empty value
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base, regT2);
+ stubCall.addArgument(property, regT2);
+ stubCall.call(dst);
+
+ emitValueProfilingSite(SubsequentProfilingSite);
+}
+
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch)
+{
+ loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), scratch);
+ loadPtr(BaseIndex(scratch, offset, ScalePtr, 0), result);
+}
+
+void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+ unsigned expected = currentInstruction[4].u.operand;
+ unsigned iter = currentInstruction[5].u.operand;
+ unsigned i = currentInstruction[6].u.operand;
+
+ emitGetVirtualRegister(property, regT0);
+ addSlowCase(branchPtr(NotEqual, regT0, addressFor(expected)));
+ emitGetVirtualRegisters(base, regT0, iter, regT1);
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+
+ // Test base's structure
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
+ load32(addressFor(i), regT3);
+ sub32(TrustedImm32(1), regT3);
+ addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
+ compileGetDirectOffset(regT0, regT0, regT3, regT1);
+
+ emitPutVirtualRegister(dst, regT0);
+}
+
+void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base, regT2);
+ stubCall.addArgument(property, regT2);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_put_by_val(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegisters(base, regT0, property, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ // See comment in op_get_by_val.
+ zeroExtend32ToPtr(regT1, regT1);
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+ addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSArray::s_info)));
+ addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, JSArray::vectorLengthOffset())));
+
+ loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
+ Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+
+ Label storeResult(this);
+ emitGetVirtualRegister(value, regT3);
+ storePtr(regT3, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+ Jump end = jump();
+
+ empty.link(this);
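+    // Writing into a hole: bump m_numValuesInVector, and if the index is past the current length,
+    // grow m_length to index + 1 before storing.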
+ add32(TrustedImm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+ branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
+
+ add32(TrustedImm32(1), regT1);
+ store32(regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+ sub32(TrustedImm32(1), regT1);
+ jump().linkTo(storeResult, this);
+
+ end.link(this);
+
+ emitWriteBarrier(regT0, regT3, regT1, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
+}
+
+void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // base not array check
+ linkSlowCase(iter); // in vector check
+
+ JITStubCall stubPutByValCall(this, cti_op_put_by_val);
+ stubPutByValCall.addArgument(regT0);
+ stubPutByValCall.addArgument(property, regT2);
+ stubPutByValCall.addArgument(value, regT2);
+ stubPutByValCall.call();
+}
+
+void JIT::emit_op_put_by_index(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_put_by_index);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.call();
+}
+
+void JIT::emit_op_put_getter(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_put_getter);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.call();
+}
+
+void JIT::emit_op_put_setter(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_put_setter);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.call();
+}
+
+void JIT::emit_op_del_by_id(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_del_by_id);
+ stubCall.addArgument(currentInstruction[2].u.operand, regT2);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_method_check(Instruction* currentInstruction)
+{
+ // Assert that the following instruction is a get_by_id.
+ ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
+
+ currentInstruction += OPCODE_LENGTH(op_method_check);
+ unsigned resultVReg = currentInstruction[1].u.operand;
+ unsigned baseVReg = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ emitGetVirtualRegister(baseVReg, regT0);
+
+ // Do the method check - check the object & its prototype's structure inline (this is the common case).
+ m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_bytecodeOffset, m_propertyAccessCompilationInfo.size()));
+ MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
+
+ Jump notCell = emitJumpIfNotJSCell(regT0);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
+
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), info.structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(TrustedImmPtr(0), regT1);
+ Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, JSCell::structureOffset()), protoStructureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+
+ // This will be relinked to load the function without doing a load.
+ DataLabelPtr putFunction = moveWithPatch(TrustedImmPtr(0), regT0);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
+
+ Jump match = jump();
+
+ ASSERT_JIT_OFFSET_UNUSED(protoObj, differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
+ ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct);
+ ASSERT_JIT_OFFSET_UNUSED(putFunction, differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction);
+
+ // Link the failure cases here.
+ notCell.link(this);
+ structureCheck.link(this);
+ protoStructureCheck.link(this);
+
+    // Do a regular(ish) get_by_id (the slow case will be linked to
+    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
+ compileGetByIdHotPath(baseVReg, ident);
+
+ match.link(this);
+ emitValueProfilingSite(FirstProfilingSite);
+ emitPutVirtualRegister(resultVReg);
+
+ // We've already generated the following get_by_id, so make sure it's skipped over.
+ m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
+}
+
+void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ currentInstruction += OPCODE_LENGTH(op_method_check);
+ unsigned resultVReg = currentInstruction[1].u.operand;
+ unsigned baseVReg = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);
+ emitValueProfilingSite(SubsequentProfilingSite);
+
+ // We've already generated the following get_by_id, so make sure it's skipped over.
+ m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
+}
+
+void JIT::emit_op_get_by_id(Instruction* currentInstruction)
+{
+ unsigned resultVReg = currentInstruction[1].u.operand;
+ unsigned baseVReg = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ emitGetVirtualRegister(baseVReg, regT0);
+ compileGetByIdHotPath(baseVReg, ident);
+ emitValueProfilingSite(FirstProfilingSite);
+ emitPutVirtualRegister(resultVReg);
+}
+
+void JIT::compileGetByIdHotPath(int baseVReg, Identifier*)
+{
+    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
+    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
+    // to array-length / prototype access trampolines), and finally we also plant the property-map access offset as a label
+    // to jump back to if one of these trampolines finds a match.
+
+ emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+
+ Label hotPathBegin(this);
+ m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo());
+ m_propertyAccessCompilationInfo.last().bytecodeIndex = m_bytecodeOffset;
+ m_propertyAccessCompilationInfo.last().hotPathBegin = hotPathBegin;
+
+ DataLabelPtr structureToCompare;
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ addSlowCase(structureCheck);
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase)
+
+ loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT0);
+ DataLabelCompact displacementLabel = loadPtrWithCompactAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
+ ASSERT_JIT_OFFSET_UNUSED(displacementLabel, differenceBetween(hotPathBegin, displacementLabel), patchOffsetGetByIdPropertyMapOffset);
+
+ Label putResult(this);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult);
+}
+
+void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned resultVReg = currentInstruction[1].u.operand;
+ unsigned baseVReg = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
+ emitValueProfilingSite(SubsequentProfilingSite);
+}
+
+void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
+{
+ // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
+    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
+    // of the call (which we can use to look up the patch information), but should an array-length or
+    // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
+ // the distance from the call to the head of the slow case.
+
+ linkSlowCaseIfNotJSCell(iter, baseVReg);
+ linkSlowCase(iter);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
+
+#ifndef NDEBUG
+ Label coldPathBegin(this);
+#endif
+ JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(ident));
+ Call call = stubCall.call(resultVReg);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
+
+ ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);
+
+ // Track the location of the call; this will be used to recover patch information.
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].callReturnLocation = call;
+}
+
+void JIT::emit_op_put_by_id(Instruction* currentInstruction)
+{
+ unsigned baseVReg = currentInstruction[1].u.operand;
+ unsigned valueVReg = currentInstruction[3].u.operand;
+
+    // In order to be able to patch both the Structure and the object offset, we store one pointer
+    // ('hotPathBegin') to just after the point where the arguments have been loaded into registers, and we
+    // generate code such that the Structure & offset are always at the same distance from this.
+
+ emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
+
+ // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
+ emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
+
+ Label hotPathBegin(this);
+ m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo());
+ m_propertyAccessCompilationInfo.last().bytecodeIndex = m_bytecodeOffset;
+ m_propertyAccessCompilationInfo.last().hotPathBegin = hotPathBegin;
+
+ // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
+ DataLabelPtr structureToCompare;
+ addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);
+
+ loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT2);
+ DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT2, patchPutByIdDefaultOffset));
+
+ END_UNINTERRUPTED_SEQUENCE(sequencePutById);
+
+ emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
+
+ ASSERT_JIT_OFFSET_UNUSED(displacementLabel, differenceBetween(hotPathBegin, displacementLabel), patchOffsetPutByIdPropertyMapOffset);
+}
+
+void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned baseVReg = currentInstruction[1].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
+ unsigned direct = currentInstruction[8].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, baseVReg);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(ident));
+ stubCall.addArgument(regT1);
+ Call call = stubCall.call();
+
+ // Track the location of the call; this will be used to recover patch information.
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].callReturnLocation = call;
+}
+
+// Compile a store into an object's property storage. May overwrite the
+// value in the base register.
+void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, size_t cachedOffset)
+{
+ int offset = cachedOffset * sizeof(JSValue);
+ loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), base);
+ storePtr(value, Address(base, offset));
+}
+
+// Compile a load from an object's property storage. May overwrite base.
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, size_t cachedOffset)
+{
+ int offset = cachedOffset * sizeof(JSValue);
+ loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), result);
+ loadPtr(Address(result, offset), result);
+}
+
+void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, size_t cachedOffset)
+{
+ loadPtr(base->addressOfPropertyStorage(), result);
+ loadPtr(Address(result, cachedOffset * sizeof(WriteBarrier<Unknown>)), result);
+}
+
+void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
+{
+ JumpList failureCases;
+ // Check eax is an object of the right Structure.
+ failureCases.append(emitJumpIfNotJSCell(regT0));
+ failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));
+
+ testPrototype(oldStructure->storedPrototype(), failureCases);
+
+ ASSERT(oldStructure->storedPrototype().isNull() || oldStructure->storedPrototype().asCell()->structure() == chain->head()->get());
+
+ // ecx = baseObject->m_structure
+ if (!direct) {
+ for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
+ ASSERT((*it)->storedPrototype().isNull() || (*it)->storedPrototype().asCell()->structure() == it[1].get());
+ testPrototype((*it)->storedPrototype(), failureCases);
+ }
+ }
+
+ Call callTarget;
+
+ // emit a call only if storage realloc is needed
+ bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
+ if (willNeedStorageRealloc) {
+        // This trampoline was called like a JIT stub; before we can call again we need to
+        // remove the return address from the stack, to prevent the stack from becoming misaligned.
+ preserveReturnAddressAfterCall(regT3);
+
+ JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
+ stubCall.skipArgument(); // base
+ stubCall.skipArgument(); // ident
+ stubCall.skipArgument(); // value
+ stubCall.addArgument(TrustedImm32(oldStructure->propertyStorageCapacity()));
+ stubCall.addArgument(TrustedImm32(newStructure->propertyStorageCapacity()));
+ stubCall.call(regT0);
+ emitGetJITStubArg(2, regT1);
+
+ restoreReturnAddressBeforeReturn(regT3);
+ }
+
+ // Planting the new structure triggers the write barrier so we need
+ // an unconditional barrier here.
+ emitWriteBarrier(regT0, regT1, regT2, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);
+
+ ASSERT(newStructure->classInfo() == oldStructure->classInfo());
+ storePtr(TrustedImmPtr(newStructure), Address(regT0, JSCell::structureOffset()));
+ compilePutDirectOffset(regT0, regT1, cachedOffset);
+
+ ret();
+
+ ASSERT(!failureCases.empty());
+ failureCases.link(this);
+ restoreArgumentReferenceForTrampoline();
+ Call failureCall = tailRecursiveCall();
+
+ LinkBuffer patchBuffer(*m_globalData, this);
+
+ patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
+
+ if (willNeedStorageRealloc) {
+ ASSERT(m_calls.size() == 1);
+ patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
+ }
+
+ stubInfo->stubRoutine = patchBuffer.finalizeCode();
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine.code()));
+}
+
+void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
+
+ int offset = sizeof(JSValue) * cachedOffset;
+
+    // Patch the offset into the property map to load from, then patch the Structure to look for.
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset), offset);
+}
+
+void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
+ // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
+
+ int offset = sizeof(JSValue) * cachedOffset;
+
+    // Patch the offset into the property map to load from, then patch the Structure to look for.
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset);
+}
+
+void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
+{
+ StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
+
+    // Check that regT0 is an array.
+ Jump failureCases1 = branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSArray::s_info));
+
+ // Checks out okay! - get the length from the storage
+ loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
+ load32(Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
+ Jump failureCases2 = branch32(LessThan, regT2, TrustedImm32(0));
+
+ emitFastArithIntToImmNoCheck(regT2, regT0);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(*m_globalData, this);
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ patchBuffer.link(failureCases1, slowCaseBegin);
+ patchBuffer.link(failureCases2, slowCaseBegin);
+
+    // On success, return to the hot path code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ // Track the stub we have created so that it will be deleted later.
+ stubInfo->stubRoutine = patchBuffer.finalizeCode();
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));
+
+    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
+}
+
+void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+{
+ // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
+    // referencing the prototype object - let's speculatively load its table nice and early!)
+ JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+
+    // Check that regT0 is an object of the right Structure.
+ Jump failureCases1 = checkStructure(regT0, structure);
+
+ // Check the prototype object's Structure had not changed.
+ move(TrustedImmPtr(protoObject), regT3);
+ Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));
+
+ bool needsStubLink = false;
+
+ // Checks out okay!
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(protoObject, regT0, cachedOffset);
+ Jump success = jump();
+ LinkBuffer patchBuffer(*m_globalData, this);
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ patchBuffer.link(failureCases1, slowCaseBegin);
+ patchBuffer.link(failureCases2, slowCaseBegin);
+
+    // On success, return to the hot path code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+ // Track the stub we have created so that it will be deleted later.
+ stubInfo->stubRoutine = patchBuffer.finalizeCode();
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));
+
+    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+}
+
+void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+{
+ Jump failureCase = checkStructure(regT0, structure);
+ bool needsStubLink = false;
+ bool isDirect = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(regT0, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else {
+ isDirect = true;
+ compileGetDirectOffset(regT0, regT0, cachedOffset);
+ }
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(*m_globalData, this);
+
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructures->list[currentIndex - 1].stubRoutine.code());
+ if (!lastProtoBegin)
+ lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+
+ patchBuffer.link(failureCase, lastProtoBegin);
+
+    // On success, return to the hot path code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ MacroAssemblerCodeRef stubCode = patchBuffer.finalizeCode();
+
+ polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubCode, structure, isDirect);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode.code()));
+}
+
+void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
+{
+ // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
+    // referencing the prototype object - let's speculatively load its table nice and early!)
+ JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+
+    // Check that regT0 is an object of the right Structure.
+ Jump failureCases1 = checkStructure(regT0, structure);
+
+ // Check the prototype object's Structure had not changed.
+ move(TrustedImmPtr(protoObject), regT3);
+ Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));
+
+ // Checks out okay!
+ bool needsStubLink = false;
+ bool isDirect = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else {
+ isDirect = true;
+ compileGetDirectOffset(protoObject, regT0, cachedOffset);
+ }
+
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(*m_globalData, this);
+
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());
+ patchBuffer.link(failureCases1, lastProtoBegin);
+ patchBuffer.link(failureCases2, lastProtoBegin);
+
+    // On success, return to the hot path code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ MacroAssemblerCodeRef stubCode = patchBuffer.finalizeCode();
+ prototypeStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubCode, structure, prototypeStructure, isDirect);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode.code()));
+}
+
+void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
+{
+ ASSERT(count);
+ JumpList bucketsOfFail;
+
+    // Check that regT0 is an object of the right Structure.
+ Jump baseObjectCheck = checkStructure(regT0, structure);
+ bucketsOfFail.append(baseObjectCheck);
+
+ Structure* currStructure = structure;
+ WriteBarrier<Structure>* it = chain->head();
+ JSObject* protoObject = 0;
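+    // Walk the prototype chain, checking that each prototype's Structure still matches the chain recorded when this stub was compiled.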
+ for (unsigned i = 0; i < count; ++i, ++it) {
+ protoObject = asObject(currStructure->prototypeForLookup(callFrame));
+ currStructure = it->get();
+ testPrototype(protoObject, bucketsOfFail);
+ }
+ ASSERT(protoObject);
+
+ bool needsStubLink = false;
+ bool isDirect = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else {
+ isDirect = true;
+ compileGetDirectOffset(protoObject, regT0, cachedOffset);
+ }
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(*m_globalData, this);
+
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());
+
+ patchBuffer.link(bucketsOfFail, lastProtoBegin);
+
+    // On success, return to the hot path code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeRef stubRoutine = patchBuffer.finalizeCode();
+
+ // Track the stub we have created so that it will be deleted later.
+ prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain, isDirect);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
+}
+
+void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+{
+ ASSERT(count);
+
+ JumpList bucketsOfFail;
+
+    // Check that regT0 is an object of the right Structure.
+ bucketsOfFail.append(checkStructure(regT0, structure));
+
+ Structure* currStructure = structure;
+ WriteBarrier<Structure>* it = chain->head();
+ JSObject* protoObject = 0;
+ for (unsigned i = 0; i < count; ++i, ++it) {
+ protoObject = asObject(currStructure->prototypeForLookup(callFrame));
+ currStructure = it->get();
+ testPrototype(protoObject, bucketsOfFail);
+ }
+ ASSERT(protoObject);
+
+ bool needsStubLink = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(protoObject, regT0, cachedOffset);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(*m_globalData, this);
+
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
+
+    // On success, return to the hot path code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ // Track the stub we have created so that it will be deleted later.
+ CodeRef stubRoutine = patchBuffer.finalizeCode();
+ stubInfo->stubRoutine = stubRoutine;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
+
+    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+}
+
+void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
+{
+ int skip = currentInstruction[3].u.operand;
+
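+    // Walk 'skip' links up the scope chain; for function code that needs a full scope chain, the first hop is taken only if the activation has actually been created.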
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
+ bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
+ activationNotCreated.link(this);
+ }
+ while (skip--)
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT0);
+ loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
+ loadPtr(Address(regT0, currentInstruction[2].u.operand * sizeof(Register)), regT0);
+ emitValueProfilingSite(FirstProfilingSite);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
+{
+ int skip = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1);
+ bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
+ activationNotCreated.link(this);
+ }
+ while (skip--)
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
+
+ emitWriteBarrier(regT1, regT0, regT2, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+
+ loadPtr(Address(regT1, JSVariableObject::offsetOfRegisters()), regT1);
+ storePtr(regT0, Address(regT1, currentInstruction[1].u.operand * sizeof(Register)));
+}
+
+void JIT::emit_op_get_global_var(Instruction* currentInstruction)
+{
+ JSVariableObject* globalObject = m_codeBlock->globalObject();
+ loadPtr(&globalObject->m_registers, regT0);
+ loadPtr(Address(regT0, currentInstruction[2].u.operand * sizeof(Register)), regT0);
+ emitValueProfilingSite(FirstProfilingSite);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_put_global_var(Instruction* currentInstruction)
+{
+ JSGlobalObject* globalObject = m_codeBlock->globalObject();
+
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+
+ move(TrustedImmPtr(globalObject), regT1);
+ loadPtr(Address(regT1, JSVariableObject::offsetOfRegisters()), regT1);
+ storePtr(regT0, Address(regT1, currentInstruction[1].u.operand * sizeof(Register)));
+ emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+}
+
+void JIT::resetPatchGetById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
+{
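+    // Restore the inline cache to its unpatched state: relink the slow-path call back to cti_op_get_by_id, plant a Structure pointer that can never match, and point the patchable jump back at the slow case.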
+ repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_get_by_id);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), reinterpret_cast<void*>(-1));
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset), 0);
+ repatchBuffer.relink(stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase), stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
+}
+
+void JIT::resetPatchPutById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
+{
+ if (isDirectPutById(stubInfo))
+ repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id_direct);
+ else
+ repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), reinterpret_cast<void*>(-1));
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetPutByIdPropertyMapOffset), 0);
+}
+
+#endif // USE(JSVALUE64)
+
+void JIT::emitWriteBarrier(RegisterID owner, RegisterID value, RegisterID scratch, RegisterID scratch2, WriteBarrierMode mode, WriteBarrierUseKind useKind)
+{
+ UNUSED_PARAM(owner);
+ UNUSED_PARAM(scratch);
+ UNUSED_PARAM(scratch2);
+ UNUSED_PARAM(useKind);
+ UNUSED_PARAM(value);
+ UNUSED_PARAM(mode);
+ ASSERT(owner != scratch);
+ ASSERT(owner != scratch2);
+
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ emitCount(WriteBarrierCounters::jitCounterFor(useKind));
+#endif
+
+#if ENABLE(GGC)
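+    // Approximate generational barrier: locate the owner's MarkedBlock, test the owner's
+    // (approximate) mark byte as a cheap liveness filter, and if it is set dirty the card
+    // covering the owner so the collector will rescan it.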
+ Jump filterCells;
+ if (mode == ShouldFilterImmediates)
+ filterCells = emitJumpIfNotJSCell(value);
+ move(owner, scratch);
+ andPtr(TrustedImm32(static_cast<int32_t>(MarkedBlock::blockMask)), scratch);
+ move(owner, scratch2);
+    // Consume an additional 8 bits as we're using an approximate filter.
+ rshift32(TrustedImm32(MarkedBlock::atomShift + 8), scratch2);
+ andPtr(TrustedImm32(MarkedBlock::atomMask >> 8), scratch2);
+ Jump filter = branchTest8(Zero, BaseIndex(scratch, scratch2, TimesOne, MarkedBlock::offsetOfMarks()));
+ move(owner, scratch2);
+ rshift32(TrustedImm32(MarkedBlock::cardShift), scratch2);
+ andPtr(TrustedImm32(MarkedBlock::cardMask), scratch2);
+ store8(TrustedImm32(1), BaseIndex(scratch, scratch2, TimesOne, MarkedBlock::offsetOfCards()));
+ filter.link(this);
+ if (mode == ShouldFilterImmediates)
+ filterCells.link(this);
+#endif
+}
+
+void JIT::emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode mode, WriteBarrierUseKind useKind)
+{
+ UNUSED_PARAM(owner);
+ UNUSED_PARAM(scratch);
+ UNUSED_PARAM(useKind);
+ UNUSED_PARAM(value);
+ UNUSED_PARAM(mode);
+
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ emitCount(WriteBarrierCounters::jitCounterFor(useKind));
+#endif
+
+#if ENABLE(GGC)
+ Jump filterCells;
+ if (mode == ShouldFilterImmediates)
+ filterCells = emitJumpIfNotJSCell(value);
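+    // The owner cell is known at JIT time, so its card address can be computed now and baked into the generated code.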
+ uint8_t* cardAddress = Heap::addressOfCardFor(owner);
+ move(TrustedImmPtr(cardAddress), scratch);
+ store8(TrustedImm32(1), Address(scratch));
+ if (mode == ShouldFilterImmediates)
+ filterCells.link(this);
+#endif
+}
+
+void JIT::testPrototype(JSValue prototype, JumpList& failureCases)
+{
+ if (prototype.isNull())
+ return;
+
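+    // Guard against the prototype's Structure having changed since this stub was compiled.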
+ ASSERT(prototype.isCell());
+ move(TrustedImmPtr(prototype.asCell()), regT3);
+ failureCases.append(branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototype.asCell()->structure())));
+}
+
+void JIT::patchMethodCallProto(JSGlobalData& globalData, CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSObject* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
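+    // Fill in the patchable pointers planted by op_method_check with the observed structure, prototype and function, then relink the slow-path call to the update stub.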
+ CodeLocationDataLabelPtr structureLocation = methodCallLinkInfo.cachedStructure.location();
+ methodCallLinkInfo.cachedStructure.set(globalData, structureLocation, codeBlock->ownerExecutable(), structure);
+
+ Structure* prototypeStructure = proto->structure();
+ methodCallLinkInfo.cachedPrototypeStructure.set(globalData, structureLocation.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), codeBlock->ownerExecutable(), prototypeStructure);
+ methodCallLinkInfo.cachedPrototype.set(globalData, structureLocation.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), codeBlock->ownerExecutable(), proto);
+ methodCallLinkInfo.cachedFunction.set(globalData, structureLocation.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), codeBlock->ownerExecutable(), callee);
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_method_check_update));
+}
+
+bool JIT::isDirectPutById(StructureStubInfo* stubInfo)
+{
+ switch (stubInfo->accessType) {
+ case access_put_by_id_transition_normal:
+ return false;
+ case access_put_by_id_transition_direct:
+ return true;
+ case access_put_by_id_replace:
+ case access_put_by_id_generic: {
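+        // Replace/generic accesses do not record directness in the access type, so inspect which stub the call is currently linked to.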
+ void* oldCall = MacroAssembler::readCallTarget(stubInfo->callReturnLocation).executableAddress();
+ if (oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct)
+ || oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct_generic)
+ || oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct_fail))
+ return true;
+ ASSERT(oldCall == bitwise_cast<void*>(cti_op_put_by_id)
+ || oldCall == bitwise_cast<void*>(cti_op_put_by_id_generic)
+ || oldCall == bitwise_cast<void*>(cti_op_put_by_id_fail));
+ return false;
+ }
+ default:
+ ASSERT_NOT_REACHED();
+ return false;
+ }
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
new file mode 100644
index 000000000..b73d7ab75
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
@@ -0,0 +1,1133 @@
+/*
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#if USE(JSVALUE32_64)
+#include "JIT.h"
+
+#include "CodeBlock.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "JSPropertyNameIterator.h"
+#include "Interpreter.h"
+#include "LinkBuffer.h"
+#include "RepatchBuffer.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+void JIT::emit_op_put_by_index(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_by_index);
+ stubCall.addArgument(base);
+ stubCall.addArgument(Imm32(property));
+ stubCall.addArgument(value);
+ stubCall.call();
+}
+
+void JIT::emit_op_put_getter(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned function = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_getter);
+ stubCall.addArgument(base);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
+ stubCall.addArgument(function);
+ stubCall.call();
+}
+
+void JIT::emit_op_put_setter(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned function = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_setter);
+ stubCall.addArgument(base);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
+ stubCall.addArgument(function);
+ stubCall.call();
+}
+
+void JIT::emit_op_del_by_id(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_del_by_id);
+ stubCall.addArgument(base);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_method_check(Instruction* currentInstruction)
+{
+ // Assert that the following instruction is a get_by_id.
+ ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
+
+ currentInstruction += OPCODE_LENGTH(op_method_check);
+
+ // Do the method check - check the object & its prototype's structure inline (this is the common case).
+ m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_bytecodeOffset, m_propertyAccessCompilationInfo.size()));
+ MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
+
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+
+ emitLoad(base, regT1, regT0);
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
+
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), info.structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(TrustedImmPtr(0), regT2);
+ Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, JSCell::structureOffset()), protoStructureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
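+    // Both structure checks above use patchable immediates so that patchMethodCallProto() can later plant the observed Structure and prototype Structure.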
+
+    // This move will be repatched to plant the cached function directly, so no load is needed.
+ DataLabelPtr putFunction = moveWithPatch(TrustedImmPtr(0), regT0);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
+
+ move(TrustedImm32(JSValue::CellTag), regT1);
+ Jump match = jump();
+
+ ASSERT_JIT_OFFSET_UNUSED(protoObj, differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
+ ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct);
+ ASSERT_JIT_OFFSET_UNUSED(putFunction, differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction);
+
+ // Link the failure cases here.
+ structureCheck.link(this);
+ protoStructureCheck.link(this);
+
+    // Do a regular(ish) get_by_id (the slow case will be linked to
+    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
+ compileGetByIdHotPath();
+
+ match.link(this);
+ emitValueProfilingSite(FirstProfilingSite);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_method_check) + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
+
+ // We've already generated the following get_by_id, so make sure it's skipped over.
+ m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
+}
+
+void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ currentInstruction += OPCODE_LENGTH(op_method_check);
+
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int ident = currentInstruction[3].u.operand;
+
+ compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true);
+ emitValueProfilingSite(SubsequentProfilingSite);
+
+ // We've already generated the following get_by_id, so make sure it's skipped over.
+ m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
+}
+
+JIT::CodeRef JIT::stringGetByValStubGenerator(JSGlobalData* globalData)
+{
+ JSInterfaceJIT jit;
+ JumpList failures;
+ failures.append(jit.branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info)));
+
+ // Load string length to regT1, and start the process of loading the data pointer into regT0
+ jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT1);
+ jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
+ failures.append(jit.branchTest32(Zero, regT0));
+
+ // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
+ failures.append(jit.branch32(AboveOrEqual, regT2, regT1));
+
+ // Load the character
+ JumpList is16Bit;
+ JumpList cont8Bit;
+ // Load the string flags
+ jit.loadPtr(Address(regT0, ThunkHelpers::stringImplFlagsOffset()), regT1);
+ jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
+ is16Bit.append(jit.branchTest32(Zero, regT1, TrustedImm32(ThunkHelpers::stringImpl8BitFlag())));
+ jit.load8(BaseIndex(regT0, regT2, TimesOne, 0), regT0);
+ cont8Bit.append(jit.jump());
+ is16Bit.link(&jit);
+ jit.load16(BaseIndex(regT0, regT2, TimesTwo, 0), regT0);
+
+ cont8Bit.link(&jit);
+
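+    // Only characters below 0x100 have entries in the small strings cache; anything else falls back to the slow path.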
+ failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
+ jit.move(TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
+ jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
+ jit.move(TrustedImm32(JSValue::CellTag), regT1); // We null check regT0 on return so this is safe
+ jit.ret();
+
+ failures.link(&jit);
+ jit.move(TrustedImm32(0), regT0);
+ jit.ret();
+
+ LinkBuffer patchBuffer(*globalData, &jit);
+ return patchBuffer.finalizeCode();
+}
+
+void JIT::emit_op_get_by_val(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ emitLoad2(base, regT1, regT0, property, regT3, regT2);
+
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+ addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSArray::s_info)));
+
+ loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
+ addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, JSArray::vectorLengthOffset())));
+
+ load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
+ load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
+ addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
+
+ emitValueProfilingSite(FirstProfilingSite);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+
+ Jump nonCell = jump();
+ linkSlowCase(iter); // base array check
+ Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info));
+ emitNakedCall(m_globalData->getCTIStub(stringGetByValStubGenerator).code());
+ Jump failed = branchTestPtr(Zero, regT0);
+ emitStore(dst, regT1, regT0);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
+ failed.link(this);
+ notString.link(this);
+ nonCell.link(this);
+
+ linkSlowCase(iter); // vector length check
+ linkSlowCase(iter); // empty value
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base);
+ stubCall.addArgument(property);
+ stubCall.call(dst);
+
+ emitValueProfilingSite(SubsequentProfilingSite);
+}
+
+void JIT::emit_op_put_by_val(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ emitLoad2(base, regT1, regT0, property, regT3, regT2);
+
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+ addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSArray::s_info)));
+ addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, JSArray::vectorLengthOffset())));
+
+ emitWriteBarrier(regT0, regT1, regT1, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);
+ loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
+
+ Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
+
+ Label storeResult(this);
+ emitLoad(value, regT1, regT0);
+ store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); // payload
+ store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); // tag
+ Jump end = jump();
+
+ empty.link(this);
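+    // Writing into a hole: bump the count of values in the vector, and if the index is at or beyond the current length, grow m_length to index + 1 before storing.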
+ add32(TrustedImm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+ branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
+
+ add32(TrustedImm32(1), regT2, regT0);
+ store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+ jump().linkTo(storeResult, this);
+
+ end.link(this);
+}
+
+void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // base not array check
+ linkSlowCase(iter); // in vector check
+
+ JITStubCall stubPutByValCall(this, cti_op_put_by_val);
+ stubPutByValCall.addArgument(base);
+ stubPutByValCall.addArgument(property);
+ stubPutByValCall.addArgument(value);
+ stubPutByValCall.call();
+}
+
+void JIT::emit_op_get_by_id(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+
+ emitLoad(base, regT1, regT0);
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+ compileGetByIdHotPath();
+ emitValueProfilingSite(FirstProfilingSite);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
+}
+
+void JIT::compileGetByIdHotPath()
+{
+ // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
+    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
+    // to array-length / prototype access trampolines), and finally we also need the property-map access offset as a label
+    // to jump back to if one of these trampolines finds a match.
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+
+ Label hotPathBegin(this);
+ m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo());
+ m_propertyAccessCompilationInfo.last().bytecodeIndex = m_bytecodeOffset;
+ m_propertyAccessCompilationInfo.last().hotPathBegin = hotPathBegin;
+
+ DataLabelPtr structureToCompare;
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ addSlowCase(structureCheck);
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase);
+
+ loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT2);
+ DataLabelCompact displacementLabel1 = loadPtrWithCompactAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
+ ASSERT_JIT_OFFSET_UNUSED(displacementLabel1, differenceBetween(hotPathBegin, displacementLabel1), patchOffsetGetByIdPropertyMapOffset1);
+ DataLabelCompact displacementLabel2 = loadPtrWithCompactAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
+ ASSERT_JIT_OFFSET_UNUSED(displacementLabel2, differenceBetween(hotPathBegin, displacementLabel2), patchOffsetGetByIdPropertyMapOffset2);
+
+ Label putResult(this);
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+}
+
+void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int ident = currentInstruction[3].u.operand;
+
+ compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
+ emitValueProfilingSite(SubsequentProfilingSite);
+}
+
+void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
+{
+ // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
+    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
+    // of the call (which we can use to look up the patch information), but should an array-length or
+ // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
+ // the distance from the call to the head of the slow case.
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
+
+#ifndef NDEBUG
+ Label coldPathBegin(this);
+#endif
+ JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.addArgument(TrustedImmPtr(ident));
+ Call call = stubCall.call(dst);
+
+ END_UNINTERRUPTED_SEQUENCE_FOR_PUT(sequenceGetByIdSlowCase, dst);
+
+ ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);
+
+ // Track the location of the call; this will be used to recover patch information.
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].callReturnLocation = call;
+}
+
+void JIT::emit_op_put_by_id(Instruction* currentInstruction)
+{
+    // In order to be able to patch both the Structure and the object offset, we store one pointer,
+    // 'hotPathBegin', to just after the point where the arguments have been loaded into registers, and
+    // we generate code such that the Structure & offset are always at the same distance from it.
+
+ int base = currentInstruction[1].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ emitLoad2(base, regT1, regT0, value, regT3, regT2);
+
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
+
+ Label hotPathBegin(this);
+ m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo());
+ m_propertyAccessCompilationInfo.last().bytecodeIndex = m_bytecodeOffset;
+ m_propertyAccessCompilationInfo.last().hotPathBegin = hotPathBegin;
+
+ // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
+ DataLabelPtr structureToCompare;
+ addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);
+
+ loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT1);
+ DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT1, patchPutByIdDefaultOffset)); // payload
+ DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT1, patchPutByIdDefaultOffset)); // tag
+
+ END_UNINTERRUPTED_SEQUENCE(sequencePutById);
+
+ emitWriteBarrier(regT0, regT2, regT1, regT2, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
+
+ ASSERT_JIT_OFFSET_UNUSED(displacementLabel1, differenceBetween(hotPathBegin, displacementLabel1), patchOffsetPutByIdPropertyMapOffset1);
+ ASSERT_JIT_OFFSET_UNUSED(displacementLabel2, differenceBetween(hotPathBegin, displacementLabel2), patchOffsetPutByIdPropertyMapOffset2);
+}
+
+void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int base = currentInstruction[1].u.operand;
+ int ident = currentInstruction[2].u.operand;
+ int direct = currentInstruction[8].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
+ stubCall.addArgument(base);
+ stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.addArgument(regT3, regT2);
+ Call call = stubCall.call();
+
+ // Track the location of the call; this will be used to recover patch information.
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].callReturnLocation = call;
+}
+
+// Compile a store into an object's property storage. May overwrite base.
+void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, size_t cachedOffset)
+{
+ int offset = cachedOffset;
+ loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), base);
+ emitStore(offset, valueTag, valuePayload, base);
+}
+
+// Compile a load from an object's property storage. May overwrite base.
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
+{
+ int offset = cachedOffset;
+ RegisterID temp = resultPayload;
+ loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), temp);
+ emitLoad(offset, resultTag, resultPayload, temp);
+}
+
+void JIT::compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
+{
+ loadPtr(base->addressOfPropertyStorage(), resultTag);
+ load32(Address(resultTag, cachedOffset * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
+ load32(Address(resultTag, cachedOffset * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
+}
+
+void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
+{
+ // The code below assumes that regT0 contains the basePayload and regT1 contains the baseTag. Restore them from the stack.
+#if CPU(MIPS) || CPU(SH4) || CPU(ARM)
+    // For MIPS, SH4 and ARM, we don't add sizeof(void*) to the stack offset.
+    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+#else
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+#endif
+
+ JumpList failureCases;
+ failureCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));
+ testPrototype(oldStructure->storedPrototype(), failureCases);
+
+ if (!direct) {
+ // Verify that nothing in the prototype chain has a setter for this property.
+ for (WriteBarrier<Structure>* it = chain->head(); *it; ++it)
+ testPrototype((*it)->storedPrototype(), failureCases);
+ }
+
+ // Reallocate property storage if needed.
+ Call callTarget;
+ bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
+ if (willNeedStorageRealloc) {
+        // This trampoline was called like a JIT stub; before we can call again we need to
+ // remove the return address from the stack, to prevent the stack from becoming misaligned.
+ preserveReturnAddressAfterCall(regT3);
+
+ JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
+ stubCall.skipArgument(); // base
+ stubCall.skipArgument(); // ident
+ stubCall.skipArgument(); // value
+ stubCall.addArgument(TrustedImm32(oldStructure->propertyStorageCapacity()));
+ stubCall.addArgument(TrustedImm32(newStructure->propertyStorageCapacity()));
+ stubCall.call(regT0);
+
+ restoreReturnAddressBeforeReturn(regT3);
+
+#if CPU(MIPS) || CPU(SH4) || CPU(ARM)
+        // For MIPS, SH4 and ARM, we don't add sizeof(void*) to the stack offset.
+        load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+#else
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+#endif
+ }
+
+ emitWriteBarrier(regT0, regT1, regT1, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);
+
+ storePtr(TrustedImmPtr(newStructure), Address(regT0, JSCell::structureOffset()));
+#if CPU(MIPS) || CPU(SH4) || CPU(ARM)
+    // For MIPS, SH4 and ARM, we don't add sizeof(void*) to the stack offset.
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
+#else
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
+#endif
+ compilePutDirectOffset(regT0, regT2, regT3, cachedOffset);
+
+ ret();
+
+ ASSERT(!failureCases.empty());
+ failureCases.link(this);
+ restoreArgumentReferenceForTrampoline();
+ Call failureCall = tailRecursiveCall();
+
+ LinkBuffer patchBuffer(*m_globalData, this);
+
+ patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
+
+ if (willNeedStorageRealloc) {
+ ASSERT(m_calls.size() == 1);
+ patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
+ }
+
+ stubInfo->stubRoutine = patchBuffer.finalizeCode();
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine.code()));
+}
+
+void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
+
+ int offset = sizeof(JSValue) * cachedOffset;
+
+    // Patch the offset into the property map to load from, then patch the Structure to look for.
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
+}
+
+void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
+ // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
+
+ int offset = sizeof(JSValue) * cachedOffset;
+
+    // Patch the offset into the property map to load from, then patch the Structure to look for.
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
+}
+
+void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
+{
+ StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
+
+ // regT0 holds a JSCell*
+
+ // Check for array
+ Jump failureCases1 = branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSArray::s_info));
+
+ // Checks out okay! - get the length from the storage
+ loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
+ load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
+
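+    // Array lengths above INT_MAX cannot be represented as an Int32 JSValue, so fall back to the slow case.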
+ Jump failureCases2 = branch32(Above, regT2, TrustedImm32(INT_MAX));
+ move(regT2, regT0);
+ move(TrustedImm32(JSValue::Int32Tag), regT1);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(*m_globalData, this);
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ patchBuffer.link(failureCases1, slowCaseBegin);
+ patchBuffer.link(failureCases2, slowCaseBegin);
+
+    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ // Track the stub we have created so that it will be deleted later.
+ stubInfo->stubRoutine = patchBuffer.finalizeCode();
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));
+
+    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
+}
+
+void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+
+ // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
+    // referencing the prototype object - let's speculatively load its table nice and early!)
+ JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+
+ Jump failureCases1 = checkStructure(regT0, structure);
+
+    // Check that the prototype object's Structure has not changed.
+ move(TrustedImmPtr(protoObject), regT3);
+ Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));
+
+ bool needsStubLink = false;
+ // Checks out okay!
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
+
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(*m_globalData, this);
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ patchBuffer.link(failureCases1, slowCaseBegin);
+ patchBuffer.link(failureCases2, slowCaseBegin);
+
+    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+
+ // Track the stub we have created so that it will be deleted later.
+ stubInfo->stubRoutine = patchBuffer.finalizeCode();
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));
+
+    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+}
+
+
+void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+{
+ // regT0 holds a JSCell*
+ Jump failureCase = checkStructure(regT0, structure);
+ bool needsStubLink = false;
+ bool isDirect = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(regT0, regT2, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else {
+ isDirect = true;
+ compileGetDirectOffset(regT0, regT1, regT0, cachedOffset);
+ }
+
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(*m_globalData, this);
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructures->list[currentIndex - 1].stubRoutine.code());
+ if (!lastProtoBegin)
+ lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+
+ patchBuffer.link(failureCase, lastProtoBegin);
+
+    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeRef stubRoutine = patchBuffer.finalizeCode();
+
+ polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubRoutine, structure, isDirect);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
+}
+
+void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+
+ // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
+    // referencing the prototype object - let's speculatively load its table nice and early!)
+ JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+
+    // Check that regT0 holds an object of the right Structure.
+ Jump failureCases1 = checkStructure(regT0, structure);
+
+    // Check that the prototype object's Structure has not changed.
+ move(TrustedImmPtr(protoObject), regT3);
+ Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));
+
+ bool needsStubLink = false;
+ bool isDirect = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else {
+ isDirect = true;
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
+ }
+
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(*m_globalData, this);
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());
+ patchBuffer.link(failureCases1, lastProtoBegin);
+ patchBuffer.link(failureCases2, lastProtoBegin);
+
+    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeRef stubRoutine = patchBuffer.finalizeCode();
+
+ prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, prototypeStructure, isDirect);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
+}
+
+void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+ ASSERT(count);
+
+ JumpList bucketsOfFail;
+
+    // Check that regT0 holds an object of the right Structure.
+ bucketsOfFail.append(checkStructure(regT0, structure));
+
+ Structure* currStructure = structure;
+ WriteBarrier<Structure>* it = chain->head();
+ JSObject* protoObject = 0;
+ for (unsigned i = 0; i < count; ++i, ++it) {
+ protoObject = asObject(currStructure->prototypeForLookup(callFrame));
+ currStructure = it->get();
+ testPrototype(protoObject, bucketsOfFail);
+ }
+ ASSERT(protoObject);
+
+ bool needsStubLink = false;
+ bool isDirect = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else {
+ isDirect = true;
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
+ }
+
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(*m_globalData, this);
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());
+
+ patchBuffer.link(bucketsOfFail, lastProtoBegin);
+
+    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeRef stubRoutine = patchBuffer.finalizeCode();
+
+ // Track the stub we have created so that it will be deleted later.
+ prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain, isDirect);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
+}
+
+void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+ ASSERT(count);
+
+ JumpList bucketsOfFail;
+
+    // Check that regT0 holds an object of the right Structure.
+ bucketsOfFail.append(checkStructure(regT0, structure));
+
+ Structure* currStructure = structure;
+ WriteBarrier<Structure>* it = chain->head();
+ JSObject* protoObject = 0;
+ for (unsigned i = 0; i < count; ++i, ++it) {
+ protoObject = asObject(currStructure->prototypeForLookup(callFrame));
+ currStructure = it->get();
+ testPrototype(protoObject, bucketsOfFail);
+ }
+ ASSERT(protoObject);
+
+ bool needsStubLink = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(*m_globalData, this);
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
+
+    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ // Track the stub we have created so that it will be deleted later.
+ CodeRef stubRoutine = patchBuffer.finalizeCode();
+ stubInfo->stubRoutine = stubRoutine;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
+
+    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+}
+
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset)
+{
+ ASSERT(sizeof(JSValue) == 8);
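+    // The offset register indexes JSValue-sized (8-byte) slots in the property storage,
+    // hence the TimesEight scaling on the payload and tag loads below.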
+
+ loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), base);
+ loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
+ loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
+}
+
+void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+ unsigned expected = currentInstruction[4].u.operand;
+ unsigned iter = currentInstruction[5].u.operand;
+ unsigned i = currentInstruction[6].u.operand;
+
+ emitLoad2(property, regT1, regT0, base, regT3, regT2);
+ emitJumpSlowCaseIfNotJSCell(property, regT1);
+ addSlowCase(branchPtr(NotEqual, regT0, payloadFor(expected)));
+ // Property registers are now available as the property is known
+ emitJumpSlowCaseIfNotJSCell(base, regT3);
+ emitLoadPayload(iter, regT1);
+
+ // Test base's structure
+ loadPtr(Address(regT2, JSCell::structureOffset()), regT0);
+ addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
+ load32(addressFor(i), regT3);
+ sub32(TrustedImm32(1), regT3);
+ addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
+ compileGetDirectOffset(regT2, regT1, regT0, regT3);
+
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, property);
+ linkSlowCase(iter);
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base);
+ stubCall.addArgument(property);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int index = currentInstruction[2].u.operand;
+ int skip = currentInstruction[3].u.operand;
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
+ bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
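+    // If the function needs a full scope chain, the first hop may refer to an activation that
+    // has not been created yet (its register still holds the empty value tag); in that case the
+    // hop over the corresponding ScopeChainNode is skipped.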
+ if (checkTopLevel && skip--) {
+ Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
+ activationNotCreated.link(this);
+ }
+ while (skip--)
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
+
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
+ loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT2);
+
+ emitLoad(index, regT1, regT0, regT2);
+ emitValueProfilingSite(FirstProfilingSite);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
+}
+
+void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
+{
+ int index = currentInstruction[1].u.operand;
+ int skip = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ emitLoad(value, regT1, regT0);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
+ bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
+ activationNotCreated.link(this);
+ }
+ while (skip--)
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
+
+ loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT3);
+ emitStore(index, regT1, regT0, regT3);
+ emitWriteBarrier(regT2, regT1, regT0, regT1, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+}
+
+void JIT::emit_op_get_global_var(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ JSGlobalObject* globalObject = m_codeBlock->globalObject();
+ ASSERT(globalObject->isGlobalObject());
+ int index = currentInstruction[2].u.operand;
+
+ loadPtr(&globalObject->m_registers, regT2);
+
+ emitLoad(index, regT1, regT0, regT2);
+ emitValueProfilingSite(FirstProfilingSite);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_global_var), dst, regT1, regT0);
+}
+
+void JIT::emit_op_put_global_var(Instruction* currentInstruction)
+{
+ int index = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+
+ JSGlobalObject* globalObject = m_codeBlock->globalObject();
+
+ emitLoad(value, regT1, regT0);
+ move(TrustedImmPtr(globalObject), regT2);
+
+ emitWriteBarrier(globalObject, regT1, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+
+ loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT2);
+ emitStore(index, regT1, regT0, regT2);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_put_global_var), value, regT1, regT0);
+}
+
+void JIT::resetPatchGetById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
+{
+ repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_get_by_id);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), reinterpret_cast<void*>(-1));
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset1), 0);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset2), 0);
+ repatchBuffer.relink(stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase), stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
+}
+
+void JIT::resetPatchPutById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
+{
+ if (isDirectPutById(stubInfo))
+ repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id_direct);
+ else
+ repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), reinterpret_cast<void*>(-1));
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), 0);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), 0);
+}
+
+} // namespace JSC
+
+#endif // USE(JSVALUE32_64)
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITStubCall.h b/Source/JavaScriptCore/jit/JITStubCall.h
new file mode 100644
index 000000000..18441c363
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITStubCall.h
@@ -0,0 +1,263 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITStubCall_h
+#define JITStubCall_h
+
+#include "MacroAssemblerCodeRef.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+ class JITStubCall {
+ public:
+ JITStubCall(JIT* jit, JSObject* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(stub)
+ , m_returnType(Cell)
+ , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
+ {
+ }
+
+ JITStubCall(JIT* jit, JSPropertyNameIterator* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(stub)
+ , m_returnType(Cell)
+ , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
+ {
+ }
+
+ JITStubCall(JIT* jit, void* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(stub)
+ , m_returnType(VoidPtr)
+ , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
+ {
+ }
+
+ JITStubCall(JIT* jit, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(stub)
+ , m_returnType(Int)
+ , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
+ {
+ }
+
+ JITStubCall(JIT* jit, bool (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(stub)
+ , m_returnType(Int)
+ , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
+ {
+ }
+
+ JITStubCall(JIT* jit, void (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(stub)
+ , m_returnType(Void)
+ , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
+ {
+ }
+
+#if USE(JSVALUE32_64)
+ JITStubCall(JIT* jit, EncodedJSValue (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(stub)
+ , m_returnType(Value)
+ , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
+ {
+ }
+#endif
+
+ // Arguments are added first to last.
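+        //
+        // For example, a typical 32-bit call site (compare emitSlow_op_get_by_pname in
+        // JITPropertyAccess32_64.cpp) looks roughly like:
+        //
+        //     JITStubCall stubCall(this, cti_op_get_by_val);
+        //     stubCall.addArgument(base);     // virtual register
+        //     stubCall.addArgument(property); // virtual register
+        //     stubCall.call(dst);             // stores the result into the dst virtual register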
+
+ void skipArgument()
+ {
+ m_stackIndex += stackIndexStep;
+ }
+
+ void addArgument(JIT::TrustedImm32 argument)
+ {
+ m_jit->poke(argument, m_stackIndex);
+ m_stackIndex += stackIndexStep;
+ }
+
+ void addArgument(JIT::TrustedImmPtr argument)
+ {
+ m_jit->poke(argument, m_stackIndex);
+ m_stackIndex += stackIndexStep;
+ }
+
+ void addArgument(JIT::RegisterID argument)
+ {
+ m_jit->poke(argument, m_stackIndex);
+ m_stackIndex += stackIndexStep;
+ }
+
+#if USE(JSVALUE32_64)
+ void addArgument(const JSValue& value)
+ {
+ m_jit->poke(JIT::Imm32(value.payload()), m_stackIndex);
+ m_jit->poke(JIT::Imm32(value.tag()), m_stackIndex + 1);
+ m_stackIndex += stackIndexStep;
+ }
+#endif
+
+ void addArgument(JIT::RegisterID tag, JIT::RegisterID payload)
+ {
+ m_jit->poke(payload, m_stackIndex);
+ m_jit->poke(tag, m_stackIndex + 1);
+ m_stackIndex += stackIndexStep;
+ }
+
+#if USE(JSVALUE32_64)
+ void addArgument(unsigned srcVirtualRegister)
+ {
+ if (m_jit->m_codeBlock->isConstantRegisterIndex(srcVirtualRegister)) {
+ addArgument(m_jit->getConstantOperand(srcVirtualRegister));
+ return;
+ }
+
+ m_jit->emitLoad(srcVirtualRegister, JIT::regT1, JIT::regT0);
+ addArgument(JIT::regT1, JIT::regT0);
+ }
+
+ void getArgument(size_t argumentNumber, JIT::RegisterID tag, JIT::RegisterID payload)
+ {
+ size_t stackIndex = JITSTACKFRAME_ARGS_INDEX + (argumentNumber * stackIndexStep);
+ m_jit->peek(payload, stackIndex);
+ m_jit->peek(tag, stackIndex + 1);
+ }
+#else
+ void addArgument(unsigned src, JIT::RegisterID scratchRegister) // src is a virtual register.
+ {
+ if (m_jit->m_codeBlock->isConstantRegisterIndex(src))
+ addArgument(JIT::ImmPtr(JSValue::encode(m_jit->m_codeBlock->getConstant(src))));
+ else {
+ m_jit->loadPtr(JIT::Address(JIT::callFrameRegister, src * sizeof(Register)), scratchRegister);
+ addArgument(scratchRegister);
+ }
+ m_jit->killLastResultRegister();
+ }
+#endif
+
+ JIT::Call call()
+ {
+#if ENABLE(OPCODE_SAMPLING)
+ if (m_jit->m_bytecodeOffset != (unsigned)-1)
+ m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, true);
+#endif
+
+ m_jit->restoreArgumentReference();
+ m_jit->updateTopCallFrame();
+ JIT::Call call = m_jit->call();
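+            // Record the call so the LinkBuffer can later link it to the stub routine.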
+ m_jit->m_calls.append(CallRecord(call, m_jit->m_bytecodeOffset, m_stub.value()));
+
+#if ENABLE(OPCODE_SAMPLING)
+ if (m_jit->m_bytecodeOffset != (unsigned)-1)
+ m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, false);
+#endif
+
+#if USE(JSVALUE32_64)
+ m_jit->unmap();
+#else
+ m_jit->killLastResultRegister();
+#endif
+ return call;
+ }
+
+#if USE(JSVALUE32_64)
+ JIT::Call call(unsigned dst) // dst is a virtual register.
+ {
+ ASSERT(m_returnType == Value || m_returnType == Cell);
+ JIT::Call call = this->call();
+ if (m_returnType == Value)
+ m_jit->emitStore(dst, JIT::regT1, JIT::regT0);
+ else
+ m_jit->emitStoreCell(dst, JIT::returnValueRegister);
+ return call;
+ }
+
+ JIT::Call callWithValueProfiling(unsigned dst, JIT::ValueProfilingSiteKind kind)
+ {
+ ASSERT(m_returnType == Value || m_returnType == Cell);
+ JIT::Call call = this->call();
+ ASSERT(JIT::returnValueRegister == JIT::regT0);
+ if (m_returnType == Cell)
+ m_jit->move(JIT::TrustedImm32(JSValue::CellTag), JIT::regT1);
+ m_jit->emitValueProfilingSite(kind);
+ if (m_returnType == Value)
+ m_jit->emitStore(dst, JIT::regT1, JIT::regT0);
+ else
+ m_jit->emitStoreCell(dst, JIT::returnValueRegister);
+ return call;
+ }
+#else
+ JIT::Call call(unsigned dst) // dst is a virtual register.
+ {
+ ASSERT(m_returnType == VoidPtr || m_returnType == Cell);
+ JIT::Call call = this->call();
+ m_jit->emitPutVirtualRegister(dst);
+ return call;
+ }
+
+ JIT::Call callWithValueProfiling(unsigned dst, JIT::ValueProfilingSiteKind kind)
+ {
+ ASSERT(m_returnType == VoidPtr || m_returnType == Cell);
+ JIT::Call call = this->call();
+ ASSERT(JIT::returnValueRegister == JIT::regT0);
+ m_jit->emitValueProfilingSite(kind);
+ m_jit->emitPutVirtualRegister(dst);
+ return call;
+ }
+#endif
+
+ JIT::Call call(JIT::RegisterID dst) // dst is a machine register.
+ {
+#if USE(JSVALUE32_64)
+ ASSERT(m_returnType == Value || m_returnType == VoidPtr || m_returnType == Int || m_returnType == Cell);
+#else
+ ASSERT(m_returnType == VoidPtr || m_returnType == Int || m_returnType == Cell);
+#endif
+ JIT::Call call = this->call();
+ if (dst != JIT::returnValueRegister)
+ m_jit->move(JIT::returnValueRegister, dst);
+ return call;
+ }
+
+ private:
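+        // An argument slot is one machine word: on 32-bit targets an EncodedJSValue spans two
+        // words (payload and tag), so the index advances by 2; on 64-bit targets it advances by 1.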
+ static const size_t stackIndexStep = sizeof(EncodedJSValue) == 2 * sizeof(void*) ? 2 : 1;
+
+ JIT* m_jit;
+ FunctionPtr m_stub;
+ enum { Void, VoidPtr, Int, Value, Cell } m_returnType;
+ size_t m_stackIndex;
+ };
+}
+
+#endif // ENABLE(JIT)
+
+#endif // JITStubCall_h
diff --git a/Source/JavaScriptCore/jit/JITStubs.cpp b/Source/JavaScriptCore/jit/JITStubs.cpp
new file mode 100644
index 000000000..eda8c8fba
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITStubs.cpp
@@ -0,0 +1,3748 @@
+/*
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ * Copyright (C) Research In Motion Limited 2010, 2011. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#include "JITStubs.h"
+
+#include "Arguments.h"
+#include "CallFrame.h"
+#include "CodeBlock.h"
+#include "DFGOSREntry.h"
+#include "Debugger.h"
+#include "ExceptionHelpers.h"
+#include "GetterSetter.h"
+#include "Heap.h"
+#include "InlineASM.h"
+#include "JIT.h"
+#include "JSActivation.h"
+#include "JSArray.h"
+#include "JSByteArray.h"
+#include "JSFunction.h"
+#include "JSGlobalObjectFunctions.h"
+#include "JSNotAnObject.h"
+#include "JSPropertyNameIterator.h"
+#include "JSStaticScopeObject.h"
+#include "JSString.h"
+#include "ObjectPrototype.h"
+#include "Operations.h"
+#include "Parser.h"
+#include "Profiler.h"
+#include "RegExpObject.h"
+#include "RegExpPrototype.h"
+#include "Register.h"
+#include "SamplingTool.h"
+#include "Strong.h"
+#include <wtf/StdLibExtras.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+using namespace std;
+
+namespace JSC {
+
+#if USE(JSVALUE32_64)
+
+#if COMPILER(GCC) && CPU(X86)
+
+// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
+// need to change the assembly trampolines below to match.
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 16 == 0x0, JITStackFrame_maintains_16byte_stack_alignment);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x3c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x50, JITStackFrame_code_offset_matches_ctiTrampoline);
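+
+// Rough layout sketch for the trampoline below, counting from %esp after the prologue:
+// 0x00-0x3b is the stub argument area, 0x3c/0x40/0x44/0x48 hold the saved ebx/edi/esi/ebp,
+// 0x4c is the caller's return address, so the incoming arguments begin at 0x50 (code) and
+// the CallFrame* argument sits at 0x58 - matching the COMPILE_ASSERTs above.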
+
+asm (
+".text\n"
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+HIDE_SYMBOL(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "pushl %ebp" "\n"
+ "movl %esp, %ebp" "\n"
+ "pushl %esi" "\n"
+ "pushl %edi" "\n"
+ "pushl %ebx" "\n"
+ "subl $0x3c, %esp" "\n"
+ "movl 0x58(%esp), %edi" "\n"
+ "call *0x50(%esp)" "\n"
+ "addl $0x3c, %esp" "\n"
+ "popl %ebx" "\n"
+ "popl %edi" "\n"
+ "popl %esi" "\n"
+ "popl %ebp" "\n"
+ "ret" "\n"
+);
+
+asm (
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+ "movl %esp, %ecx" "\n"
+ "call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
+ "int3" "\n"
+);
+
+asm (
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "addl $0x3c, %esp" "\n"
+ "popl %ebx" "\n"
+ "popl %edi" "\n"
+ "popl %esi" "\n"
+ "popl %ebp" "\n"
+ "ret" "\n"
+);
+
+#elif COMPILER(GCC) && CPU(X86_64)
+
+// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
+// need to change the assembly trampolines below to match.
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 32 == 0x0, JITStackFrame_maintains_32byte_stack_alignment);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x48, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x90, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x80, JITStackFrame_code_offset_matches_ctiTrampoline);
+
+asm (
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+HIDE_SYMBOL(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "pushq %rbp" "\n"
+ "movq %rsp, %rbp" "\n"
+ "pushq %r12" "\n"
+ "pushq %r13" "\n"
+ "pushq %r14" "\n"
+ "pushq %r15" "\n"
+ "pushq %rbx" "\n"
+ "subq $0x48, %rsp" "\n"
+ "movq $512, %r12" "\n"
+ "movq $0xFFFF000000000000, %r14" "\n"
+ "movq $0xFFFF000000000002, %r15" "\n"
+ "movq 0x90(%rsp), %r13" "\n"
+ "call *0x80(%rsp)" "\n"
+ "addq $0x48, %rsp" "\n"
+ "popq %rbx" "\n"
+ "popq %r15" "\n"
+ "popq %r14" "\n"
+ "popq %r13" "\n"
+ "popq %r12" "\n"
+ "popq %rbp" "\n"
+ "ret" "\n"
+);
+
+asm (
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+ "movq %rsp, %rdi" "\n"
+ "call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
+ "int3" "\n"
+);
+
+asm (
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "addq $0x48, %rsp" "\n"
+ "popq %rbx" "\n"
+ "popq %r15" "\n"
+ "popq %r14" "\n"
+ "popq %r13" "\n"
+ "popq %r12" "\n"
+ "popq %rbp" "\n"
+ "ret" "\n"
+);
+
+#elif (COMPILER(GCC) || COMPILER(RVCT)) && CPU(ARM_THUMB2)
+
+#define THUNK_RETURN_ADDRESS_OFFSET 0x38
+#define PRESERVED_RETURN_ADDRESS_OFFSET 0x3C
+#define PRESERVED_R4_OFFSET 0x40
+#define PRESERVED_R5_OFFSET 0x44
+#define PRESERVED_R6_OFFSET 0x48
+#define PRESERVED_R7_OFFSET 0x4C
+#define PRESERVED_R8_OFFSET 0x50
+#define PRESERVED_R9_OFFSET 0x54
+#define PRESERVED_R10_OFFSET 0x58
+#define PRESERVED_R11_OFFSET 0x5C
+#define REGISTER_FILE_OFFSET 0x60
+#define CALLFRAME_OFFSET 0x64
+#define EXCEPTION_OFFSET 0x64
+#define ENABLE_PROFILER_REFERENCE_OFFSET 0x68
+
+#elif (COMPILER(GCC) || COMPILER(MSVC) || COMPILER(RVCT)) && CPU(ARM_TRADITIONAL)
+
+// Also update the MSVC section (defined at DEFINE_STUB_FUNCTION)
+// when changing one of the following values.
+#define THUNK_RETURN_ADDRESS_OFFSET 64
+#define PRESERVEDR4_OFFSET 68
+
+#elif COMPILER(MSVC) && CPU(X86)
+
+// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
+// need to change the assembly trampolines below to match.
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 16 == 0x0, JITStackFrame_maintains_16byte_stack_alignment);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x3c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x50, JITStackFrame_code_offset_matches_ctiTrampoline);
+
+extern "C" {
+
+ __declspec(naked) EncodedJSValue ctiTrampoline(void* code, RegisterFile*, CallFrame*, void* /*unused1*/, Profiler**, JSGlobalData*)
+ {
+ __asm {
+ push ebp;
+ mov ebp, esp;
+ push esi;
+ push edi;
+ push ebx;
+ sub esp, 0x3c;
+ mov ecx, esp;
+ mov edi, [esp + 0x58];
+ call [esp + 0x50];
+ add esp, 0x3c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+
+ __declspec(naked) void ctiVMThrowTrampoline()
+ {
+ __asm {
+ mov ecx, esp;
+ call cti_vm_throw;
+ add esp, 0x3c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+
+ __declspec(naked) void ctiOpThrowNotCaught()
+ {
+ __asm {
+ add esp, 0x3c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+}
+
+#elif CPU(MIPS)
+
+#define PRESERVED_GP_OFFSET 60
+#define PRESERVED_S0_OFFSET 64
+#define PRESERVED_S1_OFFSET 68
+#define PRESERVED_S2_OFFSET 72
+#define PRESERVED_RETURN_ADDRESS_OFFSET 76
+#define THUNK_RETURN_ADDRESS_OFFSET 80
+#define REGISTER_FILE_OFFSET 84
+#define CALLFRAME_OFFSET 88
+#define EXCEPTION_OFFSET 92
+#define ENABLE_PROFILER_REFERENCE_OFFSET 96
+#define GLOBAL_DATA_OFFSET 100
+#define STACK_LENGTH 104
+#elif CPU(SH4)
+#define SYMBOL_STRING(name) #name
+/* code (r4), RegisterFile* (r5), CallFrame* (r6), JSValue* exception (r7), Profiler**(sp), JSGlobalData (sp)*/
+
+asm volatile (
+".text\n"
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+HIDE_SYMBOL(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "mov.l r7, @-r15" "\n"
+ "mov.l r6, @-r15" "\n"
+ "mov.l r5, @-r15" "\n"
+ "mov.l r8, @-r15" "\n"
+ "mov #127, r8" "\n"
+ "mov.l r14, @-r15" "\n"
+ "sts.l pr, @-r15" "\n"
+ "mov.l r13, @-r15" "\n"
+ "mov.l r11, @-r15" "\n"
+ "mov.l r10, @-r15" "\n"
+ "add #-60, r15" "\n"
+ "mov r6, r14" "\n"
+ "jsr @r4" "\n"
+ "nop" "\n"
+ "add #60, r15" "\n"
+ "mov.l @r15+,r10" "\n"
+ "mov.l @r15+,r11" "\n"
+ "mov.l @r15+,r13" "\n"
+ "lds.l @r15+,pr" "\n"
+ "mov.l @r15+,r14" "\n"
+ "mov.l @r15+,r8" "\n"
+ "add #12, r15" "\n"
+ "rts" "\n"
+ "nop" "\n"
+);
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+ "mov.l .L2"SYMBOL_STRING(cti_vm_throw)",r0" "\n"
+ "mov r15, r4" "\n"
+ "mov.l @(r0,r12),r11" "\n"
+ "jsr @r11" "\n"
+ "nop" "\n"
+ "add #60, r15" "\n"
+ "mov.l @r15+,r10" "\n"
+ "mov.l @r15+,r11" "\n"
+ "mov.l @r15+,r13" "\n"
+ "lds.l @r15+,pr" "\n"
+ "mov.l @r15+,r14" "\n"
+ "mov.l @r15+,r8" "\n"
+ "add #12, r15" "\n"
+ "rts" "\n"
+ "nop" "\n"
+ ".align 2" "\n"
+ ".L2"SYMBOL_STRING(cti_vm_throw)":.long " SYMBOL_STRING(cti_vm_throw)"@GOT \n"
+);
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "add #60, r15" "\n"
+ "mov.l @r15+,r10" "\n"
+ "mov.l @r15+,r11" "\n"
+ "mov.l @r15+,r13" "\n"
+ "lds.l @r15+,pr" "\n"
+ "mov.l @r15+,r14" "\n"
+ "mov.l @r15+,r8" "\n"
+ "add #12, r15" "\n"
+ "rts" "\n"
+ "nop" "\n"
+);
+#else
+ #error "JIT not supported on this platform."
+#endif
+
+#else // USE(JSVALUE32_64)
+
+#if COMPILER(GCC) && CPU(X86_64)
+
+// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
+// need to change the assembly trampolines below to match.
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x48, JITStackFrame_code_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x78, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+
+asm (
+".text\n"
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+HIDE_SYMBOL(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "pushq %rbp" "\n"
+ "movq %rsp, %rbp" "\n"
+ "pushq %r12" "\n"
+ "pushq %r13" "\n"
+ "pushq %r14" "\n"
+ "pushq %r15" "\n"
+ "pushq %rbx" "\n"
+ // Form the JIT stubs area
+ "pushq %r9" "\n"
+ "pushq %r8" "\n"
+ "pushq %rcx" "\n"
+ "pushq %rdx" "\n"
+ "pushq %rsi" "\n"
+ "pushq %rdi" "\n"
+ "subq $0x48, %rsp" "\n"
+ "movq $512, %r12" "\n"
+ "movq $0xFFFF000000000000, %r14" "\n"
+ "movq $0xFFFF000000000002, %r15" "\n"
+ "movq %rdx, %r13" "\n"
+ "call *%rdi" "\n"
+ "addq $0x78, %rsp" "\n"
+ "popq %rbx" "\n"
+ "popq %r15" "\n"
+ "popq %r14" "\n"
+ "popq %r13" "\n"
+ "popq %r12" "\n"
+ "popq %rbp" "\n"
+ "ret" "\n"
+);
+
+asm (
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+ "movq %rsp, %rdi" "\n"
+ "call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
+ "int3" "\n"
+);
+
+asm (
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "addq $0x78, %rsp" "\n"
+ "popq %rbx" "\n"
+ "popq %r15" "\n"
+ "popq %r14" "\n"
+ "popq %r13" "\n"
+ "popq %r12" "\n"
+ "popq %rbp" "\n"
+ "ret" "\n"
+);
+
+#else
+ #error "JIT not supported on this platform."
+#endif
+
+#endif // USE(JSVALUE32_64)
+
+#if CPU(MIPS)
+asm (
+".text" "\n"
+".align 2" "\n"
+".set noreorder" "\n"
+".set nomacro" "\n"
+".set nomips16" "\n"
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+".ent " SYMBOL_STRING(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "addiu $29,$29,-" STRINGIZE_VALUE_OF(STACK_LENGTH) "\n"
+ "sw $31," STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "($29)" "\n"
+ "sw $18," STRINGIZE_VALUE_OF(PRESERVED_S2_OFFSET) "($29)" "\n"
+ "sw $17," STRINGIZE_VALUE_OF(PRESERVED_S1_OFFSET) "($29)" "\n"
+ "sw $16," STRINGIZE_VALUE_OF(PRESERVED_S0_OFFSET) "($29)" "\n"
+#if WTF_MIPS_PIC
+ "sw $28," STRINGIZE_VALUE_OF(PRESERVED_GP_OFFSET) "($29)" "\n"
+#endif
+ "move $16,$6 # set callFrameRegister" "\n"
+ "li $17,512 # set timeoutCheckRegister" "\n"
+ "move $25,$4 # move executableAddress to t9" "\n"
+ "sw $5," STRINGIZE_VALUE_OF(REGISTER_FILE_OFFSET) "($29) # store registerFile to current stack" "\n"
+ "sw $6," STRINGIZE_VALUE_OF(CALLFRAME_OFFSET) "($29) # store callFrame to curent stack" "\n"
+ "sw $7," STRINGIZE_VALUE_OF(EXCEPTION_OFFSET) "($29) # store exception to current stack" "\n"
+ "lw $8," STRINGIZE_VALUE_OF(STACK_LENGTH + 16) "($29) # load enableProfilerReference from previous stack" "\n"
+ "lw $9," STRINGIZE_VALUE_OF(STACK_LENGTH + 20) "($29) # load globalData from previous stack" "\n"
+ "sw $8," STRINGIZE_VALUE_OF(ENABLE_PROFILER_REFERENCE_OFFSET) "($29) # store enableProfilerReference to current stack" "\n"
+ "jalr $25" "\n"
+ "sw $9," STRINGIZE_VALUE_OF(GLOBAL_DATA_OFFSET) "($29) # store globalData to current stack" "\n"
+ "lw $16," STRINGIZE_VALUE_OF(PRESERVED_S0_OFFSET) "($29)" "\n"
+ "lw $17," STRINGIZE_VALUE_OF(PRESERVED_S1_OFFSET) "($29)" "\n"
+ "lw $18," STRINGIZE_VALUE_OF(PRESERVED_S2_OFFSET) "($29)" "\n"
+ "lw $31," STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "($29)" "\n"
+ "jr $31" "\n"
+ "addiu $29,$29," STRINGIZE_VALUE_OF(STACK_LENGTH) "\n"
+".set reorder" "\n"
+".set macro" "\n"
+".end " SYMBOL_STRING(ctiTrampoline) "\n"
+);
+
+asm (
+".text" "\n"
+".align 2" "\n"
+".set noreorder" "\n"
+".set nomacro" "\n"
+".set nomips16" "\n"
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+".ent " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+#if WTF_MIPS_PIC
+ "lw $28," STRINGIZE_VALUE_OF(PRESERVED_GP_OFFSET) "($29)" "\n"
+".set macro" "\n"
+ "la $25," SYMBOL_STRING(cti_vm_throw) "\n"
+".set nomacro" "\n"
+ "bal " SYMBOL_STRING(cti_vm_throw) "\n"
+ "move $4,$29" "\n"
+#else
+ "jal " SYMBOL_STRING(cti_vm_throw) "\n"
+ "move $4,$29" "\n"
+#endif
+ "lw $16," STRINGIZE_VALUE_OF(PRESERVED_S0_OFFSET) "($29)" "\n"
+ "lw $17," STRINGIZE_VALUE_OF(PRESERVED_S1_OFFSET) "($29)" "\n"
+ "lw $18," STRINGIZE_VALUE_OF(PRESERVED_S2_OFFSET) "($29)" "\n"
+ "lw $31," STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "($29)" "\n"
+ "jr $31" "\n"
+ "addiu $29,$29," STRINGIZE_VALUE_OF(STACK_LENGTH) "\n"
+".set reorder" "\n"
+".set macro" "\n"
+".end " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+);
+
+asm (
+".text" "\n"
+".align 2" "\n"
+".set noreorder" "\n"
+".set nomacro" "\n"
+".set nomips16" "\n"
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+".ent " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "lw $16," STRINGIZE_VALUE_OF(PRESERVED_S0_OFFSET) "($29)" "\n"
+ "lw $17," STRINGIZE_VALUE_OF(PRESERVED_S1_OFFSET) "($29)" "\n"
+ "lw $18," STRINGIZE_VALUE_OF(PRESERVED_S2_OFFSET) "($29)" "\n"
+ "lw $31," STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "($29)" "\n"
+ "jr $31" "\n"
+ "addiu $29,$29," STRINGIZE_VALUE_OF(STACK_LENGTH) "\n"
+".set reorder" "\n"
+".set macro" "\n"
+".end " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+);
+#endif
+
+#if COMPILER(GCC) && CPU(ARM_THUMB2)
+
+asm (
+".text" "\n"
+".align 2" "\n"
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+HIDE_SYMBOL(ctiTrampoline) "\n"
+".thumb" "\n"
+".thumb_func " THUMB_FUNC_PARAM(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "sub sp, sp, #" STRINGIZE_VALUE_OF(ENABLE_PROFILER_REFERENCE_OFFSET) "\n"
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "]" "\n"
+ "str r4, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R4_OFFSET) "]" "\n"
+ "str r5, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R5_OFFSET) "]" "\n"
+ "str r6, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R6_OFFSET) "]" "\n"
+ "str r7, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R7_OFFSET) "]" "\n"
+ "str r8, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R8_OFFSET) "]" "\n"
+ "str r9, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R9_OFFSET) "]" "\n"
+ "str r10, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R10_OFFSET) "]" "\n"
+ "str r11, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R11_OFFSET) "]" "\n"
+ "str r1, [sp, #" STRINGIZE_VALUE_OF(REGISTER_FILE_OFFSET) "]" "\n"
+ "str r2, [sp, #" STRINGIZE_VALUE_OF(CALLFRAME_OFFSET) "]" "\n"
+ "str r3, [sp, #" STRINGIZE_VALUE_OF(EXCEPTION_OFFSET) "]" "\n"
+ "mov r5, r2" "\n"
+ "mov r6, #512" "\n"
+ "blx r0" "\n"
+ "ldr r11, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R11_OFFSET) "]" "\n"
+ "ldr r10, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R10_OFFSET) "]" "\n"
+ "ldr r9, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R9_OFFSET) "]" "\n"
+ "ldr r8, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R8_OFFSET) "]" "\n"
+ "ldr r7, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R7_OFFSET) "]" "\n"
+ "ldr r6, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R6_OFFSET) "]" "\n"
+ "ldr r5, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R5_OFFSET) "]" "\n"
+ "ldr r4, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R4_OFFSET) "]" "\n"
+ "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "]" "\n"
+ "add sp, sp, #" STRINGIZE_VALUE_OF(ENABLE_PROFILER_REFERENCE_OFFSET) "\n"
+ "bx lr" "\n"
+);
+
+asm (
+".text" "\n"
+".align 2" "\n"
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
+".thumb" "\n"
+".thumb_func " THUMB_FUNC_PARAM(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+ "mov r0, sp" "\n"
+ "bl " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
+ "ldr r11, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R11_OFFSET) "]" "\n"
+ "ldr r10, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R10_OFFSET) "]" "\n"
+ "ldr r9, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R9_OFFSET) "]" "\n"
+ "ldr r8, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R8_OFFSET) "]" "\n"
+ "ldr r7, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R7_OFFSET) "]" "\n"
+ "ldr r6, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R6_OFFSET) "]" "\n"
+ "ldr r5, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R5_OFFSET) "]" "\n"
+ "ldr r4, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R4_OFFSET) "]" "\n"
+ "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "]" "\n"
+ "add sp, sp, #" STRINGIZE_VALUE_OF(ENABLE_PROFILER_REFERENCE_OFFSET) "\n"
+ "bx lr" "\n"
+);
+
+asm (
+".text" "\n"
+".align 2" "\n"
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
+".thumb" "\n"
+".thumb_func " THUMB_FUNC_PARAM(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "ldr r11, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R11_OFFSET) "]" "\n"
+ "ldr r10, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R10_OFFSET) "]" "\n"
+ "ldr r9, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R9_OFFSET) "]" "\n"
+ "ldr r8, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R8_OFFSET) "]" "\n"
+ "ldr r7, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R7_OFFSET) "]" "\n"
+ "ldr r6, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R6_OFFSET) "]" "\n"
+ "ldr r5, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R5_OFFSET) "]" "\n"
+ "ldr r4, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R4_OFFSET) "]" "\n"
+ "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "]" "\n"
+ "add sp, sp, #" STRINGIZE_VALUE_OF(ENABLE_PROFILER_REFERENCE_OFFSET) "\n"
+ "bx lr" "\n"
+);
+
+#elif COMPILER(GCC) && CPU(ARM_TRADITIONAL)
+
+asm (
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+HIDE_SYMBOL(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "stmdb sp!, {r1-r3}" "\n"
+ "stmdb sp!, {r4-r8, lr}" "\n"
+ "sub sp, sp, #" STRINGIZE_VALUE_OF(PRESERVEDR4_OFFSET) "\n"
+ "mov r4, r2" "\n"
+ "mov r5, #512" "\n"
+ // r0 contains the code
+ "mov lr, pc" "\n"
+ "mov pc, r0" "\n"
+ "add sp, sp, #" STRINGIZE_VALUE_OF(PRESERVEDR4_OFFSET) "\n"
+ "ldmia sp!, {r4-r8, lr}" "\n"
+ "add sp, sp, #12" "\n"
+ "mov pc, lr" "\n"
+);
+
+asm (
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+ "mov r0, sp" "\n"
+ "bl " SYMBOL_STRING(cti_vm_throw) "\n"
+
+// Both share the same return sequence
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "add sp, sp, #" STRINGIZE_VALUE_OF(PRESERVEDR4_OFFSET) "\n"
+ "ldmia sp!, {r4-r8, lr}" "\n"
+ "add sp, sp, #12" "\n"
+ "mov pc, lr" "\n"
+);
+
+#elif COMPILER(RVCT) && CPU(ARM_THUMB2)
+
+__asm EncodedJSValue ctiTrampoline(void*, RegisterFile*, CallFrame*, JSValue*, Profiler**, JSGlobalData*)
+{
+ PRESERVE8
+ sub sp, sp, # ENABLE_PROFILER_REFERENCE_OFFSET
+ str lr, [sp, # PRESERVED_RETURN_ADDRESS_OFFSET ]
+ str r4, [sp, # PRESERVED_R4_OFFSET ]
+ str r5, [sp, # PRESERVED_R5_OFFSET ]
+ str r6, [sp, # PRESERVED_R6_OFFSET ]
+ str r7, [sp, # PRESERVED_R7_OFFSET ]
+ str r8, [sp, # PRESERVED_R8_OFFSET ]
+ str r9, [sp, # PRESERVED_R9_OFFSET ]
+ str r10, [sp, # PRESERVED_R10_OFFSET ]
+ str r11, [sp, # PRESERVED_R11_OFFSET ]
+ str r1, [sp, # REGISTER_FILE_OFFSET ]
+ str r2, [sp, # CALLFRAME_OFFSET ]
+ str r3, [sp, # EXCEPTION_OFFSET ]
+ mov r5, r2
+ mov r6, #512
+ blx r0
+ ldr r11, [sp, # PRESERVED_R11_OFFSET ]
+ ldr r10, [sp, # PRESERVED_R10_OFFSET ]
+ ldr r9, [sp, # PRESERVED_R9_OFFSET ]
+ ldr r8, [sp, # PRESERVED_R8_OFFSET ]
+ ldr r7, [sp, # PRESERVED_R7_OFFSET ]
+ ldr r6, [sp, # PRESERVED_R6_OFFSET ]
+ ldr r5, [sp, # PRESERVED_R5_OFFSET ]
+ ldr r4, [sp, # PRESERVED_R4_OFFSET ]
+ ldr lr, [sp, # PRESERVED_RETURN_ADDRESS_OFFSET ]
+ add sp, sp, # ENABLE_PROFILER_REFERENCE_OFFSET
+ bx lr
+}
+
+__asm void ctiVMThrowTrampoline()
+{
+ PRESERVE8
+ mov r0, sp
+ bl cti_vm_throw
+ ldr r11, [sp, # PRESERVED_R11_OFFSET ]
+ ldr r10, [sp, # PRESERVED_R10_OFFSET ]
+ ldr r9, [sp, # PRESERVED_R9_OFFSET ]
+ ldr r8, [sp, # PRESERVED_R8_OFFSET ]
+ ldr r7, [sp, # PRESERVED_R7_OFFSET ]
+    ldr r6, [sp, # PRESERVED_R6_OFFSET ]
+ ldr r5, [sp, # PRESERVED_R5_OFFSET ]
+ ldr r4, [sp, # PRESERVED_R4_OFFSET ]
+ ldr lr, [sp, # PRESERVED_RETURN_ADDRESS_OFFSET ]
+ add sp, sp, # ENABLE_PROFILER_REFERENCE_OFFSET
+ bx lr
+}
+
+__asm void ctiOpThrowNotCaught()
+{
+ PRESERVE8
+ ldr r11, [sp, # PRESERVED_R11_OFFSET ]
+ ldr r10, [sp, # PRESERVED_R10_OFFSET ]
+ ldr r9, [sp, # PRESERVED_R9_OFFSET ]
+ ldr r8, [sp, # PRESERVED_R8_OFFSET ]
+ ldr r7, [sp, # PRESERVED_R7_OFFSET ]
+    ldr r6, [sp, # PRESERVED_R6_OFFSET ]
+ ldr r5, [sp, # PRESERVED_R5_OFFSET ]
+ ldr r4, [sp, # PRESERVED_R4_OFFSET ]
+ ldr lr, [sp, # PRESERVED_RETURN_ADDRESS_OFFSET ]
+ add sp, sp, # ENABLE_PROFILER_REFERENCE_OFFSET
+ bx lr
+}
+
+#elif COMPILER(RVCT) && CPU(ARM_TRADITIONAL)
+
+__asm EncodedJSValue ctiTrampoline(void*, RegisterFile*, CallFrame*, void* /*unused1*/, Profiler**, JSGlobalData*)
+{
+ ARM
+ stmdb sp!, {r1-r3}
+ stmdb sp!, {r4-r8, lr}
+ sub sp, sp, # PRESERVEDR4_OFFSET
+ mov r4, r2
+ mov r5, #512
+ mov lr, pc
+ bx r0
+ add sp, sp, # PRESERVEDR4_OFFSET
+ ldmia sp!, {r4-r8, lr}
+ add sp, sp, #12
+ bx lr
+}
+
+__asm void ctiVMThrowTrampoline()
+{
+ ARM
+ PRESERVE8
+ mov r0, sp
+ bl cti_vm_throw
+ add sp, sp, # PRESERVEDR4_OFFSET
+ ldmia sp!, {r4-r8, lr}
+ add sp, sp, #12
+ bx lr
+}
+
+__asm void ctiOpThrowNotCaught()
+{
+ ARM
+ add sp, sp, # PRESERVEDR4_OFFSET
+ ldmia sp!, {r4-r8, lr}
+ add sp, sp, #12
+ bx lr
+}
+#endif
+
+#if ENABLE(OPCODE_SAMPLING)
+ #define CTI_SAMPLER stackFrame.globalData->interpreter->sampler()
+#else
+ #define CTI_SAMPLER 0
+#endif
+
+JITThunks::JITThunks(JSGlobalData* globalData)
+ : m_hostFunctionStubMap(adoptPtr(new HostFunctionStubMap))
+{
+ if (!globalData->canUseJIT())
+ return;
+
+ m_executableMemory = JIT::compileCTIMachineTrampolines(globalData, &m_trampolineStructure);
+ ASSERT(!!m_executableMemory);
+#if CPU(ARM_THUMB2)
+    // Unfortunately the ARM compiler does not like the use of offsetof on JITStackFrame (since it contains non-POD types),
+    // and the OBJECT_OFFSETOF macro does not appear constant enough for it to be happy with its use in COMPILE_ASSERT
+    // macros, so we check the offsets with runtime ASSERTs instead.
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedReturnAddress) == PRESERVED_RETURN_ADDRESS_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR4) == PRESERVED_R4_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR5) == PRESERVED_R5_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR6) == PRESERVED_R6_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR7) == PRESERVED_R7_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR8) == PRESERVED_R8_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR9) == PRESERVED_R9_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR10) == PRESERVED_R10_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR11) == PRESERVED_R11_OFFSET);
+
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, registerFile) == REGISTER_FILE_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, callFrame) == CALLFRAME_OFFSET);
+ // The fifth argument is the first item already on the stack.
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, enabledProfilerReference) == ENABLE_PROFILER_REFERENCE_OFFSET);
+
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == THUNK_RETURN_ADDRESS_OFFSET);
+
+#elif CPU(ARM_TRADITIONAL)
+
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == THUNK_RETURN_ADDRESS_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR4) == PRESERVEDR4_OFFSET);
+
+
+#elif CPU(MIPS)
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedGP) == PRESERVED_GP_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedS0) == PRESERVED_S0_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedS1) == PRESERVED_S1_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedS2) == PRESERVED_S2_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedReturnAddress) == PRESERVED_RETURN_ADDRESS_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == THUNK_RETURN_ADDRESS_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, registerFile) == REGISTER_FILE_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, callFrame) == CALLFRAME_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, unused1) == EXCEPTION_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, enabledProfilerReference) == ENABLE_PROFILER_REFERENCE_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, globalData) == GLOBAL_DATA_OFFSET);
+
+#endif
+}
+
+JITThunks::~JITThunks()
+{
+}
+
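+// In outline, tryCachePutByID works as follows: a non-cell base is simply left alone; uncacheable
+// slots, dictionary structures, and proxied bases repatch the call site to the generic stub; a put
+// that adds a new property compiles a structure-transition stub; otherwise the inline fast path is
+// patched to a simple replace.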
+NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot& slot, StructureStubInfo* stubInfo, bool direct)
+{
+ // The interpreter checks for recursion here; I do not believe this can occur in CTI.
+
+ if (!baseValue.isCell())
+ return;
+
+ // Uncacheable: give up.
+ if (!slot.isCacheable()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
+ return;
+ }
+
+ JSCell* baseCell = baseValue.asCell();
+ Structure* structure = baseCell->structure();
+
+ if (structure->isUncacheableDictionary() || structure->typeInfo().prohibitsPropertyCaching()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
+ return;
+ }
+
+ // If baseCell != base, then baseCell must be a proxy for another object.
+ if (baseCell != slot.base()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
+ return;
+ }
+
+ // Cache hit: Specialize instruction and ref Structures.
+
+ // Structure transition, cache transition info
+ if (slot.type() == PutPropertySlot::NewProperty) {
+ if (structure->isDictionary()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
+ return;
+ }
+
+ // put_by_id_transition checks the prototype chain for setters.
+ normalizePrototypeChain(callFrame, baseCell);
+
+ StructureChain* prototypeChain = structure->prototypeChain(callFrame);
+ stubInfo->initPutByIdTransition(callFrame->globalData(), codeBlock->ownerExecutable(), structure->previousID(), structure, prototypeChain, direct);
+ JIT::compilePutByIdTransition(callFrame->scopeChain()->globalData, codeBlock, stubInfo, structure->previousID(), structure, slot.cachedOffset(), prototypeChain, returnAddress, direct);
+ return;
+ }
+
+ stubInfo->initPutByIdReplace(callFrame->globalData(), codeBlock->ownerExecutable(), structure);
+
+ JIT::patchPutByIdReplace(codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress, direct);
+}
+
+NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo* stubInfo)
+{
+ // FIXME: Write a test that proves we need to check for recursion here just
+ // like the interpreter does, then add a check for recursion.
+
+ // FIXME: Cache property access for immediates.
+ if (!baseValue.isCell()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
+ return;
+ }
+
+ JSGlobalData* globalData = &callFrame->globalData();
+
+ if (isJSArray(baseValue) && propertyName == callFrame->propertyNames().length) {
+ JIT::compilePatchGetArrayLength(callFrame->scopeChain()->globalData, codeBlock, returnAddress);
+ return;
+ }
+
+ if (isJSString(baseValue) && propertyName == callFrame->propertyNames().length) {
+        // The tradeoff of compiling a patched inline string length access routine does not seem
+ // to pay off, so we currently only do this for arrays.
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, globalData->jitStubs->ctiStringLengthTrampoline());
+ return;
+ }
+
+ // Uncacheable: give up.
+ if (!slot.isCacheable()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
+ return;
+ }
+
+ JSCell* baseCell = baseValue.asCell();
+ Structure* structure = baseCell->structure();
+
+ if (structure->isUncacheableDictionary() || structure->typeInfo().prohibitsPropertyCaching()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
+ return;
+ }
+
+ // Cache hit: Specialize instruction and ref Structures.
+
+ if (slot.slotBase() == baseValue) {
+        // Set this up so derefStructures can do its job.
+ stubInfo->initGetByIdSelf(callFrame->globalData(), codeBlock->ownerExecutable(), structure);
+ if ((slot.cachedPropertyType() != PropertySlot::Value) || ((slot.cachedOffset() * sizeof(JSValue)) > (unsigned)MacroAssembler::MaximumCompactPtrAlignedAddressOffset))
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
+ else
+ JIT::patchGetByIdSelf(codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress);
+ return;
+ }
+
+ if (structure->isDictionary()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
+ return;
+ }
+
+ if (slot.slotBase() == structure->prototypeForLookup(callFrame)) {
+ ASSERT(slot.slotBase().isObject());
+
+ JSObject* slotBaseObject = asObject(slot.slotBase());
+ size_t offset = slot.cachedOffset();
+
+ // Since we're accessing a prototype in a loop, it's a good bet that it
+ // should not be treated as a dictionary.
+ if (slotBaseObject->structure()->isDictionary()) {
+ slotBaseObject->flattenDictionaryObject(callFrame->globalData());
+ offset = slotBaseObject->structure()->get(callFrame->globalData(), propertyName);
+ }
+
+ stubInfo->initGetByIdProto(callFrame->globalData(), codeBlock->ownerExecutable(), structure, slotBaseObject->structure());
+
+ ASSERT(!structure->isDictionary());
+ ASSERT(!slotBaseObject->structure()->isDictionary());
+ JIT::compileGetByIdProto(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, slotBaseObject->structure(), propertyName, slot, offset, returnAddress);
+ return;
+ }
+
+ size_t offset = slot.cachedOffset();
+ size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase(), propertyName, offset);
+ if (!count) {
+ stubInfo->accessType = access_get_by_id_generic;
+ return;
+ }
+
+ StructureChain* prototypeChain = structure->prototypeChain(callFrame);
+ stubInfo->initGetByIdChain(callFrame->globalData(), codeBlock->ownerExecutable(), structure, prototypeChain);
+ JIT::compileGetByIdChain(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, prototypeChain, count, propertyName, slot, offset, returnAddress);
+}
+
+#ifndef NDEBUG
+
+extern "C" {
+
+static void jscGeneratedNativeCode()
+{
+ // When executing a JIT stub function (which might do an allocation), we hack the return address
+ // to pretend to be executing this function, to keep stack logging tools from blowing out
+ // memory.
+}
+
+}
+
+struct StackHack {
+ ALWAYS_INLINE StackHack(JITStackFrame& stackFrame)
+ : stackFrame(stackFrame)
+ , savedReturnAddress(*stackFrame.returnAddressSlot())
+ {
+ *stackFrame.returnAddressSlot() = ReturnAddressPtr(FunctionPtr(jscGeneratedNativeCode));
+ }
+
+ ALWAYS_INLINE ~StackHack()
+ {
+ *stackFrame.returnAddressSlot() = savedReturnAddress;
+ }
+
+ JITStackFrame& stackFrame;
+ ReturnAddressPtr savedReturnAddress;
+};
+
+#define STUB_INIT_STACK_FRAME(stackFrame) JITStackFrame& stackFrame = *reinterpret_cast_ptr<JITStackFrame*>(STUB_ARGS); StackHack stackHack(stackFrame)
+#define STUB_SET_RETURN_ADDRESS(returnAddress) stackHack.savedReturnAddress = ReturnAddressPtr(returnAddress)
+#define STUB_RETURN_ADDRESS stackHack.savedReturnAddress
+
+#else
+
+#define STUB_INIT_STACK_FRAME(stackFrame) JITStackFrame& stackFrame = *reinterpret_cast_ptr<JITStackFrame*>(STUB_ARGS)
+#define STUB_SET_RETURN_ADDRESS(returnAddress) *stackFrame.returnAddressSlot() = ReturnAddressPtr(returnAddress)
+#define STUB_RETURN_ADDRESS *stackFrame.returnAddressSlot()
+
+#endif
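+
+// Illustrative usage note: every stub body below begins with STUB_INIT_STACK_FRAME(stackFrame) to
+// bind the incoming STUB_ARGS to a JITStackFrame (and, in debug builds, to install the
+// jscGeneratedNativeCode return-address hack); STUB_RETURN_ADDRESS then denotes the JIT return
+// address of the current call site, which the property-access stubs below use for repatching.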
+
+// The reason this is not inlined is to avoid having to do a PIC branch
+// to get the address of the ctiVMThrowTrampoline function. It's also
+// good to keep the code size down by leaving as much of the exception
+// handling code out of line as possible.
+static NEVER_INLINE void returnToThrowTrampoline(JSGlobalData* globalData, ReturnAddressPtr exceptionLocation, ReturnAddressPtr& returnAddressSlot)
+{
+ ASSERT(globalData->exception);
+ globalData->exceptionLocation = exceptionLocation;
+ returnAddressSlot = ReturnAddressPtr(FunctionPtr(ctiVMThrowTrampoline));
+}
+
+#define VM_THROW_EXCEPTION() \
+ do { \
+ VM_THROW_EXCEPTION_AT_END(); \
+ return 0; \
+ } while (0)
+#define VM_THROW_EXCEPTION_AT_END() \
+ do {\
+ returnToThrowTrampoline(stackFrame.globalData, STUB_RETURN_ADDRESS, STUB_RETURN_ADDRESS);\
+ } while (0)
+
+#define CHECK_FOR_EXCEPTION() \
+ do { \
+ if (UNLIKELY(stackFrame.globalData->exception)) \
+ VM_THROW_EXCEPTION(); \
+ } while (0)
+#define CHECK_FOR_EXCEPTION_AT_END() \
+ do { \
+ if (UNLIKELY(stackFrame.globalData->exception)) \
+ VM_THROW_EXCEPTION_AT_END(); \
+ } while (0)
+#define CHECK_FOR_EXCEPTION_VOID() \
+ do { \
+ if (UNLIKELY(stackFrame.globalData->exception)) { \
+ VM_THROW_EXCEPTION_AT_END(); \
+ return; \
+ } \
+ } while (0)
+
+struct ExceptionHandler {
+ void* catchRoutine;
+ CallFrame* callFrame;
+};
+
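+// jitThrow maps the faulting return address back to a bytecode offset, asks the interpreter to
+// unwind to the nearest handler (which may update callFrame and the exception value), and returns
+// the catch routine to jump to - or ctiOpThrowNotCaught if no handler was found - together with
+// the call frame to resume in.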
+static ExceptionHandler jitThrow(JSGlobalData* globalData, CallFrame* callFrame, JSValue exceptionValue, ReturnAddressPtr faultLocation)
+{
+ ASSERT(exceptionValue);
+
+ unsigned vPCIndex = callFrame->codeBlock()->bytecodeOffset(faultLocation);
+ globalData->exception = JSValue();
+ HandlerInfo* handler = globalData->interpreter->throwException(callFrame, exceptionValue, vPCIndex); // This may update callFrame & exceptionValue!
+ globalData->exception = exceptionValue;
+
+ void* catchRoutine = handler ? handler->nativeCode.executableAddress() : FunctionPtr(ctiOpThrowNotCaught).value();
+ ASSERT(catchRoutine);
+ ExceptionHandler exceptionHandler = { catchRoutine, callFrame };
+ return exceptionHandler;
+}
+
+// Helper function for JIT stubs that may throw an exception in the middle of
+// processing a function call. This function rolls back the register file to
+// our caller, so exception processing can proceed from a valid state.
+template<typename T> static T throwExceptionFromOpCall(JITStackFrame& jitStackFrame, CallFrame* newCallFrame, ReturnAddressPtr& returnAddressSlot)
+{
+ CallFrame* callFrame = newCallFrame->callerFrame();
+ ASSERT(callFrame->globalData().exception);
+ jitStackFrame.callFrame = callFrame;
+ callFrame->globalData().topCallFrame = callFrame;
+ returnToThrowTrampoline(&callFrame->globalData(), ReturnAddressPtr(newCallFrame->returnPC()), returnAddressSlot);
+ return T();
+}
+
+template<typename T> static T throwExceptionFromOpCall(JITStackFrame& jitStackFrame, CallFrame* newCallFrame, ReturnAddressPtr& returnAddressSlot, JSValue exception)
+{
+ newCallFrame->callerFrame()->globalData().exception = exception;
+ return throwExceptionFromOpCall<T>(jitStackFrame, newCallFrame, returnAddressSlot);
+}
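+
+// Typical use from an op_call stub (see op_call_arityCheck below): the helper reroutes the
+// stub's return address to ctiVMThrowTrampoline and returns a default-constructed T, e.g.
+//     return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS,
+//         createStackOverflowError(callFrame->callerFrame()));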
+
+#if CPU(ARM_THUMB2) && COMPILER(GCC)
+
+#define DEFINE_STUB_FUNCTION(rtype, op) \
+ extern "C" { \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
+ }; \
+ asm ( \
+ ".text" "\n" \
+ ".align 2" "\n" \
+ ".globl " SYMBOL_STRING(cti_##op) "\n" \
+ HIDE_SYMBOL(cti_##op) "\n" \
+ ".thumb" "\n" \
+ ".thumb_func " THUMB_FUNC_PARAM(cti_##op) "\n" \
+ SYMBOL_STRING(cti_##op) ":" "\n" \
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
+ "bl " SYMBOL_STRING(JITStubThunked_##op) "\n" \
+ "ldr lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
+ "bx lr" "\n" \
+ ); \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION) \
+
+#elif CPU(MIPS)
+#if WTF_MIPS_PIC
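+// Note (roughly): in MIPS PIC code the callee address must be in $25 (t9) and $28 (gp) is
+// reloaded from the stack frame so the GOT can be addressed; $31 (ra) is spilled to the thunk
+// return-address slot across the call, mirroring what the ARM thunks do with lr.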
+#define DEFINE_STUB_FUNCTION(rtype, op) \
+ extern "C" { \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
+ }; \
+ asm ( \
+ ".text" "\n" \
+ ".align 2" "\n" \
+ ".set noreorder" "\n" \
+ ".set nomacro" "\n" \
+ ".set nomips16" "\n" \
+ ".globl " SYMBOL_STRING(cti_##op) "\n" \
+ ".ent " SYMBOL_STRING(cti_##op) "\n" \
+ SYMBOL_STRING(cti_##op) ":" "\n" \
+ "lw $28," STRINGIZE_VALUE_OF(PRESERVED_GP_OFFSET) "($29)" "\n" \
+ "sw $31," STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "($29)" "\n" \
+ ".set macro" "\n" \
+ "la $25," SYMBOL_STRING(JITStubThunked_##op) "\n" \
+ ".set nomacro" "\n" \
+ ".reloc 1f,R_MIPS_JALR," SYMBOL_STRING(JITStubThunked_##op) "\n" \
+ "1: jalr $25" "\n" \
+ "nop" "\n" \
+ "lw $31," STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "($29)" "\n" \
+ "jr $31" "\n" \
+ "nop" "\n" \
+ ".set reorder" "\n" \
+ ".set macro" "\n" \
+ ".end " SYMBOL_STRING(cti_##op) "\n" \
+ ); \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
+
+#else // WTF_MIPS_PIC
+#define DEFINE_STUB_FUNCTION(rtype, op) \
+ extern "C" { \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
+ }; \
+ asm ( \
+ ".text" "\n" \
+ ".align 2" "\n" \
+ ".set noreorder" "\n" \
+ ".set nomacro" "\n" \
+ ".set nomips16" "\n" \
+ ".globl " SYMBOL_STRING(cti_##op) "\n" \
+ ".ent " SYMBOL_STRING(cti_##op) "\n" \
+ SYMBOL_STRING(cti_##op) ":" "\n" \
+ "sw $31," STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "($29)" "\n" \
+ "jal " SYMBOL_STRING(JITStubThunked_##op) "\n" \
+ "nop" "\n" \
+ "lw $31," STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "($29)" "\n" \
+ "jr $31" "\n" \
+ "nop" "\n" \
+ ".set reorder" "\n" \
+ ".set macro" "\n" \
+ ".end " SYMBOL_STRING(cti_##op) "\n" \
+ ); \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
+
+#endif
+
+#elif CPU(ARM_TRADITIONAL) && COMPILER(GCC)
+
+#define DEFINE_STUB_FUNCTION(rtype, op) \
+ extern "C" { \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
+ }; \
+ asm ( \
+ ".globl " SYMBOL_STRING(cti_##op) "\n" \
+ SYMBOL_STRING(cti_##op) ":" "\n" \
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
+ "bl " SYMBOL_STRING(JITStubThunked_##op) "\n" \
+ "ldr lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
+ "mov pc, lr" "\n" \
+ ); \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
+
+#elif (CPU(ARM_THUMB2) || CPU(ARM_TRADITIONAL)) && COMPILER(RVCT)
+
+#define DEFINE_STUB_FUNCTION(rtype, op) rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
+
+/* The following is a workaround for the RVCT toolchain; preprocessor macros are not expanded before the code is passed to the assembler. */
+
+/* The following section is a template to generate code for GeneratedJITStubs_RVCT.h */
+/* The pattern "#xxx#" will be replaced with "xxx" */
+
+/*
+RVCT(extern "C" #rtype# JITStubThunked_#op#(STUB_ARGS_DECLARATION);)
+RVCT(__asm #rtype# cti_#op#(STUB_ARGS_DECLARATION))
+RVCT({)
+RVCT( PRESERVE8)
+RVCT( IMPORT JITStubThunked_#op#)
+RVCT( str lr, [sp, # THUNK_RETURN_ADDRESS_OFFSET])
+RVCT( bl JITStubThunked_#op#)
+RVCT( ldr lr, [sp, # THUNK_RETURN_ADDRESS_OFFSET])
+RVCT( bx lr)
+RVCT(})
+RVCT()
+*/
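+
+/* For example, substituting #op# with op_add in the template above yields a cti_op_add thunk in
+   GeneratedJITStubs_RVCT.h that saves lr into the thunk return-address slot, calls
+   JITStubThunked_op_add, restores lr, and returns - matching the GCC inline-asm thunks above. */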
+
+/* Include the generated file */
+#include "GeneratedJITStubs_RVCT.h"
+
+#elif CPU(ARM_TRADITIONAL) && COMPILER(MSVC)
+
+#define DEFINE_STUB_FUNCTION(rtype, op) extern "C" rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
+
+/* The following is a workaround for the MSVC toolchain; the inline assembler is not supported when targeting ARM. */
+
+/* The following section is a template to generate code for GeneratedJITStubs_MSVC.asm */
+/* The pattern "#xxx#" will be replaced with "xxx" */
+
+/*
+MSVC_BEGIN( AREA Trampoline, CODE)
+MSVC_BEGIN()
+MSVC_BEGIN( EXPORT ctiTrampoline)
+MSVC_BEGIN( EXPORT ctiVMThrowTrampoline)
+MSVC_BEGIN( EXPORT ctiOpThrowNotCaught)
+MSVC_BEGIN()
+MSVC_BEGIN(ctiTrampoline PROC)
+MSVC_BEGIN( stmdb sp!, {r1-r3})
+MSVC_BEGIN( stmdb sp!, {r4-r8, lr})
+MSVC_BEGIN( sub sp, sp, #68 ; sync with PRESERVEDR4_OFFSET)
+MSVC_BEGIN( mov r4, r2)
+MSVC_BEGIN( mov r5, #512)
+MSVC_BEGIN( ; r0 contains the code)
+MSVC_BEGIN( mov lr, pc)
+MSVC_BEGIN( bx r0)
+MSVC_BEGIN( add sp, sp, #68 ; sync with PRESERVEDR4_OFFSET)
+MSVC_BEGIN( ldmia sp!, {r4-r8, lr})
+MSVC_BEGIN( add sp, sp, #12)
+MSVC_BEGIN( bx lr)
+MSVC_BEGIN(ctiTrampoline ENDP)
+MSVC_BEGIN()
+MSVC_BEGIN(ctiVMThrowTrampoline PROC)
+MSVC_BEGIN( mov r0, sp)
+MSVC_BEGIN( mov lr, pc)
+MSVC_BEGIN( bl cti_vm_throw)
+MSVC_BEGIN(ctiOpThrowNotCaught)
+MSVC_BEGIN( add sp, sp, #68 ; sync with PRESERVEDR4_OFFSET)
+MSVC_BEGIN( ldmia sp!, {r4-r8, lr})
+MSVC_BEGIN( add sp, sp, #12)
+MSVC_BEGIN( bx lr)
+MSVC_BEGIN(ctiVMThrowTrampoline ENDP)
+MSVC_BEGIN()
+
+MSVC( EXPORT cti_#op#)
+MSVC( IMPORT JITStubThunked_#op#)
+MSVC(cti_#op# PROC)
+MSVC( str lr, [sp, #64] ; sync with THUNK_RETURN_ADDRESS_OFFSET)
+MSVC( bl JITStubThunked_#op#)
+MSVC( ldr lr, [sp, #64] ; sync with THUNK_RETURN_ADDRESS_OFFSET)
+MSVC( bx lr)
+MSVC(cti_#op# ENDP)
+MSVC()
+
+MSVC_END( END)
+*/
+
+#elif CPU(SH4)
+#define DEFINE_STUB_FUNCTION(rtype, op) \
+ extern "C" { \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
+ }; \
+ asm volatile( \
+ ".align 2" "\n" \
+ ".globl " SYMBOL_STRING(cti_##op) "\n" \
+ SYMBOL_STRING(cti_##op) ":" "\n" \
+ "sts pr, r11" "\n" \
+ "mov.l r11, @(0x38, r15)" "\n" \
+ "mov.l .L2"SYMBOL_STRING(JITStubThunked_##op)",r0" "\n" \
+ "mov.l @(r0,r12),r11" "\n" \
+ "jsr @r11" "\n" \
+ "nop" "\n" \
+ "mov.l @(0x38, r15), r11 " "\n" \
+ "lds r11, pr " "\n" \
+ "rts" "\n" \
+ "nop" "\n" \
+ ".align 2" "\n" \
+ ".L2"SYMBOL_STRING(JITStubThunked_##op)":.long " SYMBOL_STRING(JITStubThunked_##op)"@GOT \n" \
+ ); \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
+#else
+#define DEFINE_STUB_FUNCTION(rtype, op) rtype JIT_STUB cti_##op(STUB_ARGS_DECLARATION)
+#endif
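+
+// In the default (non-thunked) configuration above, DEFINE_STUB_FUNCTION(EncodedJSValue, op_add)
+// simply declares "EncodedJSValue JIT_STUB cti_op_add(STUB_ARGS_DECLARATION)". On the thunked
+// architectures it additionally emits a small cti_* assembly wrapper that spills the return
+// address into the JITStackFrame before calling the C++ body, JITStubThunked_op_add, and
+// restores it afterwards.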
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_create_this)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSFunction* constructor = asFunction(callFrame->callee());
+#if !ASSERT_DISABLED
+ ConstructData constructData;
+ ASSERT(constructor->methodTable()->getConstructData(constructor, constructData) == ConstructTypeJS);
+#endif
+
+ Structure* structure;
+ JSValue proto = stackFrame.args[0].jsValue();
+ if (proto.isObject())
+ structure = asObject(proto)->inheritorID(*stackFrame.globalData);
+ else
+ structure = constructor->scope()->globalObject->emptyObjectStructure();
+ JSValue result = constructEmptyObject(callFrame, structure);
+
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_convert_this)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue v1 = stackFrame.args[0].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ ASSERT(v1.isPrimitive());
+
+ JSObject* result = v1.toThisObject(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_add)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue v1 = stackFrame.args[0].jsValue();
+ JSValue v2 = stackFrame.args[1].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ if (v1.isString()) {
+ JSValue result = v2.isString()
+ ? jsString(callFrame, asString(v1), asString(v2))
+ : jsString(callFrame, asString(v1), v2.toPrimitiveString(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+ }
+
+ if (v1.isNumber() && v2.isNumber())
+ return JSValue::encode(jsNumber(v1.asNumber() + v2.asNumber()));
+
+ // All other cases are pretty uncommon
+ JSValue result = jsAddSlowCase(callFrame, v1, v2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_pre_inc)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue v = stackFrame.args[0].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(v.toNumber(callFrame) + 1);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(int, timeout_check)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSGlobalData* globalData = stackFrame.globalData;
+ TimeoutChecker& timeoutChecker = globalData->timeoutChecker;
+
+ if (globalData->terminator.shouldTerminate()) {
+ globalData->exception = createTerminatedExecutionException(globalData);
+ VM_THROW_EXCEPTION_AT_END();
+ } else if (timeoutChecker.didTimeOut(stackFrame.callFrame)) {
+ globalData->exception = createInterruptedExecutionException(globalData);
+ VM_THROW_EXCEPTION_AT_END();
+ }
+
+ return timeoutChecker.ticksUntilNextCheck();
+}
+
+DEFINE_STUB_FUNCTION(void*, register_file_check)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ if (UNLIKELY(!stackFrame.registerFile->grow(&callFrame->registers()[callFrame->codeBlock()->m_numCalleeRegisters])))
+ return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS, createStackOverflowError(callFrame->callerFrame()));
+
+ return callFrame;
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_new_object)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return constructEmptyObject(stackFrame.callFrame);
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_id_generic)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ PutPropertySlot slot(stackFrame.callFrame->codeBlock()->isStrictMode());
+ stackFrame.args[0].jsValue().put(stackFrame.callFrame, stackFrame.args[1].identifier(), stackFrame.args[2].jsValue(), slot);
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_id_direct_generic)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ PutPropertySlot slot(stackFrame.callFrame->codeBlock()->isStrictMode());
+ stackFrame.args[0].jsValue().putDirect(stackFrame.callFrame, stackFrame.args[1].identifier(), stackFrame.args[2].jsValue(), slot);
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_generic)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(callFrame, ident, slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_id)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
+ stackFrame.args[0].jsValue().put(callFrame, ident, stackFrame.args[2].jsValue(), slot);
+
+ CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
+ StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
+ if (!stubInfo->seenOnce())
+ stubInfo->setSeen();
+ else
+ JITThunks::tryCachePutByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, stackFrame.args[0].jsValue(), slot, stubInfo, false);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_id_direct)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
+ stackFrame.args[0].jsValue().putDirect(callFrame, ident, stackFrame.args[2].jsValue(), slot);
+
+ CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
+ StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
+ if (!stubInfo->seenOnce())
+ stubInfo->setSeen();
+ else
+ JITThunks::tryCachePutByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, stackFrame.args[0].jsValue(), slot, stubInfo, true);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_id_fail)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
+ stackFrame.args[0].jsValue().put(callFrame, ident, stackFrame.args[2].jsValue(), slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_id_direct_fail)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
+ stackFrame.args[0].jsValue().putDirect(callFrame, ident, stackFrame.args[2].jsValue(), slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_put_by_id_transition_realloc)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ int32_t oldSize = stackFrame.args[3].int32();
+ int32_t newSize = stackFrame.args[4].int32();
+
+ ASSERT(baseValue.isObject());
+ JSObject* base = asObject(baseValue);
+ base->allocatePropertyStorage(*stackFrame.globalData, oldSize, newSize);
+
+ return base;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(callFrame, ident, slot);
+ CHECK_FOR_EXCEPTION();
+
+ CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
+ MethodCallLinkInfo& methodCallLinkInfo = codeBlock->getMethodCallLinkInfo(STUB_RETURN_ADDRESS);
+
+ if (!methodCallLinkInfo.seenOnce()) {
+ methodCallLinkInfo.setSeen();
+ return JSValue::encode(result);
+ }
+
+ // If we successfully got something, then the base from which it is being accessed must
+    // be an object. (Assertion to ensure the asObject() call below is safe, which comes after
+    // an isCacheable() check.)
+ ASSERT(!slot.isCacheableValue() || slot.slotBase().isObject());
+
+ // Check that:
+ // * We're dealing with a JSCell,
+    // * the property is cacheable,
+    // * it's not a dictionary,
+ // * there is a function cached.
+ Structure* structure;
+ JSCell* specific;
+ JSObject* slotBaseObject;
+ if (baseValue.isCell()
+ && slot.isCacheableValue()
+ && !(structure = baseValue.asCell()->structure())->isUncacheableDictionary()
+ && (slotBaseObject = asObject(slot.slotBase()))->getPropertySpecificValue(callFrame, ident, specific)
+ && specific
+ ) {
+
+ JSObject* callee = asObject(specific);
+
+ // Since we're accessing a prototype in a loop, it's a good bet that it
+ // should not be treated as a dictionary.
+ if (slotBaseObject->structure()->isDictionary())
+ slotBaseObject->flattenDictionaryObject(callFrame->globalData());
+
+ // The result fetched should always be the callee!
+ ASSERT(result == JSValue(callee));
+
+ // Check to see if the function is on the object's prototype. Patch up the code to optimize.
+ if (slot.slotBase() == structure->prototypeForLookup(callFrame)) {
+ JIT::patchMethodCallProto(callFrame->globalData(), codeBlock, methodCallLinkInfo, callee, structure, slotBaseObject, STUB_RETURN_ADDRESS);
+ return JSValue::encode(result);
+ }
+
+ // Check to see if the function is on the object itself.
+ // Since we generate the method-check to check both the structure and a prototype-structure (since this
+ // is the common case) we have a problem - we need to patch the prototype structure check to do something
+        // useful. We could try to nop it out altogether, but that's a little messy, so let's do something simpler
+        // for now: perform the check against a special object on the global object, used only for this
+ // purpose. The object is in no way exposed, and as such the check will always pass.
+ if (slot.slotBase() == baseValue) {
+ JIT::patchMethodCallProto(callFrame->globalData(), codeBlock, methodCallLinkInfo, callee, structure, callFrame->scopeChain()->globalObject->methodCallDummy(), STUB_RETURN_ADDRESS);
+ return JSValue::encode(result);
+ }
+ }
+
+ // Revert the get_by_id op back to being a regular get_by_id - allow it to cache like normal, if it needs to.
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id));
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check_update)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(callFrame, ident, slot);
+ CHECK_FOR_EXCEPTION();
+
+ CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
+ MethodCallLinkInfo& methodCallLinkInfo = codeBlock->getMethodCallLinkInfo(STUB_RETURN_ADDRESS);
+
+ ASSERT(methodCallLinkInfo.seenOnce());
+
+ // If we successfully got something, then the base from which it is being accessed must
+    // be an object. (Assertion to ensure the asObject() call below is safe, which comes after
+    // an isCacheable() check.)
+ ASSERT(!slot.isCacheableValue() || slot.slotBase().isObject());
+
+ // Check that:
+ // * We're dealing with a JSCell,
+    // * the property is cacheable,
+    // * it's not a dictionary,
+ // * there is a function cached.
+ Structure* structure;
+ JSCell* specific;
+ JSObject* slotBaseObject;
+ if (!(baseValue.isCell()
+ && slot.isCacheableValue()
+ && !(structure = baseValue.asCell()->structure())->isUncacheableDictionary()
+ && (slotBaseObject = asObject(slot.slotBase()))->getPropertySpecificValue(callFrame, ident, specific)
+ && specific
+ )
+ || (slot.slotBase() != structure->prototypeForLookup(callFrame)
+ && slot.slotBase() != baseValue)) {
+ // Revert the get_by_id op back to being a regular get_by_id - allow it to cache like normal, if it needs to.
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id));
+ return JSValue::encode(result);
+ }
+
+ // Now check if the situation has changed sufficiently that we should bail out of
+ // doing method_check optimizations entirely, or if it changed only slightly, in
+ // which case we can just repatch.
+
+ JSValue proto = structure->prototypeForLookup(callFrame);
+
+ bool previousWasProto = methodCallLinkInfo.cachedPrototype.get() != codeBlock->globalObject()->methodCallDummy();
+ bool currentIsProto = slot.slotBase() == proto;
+
+ JSObject* callee = asObject(specific);
+
+ if (previousWasProto != currentIsProto
+ || !structure->transitivelyTransitionedFrom(methodCallLinkInfo.cachedStructure.get())
+ || (previousWasProto && !slotBaseObject->structure()->transitivelyTransitionedFrom(methodCallLinkInfo.cachedPrototypeStructure.get()))
+ || specific != methodCallLinkInfo.cachedFunction.get()) {
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id));
+ return JSValue::encode(result);
+ }
+
+ // It makes sense to simply repatch the method_check.
+
+ // Since we're accessing a prototype in a loop, it's a good bet that it
+ // should not be treated as a dictionary.
+ if (slotBaseObject->structure()->isDictionary())
+ slotBaseObject->flattenDictionaryObject(callFrame->globalData());
+
+ // The result fetched should always be the callee!
+ ASSERT(result == JSValue(callee));
+
+ // Check to see if the function is on the object's prototype. Patch up the code to optimize.
+ if (slot.slotBase() == proto) {
+ JIT::patchMethodCallProto(callFrame->globalData(), codeBlock, methodCallLinkInfo, callee, structure, slotBaseObject, STUB_RETURN_ADDRESS);
+ return JSValue::encode(result);
+ }
+
+ ASSERT(slot.slotBase() == baseValue);
+
+ // Since we generate the method-check to check both the structure and a prototype-structure (since this
+ // is the common case) we have a problem - we need to patch the prototype structure check to do something
+    // useful. We could try to nop it out altogether, but that's a little messy, so let's do something simpler
+    // for now: perform the check against a special object on the global object, used only for this
+ // purpose. The object is in no way exposed, and as such the check will always pass.
+ JIT::patchMethodCallProto(callFrame->globalData(), codeBlock, methodCallLinkInfo, callee, structure, callFrame->scopeChain()->globalObject->methodCallDummy(), STUB_RETURN_ADDRESS);
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(callFrame, ident, slot);
+
+ CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
+ StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
+ if (!stubInfo->seenOnce())
+ stubInfo->setSeen();
+ else
+ JITThunks::tryCacheGetByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, baseValue, ident, slot, stubInfo);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(callFrame, ident, slot);
+
+ CHECK_FOR_EXCEPTION();
+
+ if (baseValue.isCell()
+ && slot.isCacheable()
+ && !baseValue.asCell()->structure()->isUncacheableDictionary()
+ && slot.slotBase() == baseValue) {
+
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
+
+ ASSERT(slot.slotBase().isObject());
+
+ PolymorphicAccessStructureList* polymorphicStructureList;
+ int listIndex = 1;
+
+ if (stubInfo->accessType == access_get_by_id_self) {
+ ASSERT(!stubInfo->stubRoutine);
+ polymorphicStructureList = new PolymorphicAccessStructureList(callFrame->globalData(), codeBlock->ownerExecutable(), MacroAssemblerCodeRef(), stubInfo->u.getByIdSelf.baseObjectStructure.get(), true);
+ stubInfo->initGetByIdSelfList(polymorphicStructureList, 1);
+ } else {
+ polymorphicStructureList = stubInfo->u.getByIdSelfList.structureList;
+ listIndex = stubInfo->u.getByIdSelfList.listSize;
+ }
+ if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
+ stubInfo->u.getByIdSelfList.listSize++;
+ JIT::compileGetByIdSelfList(callFrame->scopeChain()->globalData, codeBlock, stubInfo, polymorphicStructureList, listIndex, baseValue.asCell()->structure(), ident, slot, slot.cachedOffset());
+
+ if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
+ }
+ } else
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
+ return JSValue::encode(result);
+}
+
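+// Converts a monomorphic proto or chain stub into a PolymorphicAccessStructureList seeded with the
+// existing stub routine, handing back slot 1 for the new case; on later misses it returns the
+// existing list and the next free slot, growing the recorded size up to
+// POLYMORPHIC_LIST_CACHE_SIZE entries.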
+static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(JSGlobalData& globalData, ScriptExecutable* owner, StructureStubInfo* stubInfo, int& listIndex)
+{
+ PolymorphicAccessStructureList* prototypeStructureList = 0;
+ listIndex = 1;
+
+ switch (stubInfo->accessType) {
+ case access_get_by_id_proto:
+ prototypeStructureList = new PolymorphicAccessStructureList(globalData, owner, stubInfo->stubRoutine, stubInfo->u.getByIdProto.baseObjectStructure.get(), stubInfo->u.getByIdProto.prototypeStructure.get(), true);
+ stubInfo->stubRoutine = MacroAssemblerCodeRef();
+ stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
+ break;
+ case access_get_by_id_chain:
+ prototypeStructureList = new PolymorphicAccessStructureList(globalData, owner, stubInfo->stubRoutine, stubInfo->u.getByIdChain.baseObjectStructure.get(), stubInfo->u.getByIdChain.chain.get(), true);
+ stubInfo->stubRoutine = MacroAssemblerCodeRef();
+ stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
+ break;
+ case access_get_by_id_proto_list:
+ prototypeStructureList = stubInfo->u.getByIdProtoList.structureList;
+ listIndex = stubInfo->u.getByIdProtoList.listSize;
+ if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE)
+ stubInfo->u.getByIdProtoList.listSize++;
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ ASSERT(listIndex <= POLYMORPHIC_LIST_CACHE_SIZE);
+ return prototypeStructureList;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_getter_stub)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+ GetterSetter* getterSetter = asGetterSetter(stackFrame.args[0].jsObject());
+ if (!getterSetter->getter())
+ return JSValue::encode(jsUndefined());
+ JSObject* getter = asObject(getterSetter->getter());
+ CallData callData;
+ CallType callType = getter->methodTable()->getCallData(getter, callData);
+ JSValue result = call(callFrame, getter, callType, callData, stackFrame.args[1].jsObject(), ArgList());
+ if (callFrame->hadException())
+ returnToThrowTrampoline(&callFrame->globalData(), stackFrame.args[2].returnAddress(), STUB_RETURN_ADDRESS);
+
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_custom_stub)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSObject* slotBase = stackFrame.args[0].jsObject();
+ PropertySlot::GetValueFunc getter = reinterpret_cast<PropertySlot::GetValueFunc>(stackFrame.args[1].asPointer);
+ const Identifier& ident = stackFrame.args[2].identifier();
+ JSValue result = getter(callFrame, slotBase, ident);
+ if (callFrame->hadException())
+ returnToThrowTrampoline(&callFrame->globalData(), stackFrame.args[3].returnAddress(), STUB_RETURN_ADDRESS);
+
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ const Identifier& propertyName = stackFrame.args[1].identifier();
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(callFrame, propertyName, slot);
+
+ CHECK_FOR_EXCEPTION();
+
+ if (!baseValue.isCell() || !slot.isCacheable() || baseValue.asCell()->structure()->isDictionary() || baseValue.asCell()->structure()->typeInfo().prohibitsPropertyCaching()) {
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
+ return JSValue::encode(result);
+ }
+
+ Structure* structure = baseValue.asCell()->structure();
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
+
+ ASSERT(slot.slotBase().isObject());
+ JSObject* slotBaseObject = asObject(slot.slotBase());
+
+ size_t offset = slot.cachedOffset();
+
+ if (slot.slotBase() == baseValue)
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
+ else if (slot.slotBase() == baseValue.asCell()->structure()->prototypeForLookup(callFrame)) {
+ ASSERT(!baseValue.asCell()->structure()->isDictionary());
+ // Since we're accessing a prototype in a loop, it's a good bet that it
+ // should not be treated as a dictionary.
+ if (slotBaseObject->structure()->isDictionary()) {
+ slotBaseObject->flattenDictionaryObject(callFrame->globalData());
+ offset = slotBaseObject->structure()->get(callFrame->globalData(), propertyName);
+ }
+
+ int listIndex;
+ PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(callFrame->globalData(), codeBlock->ownerExecutable(), stubInfo, listIndex);
+ if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
+ JIT::compileGetByIdProtoList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, slotBaseObject->structure(), propertyName, slot, offset);
+
+ if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
+ }
+ } else if (size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase(), propertyName, offset)) {
+ ASSERT(!baseValue.asCell()->structure()->isDictionary());
+ int listIndex;
+ PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(callFrame->globalData(), codeBlock->ownerExecutable(), stubInfo, listIndex);
+
+ if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
+ StructureChain* protoChain = structure->prototypeChain(callFrame);
+ JIT::compileGetByIdChainList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, protoChain, count, propertyName, slot, offset);
+
+ if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
+ }
+ } else
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
+
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list_full)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_fail)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_array_fail)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_string_fail)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(void, op_check_has_instance)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue baseVal = stackFrame.args[0].jsValue();
+
+ // ECMA-262 15.3.5.3:
+ // Throw an exception either if baseVal is not an object, or if it does not implement 'HasInstance' (i.e. is a function).
+#ifndef NDEBUG
+ TypeInfo typeInfo(UnspecifiedType);
+ ASSERT(!baseVal.isObject() || !(typeInfo = asObject(baseVal)->structure()->typeInfo()).implementsHasInstance());
+#endif
+ stackFrame.globalData->exception = createInvalidParamError(callFrame, "instanceof", baseVal);
+ VM_THROW_EXCEPTION_AT_END();
+}
+
+#if ENABLE(DFG_JIT)
+DEFINE_STUB_FUNCTION(void, optimize_from_loop)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned bytecodeIndex = stackFrame.args[0].int32();
+
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf("Entered optimize_from_loop with executeCounter = %d, reoptimizationRetryCounter = %u, optimizationDelayCounter = %u\n", codeBlock->executeCounter(), codeBlock->reoptimizationRetryCounter(), codeBlock->optimizationDelayCounter());
+#endif
+
+ if (codeBlock->hasOptimizedReplacement()) {
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf("Considering loop OSR into %p(%p) with success/fail %u/%u.\n", codeBlock, codeBlock->replacement(), codeBlock->replacement()->speculativeSuccessCounter(), codeBlock->replacement()->speculativeFailCounter());
+#endif
+ if (codeBlock->replacement()->shouldReoptimizeFromLoopNow()) {
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf("Triggering reoptimization of %p(%p) (in loop).\n", codeBlock, codeBlock->replacement());
+#endif
+ codeBlock->reoptimize();
+ return;
+ }
+ } else {
+ if (!codeBlock->shouldOptimizeNow()) {
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf("Delaying optimization for %p (in loop) because of insufficient profiling.\n", codeBlock);
+#endif
+ return;
+ }
+
+ ScopeChainNode* scopeChain = callFrame->scopeChain();
+
+ JSObject* error = codeBlock->compileOptimized(callFrame, scopeChain);
+#if ENABLE(JIT_VERBOSE_OSR)
+ if (error)
+ fprintf(stderr, "WARNING: optimized compilation from loop failed.\n");
+#else
+ UNUSED_PARAM(error);
+#endif
+
+ if (codeBlock->replacement() == codeBlock) {
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf("Optimizing %p from loop failed.\n", codeBlock);
+#endif
+
+ ASSERT(codeBlock->getJITType() == JITCode::BaselineJIT);
+ codeBlock->dontOptimizeAnytimeSoon();
+ return;
+ }
+ }
+
+ CodeBlock* optimizedCodeBlock = codeBlock->replacement();
+ ASSERT(optimizedCodeBlock->getJITType() == JITCode::DFGJIT);
+
+ if (void* address = DFG::prepareOSREntry(callFrame, optimizedCodeBlock, bytecodeIndex)) {
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf("Optimizing %p from loop succeeded, performing OSR after a delay of %u.\n", codeBlock, codeBlock->optimizationDelayCounter());
+#endif
+
+ codeBlock->optimizeSoon();
+ optimizedCodeBlock->countSpeculationSuccess();
+ STUB_SET_RETURN_ADDRESS(address);
+ return;
+ }
+
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf("Optimizing %p from loop succeeded, OSR failed, after a delay of %u.\n", codeBlock, codeBlock->optimizationDelayCounter());
+#endif
+
+ // Count the OSR failure as a speculation failure. If this happens a lot, then
+ // reoptimize.
+ optimizedCodeBlock->countSpeculationFailure();
+
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf("Encountered loop OSR failure into %p(%p) with success/fail %u/%u.\n", codeBlock, codeBlock->replacement(), codeBlock->replacement()->speculativeSuccessCounter(), codeBlock->replacement()->speculativeFailCounter());
+#endif
+
+    // We are a lot more conservative about triggering reoptimization after an OSR failure than
+    // before it. If we enter the optimize_from_loop trigger with the speculation failure counter
+    // already full, then we really would like to reoptimize immediately. But this case covers
+    // something else: there weren't many (or any) speculation failures before, but we just
+    // failed to enter the speculative code because some variable had the wrong value or
+    // because the OSR code decided for some spurious reason that it did not want to OSR
+    // right now. So we trigger reoptimization only upon the more conservative (non-loop)
+    // reoptimization trigger.
+ if (optimizedCodeBlock->shouldReoptimizeNow()) {
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf("Triggering reoptimization of %p(%p) (in loop after OSR fail).\n", codeBlock, codeBlock->replacement());
+#endif
+ codeBlock->reoptimize();
+ return;
+ }
+
+ // OSR failed this time, but it might succeed next time! Let the code run a bit
+ // longer and then try again.
+ codeBlock->optimizeAfterWarmUp();
+}
+
+DEFINE_STUB_FUNCTION(void, optimize_from_ret)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf("Entered optimize_from_ret with executeCounter = %d, reoptimizationRetryCounter = %u, optimizationDelayCounter = %u\n", codeBlock->executeCounter(), codeBlock->reoptimizationRetryCounter(), codeBlock->optimizationDelayCounter());
+#endif
+
+ if (codeBlock->hasOptimizedReplacement()) {
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf("Returning from old JIT call frame with optimized replacement %p(%p), with success/fail %u/%u", codeBlock, codeBlock->replacement(), codeBlock->replacement()->speculativeSuccessCounter(), codeBlock->replacement()->speculativeFailCounter());
+ CallFrame* callerFrame = callFrame->callerFrame();
+ if (callerFrame)
+ printf(", callerFrame = %p, returnPC = %p, caller code block = %p", callerFrame, callFrame->returnPC().value(), callerFrame->codeBlock());
+ printf("\n");
+#endif
+ if (codeBlock->replacement()->shouldReoptimizeNow()) {
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf("Triggering reoptimization of %p(%p) (in return).\n", codeBlock, codeBlock->replacement());
+#endif
+ codeBlock->reoptimize();
+ }
+
+ codeBlock->optimizeSoon();
+ return;
+ }
+
+ if (!codeBlock->shouldOptimizeNow()) {
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf("Delaying optimization for %p (in return) because of insufficient profiling.\n", codeBlock);
+#endif
+ return;
+ }
+
+ ScopeChainNode* scopeChain = callFrame->scopeChain();
+
+ JSObject* error = codeBlock->compileOptimized(callFrame, scopeChain);
+ if (error)
+ fprintf(stderr, "WARNING: optimized compilation from ret failed.\n");
+
+ if (codeBlock->replacement() == codeBlock) {
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf("Optimizing %p from return failed.\n", codeBlock);
+#endif
+
+ ASSERT(codeBlock->getJITType() == JITCode::BaselineJIT);
+ codeBlock->dontOptimizeAnytimeSoon();
+ return;
+ }
+
+ ASSERT(codeBlock->replacement()->getJITType() == JITCode::DFGJIT);
+
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf("Optimizing %p from return succeeded after a delay of %u.\n", codeBlock, codeBlock->optimizationDelayCounter());
+#endif
+
+ codeBlock->optimizeSoon();
+}
+#endif // ENABLE(DFG_JIT)
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_instanceof)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue value = stackFrame.args[0].jsValue();
+ JSValue baseVal = stackFrame.args[1].jsValue();
+ JSValue proto = stackFrame.args[2].jsValue();
+
+ // At least one of these checks must have failed to get to the slow case.
+ ASSERT(!value.isCell() || !baseVal.isCell() || !proto.isCell()
+ || !value.isObject() || !baseVal.isObject() || !proto.isObject()
+ || !asObject(baseVal)->structure()->typeInfo().implementsDefaultHasInstance());
+
+
+ // ECMA-262 15.3.5.3:
+ // Throw an exception either if baseVal is not an object, or if it does not implement 'HasInstance' (i.e. is a function).
+ TypeInfo typeInfo(UnspecifiedType);
+ if (!baseVal.isObject() || !(typeInfo = asObject(baseVal)->structure()->typeInfo()).implementsHasInstance()) {
+ stackFrame.globalData->exception = createInvalidParamError(stackFrame.callFrame, "instanceof", baseVal);
+ VM_THROW_EXCEPTION();
+ }
+ ASSERT(typeInfo.type() != UnspecifiedType);
+
+ if (!typeInfo.overridesHasInstance() && !value.isObject())
+ return JSValue::encode(jsBoolean(false));
+
+ JSValue result = jsBoolean(asObject(baseVal)->methodTable()->hasInstance(asObject(baseVal), callFrame, value, proto));
+ CHECK_FOR_EXCEPTION_AT_END();
+
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_del_by_id)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSObject* baseObj = stackFrame.args[0].jsValue().toObject(callFrame);
+
+ bool couldDelete = baseObj->methodTable()->deleteProperty(baseObj, callFrame, stackFrame.args[1].identifier());
+ JSValue result = jsBoolean(couldDelete);
+ if (!couldDelete && callFrame->codeBlock()->isStrictMode())
+ stackFrame.globalData->exception = createTypeError(stackFrame.callFrame, "Unable to delete property.");
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_mul)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ if (src1.isNumber() && src2.isNumber())
+ return JSValue::encode(jsNumber(src1.asNumber() * src2.asNumber()));
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(src1.toNumber(callFrame) * src2.toNumber(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_new_func)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ ASSERT(stackFrame.callFrame->codeBlock()->codeType() != FunctionCode || !stackFrame.callFrame->codeBlock()->needsFullScopeChain() || stackFrame.callFrame->uncheckedR(stackFrame.callFrame->codeBlock()->activationRegister()).jsValue());
+ return stackFrame.args[0].function()->make(stackFrame.callFrame, stackFrame.callFrame->scopeChain());
+}
+
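+// Compiles the callee's FunctionExecutable for the requested specialization (call vs. construct).
+// Returns the JSFunction on success; on a compile error it stores the error as the pending
+// exception and returns 0 so the calling stub can route to the throw trampoline.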
+inline void* jitCompileFor(CallFrame* callFrame, CodeSpecializationKind kind)
+{
+ JSFunction* function = asFunction(callFrame->callee());
+ ASSERT(!function->isHostFunction());
+ FunctionExecutable* executable = function->jsExecutable();
+ ScopeChainNode* callDataScopeChain = function->scope();
+ JSObject* error = executable->compileFor(callFrame, callDataScopeChain, kind);
+ if (!error)
+ return function;
+ callFrame->globalData().exception = error;
+ return 0;
+}
+
+DEFINE_STUB_FUNCTION(void*, op_call_jitCompile)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+#if !ASSERT_DISABLED
+ CallData callData;
+ ASSERT(stackFrame.callFrame->callee()->methodTable()->getCallData(stackFrame.callFrame->callee(), callData) == CallTypeJS);
+#endif
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ void* result = jitCompileFor(callFrame, CodeForCall);
+ if (!result)
+ return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS);
+
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(void*, op_construct_jitCompile)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+#if !ASSERT_DISABLED
+ ConstructData constructData;
+ ASSERT(asFunction(stackFrame.callFrame->callee())->methodTable()->getConstructData(stackFrame.callFrame->callee(), constructData) == ConstructTypeJS);
+#endif
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ void* result = jitCompileFor(callFrame, CodeForConstruct);
+ if (!result)
+ return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS);
+
+ return result;
+}
+
+inline CallFrame* arityCheckFor(CallFrame* callFrame, RegisterFile* registerFile, CodeSpecializationKind kind)
+{
+ JSFunction* callee = asFunction(callFrame->callee());
+ ASSERT(!callee->isHostFunction());
+ CodeBlock* newCodeBlock = &callee->jsExecutable()->generatedBytecodeFor(kind);
+ int argumentCountIncludingThis = callFrame->argumentCountIncludingThis();
+
+ // This ensures enough space for the worst case scenario of zero arguments passed by the caller.
+ if (!registerFile->grow(callFrame->registers() + newCodeBlock->m_numParameters + newCodeBlock->m_numCalleeRegisters))
+ return 0;
+
+ ASSERT(argumentCountIncludingThis < newCodeBlock->m_numParameters);
+
+ // Too few arguments -- copy call frame and arguments, then fill in missing arguments with undefined.
+ size_t delta = newCodeBlock->m_numParameters - argumentCountIncludingThis;
+ Register* src = callFrame->registers();
+ Register* dst = callFrame->registers() + delta;
+
+ int i;
+ int end = -CallFrame::offsetFor(argumentCountIncludingThis);
+ for (i = -1; i >= end; --i)
+ dst[i] = src[i];
+
+ end -= delta;
+ for ( ; i >= end; --i)
+ dst[i] = jsUndefined();
+
+ CallFrame* newCallFrame = CallFrame::create(dst);
+ ASSERT((void*)newCallFrame <= registerFile->end());
+ return newCallFrame;
+}
+
+DEFINE_STUB_FUNCTION(void*, op_call_arityCheck)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ CallFrame* newCallFrame = arityCheckFor(callFrame, stackFrame.registerFile, CodeForCall);
+ if (!newCallFrame)
+ return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS, createStackOverflowError(callFrame->callerFrame()));
+
+ return newCallFrame;
+}
+
+DEFINE_STUB_FUNCTION(void*, op_construct_arityCheck)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ CallFrame* newCallFrame = arityCheckFor(callFrame, stackFrame.registerFile, CodeForConstruct);
+ if (!newCallFrame)
+ return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS, createStackOverflowError(callFrame->callerFrame()));
+
+ return newCallFrame;
+}
+
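+// Slow path for an unlinked call site: compiles the callee if needed, selects either the
+// arity-check or the direct entry point, and, once the call site has been seen before, links
+// it via JIT::linkFor so later calls bypass this stub. Returns 0 if compilation throws.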
+inline void* lazyLinkFor(CallFrame* callFrame, CodeSpecializationKind kind)
+{
+ JSFunction* callee = asFunction(callFrame->callee());
+ ExecutableBase* executable = callee->executable();
+
+ MacroAssemblerCodePtr codePtr;
+ CodeBlock* codeBlock = 0;
+ CallLinkInfo* callLinkInfo = &callFrame->callerFrame()->codeBlock()->getCallLinkInfo(callFrame->returnPC());
+
+ if (executable->isHostFunction())
+ codePtr = executable->generatedJITCodeFor(kind).addressForCall();
+ else {
+ FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
+ JSObject* error = functionExecutable->compileFor(callFrame, callee->scope(), kind);
+ if (error)
+ return 0;
+ codeBlock = &functionExecutable->generatedBytecodeFor(kind);
+ if (callFrame->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->m_numParameters)
+ || callLinkInfo->callType == CallLinkInfo::CallVarargs)
+ codePtr = functionExecutable->generatedJITCodeWithArityCheckFor(kind);
+ else
+ codePtr = functionExecutable->generatedJITCodeFor(kind).addressForCall();
+ }
+
+ if (!callLinkInfo->seenOnce())
+ callLinkInfo->setSeen();
+ else
+ JIT::linkFor(callee, callFrame->callerFrame()->codeBlock(), codeBlock, codePtr, callLinkInfo, &callFrame->globalData(), kind);
+
+ return codePtr.executableAddress();
+}
+
+DEFINE_STUB_FUNCTION(void*, vm_lazyLinkCall)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ void* result = lazyLinkFor(callFrame, CodeForCall);
+ if (!result)
+ return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS);
+
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(void*, vm_lazyLinkConstruct)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ void* result = lazyLinkFor(callFrame, CodeForConstruct);
+ if (!result)
+ return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS);
+
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_push_activation)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSActivation* activation = JSActivation::create(stackFrame.callFrame->globalData(), stackFrame.callFrame, static_cast<FunctionExecutable*>(stackFrame.callFrame->codeBlock()->ownerExecutable()));
+ stackFrame.callFrame->setScopeChain(stackFrame.callFrame->scopeChain()->push(activation));
+ return activation;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_NotJSFunction)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue callee = callFrame->calleeAsValue();
+
+ CallData callData;
+ CallType callType = getCallData(callee, callData);
+
+ ASSERT(callType != CallTypeJS);
+ if (callType != CallTypeHost) {
+ ASSERT(callType == CallTypeNone);
+ return throwExceptionFromOpCall<EncodedJSValue>(stackFrame, callFrame, STUB_RETURN_ADDRESS, createNotAFunctionError(callFrame->callerFrame(), callee));
+ }
+
+ EncodedJSValue returnValue;
+ {
+ SamplingTool::HostCallRecord callRecord(CTI_SAMPLER);
+ returnValue = callData.native.function(callFrame);
+ }
+
+ if (stackFrame.globalData->exception)
+ return throwExceptionFromOpCall<EncodedJSValue>(stackFrame, callFrame, STUB_RETURN_ADDRESS);
+
+ return returnValue;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_create_arguments)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ Arguments* arguments = Arguments::create(*stackFrame.globalData, stackFrame.callFrame);
+ return JSValue::encode(JSValue(arguments));
+}
+
+DEFINE_STUB_FUNCTION(void, op_tear_off_activation)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ ASSERT(callFrame->codeBlock()->needsFullScopeChain());
+ JSValue activationValue = stackFrame.args[0].jsValue();
+ if (!activationValue) {
+ if (JSValue v = stackFrame.args[1].jsValue()) {
+ if (!callFrame->codeBlock()->isStrictMode())
+ asArguments(v)->tearOff(callFrame);
+ }
+ return;
+ }
+ JSActivation* activation = asActivation(stackFrame.args[0].jsValue());
+ activation->tearOff(*stackFrame.globalData);
+ if (JSValue v = stackFrame.args[1].jsValue())
+ asArguments(v)->didTearOffActivation(*stackFrame.globalData, activation);
+}
+
+DEFINE_STUB_FUNCTION(void, op_tear_off_arguments)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ ASSERT(callFrame->codeBlock()->usesArguments() && !callFrame->codeBlock()->needsFullScopeChain());
+ asArguments(stackFrame.args[0].jsValue())->tearOff(callFrame);
+}
+
+DEFINE_STUB_FUNCTION(void, op_profile_will_call)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ ASSERT(*stackFrame.enabledProfilerReference);
+ (*stackFrame.enabledProfilerReference)->willExecute(stackFrame.callFrame, stackFrame.args[0].jsValue());
+}
+
+DEFINE_STUB_FUNCTION(void, op_profile_did_call)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ ASSERT(*stackFrame.enabledProfilerReference);
+ (*stackFrame.enabledProfilerReference)->didExecute(stackFrame.callFrame, stackFrame.args[0].jsValue());
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_new_array)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return constructArray(stackFrame.callFrame, reinterpret_cast<JSValue*>(&stackFrame.callFrame->registers()[stackFrame.args[0].int32()]), stackFrame.args[1].int32());
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_new_array_buffer)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return constructArray(stackFrame.callFrame, stackFrame.callFrame->codeBlock()->constantBuffer(stackFrame.args[0].int32()), stackFrame.args[1].int32());
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ ScopeChainNode* scopeChain = callFrame->scopeChain();
+
+ ScopeChainIterator iter = scopeChain->begin();
+ ScopeChainIterator end = scopeChain->end();
+ ASSERT(iter != end);
+
+ Identifier& ident = stackFrame.args[0].identifier();
+ do {
+ JSObject* o = iter->get();
+ PropertySlot slot(o);
+ if (o->getPropertySlot(callFrame, ident, slot)) {
+ JSValue result = slot.getValue(callFrame, ident);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+ }
+ } while (++iter != end);
+
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident);
+ VM_THROW_EXCEPTION();
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_construct_NotJSConstruct)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue callee = callFrame->calleeAsValue();
+
+ ConstructData constructData;
+ ConstructType constructType = getConstructData(callee, constructData);
+
+ ASSERT(constructType != ConstructTypeJS);
+ if (constructType != ConstructTypeHost) {
+ ASSERT(constructType == ConstructTypeNone);
+ return throwExceptionFromOpCall<EncodedJSValue>(stackFrame, callFrame, STUB_RETURN_ADDRESS, createNotAConstructorError(callFrame->callerFrame(), callee));
+ }
+
+ EncodedJSValue returnValue;
+ {
+ SamplingTool::HostCallRecord callRecord(CTI_SAMPLER);
+ returnValue = constructData.native.function(callFrame);
+ }
+
+ if (stackFrame.globalData->exception)
+ return throwExceptionFromOpCall<EncodedJSValue>(stackFrame, callFrame, STUB_RETURN_ADDRESS);
+
+ return returnValue;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSValue subscript = stackFrame.args[1].jsValue();
+
+ if (LIKELY(baseValue.isCell() && subscript.isString())) {
+ if (JSValue result = baseValue.asCell()->fastGetOwnProperty(callFrame, asString(subscript)->value(callFrame))) {
+ CHECK_FOR_EXCEPTION();
+ return JSValue::encode(result);
+ }
+ }
+
+ if (subscript.isUInt32()) {
+ uint32_t i = subscript.asUInt32();
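+        // If the base is a string (or byte array) that can be indexed directly, repatch this
+        // call site to the specialized stub so later accesses skip the generic path.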
+ if (isJSString(baseValue) && asString(baseValue)->canGetIndex(i)) {
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_string));
+ JSValue result = asString(baseValue)->getIndex(callFrame, i);
+ CHECK_FOR_EXCEPTION();
+ return JSValue::encode(result);
+ }
+ if (isJSByteArray(baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
+ // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_byte_array));
+ return JSValue::encode(asByteArray(baseValue)->getIndex(callFrame, i));
+ }
+ JSValue result = baseValue.get(callFrame, i);
+ CHECK_FOR_EXCEPTION();
+ return JSValue::encode(result);
+ }
+
+ Identifier property(callFrame, subscript.toString(callFrame));
+ JSValue result = baseValue.get(callFrame, property);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_string)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSValue subscript = stackFrame.args[1].jsValue();
+
+ JSValue result;
+
+ if (LIKELY(subscript.isUInt32())) {
+ uint32_t i = subscript.asUInt32();
+ if (isJSString(baseValue) && asString(baseValue)->canGetIndex(i))
+ result = asString(baseValue)->getIndex(callFrame, i);
+ else {
+ result = baseValue.get(callFrame, i);
+ if (!isJSString(baseValue))
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val));
+ }
+ } else {
+ Identifier property(callFrame, subscript.toString(callFrame));
+ result = baseValue.get(callFrame, property);
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_byte_array)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSValue subscript = stackFrame.args[1].jsValue();
+
+ JSValue result;
+
+ if (LIKELY(subscript.isUInt32())) {
+ uint32_t i = subscript.asUInt32();
+ if (isJSByteArray(baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
+ // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
+ return JSValue::encode(asByteArray(baseValue)->getIndex(callFrame, i));
+ }
+
+ result = baseValue.get(callFrame, i);
+ if (!isJSByteArray(baseValue))
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val));
+ } else {
+ Identifier property(callFrame, subscript.toString(callFrame));
+ result = baseValue.get(callFrame, property);
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_sub)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ if (src1.isNumber() && src2.isNumber())
+ return JSValue::encode(jsNumber(src1.asNumber() - src2.asNumber()));
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(src1.toNumber(callFrame) - src2.toNumber(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_val)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSGlobalData* globalData = stackFrame.globalData;
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSValue subscript = stackFrame.args[1].jsValue();
+ JSValue value = stackFrame.args[2].jsValue();
+
+ if (LIKELY(subscript.isUInt32())) {
+ uint32_t i = subscript.asUInt32();
+ if (isJSArray(baseValue)) {
+ JSArray* jsArray = asArray(baseValue);
+ if (jsArray->canSetIndex(i))
+ jsArray->setIndex(*globalData, i, value);
+ else
+ JSArray::putByIndex(jsArray, callFrame, i, value);
+ } else if (isJSByteArray(baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
+ JSByteArray* jsByteArray = asByteArray(baseValue);
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_val_byte_array));
+ // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
+            if (value.isInt32()) {
+                jsByteArray->setIndex(i, value.asInt32());
+                return;
+            }
+            if (value.isNumber()) {
+                jsByteArray->setIndex(i, value.asNumber());
+                return;
+            }
+
+ baseValue.put(callFrame, i, value);
+ } else
+ baseValue.put(callFrame, i, value);
+ } else {
+ Identifier property(callFrame, subscript.toString(callFrame));
+ if (!stackFrame.globalData->exception) { // Don't put to an object if toString threw an exception.
+ PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
+ baseValue.put(callFrame, property, value, slot);
+ }
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_val_byte_array)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSValue subscript = stackFrame.args[1].jsValue();
+ JSValue value = stackFrame.args[2].jsValue();
+
+ if (LIKELY(subscript.isUInt32())) {
+ uint32_t i = subscript.asUInt32();
+ if (isJSByteArray(baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
+ JSByteArray* jsByteArray = asByteArray(baseValue);
+
+ // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
+            if (value.isInt32()) {
+                jsByteArray->setIndex(i, value.asInt32());
+                return;
+            }
+            if (value.isNumber()) {
+                jsByteArray->setIndex(i, value.asNumber());
+                return;
+            }
+ }
+
+ if (!isJSByteArray(baseValue))
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_val));
+ baseValue.put(callFrame, i, value);
+ } else {
+ Identifier property(callFrame, subscript.toString(callFrame));
+ if (!stackFrame.globalData->exception) { // Don't put to an object if toString threw an exception.
+ PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
+ baseValue.put(callFrame, property, value, slot);
+ }
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_less)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsBoolean(jsLess<true>(callFrame, stackFrame.args[0].jsValue(), stackFrame.args[1].jsValue()));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_lesseq)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsBoolean(jsLessEq<true>(callFrame, stackFrame.args[0].jsValue(), stackFrame.args[1].jsValue()));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_greater)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsBoolean(jsLess<false>(callFrame, stackFrame.args[1].jsValue(), stackFrame.args[0].jsValue()));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_greatereq)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsBoolean(jsLessEq<false>(callFrame, stackFrame.args[1].jsValue(), stackFrame.args[0].jsValue()));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(void*, op_load_varargs)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ RegisterFile* registerFile = stackFrame.registerFile;
+ JSValue thisValue = stackFrame.args[0].jsValue();
+ JSValue arguments = stackFrame.args[1].jsValue();
+ int firstFreeRegister = stackFrame.args[2].int32();
+
+ CallFrame* newCallFrame = loadVarargs(callFrame, registerFile, thisValue, arguments, firstFreeRegister);
+ if (!newCallFrame)
+ VM_THROW_EXCEPTION();
+ return newCallFrame;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_negate)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src = stackFrame.args[0].jsValue();
+
+ if (src.isNumber())
+ return JSValue::encode(jsNumber(-src.asNumber()));
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(-src.toNumber(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_base)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(JSC::resolveBase(stackFrame.callFrame, stackFrame.args[0].identifier(), stackFrame.callFrame->scopeChain(), false));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_base_strict_put)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ JSValue base = JSC::resolveBase(stackFrame.callFrame, stackFrame.args[0].identifier(), stackFrame.callFrame->scopeChain(), true);
+ if (!base) {
+ stackFrame.globalData->exception = createErrorForInvalidGlobalAssignment(stackFrame.callFrame, stackFrame.args[0].identifier().ustring());
+ VM_THROW_EXCEPTION();
+ }
+ return JSValue::encode(base);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_ensure_property_exists)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ JSValue base = stackFrame.callFrame->r(stackFrame.args[0].int32()).jsValue();
+ JSObject* object = asObject(base);
+ PropertySlot slot(object);
+ ASSERT(stackFrame.callFrame->codeBlock()->isStrictMode());
+ if (!object->getPropertySlot(stackFrame.callFrame, stackFrame.args[1].identifier(), slot)) {
+ stackFrame.globalData->exception = createErrorForInvalidGlobalAssignment(stackFrame.callFrame, stackFrame.args[1].identifier().ustring());
+ VM_THROW_EXCEPTION();
+ }
+
+ return JSValue::encode(base);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_skip)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ ScopeChainNode* scopeChain = callFrame->scopeChain();
+
+ int skip = stackFrame.args[1].int32();
+
+ ScopeChainIterator iter = scopeChain->begin();
+ ScopeChainIterator end = scopeChain->end();
+ ASSERT(iter != end);
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ bool checkTopLevel = codeBlock->codeType() == FunctionCode && codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
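+    // Functions that need a full scope chain create their activation lazily. One level of
+    // skip is consumed for the top-level node, but the iterator is only advanced if the
+    // activation register has actually been filled in.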
+ if (checkTopLevel && skip--) {
+ if (callFrame->uncheckedR(codeBlock->activationRegister()).jsValue())
+ ++iter;
+ }
+ while (skip--) {
+ ++iter;
+ ASSERT(iter != end);
+ }
+ Identifier& ident = stackFrame.args[0].identifier();
+ do {
+ JSObject* o = iter->get();
+ PropertySlot slot(o);
+ if (o->getPropertySlot(callFrame, ident, slot)) {
+ JSValue result = slot.getValue(callFrame, ident);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+ }
+ } while (++iter != end);
+
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident);
+ VM_THROW_EXCEPTION();
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_global)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ JSGlobalObject* globalObject = codeBlock->globalObject();
+ Identifier& ident = stackFrame.args[0].identifier();
+ unsigned globalResolveInfoIndex = stackFrame.args[1].int32();
+ ASSERT(globalObject->isGlobalObject());
+
+ PropertySlot slot(globalObject);
+ if (globalObject->getPropertySlot(callFrame, ident, slot)) {
+ JSValue result = slot.getValue(callFrame, ident);
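+        // Cache the global object's structure and the slot offset in the GlobalResolveInfo so
+        // the JIT fast path can re-resolve this global without re-entering the stub; only plain
+        // cacheable value slots on the global object itself qualify.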
+ if (slot.isCacheableValue() && !globalObject->structure()->isUncacheableDictionary() && slot.slotBase() == globalObject) {
+ GlobalResolveInfo& globalResolveInfo = codeBlock->globalResolveInfo(globalResolveInfoIndex);
+ globalResolveInfo.structure.set(callFrame->globalData(), codeBlock->ownerExecutable(), globalObject->structure());
+ globalResolveInfo.offset = slot.cachedOffset();
+ return JSValue::encode(result);
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+ }
+
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident);
+ VM_THROW_EXCEPTION();
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_div)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ if (src1.isNumber() && src2.isNumber())
+ return JSValue::encode(jsNumber(src1.asNumber() / src2.asNumber()));
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(src1.toNumber(callFrame) / src2.toNumber(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_pre_dec)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue v = stackFrame.args[0].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(v.toNumber(callFrame) - 1);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(int, op_jless)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ bool result = jsLess<true>(callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(int, op_jlesseq)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ bool result = jsLessEq<true>(callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(int, op_jgreater)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ bool result = jsLess<false>(callFrame, src2, src1);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(int, op_jgreatereq)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ bool result = jsLessEq<false>(callFrame, src2, src1);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_not)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src = stackFrame.args[0].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue result = jsBoolean(!src.toBoolean(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(int, op_jtrue)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ bool result = src1.toBoolean(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_post_inc)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue v = stackFrame.args[0].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ double number = v.toNumber(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+
+ callFrame->registers()[stackFrame.args[1].int32()] = jsNumber(number + 1);
+ return JSValue::encode(jsNumber(number));
+}
+
+DEFINE_STUB_FUNCTION(int, op_eq)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+#if USE(JSVALUE32_64)
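+    // Loose equality, handled value kind by value kind: objects are converted with toPrimitive()
+    // and the comparison restarts from 'start'. The JSVALUE64 build below defers to
+    // JSValue::equalSlowCaseInline() instead.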
+ start:
+ if (src2.isUndefined()) {
+        return src1.isNull()
+            || (src1.isCell() && src1.asCell()->structure()->typeInfo().masqueradesAsUndefined())
+            || src1.isUndefined();
+ }
+
+ if (src2.isNull()) {
+        return src1.isUndefined()
+            || (src1.isCell() && src1.asCell()->structure()->typeInfo().masqueradesAsUndefined())
+            || src1.isNull();
+ }
+
+ if (src1.isInt32()) {
+ if (src2.isDouble())
+ return src1.asInt32() == src2.asDouble();
+ double d = src2.toNumber(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ return src1.asInt32() == d;
+ }
+
+ if (src1.isDouble()) {
+ if (src2.isInt32())
+ return src1.asDouble() == src2.asInt32();
+ double d = src2.toNumber(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ return src1.asDouble() == d;
+ }
+
+ if (src1.isTrue()) {
+ if (src2.isFalse())
+ return false;
+ double d = src2.toNumber(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ return d == 1.0;
+ }
+
+ if (src1.isFalse()) {
+ if (src2.isTrue())
+ return false;
+ double d = src2.toNumber(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ return d == 0.0;
+ }
+
+ if (src1.isUndefined())
+ return src2.isCell() && src2.asCell()->structure()->typeInfo().masqueradesAsUndefined();
+
+ if (src1.isNull())
+ return src2.isCell() && src2.asCell()->structure()->typeInfo().masqueradesAsUndefined();
+
+ JSCell* cell1 = src1.asCell();
+
+ if (cell1->isString()) {
+ if (src2.isInt32())
+ return jsToNumber(static_cast<JSString*>(cell1)->value(stackFrame.callFrame)) == src2.asInt32();
+
+ if (src2.isDouble())
+ return jsToNumber(static_cast<JSString*>(cell1)->value(stackFrame.callFrame)) == src2.asDouble();
+
+ if (src2.isTrue())
+ return jsToNumber(static_cast<JSString*>(cell1)->value(stackFrame.callFrame)) == 1.0;
+
+ if (src2.isFalse())
+ return jsToNumber(static_cast<JSString*>(cell1)->value(stackFrame.callFrame)) == 0.0;
+
+ JSCell* cell2 = src2.asCell();
+ if (cell2->isString())
+ return static_cast<JSString*>(cell1)->value(stackFrame.callFrame) == static_cast<JSString*>(cell2)->value(stackFrame.callFrame);
+
+ src2 = asObject(cell2)->toPrimitive(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ goto start;
+ }
+
+ if (src2.isObject())
+ return asObject(cell1) == asObject(src2);
+ src1 = asObject(cell1)->toPrimitive(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ goto start;
+
+#else // USE(JSVALUE32_64)
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ bool result = JSValue::equalSlowCaseInline(callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+#endif // USE(JSVALUE32_64)
+}
+
+DEFINE_STUB_FUNCTION(int, op_eq_strings)
+{
+#if USE(JSVALUE32_64)
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSString* string1 = stackFrame.args[0].jsString();
+ JSString* string2 = stackFrame.args[1].jsString();
+
+ ASSERT(string1->isString());
+ ASSERT(string2->isString());
+ return string1->value(stackFrame.callFrame) == string2->value(stackFrame.callFrame);
+#else
+ UNUSED_PARAM(args);
+ ASSERT_NOT_REACHED();
+ return 0;
+#endif
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_lshift)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue val = stackFrame.args[0].jsValue();
+ JSValue shift = stackFrame.args[1].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber((val.toInt32(callFrame)) << (shift.toUInt32(callFrame) & 0x1f));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitand)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ ASSERT(!src1.isInt32() || !src2.isInt32());
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(src1.toInt32(callFrame) & src2.toInt32(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_rshift)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue val = stackFrame.args[0].jsValue();
+ JSValue shift = stackFrame.args[1].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber((val.toInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitnot)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src = stackFrame.args[0].jsValue();
+
+ ASSERT(!src.isInt32());
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(~src.toInt32(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_with_base)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ ScopeChainNode* scopeChain = callFrame->scopeChain();
+
+ ScopeChainIterator iter = scopeChain->begin();
+ ScopeChainIterator end = scopeChain->end();
+
+ // FIXME: add scopeDepthIsZero optimization
+
+ ASSERT(iter != end);
+
+ Identifier& ident = stackFrame.args[0].identifier();
+ JSObject* base;
+ do {
+ base = iter->get();
+ PropertySlot slot(base);
+ if (base->getPropertySlot(callFrame, ident, slot)) {
+ JSValue result = slot.getValue(callFrame, ident);
+ CHECK_FOR_EXCEPTION_AT_END();
+
+ callFrame->registers()[stackFrame.args[1].int32()] = JSValue(base);
+ return JSValue::encode(result);
+ }
+ ++iter;
+ } while (iter != end);
+
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident);
+ VM_THROW_EXCEPTION_AT_END();
+ return JSValue::encode(JSValue());
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_with_this)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ ScopeChainNode* scopeChain = callFrame->scopeChain();
+
+ ScopeChainIterator iter = scopeChain->begin();
+ ScopeChainIterator end = scopeChain->end();
+
+ // FIXME: add scopeDepthIsZero optimization
+
+ ASSERT(iter != end);
+
+ Identifier& ident = stackFrame.args[0].identifier();
+ JSObject* base;
+ do {
+ base = iter->get();
+ ++iter;
+ PropertySlot slot(base);
+ if (base->getPropertySlot(callFrame, ident, slot)) {
+ JSValue result = slot.getValue(callFrame, ident);
+ CHECK_FOR_EXCEPTION_AT_END();
+
+            // All entries on the scope chain should be EnvironmentRecords (activations etc.),
+            // other than 'with' objects, which are directly referenced from the scope chain,
+            // and the global object. If we hit either an EnvironmentRecord or the global
+            // object at the end of the scope chain, the this value is undefined. If we hit a
+            // non-EnvironmentRecord within the scope chain, pass the base as the this value.
+ if (iter == end || base->structure()->typeInfo().isEnvironmentRecord())
+ callFrame->registers()[stackFrame.args[1].int32()] = jsUndefined();
+ else
+ callFrame->registers()[stackFrame.args[1].int32()] = JSValue(base);
+ return JSValue::encode(result);
+ }
+ } while (iter != end);
+
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident);
+ VM_THROW_EXCEPTION_AT_END();
+ return JSValue::encode(JSValue());
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_new_func_exp)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ FunctionExecutable* function = stackFrame.args[0].function();
+ JSFunction* func = function->make(callFrame, callFrame->scopeChain());
+ ASSERT(callFrame->codeBlock()->codeType() != FunctionCode || !callFrame->codeBlock()->needsFullScopeChain() || callFrame->uncheckedR(callFrame->codeBlock()->activationRegister()).jsValue());
+
+ /*
+ The Identifier in a FunctionExpression can be referenced from inside
+ the FunctionExpression's FunctionBody to allow the function to call
+ itself recursively. However, unlike in a FunctionDeclaration, the
+ Identifier in a FunctionExpression cannot be referenced from and
+ does not affect the scope enclosing the FunctionExpression.
+ */
+ if (!function->name().isNull()) {
+ JSStaticScopeObject* functionScopeObject = JSStaticScopeObject::create(callFrame, function->name(), func, ReadOnly | DontDelete);
+ func->setScope(callFrame->globalData(), func->scope()->push(functionScopeObject));
+ }
+
+ return func;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_mod)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue dividendValue = stackFrame.args[0].jsValue();
+ JSValue divisorValue = stackFrame.args[1].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ double d = dividendValue.toNumber(callFrame);
+ JSValue result = jsNumber(fmod(d, divisorValue.toNumber(callFrame)));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_post_dec)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue v = stackFrame.args[0].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ double number = v.toNumber(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+
+ callFrame->registers()[stackFrame.args[1].int32()] = jsNumber(number - 1);
+ return JSValue::encode(jsNumber(number));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_urshift)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue val = stackFrame.args[0].jsValue();
+ JSValue shift = stackFrame.args[1].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber((val.toUInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitxor)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue result = jsNumber(src1.toInt32(callFrame) ^ src2.toInt32(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_new_regexp)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ RegExp* regExp = stackFrame.args[0].regExp();
+ if (!regExp->isValid()) {
+ stackFrame.globalData->exception = createSyntaxError(callFrame, "Invalid flags supplied to RegExp constructor.");
+ VM_THROW_EXCEPTION();
+ }
+
+ return RegExpObject::create(*stackFrame.globalData, stackFrame.callFrame->lexicalGlobalObject(), stackFrame.callFrame->lexicalGlobalObject()->regExpStructure(), regExp);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitor)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue result = jsNumber(src1.toInt32(callFrame) | src2.toInt32(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_eval)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ CallFrame* callerFrame = callFrame->callerFrame();
+ ASSERT(callFrame->callerFrame()->codeBlock()->codeType() != FunctionCode
+ || !callFrame->callerFrame()->codeBlock()->needsFullScopeChain()
+ || callFrame->callerFrame()->uncheckedR(callFrame->callerFrame()->codeBlock()->activationRegister()).jsValue());
+
+ callFrame->setScopeChain(callerFrame->scopeChain());
+ callFrame->setReturnPC(static_cast<Instruction*>((STUB_RETURN_ADDRESS).value()));
+ callFrame->setCodeBlock(0);
+
+ if (!isHostFunction(callFrame->calleeAsValue(), globalFuncEval))
+ return JSValue::encode(JSValue());
+
+ JSValue result = eval(callFrame);
+ if (stackFrame.globalData->exception)
+ return throwExceptionFromOpCall<EncodedJSValue>(stackFrame, callFrame, STUB_RETURN_ADDRESS);
+
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(void*, op_throw)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ ExceptionHandler handler = jitThrow(stackFrame.globalData, stackFrame.callFrame, stackFrame.args[0].jsValue(), STUB_RETURN_ADDRESS);
+ STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
+ return handler.callFrame;
+}
+
+DEFINE_STUB_FUNCTION(JSPropertyNameIterator*, op_get_pnames)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSObject* o = stackFrame.args[0].jsObject();
+ Structure* structure = o->structure();
+ JSPropertyNameIterator* jsPropertyNameIterator = structure->enumerationCache();
+ if (!jsPropertyNameIterator || jsPropertyNameIterator->cachedPrototypeChain() != structure->prototypeChain(callFrame))
+ jsPropertyNameIterator = JSPropertyNameIterator::create(callFrame, o);
+ return jsPropertyNameIterator;
+}
+
+DEFINE_STUB_FUNCTION(int, has_property)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSObject* base = stackFrame.args[0].jsObject();
+ JSString* property = stackFrame.args[1].jsString();
+ int result = base->hasProperty(stackFrame.callFrame, Identifier(stackFrame.callFrame, property->value(stackFrame.callFrame)));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_push_scope)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSObject* o = stackFrame.args[0].jsValue().toObject(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ stackFrame.callFrame->setScopeChain(stackFrame.callFrame->scopeChain()->push(o));
+ return o;
+}
+
+DEFINE_STUB_FUNCTION(void, op_pop_scope)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ stackFrame.callFrame->setScopeChain(stackFrame.callFrame->scopeChain()->pop());
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_typeof)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(jsTypeStringForValue(stackFrame.callFrame, stackFrame.args[0].jsValue()));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_undefined)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue v = stackFrame.args[0].jsValue();
+ return JSValue::encode(jsBoolean(v.isCell() ? v.asCell()->structure()->typeInfo().masqueradesAsUndefined() : v.isUndefined()));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_boolean)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(jsBoolean(stackFrame.args[0].jsValue().isBoolean()));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_number)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(jsBoolean(stackFrame.args[0].jsValue().isNumber()));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_string)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(jsBoolean(isJSString(stackFrame.args[0].jsValue())));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_object)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(jsBoolean(jsIsObjectType(stackFrame.args[0].jsValue())));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_function)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(jsBoolean(jsIsFunctionType(stackFrame.args[0].jsValue())));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_stricteq)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ bool result = JSValue::strictEqual(stackFrame.callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(jsBoolean(result));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_to_primitive)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(stackFrame.args[0].jsValue().toPrimitive(stackFrame.callFrame));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_strcat)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue result = jsString(stackFrame.callFrame, &stackFrame.callFrame->registers()[stackFrame.args[0].int32()], stackFrame.args[1].int32());
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_nstricteq)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ bool result = !JSValue::strictEqual(stackFrame.callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(jsBoolean(result));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_to_jsnumber)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src = stackFrame.args[0].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ double number = src.toNumber(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(jsNumber(number));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_in)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue baseVal = stackFrame.args[1].jsValue();
+
+ if (!baseVal.isObject()) {
+ stackFrame.globalData->exception = createInvalidParamError(stackFrame.callFrame, "in", baseVal);
+ VM_THROW_EXCEPTION();
+ }
+
+ JSValue propName = stackFrame.args[0].jsValue();
+ JSObject* baseObj = asObject(baseVal);
+
+ uint32_t i;
+ if (propName.getUInt32(i))
+ return JSValue::encode(jsBoolean(baseObj->hasProperty(callFrame, i)));
+
+ Identifier property(callFrame, propName.toString(callFrame));
+ CHECK_FOR_EXCEPTION();
+ return JSValue::encode(jsBoolean(baseObj->hasProperty(callFrame, property)));
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_push_new_scope)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSObject* scope = JSStaticScopeObject::create(stackFrame.callFrame, stackFrame.args[0].identifier(), stackFrame.args[1].jsValue(), DontDelete);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ callFrame->setScopeChain(callFrame->scopeChain()->push(scope));
+ return scope;
+}
+
+DEFINE_STUB_FUNCTION(void, op_jmp_scopes)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ unsigned count = stackFrame.args[0].int32();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ ScopeChainNode* tmp = callFrame->scopeChain();
+ while (count--)
+ tmp = tmp->pop();
+ callFrame->setScopeChain(tmp);
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_index)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ unsigned property = stackFrame.args[1].int32();
+
+ stackFrame.args[0].jsValue().put(callFrame, property, stackFrame.args[2].jsValue());
+}
+
+DEFINE_STUB_FUNCTION(void*, op_switch_imm)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue scrutinee = stackFrame.args[0].jsValue();
+ unsigned tableIndex = stackFrame.args[1].int32();
+ CallFrame* callFrame = stackFrame.callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+
+ if (scrutinee.isInt32())
+ return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(scrutinee.asInt32()).executableAddress();
+ if (scrutinee.isDouble() && scrutinee.asDouble() == static_cast<int32_t>(scrutinee.asDouble()))
+ return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(static_cast<int32_t>(scrutinee.asDouble())).executableAddress();
+ return codeBlock->immediateSwitchJumpTable(tableIndex).ctiDefault.executableAddress();
+}
+
+DEFINE_STUB_FUNCTION(void*, op_switch_char)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue scrutinee = stackFrame.args[0].jsValue();
+ unsigned tableIndex = stackFrame.args[1].int32();
+ CallFrame* callFrame = stackFrame.callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+
+ void* result = codeBlock->characterSwitchJumpTable(tableIndex).ctiDefault.executableAddress();
+
+ if (scrutinee.isString()) {
+ StringImpl* value = asString(scrutinee)->value(callFrame).impl();
+ if (value->length() == 1)
+ result = codeBlock->characterSwitchJumpTable(tableIndex).ctiForValue((*value)[0]).executableAddress();
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(void*, op_switch_string)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue scrutinee = stackFrame.args[0].jsValue();
+ unsigned tableIndex = stackFrame.args[1].int32();
+ CallFrame* callFrame = stackFrame.callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+
+ void* result = codeBlock->stringSwitchJumpTable(tableIndex).ctiDefault.executableAddress();
+
+ if (scrutinee.isString()) {
+ StringImpl* value = asString(scrutinee)->value(callFrame).impl();
+ result = codeBlock->stringSwitchJumpTable(tableIndex).ctiForValue(value).executableAddress();
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_del_by_val)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSObject* baseObj = baseValue.toObject(callFrame); // may throw
+
+ JSValue subscript = stackFrame.args[1].jsValue();
+ bool result;
+ uint32_t i;
+ if (subscript.getUInt32(i))
+ result = baseObj->methodTable()->deletePropertyByIndex(baseObj, callFrame, i);
+ else {
+ CHECK_FOR_EXCEPTION();
+ Identifier property(callFrame, subscript.toString(callFrame));
+ CHECK_FOR_EXCEPTION();
+ result = baseObj->methodTable()->deleteProperty(baseObj, callFrame, property);
+ }
+
+ if (!result && callFrame->codeBlock()->isStrictMode())
+ stackFrame.globalData->exception = createTypeError(stackFrame.callFrame, "Unable to delete property.");
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(jsBoolean(result));
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_getter)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ ASSERT(stackFrame.args[0].jsValue().isObject());
+ JSObject* baseObj = asObject(stackFrame.args[0].jsValue());
+ ASSERT(stackFrame.args[2].jsValue().isObject());
+ baseObj->methodTable()->defineGetter(baseObj, callFrame, stackFrame.args[1].identifier(), asObject(stackFrame.args[2].jsValue()), 0);
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_setter)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ ASSERT(stackFrame.args[0].jsValue().isObject());
+ JSObject* baseObj = asObject(stackFrame.args[0].jsValue());
+ ASSERT(stackFrame.args[2].jsValue().isObject());
+ baseObj->methodTable()->defineSetter(baseObj, callFrame, stackFrame.args[1].identifier(), asObject(stackFrame.args[2].jsValue()), 0);
+}
+
+DEFINE_STUB_FUNCTION(void, op_throw_reference_error)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ UString message = stackFrame.args[0].jsValue().toString(callFrame);
+ stackFrame.globalData->exception = createReferenceError(callFrame, message);
+ VM_THROW_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(void, op_debug)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ int debugHookID = stackFrame.args[0].int32();
+ int firstLine = stackFrame.args[1].int32();
+ int lastLine = stackFrame.args[2].int32();
+
+ stackFrame.globalData->interpreter->debug(callFrame, static_cast<DebugHookID>(debugHookID), firstLine, lastLine);
+}
+
+DEFINE_STUB_FUNCTION(void*, vm_throw)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ JSGlobalData* globalData = stackFrame.globalData;
+ ExceptionHandler handler = jitThrow(globalData, stackFrame.callFrame, globalData->exception, globalData->exceptionLocation);
+ STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
+ return handler.callFrame;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, to_object)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ return JSValue::encode(stackFrame.args[0].jsValue().toObject(callFrame));
+}
+
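+// Returns the thunk produced by the given generator, creating it on first request and caching
+// it in m_ctiStubMap thereafter.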
+MacroAssemblerCodeRef JITThunks::ctiStub(JSGlobalData* globalData, ThunkGenerator generator)
+{
+ std::pair<CTIStubMap::iterator, bool> entry = m_ctiStubMap.add(generator, MacroAssemblerCodeRef());
+ if (entry.second)
+ entry.first->second = generator(globalData);
+ return entry.first->second;
+}
+
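+// Memoizes one NativeExecutable per host function pointer so repeated requests for the same
+// native function reuse the same generated call trampoline.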
+NativeExecutable* JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function, NativeFunction constructor)
+{
+ std::pair<HostFunctionStubMap::iterator, bool> entry = m_hostFunctionStubMap->add(function, Weak<NativeExecutable>());
+ if (!*entry.first->second)
+ entry.first->second.set(*globalData, NativeExecutable::create(*globalData, JIT::compileCTINativeCall(globalData, function), function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct()), constructor, NoIntrinsic));
+ return entry.first->second.get();
+}
+
+NativeExecutable* JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function, ThunkGenerator generator, Intrinsic intrinsic)
+{
+ std::pair<HostFunctionStubMap::iterator, bool> entry = m_hostFunctionStubMap->add(function, Weak<NativeExecutable>());
+ if (!*entry.first->second) {
+ MacroAssemblerCodeRef code;
+ if (generator) {
+ if (globalData->canUseJIT())
+ code = generator(globalData);
+ else
+ code = MacroAssemblerCodeRef();
+ } else
+ code = JIT::compileCTINativeCall(globalData, function);
+ entry.first->second.set(*globalData, NativeExecutable::create(*globalData, code, function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct()), callHostFunctionAsConstructor, intrinsic));
+ }
+ return entry.first->second.get();
+}
+
+void JITThunks::clearHostFunctionStubs()
+{
+ m_hostFunctionStubMap.clear();
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITStubs.h b/Source/JavaScriptCore/jit/JITStubs.h
new file mode 100644
index 000000000..dfaa3b113
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITStubs.h
@@ -0,0 +1,451 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) Research In Motion Limited 2010. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITStubs_h
+#define JITStubs_h
+
+#include "CallData.h"
+#include "Intrinsic.h"
+#include "MacroAssemblerCodeRef.h"
+#include "Register.h"
+#include "ThunkGenerators.h"
+#include <wtf/HashMap.h>
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+ struct StructureStubInfo;
+
+ class CodeBlock;
+ class ExecutablePool;
+ class FunctionExecutable;
+ class Identifier;
+ class JSGlobalData;
+ class JSGlobalObject;
+ class JSObject;
+ class JSPropertyNameIterator;
+ class JSValue;
+ class JSValueEncodedAsPointer;
+ class NativeExecutable;
+ class Profiler;
+ class PropertySlot;
+ class PutPropertySlot;
+ class RegisterFile;
+ class RegExp;
+
+ template <typename T> class Weak;
+
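+    // One argument slot passed from JIT code into a stub; the accessors below reinterpret the
+    // raw payload as whatever type the particular stub expects.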
+ union JITStubArg {
+ void* asPointer;
+ EncodedJSValue asEncodedJSValue;
+ int32_t asInt32;
+
+ JSValue jsValue() { return JSValue::decode(asEncodedJSValue); }
+ JSObject* jsObject() { return static_cast<JSObject*>(asPointer); }
+ Register* reg() { return static_cast<Register*>(asPointer); }
+ Identifier& identifier() { return *static_cast<Identifier*>(asPointer); }
+ int32_t int32() { return asInt32; }
+ CodeBlock* codeBlock() { return static_cast<CodeBlock*>(asPointer); }
+ FunctionExecutable* function() { return static_cast<FunctionExecutable*>(asPointer); }
+ RegExp* regExp() { return static_cast<RegExp*>(asPointer); }
+ JSPropertyNameIterator* propertyNameIterator() { return static_cast<JSPropertyNameIterator*>(asPointer); }
+ JSGlobalObject* globalObject() { return static_cast<JSGlobalObject*>(asPointer); }
+ JSString* jsString() { return static_cast<JSString*>(asPointer); }
+ ReturnAddressPtr returnAddress() { return ReturnAddressPtr(asPointer); }
+ };
+
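+    // Entry points of the common trampolines; JITThunks stores one of these and exposes the
+    // individual pointers through its cti* accessors.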
+ struct TrampolineStructure {
+ MacroAssemblerCodePtr ctiStringLengthTrampoline;
+ MacroAssemblerCodePtr ctiVirtualCallLink;
+ MacroAssemblerCodePtr ctiVirtualConstructLink;
+ MacroAssemblerCodePtr ctiVirtualCall;
+ MacroAssemblerCodePtr ctiVirtualConstruct;
+ MacroAssemblerCodePtr ctiNativeCall;
+ MacroAssemblerCodePtr ctiNativeConstruct;
+ MacroAssemblerCodePtr ctiSoftModulo;
+ };
+
+#if CPU(X86_64)
+ struct JITStackFrame {
+ void* reserved; // Unused
+ JITStubArg args[6];
+ void* padding[2]; // Maintain 32-byte stack alignment (possibly overkill).
+
+ void* code;
+ RegisterFile* registerFile;
+ CallFrame* callFrame;
+ void* unused1;
+ Profiler** enabledProfilerReference;
+ JSGlobalData* globalData;
+
+ void* savedRBX;
+ void* savedR15;
+ void* savedR14;
+ void* savedR13;
+ void* savedR12;
+ void* savedRBP;
+ void* savedRIP;
+
+ // When JIT code makes a call, it pushes its return address just below the rest of the stack.
+ ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
+ };
+#elif CPU(X86)
+#if COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC))
+#pragma pack(push)
+#pragma pack(4)
+#endif // COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC))
+ struct JITStackFrame {
+ void* reserved; // Unused
+ JITStubArg args[6];
+#if USE(JSVALUE32_64)
+ void* padding[2]; // Maintain 16-byte stack alignment.
+#endif
+
+ void* savedEBX;
+ void* savedEDI;
+ void* savedESI;
+ void* savedEBP;
+ void* savedEIP;
+
+ void* code;
+ RegisterFile* registerFile;
+ CallFrame* callFrame;
+ void* unused1;
+ Profiler** enabledProfilerReference;
+ JSGlobalData* globalData;
+
+ // When JIT code makes a call, it pushes its return address just below the rest of the stack.
+ ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
+ };
+#if COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC))
+#pragma pack(pop)
+#endif // COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC))
+#elif CPU(ARM_THUMB2)
+ struct JITStackFrame {
+ JITStubArg reserved; // Unused
+ JITStubArg args[6];
+
+ ReturnAddressPtr thunkReturnAddress;
+
+ void* preservedReturnAddress;
+ void* preservedR4;
+ void* preservedR5;
+ void* preservedR6;
+ void* preservedR7;
+ void* preservedR8;
+ void* preservedR9;
+ void* preservedR10;
+ void* preservedR11;
+
+        // These arguments are passed in r1..r3 (r0 contained the entry code pointer, which is not preserved).
+ RegisterFile* registerFile;
+ CallFrame* callFrame;
+
+        // These arguments are passed on the stack.
+ Profiler** enabledProfilerReference;
+ JSGlobalData* globalData;
+
+ ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
+ };
+#elif CPU(ARM_TRADITIONAL)
+#if COMPILER(MSVC)
+#pragma pack(push)
+#pragma pack(4)
+#endif // COMPILER(MSVC)
+ struct JITStackFrame {
+ JITStubArg padding; // Unused
+ JITStubArg args[7];
+
+ ReturnAddressPtr thunkReturnAddress;
+
+ void* preservedR4;
+ void* preservedR5;
+ void* preservedR6;
+ void* preservedR7;
+ void* preservedR8;
+ void* preservedLink;
+
+ RegisterFile* registerFile;
+ CallFrame* callFrame;
+ void* unused1;
+
+        // These arguments are passed on the stack.
+ Profiler** enabledProfilerReference;
+ JSGlobalData* globalData;
+
+ // When JIT code makes a call, it pushes its return address just below the rest of the stack.
+ ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
+ };
+#if COMPILER(MSVC)
+#pragma pack(pop)
+#endif // COMPILER(MSVC)
+#elif CPU(MIPS)
+ struct JITStackFrame {
+ JITStubArg reserved; // Unused
+ JITStubArg args[6];
+
+#if USE(JSVALUE32_64)
+ void* padding; // Make the overall stack length 8-byte aligned.
+#endif
+
+ void* preservedGP; // store GP when using PIC code
+ void* preservedS0;
+ void* preservedS1;
+ void* preservedS2;
+ void* preservedReturnAddress;
+
+ ReturnAddressPtr thunkReturnAddress;
+
+        // These arguments are passed in a1..a3 (a0 contained the entry code pointer, which is not preserved)
+ RegisterFile* registerFile;
+ CallFrame* callFrame;
+ void* unused1;
+
+        // These arguments are passed on the stack.
+ Profiler** enabledProfilerReference;
+ JSGlobalData* globalData;
+
+ ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
+ };
+#elif CPU(SH4)
+ struct JITStackFrame {
+ JITStubArg padding; // Unused
+ JITStubArg args[6];
+
+ ReturnAddressPtr thunkReturnAddress;
+ void* savedR10;
+ void* savedR11;
+ void* savedR13;
+ void* savedRPR;
+ void* savedR14;
+ void* savedTimeoutReg;
+
+ RegisterFile* registerFile;
+ CallFrame* callFrame;
+ JSValue* exception;
+ Profiler** enabledProfilerReference;
+ JSGlobalData* globalData;
+
+ ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
+ };
+#else
+#error "JITStackFrame not defined for this platform."
+#endif
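For reference, a minimal standalone C++ sketch of the returnAddressSlot() idiom used by the frames above, with a hypothetical FakeStackFrame type: the struct overlays the machine stack, so on architectures that push return addresses the caller's return address sits one pointer below the struct's base address.

#include <cstdio>

struct FakeStackFrame {
    void* args[6];
    // Same idiom as JITStackFrame::returnAddressSlot() on x86/x86-64: the slot
    // written by the call instruction lives immediately below 'this'.
    void** returnAddressSlot() { return reinterpret_cast<void**>(this) - 1; }
};

int main()
{
    // Simulate a stack region: one slot for the return address, then the frame.
    void* memory[1 + sizeof(FakeStackFrame) / sizeof(void*)] = { 0 };
    memory[0] = reinterpret_cast<void*>(0x1234);          // pretend return address
    FakeStackFrame* frame = reinterpret_cast<FakeStackFrame*>(&memory[1]);
    std::printf("%p\n", *frame->returnAddressSlot());     // prints 0x1234
    return 0;
}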
+
+#define JITSTACKFRAME_ARGS_INDEX (OBJECT_OFFSETOF(JITStackFrame, args) / sizeof(void*))
+
+#define STUB_ARGS_DECLARATION void** args
+#define STUB_ARGS (args)
+
+#if CPU(X86)
+ #if COMPILER(MSVC)
+ #define JIT_STUB __fastcall
+ #elif COMPILER(GCC)
+ #define JIT_STUB __attribute__ ((fastcall))
+ #else
+ #error "JIT_STUB function calls require fastcall conventions on x86, add appropriate directive/attribute here for your compiler!"
+ #endif
+#else
+ #define JIT_STUB
+#endif
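As a usage note, a minimal sketch of how a stub would be declared and defined with the macros above (cti_op_example is a hypothetical name; on 32-bit x86 JIT_STUB forces the fastcall convention, elsewhere it expands to nothing):

extern "C" void JIT_STUB cti_op_example(STUB_ARGS_DECLARATION);

extern "C" void JIT_STUB cti_op_example(STUB_ARGS_DECLARATION)
{
    // 'args' points at the caller's JITStackFrame argument slots; real stubs
    // recover the JITStackFrame from it before doing any work.
    (void)STUB_ARGS;
}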
+
+ extern "C" void ctiVMThrowTrampoline();
+ extern "C" void ctiOpThrowNotCaught();
+ extern "C" EncodedJSValue ctiTrampoline(void* code, RegisterFile*, CallFrame*, void* /*unused1*/, Profiler**, JSGlobalData*);
+
+ class JITThunks {
+ public:
+ JITThunks(JSGlobalData*);
+ ~JITThunks();
+
+ static void tryCacheGetByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot&, StructureStubInfo* stubInfo);
+ static void tryCachePutByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot&, StructureStubInfo* stubInfo, bool direct);
+
+ MacroAssemblerCodePtr ctiStringLengthTrampoline() { return m_trampolineStructure.ctiStringLengthTrampoline; }
+ MacroAssemblerCodePtr ctiVirtualCallLink() { return m_trampolineStructure.ctiVirtualCallLink; }
+ MacroAssemblerCodePtr ctiVirtualConstructLink() { return m_trampolineStructure.ctiVirtualConstructLink; }
+ MacroAssemblerCodePtr ctiVirtualCall() { return m_trampolineStructure.ctiVirtualCall; }
+ MacroAssemblerCodePtr ctiVirtualConstruct() { return m_trampolineStructure.ctiVirtualConstruct; }
+ MacroAssemblerCodePtr ctiNativeCall() { return m_trampolineStructure.ctiNativeCall; }
+ MacroAssemblerCodePtr ctiNativeConstruct() { return m_trampolineStructure.ctiNativeConstruct; }
+ MacroAssemblerCodePtr ctiSoftModulo() { return m_trampolineStructure.ctiSoftModulo; }
+
+ MacroAssemblerCodeRef ctiStub(JSGlobalData*, ThunkGenerator);
+
+ NativeExecutable* hostFunctionStub(JSGlobalData*, NativeFunction, NativeFunction constructor);
+ NativeExecutable* hostFunctionStub(JSGlobalData*, NativeFunction, ThunkGenerator, Intrinsic);
+
+ void clearHostFunctionStubs();
+
+ private:
+ typedef HashMap<ThunkGenerator, MacroAssemblerCodeRef> CTIStubMap;
+ CTIStubMap m_ctiStubMap;
+ typedef HashMap<NativeFunction, Weak<NativeExecutable> > HostFunctionStubMap;
+ OwnPtr<HostFunctionStubMap> m_hostFunctionStubMap;
+ RefPtr<ExecutableMemoryHandle> m_executableMemory;
+
+ TrampolineStructure m_trampolineStructure;
+ };
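The CTIStubMap above is a lazy cache keyed by generator function; a standalone sketch of the compile-once pattern ctiStub() is expected to follow, with std::map and stand-in typedefs replacing the WTF and assembler types:

#include <map>

typedef int CodeRefSketch;                          // stand-in for MacroAssemblerCodeRef
typedef CodeRefSketch (*GeneratorSketch)(void* globalData);
typedef std::map<GeneratorSketch, CodeRefSketch> StubCacheSketch;

CodeRefSketch ctiStubSketch(StubCacheSketch& cache, GeneratorSketch generator, void* globalData)
{
    StubCacheSketch::iterator it = cache.find(generator);
    if (it == cache.end())                          // first request: run the generator once
        it = cache.insert(std::make_pair(generator, generator(globalData))).first;
    return it->second;                              // later requests reuse the cached code
}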
+
+extern "C" {
+ EncodedJSValue JIT_STUB cti_op_add(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_bitand(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_bitnot(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_bitor(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_bitxor(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_call_NotJSFunction(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_call_eval(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_construct_NotJSConstruct(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_create_this(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_convert_this(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_create_arguments(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_del_by_id(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_del_by_val(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_div(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_array_fail(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_custom_stub(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_generic(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_getter_stub(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_method_check(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_method_check_update(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_proto_fail(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list_full(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_self_fail(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_string_fail(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_val(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_val_byte_array(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_val_string(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_in(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_instanceof(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_boolean(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_function(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_number(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_object(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_string(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_undefined(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_less(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_lesseq(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_greater(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_greatereq(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_lshift(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_mod(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_mul(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_negate(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_not(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_nstricteq(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_post_dec(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_post_inc(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_pre_dec(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_pre_inc(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_base(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_base_strict_put(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_ensure_property_exists(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_global(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_global_dynamic(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_skip(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_with_base(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_with_this(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_rshift(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_strcat(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_stricteq(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_sub(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_to_jsnumber(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_to_primitive(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_typeof(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_urshift(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_to_object(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_array(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_array_buffer(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_func(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_func_exp(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_object(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_regexp(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_push_activation(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_push_new_scope(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_push_scope(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_put_by_id_transition_realloc(STUB_ARGS_DECLARATION);
+ JSPropertyNameIterator* JIT_STUB cti_op_get_pnames(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_eq(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_eq_strings(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_jless(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_jlesseq(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_jgreater(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_jgreatereq(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_jtrue(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_load_varargs(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_timeout_check(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_has_property(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_check_has_instance(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_debug(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_end(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_jmp_scopes(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_pop_scope(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_profile_did_call(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_profile_will_call(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id_fail(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id_generic(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id_direct(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id_direct_fail(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id_direct_generic(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_index(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_val(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_val_byte_array(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_getter(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_setter(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_tear_off_activation(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_tear_off_arguments(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_throw_reference_error(STUB_ARGS_DECLARATION);
+#if ENABLE(DFG_JIT)
+ void JIT_STUB cti_optimize_from_loop(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_optimize_from_ret(STUB_ARGS_DECLARATION);
+#endif
+ void* JIT_STUB cti_op_call_arityCheck(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_construct_arityCheck(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_call_jitCompile(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_construct_jitCompile(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_switch_char(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_switch_imm(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_switch_string(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_throw(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_register_file_check(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_vm_lazyLinkCall(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_vm_lazyLinkConstruct(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_vm_throw(STUB_ARGS_DECLARATION);
+} // extern "C"
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // JITStubs_h
diff --git a/Source/JavaScriptCore/jit/JITWriteBarrier.h b/Source/JavaScriptCore/jit/JITWriteBarrier.h
new file mode 100644
index 000000000..81a3653a0
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITWriteBarrier.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITWriteBarrier_h
+#define JITWriteBarrier_h
+
+#if ENABLE(JIT)
+
+#include "MacroAssembler.h"
+#include "MarkStack.h"
+#include "WriteBarrier.h"
+
+namespace JSC {
+
+class JSCell;
+class JSGlobalData;
+
+// Needs to be even to appease some of the backends.
+#define JITWriteBarrierFlag ((void*)2)
+class JITWriteBarrierBase {
+public:
+ typedef void* (JITWriteBarrierBase::*UnspecifiedBoolType);
+ operator UnspecifiedBoolType*() const { return get() ? reinterpret_cast<UnspecifiedBoolType*>(1) : 0; }
+ bool operator!() const { return !get(); }
+
+ void setFlagOnBarrier()
+ {
+ ASSERT(!m_location);
+ m_location = CodeLocationDataLabelPtr(JITWriteBarrierFlag);
+ }
+
+ bool isFlagged() const
+ {
+ return !!m_location;
+ }
+
+ void setLocation(CodeLocationDataLabelPtr location)
+ {
+ ASSERT(!m_location);
+ m_location = location;
+ }
+
+ CodeLocationDataLabelPtr location() const
+ {
+ ASSERT((!!m_location) && m_location.executableAddress() != JITWriteBarrierFlag);
+ return m_location;
+ }
+
+ void clear() { clear(0); }
+ void clearToMaxUnsigned() { clear(reinterpret_cast<void*>(-1)); }
+
+protected:
+ JITWriteBarrierBase()
+ {
+ }
+
+ void set(JSGlobalData&, CodeLocationDataLabelPtr location, JSCell* owner, JSCell* value)
+ {
+ Heap::writeBarrier(owner, value);
+ m_location = location;
+ ASSERT(((!!m_location) && m_location.executableAddress() != JITWriteBarrierFlag) || (location.executableAddress() == m_location.executableAddress()));
+ MacroAssembler::repatchPointer(m_location, value);
+ ASSERT(get() == value);
+ }
+
+ JSCell* get() const
+ {
+ if (!m_location || m_location.executableAddress() == JITWriteBarrierFlag)
+ return 0;
+ void* result = static_cast<JSCell*>(MacroAssembler::readPointer(m_location));
+ // We use -1 to indicate a "safe" empty value in the instruction stream
+ if (result == (void*)-1)
+ return 0;
+ return static_cast<JSCell*>(result);
+ }
+
+private:
+ void clear(void* clearedValue)
+ {
+ if (!m_location)
+ return;
+ if (m_location.executableAddress() != JITWriteBarrierFlag)
+ MacroAssembler::repatchPointer(m_location, clearedValue);
+ }
+
+ CodeLocationDataLabelPtr m_location;
+};
+
+#undef JITWriteBarrierFlag
+
+template <typename T> class JITWriteBarrier : public JITWriteBarrierBase {
+public:
+ JITWriteBarrier()
+ {
+ }
+
+ void set(JSGlobalData& globalData, CodeLocationDataLabelPtr location, JSCell* owner, T* value)
+ {
+ validateCell(owner);
+ validateCell(value);
+ JITWriteBarrierBase::set(globalData, location, owner, value);
+ }
+ void set(JSGlobalData& globalData, JSCell* owner, T* value)
+ {
+ set(globalData, location(), owner, value);
+ }
+ T* get() const
+ {
+ T* result = static_cast<T*>(JITWriteBarrierBase::get());
+ if (result)
+ validateCell(result);
+ return result;
+ }
+};
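A standalone sketch of the encodings JITWriteBarrierBase distinguishes, both in m_location and in the pointer planted in the instruction stream (the constants 2 and -1 mirror the flagged and cleared cases handled above):

bool barrierHoldsCell(void* location, void* pointerReadFromCode)
{
    void* const flag = reinterpret_cast<void*>(2);      // JITWriteBarrierFlag: marked, no patch point yet
    void* const cleared = reinterpret_cast<void*>(-1);  // "safe" empty value repatched into the code
    if (!location || location == flag)                  // nothing to read from the instruction stream
        return false;
    return pointerReadFromCode && pointerReadFromCode != cleared;   // anything else is a live JSCell*
}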
+
+template<typename T> inline void MarkStack::append(JITWriteBarrier<T>* slot)
+{
+ internalAppend(slot->get());
+}
+
+}
+
+#endif // ENABLE(JIT)
+
+#endif
diff --git a/Source/JavaScriptCore/jit/JSInterfaceJIT.h b/Source/JavaScriptCore/jit/JSInterfaceJIT.h
new file mode 100644
index 000000000..d54dedc1a
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JSInterfaceJIT.h
@@ -0,0 +1,358 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JSInterfaceJIT_h
+#define JSInterfaceJIT_h
+
+#include "JITCode.h"
+#include "JITStubs.h"
+#include "JSValue.h"
+#include "MacroAssembler.h"
+#include "RegisterFile.h"
+#include <wtf/AlwaysInline.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+ class JSInterfaceJIT : public MacroAssembler {
+ public:
+ // NOTES:
+ //
+ // regT0 has two special meanings. The return value from a stub
+ // call will always be in regT0, and by default (unless
+ // a register is specified) emitPutVirtualRegister() will store
+ // the value from regT0.
+ //
+ // regT3 is required to be callee-preserved.
+ //
+        // tempRegister2 has no such dependencies. It is important that
+ // on x86/x86-64 it is ecx for performance reasons, since the
+ // MacroAssembler will need to plant register swaps if it is not -
+ // however the code will still function correctly.
+#if CPU(X86_64)
+ static const RegisterID returnValueRegister = X86Registers::eax;
+ static const RegisterID cachedResultRegister = X86Registers::eax;
+ static const RegisterID firstArgumentRegister = X86Registers::edi;
+
+#if ENABLE(VALUE_PROFILER)
+ static const RegisterID bucketCounterRegister = X86Registers::r10;
+#endif
+
+ static const RegisterID timeoutCheckRegister = X86Registers::r12;
+ static const RegisterID callFrameRegister = X86Registers::r13;
+ static const RegisterID tagTypeNumberRegister = X86Registers::r14;
+ static const RegisterID tagMaskRegister = X86Registers::r15;
+
+ static const RegisterID regT0 = X86Registers::eax;
+ static const RegisterID regT1 = X86Registers::edx;
+ static const RegisterID regT2 = X86Registers::ecx;
+ static const RegisterID regT3 = X86Registers::ebx;
+
+ static const FPRegisterID fpRegT0 = X86Registers::xmm0;
+ static const FPRegisterID fpRegT1 = X86Registers::xmm1;
+ static const FPRegisterID fpRegT2 = X86Registers::xmm2;
+ static const FPRegisterID fpRegT3 = X86Registers::xmm3;
+#elif CPU(X86)
+ static const RegisterID returnValueRegister = X86Registers::eax;
+ static const RegisterID cachedResultRegister = X86Registers::eax;
+        // On x86 we always use fastcall conventions - but on
+        // OS X it might make more sense to just use regparm.
+ static const RegisterID firstArgumentRegister = X86Registers::ecx;
+
+ static const RegisterID bucketCounterRegister = X86Registers::esi;
+ static const RegisterID callFrameRegister = X86Registers::edi;
+
+ static const RegisterID regT0 = X86Registers::eax;
+ static const RegisterID regT1 = X86Registers::edx;
+ static const RegisterID regT2 = X86Registers::ecx;
+ static const RegisterID regT3 = X86Registers::ebx;
+
+ static const FPRegisterID fpRegT0 = X86Registers::xmm0;
+ static const FPRegisterID fpRegT1 = X86Registers::xmm1;
+ static const FPRegisterID fpRegT2 = X86Registers::xmm2;
+ static const FPRegisterID fpRegT3 = X86Registers::xmm3;
+#elif CPU(ARM_THUMB2)
+ static const RegisterID returnValueRegister = ARMRegisters::r0;
+ static const RegisterID cachedResultRegister = ARMRegisters::r0;
+ static const RegisterID firstArgumentRegister = ARMRegisters::r0;
+
+#if ENABLE(VALUE_PROFILER)
+ static const RegisterID bucketCounterRegister = ARMRegisters::r7;
+#endif
+
+ static const RegisterID regT0 = ARMRegisters::r0;
+ static const RegisterID regT1 = ARMRegisters::r1;
+ static const RegisterID regT2 = ARMRegisters::r2;
+ static const RegisterID regT3 = ARMRegisters::r4;
+
+ static const RegisterID callFrameRegister = ARMRegisters::r5;
+ static const RegisterID timeoutCheckRegister = ARMRegisters::r6;
+
+ static const FPRegisterID fpRegT0 = ARMRegisters::d0;
+ static const FPRegisterID fpRegT1 = ARMRegisters::d1;
+ static const FPRegisterID fpRegT2 = ARMRegisters::d2;
+ static const FPRegisterID fpRegT3 = ARMRegisters::d3;
+#elif CPU(ARM_TRADITIONAL)
+ static const RegisterID returnValueRegister = ARMRegisters::r0;
+ static const RegisterID cachedResultRegister = ARMRegisters::r0;
+ static const RegisterID firstArgumentRegister = ARMRegisters::r0;
+
+ static const RegisterID timeoutCheckRegister = ARMRegisters::r5;
+ static const RegisterID callFrameRegister = ARMRegisters::r4;
+
+ static const RegisterID regT0 = ARMRegisters::r0;
+ static const RegisterID regT1 = ARMRegisters::r1;
+ static const RegisterID regT2 = ARMRegisters::r2;
+ // Callee preserved
+ static const RegisterID regT3 = ARMRegisters::r7;
+
+ static const RegisterID regS0 = ARMRegisters::S0;
+ // Callee preserved
+ static const RegisterID regS1 = ARMRegisters::S1;
+
+ static const RegisterID regStackPtr = ARMRegisters::sp;
+ static const RegisterID regLink = ARMRegisters::lr;
+
+ static const FPRegisterID fpRegT0 = ARMRegisters::d0;
+ static const FPRegisterID fpRegT1 = ARMRegisters::d1;
+ static const FPRegisterID fpRegT2 = ARMRegisters::d2;
+ static const FPRegisterID fpRegT3 = ARMRegisters::d3;
+#elif CPU(MIPS)
+ static const RegisterID returnValueRegister = MIPSRegisters::v0;
+ static const RegisterID cachedResultRegister = MIPSRegisters::v0;
+ static const RegisterID firstArgumentRegister = MIPSRegisters::a0;
+
+ // regT0 must be v0 for returning a 32-bit value.
+ static const RegisterID regT0 = MIPSRegisters::v0;
+
+        // regT1 must be v1 for returning a pair of 32-bit values.
+ static const RegisterID regT1 = MIPSRegisters::v1;
+
+ static const RegisterID regT2 = MIPSRegisters::t4;
+
+        // regT3 must be preserved by the callee, so use an S register.
+ static const RegisterID regT3 = MIPSRegisters::s2;
+
+ static const RegisterID callFrameRegister = MIPSRegisters::s0;
+ static const RegisterID timeoutCheckRegister = MIPSRegisters::s1;
+
+ static const FPRegisterID fpRegT0 = MIPSRegisters::f4;
+ static const FPRegisterID fpRegT1 = MIPSRegisters::f6;
+ static const FPRegisterID fpRegT2 = MIPSRegisters::f8;
+ static const FPRegisterID fpRegT3 = MIPSRegisters::f10;
+#elif CPU(SH4)
+ static const RegisterID timeoutCheckRegister = SH4Registers::r8;
+ static const RegisterID callFrameRegister = SH4Registers::fp;
+
+ static const RegisterID regT0 = SH4Registers::r0;
+ static const RegisterID regT1 = SH4Registers::r1;
+ static const RegisterID regT2 = SH4Registers::r2;
+ static const RegisterID regT3 = SH4Registers::r10;
+ static const RegisterID regT4 = SH4Registers::r4;
+ static const RegisterID regT5 = SH4Registers::r5;
+ static const RegisterID regT6 = SH4Registers::r6;
+ static const RegisterID regT7 = SH4Registers::r7;
+        static const RegisterID firstArgumentRegister = regT4;
+
+ static const RegisterID returnValueRegister = SH4Registers::r0;
+ static const RegisterID cachedResultRegister = SH4Registers::r0;
+
+ static const FPRegisterID fpRegT0 = SH4Registers::fr0;
+ static const FPRegisterID fpRegT1 = SH4Registers::fr2;
+ static const FPRegisterID fpRegT2 = SH4Registers::fr4;
+ static const FPRegisterID fpRegT3 = SH4Registers::fr6;
+ static const FPRegisterID fpRegT4 = SH4Registers::fr8;
+ static const FPRegisterID fpRegT5 = SH4Registers::fr10;
+ static const FPRegisterID fpRegT6 = SH4Registers::fr12;
+ static const FPRegisterID fpRegT7 = SH4Registers::fr14;
+#else
+#error "JIT not supported on this platform."
+#endif
+
+#if USE(JSVALUE32_64)
+        // Can't just propagate JSValue::Int32Tag as Visual Studio doesn't like it
+ static const unsigned Int32Tag = 0xffffffff;
+ COMPILE_ASSERT(Int32Tag == JSValue::Int32Tag, Int32Tag_out_of_sync);
+#else
+ static const unsigned Int32Tag = TagTypeNumber >> 32;
+#endif
+ inline Jump emitLoadJSCell(unsigned virtualRegisterIndex, RegisterID payload);
+ inline Jump emitLoadInt32(unsigned virtualRegisterIndex, RegisterID dst);
+ inline Jump emitLoadDouble(unsigned virtualRegisterIndex, FPRegisterID dst, RegisterID scratch);
+
+#if USE(JSVALUE32_64)
+ inline Jump emitJumpIfNotJSCell(unsigned virtualRegisterIndex);
+ inline Address tagFor(int index, RegisterID base = callFrameRegister);
+#endif
+
+#if USE(JSVALUE64)
+ Jump emitJumpIfImmediateNumber(RegisterID reg);
+ Jump emitJumpIfNotImmediateNumber(RegisterID reg);
+ void emitFastArithImmToInt(RegisterID reg);
+#endif
+
+ inline Address payloadFor(int index, RegisterID base = callFrameRegister);
+ inline Address intPayloadFor(int index, RegisterID base = callFrameRegister);
+ inline Address intTagFor(int index, RegisterID base = callFrameRegister);
+ inline Address addressFor(int index, RegisterID base = callFrameRegister);
+ };
+
+ struct ThunkHelpers {
+ static unsigned stringImplFlagsOffset() { return StringImpl::flagsOffset(); }
+ static unsigned stringImpl8BitFlag() { return StringImpl::flagIs8Bit(); }
+ static unsigned stringImplDataOffset() { return StringImpl::dataOffset(); }
+ static unsigned jsStringLengthOffset() { return OBJECT_OFFSETOF(JSString, m_length); }
+ static unsigned jsStringValueOffset() { return OBJECT_OFFSETOF(JSString, m_value); }
+ };
+
+#if USE(JSVALUE32_64)
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadJSCell(unsigned virtualRegisterIndex, RegisterID payload)
+ {
+ loadPtr(payloadFor(virtualRegisterIndex), payload);
+ return emitJumpIfNotJSCell(virtualRegisterIndex);
+ }
+
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotJSCell(unsigned virtualRegisterIndex)
+ {
+ ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
+ return branch32(NotEqual, tagFor(virtualRegisterIndex), TrustedImm32(JSValue::CellTag));
+ }
+
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadInt32(unsigned virtualRegisterIndex, RegisterID dst)
+ {
+ ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
+ loadPtr(payloadFor(virtualRegisterIndex), dst);
+ return branch32(NotEqual, tagFor(static_cast<int>(virtualRegisterIndex)), TrustedImm32(JSValue::Int32Tag));
+ }
+
+ inline JSInterfaceJIT::Address JSInterfaceJIT::tagFor(int virtualRegisterIndex, RegisterID base)
+ {
+ ASSERT(virtualRegisterIndex < FirstConstantRegisterIndex);
+ return Address(base, (static_cast<unsigned>(virtualRegisterIndex) * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
+ }
+
+ inline JSInterfaceJIT::Address JSInterfaceJIT::payloadFor(int virtualRegisterIndex, RegisterID base)
+ {
+ ASSERT(virtualRegisterIndex < FirstConstantRegisterIndex);
+ return Address(base, (static_cast<unsigned>(virtualRegisterIndex) * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
+ }
+
+ inline JSInterfaceJIT::Address JSInterfaceJIT::intPayloadFor(int virtualRegisterIndex, RegisterID base)
+ {
+ return payloadFor(virtualRegisterIndex, base);
+ }
+
+ inline JSInterfaceJIT::Address JSInterfaceJIT::intTagFor(int virtualRegisterIndex, RegisterID base)
+ {
+ return tagFor(virtualRegisterIndex, base);
+ }
+
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadDouble(unsigned virtualRegisterIndex, FPRegisterID dst, RegisterID scratch)
+ {
+ ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
+ loadPtr(tagFor(virtualRegisterIndex), scratch);
+ Jump isDouble = branch32(Below, scratch, TrustedImm32(JSValue::LowestTag));
+ Jump notInt = branch32(NotEqual, scratch, TrustedImm32(JSValue::Int32Tag));
+ loadPtr(payloadFor(virtualRegisterIndex), scratch);
+ convertInt32ToDouble(scratch, dst);
+ Jump done = jump();
+ isDouble.link(this);
+ loadDouble(addressFor(virtualRegisterIndex), dst);
+ done.link(this);
+ return notInt;
+ }
+
+#endif
+
+#if USE(JSVALUE64)
+ ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfImmediateNumber(RegisterID reg)
+ {
+ return branchTestPtr(NonZero, reg, tagTypeNumberRegister);
+ }
+ ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotImmediateNumber(RegisterID reg)
+ {
+ return branchTestPtr(Zero, reg, tagTypeNumberRegister);
+ }
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadJSCell(unsigned virtualRegisterIndex, RegisterID dst)
+ {
+ loadPtr(addressFor(virtualRegisterIndex), dst);
+ return branchTestPtr(NonZero, dst, tagMaskRegister);
+ }
+
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadInt32(unsigned virtualRegisterIndex, RegisterID dst)
+ {
+ loadPtr(addressFor(virtualRegisterIndex), dst);
+ Jump result = branchPtr(Below, dst, tagTypeNumberRegister);
+ zeroExtend32ToPtr(dst, dst);
+ return result;
+ }
+
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadDouble(unsigned virtualRegisterIndex, FPRegisterID dst, RegisterID scratch)
+ {
+ loadPtr(addressFor(virtualRegisterIndex), scratch);
+ Jump notNumber = emitJumpIfNotImmediateNumber(scratch);
+ Jump notInt = branchPtr(Below, scratch, tagTypeNumberRegister);
+ convertInt32ToDouble(scratch, dst);
+ Jump done = jump();
+ notInt.link(this);
+ addPtr(tagTypeNumberRegister, scratch);
+ movePtrToDouble(scratch, dst);
+ done.link(this);
+ return notNumber;
+ }
+
+ ALWAYS_INLINE void JSInterfaceJIT::emitFastArithImmToInt(RegisterID)
+ {
+ }
+
+#endif
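The JSVALUE64 loaders above lean on modular arithmetic over the NaN-boxed encoding. Assuming the usual constants (TagTypeNumber = 0xffff000000000000, doubles boxed as their bit pattern plus 2^48), adding TagTypeNumber modulo 2^64 is the same as subtracting 2^48, which is why addPtr(tagTypeNumberRegister, scratch) in emitLoadDouble() recovers the raw IEEE bits. A standalone check:

#include <cstdint>
#include <cstring>
#include <cstdio>

static const uint64_t TagTypeNumber = 0xffff000000000000ull;   // assumed encoding constant
static const uint64_t DoubleEncodeOffset = 1ull << 48;         // assumed encoding constant

int main()
{
    double d = 3.25;
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);

    uint64_t boxed = bits + DoubleEncodeOffset;      // how a double is boxed into a JSValue
    uint64_t unboxed = boxed + TagTypeNumber;        // what addPtr(tagTypeNumberRegister, ...) does

    double back;
    std::memcpy(&back, &unboxed, sizeof back);
    std::printf("%g\n", back);                       // prints 3.25: the bits round-trip
    return 0;
}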
+
+#if USE(JSVALUE64)
+ inline JSInterfaceJIT::Address JSInterfaceJIT::payloadFor(int virtualRegisterIndex, RegisterID base)
+ {
+ ASSERT(virtualRegisterIndex < FirstConstantRegisterIndex);
+ return addressFor(virtualRegisterIndex, base);
+ }
+
+ inline JSInterfaceJIT::Address JSInterfaceJIT::intPayloadFor(int virtualRegisterIndex, RegisterID base)
+ {
+ ASSERT(virtualRegisterIndex < FirstConstantRegisterIndex);
+ return Address(base, (static_cast<unsigned>(virtualRegisterIndex) * sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ }
+ inline JSInterfaceJIT::Address JSInterfaceJIT::intTagFor(int virtualRegisterIndex, RegisterID base)
+ {
+ ASSERT(virtualRegisterIndex < FirstConstantRegisterIndex);
+ return Address(base, (static_cast<unsigned>(virtualRegisterIndex) * sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ }
+#endif
+
+ inline JSInterfaceJIT::Address JSInterfaceJIT::addressFor(int virtualRegisterIndex, RegisterID base)
+ {
+ ASSERT(virtualRegisterIndex < FirstConstantRegisterIndex);
+ return Address(base, (static_cast<unsigned>(virtualRegisterIndex) * sizeof(Register)));
+ }
+
+}
+
+#endif // JSInterfaceJIT_h
diff --git a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
new file mode 100644
index 000000000..1802216fa
--- /dev/null
+++ b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SpecializedThunkJIT_h
+#define SpecializedThunkJIT_h
+
+#if ENABLE(JIT)
+
+#include "Executable.h"
+#include "JSInterfaceJIT.h"
+#include "LinkBuffer.h"
+
+namespace JSC {
+
+ class SpecializedThunkJIT : public JSInterfaceJIT {
+ public:
+ static const int ThisArgument = -1;
+ SpecializedThunkJIT(int expectedArgCount, JSGlobalData* globalData)
+ : m_expectedArgCount(expectedArgCount)
+ , m_globalData(globalData)
+ {
+ // Check that we have the expected number of arguments
+ m_failures.append(branch32(NotEqual, payloadFor(RegisterFile::ArgumentCount), TrustedImm32(expectedArgCount + 1)));
+ }
+
+ void loadDoubleArgument(int argument, FPRegisterID dst, RegisterID scratch)
+ {
+ unsigned src = CallFrame::argumentOffset(argument);
+ m_failures.append(emitLoadDouble(src, dst, scratch));
+ }
+
+ void loadCellArgument(int argument, RegisterID dst)
+ {
+ unsigned src = CallFrame::argumentOffset(argument);
+ m_failures.append(emitLoadJSCell(src, dst));
+ }
+
+ void loadJSStringArgument(int argument, RegisterID dst)
+ {
+ loadCellArgument(argument, dst);
+ m_failures.append(branchPtr(NotEqual, Address(dst, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info)));
+ }
+
+ void loadInt32Argument(int argument, RegisterID dst, Jump& failTarget)
+ {
+ unsigned src = CallFrame::argumentOffset(argument);
+ failTarget = emitLoadInt32(src, dst);
+ }
+
+ void loadInt32Argument(int argument, RegisterID dst)
+ {
+ Jump conversionFailed;
+ loadInt32Argument(argument, dst, conversionFailed);
+ m_failures.append(conversionFailed);
+ }
+
+ void appendFailure(const Jump& failure)
+ {
+ m_failures.append(failure);
+ }
+
+ void returnJSValue(RegisterID src)
+ {
+ if (src != regT0)
+ move(src, regT0);
+ loadPtr(payloadFor(RegisterFile::CallerFrame, callFrameRegister), callFrameRegister);
+ ret();
+ }
+
+ void returnDouble(FPRegisterID src)
+ {
+#if USE(JSVALUE64)
+ moveDoubleToPtr(src, regT0);
+ Jump zero = branchTestPtr(Zero, regT0);
+ subPtr(tagTypeNumberRegister, regT0);
+ Jump done = jump();
+ zero.link(this);
+ move(tagTypeNumberRegister, regT0);
+ done.link(this);
+#else
+ storeDouble(src, Address(stackPointerRegister, -(int)sizeof(double)));
+ loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.tag) - sizeof(double)), regT1);
+ loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.payload) - sizeof(double)), regT0);
+ Jump lowNonZero = branchTestPtr(NonZero, regT1);
+ Jump highNonZero = branchTestPtr(NonZero, regT0);
+ move(TrustedImm32(0), regT0);
+ move(TrustedImm32(Int32Tag), regT1);
+ lowNonZero.link(this);
+ highNonZero.link(this);
+#endif
+ loadPtr(payloadFor(RegisterFile::CallerFrame, callFrameRegister), callFrameRegister);
+ ret();
+ }
+
+ void returnInt32(RegisterID src)
+ {
+ if (src != regT0)
+ move(src, regT0);
+ tagReturnAsInt32();
+ loadPtr(payloadFor(RegisterFile::CallerFrame, callFrameRegister), callFrameRegister);
+ ret();
+ }
+
+ void returnJSCell(RegisterID src)
+ {
+ if (src != regT0)
+ move(src, regT0);
+ tagReturnAsJSCell();
+ loadPtr(payloadFor(RegisterFile::CallerFrame, callFrameRegister), callFrameRegister);
+ ret();
+ }
+
+ MacroAssemblerCodeRef finalize(JSGlobalData& globalData, MacroAssemblerCodePtr fallback)
+ {
+ LinkBuffer patchBuffer(globalData, this);
+ patchBuffer.link(m_failures, CodeLocationLabel(fallback));
+ for (unsigned i = 0; i < m_calls.size(); i++)
+ patchBuffer.link(m_calls[i].first, m_calls[i].second);
+ return patchBuffer.finalizeCode();
+ }
+
+ // Assumes that the target function uses fpRegister0 as the first argument
+ // and return value. Like any sensible architecture would.
+ void callDoubleToDouble(FunctionPtr function)
+ {
+ m_calls.append(std::make_pair(call(), function));
+ }
+
+ private:
+
+ void tagReturnAsInt32()
+ {
+#if USE(JSVALUE64)
+ orPtr(tagTypeNumberRegister, regT0);
+#else
+ move(TrustedImm32(JSValue::Int32Tag), regT1);
+#endif
+ }
+
+ void tagReturnAsJSCell()
+ {
+#if USE(JSVALUE32_64)
+ move(TrustedImm32(JSValue::CellTag), regT1);
+#endif
+ }
+
+ int m_expectedArgCount;
+ JSGlobalData* m_globalData;
+ MacroAssembler::JumpList m_failures;
+ Vector<std::pair<Call, FunctionPtr> > m_calls;
+ };
+
+}
+
+#endif // ENABLE(JIT)
+
+#endif // SpecializedThunkJIT_h
diff --git a/Source/JavaScriptCore/jit/ThunkGenerators.cpp b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
new file mode 100644
index 000000000..0e54c8a8f
--- /dev/null
+++ b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ThunkGenerators.h"
+
+#include "CodeBlock.h"
+#include <wtf/text/StringImpl.h>
+#include "SpecializedThunkJIT.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+static void stringCharLoad(SpecializedThunkJIT& jit)
+{
+ // load string
+ jit.loadJSStringArgument(SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);
+
+ // Load string length to regT2, and start the process of loading the data pointer into regT0
+ jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
+ jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
+ jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));
+
+ // load index
+ jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index
+
+ // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
+ jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));
+
+ // Load the character
+ SpecializedThunkJIT::JumpList is16Bit;
+ SpecializedThunkJIT::JumpList cont8Bit;
+ // Load the string flags
+ jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::stringImplFlagsOffset()), SpecializedThunkJIT::regT2);
+ jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::stringImplDataOffset()), SpecializedThunkJIT::regT0);
+ is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(ThunkHelpers::stringImpl8BitFlag())));
+ jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
+ cont8Bit.append(jit.jump());
+ is16Bit.link(&jit);
+ jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
+ cont8Bit.link(&jit);
+}
+
+static void charToString(SpecializedThunkJIT& jit, JSGlobalData* globalData, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
+{
+ jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
+ jit.move(MacroAssembler::TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), scratch);
+ jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
+ jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
+}
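A plain C++ sketch of the fast path charToString() emits, with a hypothetical table parameter standing in for globalData->smallStrings.singleCharacterStrings(); each early return corresponds to one of the appended bail-out jumps above:

struct JSStringSketch;          // stand-in for JSString

JSStringSketch* charToStringSketch(unsigned charCode, JSStringSketch* const* singleCharacterStrings)
{
    if (charCode >= 0x100)      // only Latin-1 code points are cached
        return 0;               // slow path
    JSStringSketch* result = singleCharacterStrings[charCode];
    if (!result)                // small string not created yet
        return 0;               // slow path
    return result;
}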
+
+MacroAssemblerCodeRef charCodeAtThunkGenerator(JSGlobalData* globalData)
+{
+ SpecializedThunkJIT jit(1, globalData);
+ stringCharLoad(jit);
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall());
+}
+
+MacroAssemblerCodeRef charAtThunkGenerator(JSGlobalData* globalData)
+{
+ SpecializedThunkJIT jit(1, globalData);
+ stringCharLoad(jit);
+ charToString(jit, globalData, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
+ jit.returnJSCell(SpecializedThunkJIT::regT0);
+ return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall());
+}
+
+MacroAssemblerCodeRef fromCharCodeThunkGenerator(JSGlobalData* globalData)
+{
+ SpecializedThunkJIT jit(1, globalData);
+ // load char code
+ jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
+ charToString(jit, globalData, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
+ jit.returnJSCell(SpecializedThunkJIT::regT0);
+ return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall());
+}
+
+MacroAssemblerCodeRef sqrtThunkGenerator(JSGlobalData* globalData)
+{
+ SpecializedThunkJIT jit(1, globalData);
+ if (!jit.supportsFloatingPointSqrt())
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+
+ jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+ jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
+ jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+ return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall());
+}
+
+#if OS(DARWIN) || (OS(WINDOWS) && CPU(X86))
+#define SYMBOL_STRING(name) "_" #name
+#else
+#define SYMBOL_STRING(name) #name
+#endif
+
+#if (OS(LINUX) || OS(FREEBSD)) && CPU(X86_64)
+#define SYMBOL_STRING_RELOCATION(name) #name "@plt"
+#elif OS(DARWIN) || (CPU(X86_64) && COMPILER(MINGW) && !GCC_VERSION_AT_LEAST(4, 5, 0))
+#define SYMBOL_STRING_RELOCATION(name) "_" #name
+#elif CPU(X86) && COMPILER(MINGW)
+#define SYMBOL_STRING_RELOCATION(name) "@" #name "@4"
+#else
+#define SYMBOL_STRING_RELOCATION(name) #name
+#endif
+
+#define UnaryDoubleOpWrapper(function) function##Wrapper
+enum MathThunkCallingConvention { };
+typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
+extern "C" {
+
+double jsRound(double);
+double jsRound(double d)
+{
+ double integer = ceil(d);
+ return integer - (integer - d > 0.5);
+}
+
+}
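jsRound() matches JavaScript's Math.round semantics, rounding exact halves toward positive infinity; a couple of worked values as a standalone check:

#include <cassert>
#include <cmath>

static double jsRoundSketch(double d)            // same formula as jsRound above
{
    double integer = std::ceil(d);
    return integer - (integer - d > 0.5);
}

int main()
{
    assert(jsRoundSketch(2.4) == 2.0);           // ceil(2.4) = 3 is more than 0.5 away, so round down
    assert(jsRoundSketch(2.5) == 3.0);           // exact halves round up
    assert(jsRoundSketch(-2.5) == -2.0);         // ... that is, toward positive infinity
    return 0;
}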
+
+#if CPU(X86_64) && COMPILER(GCC) && (PLATFORM(MAC) || OS(LINUX))
+
+#define defineUnaryDoubleOpWrapper(function) \
+ asm( \
+ ".text\n" \
+ ".globl " SYMBOL_STRING(function##Thunk) "\n" \
+ SYMBOL_STRING(function##Thunk) ":" "\n" \
+ "call " SYMBOL_STRING_RELOCATION(function) "\n" \
+ "ret\n" \
+ );\
+ extern "C" { \
+ MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
+ } \
+ static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
+
+#elif CPU(X86) && COMPILER(GCC) && (PLATFORM(MAC) || OS(LINUX))
+#define defineUnaryDoubleOpWrapper(function) \
+ asm( \
+ ".text\n" \
+ ".globl " SYMBOL_STRING(function##Thunk) "\n" \
+ SYMBOL_STRING(function##Thunk) ":" "\n" \
+ "subl $8, %esp\n" \
+ "movsd %xmm0, (%esp) \n" \
+ "call " SYMBOL_STRING_RELOCATION(function) "\n" \
+ "fstpl (%esp) \n" \
+ "movsd (%esp), %xmm0 \n" \
+ "addl $8, %esp\n" \
+ "ret\n" \
+ );\
+ extern "C" { \
+ MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
+ } \
+ static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
+
+#else
+
+#define defineUnaryDoubleOpWrapper(function) \
+ static MathThunk UnaryDoubleOpWrapper(function) = 0
+#endif
+
+defineUnaryDoubleOpWrapper(jsRound);
+defineUnaryDoubleOpWrapper(exp);
+defineUnaryDoubleOpWrapper(log);
+defineUnaryDoubleOpWrapper(floor);
+defineUnaryDoubleOpWrapper(ceil);
+
+MacroAssemblerCodeRef floorThunkGenerator(JSGlobalData* globalData)
+{
+ SpecializedThunkJIT jit(1, globalData);
+ MacroAssembler::Jump nonIntJump;
+ if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+ jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ nonIntJump.link(&jit);
+ jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+ jit.callDoubleToDouble(UnaryDoubleOpWrapper(floor));
+ SpecializedThunkJIT::JumpList doubleResult;
+ jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ doubleResult.link(&jit);
+ jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+ return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall());
+}
+
+MacroAssemblerCodeRef ceilThunkGenerator(JSGlobalData* globalData)
+{
+ SpecializedThunkJIT jit(1, globalData);
+ if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+ MacroAssembler::Jump nonIntJump;
+ jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ nonIntJump.link(&jit);
+ jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+ jit.callDoubleToDouble(UnaryDoubleOpWrapper(ceil));
+ SpecializedThunkJIT::JumpList doubleResult;
+ jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ doubleResult.link(&jit);
+ jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+ return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall());
+}
+
+static const double oneConstant = 1.0;
+static const double negativeHalfConstant = -0.5;
+
+MacroAssemblerCodeRef roundThunkGenerator(JSGlobalData* globalData)
+{
+ SpecializedThunkJIT jit(1, globalData);
+ if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+ MacroAssembler::Jump nonIntJump;
+ jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ nonIntJump.link(&jit);
+ jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+ jit.callDoubleToDouble(UnaryDoubleOpWrapper(jsRound));
+ SpecializedThunkJIT::JumpList doubleResult;
+ jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ doubleResult.link(&jit);
+ jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+ return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall());
+}
+
+MacroAssemblerCodeRef expThunkGenerator(JSGlobalData* globalData)
+{
+ if (!UnaryDoubleOpWrapper(exp))
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+ SpecializedThunkJIT jit(1, globalData);
+ if (!jit.supportsFloatingPoint())
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+ jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+ jit.callDoubleToDouble(UnaryDoubleOpWrapper(exp));
+ jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+ return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall());
+}
+
+MacroAssemblerCodeRef logThunkGenerator(JSGlobalData* globalData)
+{
+ if (!UnaryDoubleOpWrapper(log))
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+ SpecializedThunkJIT jit(1, globalData);
+ if (!jit.supportsFloatingPoint())
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+ jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+ jit.callDoubleToDouble(UnaryDoubleOpWrapper(log));
+ jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+ return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall());
+}
+
+MacroAssemblerCodeRef absThunkGenerator(JSGlobalData* globalData)
+{
+ SpecializedThunkJIT jit(1, globalData);
+ if (!jit.supportsFloatingPointAbs())
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+ MacroAssembler::Jump nonIntJump;
+ jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
+ jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
+ jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
+ jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
+ jit.appendFailure(jit.branch32(MacroAssembler::Equal, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1 << 31)));
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ nonIntJump.link(&jit);
+ // Shame about the double int conversion here.
+ jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+ jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
+ jit.returnDouble(SpecializedThunkJIT::fpRegT1);
+ return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall());
+}
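The integer fast path above is the classic branchless absolute value: derive a sign mask with an arithmetic shift, add it, then xor it. A standalone sketch, including the INT32_MIN case for which the thunk appends a failure branch and falls back to the double path:

#include <cassert>
#include <cstdint>

static int32_t branchlessAbs(int32_t x)
{
    uint32_t u = static_cast<uint32_t>(x);
    uint32_t sign = static_cast<uint32_t>(x >> 31);    // 0 or 0xffffffff (arithmetic shift assumed)
    return static_cast<int32_t>((u + sign) ^ sign);    // same rshift/add/xor sequence as the thunk
}

int main()
{
    assert(branchlessAbs(7) == 7);
    assert(branchlessAbs(-7) == 7);
    assert(branchlessAbs(INT32_MIN) == INT32_MIN);     // no positive counterpart, so the JIT bails out
    return 0;
}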
+
+MacroAssemblerCodeRef powThunkGenerator(JSGlobalData* globalData)
+{
+ SpecializedThunkJIT jit(2, globalData);
+ if (!jit.supportsFloatingPoint())
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+
+ jit.loadDouble(&oneConstant, SpecializedThunkJIT::fpRegT1);
+ jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+ MacroAssembler::Jump nonIntExponent;
+ jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
+ jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(0)));
+
+ MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
+ MacroAssembler::Label startLoop(jit.label());
+
+ MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1));
+ jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
+ exponentIsEven.link(&jit);
+ jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
+ jit.rshift32(MacroAssembler::TrustedImm32(1), SpecializedThunkJIT::regT0);
+ jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);
+
+ exponentIsZero.link(&jit);
+
+ {
+ SpecializedThunkJIT::JumpList doubleResult;
+ jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ doubleResult.link(&jit);
+ jit.returnDouble(SpecializedThunkJIT::fpRegT1);
+ }
+
+ if (jit.supportsFloatingPointSqrt()) {
+ nonIntExponent.link(&jit);
+ jit.loadDouble(&negativeHalfConstant, SpecializedThunkJIT::fpRegT3);
+ jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
+ jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
+ jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
+ jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
+ jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
+
+ SpecializedThunkJIT::JumpList doubleResult;
+ jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ doubleResult.link(&jit);
+ jit.returnDouble(SpecializedThunkJIT::fpRegT1);
+ } else
+ jit.appendFailure(nonIntExponent);
+
+ return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall());
+}
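The integer-exponent loop above is binary exponentiation (square-and-multiply), with fpRegT1 seeded from oneConstant; a standalone sketch of the same algorithm:

#include <cassert>

static double powIntSketch(double base, unsigned exponent)
{
    double result = 1.0;                 // fpRegT1 starts at oneConstant
    while (exponent) {
        if (exponent & 1)                // the "exponentIsEven" branch not taken
            result *= base;              // multiply in the current square
        base *= base;                    // square the base each iteration
        exponent >>= 1;                  // rshift32 by one
    }
    return result;
}

int main()
{
    assert(powIntSketch(2.0, 10) == 1024.0);
    assert(powIntSketch(3.0, 0) == 1.0);     // the exponentIsZero jump skips the loop entirely
    return 0;
}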
+
+}
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/ThunkGenerators.h b/Source/JavaScriptCore/jit/ThunkGenerators.h
new file mode 100644
index 000000000..b251f6be8
--- /dev/null
+++ b/Source/JavaScriptCore/jit/ThunkGenerators.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ThunkGenerators_h
+#define ThunkGenerators_h
+
+#if ENABLE(JIT)
+namespace JSC {
+ class ExecutablePool;
+ class JSGlobalData;
+ class NativeExecutable;
+ class MacroAssemblerCodeRef;
+
+ typedef MacroAssemblerCodeRef (*ThunkGenerator)(JSGlobalData*);
+ MacroAssemblerCodeRef charCodeAtThunkGenerator(JSGlobalData*);
+ MacroAssemblerCodeRef charAtThunkGenerator(JSGlobalData*);
+ MacroAssemblerCodeRef fromCharCodeThunkGenerator(JSGlobalData*);
+ MacroAssemblerCodeRef absThunkGenerator(JSGlobalData*);
+ MacroAssemblerCodeRef ceilThunkGenerator(JSGlobalData*);
+ MacroAssemblerCodeRef expThunkGenerator(JSGlobalData*);
+ MacroAssemblerCodeRef floorThunkGenerator(JSGlobalData*);
+ MacroAssemblerCodeRef logThunkGenerator(JSGlobalData*);
+ MacroAssemblerCodeRef roundThunkGenerator(JSGlobalData*);
+ MacroAssemblerCodeRef sqrtThunkGenerator(JSGlobalData*);
+ MacroAssemblerCodeRef powThunkGenerator(JSGlobalData*);
+}
+#endif
+
+#endif // ThunkGenerators_h