From 40736c5763bf61337c8c14e16d8587db021a87d4 Mon Sep 17 00:00:00 2001 From: Simon Hausmann Date: Fri, 6 Jan 2012 14:44:00 +0100 Subject: Imported WebKit commit 2ea9d364d0f6efa8fa64acf19f451504c59be0e4 (http://svn.webkit.org/repository/webkit/trunk@104285) --- Source/JavaScriptCore/jit/CompactJITCodeMap.h | 298 ++ Source/JavaScriptCore/jit/ExecutableAllocator.cpp | 180 + Source/JavaScriptCore/jit/ExecutableAllocator.h | 243 ++ .../jit/ExecutableAllocatorFixedVMPool.cpp | 144 + Source/JavaScriptCore/jit/JIT.cpp | 741 ++++ Source/JavaScriptCore/jit/JIT.h | 1175 ++++++ Source/JavaScriptCore/jit/JITArithmetic.cpp | 1139 ++++++ Source/JavaScriptCore/jit/JITArithmetic32_64.cpp | 1277 +++++++ Source/JavaScriptCore/jit/JITCall.cpp | 213 ++ Source/JavaScriptCore/jit/JITCall32_64.cpp | 299 ++ Source/JavaScriptCore/jit/JITCode.h | 167 + Source/JavaScriptCore/jit/JITInlineMethods.h | 987 ++++++ Source/JavaScriptCore/jit/JITOpcodes.cpp | 1660 +++++++++ Source/JavaScriptCore/jit/JITOpcodes32_64.cpp | 1733 +++++++++ Source/JavaScriptCore/jit/JITPropertyAccess.cpp | 1179 ++++++ .../JavaScriptCore/jit/JITPropertyAccess32_64.cpp | 1133 ++++++ Source/JavaScriptCore/jit/JITStubCall.h | 263 ++ Source/JavaScriptCore/jit/JITStubs.cpp | 3748 ++++++++++++++++++++ Source/JavaScriptCore/jit/JITStubs.h | 451 +++ Source/JavaScriptCore/jit/JITWriteBarrier.h | 147 + Source/JavaScriptCore/jit/JSInterfaceJIT.h | 358 ++ Source/JavaScriptCore/jit/SpecializedThunkJIT.h | 179 + Source/JavaScriptCore/jit/ThunkGenerators.cpp | 350 ++ Source/JavaScriptCore/jit/ThunkGenerators.h | 51 + 24 files changed, 18115 insertions(+) create mode 100644 Source/JavaScriptCore/jit/CompactJITCodeMap.h create mode 100644 Source/JavaScriptCore/jit/ExecutableAllocator.cpp create mode 100644 Source/JavaScriptCore/jit/ExecutableAllocator.h create mode 100644 Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp create mode 100644 Source/JavaScriptCore/jit/JIT.cpp create mode 100644 Source/JavaScriptCore/jit/JIT.h create mode 100644 Source/JavaScriptCore/jit/JITArithmetic.cpp create mode 100644 Source/JavaScriptCore/jit/JITArithmetic32_64.cpp create mode 100644 Source/JavaScriptCore/jit/JITCall.cpp create mode 100644 Source/JavaScriptCore/jit/JITCall32_64.cpp create mode 100644 Source/JavaScriptCore/jit/JITCode.h create mode 100644 Source/JavaScriptCore/jit/JITInlineMethods.h create mode 100644 Source/JavaScriptCore/jit/JITOpcodes.cpp create mode 100644 Source/JavaScriptCore/jit/JITOpcodes32_64.cpp create mode 100644 Source/JavaScriptCore/jit/JITPropertyAccess.cpp create mode 100644 Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp create mode 100644 Source/JavaScriptCore/jit/JITStubCall.h create mode 100644 Source/JavaScriptCore/jit/JITStubs.cpp create mode 100644 Source/JavaScriptCore/jit/JITStubs.h create mode 100644 Source/JavaScriptCore/jit/JITWriteBarrier.h create mode 100644 Source/JavaScriptCore/jit/JSInterfaceJIT.h create mode 100644 Source/JavaScriptCore/jit/SpecializedThunkJIT.h create mode 100644 Source/JavaScriptCore/jit/ThunkGenerators.cpp create mode 100644 Source/JavaScriptCore/jit/ThunkGenerators.h (limited to 'Source/JavaScriptCore/jit') diff --git a/Source/JavaScriptCore/jit/CompactJITCodeMap.h b/Source/JavaScriptCore/jit/CompactJITCodeMap.h new file mode 100644 index 000000000..5b92a8961 --- /dev/null +++ b/Source/JavaScriptCore/jit/CompactJITCodeMap.h @@ -0,0 +1,298 @@ +/* + * Copyright (C) 2011 Apple Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef CompactJITCodeMap_h +#define CompactJITCodeMap_h + +#include +#include +#include +#include +#include +#include +#include + +namespace JSC { + +// Gives you a compressed map between between bytecode indices and machine code +// entry points. The compression simply tries to use either 1, 2, or 4 bytes for +// any given offset. The largest offset that can be stored is 2^30. 
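+//
+// As a rough illustration (values picked arbitrarily), the byte sequences
+// produced by Encoder::encodeNumber() and read back by decodeNumber() below are:
+//
+//     5      -> 0x05                     1 byte,  top bit clear
+//     300    -> 0x81 0x2C                2 bytes, top bits "10"
+//     100000 -> 0xC0 0x01 0x86 0xA0      4 bytes, top bits "11"
+//
+// Each appended entry only stores the delta from the previous entry, which is
+// why append() requires non-decreasing bytecode indices and machine code offsets.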
+ +// Example use: +// +// CompactJITCodeMap::Encoder encoder(map); +// encoder.append(a, b); +// encoder.append(c, d); // preconditions: c >= a, d >= b +// OwnPtr map = encoder.finish(); +// +// At some later time: +// +// Vector decoded; +// map->decode(decoded); + +struct BytecodeAndMachineOffset { + BytecodeAndMachineOffset() { } + + BytecodeAndMachineOffset(unsigned bytecodeIndex, unsigned machineCodeOffset) + : m_bytecodeIndex(bytecodeIndex) + , m_machineCodeOffset(machineCodeOffset) + { + } + + unsigned m_bytecodeIndex; + unsigned m_machineCodeOffset; + + static inline unsigned getBytecodeIndex(BytecodeAndMachineOffset* mapping) + { + return mapping->m_bytecodeIndex; + } + + static inline unsigned getMachineCodeOffset(BytecodeAndMachineOffset* mapping) + { + return mapping->m_machineCodeOffset; + } +}; + +class CompactJITCodeMap { + WTF_MAKE_FAST_ALLOCATED; +public: + ~CompactJITCodeMap() + { + if (m_buffer) + fastFree(m_buffer); + } + + unsigned numberOfEntries() const + { + return m_numberOfEntries; + } + + void decode(Vector& result) const; + +private: + CompactJITCodeMap(uint8_t* buffer, unsigned size, unsigned numberOfEntries) + : m_buffer(buffer) +#if !ASSERT_DISABLED + , m_size(size) +#endif + , m_numberOfEntries(numberOfEntries) + { + UNUSED_PARAM(size); + } + + uint8_t at(unsigned index) const + { + ASSERT(index < m_size); + return m_buffer[index]; + } + + unsigned decodeNumber(unsigned& index) const + { + uint8_t headValue = at(index++); + if (!(headValue & 128)) + return headValue; + if (!(headValue & 64)) + return (static_cast(headValue & ~128) << 8) | at(index++); + unsigned second = at(index++); + unsigned third = at(index++); + unsigned fourth = at(index++); + return (static_cast(headValue & ~(128 + 64)) << 24) | (second << 16) | (third << 8) | fourth; + } + + uint8_t* m_buffer; +#if !ASSERT_DISABLED + unsigned m_size; +#endif + unsigned m_numberOfEntries; + +public: + class Encoder { + WTF_MAKE_NONCOPYABLE(Encoder); + public: + Encoder(); + ~Encoder(); + + void ensureCapacityFor(unsigned numberOfEntriesToAdd); + void append(unsigned bytecodeIndex, unsigned machineCodeOffset); + PassOwnPtr finish(); + + private: + void appendByte(uint8_t value); + void encodeNumber(uint32_t value); + + uint8_t* m_buffer; + unsigned m_size; + unsigned m_capacity; + unsigned m_numberOfEntries; + + unsigned m_previousBytecodeIndex; + unsigned m_previousMachineCodeOffset; + }; + + class Decoder { + WTF_MAKE_NONCOPYABLE(Decoder); + public: + Decoder(const CompactJITCodeMap*); + + unsigned numberOfEntriesRemaining() const; + void read(unsigned& bytecodeIndex, unsigned& machineCodeOffset); + + private: + const CompactJITCodeMap* m_jitCodeMap; + unsigned m_previousBytecodeIndex; + unsigned m_previousMachineCodeOffset; + unsigned m_numberOfEntriesRemaining; + unsigned m_bufferIndex; + }; + +private: + friend class Encoder; + friend class Decoder; +}; + +inline void CompactJITCodeMap::decode(Vector& result) const +{ + Decoder decoder(this); + result.resize(decoder.numberOfEntriesRemaining()); + for (unsigned i = 0; i < result.size(); ++i) + decoder.read(result[i].m_bytecodeIndex, result[i].m_machineCodeOffset); + + ASSERT(!decoder.numberOfEntriesRemaining()); +} + +inline CompactJITCodeMap::Encoder::Encoder() + : m_buffer(0) + , m_size(0) + , m_capacity(0) + , m_numberOfEntries(0) + , m_previousBytecodeIndex(0) + , m_previousMachineCodeOffset(0) +{ +} + +inline CompactJITCodeMap::Encoder::~Encoder() +{ + if (m_buffer) + fastFree(m_buffer); +} + +inline void 
CompactJITCodeMap::Encoder::append(unsigned bytecodeIndex, unsigned machineCodeOffset) +{ + ASSERT(bytecodeIndex >= m_previousBytecodeIndex); + ASSERT(machineCodeOffset >= m_previousMachineCodeOffset); + ensureCapacityFor(1); + encodeNumber(bytecodeIndex - m_previousBytecodeIndex); + encodeNumber(machineCodeOffset - m_previousMachineCodeOffset); + m_previousBytecodeIndex = bytecodeIndex; + m_previousMachineCodeOffset = machineCodeOffset; + m_numberOfEntries++; +} + +inline PassOwnPtr CompactJITCodeMap::Encoder::finish() +{ + m_capacity = m_size; + m_buffer = static_cast(fastRealloc(m_buffer, m_capacity)); + OwnPtr result = adoptPtr(new CompactJITCodeMap(m_buffer, m_size, m_numberOfEntries)); + m_buffer = 0; + m_size = 0; + m_capacity = 0; + m_numberOfEntries = 0; + m_previousBytecodeIndex = 0; + m_previousMachineCodeOffset = 0; + return result.release(); +} + +inline void CompactJITCodeMap::Encoder::appendByte(uint8_t value) +{ + ASSERT(m_size + 1 <= m_capacity); + m_buffer[m_size++] = value; +} + +inline void CompactJITCodeMap::Encoder::encodeNumber(uint32_t value) +{ + ASSERT(m_size + 4 <= m_capacity); + ASSERT(value < (1 << 30)); + if (value <= 127) { + uint8_t headValue = static_cast(value); + ASSERT(!(headValue & 128)); + appendByte(headValue); + } else if (value <= 16383) { + uint8_t headValue = static_cast(value >> 8); + ASSERT(!(headValue & 128)); + ASSERT(!(headValue & 64)); + appendByte(headValue | 128); + appendByte(static_cast(value)); + } else { + uint8_t headValue = static_cast(value >> 24); + ASSERT(!(headValue & 128)); + ASSERT(!(headValue & 64)); + appendByte(headValue | 128 | 64); + appendByte(static_cast(value >> 16)); + appendByte(static_cast(value >> 8)); + appendByte(static_cast(value)); + } +} + +inline void CompactJITCodeMap::Encoder::ensureCapacityFor(unsigned numberOfEntriesToAdd) +{ + unsigned capacityNeeded = m_size + numberOfEntriesToAdd * 2 * 4; + if (capacityNeeded > m_capacity) { + m_capacity = capacityNeeded * 2; + m_buffer = static_cast(fastRealloc(m_buffer, m_capacity)); + } +} + +inline CompactJITCodeMap::Decoder::Decoder(const CompactJITCodeMap* jitCodeMap) + : m_jitCodeMap(jitCodeMap) + , m_previousBytecodeIndex(0) + , m_previousMachineCodeOffset(0) + , m_numberOfEntriesRemaining(jitCodeMap->m_numberOfEntries) + , m_bufferIndex(0) +{ +} + +inline unsigned CompactJITCodeMap::Decoder::numberOfEntriesRemaining() const +{ + ASSERT(m_numberOfEntriesRemaining || m_bufferIndex == m_jitCodeMap->m_size); + return m_numberOfEntriesRemaining; +} + +inline void CompactJITCodeMap::Decoder::read(unsigned& bytecodeIndex, unsigned& machineCodeOffset) +{ + ASSERT(numberOfEntriesRemaining()); + + m_previousBytecodeIndex += m_jitCodeMap->decodeNumber(m_bufferIndex); + m_previousMachineCodeOffset += m_jitCodeMap->decodeNumber(m_bufferIndex); + bytecodeIndex = m_previousBytecodeIndex; + machineCodeOffset = m_previousMachineCodeOffset; + m_numberOfEntriesRemaining--; +} + +} // namespace JSC + +#endif // CompactJITCodeMap_h diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp new file mode 100644 index 000000000..82c149d0e --- /dev/null +++ b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp @@ -0,0 +1,180 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" + +#include "ExecutableAllocator.h" + +#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND) +#include +#include +#include +#endif + +#if ENABLE(ASSEMBLER) + +using namespace WTF; + +namespace JSC { + +#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND) + +class DemandExecutableAllocator : public MetaAllocator { +public: + DemandExecutableAllocator() + : MetaAllocator(32) // round up all allocations to 32 bytes + { + // Don't preallocate any memory here. + } + + virtual ~DemandExecutableAllocator() + { + for (unsigned i = 0; i < reservations.size(); ++i) + reservations.at(i).deallocate(); + } + +protected: + virtual void* allocateNewSpace(size_t& numPages) + { + size_t newNumPages = (((numPages * pageSize() + JIT_ALLOCATOR_LARGE_ALLOC_SIZE - 1) / JIT_ALLOCATOR_LARGE_ALLOC_SIZE * JIT_ALLOCATOR_LARGE_ALLOC_SIZE) + pageSize() - 1) / pageSize(); + + ASSERT(newNumPages >= numPages); + + numPages = newNumPages; + + PageReservation reservation = PageReservation::reserve(numPages * pageSize(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true); + if (!reservation) + CRASH(); + + reservations.append(reservation); + + return reservation.base(); + } + + virtual void notifyNeedPage(void* page) + { + OSAllocator::commit(page, pageSize(), EXECUTABLE_POOL_WRITABLE, true); + } + + virtual void notifyPageIsFree(void* page) + { + OSAllocator::decommit(page, pageSize()); + } + +private: + Vector reservations; +}; + +static DemandExecutableAllocator* allocator; + +void ExecutableAllocator::initializeAllocator() +{ + ASSERT(!allocator); + allocator = new DemandExecutableAllocator(); +} + +ExecutableAllocator::ExecutableAllocator(JSGlobalData&) +{ + ASSERT(allocator); +} + +bool ExecutableAllocator::isValid() const +{ + return true; +} + +bool ExecutableAllocator::underMemoryPressure() +{ + return false; +} + +PassRefPtr ExecutableAllocator::allocate(JSGlobalData&, size_t sizeInBytes) +{ + RefPtr result = allocator->allocate(sizeInBytes); + if (!result) + CRASH(); + return result.release(); +} + +size_t ExecutableAllocator::committedByteCount() +{ + return allocator->bytesCommitted(); +} + +#if ENABLE(META_ALLOCATOR_PROFILE) +void ExecutableAllocator::dumpProfile() +{ + allocator->dumpProfile(); +} +#endif + +#endif // ENABLE(EXECUTABLE_ALLOCATOR_DEMAND) + +#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) + +#if OS(WINDOWS) +#error "ASSEMBLER_WX_EXCLUSIVE not yet suported on this platform." 
+#endif + +void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSetting setting) +{ + size_t pageSize = WTF::pageSize(); + + // Calculate the start of the page containing this region, + // and account for this extra memory within size. + intptr_t startPtr = reinterpret_cast(start); + intptr_t pageStartPtr = startPtr & ~(pageSize - 1); + void* pageStart = reinterpret_cast(pageStartPtr); + size += (startPtr - pageStartPtr); + + // Round size up + size += (pageSize - 1); + size &= ~(pageSize - 1); + + mprotect(pageStart, size, (setting == Writable) ? PROTECTION_FLAGS_RW : PROTECTION_FLAGS_RX); +} + +#endif + +#if CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT) + +__asm void ExecutableAllocator::cacheFlush(void* code, size_t size) +{ + ARM + push {r7} + add r1, r1, r0 + mov r7, #0xf0000 + add r7, r7, #0x2 + mov r2, #0x0 + svc #0x0 + pop {r7} + bx lr +} + +#endif + +} + +#endif // HAVE(ASSEMBLER) diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.h b/Source/JavaScriptCore/jit/ExecutableAllocator.h new file mode 100644 index 000000000..876bda62e --- /dev/null +++ b/Source/JavaScriptCore/jit/ExecutableAllocator.h @@ -0,0 +1,243 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef ExecutableAllocator_h +#define ExecutableAllocator_h +#include // for ptrdiff_t +#include +#include +#include +#include +#include +#include +#include +#include + +#if OS(IOS) +#include +#endif + +#if OS(IOS) || OS(QNX) +#include +#endif + +#if CPU(MIPS) && OS(LINUX) +#include +#endif + +#if CPU(SH4) && OS(LINUX) +#include +#include +#include +#include +#endif + +#if OS(WINCE) +// From pkfuncs.h (private header file from the Platform Builder) +#define CACHE_SYNC_ALL 0x07F +extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags); +#endif + +#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (pageSize() * 4) + +#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) +#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE) +#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC) +#define EXECUTABLE_POOL_WRITABLE false +#else +#define EXECUTABLE_POOL_WRITABLE true +#endif + +namespace JSC { + +class JSGlobalData; +void releaseExecutableMemory(JSGlobalData&); + +inline size_t roundUpAllocationSize(size_t request, size_t granularity) +{ + if ((std::numeric_limits::max() - granularity) <= request) + CRASH(); // Allocation is too large + + // Round up to next page boundary + size_t size = request + (granularity - 1); + size = size & ~(granularity - 1); + ASSERT(size >= request); + return size; +} + +} + +#if ENABLE(JIT) && ENABLE(ASSEMBLER) + +namespace JSC { + +typedef WTF::MetaAllocatorHandle ExecutableMemoryHandle; + +class ExecutableAllocator { + enum ProtectionSetting { Writable, Executable }; + +public: + ExecutableAllocator(JSGlobalData&); + + static void initializeAllocator(); + + bool isValid() const; + + static bool underMemoryPressure(); + +#if ENABLE(META_ALLOCATOR_PROFILE) + static void dumpProfile(); +#else + static void dumpProfile() { } +#endif + + PassRefPtr allocate(JSGlobalData&, size_t sizeInBytes); + +#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) + static void makeWritable(void* start, size_t size) + { + reprotectRegion(start, size, Writable); + } + + static void makeExecutable(void* start, size_t size) + { + reprotectRegion(start, size, Executable); + } +#else + static void makeWritable(void*, size_t) {} + static void makeExecutable(void*, size_t) {} +#endif + + +#if CPU(X86) || CPU(X86_64) + static void cacheFlush(void*, size_t) + { + } +#elif CPU(MIPS) + static void cacheFlush(void* code, size_t size) + { +#if GCC_VERSION_AT_LEAST(4, 3, 0) +#if WTF_MIPS_ISA_REV(2) && !GCC_VERSION_AT_LEAST(4, 4, 3) + int lineSize; + asm("rdhwr %0, $1" : "=r" (lineSize)); + // + // Modify "start" and "end" to avoid GCC 4.3.0-4.4.2 bug in + // mips_expand_synci_loop that may execute synci one more time. + // "start" points to the fisrt byte of the cache line. + // "end" points to the last byte of the line before the last cache line. + // Because size is always a multiple of 4, this is safe to set + // "end" to the last byte. 
+ // + intptr_t start = reinterpret_cast(code) & (-lineSize); + intptr_t end = ((reinterpret_cast(code) + size - 1) & (-lineSize)) - 1; + __builtin___clear_cache(reinterpret_cast(start), reinterpret_cast(end)); +#else + intptr_t end = reinterpret_cast(code) + size; + __builtin___clear_cache(reinterpret_cast(code), reinterpret_cast(end)); +#endif +#else + _flush_cache(reinterpret_cast(code), size, BCACHE); +#endif + } +#elif CPU(ARM_THUMB2) && OS(IOS) + static void cacheFlush(void* code, size_t size) + { + sys_cache_control(kCacheFunctionPrepareForExecution, code, size); + } +#elif CPU(ARM_THUMB2) && OS(LINUX) + static void cacheFlush(void* code, size_t size) + { + asm volatile ( + "push {r7}\n" + "mov r0, %0\n" + "mov r1, %1\n" + "movw r7, #0x2\n" + "movt r7, #0xf\n" + "movs r2, #0x0\n" + "svc 0x0\n" + "pop {r7}\n" + : + : "r" (code), "r" (reinterpret_cast(code) + size) + : "r0", "r1", "r2"); + } +#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT) + static __asm void cacheFlush(void* code, size_t size); +#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(GCC) + static void cacheFlush(void* code, size_t size) + { + asm volatile ( + "push {r7}\n" + "mov r0, %0\n" + "mov r1, %1\n" + "mov r7, #0xf0000\n" + "add r7, r7, #0x2\n" + "mov r2, #0x0\n" + "svc 0x0\n" + "pop {r7}\n" + : + : "r" (code), "r" (reinterpret_cast(code) + size) + : "r0", "r1", "r2"); + } +#elif OS(WINCE) + static void cacheFlush(void* code, size_t size) + { + CacheRangeFlush(code, size, CACHE_SYNC_ALL); + } +#elif CPU(SH4) && OS(LINUX) + static void cacheFlush(void* code, size_t size) + { +#ifdef CACHEFLUSH_D_L2 + syscall(__NR_cacheflush, reinterpret_cast(code), size, CACHEFLUSH_D_WB | CACHEFLUSH_I | CACHEFLUSH_D_L2); +#else + syscall(__NR_cacheflush, reinterpret_cast(code), size, CACHEFLUSH_D_WB | CACHEFLUSH_I); +#endif + } +#elif OS(QNX) + static void cacheFlush(void* code, size_t size) + { +#if !ENABLE(ASSEMBLER_WX_EXCLUSIVE) + msync(code, size, MS_INVALIDATE_ICACHE); +#else + UNUSED_PARAM(code); + UNUSED_PARAM(size); +#endif + } +#else + #error "The cacheFlush support is missing on this platform." +#endif + static size_t committedByteCount(); + +private: + +#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) + static void reprotectRegion(void*, size_t, ProtectionSetting); +#endif +}; + +} // namespace JSC + +#endif // ENABLE(JIT) && ENABLE(ASSEMBLER) + +#endif // !defined(ExecutableAllocator) diff --git a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp new file mode 100644 index 000000000..3771c74a9 --- /dev/null +++ b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp @@ -0,0 +1,144 @@ +/* + * Copyright (C) 2009 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" + +#include "ExecutableAllocator.h" + +#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED) + +#include + +#include +#include +#include +#include +#include + +#if OS(LINUX) +#include +#endif + +using namespace WTF; + +namespace JSC { + +#if CPU(ARM) +static const size_t fixedPoolSize = 16 * 1024 * 1024; +#elif CPU(X86_64) +static const size_t fixedPoolSize = 1024 * 1024 * 1024; +#else +static const size_t fixedPoolSize = 32 * 1024 * 1024; +#endif + +class FixedVMPoolExecutableAllocator : public MetaAllocator { +public: + FixedVMPoolExecutableAllocator() + : MetaAllocator(32) // round up all allocations to 32 bytes + { + m_reservation = PageReservation::reserveWithGuardPages(fixedPoolSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true); +#if !ENABLE(INTERPRETER) + if (!m_reservation) + CRASH(); +#endif + if (m_reservation) { + ASSERT(m_reservation.size() == fixedPoolSize); + addFreshFreeSpace(m_reservation.base(), m_reservation.size()); + } + } + +protected: + virtual void* allocateNewSpace(size_t&) + { + // We're operating in a fixed pool, so new allocation is always prohibited. + return 0; + } + + virtual void notifyNeedPage(void* page) + { + m_reservation.commit(page, pageSize()); + } + + virtual void notifyPageIsFree(void* page) + { + m_reservation.decommit(page, pageSize()); + } + +private: + PageReservation m_reservation; +}; + +static FixedVMPoolExecutableAllocator* allocator; + +void ExecutableAllocator::initializeAllocator() +{ + ASSERT(!allocator); + allocator = new FixedVMPoolExecutableAllocator(); +} + +ExecutableAllocator::ExecutableAllocator(JSGlobalData&) +{ + ASSERT(allocator); +} + +bool ExecutableAllocator::isValid() const +{ + return !!allocator->bytesReserved(); +} + +bool ExecutableAllocator::underMemoryPressure() +{ + MetaAllocator::Statistics statistics = allocator->currentStatistics(); + return statistics.bytesAllocated > statistics.bytesReserved / 2; +} + +PassRefPtr ExecutableAllocator::allocate(JSGlobalData& globalData, size_t sizeInBytes) +{ + RefPtr result = allocator->allocate(sizeInBytes); + if (!result) { + releaseExecutableMemory(globalData); + result = allocator->allocate(sizeInBytes); + if (!result) + CRASH(); + } + return result.release(); +} + +size_t ExecutableAllocator::committedByteCount() +{ + return allocator->bytesCommitted(); +} + +#if ENABLE(META_ALLOCATOR_PROFILE) +void ExecutableAllocator::dumpProfile() +{ + allocator->dumpProfile(); +} +#endif + +} + + +#endif // ENABLE(EXECUTABLE_ALLOCATOR_FIXED) diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp new file mode 100644 index 000000000..4a6e3fb3d --- /dev/null +++ b/Source/JavaScriptCore/jit/JIT.cpp @@ -0,0 +1,741 @@ +/* + * Copyright (C) 2008, 2009 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" + +#if ENABLE(JIT) +#include "JIT.h" + +// This probably does not belong here; adding here for now as a quick Windows build fix. +#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X) +#include "MacroAssembler.h" +JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2; +#endif + +#include "CodeBlock.h" +#include "CryptographicallyRandomNumber.h" +#include "DFGNode.h" // for DFG_SUCCESS_STATS +#include "Interpreter.h" +#include "JITInlineMethods.h" +#include "JITStubCall.h" +#include "JSArray.h" +#include "JSFunction.h" +#include "LinkBuffer.h" +#include "RepatchBuffer.h" +#include "ResultType.h" +#include "SamplingTool.h" + +using namespace std; + +namespace JSC { + +void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction) +{ + RepatchBuffer repatchBuffer(codeblock); + repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction); +} + +void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction) +{ + RepatchBuffer repatchBuffer(codeblock); + repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction); +} + +void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction) +{ + RepatchBuffer repatchBuffer(codeblock); + repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction); +} + +JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock) + : m_interpreter(globalData->interpreter) + , m_globalData(globalData) + , m_codeBlock(codeBlock) + , m_labels(codeBlock ? 
codeBlock->numberOfInstructions() : 0) + , m_bytecodeOffset((unsigned)-1) +#if USE(JSVALUE32_64) + , m_jumpTargetIndex(0) + , m_mappedBytecodeOffset((unsigned)-1) + , m_mappedVirtualRegisterIndex(RegisterFile::ReturnPC) + , m_mappedTag((RegisterID)-1) + , m_mappedPayload((RegisterID)-1) +#else + , m_lastResultBytecodeRegister(std::numeric_limits::max()) + , m_jumpTargetsPosition(0) +#endif +#if USE(OS_RANDOMNESS) + , m_randomGenerator(cryptographicallyRandomNumber()) +#else + , m_randomGenerator(static_cast(randomNumber() * 0xFFFFFFF)) +#endif +{ +} + +#if ENABLE(DFG_JIT) +void JIT::emitOptimizationCheck(OptimizationCheckKind kind) +{ + if (!shouldEmitProfiling()) + return; + + Jump skipOptimize = branchAdd32(Signed, TrustedImm32(kind == LoopOptimizationCheck ? Options::executionCounterIncrementForLoop : Options::executionCounterIncrementForReturn), AbsoluteAddress(m_codeBlock->addressOfExecuteCounter())); + JITStubCall stubCall(this, kind == LoopOptimizationCheck ? cti_optimize_from_loop : cti_optimize_from_ret); + if (kind == LoopOptimizationCheck) + stubCall.addArgument(Imm32(m_bytecodeOffset)); + stubCall.call(); + skipOptimize.link(this); +} +#endif + +#if CPU(X86) +void JIT::emitTimeoutCheck() +{ + Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), AbsoluteAddress(&m_globalData->m_timeoutCount)); + JITStubCall stubCall(this, cti_timeout_check); + stubCall.addArgument(regT1, regT0); // save last result registers. + stubCall.call(regT0); + store32(regT0, &m_globalData->m_timeoutCount); + stubCall.getArgument(0, regT1, regT0); // reload last result registers. + skipTimeout.link(this); +} +#elif USE(JSVALUE32_64) +void JIT::emitTimeoutCheck() +{ + Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), timeoutCheckRegister); + JITStubCall stubCall(this, cti_timeout_check); + stubCall.addArgument(regT1, regT0); // save last result registers. + stubCall.call(timeoutCheckRegister); + stubCall.getArgument(0, regT1, regT0); // reload last result registers. 
+ skipTimeout.link(this); +} +#else +void JIT::emitTimeoutCheck() +{ + Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), timeoutCheckRegister); + JITStubCall(this, cti_timeout_check).call(timeoutCheckRegister); + skipTimeout.link(this); + + killLastResultRegister(); +} +#endif + +#define NEXT_OPCODE(name) \ + m_bytecodeOffset += OPCODE_LENGTH(name); \ + break; + +#if USE(JSVALUE32_64) +#define DEFINE_BINARY_OP(name) \ + case name: { \ + JITStubCall stubCall(this, cti_##name); \ + stubCall.addArgument(currentInstruction[2].u.operand); \ + stubCall.addArgument(currentInstruction[3].u.operand); \ + stubCall.call(currentInstruction[1].u.operand); \ + NEXT_OPCODE(name); \ + } + +#define DEFINE_UNARY_OP(name) \ + case name: { \ + JITStubCall stubCall(this, cti_##name); \ + stubCall.addArgument(currentInstruction[2].u.operand); \ + stubCall.call(currentInstruction[1].u.operand); \ + NEXT_OPCODE(name); \ + } + +#else // USE(JSVALUE32_64) + +#define DEFINE_BINARY_OP(name) \ + case name: { \ + JITStubCall stubCall(this, cti_##name); \ + stubCall.addArgument(currentInstruction[2].u.operand, regT2); \ + stubCall.addArgument(currentInstruction[3].u.operand, regT2); \ + stubCall.call(currentInstruction[1].u.operand); \ + NEXT_OPCODE(name); \ + } + +#define DEFINE_UNARY_OP(name) \ + case name: { \ + JITStubCall stubCall(this, cti_##name); \ + stubCall.addArgument(currentInstruction[2].u.operand, regT2); \ + stubCall.call(currentInstruction[1].u.operand); \ + NEXT_OPCODE(name); \ + } +#endif // USE(JSVALUE32_64) + +#define DEFINE_OP(name) \ + case name: { \ + emit_##name(currentInstruction); \ + NEXT_OPCODE(name); \ + } + +#define DEFINE_SLOWCASE_OP(name) \ + case name: { \ + emitSlow_##name(currentInstruction, iter); \ + NEXT_OPCODE(name); \ + } + +void JIT::privateCompileMainPass() +{ + Instruction* instructionsBegin = m_codeBlock->instructions().begin(); + unsigned instructionCount = m_codeBlock->instructions().size(); + + m_globalResolveInfoIndex = 0; + m_callLinkInfoIndex = 0; + + for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) { + Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset; + ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset); + +#if ENABLE(OPCODE_SAMPLING) + if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice. 
+ sampleInstruction(currentInstruction); +#endif + +#if USE(JSVALUE64) + if (atJumpTarget()) + killLastResultRegister(); +#endif + + m_labels[m_bytecodeOffset] = label(); + +#if ENABLE(JIT_VERBOSE) + printf("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset()); +#endif + + switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) { + DEFINE_BINARY_OP(op_del_by_val) + DEFINE_BINARY_OP(op_in) + DEFINE_BINARY_OP(op_less) + DEFINE_BINARY_OP(op_lesseq) + DEFINE_BINARY_OP(op_greater) + DEFINE_BINARY_OP(op_greatereq) + DEFINE_UNARY_OP(op_is_boolean) + DEFINE_UNARY_OP(op_is_function) + DEFINE_UNARY_OP(op_is_number) + DEFINE_UNARY_OP(op_is_object) + DEFINE_UNARY_OP(op_is_string) + DEFINE_UNARY_OP(op_is_undefined) +#if USE(JSVALUE64) + DEFINE_UNARY_OP(op_negate) +#endif + DEFINE_UNARY_OP(op_typeof) + + DEFINE_OP(op_add) + DEFINE_OP(op_bitand) + DEFINE_OP(op_bitnot) + DEFINE_OP(op_bitor) + DEFINE_OP(op_bitxor) + DEFINE_OP(op_call) + DEFINE_OP(op_call_eval) + DEFINE_OP(op_call_varargs) + DEFINE_OP(op_catch) + DEFINE_OP(op_construct) + DEFINE_OP(op_get_callee) + DEFINE_OP(op_create_this) + DEFINE_OP(op_convert_this) + DEFINE_OP(op_init_lazy_reg) + DEFINE_OP(op_create_arguments) + DEFINE_OP(op_debug) + DEFINE_OP(op_del_by_id) + DEFINE_OP(op_div) + DEFINE_OP(op_end) + DEFINE_OP(op_enter) + DEFINE_OP(op_create_activation) + DEFINE_OP(op_eq) + DEFINE_OP(op_eq_null) + DEFINE_OP(op_get_by_id) + DEFINE_OP(op_get_arguments_length) + DEFINE_OP(op_get_by_val) + DEFINE_OP(op_get_argument_by_val) + DEFINE_OP(op_get_by_pname) + DEFINE_OP(op_get_global_var) + DEFINE_OP(op_get_pnames) + DEFINE_OP(op_get_scoped_var) + DEFINE_OP(op_check_has_instance) + DEFINE_OP(op_instanceof) + DEFINE_OP(op_jeq_null) + DEFINE_OP(op_jfalse) + DEFINE_OP(op_jmp) + DEFINE_OP(op_jmp_scopes) + DEFINE_OP(op_jneq_null) + DEFINE_OP(op_jneq_ptr) + DEFINE_OP(op_jless) + DEFINE_OP(op_jlesseq) + DEFINE_OP(op_jgreater) + DEFINE_OP(op_jgreatereq) + DEFINE_OP(op_jnless) + DEFINE_OP(op_jnlesseq) + DEFINE_OP(op_jngreater) + DEFINE_OP(op_jngreatereq) + DEFINE_OP(op_jsr) + DEFINE_OP(op_jtrue) + DEFINE_OP(op_loop) + DEFINE_OP(op_loop_hint) + DEFINE_OP(op_loop_if_less) + DEFINE_OP(op_loop_if_lesseq) + DEFINE_OP(op_loop_if_greater) + DEFINE_OP(op_loop_if_greatereq) + DEFINE_OP(op_loop_if_true) + DEFINE_OP(op_loop_if_false) + DEFINE_OP(op_lshift) + DEFINE_OP(op_method_check) + DEFINE_OP(op_mod) + DEFINE_OP(op_mov) + DEFINE_OP(op_mul) +#if USE(JSVALUE32_64) + DEFINE_OP(op_negate) +#endif + DEFINE_OP(op_neq) + DEFINE_OP(op_neq_null) + DEFINE_OP(op_new_array) + DEFINE_OP(op_new_array_buffer) + DEFINE_OP(op_new_func) + DEFINE_OP(op_new_func_exp) + DEFINE_OP(op_new_object) + DEFINE_OP(op_new_regexp) + DEFINE_OP(op_next_pname) + DEFINE_OP(op_not) + DEFINE_OP(op_nstricteq) + DEFINE_OP(op_pop_scope) + DEFINE_OP(op_post_dec) + DEFINE_OP(op_post_inc) + DEFINE_OP(op_pre_dec) + DEFINE_OP(op_pre_inc) + DEFINE_OP(op_profile_did_call) + DEFINE_OP(op_profile_will_call) + DEFINE_OP(op_push_new_scope) + DEFINE_OP(op_push_scope) + DEFINE_OP(op_put_by_id) + DEFINE_OP(op_put_by_index) + DEFINE_OP(op_put_by_val) + DEFINE_OP(op_put_getter) + DEFINE_OP(op_put_global_var) + DEFINE_OP(op_put_scoped_var) + DEFINE_OP(op_put_setter) + DEFINE_OP(op_resolve) + DEFINE_OP(op_resolve_base) + DEFINE_OP(op_ensure_property_exists) + DEFINE_OP(op_resolve_global) + DEFINE_OP(op_resolve_global_dynamic) + DEFINE_OP(op_resolve_skip) + DEFINE_OP(op_resolve_with_base) + DEFINE_OP(op_resolve_with_this) + DEFINE_OP(op_ret) + DEFINE_OP(op_call_put_result) + 
DEFINE_OP(op_ret_object_or_this) + DEFINE_OP(op_rshift) + DEFINE_OP(op_urshift) + DEFINE_OP(op_sret) + DEFINE_OP(op_strcat) + DEFINE_OP(op_stricteq) + DEFINE_OP(op_sub) + DEFINE_OP(op_switch_char) + DEFINE_OP(op_switch_imm) + DEFINE_OP(op_switch_string) + DEFINE_OP(op_tear_off_activation) + DEFINE_OP(op_tear_off_arguments) + DEFINE_OP(op_throw) + DEFINE_OP(op_throw_reference_error) + DEFINE_OP(op_to_jsnumber) + DEFINE_OP(op_to_primitive) + + case op_get_array_length: + case op_get_by_id_chain: + case op_get_by_id_generic: + case op_get_by_id_proto: + case op_get_by_id_self: + case op_get_by_id_getter_chain: + case op_get_by_id_getter_proto: + case op_get_by_id_getter_self: + case op_get_by_id_custom_chain: + case op_get_by_id_custom_proto: + case op_get_by_id_custom_self: + case op_get_string_length: + case op_put_by_id_generic: + case op_put_by_id_replace: + case op_put_by_id_transition: + ASSERT_NOT_REACHED(); + } + } + + ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size()); + +#ifndef NDEBUG + // Reset this, in order to guard its use with ASSERTs. + m_bytecodeOffset = (unsigned)-1; +#endif +} + +void JIT::privateCompileLinkPass() +{ + unsigned jmpTableCount = m_jmpTable.size(); + for (unsigned i = 0; i < jmpTableCount; ++i) + m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this); + m_jmpTable.clear(); +} + +void JIT::privateCompileSlowCases() +{ + Instruction* instructionsBegin = m_codeBlock->instructions().begin(); + + m_propertyAccessInstructionIndex = 0; + m_globalResolveInfoIndex = 0; + m_callLinkInfoIndex = 0; + +#if !ASSERT_DISABLED && ENABLE(VALUE_PROFILER) + // Use this to assert that slow-path code associates new profiling sites with existing + // ValueProfiles rather than creating new ones. This ensures that for a given instruction + // (say, get_by_id) we get combined statistics for both the fast-path executions of that + // instructions and the slow-path executions. Furthermore, if the slow-path code created + // new ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset, + // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset(). 
+ unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles(); +#endif + + for (Vector::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) { +#if USE(JSVALUE64) + killLastResultRegister(); +#endif + + m_bytecodeOffset = iter->to; +#ifndef NDEBUG + unsigned firstTo = m_bytecodeOffset; +#endif + Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset; + +#if ENABLE(VALUE_PROFILER) + RareCaseProfile* rareCaseProfile = 0; + if (m_canBeOptimized) + rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset); +#endif + +#if ENABLE(JIT_VERBOSE) + printf("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset()); +#endif + + switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) { + DEFINE_SLOWCASE_OP(op_add) + DEFINE_SLOWCASE_OP(op_bitand) + DEFINE_SLOWCASE_OP(op_bitnot) + DEFINE_SLOWCASE_OP(op_bitor) + DEFINE_SLOWCASE_OP(op_bitxor) + DEFINE_SLOWCASE_OP(op_call) + DEFINE_SLOWCASE_OP(op_call_eval) + DEFINE_SLOWCASE_OP(op_call_varargs) + DEFINE_SLOWCASE_OP(op_construct) + DEFINE_SLOWCASE_OP(op_convert_this) + DEFINE_SLOWCASE_OP(op_create_this) + DEFINE_SLOWCASE_OP(op_div) + DEFINE_SLOWCASE_OP(op_eq) + DEFINE_SLOWCASE_OP(op_get_by_id) + DEFINE_SLOWCASE_OP(op_get_arguments_length) + DEFINE_SLOWCASE_OP(op_get_by_val) + DEFINE_SLOWCASE_OP(op_get_argument_by_val) + DEFINE_SLOWCASE_OP(op_get_by_pname) + DEFINE_SLOWCASE_OP(op_check_has_instance) + DEFINE_SLOWCASE_OP(op_instanceof) + DEFINE_SLOWCASE_OP(op_jfalse) + DEFINE_SLOWCASE_OP(op_jless) + DEFINE_SLOWCASE_OP(op_jlesseq) + DEFINE_SLOWCASE_OP(op_jgreater) + DEFINE_SLOWCASE_OP(op_jgreatereq) + DEFINE_SLOWCASE_OP(op_jnless) + DEFINE_SLOWCASE_OP(op_jnlesseq) + DEFINE_SLOWCASE_OP(op_jngreater) + DEFINE_SLOWCASE_OP(op_jngreatereq) + DEFINE_SLOWCASE_OP(op_jtrue) + DEFINE_SLOWCASE_OP(op_loop_if_less) + DEFINE_SLOWCASE_OP(op_loop_if_lesseq) + DEFINE_SLOWCASE_OP(op_loop_if_greater) + DEFINE_SLOWCASE_OP(op_loop_if_greatereq) + DEFINE_SLOWCASE_OP(op_loop_if_true) + DEFINE_SLOWCASE_OP(op_loop_if_false) + DEFINE_SLOWCASE_OP(op_lshift) + DEFINE_SLOWCASE_OP(op_method_check) + DEFINE_SLOWCASE_OP(op_mod) + DEFINE_SLOWCASE_OP(op_mul) +#if USE(JSVALUE32_64) + DEFINE_SLOWCASE_OP(op_negate) +#endif + DEFINE_SLOWCASE_OP(op_neq) + DEFINE_SLOWCASE_OP(op_new_object) + DEFINE_SLOWCASE_OP(op_new_func) + DEFINE_SLOWCASE_OP(op_new_func_exp) + DEFINE_SLOWCASE_OP(op_not) + DEFINE_SLOWCASE_OP(op_nstricteq) + DEFINE_SLOWCASE_OP(op_post_dec) + DEFINE_SLOWCASE_OP(op_post_inc) + DEFINE_SLOWCASE_OP(op_pre_dec) + DEFINE_SLOWCASE_OP(op_pre_inc) + DEFINE_SLOWCASE_OP(op_put_by_id) + DEFINE_SLOWCASE_OP(op_put_by_val) + DEFINE_SLOWCASE_OP(op_resolve_global) + DEFINE_SLOWCASE_OP(op_resolve_global_dynamic) + DEFINE_SLOWCASE_OP(op_rshift) + DEFINE_SLOWCASE_OP(op_urshift) + DEFINE_SLOWCASE_OP(op_stricteq) + DEFINE_SLOWCASE_OP(op_sub) + DEFINE_SLOWCASE_OP(op_to_jsnumber) + DEFINE_SLOWCASE_OP(op_to_primitive) + default: + ASSERT_NOT_REACHED(); + } + + ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to,"Not enough jumps linked in slow case codegen."); + ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen."); + +#if ENABLE(VALUE_PROFILER) + if (m_canBeOptimized) + add32(Imm32(1), AbsoluteAddress(&rareCaseProfile->m_counter)); +#endif + + emitJumpSlowToHot(jump(), 0); + } + + ASSERT(m_propertyAccessInstructionIndex == m_propertyAccessCompilationInfo.size()); + ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size()); +#if 
ENABLE(VALUE_PROFILER) + ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles()); +#endif + +#ifndef NDEBUG + // Reset this, in order to guard its use with ASSERTs. + m_bytecodeOffset = (unsigned)-1; +#endif +} + +JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck) +{ +#if ENABLE(VALUE_PROFILER) + m_canBeOptimized = m_codeBlock->canCompileWithDFG(); +#endif + + // Just add a little bit of randomness to the codegen + if (m_randomGenerator.getUint32() & 1) + nop(); + + preserveReturnAddressAfterCall(regT2); + emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC); + emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock); + + Label beginLabel(this); + + sampleCodeBlock(m_codeBlock); +#if ENABLE(OPCODE_SAMPLING) + sampleInstruction(m_codeBlock->instructions().begin()); +#endif + + Jump registerFileCheck; + if (m_codeBlock->codeType() == FunctionCode) { +#if ENABLE(DFG_JIT) +#if DFG_ENABLE(SUCCESS_STATS) + static SamplingCounter counter("orignalJIT"); + emitCount(counter); +#endif +#endif + +#if ENABLE(VALUE_PROFILER) + ASSERT(m_bytecodeOffset == (unsigned)-1); + if (shouldEmitProfiling()) { + m_codeBlock->setArgumentValueProfileSize(m_codeBlock->m_numParameters); + for (int argument = 0; argument < m_codeBlock->m_numParameters; ++argument) { + // If this is a constructor, then we want to put in a dummy profiling site (to + // keep things consistent) but we don't actually want to record the dummy value. + if (m_codeBlock->m_isConstructor && !argument) + continue; + int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast(sizeof(Register)); +#if USE(JSVALUE64) + loadPtr(Address(callFrameRegister, offset), regT0); +#elif USE(JSVALUE32_64) + load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); + load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); +#endif + emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument)); + } + } +#endif + + addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1); + registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT1); + } + + Label functionBody = label(); + +#if ENABLE(VALUE_PROFILER) + if (m_canBeOptimized) + add32(Imm32(1), AbsoluteAddress(&m_codeBlock->m_executionEntryCount)); +#endif + + privateCompileMainPass(); + privateCompileLinkPass(); + privateCompileSlowCases(); + + Label arityCheck; + if (m_codeBlock->codeType() == FunctionCode) { + registerFileCheck.link(this); + m_bytecodeOffset = 0; + JITStubCall(this, cti_register_file_check).call(); +#ifndef NDEBUG + m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs. +#endif + jump(functionBody); + + arityCheck = label(); + preserveReturnAddressAfterCall(regT2); + emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC); + emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock); + + load32(payloadFor(RegisterFile::ArgumentCount), regT1); + branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this); + + JITStubCall(this, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck).call(callFrameRegister); + + jump(beginLabel); + } + + ASSERT(m_jmpTable.isEmpty()); + + LinkBuffer patchBuffer(*m_globalData, this); + + // Translate vPC offsets into addresses in JIT generated code, for switch tables. 
+ for (unsigned i = 0; i < m_switches.size(); ++i) { + SwitchRecord record = m_switches[i]; + unsigned bytecodeOffset = record.bytecodeOffset; + + if (record.type != SwitchRecord::String) { + ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character); + ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size()); + + record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]); + + for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) { + unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j]; + record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault; + } + } else { + ASSERT(record.type == SwitchRecord::String); + + record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]); + + StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end(); + for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) { + unsigned offset = it->second.branchOffset; + it->second.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault; + } + } + } + + for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) { + HandlerInfo& handler = m_codeBlock->exceptionHandler(i); + handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]); + } + + for (Vector::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) { + if (iter->to) + patchBuffer.link(iter->from, FunctionPtr(iter->to)); + } + + if (m_codeBlock->needsCallReturnIndices()) { + m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size()); + for (Vector::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) + m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeOffset)); + } + + // Link absolute addresses for jsr + for (Vector::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter) + patchBuffer.patch(iter->storeLocation, patchBuffer.locationOf(iter->target).executableAddress()); + + m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccessCompilationInfo.size()); + for (unsigned i = 0; i < m_propertyAccessCompilationInfo.size(); ++i) { + StructureStubInfo& info = m_codeBlock->structureStubInfo(i); + ASSERT(m_propertyAccessCompilationInfo[i].bytecodeIndex != std::numeric_limits::max()); + info.bytecodeIndex = m_propertyAccessCompilationInfo[i].bytecodeIndex; + info.callReturnLocation = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].callReturnLocation); + info.hotPathBegin = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].hotPathBegin); + } + m_codeBlock->setNumberOfCallLinkInfos(m_callStructureStubCompilationInfo.size()); + for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) { + CallLinkInfo& info = m_codeBlock->callLinkInfo(i); + info.callType = m_callStructureStubCompilationInfo[i].callType; + info.bytecodeIndex = m_callStructureStubCompilationInfo[i].bytecodeIndex; + info.callReturnLocation = CodeLocationLabel(patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation)); + info.hotPathBegin = 
patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin); + info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther); + } + unsigned methodCallCount = m_methodCallCompilationInfo.size(); + m_codeBlock->addMethodCallLinkInfos(methodCallCount); + for (unsigned i = 0; i < methodCallCount; ++i) { + MethodCallLinkInfo& info = m_codeBlock->methodCallLinkInfo(i); + info.bytecodeIndex = m_methodCallCompilationInfo[i].bytecodeIndex; + info.cachedStructure.setLocation(patchBuffer.locationOf(m_methodCallCompilationInfo[i].structureToCompare)); + info.callReturnLocation = m_codeBlock->structureStubInfo(m_methodCallCompilationInfo[i].propertyAccessIndex).callReturnLocation; + } + +#if ENABLE(DFG_JIT) + if (m_canBeOptimized) { + CompactJITCodeMap::Encoder jitCodeMapEncoder; + for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) { + if (m_labels[bytecodeOffset].isSet()) + jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset])); + } + m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish()); + } +#endif + + if (m_codeBlock->codeType() == FunctionCode && functionEntryArityCheck) + *functionEntryArityCheck = patchBuffer.locationOf(arityCheck); + + CodeRef result = patchBuffer.finalizeCode(); + +#if ENABLE(JIT_VERBOSE) + printf("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end()); +#endif + + return JITCode(result, JITCode::BaselineJIT); +} + +void JIT::linkFor(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, JSGlobalData* globalData, CodeSpecializationKind kind) +{ + RepatchBuffer repatchBuffer(callerCodeBlock); + + ASSERT(!callLinkInfo->isLinked()); + callLinkInfo->callee.set(*globalData, callLinkInfo->hotPathBegin, callerCodeBlock->ownerExecutable(), callee); + callLinkInfo->lastSeenCallee.set(*globalData, callerCodeBlock->ownerExecutable(), callee); + repatchBuffer.relink(callLinkInfo->hotPathOther, code); + + if (calleeCodeBlock) + calleeCodeBlock->linkIncomingCall(callLinkInfo); + + // Patch the slow patch so we do not continue to try to link. + if (kind == CodeForCall) { + repatchBuffer.relink(CodeLocationNearCall(callLinkInfo->callReturnLocation), globalData->jitStubs->ctiVirtualCall()); + return; + } + + ASSERT(kind == CodeForConstruct); + repatchBuffer.relink(CodeLocationNearCall(callLinkInfo->callReturnLocation), globalData->jitStubs->ctiVirtualConstruct()); +} + +} // namespace JSC + +#endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h new file mode 100644 index 000000000..750b9d818 --- /dev/null +++ b/Source/JavaScriptCore/jit/JIT.h @@ -0,0 +1,1175 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JIT_h +#define JIT_h + +#if ENABLE(JIT) + +// Verbose logging of code generation +#define ENABLE_JIT_VERBOSE 0 +// Verbose logging for OSR-related code. +#define ENABLE_JIT_VERBOSE_OSR 0 + +// We've run into some problems where changing the size of the class JIT leads to +// performance fluctuations. Try forcing alignment in an attempt to stabalize this. +#if COMPILER(GCC) +#define JIT_CLASS_ALIGNMENT __attribute__ ((aligned (32))) +#else +#define JIT_CLASS_ALIGNMENT +#endif + +#define ASSERT_JIT_OFFSET_UNUSED(variable, actual, expected) ASSERT_WITH_MESSAGE_UNUSED(variable, actual == expected, "JIT Offset \"%s\" should be %d, not %d.\n", #expected, static_cast(expected), static_cast(actual)); +#define ASSERT_JIT_OFFSET(actual, expected) ASSERT_WITH_MESSAGE(actual == expected, "JIT Offset \"%s\" should be %d, not %d.\n", #expected, static_cast(expected), static_cast(actual)); + +#include "CodeBlock.h" +#include "CompactJITCodeMap.h" +#include "Interpreter.h" +#include "JSInterfaceJIT.h" +#include "Opcode.h" +#include "Profiler.h" +#include + +namespace JSC { + + class CodeBlock; + class FunctionExecutable; + class JIT; + class JSPropertyNameIterator; + class Interpreter; + class Register; + class RegisterFile; + class ScopeChainNode; + class StructureChain; + + struct CallLinkInfo; + struct Instruction; + struct OperandTypes; + struct PolymorphicAccessStructureList; + struct SimpleJumpTable; + struct StringJumpTable; + struct StructureStubInfo; + + struct CallRecord { + MacroAssembler::Call from; + unsigned bytecodeOffset; + void* to; + + CallRecord() + { + } + + CallRecord(MacroAssembler::Call from, unsigned bytecodeOffset, void* to = 0) + : from(from) + , bytecodeOffset(bytecodeOffset) + , to(to) + { + } + }; + + struct JumpTable { + MacroAssembler::Jump from; + unsigned toBytecodeOffset; + + JumpTable(MacroAssembler::Jump f, unsigned t) + : from(f) + , toBytecodeOffset(t) + { + } + }; + + struct SlowCaseEntry { + MacroAssembler::Jump from; + unsigned to; + unsigned hint; + + SlowCaseEntry(MacroAssembler::Jump f, unsigned t, unsigned h = 0) + : from(f) + , to(t) + , hint(h) + { + } + }; + + struct SwitchRecord { + enum Type { + Immediate, + Character, + String + }; + + Type type; + + union { + SimpleJumpTable* simpleJumpTable; + StringJumpTable* stringJumpTable; + } jumpTable; + + unsigned bytecodeOffset; + unsigned defaultOffset; + + SwitchRecord(SimpleJumpTable* jumpTable, unsigned bytecodeOffset, unsigned defaultOffset, Type type) + : type(type) + , bytecodeOffset(bytecodeOffset) + , defaultOffset(defaultOffset) + { + this->jumpTable.simpleJumpTable = jumpTable; + } + + SwitchRecord(StringJumpTable* jumpTable, unsigned bytecodeOffset, unsigned defaultOffset) + : type(String) + , bytecodeOffset(bytecodeOffset) + , defaultOffset(defaultOffset) + { + 
this->jumpTable.stringJumpTable = jumpTable; + } + }; + + struct PropertyStubCompilationInfo { + unsigned bytecodeIndex; + MacroAssembler::Call callReturnLocation; + MacroAssembler::Label hotPathBegin; + +#if !ASSERT_DISABLED + PropertyStubCompilationInfo() + : bytecodeIndex(std::numeric_limits::max()) + { + } +#endif + }; + + struct StructureStubCompilationInfo { + MacroAssembler::DataLabelPtr hotPathBegin; + MacroAssembler::Call hotPathOther; + MacroAssembler::Call callReturnLocation; + CallLinkInfo::CallType callType; + unsigned bytecodeIndex; + }; + + struct MethodCallCompilationInfo { + MethodCallCompilationInfo(unsigned bytecodeIndex, unsigned propertyAccessIndex) + : bytecodeIndex(bytecodeIndex) + , propertyAccessIndex(propertyAccessIndex) + { + } + + unsigned bytecodeIndex; + MacroAssembler::DataLabelPtr structureToCompare; + unsigned propertyAccessIndex; + }; + + // Near calls can only be patched to other JIT code, regular calls can be patched to JIT code or relinked to stub functions. + void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction); + void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction); + void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction); + + class JIT : private JSInterfaceJIT { + friend class JITStubCall; + + using MacroAssembler::Jump; + using MacroAssembler::JumpList; + using MacroAssembler::Label; + + static const int patchGetByIdDefaultStructure = -1; + static const int patchGetByIdDefaultOffset = 0; + // Magic number - initial offset cannot be representable as a signed 8bit value, or the X86Assembler + // will compress the displacement, and we may not be able to fit a patched offset. 
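    // [Editor's note, not part of the imported patch] The "magic number" below is 256:
    // x86 encodes memory displacements either as a sign-extended 8-bit byte (-128..127)
    // or as a full 32-bit field. Seeding the cache with an offset that cannot fit in the
    // 8-bit form forces the assembler to emit the 32-bit displacement, leaving room for
    // whatever real offset is patched in later.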
+ static const int patchPutByIdDefaultOffset = 256; + + public: + static JITCode compile(JSGlobalData* globalData, CodeBlock* codeBlock, CodePtr* functionEntryArityCheck = 0) + { + return JIT(globalData, codeBlock).privateCompile(functionEntryArityCheck); + } + + static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress) + { + JIT jit(globalData, codeBlock); + jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, ident, slot, cachedOffset, returnAddress, callFrame); + } + + static void compileGetByIdSelfList(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset) + { + JIT jit(globalData, codeBlock); + jit.privateCompileGetByIdSelfList(stubInfo, polymorphicStructures, currentIndex, structure, ident, slot, cachedOffset); + } + static void compileGetByIdProtoList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset) + { + JIT jit(globalData, codeBlock); + jit.privateCompileGetByIdProtoList(stubInfo, prototypeStructureList, currentIndex, structure, prototypeStructure, ident, slot, cachedOffset, callFrame); + } + static void compileGetByIdChainList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset) + { + JIT jit(globalData, codeBlock); + jit.privateCompileGetByIdChainList(stubInfo, prototypeStructureList, currentIndex, structure, chain, count, ident, slot, cachedOffset, callFrame); + } + + static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress) + { + JIT jit(globalData, codeBlock); + jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, ident, slot, cachedOffset, returnAddress, callFrame); + } + + static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct) + { + JIT jit(globalData, codeBlock); + jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress, direct); + } + + static PassRefPtr compileCTIMachineTrampolines(JSGlobalData* globalData, TrampolineStructure *trampolines) + { + if (!globalData->canUseJIT()) + return 0; + JIT jit(globalData, 0); + return jit.privateCompileCTIMachineTrampolines(globalData, trampolines); + } + + static CodeRef compileCTINativeCall(JSGlobalData* globalData, NativeFunction func) + { + if (!globalData->canUseJIT()) + return CodeRef(); + JIT jit(globalData, 0); + 
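        // [Editor's note, not part of the imported patch] The CodeBlock argument above is 0
        // because native-call thunks (like the machine trampolines) are not generated for any
        // particular piece of bytecode; the JIT instance is used only as a code emitter here.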
return jit.privateCompileCTINativeCall(globalData, func); + } + + static void resetPatchGetById(RepatchBuffer&, StructureStubInfo*); + static void resetPatchPutById(RepatchBuffer&, StructureStubInfo*); + static void patchGetByIdSelf(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress); + static void patchPutByIdReplace(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct); + static void patchMethodCallProto(JSGlobalData&, CodeBlock* codeblock, MethodCallLinkInfo&, JSObject*, Structure*, JSObject*, ReturnAddressPtr); + + static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ReturnAddressPtr returnAddress) + { + JIT jit(globalData, codeBlock); + return jit.privateCompilePatchGetArrayLength(returnAddress); + } + + static void linkFor(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, JSGlobalData*, CodeSpecializationKind); + + private: + struct JSRInfo { + DataLabelPtr storeLocation; + Label target; + + JSRInfo(DataLabelPtr storeLocation, Label targetLocation) + : storeLocation(storeLocation) + , target(targetLocation) + { + } + }; + + JIT(JSGlobalData*, CodeBlock* = 0); + + void privateCompileMainPass(); + void privateCompileLinkPass(); + void privateCompileSlowCases(); + JITCode privateCompile(CodePtr* functionEntryArityCheck); + void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame); + void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, const Identifier&, const PropertySlot&, size_t cachedOffset); + void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, CallFrame* callFrame); + void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain* chain, size_t count, const Identifier&, const PropertySlot&, size_t cachedOffset, CallFrame* callFrame); + void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame); + void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ReturnAddressPtr returnAddress, bool direct); + + PassRefPtr privateCompileCTIMachineTrampolines(JSGlobalData*, TrampolineStructure*); + Label privateCompileCTINativeCall(JSGlobalData*, bool isConstruct = false); + CodeRef privateCompileCTINativeCall(JSGlobalData*, NativeFunction); + void privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress); + + static bool isDirectPutById(StructureStubInfo*); + + void addSlowCase(Jump); + void addSlowCase(JumpList); + void addSlowCase(); + void addJump(Jump, int); + void emitJumpSlowToHot(Jump, int); + + void compileOpCall(OpcodeID, Instruction*, unsigned callLinkInfoIndex); + void compileOpCallSlowCase(OpcodeID, Instruction*, Vector::iterator&, unsigned callLinkInfoIndex); + void compileLoadVarargs(Instruction*); + void compileCallEval(); + void compileCallEvalSlowCase(Vector::iterator&); + + enum CompileOpStrictEqType { OpStrictEq, OpNStrictEq }; + void 
compileOpStrictEq(Instruction* instruction, CompileOpStrictEqType type); + bool isOperandConstantImmediateDouble(unsigned src); + + void emitLoadDouble(int index, FPRegisterID value); + void emitLoadInt32ToDouble(int index, FPRegisterID value); + Jump emitJumpIfNotObject(RegisterID structureReg); + Jump emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType); + + void testPrototype(JSValue, JumpList& failureCases); + + enum WriteBarrierMode { UnconditionalWriteBarrier, ShouldFilterImmediates }; + // value register in write barrier is used before any scratch registers + // so may safely be the same as either of the scratch registers. + void emitWriteBarrier(RegisterID owner, RegisterID valueTag, RegisterID scratch, RegisterID scratch2, WriteBarrierMode, WriteBarrierUseKind); + void emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode, WriteBarrierUseKind); + + template void emitAllocateBasicJSObject(StructureType, RegisterID result, RegisterID storagePtr); + template void emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID storagePtr); + void emitAllocateJSFunction(FunctionExecutable*, RegisterID scopeChain, RegisterID result, RegisterID storagePtr); + + enum ValueProfilingSiteKind { FirstProfilingSite, SubsequentProfilingSite }; +#if ENABLE(VALUE_PROFILER) + // This assumes that the value to profile is in regT0 and that regT3 is available for + // scratch. + void emitValueProfilingSite(ValueProfile*); + void emitValueProfilingSite(ValueProfilingSiteKind); +#else + void emitValueProfilingSite(ValueProfilingSiteKind) { } +#endif + +#if USE(JSVALUE32_64) + bool getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant); + + void emitLoadTag(int index, RegisterID tag); + void emitLoadPayload(int index, RegisterID payload); + + void emitLoad(const JSValue& v, RegisterID tag, RegisterID payload); + void emitLoad(int index, RegisterID tag, RegisterID payload, RegisterID base = callFrameRegister); + void emitLoad2(int index1, RegisterID tag1, RegisterID payload1, int index2, RegisterID tag2, RegisterID payload2); + + void emitStore(int index, RegisterID tag, RegisterID payload, RegisterID base = callFrameRegister); + void emitStore(int index, const JSValue constant, RegisterID base = callFrameRegister); + void emitStoreInt32(int index, RegisterID payload, bool indexIsInt32 = false); + void emitStoreInt32(int index, TrustedImm32 payload, bool indexIsInt32 = false); + void emitStoreAndMapInt32(int index, RegisterID tag, RegisterID payload, bool indexIsInt32, size_t opcodeLength); + void emitStoreCell(int index, RegisterID payload, bool indexIsCell = false); + void emitStoreBool(int index, RegisterID payload, bool indexIsBool = false); + void emitStoreDouble(int index, FPRegisterID value); + + bool isLabeled(unsigned bytecodeOffset); + void map(unsigned bytecodeOffset, int virtualRegisterIndex, RegisterID tag, RegisterID payload); + void unmap(RegisterID); + void unmap(); + bool isMapped(int virtualRegisterIndex); + bool getMappedPayload(int virtualRegisterIndex, RegisterID& payload); + bool getMappedTag(int virtualRegisterIndex, RegisterID& tag); + + void emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex); + void emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterID tag); + + void compileGetByIdHotPath(); + void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector::iterator& iter, bool isMethodCheck = false); + void compileGetDirectOffset(RegisterID base, 
RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset); + void compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset); + void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset); + void compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, size_t cachedOffset); + + // Arithmetic opcode helpers + void emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType); + void emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType); + void emitBinaryDoubleOp(OpcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters = true, bool op2IsInRegisters = true); + +#if CPU(X86) + // These architecture specific value are used to enable patching - see comment on op_put_by_id. + static const int patchOffsetPutByIdStructure = 7; + static const int patchOffsetPutByIdPropertyMapOffset1 = 22; + static const int patchOffsetPutByIdPropertyMapOffset2 = 28; + // These architecture specific value are used to enable patching - see comment on op_get_by_id. + static const int patchOffsetGetByIdStructure = 7; + static const int patchOffsetGetByIdBranchToSlowCase = 13; + static const int patchOffsetGetByIdPropertyMapOffset1 = 19; + static const int patchOffsetGetByIdPropertyMapOffset2 = 22; + static const int patchOffsetGetByIdPutResult = 22; +#if ENABLE(OPCODE_SAMPLING) + static const int patchOffsetGetByIdSlowCaseCall = 37; +#else + static const int patchOffsetGetByIdSlowCaseCall = 33; +#endif + static const int patchOffsetOpCallCompareToJump = 6; + + static const int patchOffsetMethodCheckProtoObj = 11; + static const int patchOffsetMethodCheckProtoStruct = 18; + static const int patchOffsetMethodCheckPutFunction = 29; +#elif CPU(ARM_TRADITIONAL) + // These architecture specific value are used to enable patching - see comment on op_put_by_id. + static const int patchOffsetPutByIdStructure = 4; + static const int patchOffsetPutByIdPropertyMapOffset1 = 20; + static const int patchOffsetPutByIdPropertyMapOffset2 = 28; + // These architecture specific value are used to enable patching - see comment on op_get_by_id. 
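    // [Editor's note, not part of the imported patch] The patchOffset* constants record the
    // fixed byte distance from the start of an inline-cache code sequence to each field the
    // repatching code later overwrites (the Structure* being compared, the property-storage
    // offsets, the instruction that stores the result), so those fields can be located without
    // disassembling. On back ends with a constant pool, such as this ARM port, the paired
    // sequence*InstructionSpace/ConstantSpace values reserve enough room up front that a pool
    // flush cannot land in the middle of a sequence and invalidate these offsets.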
+ static const int patchOffsetGetByIdStructure = 4; + static const int patchOffsetGetByIdBranchToSlowCase = 16; + static const int patchOffsetGetByIdPropertyMapOffset1 = 20; + static const int patchOffsetGetByIdPropertyMapOffset2 = 28; + static const int patchOffsetGetByIdPutResult = 36; +#if ENABLE(OPCODE_SAMPLING) + #error "OPCODE_SAMPLING is not yet supported" +#else + static const int patchOffsetGetByIdSlowCaseCall = 40; +#endif + static const int patchOffsetOpCallCompareToJump = 12; + + static const int patchOffsetMethodCheckProtoObj = 12; + static const int patchOffsetMethodCheckProtoStruct = 20; + static const int patchOffsetMethodCheckPutFunction = 32; + + // sequenceOpCall + static const int sequenceOpCallInstructionSpace = 12; + static const int sequenceOpCallConstantSpace = 2; + // sequenceMethodCheck + static const int sequenceMethodCheckInstructionSpace = 40; + static const int sequenceMethodCheckConstantSpace = 6; + // sequenceGetByIdHotPath + static const int sequenceGetByIdHotPathInstructionSpace = 36; + static const int sequenceGetByIdHotPathConstantSpace = 4; + // sequenceGetByIdSlowCase + static const int sequenceGetByIdSlowCaseInstructionSpace = 56; + static const int sequenceGetByIdSlowCaseConstantSpace = 3; + // sequencePutById + static const int sequencePutByIdInstructionSpace = 36; + static const int sequencePutByIdConstantSpace = 4; +#elif CPU(ARM_THUMB2) + // These architecture specific value are used to enable patching - see comment on op_put_by_id. + static const int patchOffsetPutByIdStructure = 10; + static const int patchOffsetPutByIdPropertyMapOffset1 = 36; + static const int patchOffsetPutByIdPropertyMapOffset2 = 48; + // These architecture specific value are used to enable patching - see comment on op_get_by_id. + static const int patchOffsetGetByIdStructure = 10; + static const int patchOffsetGetByIdBranchToSlowCase = 26; + static const int patchOffsetGetByIdPropertyMapOffset1 = 28; + static const int patchOffsetGetByIdPropertyMapOffset2 = 30; + static const int patchOffsetGetByIdPutResult = 32; +#if ENABLE(OPCODE_SAMPLING) + #error "OPCODE_SAMPLING is not yet supported" +#else + static const int patchOffsetGetByIdSlowCaseCall = 40; +#endif + static const int patchOffsetOpCallCompareToJump = 16; + + static const int patchOffsetMethodCheckProtoObj = 24; + static const int patchOffsetMethodCheckProtoStruct = 34; + static const int patchOffsetMethodCheckPutFunction = 58; + + // sequenceOpCall + static const int sequenceOpCallInstructionSpace = 12; + static const int sequenceOpCallConstantSpace = 2; + // sequenceMethodCheck + static const int sequenceMethodCheckInstructionSpace = 40; + static const int sequenceMethodCheckConstantSpace = 6; + // sequenceGetByIdHotPath + static const int sequenceGetByIdHotPathInstructionSpace = 36; + static const int sequenceGetByIdHotPathConstantSpace = 4; + // sequenceGetByIdSlowCase + static const int sequenceGetByIdSlowCaseInstructionSpace = 40; + static const int sequenceGetByIdSlowCaseConstantSpace = 2; + // sequencePutById + static const int sequencePutByIdInstructionSpace = 36; + static const int sequencePutByIdConstantSpace = 4; +#elif CPU(MIPS) +#if WTF_MIPS_ISA(1) + static const int patchOffsetPutByIdStructure = 16; + static const int patchOffsetPutByIdPropertyMapOffset1 = 56; + static const int patchOffsetPutByIdPropertyMapOffset2 = 72; + static const int patchOffsetGetByIdStructure = 16; + static const int patchOffsetGetByIdBranchToSlowCase = 48; + static const int patchOffsetGetByIdPropertyMapOffset1 = 56; + 
static const int patchOffsetGetByIdPropertyMapOffset2 = 76; + static const int patchOffsetGetByIdPutResult = 96; +#if ENABLE(OPCODE_SAMPLING) + #error "OPCODE_SAMPLING is not yet supported" +#else + static const int patchOffsetGetByIdSlowCaseCall = 56; +#endif + static const int patchOffsetOpCallCompareToJump = 32; + static const int patchOffsetMethodCheckProtoObj = 32; + static const int patchOffsetMethodCheckProtoStruct = 56; + static const int patchOffsetMethodCheckPutFunction = 88; +#else // WTF_MIPS_ISA(1) + static const int patchOffsetPutByIdStructure = 12; + static const int patchOffsetPutByIdPropertyMapOffset1 = 48; + static const int patchOffsetPutByIdPropertyMapOffset2 = 64; + static const int patchOffsetGetByIdStructure = 12; + static const int patchOffsetGetByIdBranchToSlowCase = 44; + static const int patchOffsetGetByIdPropertyMapOffset1 = 48; + static const int patchOffsetGetByIdPropertyMapOffset2 = 64; + static const int patchOffsetGetByIdPutResult = 80; +#if ENABLE(OPCODE_SAMPLING) + #error "OPCODE_SAMPLING is not yet supported" +#else + static const int patchOffsetGetByIdSlowCaseCall = 56; +#endif + static const int patchOffsetOpCallCompareToJump = 32; + static const int patchOffsetMethodCheckProtoObj = 32; + static const int patchOffsetMethodCheckProtoStruct = 52; + static const int patchOffsetMethodCheckPutFunction = 84; +#endif +#elif CPU(SH4) + // These architecture specific value are used to enable patching - see comment on op_put_by_id. + static const int patchOffsetGetByIdStructure = 6; + static const int patchOffsetPutByIdPropertyMapOffset = 24; + static const int patchOffsetPutByIdStructure = 6; + // These architecture specific value are used to enable patching - see comment on op_get_by_id. + static const int patchOffsetGetByIdBranchToSlowCase = 10; + static const int patchOffsetGetByIdPropertyMapOffset = 24; + static const int patchOffsetGetByIdPutResult = 24; + + // sequenceOpCall + static const int sequenceOpCallInstructionSpace = 12; + static const int sequenceOpCallConstantSpace = 2; + // sequenceMethodCheck + static const int sequenceMethodCheckInstructionSpace = 40; + static const int sequenceMethodCheckConstantSpace = 6; + // sequenceGetByIdHotPath + static const int sequenceGetByIdHotPathInstructionSpace = 36; + static const int sequenceGetByIdHotPathConstantSpace = 5; + // sequenceGetByIdSlowCase + static const int sequenceGetByIdSlowCaseInstructionSpace = 30; + static const int sequenceGetByIdSlowCaseConstantSpace = 3; + // sequencePutById + static const int sequencePutByIdInstructionSpace = 36; + static const int sequencePutByIdConstantSpace = 5; + + static const int patchOffsetGetByIdPropertyMapOffset1 = 20; + static const int patchOffsetGetByIdPropertyMapOffset2 = 22; + + static const int patchOffsetPutByIdPropertyMapOffset1 = 20; + static const int patchOffsetPutByIdPropertyMapOffset2 = 26; + +#if ENABLE(OPCODE_SAMPLING) + static const int patchOffsetGetByIdSlowCaseCall = 0; // FIMXE +#else + static const int patchOffsetGetByIdSlowCaseCall = 26; +#endif + static const int patchOffsetOpCallCompareToJump = 4; + + static const int patchOffsetMethodCheckProtoObj = 12; + static const int patchOffsetMethodCheckProtoStruct = 20; + static const int patchOffsetMethodCheckPutFunction = 32; +#else +#error "JSVALUE32_64 not supported on this platform." 
+#endif + +#else // USE(JSVALUE32_64) + void emitGetVirtualRegister(int src, RegisterID dst); + void emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2); + void emitPutVirtualRegister(unsigned dst, RegisterID from = regT0); + void emitStoreCell(unsigned dst, RegisterID payload, bool /* only used in JSValue32_64 */ = false) + { + emitPutVirtualRegister(dst, payload); + } + + int32_t getConstantOperandImmediateInt(unsigned src); + + void killLastResultRegister(); + + Jump emitJumpIfJSCell(RegisterID); + Jump emitJumpIfBothJSCells(RegisterID, RegisterID, RegisterID); + void emitJumpSlowCaseIfJSCell(RegisterID); + Jump emitJumpIfNotJSCell(RegisterID); + void emitJumpSlowCaseIfNotJSCell(RegisterID); + void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg); +#if USE(JSVALUE32_64) + JIT::Jump emitJumpIfImmediateNumber(RegisterID reg) + { + return emitJumpIfImmediateInteger(reg); + } + + JIT::Jump emitJumpIfNotImmediateNumber(RegisterID reg) + { + return emitJumpIfNotImmediateInteger(reg); + } +#endif + Jump emitJumpIfImmediateInteger(RegisterID); + Jump emitJumpIfNotImmediateInteger(RegisterID); + Jump emitJumpIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID); + void emitJumpSlowCaseIfNotImmediateInteger(RegisterID); + void emitJumpSlowCaseIfNotImmediateNumber(RegisterID); + void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID); + +#if USE(JSVALUE32_64) + void emitFastArithDeTagImmediate(RegisterID); + Jump emitFastArithDeTagImmediateJumpIfZero(RegisterID); +#endif + void emitFastArithReTagImmediate(RegisterID src, RegisterID dest); + void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest); + + void emitTagAsBoolImmediate(RegisterID reg); + void compileBinaryArithOp(OpcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi); +#if USE(JSVALUE64) + void compileBinaryArithOpSlowCase(OpcodeID, Vector::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase); +#else + void compileBinaryArithOpSlowCase(OpcodeID, Vector::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes); +#endif + + void compileGetByIdHotPath(int baseVReg, Identifier*); + void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector::iterator& iter, bool isMethodCheck = false); + void compileGetDirectOffset(RegisterID base, RegisterID result, size_t cachedOffset); + void compileGetDirectOffset(JSObject* base, RegisterID result, size_t cachedOffset); + void compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch); + void compilePutDirectOffset(RegisterID base, RegisterID value, size_t cachedOffset); + +#if CPU(X86_64) + // These architecture specific value are used to enable patching - see comment on op_put_by_id. + static const int patchOffsetPutByIdStructure = 10; + static const int patchOffsetPutByIdPropertyMapOffset = 31; + // These architecture specific value are used to enable patching - see comment on op_get_by_id. 
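    // [Editor's note, not part of the imported patch] In this USE(JSVALUE64) branch a property
    // access loads the whole JSValue with a single 64-bit move, so each cache needs only one
    // patchable property-map offset; the USE(JSVALUE32_64) code above patches two
    // (PropertyMapOffset1/PropertyMapOffset2) because the tag and payload words are loaded
    // separately.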
+ static const int patchOffsetGetByIdStructure = 10; + static const int patchOffsetGetByIdBranchToSlowCase = 20; + static const int patchOffsetGetByIdPropertyMapOffset = 28; + static const int patchOffsetGetByIdPutResult = 28; +#if ENABLE(OPCODE_SAMPLING) + static const int patchOffsetGetByIdSlowCaseCall = 64; +#else + static const int patchOffsetGetByIdSlowCaseCall = 54; +#endif + static const int patchOffsetOpCallCompareToJump = 9; + + static const int patchOffsetMethodCheckProtoObj = 20; + static const int patchOffsetMethodCheckProtoStruct = 30; + static const int patchOffsetMethodCheckPutFunction = 50; +#elif CPU(X86) + // These architecture specific value are used to enable patching - see comment on op_put_by_id. + static const int patchOffsetPutByIdStructure = 7; + static const int patchOffsetPutByIdPropertyMapOffset = 22; + // These architecture specific value are used to enable patching - see comment on op_get_by_id. + static const int patchOffsetGetByIdStructure = 7; + static const int patchOffsetGetByIdBranchToSlowCase = 13; + static const int patchOffsetGetByIdPropertyMapOffset = 22; + static const int patchOffsetGetByIdPutResult = 22; +#if ENABLE(OPCODE_SAMPLING) + static const int patchOffsetGetByIdSlowCaseCall = 33; +#else + static const int patchOffsetGetByIdSlowCaseCall = 23; +#endif + static const int patchOffsetOpCallCompareToJump = 6; + + static const int patchOffsetMethodCheckProtoObj = 11; + static const int patchOffsetMethodCheckProtoStruct = 18; + static const int patchOffsetMethodCheckPutFunction = 29; +#elif CPU(ARM_THUMB2) + // These architecture specific value are used to enable patching - see comment on op_put_by_id. + static const int patchOffsetPutByIdStructure = 10; + static const int patchOffsetPutByIdPropertyMapOffset = 46; + // These architecture specific value are used to enable patching - see comment on op_get_by_id. + static const int patchOffsetGetByIdStructure = 10; + static const int patchOffsetGetByIdBranchToSlowCase = 26; + static const int patchOffsetGetByIdPropertyMapOffset = 46; + static const int patchOffsetGetByIdPutResult = 50; +#if ENABLE(OPCODE_SAMPLING) + static const int patchOffsetGetByIdSlowCaseCall = 0; // FIMXE +#else + static const int patchOffsetGetByIdSlowCaseCall = 28; +#endif + static const int patchOffsetOpCallCompareToJump = 16; + + static const int patchOffsetMethodCheckProtoObj = 24; + static const int patchOffsetMethodCheckProtoStruct = 34; + static const int patchOffsetMethodCheckPutFunction = 58; +#elif CPU(ARM_TRADITIONAL) + // These architecture specific value are used to enable patching - see comment on op_put_by_id. + static const int patchOffsetPutByIdStructure = 4; + static const int patchOffsetPutByIdPropertyMapOffset = 20; + // These architecture specific value are used to enable patching - see comment on op_get_by_id. 
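    // [Editor's note, not part of the imported patch] The same logical patch points get
    // different numbers in each CPU() block because instruction encodings differ in length
    // (variable-length x86 versus fixed 4-byte ARM/MIPS and mixed 2/4-byte Thumb-2), so the
    // byte offsets into the emitted sequence shift per architecture. The ASSERT_JIT_OFFSET
    // macros defined earlier in this header check these constants against the actually
    // generated code at compile time of the stubs.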
+ static const int patchOffsetGetByIdStructure = 4; + static const int patchOffsetGetByIdBranchToSlowCase = 16; + static const int patchOffsetGetByIdPropertyMapOffset = 20; + static const int patchOffsetGetByIdPutResult = 28; +#if ENABLE(OPCODE_SAMPLING) + #error "OPCODE_SAMPLING is not yet supported" +#else + static const int patchOffsetGetByIdSlowCaseCall = 28; +#endif + static const int patchOffsetOpCallCompareToJump = 12; + + static const int patchOffsetMethodCheckProtoObj = 12; + static const int patchOffsetMethodCheckProtoStruct = 20; + static const int patchOffsetMethodCheckPutFunction = 32; + + // sequenceOpCall + static const int sequenceOpCallInstructionSpace = 12; + static const int sequenceOpCallConstantSpace = 2; + // sequenceMethodCheck + static const int sequenceMethodCheckInstructionSpace = 40; + static const int sequenceMethodCheckConstantSpace = 6; + // sequenceGetByIdHotPath + static const int sequenceGetByIdHotPathInstructionSpace = 28; + static const int sequenceGetByIdHotPathConstantSpace = 3; + // sequenceGetByIdSlowCase + static const int sequenceGetByIdSlowCaseInstructionSpace = 32; + static const int sequenceGetByIdSlowCaseConstantSpace = 2; + // sequencePutById + static const int sequencePutByIdInstructionSpace = 28; + static const int sequencePutByIdConstantSpace = 3; +#elif CPU(MIPS) +#if WTF_MIPS_ISA(1) + static const int patchOffsetPutByIdStructure = 16; + static const int patchOffsetPutByIdPropertyMapOffset = 68; + static const int patchOffsetGetByIdStructure = 16; + static const int patchOffsetGetByIdBranchToSlowCase = 48; + static const int patchOffsetGetByIdPropertyMapOffset = 68; + static const int patchOffsetGetByIdPutResult = 88; +#if ENABLE(OPCODE_SAMPLING) + #error "OPCODE_SAMPLING is not yet supported" +#else + static const int patchOffsetGetByIdSlowCaseCall = 40; +#endif + static const int patchOffsetOpCallCompareToJump = 32; + static const int patchOffsetMethodCheckProtoObj = 32; + static const int patchOffsetMethodCheckProtoStruct = 56; + static const int patchOffsetMethodCheckPutFunction = 88; +#else // WTF_MIPS_ISA(1) + static const int patchOffsetPutByIdStructure = 12; + static const int patchOffsetPutByIdPropertyMapOffset = 60; + static const int patchOffsetGetByIdStructure = 12; + static const int patchOffsetGetByIdBranchToSlowCase = 44; + static const int patchOffsetGetByIdPropertyMapOffset = 60; + static const int patchOffsetGetByIdPutResult = 76; +#if ENABLE(OPCODE_SAMPLING) + #error "OPCODE_SAMPLING is not yet supported" +#else + static const int patchOffsetGetByIdSlowCaseCall = 40; +#endif + static const int patchOffsetOpCallCompareToJump = 32; + static const int patchOffsetMethodCheckProtoObj = 32; + static const int patchOffsetMethodCheckProtoStruct = 52; + static const int patchOffsetMethodCheckPutFunction = 84; +#endif +#endif +#endif // USE(JSVALUE32_64) + +#if (defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL) +#define BEGIN_UNINTERRUPTED_SEQUENCE(name) do { beginUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace); } while (false) +#define END_UNINTERRUPTED_SEQUENCE_FOR_PUT(name, dst) do { endUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace, dst); } while (false) +#define END_UNINTERRUPTED_SEQUENCE(name) END_UNINTERRUPTED_SEQUENCE_FOR_PUT(name, 0) + + void beginUninterruptedSequence(int, int); + void endUninterruptedSequence(int, int, int); + +#else +#define BEGIN_UNINTERRUPTED_SEQUENCE(name) do { beginUninterruptedSequence(); } while (false) +#define 
END_UNINTERRUPTED_SEQUENCE(name) do { endUninterruptedSequence(); } while (false) +#define END_UNINTERRUPTED_SEQUENCE_FOR_PUT(name, dst) do { endUninterruptedSequence(); } while (false) +#endif + + void emit_compareAndJump(OpcodeID, unsigned op1, unsigned op2, unsigned target, RelationalCondition); + void emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, DoubleCondition, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION), bool invert, Vector::iterator&); + + void emit_op_add(Instruction*); + void emit_op_bitand(Instruction*); + void emit_op_bitnot(Instruction*); + void emit_op_bitor(Instruction*); + void emit_op_bitxor(Instruction*); + void emit_op_call(Instruction*); + void emit_op_call_eval(Instruction*); + void emit_op_call_varargs(Instruction*); + void emit_op_call_put_result(Instruction*); + void emit_op_catch(Instruction*); + void emit_op_construct(Instruction*); + void emit_op_get_callee(Instruction*); + void emit_op_create_this(Instruction*); + void emit_op_convert_this(Instruction*); + void emit_op_create_arguments(Instruction*); + void emit_op_debug(Instruction*); + void emit_op_del_by_id(Instruction*); + void emit_op_div(Instruction*); + void emit_op_end(Instruction*); + void emit_op_enter(Instruction*); + void emit_op_create_activation(Instruction*); + void emit_op_eq(Instruction*); + void emit_op_eq_null(Instruction*); + void emit_op_get_by_id(Instruction*); + void emit_op_get_arguments_length(Instruction*); + void emit_op_get_by_val(Instruction*); + void emit_op_get_argument_by_val(Instruction*); + void emit_op_get_by_pname(Instruction*); + void emit_op_get_global_var(Instruction*); + void emit_op_get_scoped_var(Instruction*); + void emit_op_init_lazy_reg(Instruction*); + void emit_op_check_has_instance(Instruction*); + void emit_op_instanceof(Instruction*); + void emit_op_jeq_null(Instruction*); + void emit_op_jfalse(Instruction*); + void emit_op_jmp(Instruction*); + void emit_op_jmp_scopes(Instruction*); + void emit_op_jneq_null(Instruction*); + void emit_op_jneq_ptr(Instruction*); + void emit_op_jless(Instruction*); + void emit_op_jlesseq(Instruction*); + void emit_op_jgreater(Instruction*); + void emit_op_jgreatereq(Instruction*); + void emit_op_jnless(Instruction*); + void emit_op_jnlesseq(Instruction*); + void emit_op_jngreater(Instruction*); + void emit_op_jngreatereq(Instruction*); + void emit_op_jsr(Instruction*); + void emit_op_jtrue(Instruction*); + void emit_op_loop(Instruction*); + void emit_op_loop_hint(Instruction*); + void emit_op_loop_if_less(Instruction*); + void emit_op_loop_if_lesseq(Instruction*); + void emit_op_loop_if_greater(Instruction*); + void emit_op_loop_if_greatereq(Instruction*); + void emit_op_loop_if_true(Instruction*); + void emit_op_loop_if_false(Instruction*); + void emit_op_lshift(Instruction*); + void emit_op_method_check(Instruction*); + void emit_op_mod(Instruction*); + void emit_op_mov(Instruction*); + void emit_op_mul(Instruction*); + void emit_op_negate(Instruction*); + void emit_op_neq(Instruction*); + void emit_op_neq_null(Instruction*); + void emit_op_new_array(Instruction*); + void emit_op_new_array_buffer(Instruction*); + void emit_op_new_func(Instruction*); + void emit_op_new_func_exp(Instruction*); + void emit_op_new_object(Instruction*); + void emit_op_new_regexp(Instruction*); + void emit_op_get_pnames(Instruction*); + void emit_op_next_pname(Instruction*); + void emit_op_not(Instruction*); + void emit_op_nstricteq(Instruction*); + void emit_op_pop_scope(Instruction*); + void emit_op_post_dec(Instruction*); + 
void emit_op_post_inc(Instruction*); + void emit_op_pre_dec(Instruction*); + void emit_op_pre_inc(Instruction*); + void emit_op_profile_did_call(Instruction*); + void emit_op_profile_will_call(Instruction*); + void emit_op_push_new_scope(Instruction*); + void emit_op_push_scope(Instruction*); + void emit_op_put_by_id(Instruction*); + void emit_op_put_by_index(Instruction*); + void emit_op_put_by_val(Instruction*); + void emit_op_put_getter(Instruction*); + void emit_op_put_global_var(Instruction*); + void emit_op_put_scoped_var(Instruction*); + void emit_op_put_setter(Instruction*); + void emit_op_resolve(Instruction*); + void emit_op_resolve_base(Instruction*); + void emit_op_ensure_property_exists(Instruction*); + void emit_op_resolve_global(Instruction*, bool dynamic = false); + void emit_op_resolve_global_dynamic(Instruction*); + void emit_op_resolve_skip(Instruction*); + void emit_op_resolve_with_base(Instruction*); + void emit_op_resolve_with_this(Instruction*); + void emit_op_ret(Instruction*); + void emit_op_ret_object_or_this(Instruction*); + void emit_op_rshift(Instruction*); + void emit_op_sret(Instruction*); + void emit_op_strcat(Instruction*); + void emit_op_stricteq(Instruction*); + void emit_op_sub(Instruction*); + void emit_op_switch_char(Instruction*); + void emit_op_switch_imm(Instruction*); + void emit_op_switch_string(Instruction*); + void emit_op_tear_off_activation(Instruction*); + void emit_op_tear_off_arguments(Instruction*); + void emit_op_throw(Instruction*); + void emit_op_throw_reference_error(Instruction*); + void emit_op_to_jsnumber(Instruction*); + void emit_op_to_primitive(Instruction*); + void emit_op_unexpected_load(Instruction*); + void emit_op_urshift(Instruction*); +#if ENABLE(JIT_USE_SOFT_MODULO) + void softModulo(); +#endif + + void emitSlow_op_add(Instruction*, Vector::iterator&); + void emitSlow_op_bitand(Instruction*, Vector::iterator&); + void emitSlow_op_bitnot(Instruction*, Vector::iterator&); + void emitSlow_op_bitor(Instruction*, Vector::iterator&); + void emitSlow_op_bitxor(Instruction*, Vector::iterator&); + void emitSlow_op_call(Instruction*, Vector::iterator&); + void emitSlow_op_call_eval(Instruction*, Vector::iterator&); + void emitSlow_op_call_varargs(Instruction*, Vector::iterator&); + void emitSlow_op_construct(Instruction*, Vector::iterator&); + void emitSlow_op_convert_this(Instruction*, Vector::iterator&); + void emitSlow_op_create_this(Instruction*, Vector::iterator&); + void emitSlow_op_div(Instruction*, Vector::iterator&); + void emitSlow_op_eq(Instruction*, Vector::iterator&); + void emitSlow_op_get_by_id(Instruction*, Vector::iterator&); + void emitSlow_op_get_arguments_length(Instruction*, Vector::iterator&); + void emitSlow_op_get_by_val(Instruction*, Vector::iterator&); + void emitSlow_op_get_argument_by_val(Instruction*, Vector::iterator&); + void emitSlow_op_get_by_pname(Instruction*, Vector::iterator&); + void emitSlow_op_check_has_instance(Instruction*, Vector::iterator&); + void emitSlow_op_instanceof(Instruction*, Vector::iterator&); + void emitSlow_op_jfalse(Instruction*, Vector::iterator&); + void emitSlow_op_jless(Instruction*, Vector::iterator&); + void emitSlow_op_jlesseq(Instruction*, Vector::iterator&); + void emitSlow_op_jgreater(Instruction*, Vector::iterator&); + void emitSlow_op_jgreatereq(Instruction*, Vector::iterator&); + void emitSlow_op_jnless(Instruction*, Vector::iterator&); + void emitSlow_op_jnlesseq(Instruction*, Vector::iterator&); + void emitSlow_op_jngreater(Instruction*, Vector::iterator&); + 
void emitSlow_op_jngreatereq(Instruction*, Vector::iterator&); + void emitSlow_op_jtrue(Instruction*, Vector::iterator&); + void emitSlow_op_loop_if_less(Instruction*, Vector::iterator&); + void emitSlow_op_loop_if_lesseq(Instruction*, Vector::iterator&); + void emitSlow_op_loop_if_greater(Instruction*, Vector::iterator&); + void emitSlow_op_loop_if_greatereq(Instruction*, Vector::iterator&); + void emitSlow_op_loop_if_true(Instruction*, Vector::iterator&); + void emitSlow_op_loop_if_false(Instruction*, Vector::iterator&); + void emitSlow_op_lshift(Instruction*, Vector::iterator&); + void emitSlow_op_method_check(Instruction*, Vector::iterator&); + void emitSlow_op_mod(Instruction*, Vector::iterator&); + void emitSlow_op_mul(Instruction*, Vector::iterator&); + void emitSlow_op_negate(Instruction*, Vector::iterator&); + void emitSlow_op_neq(Instruction*, Vector::iterator&); + void emitSlow_op_new_object(Instruction*, Vector::iterator&); + void emitSlow_op_not(Instruction*, Vector::iterator&); + void emitSlow_op_nstricteq(Instruction*, Vector::iterator&); + void emitSlow_op_post_dec(Instruction*, Vector::iterator&); + void emitSlow_op_post_inc(Instruction*, Vector::iterator&); + void emitSlow_op_pre_dec(Instruction*, Vector::iterator&); + void emitSlow_op_pre_inc(Instruction*, Vector::iterator&); + void emitSlow_op_put_by_id(Instruction*, Vector::iterator&); + void emitSlow_op_put_by_val(Instruction*, Vector::iterator&); + void emitSlow_op_resolve_global(Instruction*, Vector::iterator&); + void emitSlow_op_resolve_global_dynamic(Instruction*, Vector::iterator&); + void emitSlow_op_rshift(Instruction*, Vector::iterator&); + void emitSlow_op_stricteq(Instruction*, Vector::iterator&); + void emitSlow_op_sub(Instruction*, Vector::iterator&); + void emitSlow_op_to_jsnumber(Instruction*, Vector::iterator&); + void emitSlow_op_to_primitive(Instruction*, Vector::iterator&); + void emitSlow_op_urshift(Instruction*, Vector::iterator&); + void emitSlow_op_new_func(Instruction*, Vector::iterator&); + void emitSlow_op_new_func_exp(Instruction*, Vector::iterator&); + + + void emitRightShift(Instruction*, bool isUnsigned); + void emitRightShiftSlowCase(Instruction*, Vector::iterator&, bool isUnsigned); + + /* This function is deprecated. 
*/ + void emitGetJITStubArg(unsigned argumentNumber, RegisterID dst); + + void emitInitRegister(unsigned dst); + + void emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry); + void emitPutCellToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry); + void emitPutIntToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry); + void emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry); + void emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister); + void emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister); + + JSValue getConstantOperand(unsigned src); + bool isOperandConstantImmediateInt(unsigned src); + bool isOperandConstantImmediateChar(unsigned src); + + bool atJumpTarget(); + + Jump getSlowCase(Vector::iterator& iter) + { + return iter++->from; + } + void linkSlowCase(Vector::iterator& iter) + { + iter->from.link(this); + ++iter; + } + void linkDummySlowCase(Vector::iterator& iter) + { + ASSERT(!iter->from.isSet()); + ++iter; + } + void linkSlowCaseIfNotJSCell(Vector::iterator&, int virtualRegisterIndex); + + Jump checkStructure(RegisterID reg, Structure* structure); + + void restoreArgumentReference(); + void restoreArgumentReferenceForTrampoline(); + void updateTopCallFrame(); + + Call emitNakedCall(CodePtr function = CodePtr()); + + void preserveReturnAddressAfterCall(RegisterID); + void restoreReturnAddressBeforeReturn(RegisterID); + void restoreReturnAddressBeforeReturn(Address); + + // Loads the character value of a single character string into dst. + void emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures); + + enum OptimizationCheckKind { LoopOptimizationCheck, RetOptimizationCheck }; +#if ENABLE(DFG_JIT) + void emitOptimizationCheck(OptimizationCheckKind); +#else + void emitOptimizationCheck(OptimizationCheckKind) { } +#endif + + void emitTimeoutCheck(); +#ifndef NDEBUG + void printBytecodeOperandTypes(unsigned src1, unsigned src2); +#endif + +#if ENABLE(SAMPLING_FLAGS) + void setSamplingFlag(int32_t); + void clearSamplingFlag(int32_t); +#endif + +#if ENABLE(SAMPLING_COUNTERS) + void emitCount(AbstractSamplingCounter&, int32_t = 1); +#endif + +#if ENABLE(OPCODE_SAMPLING) + void sampleInstruction(Instruction*, bool = false); +#endif + +#if ENABLE(CODEBLOCK_SAMPLING) + void sampleCodeBlock(CodeBlock*); +#else + void sampleCodeBlock(CodeBlock*) {} +#endif + +#if ENABLE(DFG_JIT) + bool canBeOptimized() { return m_canBeOptimized; } + bool shouldEmitProfiling() { return m_canBeOptimized; } +#else + bool canBeOptimized() { return false; } + // Enables use of value profiler with tiered compilation turned off, + // in which case all code gets profiled. + bool shouldEmitProfiling() { return true; } +#endif + + Interpreter* m_interpreter; + JSGlobalData* m_globalData; + CodeBlock* m_codeBlock; + + Vector m_calls; + Vector