From de4f791e30be4e4239b381c11745ffa4d87ddb8b Mon Sep 17 00:00:00 2001 From: Simon Hausmann Date: Fri, 9 Nov 2012 12:15:52 +0100 Subject: Imported WebKit commit e2c32e2f53e02d388e70b9db88b91d8d9d28fc84 (http://svn.webkit.org/repository/webkit/trunk@133952) Revert back to an older snapshot that should build on ARM --- Source/JavaScriptCore/jit/HostCallReturnValue.cpp | 2 +- Source/JavaScriptCore/jit/JIT.cpp | 2 +- Source/JavaScriptCore/jit/JIT.h | 19 +- Source/JavaScriptCore/jit/JITArithmetic.cpp | 12 +- Source/JavaScriptCore/jit/JITArithmetic32_64.cpp | 2 +- Source/JavaScriptCore/jit/JITCall.cpp | 2 +- Source/JavaScriptCore/jit/JITCall32_64.cpp | 2 +- Source/JavaScriptCore/jit/JITExceptions.cpp | 2 +- Source/JavaScriptCore/jit/JITInlineMethods.h | 1001 +++++++++++++++++++ Source/JavaScriptCore/jit/JITInlines.h | 1013 -------------------- Source/JavaScriptCore/jit/JITOpcodes.cpp | 7 +- Source/JavaScriptCore/jit/JITOpcodes32_64.cpp | 2 +- Source/JavaScriptCore/jit/JITPropertyAccess.cpp | 91 +- .../JavaScriptCore/jit/JITPropertyAccess32_64.cpp | 74 +- Source/JavaScriptCore/jit/JITStubs.cpp | 13 +- Source/JavaScriptCore/jit/JITStubs.h | 2 - 16 files changed, 1035 insertions(+), 1211 deletions(-) create mode 100644 Source/JavaScriptCore/jit/JITInlineMethods.h delete mode 100644 Source/JavaScriptCore/jit/JITInlines.h (limited to 'Source/JavaScriptCore/jit') diff --git a/Source/JavaScriptCore/jit/HostCallReturnValue.cpp b/Source/JavaScriptCore/jit/HostCallReturnValue.cpp index 967c499b9..c4d2e6ad9 100644 --- a/Source/JavaScriptCore/jit/HostCallReturnValue.cpp +++ b/Source/JavaScriptCore/jit/HostCallReturnValue.cpp @@ -29,7 +29,7 @@ #include "CallFrame.h" #include #include "JSObject.h" -#include "JSValueInlines.h" +#include "JSValueInlineMethods.h" namespace JSC { diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp index ffd18b571..3102c7693 100644 --- a/Source/JavaScriptCore/jit/JIT.cpp +++ b/Source/JavaScriptCore/jit/JIT.cpp @@ -38,7 +38,7 @@ JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse #include #include "DFGNode.h" // for DFG_SUCCESS_STATS #include "Interpreter.h" -#include "JITInlines.h" +#include "JITInlineMethods.h" #include "JITStubCall.h" #include "JSArray.h" #include "JSFunction.h" diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h index 9b0879fe2..dcf87d352 100644 --- a/Source/JavaScriptCore/jit/JIT.h +++ b/Source/JavaScriptCore/jit/JIT.h @@ -474,9 +474,7 @@ namespace JSC { // Property is int-checked and zero extended. Base is cell checked. // Structure is already profiled. Returns the slow cases. Fall-through // case contains result in regT0, and it is not yet profiled. - JumpList emitInt32GetByVal(Instruction* instruction, PatchableJump& badType) { return emitContiguousGetByVal(instruction, badType, Int32Shape); } - JumpList emitDoubleGetByVal(Instruction*, PatchableJump& badType); - JumpList emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape); + JumpList emitContiguousGetByVal(Instruction*, PatchableJump& badType); JumpList emitArrayStorageGetByVal(Instruction*, PatchableJump& badType); JumpList emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize, TypedArraySignedness); JumpList emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize); @@ -485,20 +483,7 @@ namespace JSC { // The value to store is not yet loaded. 
Property is int-checked and // zero-extended. Base is cell checked. Structure is already profiled. // returns the slow cases. - JumpList emitInt32PutByVal(Instruction* currentInstruction, PatchableJump& badType) - { - return emitGenericContiguousPutByVal(currentInstruction, badType); - } - JumpList emitDoublePutByVal(Instruction* currentInstruction, PatchableJump& badType) - { - return emitGenericContiguousPutByVal(currentInstruction, badType); - } - JumpList emitContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType) - { - return emitGenericContiguousPutByVal(currentInstruction, badType); - } - template - JumpList emitGenericContiguousPutByVal(Instruction*, PatchableJump& badType); + JumpList emitContiguousPutByVal(Instruction*, PatchableJump& badType); JumpList emitArrayStoragePutByVal(Instruction*, PatchableJump& badType); JumpList emitIntTypedArrayPutByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize, TypedArraySignedness, TypedArrayRounding); JumpList emitFloatTypedArrayPutByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize); diff --git a/Source/JavaScriptCore/jit/JITArithmetic.cpp b/Source/JavaScriptCore/jit/JITArithmetic.cpp index bcb3dd74a..21d59bc33 100644 --- a/Source/JavaScriptCore/jit/JITArithmetic.cpp +++ b/Source/JavaScriptCore/jit/JITArithmetic.cpp @@ -29,7 +29,7 @@ #include "JIT.h" #include "CodeBlock.h" -#include "JITInlines.h" +#include "JITInlineMethods.h" #include "JITStubCall.h" #include "JITStubs.h" #include "JSArray.h" @@ -1090,20 +1090,18 @@ void JIT::emit_op_div(Instruction* currentInstruction) // access). So if we are DFG compiling anything in the program, we want this code to // ensure that it produces integers whenever possible. + // FIXME: This will fail to convert to integer if the result is zero. We should + // distinguish between positive zero and negative zero here. + JumpList notInteger; branchConvertDoubleToInt32(fpRegT0, regT0, notInteger, fpRegT1); // If we've got an integer, we might as well make that the result of the division. emitFastArithReTagImmediate(regT0, regT0); Jump isInteger = jump(); notInteger.link(this); - moveDoubleTo64(fpRegT0, regT0); - Jump doubleZero = branchTest64(Zero, regT0); add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset)->m_counter)); + moveDoubleTo64(fpRegT0, regT0); sub64(tagTypeNumberRegister, regT0); - Jump trueDouble = jump(); - doubleZero.link(this); - move(tagTypeNumberRegister, regT0); - trueDouble.link(this); isInteger.link(this); #else // Double result. 
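Aside (illustration only; not part of the imported patch): the op_div hunk above carries a FIXME because the reverted fast path never re-tags a zero division result as an integer -- it does not yet distinguish positive from negative zero, and JavaScript observably distinguishes the two (1 / -0 === -Infinity), so -0 has to stay boxed as a double. A minimal standalone C++ sketch of that constraint follows; the helper name convertsToInt32Losslessly is hypothetical and only illustrates the rule the branchConvertDoubleToInt32 path has to respect.

    #include <cmath>
    #include <cstdint>
    #include <iostream>

    // True if 'd' can be stored as a JS int32 immediate without losing information.
    // This mirrors the rule the JIT fast path must respect before re-tagging a
    // division result as an integer.
    static bool convertsToInt32Losslessly(double d)
    {
        if (!(d >= INT32_MIN && d <= INT32_MAX))
            return false;                        // out of int32 range (also rejects NaN)
        int32_t asInt = static_cast<int32_t>(d);
        if (static_cast<double>(asInt) != d)
            return false;                        // fractional part would be lost
        if (!asInt && std::signbit(d))
            return false;                        // -0.0: an int32 tag loses the sign bit
        return true;
    }

    int main()
    {
        std::cout << convertsToInt32Losslessly(6.0 / 3.0) << '\n';  // 1: plain 2
        std::cout << convertsToInt32Losslessly(0.0 / 3.0) << '\n';  // 1: +0 fits in an int32
        std::cout << convertsToInt32Losslessly(0.0 / -3.0) << '\n'; // 0: -0 must stay a double
        std::cout << convertsToInt32Losslessly(1.0 / 3.0) << '\n';  // 0: not integral
    }

The special-case profile counter bumped in the hunk above records how often the slow (boxed-double) path is taken, which is what the surrounding comment about DFG compilation refers to.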
diff --git a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp index 960d06091..62a359eeb 100644 --- a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp +++ b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp @@ -30,7 +30,7 @@ #include "JIT.h" #include "CodeBlock.h" -#include "JITInlines.h" +#include "JITInlineMethods.h" #include "JITStubCall.h" #include "JITStubs.h" #include "JSArray.h" diff --git a/Source/JavaScriptCore/jit/JITCall.cpp b/Source/JavaScriptCore/jit/JITCall.cpp index 006c5b741..074bf7f97 100644 --- a/Source/JavaScriptCore/jit/JITCall.cpp +++ b/Source/JavaScriptCore/jit/JITCall.cpp @@ -31,7 +31,7 @@ #include "Arguments.h" #include "CodeBlock.h" -#include "JITInlines.h" +#include "JITInlineMethods.h" #include "JITStubCall.h" #include "JSArray.h" #include "JSFunction.h" diff --git a/Source/JavaScriptCore/jit/JITCall32_64.cpp b/Source/JavaScriptCore/jit/JITCall32_64.cpp index ecd5cf126..ad827cdf9 100644 --- a/Source/JavaScriptCore/jit/JITCall32_64.cpp +++ b/Source/JavaScriptCore/jit/JITCall32_64.cpp @@ -32,7 +32,7 @@ #include "Arguments.h" #include "CodeBlock.h" #include "Interpreter.h" -#include "JITInlines.h" +#include "JITInlineMethods.h" #include "JITStubCall.h" #include "JSArray.h" #include "JSFunction.h" diff --git a/Source/JavaScriptCore/jit/JITExceptions.cpp b/Source/JavaScriptCore/jit/JITExceptions.cpp index aeb869474..f6cec24bd 100644 --- a/Source/JavaScriptCore/jit/JITExceptions.cpp +++ b/Source/JavaScriptCore/jit/JITExceptions.cpp @@ -39,7 +39,7 @@ namespace JSC { ExceptionHandler genericThrow(JSGlobalData* globalData, ExecState* callFrame, JSValue exceptionValue, unsigned vPCIndex) { ASSERT(exceptionValue); - + globalData->exception = JSValue(); HandlerInfo* handler = globalData->interpreter->throwException(callFrame, exceptionValue, vPCIndex); // This may update callFrame & exceptionValue! globalData->exception = exceptionValue; diff --git a/Source/JavaScriptCore/jit/JITInlineMethods.h b/Source/JavaScriptCore/jit/JITInlineMethods.h new file mode 100644 index 000000000..410bdf710 --- /dev/null +++ b/Source/JavaScriptCore/jit/JITInlineMethods.h @@ -0,0 +1,1001 @@ +/* + * Copyright (C) 2008, 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef JITInlineMethods_h +#define JITInlineMethods_h + + +#if ENABLE(JIT) + +namespace JSC { + +ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src) +{ + return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble(); +} + +ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src) +{ + ASSERT(m_codeBlock->isConstantRegisterIndex(src)); + return m_codeBlock->getConstant(src); +} + +ALWAYS_INLINE void JIT::emitPutCellToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry) +{ +#if USE(JSVALUE32_64) + store32(TrustedImm32(JSValue::CellTag), tagFor(entry, callFrameRegister)); + store32(from, payloadFor(entry, callFrameRegister)); +#else + store64(from, addressFor(entry, callFrameRegister)); +#endif +} + +ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry) +{ +#if USE(JSVALUE32_64) + store32(TrustedImm32(Int32Tag), intTagFor(entry, callFrameRegister)); + store32(from, intPayloadFor(entry, callFrameRegister)); +#else + store64(from, addressFor(entry, callFrameRegister)); +#endif +} + +ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry) +{ +#if USE(JSVALUE32_64) + storePtr(from, payloadFor(entry, callFrameRegister)); +#else + store64(from, addressFor(entry, callFrameRegister)); +#endif +} + +ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry) +{ + storePtr(TrustedImmPtr(value), Address(callFrameRegister, entry * sizeof(Register))); +} + +ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from) +{ + loadPtr(Address(from, entry * sizeof(Register)), to); +#if USE(JSVALUE64) + killLastResultRegister(); +#endif +} + +ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from) +{ + load32(Address(from, entry * sizeof(Register)), to); +#if USE(JSVALUE64) + killLastResultRegister(); +#endif +} + +#if USE(JSVALUE64) +ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from) +{ + load64(Address(from, entry * sizeof(Register)), to); + killLastResultRegister(); +} +#endif + +ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures) +{ + failures.append(branchPtr(NotEqual, Address(src, JSCell::structureOffset()), TrustedImmPtr(m_globalData->stringStructure.get()))); + failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), TrustedImm32(1))); + loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst); + failures.append(branchTest32(Zero, dst)); + loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplFlagsOffset()), regT1); + loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplDataOffset()), dst); + + JumpList is16Bit; + JumpList cont8Bit; + is16Bit.append(branchTest32(Zero, regT1, TrustedImm32(ThunkHelpers::stringImpl8BitFlag()))); + load8(MacroAssembler::Address(dst, 0), dst); + cont8Bit.append(jump()); + is16Bit.link(this); + load16(MacroAssembler::Address(dst, 0), dst); + cont8Bit.link(this); +} + +ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function) +{ + ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. 
+ + Call nakedCall = nearCall(); + m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress())); + return nakedCall; +} + +ALWAYS_INLINE bool JIT::atJumpTarget() +{ + while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeOffset) { + if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeOffset) + return true; + ++m_jumpTargetsPosition; + } + return false; +} + +#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL + +ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace) +{ +#if CPU(ARM_TRADITIONAL) +#ifndef NDEBUG + // Ensure the label after the sequence can also fit + insnSpace += sizeof(ARMWord); + constSpace += sizeof(uint64_t); +#endif + + ensureSpace(insnSpace, constSpace); + +#elif CPU(SH4) +#ifndef NDEBUG + insnSpace += sizeof(SH4Word); + constSpace += sizeof(uint64_t); +#endif + + m_assembler.ensureSpace(insnSpace + m_assembler.maxInstructionSize + 2, constSpace + 8); +#endif + +#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL +#ifndef NDEBUG + m_uninterruptedInstructionSequenceBegin = label(); + m_uninterruptedConstantSequenceBegin = sizeOfConstantPool(); +#endif +#endif +} + +ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace, int dst) +{ + UNUSED_PARAM(dst); +#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL + /* There are several cases when the uninterrupted sequence is larger than + * maximum required offset for pathing the same sequence. Eg.: if in a + * uninterrupted sequence the last macroassembler's instruction is a stub + * call, it emits store instruction(s) which should not be included in the + * calculation of length of uninterrupted sequence. So, the insnSpace and + * constSpace should be upper limit instead of hard limit. 
+ */ +#if CPU(SH4) + if ((dst > 15) || (dst < -16)) { + insnSpace += 8; + constSpace += 2; + } + + if (((dst >= -16) && (dst < 0)) || ((dst > 7) && (dst <= 15))) + insnSpace += 8; +#endif + ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) <= insnSpace); + ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin <= constSpace); +#endif +} + +#endif + +#if CPU(ARM) + +ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg) +{ + move(linkRegister, reg); +} + +ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg) +{ + move(reg, linkRegister); +} + +ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address) +{ + loadPtr(address, linkRegister); +} +#elif CPU(SH4) + +ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg) +{ + m_assembler.stspr(reg); +} + +ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg) +{ + m_assembler.ldspr(reg); +} + +ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address) +{ + loadPtrLinkReg(address); +} + +#elif CPU(MIPS) + +ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg) +{ + move(returnAddressRegister, reg); +} + +ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg) +{ + move(reg, returnAddressRegister); +} + +ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address) +{ + loadPtr(address, returnAddressRegister); +} + +#else // CPU(X86) || CPU(X86_64) + +ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg) +{ + pop(reg); +} + +ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg) +{ + push(reg); +} + +ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address) +{ + push(address); +} + +#endif + +ALWAYS_INLINE void JIT::restoreArgumentReference() +{ + move(stackPointerRegister, firstArgumentRegister); + poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*)); +} + +ALWAYS_INLINE void JIT::updateTopCallFrame() +{ + ASSERT(static_cast(m_bytecodeOffset) >= 0); + if (m_bytecodeOffset) { +#if USE(JSVALUE32_64) + storePtr(TrustedImmPtr(m_codeBlock->instructions().begin() + m_bytecodeOffset + 1), intTagFor(JSStack::ArgumentCount)); +#else + store32(TrustedImm32(m_bytecodeOffset + 1), intTagFor(JSStack::ArgumentCount)); +#endif + } + storePtr(callFrameRegister, &m_globalData->topCallFrame); +} + +ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() +{ +#if CPU(X86) + // Within a trampoline the return address will be on the stack at this point. + addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister); +#elif CPU(ARM) + move(stackPointerRegister, firstArgumentRegister); +#elif CPU(SH4) + move(stackPointerRegister, firstArgumentRegister); +#endif + // In the trampoline on x86-64, the first argument register is not overwritten. +} + +ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure) +{ + return branchPtr(NotEqual, Address(reg, JSCell::structureOffset()), TrustedImmPtr(structure)); +} + +ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector::iterator& iter, int vReg) +{ + if (!m_codeBlock->isKnownNotImmediate(vReg)) + linkSlowCase(iter); +} + +ALWAYS_INLINE void JIT::addSlowCase(Jump jump) +{ + ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. 
+ + m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset)); +} + +ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList) +{ + ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. + + const JumpList::JumpVector& jumpVector = jumpList.jumps(); + size_t size = jumpVector.size(); + for (size_t i = 0; i < size; ++i) + m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeOffset)); +} + +ALWAYS_INLINE void JIT::addSlowCase() +{ + ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. + + Jump emptyJump; // Doing it this way to make Windows happy. + m_slowCases.append(SlowCaseEntry(emptyJump, m_bytecodeOffset)); +} + +ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset) +{ + ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. + + m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset)); +} + +ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset) +{ + ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. + + jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this); +} + +ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotObject(RegisterID structureReg) +{ + return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType)); +} + +ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType type) +{ + loadPtr(Address(baseReg, JSCell::structureOffset()), scratchReg); + return branch8(NotEqual, Address(scratchReg, Structure::typeInfoTypeOffset()), TrustedImm32(type)); +} + +#if ENABLE(SAMPLING_FLAGS) +ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag) +{ + ASSERT(flag >= 1); + ASSERT(flag <= 32); + or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags())); +} + +ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag) +{ + ASSERT(flag >= 1); + ASSERT(flag <= 32); + and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags())); +} +#endif + +#if ENABLE(SAMPLING_COUNTERS) +ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, int32_t count) +{ + add64(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter())); +} +#endif + +#if ENABLE(OPCODE_SAMPLING) +#if CPU(X86_64) +ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction) +{ + move(TrustedImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx); + storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx); +} +#else +ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction) +{ + storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot()); +} +#endif +#endif + +#if ENABLE(CODEBLOCK_SAMPLING) +#if CPU(X86_64) +ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock) +{ + move(TrustedImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx); + storePtr(TrustedImmPtr(codeBlock), X86Registers::ecx); +} +#else +ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock) +{ + storePtr(TrustedImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot()); +} +#endif +#endif + 
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src) +{ + return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1; +} + +template inline void JIT::emitAllocateBasicJSObject(StructureType structure, RegisterID result, RegisterID storagePtr) +{ + size_t size = ClassType::allocationSize(INLINE_STORAGE_CAPACITY); + MarkedAllocator* allocator = 0; + if (destructorType == MarkedBlock::Normal) + allocator = &m_globalData->heap.allocatorForObjectWithNormalDestructor(size); + else if (destructorType == MarkedBlock::ImmortalStructure) + allocator = &m_globalData->heap.allocatorForObjectWithImmortalStructureDestructor(size); + else + allocator = &m_globalData->heap.allocatorForObjectWithoutDestructor(size); + loadPtr(&allocator->m_freeList.head, result); + addSlowCase(branchTestPtr(Zero, result)); + + // remove the object from the free list + loadPtr(Address(result), storagePtr); + storePtr(storagePtr, &allocator->m_freeList.head); + + // initialize the object's structure + storePtr(structure, Address(result, JSCell::structureOffset())); + + // initialize the object's property storage pointer + storePtr(TrustedImmPtr(0), Address(result, JSObject::butterflyOffset())); +} + +template inline void JIT::emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID scratch) +{ + emitAllocateBasicJSObject(structure, result, scratch); +} + +#if ENABLE(VALUE_PROFILER) +inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile) +{ + ASSERT(shouldEmitProfiling()); + ASSERT(valueProfile); + + const RegisterID value = regT0; +#if USE(JSVALUE32_64) + const RegisterID valueTag = regT1; +#endif + const RegisterID scratch = regT3; + + if (ValueProfile::numberOfBuckets == 1) { + // We're in a simple configuration: only one bucket, so we can just do a direct + // store. +#if USE(JSVALUE64) + store64(value, valueProfile->m_buckets); +#else + EncodedValueDescriptor* descriptor = bitwise_cast(valueProfile->m_buckets); + store32(value, &descriptor->asBits.payload); + store32(valueTag, &descriptor->asBits.tag); +#endif + return; + } + + if (m_randomGenerator.getUint32() & 1) + add32(TrustedImm32(1), bucketCounterRegister); + else + add32(TrustedImm32(3), bucketCounterRegister); + and32(TrustedImm32(ValueProfile::bucketIndexMask), bucketCounterRegister); + move(TrustedImmPtr(valueProfile->m_buckets), scratch); +#if USE(JSVALUE64) + store64(value, BaseIndex(scratch, bucketCounterRegister, TimesEight)); +#elif USE(JSVALUE32_64) + store32(value, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); + store32(valueTag, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); +#endif +} + +inline void JIT::emitValueProfilingSite(unsigned bytecodeOffset) +{ + if (!shouldEmitProfiling()) + return; + emitValueProfilingSite(m_codeBlock->valueProfileForBytecodeOffset(bytecodeOffset)); +} + +inline void JIT::emitValueProfilingSite() +{ + emitValueProfilingSite(m_bytecodeOffset); +} +#endif // ENABLE(VALUE_PROFILER) + +inline void JIT::emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile* arrayProfile) +{ + UNUSED_PARAM(scratch); // We had found this scratch register useful here before, so I will keep it for now. 
+ + RegisterID structure = structureAndIndexingType; + RegisterID indexingType = structureAndIndexingType; + + if (canBeOptimized()) + storePtr(structure, arrayProfile->addressOfLastSeenStructure()); + + load8(Address(structure, Structure::indexingTypeOffset()), indexingType); +} + +inline void JIT::emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex) +{ +#if ENABLE(VALUE_PROFILER) + emitArrayProfilingSite(structureAndIndexingType, scratch, m_codeBlock->getOrAddArrayProfile(bytecodeIndex)); +#else + UNUSED_PARAM(bytecodeIndex); + emitArrayProfilingSite(structureAndIndexingType, scratch, 0); +#endif +} + +inline void JIT::emitArrayProfileStoreToHoleSpecialCase(ArrayProfile* arrayProfile) +{ +#if ENABLE(VALUE_PROFILER) + store8(TrustedImm32(1), arrayProfile->addressOfMayStoreToHole()); +#else + UNUSED_PARAM(arrayProfile); +#endif +} + +static inline bool arrayProfileSaw(ArrayProfile* profile, IndexingType capability) +{ +#if ENABLE(VALUE_PROFILER) + return !!(profile->observedArrayModes() & (asArrayModes(NonArray | capability) | asArrayModes(ArrayClass | capability))); +#else + UNUSED_PARAM(profile); + UNUSED_PARAM(capability); + return false; +#endif +} + +inline JITArrayMode JIT::chooseArrayMode(ArrayProfile* profile) +{ + if (arrayProfileSaw(profile, ArrayStorageShape)) + return JITArrayStorage; + return JITContiguous; +} + +#if USE(JSVALUE32_64) + +inline void JIT::emitLoadTag(int index, RegisterID tag) +{ + RegisterID mappedTag; + if (getMappedTag(index, mappedTag)) { + move(mappedTag, tag); + unmap(tag); + return; + } + + if (m_codeBlock->isConstantRegisterIndex(index)) { + move(Imm32(getConstantOperand(index).tag()), tag); + unmap(tag); + return; + } + + load32(tagFor(index), tag); + unmap(tag); +} + +inline void JIT::emitLoadPayload(int index, RegisterID payload) +{ + RegisterID mappedPayload; + if (getMappedPayload(index, mappedPayload)) { + move(mappedPayload, payload); + unmap(payload); + return; + } + + if (m_codeBlock->isConstantRegisterIndex(index)) { + move(Imm32(getConstantOperand(index).payload()), payload); + unmap(payload); + return; + } + + load32(payloadFor(index), payload); + unmap(payload); +} + +inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload) +{ + move(Imm32(v.payload()), payload); + move(Imm32(v.tag()), tag); +} + +inline void JIT::emitLoad(int index, RegisterID tag, RegisterID payload, RegisterID base) +{ + ASSERT(tag != payload); + + if (base == callFrameRegister) { + ASSERT(payload != base); + emitLoadPayload(index, payload); + emitLoadTag(index, tag); + return; + } + + if (payload == base) { // avoid stomping base + load32(tagFor(index, base), tag); + load32(payloadFor(index, base), payload); + return; + } + + load32(payloadFor(index, base), payload); + load32(tagFor(index, base), tag); +} + +inline void JIT::emitLoad2(int index1, RegisterID tag1, RegisterID payload1, int index2, RegisterID tag2, RegisterID payload2) +{ + if (isMapped(index1)) { + emitLoad(index1, tag1, payload1); + emitLoad(index2, tag2, payload2); + return; + } + emitLoad(index2, tag2, payload2); + emitLoad(index1, tag1, payload1); +} + +inline void JIT::emitLoadDouble(int index, FPRegisterID value) +{ + if (m_codeBlock->isConstantRegisterIndex(index)) { + WriteBarrier& inConstantPool = m_codeBlock->constantRegister(index); + loadDouble(&inConstantPool, value); + } else + loadDouble(addressFor(index), value); +} + +inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value) +{ + if 
(m_codeBlock->isConstantRegisterIndex(index)) { + WriteBarrier& inConstantPool = m_codeBlock->constantRegister(index); + char* bytePointer = reinterpret_cast(&inConstantPool); + convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value); + } else + convertInt32ToDouble(payloadFor(index), value); +} + +inline void JIT::emitStore(int index, RegisterID tag, RegisterID payload, RegisterID base) +{ + store32(payload, payloadFor(index, base)); + store32(tag, tagFor(index, base)); +} + +inline void JIT::emitStoreInt32(int index, RegisterID payload, bool indexIsInt32) +{ + store32(payload, payloadFor(index, callFrameRegister)); + if (!indexIsInt32) + store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister)); +} + +inline void JIT::emitStoreAndMapInt32(int index, RegisterID tag, RegisterID payload, bool indexIsInt32, size_t opcodeLength) +{ + emitStoreInt32(index, payload, indexIsInt32); + map(m_bytecodeOffset + opcodeLength, index, tag, payload); +} + +inline void JIT::emitStoreInt32(int index, TrustedImm32 payload, bool indexIsInt32) +{ + store32(payload, payloadFor(index, callFrameRegister)); + if (!indexIsInt32) + store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister)); +} + +inline void JIT::emitStoreCell(int index, RegisterID payload, bool indexIsCell) +{ + store32(payload, payloadFor(index, callFrameRegister)); + if (!indexIsCell) + store32(TrustedImm32(JSValue::CellTag), tagFor(index, callFrameRegister)); +} + +inline void JIT::emitStoreBool(int index, RegisterID payload, bool indexIsBool) +{ + store32(payload, payloadFor(index, callFrameRegister)); + if (!indexIsBool) + store32(TrustedImm32(JSValue::BooleanTag), tagFor(index, callFrameRegister)); +} + +inline void JIT::emitStoreDouble(int index, FPRegisterID value) +{ + storeDouble(value, addressFor(index)); +} + +inline void JIT::emitStore(int index, const JSValue constant, RegisterID base) +{ + store32(Imm32(constant.payload()), payloadFor(index, base)); + store32(Imm32(constant.tag()), tagFor(index, base)); +} + +ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst) +{ + emitStore(dst, jsUndefined()); +} + +inline bool JIT::isLabeled(unsigned bytecodeOffset) +{ + for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) { + unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex); + if (jumpTarget == bytecodeOffset) + return true; + if (jumpTarget > bytecodeOffset) + return false; + } + return false; +} + +inline void JIT::map(unsigned bytecodeOffset, int virtualRegisterIndex, RegisterID tag, RegisterID payload) +{ + if (isLabeled(bytecodeOffset)) + return; + + m_mappedBytecodeOffset = bytecodeOffset; + m_mappedVirtualRegisterIndex = virtualRegisterIndex; + m_mappedTag = tag; + m_mappedPayload = payload; + + ASSERT(!canBeOptimized() || m_mappedPayload == regT0); + ASSERT(!canBeOptimized() || m_mappedTag == regT1); +} + +inline void JIT::unmap(RegisterID registerID) +{ + if (m_mappedTag == registerID) + m_mappedTag = (RegisterID)-1; + else if (m_mappedPayload == registerID) + m_mappedPayload = (RegisterID)-1; +} + +inline void JIT::unmap() +{ + m_mappedBytecodeOffset = (unsigned)-1; + m_mappedVirtualRegisterIndex = JSStack::ReturnPC; + m_mappedTag = (RegisterID)-1; + m_mappedPayload = (RegisterID)-1; +} + +inline bool JIT::isMapped(int virtualRegisterIndex) +{ + if (m_mappedBytecodeOffset != m_bytecodeOffset) + return false; + if (m_mappedVirtualRegisterIndex != 
virtualRegisterIndex) + return false; + return true; +} + +inline bool JIT::getMappedPayload(int virtualRegisterIndex, RegisterID& payload) +{ + if (m_mappedBytecodeOffset != m_bytecodeOffset) + return false; + if (m_mappedVirtualRegisterIndex != virtualRegisterIndex) + return false; + if (m_mappedPayload == (RegisterID)-1) + return false; + payload = m_mappedPayload; + return true; +} + +inline bool JIT::getMappedTag(int virtualRegisterIndex, RegisterID& tag) +{ + if (m_mappedBytecodeOffset != m_bytecodeOffset) + return false; + if (m_mappedVirtualRegisterIndex != virtualRegisterIndex) + return false; + if (m_mappedTag == (RegisterID)-1) + return false; + tag = m_mappedTag; + return true; +} + +inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex) +{ + if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) { + if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex)) + addSlowCase(jump()); + else + addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex)); + } +} + +inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterID tag) +{ + if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) { + if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex)) + addSlowCase(jump()); + else + addSlowCase(branch32(NotEqual, tag, TrustedImm32(JSValue::CellTag))); + } +} + +ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src) +{ + return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32(); +} + +ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant) +{ + if (isOperandConstantImmediateInt(op1)) { + constant = getConstantOperand(op1).asInt32(); + op = op2; + return true; + } + + if (isOperandConstantImmediateInt(op2)) { + constant = getConstantOperand(op2).asInt32(); + op = op1; + return true; + } + + return false; +} + +#else // USE(JSVALUE32_64) + +/* Deprecated: Please use JITStubCall instead. */ + +ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst) +{ + unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX; + peek64(dst, argumentStackOffset); +} + +ALWAYS_INLINE void JIT::killLastResultRegister() +{ + m_lastResultBytecodeRegister = std::numeric_limits::max(); +} + +// get arg puts an arg from the SF register array into a h/w register +ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst) +{ + ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. + + // TODO: we want to reuse values that are already in registers if we can - add a register allocator! 
+ if (m_codeBlock->isConstantRegisterIndex(src)) { + JSValue value = m_codeBlock->getConstant(src); + if (!value.isNumber()) + move(TrustedImm64(JSValue::encode(value)), dst); + else + move(Imm64(JSValue::encode(value)), dst); + killLastResultRegister(); + return; + } + + if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src) && !atJumpTarget()) { + // The argument we want is already stored in eax + if (dst != cachedResultRegister) + move(cachedResultRegister, dst); + killLastResultRegister(); + return; + } + + load64(Address(callFrameRegister, src * sizeof(Register)), dst); + killLastResultRegister(); +} + +ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2) +{ + if (src2 == m_lastResultBytecodeRegister) { + emitGetVirtualRegister(src2, dst2); + emitGetVirtualRegister(src1, dst1); + } else { + emitGetVirtualRegister(src1, dst1); + emitGetVirtualRegister(src2, dst2); + } +} + +ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src) +{ + return getConstantOperand(src).asInt32(); +} + +ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src) +{ + return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32(); +} + +ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from) +{ + store64(from, Address(callFrameRegister, dst * sizeof(Register))); + m_lastResultBytecodeRegister = (from == cachedResultRegister) ? static_cast(dst) : std::numeric_limits::max(); +} + +ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst) +{ + store64(TrustedImm64(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register))); +} + +ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg) +{ + return branchTest64(Zero, reg, tagMaskRegister); +} + +ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch) +{ + move(reg1, scratch); + or64(reg2, scratch); + return emitJumpIfJSCell(scratch); +} + +ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg) +{ + addSlowCase(emitJumpIfJSCell(reg)); +} + +ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg) +{ + return branchTest64(NonZero, reg, tagMaskRegister); +} + +ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg) +{ + addSlowCase(emitJumpIfNotJSCell(reg)); +} + +ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg) +{ + if (!m_codeBlock->isKnownNotImmediate(vReg)) + emitJumpSlowCaseIfNotJSCell(reg); +} + +inline void JIT::emitLoadDouble(int index, FPRegisterID value) +{ + if (m_codeBlock->isConstantRegisterIndex(index)) { + WriteBarrier& inConstantPool = m_codeBlock->constantRegister(index); + loadDouble(&inConstantPool, value); + } else + loadDouble(addressFor(index), value); +} + +inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value) +{ + if (m_codeBlock->isConstantRegisterIndex(index)) { + ASSERT(isOperandConstantImmediateInt(index)); + convertInt32ToDouble(Imm32(getConstantOperand(index).asInt32()), value); + } else + convertInt32ToDouble(addressFor(index), value); +} + +ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg) +{ + return branch64(AboveOrEqual, reg, tagTypeNumberRegister); +} + +ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg) +{ + return branch64(Below, reg, tagTypeNumberRegister); +} + +ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch) +{ + 
move(reg1, scratch); + and64(reg2, scratch); + return emitJumpIfNotImmediateInteger(scratch); +} + +ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg) +{ + addSlowCase(emitJumpIfNotImmediateInteger(reg)); +} + +ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch) +{ + addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch)); +} + +ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg) +{ + addSlowCase(emitJumpIfNotImmediateNumber(reg)); +} + +ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest) +{ + emitFastArithIntToImmNoCheck(src, dest); +} + +// operand is int32_t, must have been zero-extended if register is 64-bit. +ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest) +{ + if (src != dest) + move(src, dest); + or64(tagTypeNumberRegister, dest); +} + +ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg) +{ + or32(TrustedImm32(static_cast(ValueFalse)), reg); +} + +#endif // USE(JSVALUE32_64) + +} // namespace JSC + +#endif // ENABLE(JIT) + +#endif diff --git a/Source/JavaScriptCore/jit/JITInlines.h b/Source/JavaScriptCore/jit/JITInlines.h deleted file mode 100644 index e6f95b94c..000000000 --- a/Source/JavaScriptCore/jit/JITInlines.h +++ /dev/null @@ -1,1013 +0,0 @@ -/* - * Copyright (C) 2008, 2012 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef JITInlines_h -#define JITInlines_h - - -#if ENABLE(JIT) - -namespace JSC { - -ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src) -{ - return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble(); -} - -ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src) -{ - ASSERT(m_codeBlock->isConstantRegisterIndex(src)); - return m_codeBlock->getConstant(src); -} - -ALWAYS_INLINE void JIT::emitPutCellToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry) -{ -#if USE(JSVALUE32_64) - store32(TrustedImm32(JSValue::CellTag), tagFor(entry, callFrameRegister)); - store32(from, payloadFor(entry, callFrameRegister)); -#else - store64(from, addressFor(entry, callFrameRegister)); -#endif -} - -ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry) -{ -#if USE(JSVALUE32_64) - store32(TrustedImm32(Int32Tag), intTagFor(entry, callFrameRegister)); - store32(from, intPayloadFor(entry, callFrameRegister)); -#else - store64(from, addressFor(entry, callFrameRegister)); -#endif -} - -ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry) -{ -#if USE(JSVALUE32_64) - storePtr(from, payloadFor(entry, callFrameRegister)); -#else - store64(from, addressFor(entry, callFrameRegister)); -#endif -} - -ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry) -{ - storePtr(TrustedImmPtr(value), Address(callFrameRegister, entry * sizeof(Register))); -} - -ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from) -{ - loadPtr(Address(from, entry * sizeof(Register)), to); -#if USE(JSVALUE64) - killLastResultRegister(); -#endif -} - -ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from) -{ - load32(Address(from, entry * sizeof(Register)), to); -#if USE(JSVALUE64) - killLastResultRegister(); -#endif -} - -#if USE(JSVALUE64) -ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from) -{ - load64(Address(from, entry * sizeof(Register)), to); - killLastResultRegister(); -} -#endif - -ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures) -{ - failures.append(branchPtr(NotEqual, Address(src, JSCell::structureOffset()), TrustedImmPtr(m_globalData->stringStructure.get()))); - failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), TrustedImm32(1))); - loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst); - failures.append(branchTest32(Zero, dst)); - loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplFlagsOffset()), regT1); - loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplDataOffset()), dst); - - JumpList is16Bit; - JumpList cont8Bit; - is16Bit.append(branchTest32(Zero, regT1, TrustedImm32(ThunkHelpers::stringImpl8BitFlag()))); - load8(MacroAssembler::Address(dst, 0), dst); - cont8Bit.append(jump()); - is16Bit.link(this); - load16(MacroAssembler::Address(dst, 0), dst); - cont8Bit.link(this); -} - -ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function) -{ - ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. 
- - Call nakedCall = nearCall(); - m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress())); - return nakedCall; -} - -ALWAYS_INLINE bool JIT::atJumpTarget() -{ - while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeOffset) { - if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeOffset) - return true; - ++m_jumpTargetsPosition; - } - return false; -} - -#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL - -ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace) -{ -#if CPU(ARM_TRADITIONAL) -#ifndef NDEBUG - // Ensure the label after the sequence can also fit - insnSpace += sizeof(ARMWord); - constSpace += sizeof(uint64_t); -#endif - - ensureSpace(insnSpace, constSpace); - -#elif CPU(SH4) -#ifndef NDEBUG - insnSpace += sizeof(SH4Word); - constSpace += sizeof(uint64_t); -#endif - - m_assembler.ensureSpace(insnSpace + m_assembler.maxInstructionSize + 2, constSpace + 8); -#endif - -#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL -#ifndef NDEBUG - m_uninterruptedInstructionSequenceBegin = label(); - m_uninterruptedConstantSequenceBegin = sizeOfConstantPool(); -#endif -#endif -} - -ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace, int dst) -{ - UNUSED_PARAM(dst); -#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL - /* There are several cases when the uninterrupted sequence is larger than - * maximum required offset for pathing the same sequence. Eg.: if in a - * uninterrupted sequence the last macroassembler's instruction is a stub - * call, it emits store instruction(s) which should not be included in the - * calculation of length of uninterrupted sequence. So, the insnSpace and - * constSpace should be upper limit instead of hard limit. 
- */ -#if CPU(SH4) - if ((dst > 15) || (dst < -16)) { - insnSpace += 8; - constSpace += 2; - } - - if (((dst >= -16) && (dst < 0)) || ((dst > 7) && (dst <= 15))) - insnSpace += 8; -#endif - ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) <= insnSpace); - ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin <= constSpace); -#endif -} - -#endif - -#if CPU(ARM) - -ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg) -{ - move(linkRegister, reg); -} - -ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg) -{ - move(reg, linkRegister); -} - -ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address) -{ - loadPtr(address, linkRegister); -} -#elif CPU(SH4) - -ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg) -{ - m_assembler.stspr(reg); -} - -ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg) -{ - m_assembler.ldspr(reg); -} - -ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address) -{ - loadPtrLinkReg(address); -} - -#elif CPU(MIPS) - -ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg) -{ - move(returnAddressRegister, reg); -} - -ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg) -{ - move(reg, returnAddressRegister); -} - -ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address) -{ - loadPtr(address, returnAddressRegister); -} - -#else // CPU(X86) || CPU(X86_64) - -ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg) -{ - pop(reg); -} - -ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg) -{ - push(reg); -} - -ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address) -{ - push(address); -} - -#endif - -ALWAYS_INLINE void JIT::restoreArgumentReference() -{ - move(stackPointerRegister, firstArgumentRegister); - poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*)); -} - -ALWAYS_INLINE void JIT::updateTopCallFrame() -{ - ASSERT(static_cast(m_bytecodeOffset) >= 0); - if (m_bytecodeOffset) { -#if USE(JSVALUE32_64) - storePtr(TrustedImmPtr(m_codeBlock->instructions().begin() + m_bytecodeOffset + 1), intTagFor(JSStack::ArgumentCount)); -#else - store32(TrustedImm32(m_bytecodeOffset + 1), intTagFor(JSStack::ArgumentCount)); -#endif - } - storePtr(callFrameRegister, &m_globalData->topCallFrame); -} - -ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() -{ -#if CPU(X86) - // Within a trampoline the return address will be on the stack at this point. - addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister); -#elif CPU(ARM) - move(stackPointerRegister, firstArgumentRegister); -#elif CPU(SH4) - move(stackPointerRegister, firstArgumentRegister); -#endif - // In the trampoline on x86-64, the first argument register is not overwritten. -} - -ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure) -{ - return branchPtr(NotEqual, Address(reg, JSCell::structureOffset()), TrustedImmPtr(structure)); -} - -ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector::iterator& iter, int vReg) -{ - if (!m_codeBlock->isKnownNotImmediate(vReg)) - linkSlowCase(iter); -} - -ALWAYS_INLINE void JIT::addSlowCase(Jump jump) -{ - ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. 
- - m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset)); -} - -ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList) -{ - ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. - - const JumpList::JumpVector& jumpVector = jumpList.jumps(); - size_t size = jumpVector.size(); - for (size_t i = 0; i < size; ++i) - m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeOffset)); -} - -ALWAYS_INLINE void JIT::addSlowCase() -{ - ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. - - Jump emptyJump; // Doing it this way to make Windows happy. - m_slowCases.append(SlowCaseEntry(emptyJump, m_bytecodeOffset)); -} - -ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset) -{ - ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. - - m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset)); -} - -ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset) -{ - ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. - - jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this); -} - -ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotObject(RegisterID structureReg) -{ - return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType)); -} - -ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType type) -{ - loadPtr(Address(baseReg, JSCell::structureOffset()), scratchReg); - return branch8(NotEqual, Address(scratchReg, Structure::typeInfoTypeOffset()), TrustedImm32(type)); -} - -#if ENABLE(SAMPLING_FLAGS) -ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag) -{ - ASSERT(flag >= 1); - ASSERT(flag <= 32); - or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags())); -} - -ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag) -{ - ASSERT(flag >= 1); - ASSERT(flag <= 32); - and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags())); -} -#endif - -#if ENABLE(SAMPLING_COUNTERS) -ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, int32_t count) -{ - add64(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter())); -} -#endif - -#if ENABLE(OPCODE_SAMPLING) -#if CPU(X86_64) -ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction) -{ - move(TrustedImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx); - storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx); -} -#else -ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction) -{ - storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot()); -} -#endif -#endif - -#if ENABLE(CODEBLOCK_SAMPLING) -#if CPU(X86_64) -ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock) -{ - move(TrustedImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx); - storePtr(TrustedImmPtr(codeBlock), X86Registers::ecx); -} -#else -ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock) -{ - storePtr(TrustedImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot()); -} -#endif -#endif - 
-ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src) -{ - return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1; -} - -template inline void JIT::emitAllocateBasicJSObject(StructureType structure, RegisterID result, RegisterID storagePtr) -{ - size_t size = ClassType::allocationSize(INLINE_STORAGE_CAPACITY); - MarkedAllocator* allocator = 0; - if (destructorType == MarkedBlock::Normal) - allocator = &m_globalData->heap.allocatorForObjectWithNormalDestructor(size); - else if (destructorType == MarkedBlock::ImmortalStructure) - allocator = &m_globalData->heap.allocatorForObjectWithImmortalStructureDestructor(size); - else - allocator = &m_globalData->heap.allocatorForObjectWithoutDestructor(size); - loadPtr(&allocator->m_freeList.head, result); - addSlowCase(branchTestPtr(Zero, result)); - - // remove the object from the free list - loadPtr(Address(result), storagePtr); - storePtr(storagePtr, &allocator->m_freeList.head); - - // initialize the object's structure - storePtr(structure, Address(result, JSCell::structureOffset())); - - // initialize the object's property storage pointer - storePtr(TrustedImmPtr(0), Address(result, JSObject::butterflyOffset())); -} - -template inline void JIT::emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID scratch) -{ - emitAllocateBasicJSObject(structure, result, scratch); -} - -#if ENABLE(VALUE_PROFILER) -inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile) -{ - ASSERT(shouldEmitProfiling()); - ASSERT(valueProfile); - - const RegisterID value = regT0; -#if USE(JSVALUE32_64) - const RegisterID valueTag = regT1; -#endif - const RegisterID scratch = regT3; - - if (ValueProfile::numberOfBuckets == 1) { - // We're in a simple configuration: only one bucket, so we can just do a direct - // store. -#if USE(JSVALUE64) - store64(value, valueProfile->m_buckets); -#else - EncodedValueDescriptor* descriptor = bitwise_cast(valueProfile->m_buckets); - store32(value, &descriptor->asBits.payload); - store32(valueTag, &descriptor->asBits.tag); -#endif - return; - } - - if (m_randomGenerator.getUint32() & 1) - add32(TrustedImm32(1), bucketCounterRegister); - else - add32(TrustedImm32(3), bucketCounterRegister); - and32(TrustedImm32(ValueProfile::bucketIndexMask), bucketCounterRegister); - move(TrustedImmPtr(valueProfile->m_buckets), scratch); -#if USE(JSVALUE64) - store64(value, BaseIndex(scratch, bucketCounterRegister, TimesEight)); -#elif USE(JSVALUE32_64) - store32(value, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); - store32(valueTag, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); -#endif -} - -inline void JIT::emitValueProfilingSite(unsigned bytecodeOffset) -{ - if (!shouldEmitProfiling()) - return; - emitValueProfilingSite(m_codeBlock->valueProfileForBytecodeOffset(bytecodeOffset)); -} - -inline void JIT::emitValueProfilingSite() -{ - emitValueProfilingSite(m_bytecodeOffset); -} -#endif // ENABLE(VALUE_PROFILER) - -inline void JIT::emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile* arrayProfile) -{ - UNUSED_PARAM(scratch); // We had found this scratch register useful here before, so I will keep it for now. 
- - RegisterID structure = structureAndIndexingType; - RegisterID indexingType = structureAndIndexingType; - - if (canBeOptimized()) - storePtr(structure, arrayProfile->addressOfLastSeenStructure()); - - load8(Address(structure, Structure::indexingTypeOffset()), indexingType); -} - -inline void JIT::emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex) -{ -#if ENABLE(VALUE_PROFILER) - emitArrayProfilingSite(structureAndIndexingType, scratch, m_codeBlock->getOrAddArrayProfile(bytecodeIndex)); -#else - UNUSED_PARAM(bytecodeIndex); - emitArrayProfilingSite(structureAndIndexingType, scratch, 0); -#endif -} - -inline void JIT::emitArrayProfileStoreToHoleSpecialCase(ArrayProfile* arrayProfile) -{ -#if ENABLE(VALUE_PROFILER) - store8(TrustedImm32(1), arrayProfile->addressOfMayStoreToHole()); -#else - UNUSED_PARAM(arrayProfile); -#endif -} - -static inline bool arrayProfileSaw(ArrayModes arrayModes, IndexingType capability) -{ -#if ENABLE(VALUE_PROFILER) - return arrayModesInclude(arrayModes, capability); -#else - UNUSED_PARAM(arrayModes); - UNUSED_PARAM(capability); - return false; -#endif -} - -inline JITArrayMode JIT::chooseArrayMode(ArrayProfile* profile) -{ -#if ENABLE(VALUE_PROFILER) - profile->computeUpdatedPrediction(m_codeBlock); - ArrayModes arrayModes = profile->observedArrayModes(); - if (arrayProfileSaw(arrayModes, DoubleShape)) - return JITDouble; - if (arrayProfileSaw(arrayModes, Int32Shape)) - return JITInt32; - if (arrayProfileSaw(arrayModes, ArrayStorageShape)) - return JITArrayStorage; - return JITContiguous; -#else - UNUSED_PARAM(profile); - return JITContiguous; -#endif -} - -#if USE(JSVALUE32_64) - -inline void JIT::emitLoadTag(int index, RegisterID tag) -{ - RegisterID mappedTag; - if (getMappedTag(index, mappedTag)) { - move(mappedTag, tag); - unmap(tag); - return; - } - - if (m_codeBlock->isConstantRegisterIndex(index)) { - move(Imm32(getConstantOperand(index).tag()), tag); - unmap(tag); - return; - } - - load32(tagFor(index), tag); - unmap(tag); -} - -inline void JIT::emitLoadPayload(int index, RegisterID payload) -{ - RegisterID mappedPayload; - if (getMappedPayload(index, mappedPayload)) { - move(mappedPayload, payload); - unmap(payload); - return; - } - - if (m_codeBlock->isConstantRegisterIndex(index)) { - move(Imm32(getConstantOperand(index).payload()), payload); - unmap(payload); - return; - } - - load32(payloadFor(index), payload); - unmap(payload); -} - -inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload) -{ - move(Imm32(v.payload()), payload); - move(Imm32(v.tag()), tag); -} - -inline void JIT::emitLoad(int index, RegisterID tag, RegisterID payload, RegisterID base) -{ - ASSERT(tag != payload); - - if (base == callFrameRegister) { - ASSERT(payload != base); - emitLoadPayload(index, payload); - emitLoadTag(index, tag); - return; - } - - if (payload == base) { // avoid stomping base - load32(tagFor(index, base), tag); - load32(payloadFor(index, base), payload); - return; - } - - load32(payloadFor(index, base), payload); - load32(tagFor(index, base), tag); -} - -inline void JIT::emitLoad2(int index1, RegisterID tag1, RegisterID payload1, int index2, RegisterID tag2, RegisterID payload2) -{ - if (isMapped(index1)) { - emitLoad(index1, tag1, payload1); - emitLoad(index2, tag2, payload2); - return; - } - emitLoad(index2, tag2, payload2); - emitLoad(index1, tag1, payload1); -} - -inline void JIT::emitLoadDouble(int index, FPRegisterID value) -{ - if 
(m_codeBlock->isConstantRegisterIndex(index)) { - WriteBarrier& inConstantPool = m_codeBlock->constantRegister(index); - loadDouble(&inConstantPool, value); - } else - loadDouble(addressFor(index), value); -} - -inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value) -{ - if (m_codeBlock->isConstantRegisterIndex(index)) { - WriteBarrier& inConstantPool = m_codeBlock->constantRegister(index); - char* bytePointer = reinterpret_cast(&inConstantPool); - convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value); - } else - convertInt32ToDouble(payloadFor(index), value); -} - -inline void JIT::emitStore(int index, RegisterID tag, RegisterID payload, RegisterID base) -{ - store32(payload, payloadFor(index, base)); - store32(tag, tagFor(index, base)); -} - -inline void JIT::emitStoreInt32(int index, RegisterID payload, bool indexIsInt32) -{ - store32(payload, payloadFor(index, callFrameRegister)); - if (!indexIsInt32) - store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister)); -} - -inline void JIT::emitStoreAndMapInt32(int index, RegisterID tag, RegisterID payload, bool indexIsInt32, size_t opcodeLength) -{ - emitStoreInt32(index, payload, indexIsInt32); - map(m_bytecodeOffset + opcodeLength, index, tag, payload); -} - -inline void JIT::emitStoreInt32(int index, TrustedImm32 payload, bool indexIsInt32) -{ - store32(payload, payloadFor(index, callFrameRegister)); - if (!indexIsInt32) - store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister)); -} - -inline void JIT::emitStoreCell(int index, RegisterID payload, bool indexIsCell) -{ - store32(payload, payloadFor(index, callFrameRegister)); - if (!indexIsCell) - store32(TrustedImm32(JSValue::CellTag), tagFor(index, callFrameRegister)); -} - -inline void JIT::emitStoreBool(int index, RegisterID payload, bool indexIsBool) -{ - store32(payload, payloadFor(index, callFrameRegister)); - if (!indexIsBool) - store32(TrustedImm32(JSValue::BooleanTag), tagFor(index, callFrameRegister)); -} - -inline void JIT::emitStoreDouble(int index, FPRegisterID value) -{ - storeDouble(value, addressFor(index)); -} - -inline void JIT::emitStore(int index, const JSValue constant, RegisterID base) -{ - store32(Imm32(constant.payload()), payloadFor(index, base)); - store32(Imm32(constant.tag()), tagFor(index, base)); -} - -ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst) -{ - emitStore(dst, jsUndefined()); -} - -inline bool JIT::isLabeled(unsigned bytecodeOffset) -{ - for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) { - unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex); - if (jumpTarget == bytecodeOffset) - return true; - if (jumpTarget > bytecodeOffset) - return false; - } - return false; -} - -inline void JIT::map(unsigned bytecodeOffset, int virtualRegisterIndex, RegisterID tag, RegisterID payload) -{ - if (isLabeled(bytecodeOffset)) - return; - - m_mappedBytecodeOffset = bytecodeOffset; - m_mappedVirtualRegisterIndex = virtualRegisterIndex; - m_mappedTag = tag; - m_mappedPayload = payload; - - ASSERT(!canBeOptimized() || m_mappedPayload == regT0); - ASSERT(!canBeOptimized() || m_mappedTag == regT1); -} - -inline void JIT::unmap(RegisterID registerID) -{ - if (m_mappedTag == registerID) - m_mappedTag = (RegisterID)-1; - else if (m_mappedPayload == registerID) - m_mappedPayload = (RegisterID)-1; -} - -inline void JIT::unmap() -{ - m_mappedBytecodeOffset = (unsigned)-1; - 
m_mappedVirtualRegisterIndex = JSStack::ReturnPC; - m_mappedTag = (RegisterID)-1; - m_mappedPayload = (RegisterID)-1; -} - -inline bool JIT::isMapped(int virtualRegisterIndex) -{ - if (m_mappedBytecodeOffset != m_bytecodeOffset) - return false; - if (m_mappedVirtualRegisterIndex != virtualRegisterIndex) - return false; - return true; -} - -inline bool JIT::getMappedPayload(int virtualRegisterIndex, RegisterID& payload) -{ - if (m_mappedBytecodeOffset != m_bytecodeOffset) - return false; - if (m_mappedVirtualRegisterIndex != virtualRegisterIndex) - return false; - if (m_mappedPayload == (RegisterID)-1) - return false; - payload = m_mappedPayload; - return true; -} - -inline bool JIT::getMappedTag(int virtualRegisterIndex, RegisterID& tag) -{ - if (m_mappedBytecodeOffset != m_bytecodeOffset) - return false; - if (m_mappedVirtualRegisterIndex != virtualRegisterIndex) - return false; - if (m_mappedTag == (RegisterID)-1) - return false; - tag = m_mappedTag; - return true; -} - -inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex) -{ - if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) { - if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex)) - addSlowCase(jump()); - else - addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex)); - } -} - -inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterID tag) -{ - if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) { - if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex)) - addSlowCase(jump()); - else - addSlowCase(branch32(NotEqual, tag, TrustedImm32(JSValue::CellTag))); - } -} - -ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src) -{ - return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32(); -} - -ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant) -{ - if (isOperandConstantImmediateInt(op1)) { - constant = getConstantOperand(op1).asInt32(); - op = op2; - return true; - } - - if (isOperandConstantImmediateInt(op2)) { - constant = getConstantOperand(op2).asInt32(); - op = op1; - return true; - } - - return false; -} - -#else // USE(JSVALUE32_64) - -/* Deprecated: Please use JITStubCall instead. */ - -ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst) -{ - unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX; - peek64(dst, argumentStackOffset); -} - -ALWAYS_INLINE void JIT::killLastResultRegister() -{ - m_lastResultBytecodeRegister = std::numeric_limits::max(); -} - -// get arg puts an arg from the SF register array into a h/w register -ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst) -{ - ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. - - // TODO: we want to reuse values that are already in registers if we can - add a register allocator! 
- if (m_codeBlock->isConstantRegisterIndex(src)) { - JSValue value = m_codeBlock->getConstant(src); - if (!value.isNumber()) - move(TrustedImm64(JSValue::encode(value)), dst); - else - move(Imm64(JSValue::encode(value)), dst); - killLastResultRegister(); - return; - } - - if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src) && !atJumpTarget()) { - // The argument we want is already stored in eax - if (dst != cachedResultRegister) - move(cachedResultRegister, dst); - killLastResultRegister(); - return; - } - - load64(Address(callFrameRegister, src * sizeof(Register)), dst); - killLastResultRegister(); -} - -ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2) -{ - if (src2 == m_lastResultBytecodeRegister) { - emitGetVirtualRegister(src2, dst2); - emitGetVirtualRegister(src1, dst1); - } else { - emitGetVirtualRegister(src1, dst1); - emitGetVirtualRegister(src2, dst2); - } -} - -ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src) -{ - return getConstantOperand(src).asInt32(); -} - -ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src) -{ - return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32(); -} - -ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from) -{ - store64(from, Address(callFrameRegister, dst * sizeof(Register))); - m_lastResultBytecodeRegister = (from == cachedResultRegister) ? static_cast(dst) : std::numeric_limits::max(); -} - -ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst) -{ - store64(TrustedImm64(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register))); -} - -ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg) -{ - return branchTest64(Zero, reg, tagMaskRegister); -} - -ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch) -{ - move(reg1, scratch); - or64(reg2, scratch); - return emitJumpIfJSCell(scratch); -} - -ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg) -{ - addSlowCase(emitJumpIfJSCell(reg)); -} - -ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg) -{ - return branchTest64(NonZero, reg, tagMaskRegister); -} - -ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg) -{ - addSlowCase(emitJumpIfNotJSCell(reg)); -} - -ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg) -{ - if (!m_codeBlock->isKnownNotImmediate(vReg)) - emitJumpSlowCaseIfNotJSCell(reg); -} - -inline void JIT::emitLoadDouble(int index, FPRegisterID value) -{ - if (m_codeBlock->isConstantRegisterIndex(index)) { - WriteBarrier& inConstantPool = m_codeBlock->constantRegister(index); - loadDouble(&inConstantPool, value); - } else - loadDouble(addressFor(index), value); -} - -inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value) -{ - if (m_codeBlock->isConstantRegisterIndex(index)) { - ASSERT(isOperandConstantImmediateInt(index)); - convertInt32ToDouble(Imm32(getConstantOperand(index).asInt32()), value); - } else - convertInt32ToDouble(addressFor(index), value); -} - -ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg) -{ - return branch64(AboveOrEqual, reg, tagTypeNumberRegister); -} - -ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg) -{ - return branch64(Below, reg, tagTypeNumberRegister); -} - -ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch) -{ - 
move(reg1, scratch); - and64(reg2, scratch); - return emitJumpIfNotImmediateInteger(scratch); -} - -ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg) -{ - addSlowCase(emitJumpIfNotImmediateInteger(reg)); -} - -ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch) -{ - addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch)); -} - -ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg) -{ - addSlowCase(emitJumpIfNotImmediateNumber(reg)); -} - -ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest) -{ - emitFastArithIntToImmNoCheck(src, dest); -} - -// operand is int32_t, must have been zero-extended if register is 64-bit. -ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest) -{ - if (src != dest) - move(src, dest); - or64(tagTypeNumberRegister, dest); -} - -ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg) -{ - or32(TrustedImm32(static_cast(ValueFalse)), reg); -} - -#endif // USE(JSVALUE32_64) - -} // namespace JSC - -#endif // ENABLE(JIT) - -#endif // JITInlines_h - diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp index 3053918b8..4fb9d8cd5 100644 --- a/Source/JavaScriptCore/jit/JITOpcodes.cpp +++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp @@ -29,9 +29,9 @@ #include "JIT.h" #include "Arguments.h" -#include "CopiedSpaceInlines.h" +#include "CopiedSpaceInlineMethods.h" #include "Heap.h" -#include "JITInlines.h" +#include "JITInlineMethods.h" #include "JITStubCall.h" #include "JSArray.h" #include "JSCell.h" @@ -1952,7 +1952,6 @@ void JIT::emit_op_new_array(Instruction* currentInstruction) JITStubCall stubCall(this, cti_op_new_array); stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand)); stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand)); - stubCall.addArgument(TrustedImmPtr(currentInstruction[4].u.arrayAllocationProfile)); stubCall.call(currentInstruction[1].u.operand); } @@ -1964,7 +1963,6 @@ void JIT::emit_op_new_array_with_size(Instruction* currentInstruction) #else stubCall.addArgument(currentInstruction[2].u.operand); #endif - stubCall.addArgument(TrustedImmPtr(currentInstruction[3].u.arrayAllocationProfile)); stubCall.call(currentInstruction[1].u.operand); } @@ -1973,7 +1971,6 @@ void JIT::emit_op_new_array_buffer(Instruction* currentInstruction) JITStubCall stubCall(this, cti_op_new_array_buffer); stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand)); stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand)); - stubCall.addArgument(TrustedImmPtr(currentInstruction[4].u.arrayAllocationProfile)); stubCall.call(currentInstruction[1].u.operand); } diff --git a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp index 23361c099..9c5d260ab 100644 --- a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp +++ b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp @@ -30,7 +30,7 @@ #if USE(JSVALUE32_64) #include "JIT.h" -#include "JITInlines.h" +#include "JITInlineMethods.h" #include "JITStubCall.h" #include "JSArray.h" #include "JSCell.h" diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp index 3110be38c..6362598f4 100644 --- a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp +++ b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp @@ -32,7 +32,7 @@ #include "GCAwareJITStubRoutine.h" #include "GetterSetter.h" 
#include "Interpreter.h" -#include "JITInlines.h" +#include "JITInlineMethods.h" #include "JITStubCall.h" #include "JSArray.h" #include "JSFunction.h" @@ -98,7 +98,7 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction) unsigned base = currentInstruction[2].u.operand; unsigned property = currentInstruction[3].u.operand; ArrayProfile* profile = currentInstruction[4].u.arrayProfile; - + emitGetVirtualRegisters(base, regT0, property, regT1); emitJumpSlowCaseIfNotImmediateInteger(regT1); @@ -120,12 +120,6 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction) JITArrayMode mode = chooseArrayMode(profile); switch (mode) { - case JITInt32: - slowCases = emitInt32GetByVal(currentInstruction, badType); - break; - case JITDouble: - slowCases = emitDoubleGetByVal(currentInstruction, badType); - break; case JITContiguous: slowCases = emitContiguousGetByVal(currentInstruction, badType); break; @@ -154,26 +148,11 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction) m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done)); } -JIT::JumpList JIT::emitDoubleGetByVal(Instruction*, PatchableJump& badType) +JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType) { JumpList slowCases; - badType = patchableBranch32(NotEqual, regT2, TrustedImm32(DoubleShape)); - loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2); - slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength()))); - loadDouble(BaseIndex(regT2, regT1, TimesEight), fpRegT0); - slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0)); - moveDoubleTo64(fpRegT0, regT0); - sub64(tagTypeNumberRegister, regT0); - - return slowCases; -} - -JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape) -{ - JumpList slowCases; - - badType = patchableBranch32(NotEqual, regT2, TrustedImm32(expectedShape)); + badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ContiguousShape)); loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2); slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength()))); load64(BaseIndex(regT2, regT1, TimesEight), regT0); @@ -325,12 +304,6 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction) JITArrayMode mode = chooseArrayMode(profile); switch (mode) { - case JITInt32: - slowCases = emitInt32PutByVal(currentInstruction, badType); - break; - case JITDouble: - slowCases = emitDoublePutByVal(currentInstruction, badType); - break; case JITContiguous: slowCases = emitContiguousPutByVal(currentInstruction, badType); break; @@ -352,49 +325,24 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction) emitWriteBarrier(regT0, regT3, regT1, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess); } -template -JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType) +JIT::JumpList JIT::emitContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType) { unsigned value = currentInstruction[3].u.operand; ArrayProfile* profile = currentInstruction[4].u.arrayProfile; - JumpList slowCases; - - badType = patchableBranch32(NotEqual, regT2, TrustedImm32(indexingShape)); + badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ContiguousShape)); loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2); Jump outOfBounds = branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())); Label storeResult = 
label(); emitGetVirtualRegister(value, regT3); - switch (indexingShape) { - case Int32Shape: - slowCases.append(emitJumpIfNotImmediateInteger(regT3)); - store64(regT3, BaseIndex(regT2, regT1, TimesEight)); - break; - case DoubleShape: { - Jump notInt = emitJumpIfNotImmediateInteger(regT3); - convertInt32ToDouble(regT3, fpRegT0); - Jump ready = jump(); - notInt.link(this); - add64(tagTypeNumberRegister, regT3); - move64ToDouble(regT3, fpRegT0); - slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0)); - ready.link(this); - storeDouble(fpRegT0, BaseIndex(regT2, regT1, TimesEight)); - break; - } - case ContiguousShape: - store64(regT3, BaseIndex(regT2, regT1, TimesEight)); - break; - default: - CRASH(); - break; - } + store64(regT3, BaseIndex(regT2, regT1, TimesEight)); Jump done = jump(); outOfBounds.link(this); + JumpList slowCases; slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfVectorLength()))); emitArrayProfileStoreToHoleSpecialCase(profile); @@ -446,23 +394,12 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector -JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType) +JIT::JumpList JIT::emitContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType) { unsigned value = currentInstruction[3].u.operand; ArrayProfile* profile = currentInstruction[4].u.arrayProfile; @@ -332,30 +303,8 @@ JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction Label storeResult = label(); emitLoad(value, regT1, regT0); - switch (indexingShape) { - case Int32Shape: - slowCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag))); - // Fall through. - case ContiguousShape: - store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); - store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); - break; - case DoubleShape: { - Jump notInt = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)); - convertInt32ToDouble(regT0, fpRegT0); - Jump ready = jump(); - notInt.link(this); - moveIntsToDouble(regT0, regT1, fpRegT0, fpRegT1); - slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0)); - ready.link(this); - storeDouble(fpRegT0, BaseIndex(regT3, regT2, TimesEight)); - break; - } - default: - CRASH(); - break; - } - + store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); + store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); Jump done = jump(); outOfBounds.link(this); @@ -415,23 +364,12 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, VectorgetJITType() == JITCode::DFGJIT); if (void* address = DFG::prepareOSREntry(callFrame, optimizedCodeBlock, bytecodeIndex)) { - if (Options::showDFGDisassembly()) { - dataLog( - "Performing OSR from code block %p to code block %p, address %p to %p.\n", - codeBlock, optimizedCodeBlock, (STUB_RETURN_ADDRESS).value(), address); - } #if ENABLE(JIT_VERBOSE_OSR) dataLog("Optimizing %p succeeded, performing OSR after a delay of %u.\n", codeBlock, codeBlock->optimizationDelayCounter()); #endif @@ -2233,21 +2228,21 @@ DEFINE_STUB_FUNCTION(JSObject*, op_new_array) { STUB_INIT_STACK_FRAME(stackFrame); - return constructArray(stackFrame.callFrame, stackFrame.args[2].arrayAllocationProfile(), reinterpret_cast(&stackFrame.callFrame->registers()[stackFrame.args[0].int32()]), stackFrame.args[1].int32()); + return 
constructArray(stackFrame.callFrame, reinterpret_cast(&stackFrame.callFrame->registers()[stackFrame.args[0].int32()]), stackFrame.args[1].int32()); } DEFINE_STUB_FUNCTION(JSObject*, op_new_array_with_size) { STUB_INIT_STACK_FRAME(stackFrame); - return constructArrayWithSizeQuirk(stackFrame.callFrame, stackFrame.args[1].arrayAllocationProfile(), stackFrame.callFrame->lexicalGlobalObject(), stackFrame.args[0].jsValue()); + return constructArrayWithSizeQuirk(stackFrame.callFrame, stackFrame.callFrame->lexicalGlobalObject(), stackFrame.args[0].jsValue()); } DEFINE_STUB_FUNCTION(JSObject*, op_new_array_buffer) { STUB_INIT_STACK_FRAME(stackFrame); - return constructArray(stackFrame.callFrame, stackFrame.args[2].arrayAllocationProfile(), stackFrame.callFrame->codeBlock()->constantBuffer(stackFrame.args[0].int32()), stackFrame.args[1].int32()); + return constructArray(stackFrame.callFrame, stackFrame.callFrame->codeBlock()->constantBuffer(stackFrame.args[0].int32()), stackFrame.args[1].int32()); } DEFINE_STUB_FUNCTION(void, op_init_global_const_check) @@ -2475,7 +2470,7 @@ DEFINE_STUB_FUNCTION(void, op_put_by_val) JSValue baseValue = stackFrame.args[0].jsValue(); JSValue subscript = stackFrame.args[1].jsValue(); JSValue value = stackFrame.args[2].jsValue(); - + if (baseValue.isObject() && subscript.isInt32()) { // See if it's worth optimizing at all. JSObject* object = asObject(baseValue); diff --git a/Source/JavaScriptCore/jit/JITStubs.h b/Source/JavaScriptCore/jit/JITStubs.h index 3bf13bbdf..5761236b1 100644 --- a/Source/JavaScriptCore/jit/JITStubs.h +++ b/Source/JavaScriptCore/jit/JITStubs.h @@ -45,7 +45,6 @@ namespace JSC { struct StructureStubInfo; - class ArrayAllocationProfile; class CodeBlock; class ExecutablePool; class FunctionExecutable; @@ -86,7 +85,6 @@ namespace JSC { ReturnAddressPtr returnAddress() { return ReturnAddressPtr(asPointer); } ResolveOperations* resolveOperations() { return static_cast(asPointer); } PutToBaseOperation* putToBaseOperation() { return static_cast(asPointer); } - ArrayAllocationProfile* arrayAllocationProfile() { return static_cast(asPointer); } }; struct TrampolineStructure { -- cgit v1.2.1
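Aside on the JSVALUE64 helpers deleted earlier in this patch (emitJumpIfImmediateInteger, emitFastArithReTagImmediate, emitFastArithIntToImmNoCheck): they rely on JSC's 64-bit value encoding, in which an int32 is tagged by OR-ing the zero-extended payload with the TagTypeNumber constant held in tagTypeNumberRegister, so tagged integers are exactly the values that compare unsigned greater-or-equal to that constant. The sketch below reproduces that arithmetic in plain, standalone C++; the 0xFFFF000000000000 constant matches the JSValue layout of this era, while encodeInt32/isTaggedInt32/decodeInt32 are illustrative names, not JSC functions.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Tag for immediate int32s in the 64-bit JSValue encoding of this snapshot.
    static const uint64_t TagTypeNumber = 0xFFFF000000000000ull;

    // What emitFastArithIntToImmNoCheck emits: zero-extend, then or64 with the tag.
    uint64_t encodeInt32(int32_t value)
    {
        return TagTypeNumber | static_cast<uint32_t>(value);
    }

    // What emitJumpIfImmediateInteger tests: branch64(AboveOrEqual, reg, tagTypeNumberRegister).
    bool isTaggedInt32(uint64_t encoded)
    {
        return encoded >= TagTypeNumber;
    }

    int32_t decodeInt32(uint64_t encoded)
    {
        assert(isTaggedInt32(encoded));
        return static_cast<int32_t>(encoded); // low 32 bits hold the payload
    }

    int main()
    {
        uint64_t v = encodeInt32(-7);
        printf("tagged=%016llx int=%d\n",
               static_cast<unsigned long long>(v), decodeInt32(v));
        assert(isTaggedInt32(v) && decodeInt32(v) == -7);
        return 0;
    }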