author | Simon Hausmann <simon.hausmann@nokia.com> | 2012-05-07 11:21:11 +0200
---|---|---
committer | Simon Hausmann <simon.hausmann@nokia.com> | 2012-05-07 11:21:11 +0200
commit | 2cf6c8816a73e0132bd8fa3b509d62d7c51b6e47 (patch) |
tree | 988e8c5b116dd0466244ae2fe5af8ee9be926d76 /Source/JavaScriptCore/jit |
parent | dd91e772430dc294e3bf478c119ef8d43c0a3358 (diff) |
download | qtwebkit-2cf6c8816a73e0132bd8fa3b509d62d7c51b6e47.tar.gz |
Imported WebKit commit 7e538425aa020340619e927792f3d895061fb54b (http://svn.webkit.org/repository/webkit/trunk@116286)
Diffstat (limited to 'Source/JavaScriptCore/jit')
-rw-r--r-- | Source/JavaScriptCore/jit/ExecutableAllocator.cpp | 10
-rw-r--r-- | Source/JavaScriptCore/jit/ExecutableAllocator.h | 32
-rw-r--r-- | Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp | 11
-rw-r--r-- | Source/JavaScriptCore/jit/JIT.cpp | 65
-rw-r--r-- | Source/JavaScriptCore/jit/JIT.h | 425
-rw-r--r-- | Source/JavaScriptCore/jit/JITArithmetic.cpp | 36
-rw-r--r-- | Source/JavaScriptCore/jit/JITArithmetic32_64.cpp | 33
-rw-r--r-- | Source/JavaScriptCore/jit/JITCall.cpp | 5
-rw-r--r-- | Source/JavaScriptCore/jit/JITCall32_64.cpp | 3
-rw-r--r-- | Source/JavaScriptCore/jit/JITExceptions.cpp | 2
-rw-r--r-- | Source/JavaScriptCore/jit/JITInlineMethods.h | 16
-rw-r--r-- | Source/JavaScriptCore/jit/JITOpcodes.cpp | 85
-rw-r--r-- | Source/JavaScriptCore/jit/JITOpcodes32_64.cpp | 176
-rw-r--r-- | Source/JavaScriptCore/jit/JITPropertyAccess.cpp | 88
-rw-r--r-- | Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp | 93
-rw-r--r-- | Source/JavaScriptCore/jit/JITStubs.cpp | 182
-rw-r--r-- | Source/JavaScriptCore/jit/JITStubs.h | 4
17 files changed, 472 insertions, 794 deletions
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
index 5912f8652..e30c892e3 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
+++ b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
@@ -29,7 +29,6 @@
 #if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
 
 #include "CodeProfiling.h"
-#include <wtf/DataLog.h>
 #include <wtf/HashSet.h>
 #include <wtf/MetaAllocator.h>
 #include <wtf/PageReservation.h>
@@ -197,16 +196,21 @@ bool ExecutableAllocator::underMemoryPressure()
 
 double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
 {
+    double result;
 #ifdef EXECUTABLE_MEMORY_LIMIT
     size_t bytesAllocated = DemandExecutableAllocator::bytesAllocatedByAllAllocators() + addedMemoryUsage;
     if (bytesAllocated >= EXECUTABLE_MEMORY_LIMIT)
         bytesAllocated = EXECUTABLE_MEMORY_LIMIT;
-    return static_cast<double>(EXECUTABLE_MEMORY_LIMIT) /
+    result = static_cast<double>(EXECUTABLE_MEMORY_LIMIT) /
         (EXECUTABLE_MEMORY_LIMIT - bytesAllocated);
 #else
     UNUSED_PARAM(addedMemoryUsage);
-    return 1.0;
+    result = 1.0;
 #endif
+    if (result < 1.0)
+        result = 1.0;
+    return result;
+
 }
 
 PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(JSGlobalData&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.h b/Source/JavaScriptCore/jit/ExecutableAllocator.h
index 8a14ac67e..1ddf011cb 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocator.h
+++ b/Source/JavaScriptCore/jit/ExecutableAllocator.h
@@ -94,7 +94,7 @@ namespace JSC {
 
 typedef WTF::MetaAllocatorHandle ExecutableMemoryHandle;
 
-#if ENABLE(JIT) && ENABLE(ASSEMBLER)
+#if ENABLE(ASSEMBLER)
 
 #if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
 class DemandExecutableAllocator;
@@ -194,18 +194,24 @@ public:
 #elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(GCC)
     static void cacheFlush(void* code, size_t size)
     {
-        asm volatile (
-            "push {r7}\n"
-            "mov r0, %0\n"
-            "mov r1, %1\n"
-            "mov r7, #0xf0000\n"
-            "add r7, r7, #0x2\n"
-            "mov r2, #0x0\n"
-            "svc 0x0\n"
-            "pop {r7}\n"
-            :
-            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
-            : "r0", "r1", "r2");
+        uintptr_t currentPage = reinterpret_cast<uintptr_t>(code) & ~(pageSize() - 1);
+        uintptr_t lastPage = (reinterpret_cast<uintptr_t>(code) + size) & ~(pageSize() - 1);
+
+        do {
+            asm volatile (
+                "push {r7}\n"
+                "mov r0, %0\n"
+                "mov r1, %1\n"
+                "mov r7, #0xf0000\n"
+                "add r7, r7, #0x2\n"
+                "mov r2, #0x0\n"
+                "svc 0x0\n"
+                "pop {r7}\n"
+                :
+                : "r" (currentPage), "r" (currentPage + pageSize())
+                : "r0", "r1", "r2");
+            currentPage += pageSize();
+        } while (lastPage >= currentPage);
     }
 #elif OS(WINCE)
     static void cacheFlush(void* code, size_t size)
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
index 959ea744b..b4422c3df 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
+++ b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
@@ -59,7 +59,7 @@ public:
         : MetaAllocator(32) // round up all allocations to 32 bytes
     {
         m_reservation = PageReservation::reserveWithGuardPages(fixedPoolSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
-#if !ENABLE(CLASSIC_INTERPRETER)
+#if !(ENABLE(CLASSIC_INTERPRETER) || ENABLE(LLINT))
         if (!m_reservation)
             CRASH();
 #endif
@@ -126,8 +126,13 @@ double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
     size_t bytesAllocated = statistics.bytesAllocated + addedMemoryUsage;
     if (bytesAllocated >= statistics.bytesReserved)
         bytesAllocated = statistics.bytesReserved;
-    return static_cast<double>(statistics.bytesReserved) /
-        (statistics.bytesReserved - bytesAllocated);
+    double result = 1.0;
+    size_t divisor = statistics.bytesReserved - bytesAllocated;
+    if (divisor)
+        result = static_cast<double>(statistics.bytesReserved) / divisor;
+    if (result < 1.0)
+        result = 1.0;
+    return result;
 }
 
 PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(JSGlobalData& globalData, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
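Both copies of memoryPressureMultiplier are hardened by these hunks: the fixed-pool variant now guards its division against a zero divisor (a fully committed pool), and both clamp the result so it never drops below 1.0. A minimal standalone sketch of the same guard logic, with invented parameter names standing in for the allocator statistics:

```cpp
#include <cstddef>

// Standalone sketch (not JSC's real interface): reserved / (reserved - allocated),
// saturated and guarded exactly like the patched functions above.
double memoryPressureMultiplier(size_t bytesReserved, size_t bytesAllocated)
{
    if (bytesAllocated >= bytesReserved)
        bytesAllocated = bytesReserved;   // saturate at the pool size
    double result = 1.0;
    size_t divisor = bytesReserved - bytesAllocated;
    if (divisor)                          // a full pool would otherwise divide by zero
        result = static_cast<double>(bytesReserved) / divisor;
    if (result < 1.0)                     // the multiplier must never shrink limits
        result = 1.0;
    return result;
}
```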
diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp
index 541cc896a..01b1260c9 100644
--- a/Source/JavaScriptCore/jit/JIT.cpp
+++ b/Source/JavaScriptCore/jit/JIT.cpp
@@ -229,12 +229,8 @@ void JIT::privateCompileMainPass()
         DEFINE_BINARY_OP(op_lesseq)
         DEFINE_BINARY_OP(op_greater)
         DEFINE_BINARY_OP(op_greatereq)
-        DEFINE_UNARY_OP(op_is_boolean)
         DEFINE_UNARY_OP(op_is_function)
-        DEFINE_UNARY_OP(op_is_number)
         DEFINE_UNARY_OP(op_is_object)
-        DEFINE_UNARY_OP(op_is_string)
-        DEFINE_UNARY_OP(op_is_undefined)
         DEFINE_UNARY_OP(op_typeof)
 
         DEFINE_OP(op_add)
@@ -269,6 +265,10 @@ void JIT::privateCompileMainPass()
         DEFINE_OP(op_get_scoped_var)
         DEFINE_OP(op_check_has_instance)
         DEFINE_OP(op_instanceof)
+        DEFINE_OP(op_is_undefined)
+        DEFINE_OP(op_is_boolean)
+        DEFINE_OP(op_is_number)
+        DEFINE_OP(op_is_string)
         DEFINE_OP(op_jeq_null)
         DEFINE_OP(op_jfalse)
         DEFINE_OP(op_jmp)
@@ -283,7 +283,6 @@ void JIT::privateCompileMainPass()
         DEFINE_OP(op_jnlesseq)
         DEFINE_OP(op_jngreater)
         DEFINE_OP(op_jngreatereq)
-        DEFINE_OP(op_jsr)
         DEFINE_OP(op_jtrue)
         DEFINE_OP(op_loop)
         DEFINE_OP(op_loop_hint)
@@ -340,7 +339,6 @@ void JIT::privateCompileMainPass()
         DEFINE_OP(op_ret_object_or_this)
         DEFINE_OP(op_rshift)
         DEFINE_OP(op_urshift)
-        DEFINE_OP(op_sret)
         DEFINE_OP(op_strcat)
         DEFINE_OP(op_stricteq)
         DEFINE_OP(op_sub)
@@ -519,6 +517,48 @@ void JIT::privateCompileSlowCases()
 #endif
 }
 
+ALWAYS_INLINE void PropertyStubCompilationInfo::copyToStubInfo(StructureStubInfo& info, LinkBuffer &linkBuffer)
+{
+    ASSERT(bytecodeIndex != std::numeric_limits<unsigned>::max());
+    info.bytecodeIndex = bytecodeIndex;
+    info.callReturnLocation = linkBuffer.locationOf(callReturnLocation);
+    info.hotPathBegin = linkBuffer.locationOf(hotPathBegin);
+
+    switch (m_type) {
+    case MethodCheck: {
+        CodeLocationDataLabelPtr structureToCompareLocation = linkBuffer.locationOf(methodCheckStructureToCompare);
+        info.patch.baseline.methodCheckProtoObj = MacroAssembler::differenceBetweenCodePtr(structureToCompareLocation, linkBuffer.locationOf(methodCheckProtoObj));
+        info.patch.baseline.methodCheckProtoStructureToCompare = MacroAssembler::differenceBetweenCodePtr(structureToCompareLocation, linkBuffer.locationOf(methodCheckProtoStructureToCompare));
+        info.patch.baseline.methodCheckPutFunction = MacroAssembler::differenceBetweenCodePtr(structureToCompareLocation, linkBuffer.locationOf(methodCheckPutFunction));
+        // No break - fall through to GetById.
+    }
+    case GetById: {
+        CodeLocationLabel hotPathBeginLocation = linkBuffer.locationOf(hotPathBegin);
+        info.patch.baseline.u.get.structureToCompare = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getStructureToCompare));
+        info.patch.baseline.u.get.structureCheck = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getStructureCheck));
+#if USE(JSVALUE64)
+        info.patch.baseline.u.get.displacementLabel = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel));
+#else
+        info.patch.baseline.u.get.displacementLabel1 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel1));
+        info.patch.baseline.u.get.displacementLabel2 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel2));
+#endif
+        info.patch.baseline.u.get.putResult = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getPutResult));
+        info.patch.baseline.u.get.coldPathBegin = MacroAssembler::differenceBetweenCodePtr(linkBuffer.locationOf(getColdPathBegin), linkBuffer.locationOf(callReturnLocation));
+        break;
+    }
+    case PutById:
+        CodeLocationLabel hotPathBeginLocation = linkBuffer.locationOf(hotPathBegin);
+        info.patch.baseline.u.put.structureToCompare = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putStructureToCompare));
+#if USE(JSVALUE64)
+        info.patch.baseline.u.put.displacementLabel = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel));
+#else
+        info.patch.baseline.u.put.displacementLabel1 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel1));
+        info.patch.baseline.u.put.displacementLabel2 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel2));
+#endif
+        break;
+    }
+}
+
 JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffort effort)
 {
 #if ENABLE(JIT_VERBOSE_OSR)
@@ -665,18 +705,9 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
             m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeOffset));
     }
 
-    // Link absolute addresses for jsr
-    for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
-        patchBuffer.patch(iter->storeLocation, patchBuffer.locationOf(iter->target).executableAddress());
-
     m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccessCompilationInfo.size());
-    for (unsigned i = 0; i < m_propertyAccessCompilationInfo.size(); ++i) {
-        StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
-        ASSERT(m_propertyAccessCompilationInfo[i].bytecodeIndex != std::numeric_limits<unsigned>::max());
-        info.bytecodeIndex = m_propertyAccessCompilationInfo[i].bytecodeIndex;
-        info.callReturnLocation = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].callReturnLocation);
-        info.hotPathBegin = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].hotPathBegin);
-    }
+    for (unsigned i = 0; i < m_propertyAccessCompilationInfo.size(); ++i)
+        m_propertyAccessCompilationInfo[i].copyToStubInfo(m_codeBlock->structureStubInfo(i), patchBuffer);
 
     m_codeBlock->setNumberOfCallLinkInfos(m_callStructureStubCompilationInfo.size());
     for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
         CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
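The new copyToStubInfo centralizes what used to be scattered per-architecture bookkeeping: instead of asserting that patchable labels sit at hard-coded byte offsets, the JIT measures the real distances once the LinkBuffer is finalized and stores them per stub. A toy illustration of that pattern, using plain pointers instead of JSC's CodeLocation types:

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// Toy stand-in for a linked code location; not the real JSC class.
struct CodeLocation { const uint8_t* ptr; };

// Old scheme: a per-CPU constant like "patchOffsetGetByIdStructure = 7",
// asserted against the emitted code. New scheme: measure after linking.
static int32_t differenceBetween(CodeLocation from, CodeLocation to)
{
    return static_cast<int32_t>(to.ptr - from.ptr);
}

int main()
{
    uint8_t code[64] = {};                          // pretend: finalized JIT code
    CodeLocation hotPathBegin = { code };           // label recorded during codegen
    CodeLocation structureToCompare = { code + 7 }; // wherever the assembler put it
    int32_t offset = differenceBetween(hotPathBegin, structureToCompare);
    assert(offset >= 0);                            // stored per stub, not hard-coded
    printf("structureToCompare is %d bytes into the stub\n", offset);
    return 0;
}
```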
diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h
index 2d2841baf..af5076fb5 100644
--- a/Source/JavaScriptCore/jit/JIT.h
+++ b/Source/JavaScriptCore/jit/JIT.h
@@ -41,7 +41,6 @@
 #define JIT_CLASS_ALIGNMENT
 #endif
 
-#define ASSERT_JIT_OFFSET_UNUSED(variable, actual, expected) ASSERT_WITH_MESSAGE_UNUSED(variable, actual == expected, "JIT Offset \"%s\" should be %d, not %d.\n", #expected, static_cast<int>(expected), static_cast<int>(actual));
 #define ASSERT_JIT_OFFSET(actual, expected) ASSERT_WITH_MESSAGE(actual == expected, "JIT Offset \"%s\" should be %d, not %d.\n", #expected, static_cast<int>(expected), static_cast<int>(actual));
 
 #include "CodeBlock.h"
@@ -147,17 +146,108 @@ namespace JSC {
         }
     };
 
+    enum PropertyStubGetById_T { PropertyStubGetById };
+    enum PropertyStubPutById_T { PropertyStubPutById };
+
     struct PropertyStubCompilationInfo {
+        enum Type { GetById, PutById, MethodCheck } m_type;
+
         unsigned bytecodeIndex;
         MacroAssembler::Call callReturnLocation;
         MacroAssembler::Label hotPathBegin;
-
+        MacroAssembler::DataLabelPtr getStructureToCompare;
+        MacroAssembler::PatchableJump getStructureCheck;
+#if USE(JSVALUE64)
+        MacroAssembler::DataLabelCompact getDisplacementLabel;
+#else
+        MacroAssembler::DataLabelCompact getDisplacementLabel1;
+        MacroAssembler::DataLabelCompact getDisplacementLabel2;
+#endif
+        MacroAssembler::Label getPutResult;
+        MacroAssembler::Label getColdPathBegin;
+        MacroAssembler::DataLabelPtr putStructureToCompare;
+#if USE(JSVALUE64)
+        MacroAssembler::DataLabel32 putDisplacementLabel;
+#else
+        MacroAssembler::DataLabel32 putDisplacementLabel1;
+        MacroAssembler::DataLabel32 putDisplacementLabel2;
+#endif
+        MacroAssembler::DataLabelPtr methodCheckStructureToCompare;
+        MacroAssembler::DataLabelPtr methodCheckProtoObj;
+        MacroAssembler::DataLabelPtr methodCheckProtoStructureToCompare;
+        MacroAssembler::DataLabelPtr methodCheckPutFunction;
+
 #if !ASSERT_DISABLED
         PropertyStubCompilationInfo()
             : bytecodeIndex(std::numeric_limits<unsigned>::max())
         {
         }
 #endif
+
+
+        PropertyStubCompilationInfo(PropertyStubGetById_T, unsigned bytecodeIndex, MacroAssembler::Label hotPathBegin,
+#if USE(JSVALUE64)
+            MacroAssembler::DataLabelPtr structureToCompare, MacroAssembler::PatchableJump structureCheck, MacroAssembler::DataLabelCompact displacementLabel, MacroAssembler::Label putResult)
+#else
+            MacroAssembler::DataLabelPtr structureToCompare, MacroAssembler::PatchableJump structureCheck, MacroAssembler::DataLabelCompact displacementLabel1, MacroAssembler::DataLabelCompact displacementLabel2, MacroAssembler::Label putResult)
+#endif
+            : m_type(GetById)
+            , bytecodeIndex(bytecodeIndex)
+            , hotPathBegin(hotPathBegin)
+            , getStructureToCompare(structureToCompare)
+            , getStructureCheck(structureCheck)
+#if USE(JSVALUE64)
+            , getDisplacementLabel(displacementLabel)
+#else
+            , getDisplacementLabel1(displacementLabel1)
+            , getDisplacementLabel2(displacementLabel2)
+#endif
+            , getPutResult(putResult)
+        {
+        }
+
+        PropertyStubCompilationInfo(PropertyStubPutById_T, unsigned bytecodeIndex, MacroAssembler::Label hotPathBegin,
+#if USE(JSVALUE64)
+            MacroAssembler::DataLabelPtr structureToCompare, MacroAssembler::DataLabel32 displacementLabel)
+#else
+            MacroAssembler::DataLabelPtr structureToCompare, MacroAssembler::DataLabel32 displacementLabel1, MacroAssembler::DataLabel32 displacementLabel2)
+#endif
+            : m_type(PutById)
+            , bytecodeIndex(bytecodeIndex)
+            , hotPathBegin(hotPathBegin)
+            , putStructureToCompare(structureToCompare)
+#if USE(JSVALUE64)
+            , putDisplacementLabel(displacementLabel)
+#else
+            , putDisplacementLabel1(displacementLabel1)
+            , putDisplacementLabel2(displacementLabel2)
+#endif
+        {
+        }
+
+        void slowCaseInfo(PropertyStubGetById_T, MacroAssembler::Label coldPathBegin, MacroAssembler::Call call)
+        {
+            ASSERT(m_type == GetById || m_type == MethodCheck);
+            callReturnLocation = call;
+            getColdPathBegin = coldPathBegin;
+        }
+
+        void slowCaseInfo(PropertyStubPutById_T, MacroAssembler::Call call)
+        {
+            ASSERT(m_type == PutById);
+            callReturnLocation = call;
+        }
+
+        void addMethodCheckInfo(MacroAssembler::DataLabelPtr structureToCompare, MacroAssembler::DataLabelPtr protoObj, MacroAssembler::DataLabelPtr protoStructureToCompare, MacroAssembler::DataLabelPtr putFunction)
+        {
+            m_type = MethodCheck;
+            methodCheckStructureToCompare = structureToCompare;
+            methodCheckProtoObj = protoObj;
+            methodCheckProtoStructureToCompare = protoStructureToCompare;
+            methodCheckPutFunction = putFunction;
+        }
+
+        void copyToStubInfo(StructureStubInfo& info, LinkBuffer &patchBuffer);
     };
 
     struct StructureStubCompilationInfo {
@@ -187,6 +277,7 @@ namespace JSC {
 
     class JIT : private JSInterfaceJIT {
         friend class JITStubCall;
+        friend struct PropertyStubCompilationInfo;
 
         using MacroAssembler::Jump;
         using MacroAssembler::JumpList;
@@ -269,7 +360,7 @@ namespace JSC {
         static void resetPatchPutById(RepatchBuffer&, StructureStubInfo*);
         static void patchGetByIdSelf(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
         static void patchPutByIdReplace(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct);
-        static void patchMethodCallProto(JSGlobalData&, CodeBlock* codeblock, MethodCallLinkInfo&, JSObject*, Structure*, JSObject*, ReturnAddressPtr);
+        static void patchMethodCallProto(JSGlobalData&, CodeBlock* codeblock, MethodCallLinkInfo&, StructureStubInfo&, JSObject*, Structure*, JSObject*, ReturnAddressPtr);
 
         static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ReturnAddressPtr returnAddress)
         {
@@ -280,17 +371,6 @@ namespace JSC {
         static void linkFor(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, JSGlobalData*, CodeSpecializationKind);
 
     private:
-        struct JSRInfo {
-            DataLabelPtr storeLocation;
-            Label target;
-
-            JSRInfo(DataLabelPtr storeLocation, Label targetLocation)
-                : storeLocation(storeLocation)
-                , target(targetLocation)
-            {
-            }
-        };
-
         JIT(JSGlobalData*, CodeBlock* = 0);
 
         void privateCompileMainPass();
@@ -399,49 +479,7 @@ namespace JSC {
         void emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
         void emitBinaryDoubleOp(OpcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters = true, bool op2IsInRegisters = true);
 
-#if CPU(X86)
-        // These architecture specific value are used to enable patching - see comment on op_put_by_id.
-        static const int patchOffsetPutByIdStructure = 7;
-        static const int patchOffsetPutByIdPropertyMapOffset1 = 22;
-        static const int patchOffsetPutByIdPropertyMapOffset2 = 28;
-        // These architecture specific value are used to enable patching - see comment on op_get_by_id.
-        static const int patchOffsetGetByIdStructure = 7;
-        static const int patchOffsetGetByIdBranchToSlowCase = 13;
-        static const int patchOffsetGetByIdPropertyMapOffset1 = 19;
-        static const int patchOffsetGetByIdPropertyMapOffset2 = 22;
-        static const int patchOffsetGetByIdPutResult = 22;
-#if ENABLE(OPCODE_SAMPLING)
-        static const int patchOffsetGetByIdSlowCaseCall = 44;
-#else
-        static const int patchOffsetGetByIdSlowCaseCall = 40;
-#endif
-        static const int patchOffsetOpCallCompareToJump = 6;
-
-        static const int patchOffsetMethodCheckProtoObj = 11;
-        static const int patchOffsetMethodCheckProtoStruct = 18;
-        static const int patchOffsetMethodCheckPutFunction = 29;
-#elif CPU(ARM_TRADITIONAL)
-        // These architecture specific value are used to enable patching - see comment on op_put_by_id.
-        static const int patchOffsetPutByIdStructure = 4;
-        static const int patchOffsetPutByIdPropertyMapOffset1 = 20;
-        static const int patchOffsetPutByIdPropertyMapOffset2 = 28;
-        // These architecture specific value are used to enable patching - see comment on op_get_by_id.
-        static const int patchOffsetGetByIdStructure = 4;
-        static const int patchOffsetGetByIdBranchToSlowCase = 16;
-        static const int patchOffsetGetByIdPropertyMapOffset1 = 20;
-        static const int patchOffsetGetByIdPropertyMapOffset2 = 28;
-        static const int patchOffsetGetByIdPutResult = 36;
-#if ENABLE(OPCODE_SAMPLING)
-        #error "OPCODE_SAMPLING is not yet supported"
-#else
-        static const int patchOffsetGetByIdSlowCaseCall = 48;
-#endif
-        static const int patchOffsetOpCallCompareToJump = 12;
-
-        static const int patchOffsetMethodCheckProtoObj = 12;
-        static const int patchOffsetMethodCheckProtoStruct = 20;
-        static const int patchOffsetMethodCheckPutFunction = 32;
-
+#if CPU(ARM_TRADITIONAL)
         // sequenceOpCall
         static const int sequenceOpCallInstructionSpace = 12;
        static const int sequenceOpCallConstantSpace = 2;
         // sequenceMethodCheck
         static const int sequenceMethodCheckInstructionSpace = 40;
         static const int sequenceMethodCheckConstantSpace = 6;
         // sequenceGetByIdHotPath
         static const int sequenceGetByIdHotPathInstructionSpace = 36;
         static const int sequenceGetByIdHotPathConstantSpace = 4;
         // sequenceGetByIdSlowCase
-        static const int sequenceGetByIdSlowCaseInstructionSpace = 56;
-        static const int sequenceGetByIdSlowCaseConstantSpace = 3;
+        static const int sequenceGetByIdSlowCaseInstructionSpace = 64;
+        static const int sequenceGetByIdSlowCaseConstantSpace = 4;
         // sequencePutById
         static const int sequencePutByIdInstructionSpace = 36;
         static const int sequencePutByIdConstantSpace = 4;
-#elif CPU(ARM_THUMB2)
-        // These architecture specific value are used to enable patching - see comment on op_put_by_id.
-        static const int patchOffsetPutByIdStructure = 10;
-        static const int patchOffsetPutByIdPropertyMapOffset1 = 36;
-        static const int patchOffsetPutByIdPropertyMapOffset2 = 48;
-        // These architecture specific value are used to enable patching - see comment on op_get_by_id.
-        static const int patchOffsetGetByIdStructure = 10;
-        static const int patchOffsetGetByIdBranchToSlowCase = 26;
-        static const int patchOffsetGetByIdPropertyMapOffset1 = 28;
-        static const int patchOffsetGetByIdPropertyMapOffset2 = 30;
-        static const int patchOffsetGetByIdPutResult = 32;
-#if ENABLE(OPCODE_SAMPLING)
-        #error "OPCODE_SAMPLING is not yet supported"
-#else
-        static const int patchOffsetGetByIdSlowCaseCall = 52;
-#endif
-        static const int patchOffsetOpCallCompareToJump = 16;
-
-        static const int patchOffsetMethodCheckProtoObj = 24;
-        static const int patchOffsetMethodCheckProtoStruct = 34;
-        static const int patchOffsetMethodCheckPutFunction = 58;
-
-        // sequenceOpCall
-        static const int sequenceOpCallInstructionSpace = 12;
-        static const int sequenceOpCallConstantSpace = 2;
-        // sequenceMethodCheck
-        static const int sequenceMethodCheckInstructionSpace = 40;
-        static const int sequenceMethodCheckConstantSpace = 6;
-        // sequenceGetByIdHotPath
-        static const int sequenceGetByIdHotPathInstructionSpace = 36;
-        static const int sequenceGetByIdHotPathConstantSpace = 4;
-        // sequenceGetByIdSlowCase
-        static const int sequenceGetByIdSlowCaseInstructionSpace = 40;
-        static const int sequenceGetByIdSlowCaseConstantSpace = 2;
-        // sequencePutById
-        static const int sequencePutByIdInstructionSpace = 36;
-        static const int sequencePutByIdConstantSpace = 4;
-#elif CPU(MIPS)
-#if WTF_MIPS_ISA(1)
-        static const int patchOffsetPutByIdStructure = 16;
-        static const int patchOffsetPutByIdPropertyMapOffset1 = 56;
-        static const int patchOffsetPutByIdPropertyMapOffset2 = 72;
-        static const int patchOffsetGetByIdStructure = 16;
-        static const int patchOffsetGetByIdBranchToSlowCase = 48;
-        static const int patchOffsetGetByIdPropertyMapOffset1 = 56;
-        static const int patchOffsetGetByIdPropertyMapOffset2 = 76;
-        static const int patchOffsetGetByIdPutResult = 96;
-#if ENABLE(OPCODE_SAMPLING)
-        #error "OPCODE_SAMPLING is not yet supported"
-#else
-        static const int patchOffsetGetByIdSlowCaseCall = 68;
-#endif
-        static const int patchOffsetOpCallCompareToJump = 32;
-        static const int patchOffsetMethodCheckProtoObj = 32;
-        static const int patchOffsetMethodCheckProtoStruct = 56;
-        static const int patchOffsetMethodCheckPutFunction = 88;
-#else // WTF_MIPS_ISA(1)
-        static const int patchOffsetPutByIdStructure = 12;
-        static const int patchOffsetPutByIdPropertyMapOffset1 = 48;
-        static const int patchOffsetPutByIdPropertyMapOffset2 = 64;
-        static const int patchOffsetGetByIdStructure = 12;
-        static const int patchOffsetGetByIdBranchToSlowCase = 44;
-        static const int patchOffsetGetByIdPropertyMapOffset1 = 48;
-        static const int patchOffsetGetByIdPropertyMapOffset2 = 64;
-        static const int patchOffsetGetByIdPutResult = 80;
-#if ENABLE(OPCODE_SAMPLING)
-        #error "OPCODE_SAMPLING is not yet supported"
-#else
-        static const int patchOffsetGetByIdSlowCaseCall = 68;
-#endif
-        static const int patchOffsetOpCallCompareToJump = 32;
-        static const int patchOffsetMethodCheckProtoObj = 32;
-        static const int patchOffsetMethodCheckProtoStruct = 52;
-        static const int patchOffsetMethodCheckPutFunction = 84;
-#endif
 #elif CPU(SH4)
-        // These architecture specific value are used to enable patching - see comment on op_put_by_id.
-        static const int patchOffsetGetByIdStructure = 6;
-        static const int patchOffsetPutByIdPropertyMapOffset = 24;
-        static const int patchOffsetPutByIdStructure = 6;
-        // These architecture specific value are used to enable patching - see comment on op_get_by_id.
-        static const int patchOffsetGetByIdBranchToSlowCase = 10;
-        static const int patchOffsetGetByIdPropertyMapOffset = 24;
-        static const int patchOffsetGetByIdPutResult = 24;
-
         // sequenceOpCall
         static const int sequenceOpCallInstructionSpace = 12;
         static const int sequenceOpCallConstantSpace = 2;
@@ -552,30 +506,11 @@ namespace JSC {
         static const int sequenceGetByIdHotPathInstructionSpace = 36;
         static const int sequenceGetByIdHotPathConstantSpace = 5;
         // sequenceGetByIdSlowCase
-        static const int sequenceGetByIdSlowCaseInstructionSpace = 30;
-        static const int sequenceGetByIdSlowCaseConstantSpace = 3;
+        static const int sequenceGetByIdSlowCaseInstructionSpace = 38;
+        static const int sequenceGetByIdSlowCaseConstantSpace = 4;
         // sequencePutById
         static const int sequencePutByIdInstructionSpace = 36;
         static const int sequencePutByIdConstantSpace = 5;
-
-        static const int patchOffsetGetByIdPropertyMapOffset1 = 20;
-        static const int patchOffsetGetByIdPropertyMapOffset2 = 22;
-
-        static const int patchOffsetPutByIdPropertyMapOffset1 = 20;
-        static const int patchOffsetPutByIdPropertyMapOffset2 = 26;
-
-#if ENABLE(OPCODE_SAMPLING)
-        static const int patchOffsetGetByIdSlowCaseCall = 0; // FIMXE
-#else
-        static const int patchOffsetGetByIdSlowCaseCall = 26;
-#endif
-        static const int patchOffsetOpCallCompareToJump = 4;
-
-        static const int patchOffsetMethodCheckProtoObj = 12;
-        static const int patchOffsetMethodCheckProtoStruct = 20;
-        static const int patchOffsetMethodCheckPutFunction = 32;
-#else
-#error "JSVALUE32_64 not supported on this platform."
 #endif
 
 #else // USE(JSVALUE32_64)
@@ -597,17 +532,6 @@ namespace JSC {
         Jump emitJumpIfNotJSCell(RegisterID);
         void emitJumpSlowCaseIfNotJSCell(RegisterID);
         void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg);
-#if USE(JSVALUE32_64)
-        JIT::Jump emitJumpIfImmediateNumber(RegisterID reg)
-        {
-            return emitJumpIfImmediateInteger(reg);
-        }
-
-        JIT::Jump emitJumpIfNotImmediateNumber(RegisterID reg)
-        {
-            return emitJumpIfNotImmediateInteger(reg);
-        }
-#endif
         Jump emitJumpIfImmediateInteger(RegisterID);
         Jump emitJumpIfNotImmediateInteger(RegisterID);
         Jump emitJumpIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
@@ -615,20 +539,12 @@ namespace JSC {
         void emitJumpSlowCaseIfNotImmediateNumber(RegisterID);
         void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
 
-#if USE(JSVALUE32_64)
-        void emitFastArithDeTagImmediate(RegisterID);
-        Jump emitFastArithDeTagImmediateJumpIfZero(RegisterID);
-#endif
         void emitFastArithReTagImmediate(RegisterID src, RegisterID dest);
         void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest);
         void emitTagAsBoolImmediate(RegisterID reg);
         void compileBinaryArithOp(OpcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
-#if USE(JSVALUE64)
         void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase);
-#else
-        void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes);
-#endif
 
         void compileGetByIdHotPath(int baseVReg, Identifier*);
         void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false);
@@ -637,133 +553,6 @@ namespace JSC {
         void compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch);
         void compilePutDirectOffset(RegisterID base, RegisterID value, size_t cachedOffset);
 
-#if CPU(X86_64)
-        // These architecture specific value are used to enable patching - see comment on op_put_by_id.
-        static const int patchOffsetPutByIdStructure = 10;
-        static const int patchOffsetPutByIdPropertyMapOffset = 31;
-        // These architecture specific value are used to enable patching - see comment on op_get_by_id.
-        static const int patchOffsetGetByIdStructure = 10;
-        static const int patchOffsetGetByIdBranchToSlowCase = 20;
-        static const int patchOffsetGetByIdPropertyMapOffset = 28;
-        static const int patchOffsetGetByIdPutResult = 28;
-#if ENABLE(OPCODE_SAMPLING)
-        static const int patchOffsetGetByIdSlowCaseCall = 72;
-#else
-        static const int patchOffsetGetByIdSlowCaseCall = 62;
-#endif
-        static const int patchOffsetOpCallCompareToJump = 9;
-
-        static const int patchOffsetMethodCheckProtoObj = 20;
-        static const int patchOffsetMethodCheckProtoStruct = 30;
-        static const int patchOffsetMethodCheckPutFunction = 50;
-#elif CPU(X86)
-        // These architecture specific value are used to enable patching - see comment on op_put_by_id.
-        static const int patchOffsetPutByIdStructure = 7;
-        static const int patchOffsetPutByIdPropertyMapOffset = 22;
-        // These architecture specific value are used to enable patching - see comment on op_get_by_id.
-        static const int patchOffsetGetByIdStructure = 7;
-        static const int patchOffsetGetByIdBranchToSlowCase = 13;
-        static const int patchOffsetGetByIdPropertyMapOffset = 22;
-        static const int patchOffsetGetByIdPutResult = 22;
-#if ENABLE(OPCODE_SAMPLING)
-        static const int patchOffsetGetByIdSlowCaseCall = 33;
-#else
-        static const int patchOffsetGetByIdSlowCaseCall = 23;
-#endif
-        static const int patchOffsetOpCallCompareToJump = 6;
-
-        static const int patchOffsetMethodCheckProtoObj = 11;
-        static const int patchOffsetMethodCheckProtoStruct = 18;
-        static const int patchOffsetMethodCheckPutFunction = 29;
-#elif CPU(ARM_THUMB2)
-        // These architecture specific value are used to enable patching - see comment on op_put_by_id.
-        static const int patchOffsetPutByIdStructure = 10;
-        static const int patchOffsetPutByIdPropertyMapOffset = 46;
-        // These architecture specific value are used to enable patching - see comment on op_get_by_id.
-        static const int patchOffsetGetByIdStructure = 10;
-        static const int patchOffsetGetByIdBranchToSlowCase = 26;
-        static const int patchOffsetGetByIdPropertyMapOffset = 46;
-        static const int patchOffsetGetByIdPutResult = 50;
-#if ENABLE(OPCODE_SAMPLING)
-        static const int patchOffsetGetByIdSlowCaseCall = 0; // FIMXE
-#else
-        static const int patchOffsetGetByIdSlowCaseCall = 28;
-#endif
-        static const int patchOffsetOpCallCompareToJump = 16;
-
-        static const int patchOffsetMethodCheckProtoObj = 24;
-        static const int patchOffsetMethodCheckProtoStruct = 34;
-        static const int patchOffsetMethodCheckPutFunction = 58;
-#elif CPU(ARM_TRADITIONAL)
-        // These architecture specific value are used to enable patching - see comment on op_put_by_id.
-        static const int patchOffsetPutByIdStructure = 4;
-        static const int patchOffsetPutByIdPropertyMapOffset = 20;
-        // These architecture specific value are used to enable patching - see comment on op_get_by_id.
-        static const int patchOffsetGetByIdStructure = 4;
-        static const int patchOffsetGetByIdBranchToSlowCase = 16;
-        static const int patchOffsetGetByIdPropertyMapOffset = 20;
-        static const int patchOffsetGetByIdPutResult = 28;
-#if ENABLE(OPCODE_SAMPLING)
-        #error "OPCODE_SAMPLING is not yet supported"
-#else
-        static const int patchOffsetGetByIdSlowCaseCall = 28;
-#endif
-        static const int patchOffsetOpCallCompareToJump = 12;
-
-        static const int patchOffsetMethodCheckProtoObj = 12;
-        static const int patchOffsetMethodCheckProtoStruct = 20;
-        static const int patchOffsetMethodCheckPutFunction = 32;
-
-        // sequenceOpCall
-        static const int sequenceOpCallInstructionSpace = 12;
-        static const int sequenceOpCallConstantSpace = 2;
-        // sequenceMethodCheck
-        static const int sequenceMethodCheckInstructionSpace = 40;
-        static const int sequenceMethodCheckConstantSpace = 6;
-        // sequenceGetByIdHotPath
-        static const int sequenceGetByIdHotPathInstructionSpace = 28;
-        static const int sequenceGetByIdHotPathConstantSpace = 3;
-        // sequenceGetByIdSlowCase
-        static const int sequenceGetByIdSlowCaseInstructionSpace = 32;
-        static const int sequenceGetByIdSlowCaseConstantSpace = 2;
-        // sequencePutById
-        static const int sequencePutByIdInstructionSpace = 28;
-        static const int sequencePutByIdConstantSpace = 3;
-#elif CPU(MIPS)
-#if WTF_MIPS_ISA(1)
-        static const int patchOffsetPutByIdStructure = 16;
-        static const int patchOffsetPutByIdPropertyMapOffset = 68;
-        static const int patchOffsetGetByIdStructure = 16;
-        static const int patchOffsetGetByIdBranchToSlowCase = 48;
-        static const int patchOffsetGetByIdPropertyMapOffset = 68;
-        static const int patchOffsetGetByIdPutResult = 88;
-#if ENABLE(OPCODE_SAMPLING)
-        #error "OPCODE_SAMPLING is not yet supported"
-#else
-        static const int patchOffsetGetByIdSlowCaseCall = 40;
-#endif
-        static const int patchOffsetOpCallCompareToJump = 32;
-        static const int patchOffsetMethodCheckProtoObj = 32;
-        static const int patchOffsetMethodCheckProtoStruct = 56;
-        static const int patchOffsetMethodCheckPutFunction = 88;
-#else // WTF_MIPS_ISA(1)
-        static const int patchOffsetPutByIdStructure = 12;
-        static const int patchOffsetPutByIdPropertyMapOffset = 60;
-        static const int patchOffsetGetByIdStructure = 12;
-        static const int patchOffsetGetByIdBranchToSlowCase = 44;
-        static const int patchOffsetGetByIdPropertyMapOffset = 60;
-        static const int patchOffsetGetByIdPutResult = 76;
-#if ENABLE(OPCODE_SAMPLING)
-        #error "OPCODE_SAMPLING is not yet supported"
-#else
-        static const int patchOffsetGetByIdSlowCaseCall = 40;
-#endif
-        static const int patchOffsetOpCallCompareToJump = 32;
-        static const int patchOffsetMethodCheckProtoObj = 32;
-        static const int patchOffsetMethodCheckProtoStruct = 52;
-        static const int patchOffsetMethodCheckPutFunction = 84;
-#endif
-#endif
 #endif // USE(JSVALUE32_64)
 
 #if (defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL)
@@ -775,9 +564,9 @@ namespace JSC {
         void endUninterruptedSequence(int, int, int);
 #else
-#define BEGIN_UNINTERRUPTED_SEQUENCE(name) do { beginUninterruptedSequence(); } while (false)
-#define END_UNINTERRUPTED_SEQUENCE(name) do { endUninterruptedSequence(); } while (false)
-#define END_UNINTERRUPTED_SEQUENCE_FOR_PUT(name, dst) do { endUninterruptedSequence(); } while (false)
+#define BEGIN_UNINTERRUPTED_SEQUENCE(name)
+#define END_UNINTERRUPTED_SEQUENCE(name)
+#define END_UNINTERRUPTED_SEQUENCE_FOR_PUT(name, dst)
 #endif
 
         void emit_compareAndJump(OpcodeID, unsigned op1, unsigned op2, unsigned target, RelationalCondition);
@@ -815,6 +604,10 @@ namespace JSC {
         void emit_op_init_lazy_reg(Instruction*);
         void emit_op_check_has_instance(Instruction*);
         void emit_op_instanceof(Instruction*);
+        void emit_op_is_undefined(Instruction*);
+        void emit_op_is_boolean(Instruction*);
+        void emit_op_is_number(Instruction*);
+        void emit_op_is_string(Instruction*);
         void emit_op_jeq_null(Instruction*);
         void emit_op_jfalse(Instruction*);
         void emit_op_jmp(Instruction*);
@@ -829,7 +622,6 @@ namespace JSC {
         void emit_op_jnlesseq(Instruction*);
         void emit_op_jngreater(Instruction*);
         void emit_op_jngreatereq(Instruction*);
-        void emit_op_jsr(Instruction*);
         void emit_op_jtrue(Instruction*);
         void emit_op_loop(Instruction*);
         void emit_op_loop_hint(Instruction*);
@@ -883,7 +675,6 @@ namespace JSC {
         void emit_op_ret(Instruction*);
         void emit_op_ret_object_or_this(Instruction*);
         void emit_op_rshift(Instruction*);
-        void emit_op_sret(Instruction*);
         void emit_op_strcat(Instruction*);
         void emit_op_stricteq(Instruction*);
         void emit_op_sub(Instruction*);
@@ -898,9 +689,6 @@ namespace JSC {
         void emit_op_to_primitive(Instruction*);
         void emit_op_unexpected_load(Instruction*);
         void emit_op_urshift(Instruction*);
-#if ENABLE(JIT_USE_SOFT_MODULO)
-        void softModulo();
-#endif
 
         void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_bitand(Instruction*, Vector<SlowCaseEntry>::iterator&);
@@ -1069,7 +857,6 @@ namespace JSC {
         Vector<JumpTable> m_jmpTable;
 
         unsigned m_bytecodeOffset;
-        Vector<JSRInfo> m_jsrSites;
         Vector<SlowCaseEntry> m_slowCases;
         Vector<SwitchRecord> m_switches;
diff --git a/Source/JavaScriptCore/jit/JITArithmetic.cpp b/Source/JavaScriptCore/jit/JITArithmetic.cpp
index 1b32e3bcf..a9390e35f 100644
--- a/Source/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/Source/JavaScriptCore/jit/JITArithmetic.cpp
@@ -732,7 +732,7 @@ void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEn
 
 /* ------------------------------ BEGIN: OP_MOD ------------------------------ */
 
-#if CPU(X86) || CPU(X86_64) || CPU(MIPS)
+#if CPU(X86) || CPU(X86_64)
 
 void JIT::emit_op_mod(Instruction* currentInstruction)
 {
@@ -740,20 +740,25 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
     unsigned op1 = currentInstruction[2].u.operand;
     unsigned op2 = currentInstruction[3].u.operand;
 
-#if CPU(X86) || CPU(X86_64)
     // Make sure registers are correct for x86 IDIV instructions.
     ASSERT(regT0 == X86Registers::eax);
     ASSERT(regT1 == X86Registers::edx);
     ASSERT(regT2 == X86Registers::ecx);
-#endif
 
-    emitGetVirtualRegisters(op1, regT0, op2, regT2);
-    emitJumpSlowCaseIfNotImmediateInteger(regT0);
+    emitGetVirtualRegisters(op1, regT3, op2, regT2);
+    emitJumpSlowCaseIfNotImmediateInteger(regT3);
     emitJumpSlowCaseIfNotImmediateInteger(regT2);
 
-    addSlowCase(branchPtr(Equal, regT2, TrustedImmPtr(JSValue::encode(jsNumber(0)))));
+    move(regT3, regT0);
+    addSlowCase(branchTest32(Zero, regT2));
+    Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
+    addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
+    denominatorNotNeg1.link(this);
     m_assembler.cdq();
     m_assembler.idivl_r(regT2);
+    Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
+    addSlowCase(branchTest32(Zero, regT1));
+    numeratorPositive.link(this);
     emitFastArithReTagImmediate(regT1, regT0);
     emitPutVirtualRegister(result);
 }
@@ -765,13 +770,15 @@ void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>
     linkSlowCase(iter);
     linkSlowCase(iter);
     linkSlowCase(iter);
+    linkSlowCase(iter);
+    linkSlowCase(iter);
     JITStubCall stubCall(this, cti_op_mod);
-    stubCall.addArgument(regT0);
+    stubCall.addArgument(regT3);
     stubCall.addArgument(regT2);
     stubCall.call(result);
 }
 
-#else // CPU(X86) || CPU(X86_64) || CPU(MIPS)
+#else // CPU(X86) || CPU(X86_64)
 
 void JIT::emit_op_mod(Instruction* currentInstruction)
 {
@@ -787,20 +794,7 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
 
 void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-#if ENABLE(JIT_USE_SOFT_MODULO)
-    unsigned result = currentInstruction[1].u.operand;
-    unsigned op1 = currentInstruction[2].u.operand;
-    unsigned op2 = currentInstruction[3].u.operand;
-    linkSlowCase(iter);
-    linkSlowCase(iter);
-    linkSlowCase(iter);
-    JITStubCall stubCall(this, cti_op_mod);
-    stubCall.addArgument(op1, regT2);
-    stubCall.addArgument(op2, regT2);
-    stubCall.call(result);
-#else
     ASSERT_NOT_REACHED();
-#endif
 }
 
 #endif // CPU(X86) || CPU(X86_64)
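The rewritten fast path adds slow cases for precisely the inputs where x86 idiv misbehaves or where machine arithmetic diverges from JavaScript: a zero divisor, INT_MIN % -1 (the quotient overflows and idiv raises a divide error), and a zero remainder with a negative numerator (JavaScript requires -0, which an immediate int cannot represent). A standalone rendering of the same case analysis:

```cpp
#include <climits>
#include <cstdio>

// Illustrative only: the checks guarding the JIT's idiv fast path for "a % b".
// Returns false where the JIT would bail out to the cti_op_mod stub.
static bool fastIntMod(int a, int b, int& remainder)
{
    if (b == 0)
        return false;                 // idiv would fault with a divide error
    if (a == INT_MIN && b == -1)
        return false;                 // quotient (2^31) overflows; idiv raises #DE
    remainder = a % b;
    if (remainder == 0 && a < 0)
        return false;                 // JS result is -0, not representable as an int
    return true;
}

int main()
{
    int r = 0;
    printf("7 %% 3: fast=%d r=%d\n", fastIntMod(7, 3, r), r);
    printf("-6 %% 3: fast=%d (needs -0)\n", fastIntMod(-6, 3, r));
    printf("INT_MIN %% -1: fast=%d (overflow)\n", fastIntMod(INT_MIN, -1, r));
    return 0;
}
```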
diff --git a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
index 11a758103..62a359eeb 100644
--- a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
@@ -1202,25 +1202,28 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
     unsigned op1 = currentInstruction[2].u.operand;
     unsigned op2 = currentInstruction[3].u.operand;
 
-#if ENABLE(JIT_USE_SOFT_MODULO)
-
 #if CPU(X86) || CPU(X86_64)
     // Make sure registers are correct for x86 IDIV instructions.
     ASSERT(regT0 == X86Registers::eax);
     ASSERT(regT1 == X86Registers::edx);
     ASSERT(regT2 == X86Registers::ecx);
     ASSERT(regT3 == X86Registers::ebx);
-#endif
 
-    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+    emitLoad2(op1, regT0, regT3, op2, regT1, regT2);
     addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
-    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
-
-    addSlowCase(branch32(Equal, regT2, TrustedImm32(0)));
-
-    emitNakedCall(m_globalData->jitStubs->ctiSoftModulo());
-
-    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+    addSlowCase(branch32(NotEqual, regT0, TrustedImm32(JSValue::Int32Tag)));
+
+    move(regT3, regT0);
+    addSlowCase(branchTest32(Zero, regT2));
+    Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
+    addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
+    denominatorNotNeg1.link(this);
+    m_assembler.cdq();
+    m_assembler.idivl_r(regT2);
+    Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
+    addSlowCase(branchTest32(Zero, regT1));
+    numeratorPositive.link(this);
+    emitStoreInt32(dst, regT1, (op1 == dst || op2 == dst));
 #else
     JITStubCall stubCall(this, cti_op_mod);
     stubCall.addArgument(op1);
@@ -1231,13 +1234,15 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
 
 void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-#if ENABLE(JIT_USE_SOFT_MODULO)
+#if CPU(X86) || CPU(X86_64)
     unsigned result = currentInstruction[1].u.operand;
     unsigned op1 = currentInstruction[2].u.operand;
     unsigned op2 = currentInstruction[3].u.operand;
     linkSlowCase(iter);
     linkSlowCase(iter);
     linkSlowCase(iter);
+    linkSlowCase(iter);
+    linkSlowCase(iter);
     JITStubCall stubCall(this, cti_op_mod);
     stubCall.addArgument(op1);
     stubCall.addArgument(op2);
@@ -1245,7 +1250,9 @@ void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>
 #else
     UNUSED_PARAM(currentInstruction);
     UNUSED_PARAM(iter);
-    ASSERT_NOT_REACHED();
+    // We would have really useful assertions here if it wasn't for the compiler's
+    // insistence on attribute noreturn.
+    // ASSERT_NOT_REACHED();
 #endif
 }
diff --git a/Source/JavaScriptCore/jit/JITCall.cpp b/Source/JavaScriptCore/jit/JITCall.cpp
index 73d017d05..7664eb746 100644
--- a/Source/JavaScriptCore/jit/JITCall.cpp
+++ b/Source/JavaScriptCore/jit/JITCall.cpp
@@ -161,8 +161,10 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
         int registerOffset = instruction[3].u.operand;
 
         addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT1);
-        store32(TrustedImm32(argCount), Address(regT1, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
+        store32(TrustedImm32(argCount), Address(regT1, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
     } // regT1 holds newCallFrame with ArgumentCount initialized.
+
+    store32(TrustedImm32(instruction - m_codeBlock->instructions().begin()), Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
     emitGetVirtualRegister(callee, regT0); // regT0 holds callee.
 
     storePtr(callFrameRegister, Address(regT1, RegisterFile::CallerFrame * static_cast<int>(sizeof(Register))));
@@ -180,7 +182,6 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
     END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
 
     addSlowCase(slowCase);
-    ASSERT_JIT_OFFSET(differenceBetween(addressOfLinkedFunctionCheck, slowCase), patchOffsetOpCallCompareToJump);
     ASSERT(m_callStructureStubCompilationInfo.size() == callLinkInfoIndex);
     m_callStructureStubCompilationInfo.append(StructureStubCompilationInfo());
     m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
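The added store writes the caller's bytecode offset into the tag half of the ArgumentCount call-frame slot, whose payload half already holds the argument count, so a single Register carries both values for stack walkers. A hedged illustration of that slot layout (field names invented; little-endian layout assumed):

```cpp
#include <cstdint>
#include <cstdio>

// One 64-bit call-frame slot reused for two unrelated 32-bit values, in the
// spirit of RegisterFile::ArgumentCount after this change. This union is only
// an illustration, not JSC's EncodedValueDescriptor.
union ArgumentCountSlot {
    uint64_t raw;
    struct {
        uint32_t payload; // argument count of the callee frame
        uint32_t tag;     // bytecode offset of the call site in the caller
    } asBits;
};

int main()
{
    ArgumentCountSlot slot;
    slot.raw = 0;
    slot.asBits.payload = 3;   // a call passing three arguments
    slot.asBits.tag = 142;     // pretend the call instruction sits at bytecode 142
    printf("argc=%u callSiteBytecodeOffset=%u\n", slot.asBits.payload, slot.asBits.tag);
    return 0;
}
```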
diff --git a/Source/JavaScriptCore/jit/JITCall32_64.cpp b/Source/JavaScriptCore/jit/JITCall32_64.cpp
index 7fb6c78b9..81266052b 100644
--- a/Source/JavaScriptCore/jit/JITCall32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITCall32_64.cpp
@@ -245,6 +245,8 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
         store32(TrustedImm32(argCount), payloadFor(RegisterFile::ArgumentCount, regT3));
     } // regT3 holds newCallFrame with ArgumentCount initialized.
+
+    storePtr(TrustedImmPtr(instruction), tagFor(RegisterFile::ArgumentCount, callFrameRegister));
     emitLoad(callee, regT1, regT0); // regT1, regT0 holds callee.
 
     storePtr(callFrameRegister, Address(regT3, RegisterFile::CallerFrame * static_cast<int>(sizeof(Register))));
@@ -264,7 +266,6 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
 
     addSlowCase(slowCase);
     addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
-    ASSERT_JIT_OFFSET(differenceBetween(addressOfLinkedFunctionCheck, slowCase), patchOffsetOpCallCompareToJump);
     ASSERT(m_callStructureStubCompilationInfo.size() == callLinkInfoIndex);
     m_callStructureStubCompilationInfo.append(StructureStubCompilationInfo());
     m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
diff --git a/Source/JavaScriptCore/jit/JITExceptions.cpp b/Source/JavaScriptCore/jit/JITExceptions.cpp
index ab1180716..2955fac8d 100644
--- a/Source/JavaScriptCore/jit/JITExceptions.cpp
+++ b/Source/JavaScriptCore/jit/JITExceptions.cpp
@@ -32,7 +32,7 @@
 #include "JSGlobalData.h"
 #include "JSValue.h"
 
-#if ENABLE(ASSEMBLER)
+#if ENABLE(JIT)
 
 namespace JSC {
diff --git a/Source/JavaScriptCore/jit/JITInlineMethods.h b/Source/JavaScriptCore/jit/JITInlineMethods.h
index 998d5ac18..cd33821f2 100644
--- a/Source/JavaScriptCore/jit/JITInlineMethods.h
+++ b/Source/JavaScriptCore/jit/JITInlineMethods.h
@@ -132,7 +132,6 @@ ALWAYS_INLINE bool JIT::atJumpTarget()
 
 ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
 {
-    JSInterfaceJIT::beginUninterruptedSequence();
 #if CPU(ARM_TRADITIONAL)
 #ifndef NDEBUG
     // Ensure the label after the sequence can also fit
@@ -182,7 +181,6 @@ ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace,
     ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) <= insnSpace);
     ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin <= constSpace);
 #endif
-    JSInterfaceJIT::endUninterruptedSequence();
 }
 
 #endif
@@ -414,12 +412,12 @@ template <typename ClassType, bool destructor, typename StructureType> inline vo
         allocator = &m_globalData->heap.allocatorForObjectWithDestructor(sizeof(ClassType));
     else
         allocator = &m_globalData->heap.allocatorForObjectWithoutDestructor(sizeof(ClassType));
-    loadPtr(&allocator->m_firstFreeCell, result);
+    loadPtr(&allocator->m_freeList.head, result);
     addSlowCase(branchTestPtr(Zero, result));
 
     // remove the object from the free list
     loadPtr(Address(result), storagePtr);
-    storePtr(storagePtr, &allocator->m_firstFreeCell);
+    storePtr(storagePtr, &allocator->m_freeList.head);
 
     // initialize the object's structure
     storePtr(structure, Address(result, JSCell::structureOffset()));
@@ -485,12 +483,13 @@ inline void JIT::emitAllocateJSArray(unsigned valuesRegister, unsigned length, R
     unsigned initialLength = std::max(length, 4U);
     size_t initialStorage = JSArray::storageSize(initialLength);
 
+    // We allocate the backing store first to ensure that garbage collection
+    // doesn't happen during JSArray initialization.
+    emitAllocateBasicStorage(initialStorage, storageResult, storagePtr);
+
     // Allocate the cell for the array.
     emitAllocateBasicJSObject<JSArray, false>(TrustedImmPtr(m_codeBlock->globalObject()->arrayStructure()), cellResult, storagePtr);
 
-    // Allocate the backing store for the array.
-    emitAllocateBasicStorage(initialStorage, storageResult, storagePtr);
-
     // Store all the necessary info in the ArrayStorage.
     storePtr(storageResult, Address(storageResult, ArrayStorage::allocBaseOffset()));
     store32(Imm32(length), Address(storageResult, ArrayStorage::lengthOffset()));
@@ -503,8 +502,7 @@ inline void JIT::emitAllocateJSArray(unsigned valuesRegister, unsigned length, R
     store32(Imm32(initialLength), Address(cellResult, JSArray::vectorLengthOffset()));
     store32(TrustedImm32(0), Address(cellResult, JSArray::indexBiasOffset()));
 
-    // Initialize the subclass data and the sparse value map.
-    storePtr(TrustedImmPtr(0), Address(cellResult, JSArray::subclassDataOffset()));
+    // Initialize the sparse value map.
     storePtr(TrustedImmPtr(0), Address(cellResult, JSArray::sparseValueMapOffset()));
 
     // Store the values we have.
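The reordering in emitAllocateJSArray is a GC-safety fix: the storage allocation is the step that can trigger a collection, and if the cell were created first, the collector could scan a JSArray whose fields are still garbage. Allocating the backing store first leaves no GC point between creating the cell and initializing it. A sketch of the invariant, with hypothetical malloc-backed stand-ins for the two allocators:

```cpp
#include <cstdlib>

// Hypothetical stand-ins for JSC's allocators; imagine a collection can run
// inside allocateStorage's slow path, and that the GC scans every Cell.
struct Storage { };
struct Cell { Storage* storage; };

static Storage* allocateStorage(size_t bytes) { return static_cast<Storage*>(std::malloc(bytes)); }
static Cell* allocateCell() { return static_cast<Cell*>(std::malloc(sizeof(Cell))); }

// Order matters: take the GC-triggering allocation first, so the collector can
// never observe a cell whose fields are uninitialized.
static Cell* allocateArray(size_t storageBytes)
{
    Storage* storage = allocateStorage(storageBytes); // GC may run: no cell exists yet
    Cell* cell = allocateCell();                      // no further allocation before init
    cell->storage = storage;                          // cell is complete at the next GC point
    return cell;
}

int main() { return allocateArray(64) ? 0 : 1; }
```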
diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp
index 2db82bf4a..d68f4109d 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp
@@ -232,6 +232,7 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
     Label nativeCallThunk = align();
 
     emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+    storePtr(callFrameRegister, &m_globalData->topCallFrame);
 
 #if CPU(X86_64)
     // Load caller frame's scope chain into this callframe so that whatever we call can
@@ -465,6 +466,69 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
     emitPutVirtualRegister(dst);
 }
 
+void JIT::emit_op_is_undefined(Instruction* currentInstruction)
+{
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned value = currentInstruction[2].u.operand;
+
+    emitGetVirtualRegister(value, regT0);
+    Jump isCell = emitJumpIfJSCell(regT0);
+
+    comparePtr(Equal, regT0, TrustedImm32(ValueUndefined), regT0);
+    Jump done = jump();
+
+    isCell.link(this);
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+    test8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined), regT0);
+
+    done.link(this);
+    emitTagAsBoolImmediate(regT0);
+    emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_is_boolean(Instruction* currentInstruction)
+{
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned value = currentInstruction[2].u.operand;
+
+    emitGetVirtualRegister(value, regT0);
+    xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
+    testPtr(Zero, regT0, TrustedImm32(static_cast<int32_t>(~1)), regT0);
+    emitTagAsBoolImmediate(regT0);
+    emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_is_number(Instruction* currentInstruction)
+{
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned value = currentInstruction[2].u.operand;
+
+    emitGetVirtualRegister(value, regT0);
+    testPtr(NonZero, regT0, tagTypeNumberRegister, regT0);
+    emitTagAsBoolImmediate(regT0);
+    emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_is_string(Instruction* currentInstruction)
+{
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned value = currentInstruction[2].u.operand;
+
+    emitGetVirtualRegister(value, regT0);
+    Jump isNotCell = emitJumpIfNotJSCell(regT0);
+
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+    compare8(Equal, Address(regT1, Structure::typeInfoTypeOffset()), TrustedImm32(StringType), regT0);
+    emitTagAsBoolImmediate(regT0);
+    Jump done = jump();
+
+    isNotCell.link(this);
+    move(TrustedImm32(ValueFalse), regT0);
+
+    done.link(this);
+    emitPutVirtualRegister(dst);
+}
+
 void JIT::emit_op_call(Instruction* currentInstruction)
 {
     compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
@@ -742,22 +806,6 @@ void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
     addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(JSValue(ptr)))), target);
 }
 
-void JIT::emit_op_jsr(Instruction* currentInstruction)
-{
-    int retAddrDst = currentInstruction[1].u.operand;
-    int target = currentInstruction[2].u.operand;
-    DataLabelPtr storeLocation = storePtrWithPatch(TrustedImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
-    addJump(jump(), target);
-    m_jsrSites.append(JSRInfo(storeLocation, label()));
-    killLastResultRegister();
-}
-
-void JIT::emit_op_sret(Instruction* currentInstruction)
-{
-    jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
-    killLastResultRegister();
-}
-
 void JIT::emit_op_eq(Instruction* currentInstruction)
 {
     emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
@@ -1662,11 +1710,14 @@ void JIT::emit_op_new_array(Instruction* currentInstruction)
 
 void JIT::emitSlow_op_new_array(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
+    // If the allocation would be oversize, we will already make the proper stub call above in
+    // emit_op_new_array.
    int length = currentInstruction[3].u.operand;
     if (CopiedSpace::isOversize(JSArray::storageSize(length)))
         return;
-    linkSlowCase(iter); // Not enough space in MarkedSpace for cell.
     linkSlowCase(iter); // Not enough space in CopiedSpace for storage.
+    linkSlowCase(iter); // Not enough space in MarkedSpace for cell.
+
     JITStubCall stubCall(this, cti_op_new_array);
     stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
     stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
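The new JSVALUE64 type queries lean directly on the NaN-boxing bit layout: false and true are encoded as two small constants differing only in the low bit, and every number (immediate int or boxed double) has at least one of the high TagTypeNumber bits set. The sketch below replays those two bit tricks on plain integers; the constants follow JSC's encoding of the era but should be treated as illustrative:

```cpp
#include <cstdint>
#include <cstdio>

// Simplified JSVALUE64-style constants (illustrative; see JSValue.h for the real ones).
static const uint64_t ValueFalse    = 0x06;
static const uint64_t ValueTrue     = 0x07;
static const uint64_t TagTypeNumber = 0xFFFF000000000000ull;

// is_boolean: xor with ValueFalse maps false->0 and true->1; any other value
// keeps bits outside the low bit, so masking with ~1 must yield zero.
static bool isBoolean(uint64_t v) { return ((v ^ ValueFalse) & ~1ull) == 0; }

// is_number: immediate ints have all TagTypeNumber bits set, boxed doubles some
// of them; cells and the other special immediates have none.
static bool isNumber(uint64_t v) { return (v & TagTypeNumber) != 0; }

int main()
{
    printf("true is boolean: %d\n", isBoolean(ValueTrue));          // 1
    printf("0x0A is boolean: %d\n", isBoolean(0x0A));               // undefined-like: 0
    printf("int 42 is number: %d\n", isNumber(TagTypeNumber | 42)); // 1
    return 0;
}
```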
diff --git a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
index b67696f35..76e11e48c 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -42,10 +42,6 @@ namespace JSC {
 
 PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGlobalData* globalData, TrampolineStructure *trampolines)
 {
-#if ENABLE(JIT_USE_SOFT_MODULO)
-    Label softModBegin = align();
-    softModulo();
-#endif
     // (1) This function provides fast property access for string length
     Label stringLengthBegin = align();
 
@@ -222,10 +218,6 @@ PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGl
     trampolines->ctiNativeConstruct = patchBuffer.trampolineAt(nativeConstructThunk);
     trampolines->ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin);
 
-#if ENABLE(JIT_USE_SOFT_MODULO)
-    trampolines->ctiSoftModulo = patchBuffer.trampolineAt(softModBegin);
-#endif
-
     return executableMemory.release();
 }
 
@@ -236,6 +228,7 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
     Label nativeCallThunk = align();
 
     emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+    storePtr(callFrameRegister, &m_globalData->topCallFrame);
 
 #if CPU(X86)
     // Load caller frame's scope chain into this callframe so that whatever we call can
@@ -370,6 +363,7 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(JSGlobalData* globalData, NativeFu
     Call nativeCall;
 
     emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+    storePtr(callFrameRegister, &m_globalData->topCallFrame);
 
 #if CPU(X86)
     // Load caller frame's scope chain into this callframe so that whatever we call can
@@ -639,6 +633,65 @@ void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCas
     stubCall.call(dst);
 }
 
+void JIT::emit_op_is_undefined(Instruction* currentInstruction)
+{
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned value = currentInstruction[2].u.operand;
+
+    emitLoad(value, regT1, regT0);
+    Jump isCell = branch32(Equal, regT1, TrustedImm32(JSValue::CellTag));
+
+    compare32(Equal, regT1, TrustedImm32(JSValue::UndefinedTag), regT0);
+    Jump done = jump();
+
+    isCell.link(this);
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+    test8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined), regT0);
+
+    done.link(this);
+    emitStoreBool(dst, regT0);
+}
+
+void JIT::emit_op_is_boolean(Instruction* currentInstruction)
+{
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned value = currentInstruction[2].u.operand;
+
+    emitLoadTag(value, regT0);
+    compare32(Equal, regT0, TrustedImm32(JSValue::BooleanTag), regT0);
+    emitStoreBool(dst, regT0);
+}
+
+void JIT::emit_op_is_number(Instruction* currentInstruction)
+{
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned value = currentInstruction[2].u.operand;
+
+    emitLoadTag(value, regT0);
+    add32(TrustedImm32(1), regT0);
+    compare32(Below, regT0, TrustedImm32(JSValue::LowestTag + 1), regT0);
+    emitStoreBool(dst, regT0);
+}
+
+void JIT::emit_op_is_string(Instruction* currentInstruction)
+{
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned value = currentInstruction[2].u.operand;
+
+    emitLoad(value, regT1, regT0);
+    Jump isNotCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+    compare8(Equal, Address(regT1, Structure::typeInfoTypeOffset()), TrustedImm32(StringType), regT0);
+    Jump done = jump();
+
+    isNotCell.link(this);
+    move(TrustedImm32(0), regT0);
+
+    done.link(this);
+    emitStoreBool(dst, regT0);
+}
+
 void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
 {
     unsigned activation = currentInstruction[1].u.operand;
@@ -929,20 +982,6 @@ void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
     addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(ptr)), target);
 }
 
-void JIT::emit_op_jsr(Instruction* currentInstruction)
-{
-    int retAddrDst = currentInstruction[1].u.operand;
-    int target = currentInstruction[2].u.operand;
-    DataLabelPtr storeLocation = storePtrWithPatch(TrustedImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
-    addJump(jump(), target);
-    m_jsrSites.append(JSRInfo(storeLocation, label()));
-}
-
-void JIT::emit_op_sret(Instruction* currentInstruction)
-{
-    jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
-}
-
 void JIT::emit_op_eq(Instruction* currentInstruction)
 {
     unsigned dst = currentInstruction[1].u.operand;
@@ -1642,99 +1681,6 @@ void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vecto
     stubCall.call(dst);
 }
 
-#if ENABLE(JIT_USE_SOFT_MODULO)
-void JIT::softModulo()
-{
-    move(regT2, regT3);
-    move(regT0, regT2);
-    move(TrustedImm32(0), regT1);
-
-    JumpList exitBranch;
-
-    // Check for negative result reminder
-    Jump positiveRegT3 = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
-    neg32(regT3);
-    xor32(TrustedImm32(1), regT1);
-    positiveRegT3.link(this);
-
-    Jump positiveRegT2 = branch32(GreaterThanOrEqual, regT2, TrustedImm32(0));
-    neg32(regT2);
-    xor32(TrustedImm32(2), regT1);
-    positiveRegT2.link(this);
-
-    // Save the condition for negative reminder
-    push(regT1);
-
-    exitBranch.append(branch32(LessThan, regT2, regT3));
-
-    // Power of two fast case
-    move(regT3, regT0);
-    sub32(TrustedImm32(1), regT0);
-    Jump notPowerOfTwo = branchTest32(NonZero, regT0, regT3);
-    and32(regT0, regT2);
-    exitBranch.append(jump());
-
-    notPowerOfTwo.link(this);
-
-#if CPU(X86) || CPU(X86_64)
-    move(regT2, regT0);
-    m_assembler.cdq();
-    m_assembler.idivl_r(regT3);
-    move(regT1, regT2);
-#elif CPU(MIPS)
-    m_assembler.div(regT2, regT3);
-    m_assembler.mfhi(regT2);
-#else
-    countLeadingZeros32(regT2, regT0);
-    countLeadingZeros32(regT3, regT1);
-    sub32(regT0, regT1);
-
-    Jump useFullTable = branch32(Equal, regT1, TrustedImm32(31));
-
-    neg32(regT1);
-    add32(TrustedImm32(31), regT1);
-
-    int elementSizeByShift = -1;
-#if CPU(ARM)
-    elementSizeByShift = 3;
-#else
-#error "JIT_USE_SOFT_MODULO not yet supported on this platform."
-#endif
-    relativeTableJump(regT1, elementSizeByShift);
-
-    useFullTable.link(this);
-    // Modulo table
-    for (int i = 31; i > 0; --i) {
-#if CPU(ARM_TRADITIONAL)
-        m_assembler.cmp_r(regT2, m_assembler.lsl(regT3, i));
-        m_assembler.sub_r(regT2, regT2, m_assembler.lsl(regT3, i), ARMAssembler::CS);
-#elif CPU(ARM_THUMB2)
-        ShiftTypeAndAmount shift(SRType_LSL, i);
-        m_assembler.sub_S(regT1, regT2, regT3, shift);
-        m_assembler.it(ARMv7Assembler::ConditionCS);
-        m_assembler.mov(regT2, regT1);
-#else
-#error "JIT_USE_SOFT_MODULO not yet supported on this platform."
-#endif
-    }
-
-    Jump lower = branch32(Below, regT2, regT3);
-    sub32(regT3, regT2);
-    lower.link(this);
-#endif
-
-    exitBranch.link(this);
-
-    // Check for negative reminder
-    pop(regT1);
-    Jump positiveResult = branch32(Equal, regT1, TrustedImm32(0));
-    neg32(regT2);
-    positiveResult.link(this);
-
-    move(regT2, regT0);
-    ret();
-}
-#endif // ENABLE(JIT_USE_SOFT_MODULO)
-
 } // namespace JSC
 
 #endif // USE(JSVALUE32_64)
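The 32_64 variant of is_number above condenses "tag is Int32Tag, or below LowestTag (a boxed double)" into one add and one unsigned compare: adding 1 wraps Int32Tag (all ones) to zero and shifts every double tag to at most LowestTag, so both land below LowestTag + 1 while no other tag does. A sketch follows; the tag constants are recalled from JSC's JSVALUE32_64 encoding of the period and should be treated as illustrative:

```cpp
#include <cstdint>
#include <cstdio>

// JSVALUE32_64-style tags (illustrative values; doubles use any tag below LowestTag).
static const uint32_t Int32Tag  = 0xFFFFFFFF;
static const uint32_t CellTag   = 0xFFFFFFFB;
static const uint32_t LowestTag = 0xFFFFFFF9;

// One add + one unsigned compare classifies "int32 or double" in a single test.
static bool isNumberTag(uint32_t tag) { return tag + 1 < LowestTag + 1; }

int main()
{
    printf("int32 tag:  %d\n", isNumberTag(Int32Tag));   // wraps to 0 -> number
    printf("double tag: %d\n", isNumberTag(0x3FF00000)); // high word of 1.0 -> number
    printf("cell tag:   %d\n", isNumberTag(CellTag));    // 0: not a number
    return 0;
}
```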
"JIT_USE_SOFT_MODULO not yet supported on this platform." -#endif - relativeTableJump(regT1, elementSizeByShift); - - useFullTable.link(this); - // Modulo table - for (int i = 31; i > 0; --i) { -#if CPU(ARM_TRADITIONAL) - m_assembler.cmp_r(regT2, m_assembler.lsl(regT3, i)); - m_assembler.sub_r(regT2, regT2, m_assembler.lsl(regT3, i), ARMAssembler::CS); -#elif CPU(ARM_THUMB2) - ShiftTypeAndAmount shift(SRType_LSL, i); - m_assembler.sub_S(regT1, regT2, regT3, shift); - m_assembler.it(ARMv7Assembler::ConditionCS); - m_assembler.mov(regT2, regT1); -#else -#error "JIT_USE_SOFT_MODULO not yet supported on this platform." -#endif - } - - Jump lower = branch32(Below, regT2, regT3); - sub32(regT3, regT2); - lower.link(this); -#endif - - exitBranch.link(this); - - // Check for negative reminder - pop(regT1); - Jump positiveResult = branch32(Equal, regT1, TrustedImm32(0)); - neg32(regT2); - positiveResult.link(this); - - move(regT2, regT0); - ret(); -} -#endif // ENABLE(JIT_USE_SOFT_MODULO) - } // namespace JSC #endif // USE(JSVALUE32_64) diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp index 99c038e55..8c7148c9d 100644 --- a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp +++ b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp @@ -310,10 +310,6 @@ void JIT::emit_op_method_check(Instruction* currentInstruction) Jump match = jump(); - ASSERT_JIT_OFFSET_UNUSED(protoObj, differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj); - ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct); - ASSERT_JIT_OFFSET_UNUSED(putFunction, differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction); - // Link the failure cases here. notCell.link(this); structureCheck.link(this); @@ -329,6 +325,8 @@ void JIT::emit_op_method_check(Instruction* currentInstruction) // We've already generated the following get_by_id, so make sure it's skipped over. 
m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id); + + m_propertyAccessCompilationInfo.last().addMethodCheckInfo(info.structureToCompare, protoObj, protoStructureToCompare, putFunction); } void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) @@ -369,25 +367,19 @@ void JIT::compileGetByIdHotPath(int baseVReg, Identifier*) BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath); Label hotPathBegin(this); - m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo()); - m_propertyAccessCompilationInfo.last().bytecodeIndex = m_bytecodeOffset; - m_propertyAccessCompilationInfo.last().hotPathBegin = hotPathBegin; DataLabelPtr structureToCompare; - Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))); + PatchableJump structureCheck = patchableBranchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))); addSlowCase(structureCheck); - ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure); - ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase) loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT0); DataLabelCompact displacementLabel = loadPtrWithCompactAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0); - ASSERT_JIT_OFFSET_UNUSED(displacementLabel, differenceBetween(hotPathBegin, displacementLabel), patchOffsetGetByIdPropertyMapOffset); Label putResult(this); END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath); - ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult); + m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubGetById, m_bytecodeOffset, hotPathBegin, structureToCompare, structureCheck, displacementLabel, putResult)); } void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) @@ -413,9 +405,7 @@ void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase); -#ifndef NDEBUG Label coldPathBegin(this); -#endif JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id); stubCall.addArgument(regT0); stubCall.addArgument(TrustedImmPtr(ident)); @@ -423,10 +413,8 @@ void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase); - ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall); - // Track the location of the call; this will be used to recover patch information. 
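// The change below replaces the old scheme, in which only the call's return
// location was recorded and every other distance was a compile-time
// patchOffsetGetById* constant (enforced by the ASSERT_JIT_OFFSET checks
// deleted above), with per-site bookkeeping: coldPathBegin is now captured
// alongside the call, so each access site carries its own offsets in
// stubInfo->patch.baseline.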
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].callReturnLocation = call; + m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubGetById, coldPathBegin, call); } void JIT::emit_op_put_by_id(Instruction* currentInstruction) @@ -446,14 +434,10 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction) BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById); Label hotPathBegin(this); - m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo()); - m_propertyAccessCompilationInfo.last().bytecodeIndex = m_bytecodeOffset; - m_propertyAccessCompilationInfo.last().hotPathBegin = hotPathBegin; // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over. DataLabelPtr structureToCompare; addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)))); - ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure); loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT2); DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT2, patchPutByIdDefaultOffset)); @@ -462,7 +446,7 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction) emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess); - ASSERT_JIT_OFFSET_UNUSED(displacementLabel, differenceBetween(hotPathBegin, displacementLabel), patchOffsetPutByIdPropertyMapOffset); + m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubPutById, m_bytecodeOffset, hotPathBegin, structureToCompare, displacementLabel)); } void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) @@ -481,7 +465,7 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase Call call = stubCall.call(); // Track the location of the call; this will be used to recover patch information. - m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].callReturnLocation = call; + m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubPutById, call); } // Compile a store into an object's property storage. May overwrite the @@ -595,8 +579,8 @@ void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, St int offset = sizeof(JSValue) * cachedOffset; // Patch the offset into the propoerty map to load from, then patch the Structure to look for. - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure); - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset), offset); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), structure); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel), offset); } void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct) @@ -610,8 +594,8 @@ void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, int offset = sizeof(JSValue) * cachedOffset; // Patch the offset into the propoerty map to load from, then patch the Structure to look for. 
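// Rough shape of the inline cache being repatched below (a sketch of the
// logic, not the literal instruction sequence):
//
//     if (base->structure != <structureToCompare immediate>)   // patched pointer
//         goto slow_case;
//     base->propertyStorage()[<displacement immediate>] = value; // patched offset
//
// The two repatch() calls overwrite exactly those immediates, now located
// through the per-site offsets stored in stubInfo->patch.baseline.u.put.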
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure); - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), structure); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel), offset); } void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress) @@ -632,18 +616,18 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress) LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock); // Use the patch information to link the failure cases back to the original slow case routine. - CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall); + CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin); patchBuffer.link(failureCases1, slowCaseBegin); patchBuffer.link(failureCases2, slowCaseBegin); // On success return back to the hot patch code, at a point it will perform the store to dest for us. - patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); + patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); // Track the stub we have created so that it will be deleted later. stubInfo->stubRoutine = patchBuffer.finalizeCode(); // Finally patch the jump to slow case back in the hot path to jump here instead. - CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); + CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code())); @@ -689,12 +673,12 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock); // Use the patch information to link the failure cases back to the original slow case routine. - CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall); + CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin); patchBuffer.link(failureCases1, slowCaseBegin); patchBuffer.link(failureCases2, slowCaseBegin); // On success return back to the hot patch code, at a point it will perform the store to dest for us. - patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); + patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); if (needsStubLink) { for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) { @@ -706,7 +690,7 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str stubInfo->stubRoutine = patchBuffer.finalizeCode(); // Finally patch the jump to slow case back in the hot path to jump here instead. 
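// The relink below retargets the inline structure-check branch, roughly:
//
//     before:  structure mismatch -> generic slow case
//     after:   structure mismatch -> stubRoutine (which itself falls back
//              to the slow case through the failure links above)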
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); + CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code())); @@ -753,19 +737,19 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic // Use the patch information to link the failure cases back to the original slow case routine. CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructures->list[currentIndex - 1].stubRoutine.code()); if (!lastProtoBegin) - lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall); + lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin); patchBuffer.link(failureCase, lastProtoBegin); // On success return back to the hot patch code, at a point it will perform the store to dest for us. - patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); + patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); MacroAssemblerCodeRef stubCode = patchBuffer.finalizeCode(); polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubCode, structure, isDirect); // Finally patch the jump to slow case back in the hot path to jump here instead. - CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); + CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode.code())); } @@ -824,13 +808,13 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi patchBuffer.link(failureCases2, lastProtoBegin); // On success return back to the hot patch code, at a point it will perform the store to dest for us. - patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); + patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); MacroAssemblerCodeRef stubCode = patchBuffer.finalizeCode(); prototypeStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubCode, structure, prototypeStructure, isDirect); // Finally patch the jump to slow case back in the hot path to jump here instead. - CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); + CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode.code())); } @@ -893,7 +877,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi patchBuffer.link(bucketsOfFail, lastProtoBegin); // On success return back to the hot patch code, at a point it will perform the store to dest for us. 
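// putResult is the label recorded at the end of the inline fast path; the
// code there stores the loaded value to the destination virtual register,
// so a stub that jumps back to it gets that store performed for free.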
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); + patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); CodeRef stubRoutine = patchBuffer.finalizeCode(); @@ -901,7 +885,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain, isDirect); // Finally patch the jump to slow case back in the hot path to jump here instead. - CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); + CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code())); } @@ -956,17 +940,17 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str } // Use the patch information to link the failure cases back to the original slow case routine. - patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall)); + patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin)); // On success return back to the hot patch code, at a point it will perform the store to dest for us. - patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); + patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); // Track the stub we have created so that it will be deleted later. CodeRef stubRoutine = patchBuffer.finalizeCode(); stubInfo->stubRoutine = stubRoutine; // Finally patch the jump to slow case back in the hot path to jump here instead. 
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); + CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code())); @@ -1048,9 +1032,9 @@ void JIT::emit_op_put_global_var(Instruction* currentInstruction) void JIT::resetPatchGetById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo) { repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_get_by_id); - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), reinterpret_cast<void*>(-1)); - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset), 0); - repatchBuffer.relink(stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase), stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall)); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), reinterpret_cast<void*>(-1)); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel), 0); + repatchBuffer.relink(stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck), stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin)); } void JIT::resetPatchPutById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo) @@ -1059,8 +1043,8 @@ void JIT::resetPatchPutById(RepatchBuffer& repatchBuffer, StructureStubInfo* stu repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id_direct); else repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id); - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), reinterpret_cast<void*>(-1)); - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetPutByIdPropertyMapOffset), 0); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), reinterpret_cast<void*>(-1)); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.put.displacementLabel), 0); } #endif // USE(JSVALUE64) @@ -1135,7 +1119,7 @@ void JIT::testPrototype(JSValue prototype, JumpList& failureCases) failureCases.append(branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototype.asCell()->structure()))); } -void JIT::patchMethodCallProto(JSGlobalData& globalData, CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSObject* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress) +void JIT::patchMethodCallProto(JSGlobalData& globalData, CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, StructureStubInfo& stubInfo, JSObject* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress) { RepatchBuffer repatchBuffer(codeBlock); @@ -1143,9 +1127,9 @@ void JIT::patchMethodCallProto(JSGlobalData& globalData, CodeBlock* codeBlock, M methodCallLinkInfo.cachedStructure.set(globalData, structureLocation, codeBlock->ownerExecutable(), structure); Structure* prototypeStructure = proto->structure(); - methodCallLinkInfo.cachedPrototypeStructure.set(globalData, structureLocation.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), codeBlock->ownerExecutable(), prototypeStructure); - 
methodCallLinkInfo.cachedPrototype.set(globalData, structureLocation.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), codeBlock->ownerExecutable(), proto); - methodCallLinkInfo.cachedFunction.set(globalData, structureLocation.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), codeBlock->ownerExecutable(), callee); + methodCallLinkInfo.cachedPrototypeStructure.set(globalData, structureLocation.dataLabelPtrAtOffset(stubInfo.patch.baseline.methodCheckProtoStructureToCompare), codeBlock->ownerExecutable(), prototypeStructure); + methodCallLinkInfo.cachedPrototype.set(globalData, structureLocation.dataLabelPtrAtOffset(stubInfo.patch.baseline.methodCheckProtoObj), codeBlock->ownerExecutable(), proto); + methodCallLinkInfo.cachedFunction.set(globalData, structureLocation.dataLabelPtrAtOffset(stubInfo.patch.baseline.methodCheckPutFunction), codeBlock->ownerExecutable(), callee); repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_method_check_update)); } diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp index 19abdbd89..550ad0b2e 100644 --- a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp +++ b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp @@ -120,10 +120,6 @@ void JIT::emit_op_method_check(Instruction* currentInstruction) move(TrustedImm32(JSValue::CellTag), regT1); Jump match = jump(); - ASSERT_JIT_OFFSET_UNUSED(protoObj, differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj); - ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct); - ASSERT_JIT_OFFSET_UNUSED(putFunction, differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction); - // Link the failure cases here. structureCheck.link(this); protoStructureCheck.link(this); @@ -139,6 +135,8 @@ void JIT::emit_op_method_check(Instruction* currentInstruction) // We've already generated the following get_by_id, so make sure it's skipped over. 
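// Same bookkeeping as in the JSVALUE64 path: the get_by_id fused with this
// method_check has already been compiled, so it is skipped here as well.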
m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id); + + m_propertyAccessCompilationInfo.last().addMethodCheckInfo(info.structureToCompare, protoObj, protoStructureToCompare, putFunction); } void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) @@ -329,26 +327,20 @@ void JIT::compileGetByIdHotPath() BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath); Label hotPathBegin(this); - m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo()); - m_propertyAccessCompilationInfo.last().bytecodeIndex = m_bytecodeOffset; - m_propertyAccessCompilationInfo.last().hotPathBegin = hotPathBegin; DataLabelPtr structureToCompare; - Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))); + PatchableJump structureCheck = patchableBranchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))); addSlowCase(structureCheck); - ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure); - ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase); loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT2); DataLabelCompact displacementLabel1 = loadPtrWithCompactAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload - ASSERT_JIT_OFFSET_UNUSED(displacementLabel1, differenceBetween(hotPathBegin, displacementLabel1), patchOffsetGetByIdPropertyMapOffset1); DataLabelCompact displacementLabel2 = loadPtrWithCompactAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag - ASSERT_JIT_OFFSET_UNUSED(displacementLabel2, differenceBetween(hotPathBegin, displacementLabel2), patchOffsetGetByIdPropertyMapOffset2); Label putResult(this); - ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult); END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath); + + m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubGetById, m_bytecodeOffset, hotPathBegin, structureToCompare, structureCheck, displacementLabel1, displacementLabel2, putResult)); } void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) @@ -373,9 +365,7 @@ void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<Sl BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase); -#ifndef NDEBUG Label coldPathBegin(this); -#endif JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id); stubCall.addArgument(regT1, regT0); stubCall.addArgument(TrustedImmPtr(ident)); @@ -383,10 +373,8 @@ void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<Sl END_UNINTERRUPTED_SEQUENCE_FOR_PUT(sequenceGetByIdSlowCase, dst); - ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall); - // Track the location of the call; this will be used to recover patch information. 
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].callReturnLocation = call; + m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubGetById, coldPathBegin, call); } void JIT::emit_op_put_by_id(Instruction* currentInstruction) @@ -405,14 +393,10 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction) BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById); Label hotPathBegin(this); - m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo()); - m_propertyAccessCompilationInfo.last().bytecodeIndex = m_bytecodeOffset; - m_propertyAccessCompilationInfo.last().hotPathBegin = hotPathBegin; // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over. DataLabelPtr structureToCompare; addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)))); - ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure); loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT1); DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT1, patchPutByIdDefaultOffset)); // payload @@ -421,9 +405,8 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction) END_UNINTERRUPTED_SEQUENCE(sequencePutById); emitWriteBarrier(regT0, regT2, regT1, regT2, ShouldFilterImmediates, WriteBarrierForPropertyAccess); - - ASSERT_JIT_OFFSET_UNUSED(displacementLabel1, differenceBetween(hotPathBegin, displacementLabel1), patchOffsetPutByIdPropertyMapOffset1); - ASSERT_JIT_OFFSET_UNUSED(displacementLabel2, differenceBetween(hotPathBegin, displacementLabel2), patchOffsetPutByIdPropertyMapOffset2); + + m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubPutById, m_bytecodeOffset, hotPathBegin, structureToCompare, displacementLabel1, displacementLabel2)); } void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) @@ -442,7 +425,7 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase Call call = stubCall.call(); // Track the location of the call; this will be used to recover patch information. - m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].callReturnLocation = call; + m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubPutById, call); } // Compile a store into an object's property storage. May overwrite base. @@ -577,9 +560,9 @@ void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, St int offset = sizeof(JSValue) * cachedOffset; // Patch the offset into the propoerty map to load from, then patch the Structure to look for. 
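// In the JSVALUE32_64 encoding a JSValue occupies two machine words, so the
// fast path loads the payload and the tag through two separately patched
// compact offsets (displacementLabel1 and displacementLabel2 below), rather
// than the single patched load used by the 64-bit path.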
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure); - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), structure); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag } void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct) @@ -593,9 +576,9 @@ void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, int offset = sizeof(JSValue) * cachedOffset; // Patch the offset into the propoerty map to load from, then patch the Structure to look for. - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure); - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), structure); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag } void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress) @@ -619,18 +602,18 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress) LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock); // Use the patch information to link the failure cases back to the original slow case routine. - CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall); + CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin); patchBuffer.link(failureCases1, slowCaseBegin); patchBuffer.link(failureCases2, slowCaseBegin); // On success return back to the hot patch code, at a point it will perform the store to dest for us. - patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); + patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); // Track the stub we have created so that it will be deleted later. stubInfo->stubRoutine = patchBuffer.finalizeCode(); // Finally patch the jump to slow case back in the hot path to jump here instead. 
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); + CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code())); @@ -678,12 +661,12 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock); // Use the patch information to link the failure cases back to the original slow case routine. - CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall); + CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin); patchBuffer.link(failureCases1, slowCaseBegin); patchBuffer.link(failureCases2, slowCaseBegin); // On success return back to the hot patch code, at a point it will perform the store to dest for us. - patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); + patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); if (needsStubLink) { for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) { @@ -696,7 +679,7 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str stubInfo->stubRoutine = patchBuffer.finalizeCode(); // Finally patch the jump to slow case back in the hot path to jump here instead. - CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); + CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code())); @@ -744,19 +727,19 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic // Use the patch information to link the failure cases back to the original slow case routine. CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructures->list[currentIndex - 1].stubRoutine.code()); if (!lastProtoBegin) - lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall); + lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin); patchBuffer.link(failureCase, lastProtoBegin); // On success return back to the hot patch code, at a point it will perform the store to dest for us. - patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); + patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); CodeRef stubRoutine = patchBuffer.finalizeCode(); polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubRoutine, structure, isDirect); // Finally patch the jump to slow case back in the hot path to jump here instead. 
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); + CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code())); } @@ -814,14 +797,14 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi patchBuffer.link(failureCases2, lastProtoBegin); // On success return back to the hot patch code, at a point it will perform the store to dest for us. - patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); + patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); CodeRef stubRoutine = patchBuffer.finalizeCode(); prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, prototypeStructure, isDirect); // Finally patch the jump to slow case back in the hot path to jump here instead. - CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); + CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code())); } @@ -884,7 +867,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi patchBuffer.link(bucketsOfFail, lastProtoBegin); // On success return back to the hot patch code, at a point it will perform the store to dest for us. - patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); + patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); CodeRef stubRoutine = patchBuffer.finalizeCode(); @@ -892,7 +875,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain, isDirect); // Finally patch the jump to slow case back in the hot path to jump here instead. - CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); + CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code())); } @@ -946,17 +929,17 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str } } // Use the patch information to link the failure cases back to the original slow case routine. - patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall)); + patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin)); // On success return back to the hot patch code, at a point it will perform the store to dest for us. - patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); + patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); // Track the stub we have created so that it will be deleted later. 
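// Storing the finalized CodeRef in stubInfo->stubRoutine keeps the generated
// executable memory alive for as long as the patched hot path can still
// reach it.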
CodeRef stubRoutine = patchBuffer.finalizeCode(); stubInfo->stubRoutine = stubRoutine; // Finally patch the jump to slow case back in the hot path to jump here instead. - CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); + CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code())); @@ -1109,10 +1092,10 @@ void JIT::emit_op_put_global_var(Instruction* currentInstruction) void JIT::resetPatchGetById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo) { repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_get_by_id); - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), reinterpret_cast<void*>(-1)); - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset1), 0); - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset2), 0); - repatchBuffer.relink(stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase), stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall)); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), reinterpret_cast<void*>(-1)); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel1), 0); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel2), 0); + repatchBuffer.relink(stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck), stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin)); } void JIT::resetPatchPutById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo) @@ -1121,9 +1104,9 @@ void JIT::resetPatchPutById(RepatchBuffer& repatchBuffer, StructureStubInfo* stu repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id_direct); else repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id); - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), reinterpret_cast<void*>(-1)); - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), 0); - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), 0); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), reinterpret_cast<void*>(-1)); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel1), 0); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel2), 0); } } // namespace JSC diff --git a/Source/JavaScriptCore/jit/JITStubs.cpp b/Source/JavaScriptCore/jit/JITStubs.cpp index eebe90427..d81e68aae 100644 --- a/Source/JavaScriptCore/jit/JITStubs.cpp +++ b/Source/JavaScriptCore/jit/JITStubs.cpp @@ -48,7 +48,6 @@ #include "JITExceptions.h" #include "JSActivation.h" #include "JSArray.h" -#include "JSByteArray.h" #include "JSFunction.h" #include "JSGlobalObjectFunctions.h" #include "JSNotAnObject.h" @@ -1281,7 +1280,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_create_this) STUB_INIT_STACK_FRAME(stackFrame); CallFrame* 
callFrame = stackFrame.callFrame; - JSFunction* constructor = asFunction(callFrame->callee()); + JSFunction* constructor = jsCast<JSFunction*>(callFrame->callee()); #if !ASSERT_DISABLED ConstructData constructData; ASSERT(constructor->methodTable()->getConstructData(constructor, constructData) == ConstructTypeJS); @@ -1498,7 +1497,9 @@ DEFINE_STUB_FUNCTION(JSObject*, op_put_by_id_transition_realloc) ASSERT(baseValue.isObject()); JSObject* base = asObject(baseValue); - base->allocatePropertyStorage(*stackFrame.globalData, oldSize, newSize); + JSGlobalData& globalData = *stackFrame.globalData; + PropertyStorage newStorage = base->growPropertyStorage(globalData, oldSize, newSize); + base->setPropertyStorage(globalData, newStorage, newStructure); return base; } @@ -1517,6 +1518,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check) CodeBlock* codeBlock = stackFrame.callFrame->codeBlock(); MethodCallLinkInfo& methodCallLinkInfo = codeBlock->getMethodCallLinkInfo(STUB_RETURN_ADDRESS); + StructureStubInfo& stubInfo = codeBlock->getStubInfo(STUB_RETURN_ADDRESS); if (!methodCallLinkInfo.seenOnce()) { methodCallLinkInfo.setSeen(); @@ -1555,7 +1557,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check) // Check to see if the function is on the object's prototype. Patch up the code to optimize. if (slot.slotBase() == structure->prototypeForLookup(callFrame)) { - JIT::patchMethodCallProto(callFrame->globalData(), codeBlock, methodCallLinkInfo, callee, structure, slotBaseObject, STUB_RETURN_ADDRESS); + JIT::patchMethodCallProto(callFrame->globalData(), codeBlock, methodCallLinkInfo, stubInfo, callee, structure, slotBaseObject, STUB_RETURN_ADDRESS); return JSValue::encode(result); } @@ -1566,7 +1568,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check) // for now. For now it performs a check on a special object on the global object only used for this // purpose. The object is in no way exposed, and as such the check will always pass. if (slot.slotBase() == baseValue) { - JIT::patchMethodCallProto(callFrame->globalData(), codeBlock, methodCallLinkInfo, callee, structure, callFrame->scopeChain()->globalObject->methodCallDummy(), STUB_RETURN_ADDRESS); + JIT::patchMethodCallProto(callFrame->globalData(), codeBlock, methodCallLinkInfo, stubInfo, callee, structure, callFrame->scopeChain()->globalObject->methodCallDummy(), STUB_RETURN_ADDRESS); return JSValue::encode(result); } } @@ -1590,6 +1592,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check_update) CodeBlock* codeBlock = stackFrame.callFrame->codeBlock(); MethodCallLinkInfo& methodCallLinkInfo = codeBlock->getMethodCallLinkInfo(STUB_RETURN_ADDRESS); + StructureStubInfo& stubInfo = codeBlock->getStubInfo(STUB_RETURN_ADDRESS); ASSERT(methodCallLinkInfo.seenOnce()); @@ -1650,7 +1653,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check_update) // Check to see if the function is on the object's prototype. Patch up the code to optimize. if (slot.slotBase() == proto) { - JIT::patchMethodCallProto(callFrame->globalData(), codeBlock, methodCallLinkInfo, callee, structure, slotBaseObject, STUB_RETURN_ADDRESS); + JIT::patchMethodCallProto(callFrame->globalData(), codeBlock, methodCallLinkInfo, stubInfo, callee, structure, slotBaseObject, STUB_RETURN_ADDRESS); return JSValue::encode(result); } @@ -1661,7 +1664,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check_update) // useful. 
We could try to nop it out altogether, but that's a little messy, so lets do something simpler // for now. For now it performs a check on a special object on the global object only used for this // purpose. The object is in no way exposed, and as such the check will always pass. - JIT::patchMethodCallProto(callFrame->globalData(), codeBlock, methodCallLinkInfo, callee, structure, callFrame->scopeChain()->globalObject->methodCallDummy(), STUB_RETURN_ADDRESS); + JIT::patchMethodCallProto(callFrame->globalData(), codeBlock, methodCallLinkInfo, stubInfo, callee, structure, callFrame->scopeChain()->globalObject->methodCallDummy(), STUB_RETURN_ADDRESS); return JSValue::encode(result); } @@ -2149,7 +2152,7 @@ DEFINE_STUB_FUNCTION(JSObject*, op_new_func) inline void* jitCompileFor(CallFrame* callFrame, CodeSpecializationKind kind) { - JSFunction* function = asFunction(callFrame->callee()); + JSFunction* function = jsCast<JSFunction*>(callFrame->callee()); ASSERT(!function->isHostFunction()); FunctionExecutable* executable = function->jsExecutable(); ScopeChainNode* callDataScopeChain = function->scope(); @@ -2183,7 +2186,7 @@ DEFINE_STUB_FUNCTION(void*, op_construct_jitCompile) #if !ASSERT_DISABLED ConstructData constructData; - ASSERT(asFunction(stackFrame.callFrame->callee())->methodTable()->getConstructData(stackFrame.callFrame->callee(), constructData) == ConstructTypeJS); + ASSERT(jsCast<JSFunction*>(stackFrame.callFrame->callee())->methodTable()->getConstructData(stackFrame.callFrame->callee(), constructData) == ConstructTypeJS); #endif CallFrame* callFrame = stackFrame.callFrame; @@ -2222,7 +2225,7 @@ DEFINE_STUB_FUNCTION(void*, op_construct_arityCheck) inline void* lazyLinkFor(CallFrame* callFrame, CodeSpecializationKind kind) { - JSFunction* callee = asFunction(callFrame->callee()); + JSFunction* callee = jsCast<JSFunction*>(callFrame->callee()); ExecutableBase* executable = callee->executable(); MacroAssemblerCodePtr codePtr; @@ -2233,9 +2236,10 @@ inline void* lazyLinkFor(CallFrame* callFrame, CodeSpecializationKind kind) codePtr = executable->generatedJITCodeFor(kind).addressForCall(); else { FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable); - JSObject* error = functionExecutable->compileFor(callFrame, callee->scope(), kind); - if (error) + if (JSObject* error = functionExecutable->compileFor(callFrame, callee->scope(), kind)) { + callFrame->globalData().exception = error; return 0; + } codeBlock = &functionExecutable->generatedBytecodeFor(kind); if (callFrame->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()) || callLinkInfo->callType == CallLinkInfo::CallVarargs) @@ -2444,11 +2448,6 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val) CHECK_FOR_EXCEPTION(); return JSValue::encode(result); } - if (isJSByteArray(baseValue) && asByteArray(baseValue)->canAccessIndex(i)) { - // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks. 
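// This deletion is part of a wider removal in this commit: the JSByteArray
// fast paths disappear from op_get_by_val and op_put_by_val, together with
// the cti_op_get_by_val_byte_array / cti_op_put_by_val_byte_array stubs and
// the JSByteArray.h include at the top of this file.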
- ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_byte_array)); - return JSValue::encode(asByteArray(baseValue)->getIndex(callFrame, i)); - } JSValue result = baseValue.get(callFrame, i); CHECK_FOR_EXCEPTION(); return JSValue::encode(result); @@ -2489,36 +2488,6 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_string) return JSValue::encode(result); } -DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_byte_array) -{ - STUB_INIT_STACK_FRAME(stackFrame); - - CallFrame* callFrame = stackFrame.callFrame; - - JSValue baseValue = stackFrame.args[0].jsValue(); - JSValue subscript = stackFrame.args[1].jsValue(); - - JSValue result; - - if (LIKELY(subscript.isUInt32())) { - uint32_t i = subscript.asUInt32(); - if (isJSByteArray(baseValue) && asByteArray(baseValue)->canAccessIndex(i)) { - // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks. - return JSValue::encode(asByteArray(baseValue)->getIndex(callFrame, i)); - } - - result = baseValue.get(callFrame, i); - if (!isJSByteArray(baseValue)) - ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val)); - } else { - Identifier property(callFrame, subscript.toString(callFrame)->value(callFrame)); - result = baseValue.get(callFrame, property); - } - - CHECK_FOR_EXCEPTION_AT_END(); - return JSValue::encode(result); -} - DEFINE_STUB_FUNCTION(EncodedJSValue, op_sub) { STUB_INIT_STACK_FRAME(stackFrame); @@ -2554,21 +2523,6 @@ DEFINE_STUB_FUNCTION(void, op_put_by_val) jsArray->setIndex(*globalData, i, value); else JSArray::putByIndex(jsArray, callFrame, i, value, callFrame->codeBlock()->isStrictMode()); - } else if (isJSByteArray(baseValue) && asByteArray(baseValue)->canAccessIndex(i)) { - JSByteArray* jsByteArray = asByteArray(baseValue); - ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_val_byte_array)); - // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks. - if (value.isInt32()) { - jsByteArray->setIndex(i, value.asInt32()); - return; - } else { - if (value.isNumber()) { - jsByteArray->setIndex(i, value.asNumber()); - return; - } - } - - baseValue.putByIndex(callFrame, i, value, callFrame->codeBlock()->isStrictMode()); } else baseValue.putByIndex(callFrame, i, value, callFrame->codeBlock()->isStrictMode()); } else { @@ -2582,47 +2536,6 @@ DEFINE_STUB_FUNCTION(void, op_put_by_val) CHECK_FOR_EXCEPTION_AT_END(); } -DEFINE_STUB_FUNCTION(void, op_put_by_val_byte_array) -{ - STUB_INIT_STACK_FRAME(stackFrame); - - CallFrame* callFrame = stackFrame.callFrame; - - JSValue baseValue = stackFrame.args[0].jsValue(); - JSValue subscript = stackFrame.args[1].jsValue(); - JSValue value = stackFrame.args[2].jsValue(); - - if (LIKELY(subscript.isUInt32())) { - uint32_t i = subscript.asUInt32(); - if (isJSByteArray(baseValue) && asByteArray(baseValue)->canAccessIndex(i)) { - JSByteArray* jsByteArray = asByteArray(baseValue); - - // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks. 
- if (value.isInt32()) { - jsByteArray->setIndex(i, value.asInt32()); - return; - } else { - if (value.isNumber()) { - jsByteArray->setIndex(i, value.asNumber()); - return; - } - } - } - - if (!isJSByteArray(baseValue)) - ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_val)); - baseValue.putByIndex(callFrame, i, value, callFrame->codeBlock()->isStrictMode()); - } else { - Identifier property(callFrame, subscript.toString(callFrame)->value(callFrame)); - if (!stackFrame.globalData->exception) { // Don't put to an object if toString threw an exception. - PutPropertySlot slot(callFrame->codeBlock()->isStrictMode()); - baseValue.put(callFrame, property, value, slot); - } - } - - CHECK_FOR_EXCEPTION_AT_END(); -} - DEFINE_STUB_FUNCTION(EncodedJSValue, op_less) { STUB_INIT_STACK_FRAME(stackFrame); @@ -2949,20 +2862,20 @@ DEFINE_STUB_FUNCTION(int, op_eq) if (cell1->isString()) { if (src2.isInt32()) - return jsToNumber(static_cast<JSString*>(cell1)->value(stackFrame.callFrame)) == src2.asInt32(); + return jsToNumber(jsCast<JSString*>(cell1)->value(stackFrame.callFrame)) == src2.asInt32(); if (src2.isDouble()) - return jsToNumber(static_cast<JSString*>(cell1)->value(stackFrame.callFrame)) == src2.asDouble(); + return jsToNumber(jsCast<JSString*>(cell1)->value(stackFrame.callFrame)) == src2.asDouble(); if (src2.isTrue()) - return jsToNumber(static_cast<JSString*>(cell1)->value(stackFrame.callFrame)) == 1.0; + return jsToNumber(jsCast<JSString*>(cell1)->value(stackFrame.callFrame)) == 1.0; if (src2.isFalse()) - return jsToNumber(static_cast<JSString*>(cell1)->value(stackFrame.callFrame)) == 0.0; + return jsToNumber(jsCast<JSString*>(cell1)->value(stackFrame.callFrame)) == 0.0; JSCell* cell2 = src2.asCell(); if (cell2->isString()) - return static_cast<JSString*>(cell1)->value(stackFrame.callFrame) == static_cast<JSString*>(cell2)->value(stackFrame.callFrame); + return jsCast<JSString*>(cell1)->value(stackFrame.callFrame) == jsCast<JSString*>(cell2)->value(stackFrame.callFrame); src2 = asObject(cell2)->toPrimitive(stackFrame.callFrame); CHECK_FOR_EXCEPTION(); @@ -3252,35 +3165,6 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_typeof) return JSValue::encode(jsTypeStringForValue(stackFrame.callFrame, stackFrame.args[0].jsValue())); } -DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_undefined) -{ - STUB_INIT_STACK_FRAME(stackFrame); - - JSValue v = stackFrame.args[0].jsValue(); - return JSValue::encode(jsBoolean(v.isCell() ? 
v.asCell()->structure()->typeInfo().masqueradesAsUndefined() : v.isUndefined())); -} - -DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_boolean) -{ - STUB_INIT_STACK_FRAME(stackFrame); - - return JSValue::encode(jsBoolean(stackFrame.args[0].jsValue().isBoolean())); -} - -DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_number) -{ - STUB_INIT_STACK_FRAME(stackFrame); - - return JSValue::encode(jsBoolean(stackFrame.args[0].jsValue().isNumber())); -} - -DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_string) -{ - STUB_INIT_STACK_FRAME(stackFrame); - - return JSValue::encode(jsBoolean(isJSString(stackFrame.args[0].jsValue()))); -} - DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_object) { STUB_INIT_STACK_FRAME(stackFrame); @@ -3558,24 +3442,24 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, to_object) MacroAssemblerCodeRef JITThunks::ctiStub(JSGlobalData* globalData, ThunkGenerator generator) { - std::pair<CTIStubMap::iterator, bool> entry = m_ctiStubMap.add(generator, MacroAssemblerCodeRef()); - if (entry.second) - entry.first->second = generator(globalData); - return entry.first->second; + CTIStubMap::AddResult entry = m_ctiStubMap.add(generator, MacroAssemblerCodeRef()); + if (entry.isNewEntry) + entry.iterator->second = generator(globalData); + return entry.iterator->second; } NativeExecutable* JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function, NativeFunction constructor) { - std::pair<HostFunctionStubMap::iterator, bool> result = m_hostFunctionStubMap->add(function, PassWeak<NativeExecutable>()); - if (!result.first->second) - result.first->second = PassWeak<NativeExecutable>(*globalData, NativeExecutable::create(*globalData, JIT::compileCTINativeCall(globalData, function), function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct()), constructor, NoIntrinsic)); - return result.first->second.get(); + HostFunctionStubMap::AddResult result = m_hostFunctionStubMap->add(function, PassWeak<NativeExecutable>()); + if (!result.iterator->second) + result.iterator->second = PassWeak<NativeExecutable>(NativeExecutable::create(*globalData, JIT::compileCTINativeCall(globalData, function), function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct()), constructor, NoIntrinsic)); + return result.iterator->second.get(); } NativeExecutable* JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function, ThunkGenerator generator, Intrinsic intrinsic) { - std::pair<HostFunctionStubMap::iterator, bool> entry = m_hostFunctionStubMap->add(function, PassWeak<NativeExecutable>()); - if (!*entry.first->second) { + HostFunctionStubMap::AddResult entry = m_hostFunctionStubMap->add(function, PassWeak<NativeExecutable>()); + if (!entry.iterator->second) { MacroAssemblerCodeRef code; if (generator) { if (globalData->canUseJIT()) @@ -3584,9 +3468,9 @@ NativeExecutable* JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFu code = MacroAssemblerCodeRef(); } else code = JIT::compileCTINativeCall(globalData, function); - entry.first->second = PassWeak<NativeExecutable>(*globalData, NativeExecutable::create(*globalData, code, function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct()), callHostFunctionAsConstructor, intrinsic)); + entry.iterator->second = PassWeak<NativeExecutable>(NativeExecutable::create(*globalData, code, function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct()), callHostFunctionAsConstructor, intrinsic)); } - return entry.first->second.get(); + return entry.iterator->second.get(); } void 
JITThunks::clearHostFunctionStubs()
diff --git a/Source/JavaScriptCore/jit/JITStubs.h b/Source/JavaScriptCore/jit/JITStubs.h
index 49f666465..786353df5 100644
--- a/Source/JavaScriptCore/jit/JITStubs.h
+++ b/Source/JavaScriptCore/jit/JITStubs.h
@@ -92,7 +92,6 @@ namespace JSC {
         MacroAssemblerCodePtr ctiVirtualConstruct;
         MacroAssemblerCodePtr ctiNativeCall;
         MacroAssemblerCodePtr ctiNativeConstruct;
-        MacroAssemblerCodePtr ctiSoftModulo;
     };

#if CPU(X86_64)
@@ -323,7 +322,6 @@ namespace JSC {
 #endif
             return m_trampolineStructure.ctiNativeConstruct;
         }
-        MacroAssemblerCodePtr ctiSoftModulo() { return m_trampolineStructure.ctiSoftModulo; }

         MacroAssemblerCodeRef ctiStub(JSGlobalData*, ThunkGenerator);

@@ -369,7 +367,6 @@ extern "C" {
     EncodedJSValue JIT_STUB cti_op_get_by_id_self_fail(STUB_ARGS_DECLARATION);
     EncodedJSValue JIT_STUB cti_op_get_by_id_string_fail(STUB_ARGS_DECLARATION);
     EncodedJSValue JIT_STUB cti_op_get_by_val(STUB_ARGS_DECLARATION);
-    EncodedJSValue JIT_STUB cti_op_get_by_val_byte_array(STUB_ARGS_DECLARATION);
     EncodedJSValue JIT_STUB cti_op_get_by_val_string(STUB_ARGS_DECLARATION);
     EncodedJSValue JIT_STUB cti_op_in(STUB_ARGS_DECLARATION);
     EncodedJSValue JIT_STUB cti_op_instanceof(STUB_ARGS_DECLARATION);
@@ -447,7 +444,6 @@ extern "C" {
     void JIT_STUB cti_op_put_by_id_direct_generic(STUB_ARGS_DECLARATION);
     void JIT_STUB cti_op_put_by_index(STUB_ARGS_DECLARATION);
     void JIT_STUB cti_op_put_by_val(STUB_ARGS_DECLARATION);
-    void JIT_STUB cti_op_put_by_val_byte_array(STUB_ARGS_DECLARATION);
     void JIT_STUB cti_op_put_getter_setter(STUB_ARGS_DECLARATION);
     void JIT_STUB cti_op_tear_off_activation(STUB_ARGS_DECLARATION);
     void JIT_STUB cti_op_tear_off_arguments(STUB_ARGS_DECLARATION);
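// For readers tracking the container change in JITThunks above: the commit
// migrates from std::pair-returning HashMap::add() to WTF's AddResult. A
// minimal sketch of the new idiom, with names exactly as they appear in the
// hunks above:
//
//     CTIStubMap::AddResult entry = m_ctiStubMap.add(generator, MacroAssemblerCodeRef());
//     if (entry.isNewEntry)                               // was: entry.second
//         entry.iterator->second = generator(globalData); // was: entry.first->second
//     return entry.iterator->second;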