author | Simon Hausmann <simon.hausmann@nokia.com> | 2012-05-25 15:09:11 +0200
committer | Simon Hausmann <simon.hausmann@nokia.com> | 2012-05-25 15:09:11 +0200
commit | a89b2ebb8e192c5e8cea21079bda2ee2c0c7dddd (patch)
tree | b7abd9f49ae1d4d2e426a5883bfccd42b8e2ee12 /Source/JavaScriptCore/jit
parent | 8d473cf9743f1d30a16a27114e93bd5af5648d23 (diff)
download | qtwebkit-a89b2ebb8e192c5e8cea21079bda2ee2c0c7dddd.tar.gz
Imported WebKit commit eb5c1b8fe4d4b1b90b5137433fc58a91da0e6878 (http://svn.webkit.org/repository/webkit/trunk@118516)
Diffstat (limited to 'Source/JavaScriptCore/jit')
-rw-r--r-- | Source/JavaScriptCore/jit/ExecutableAllocator.cpp | 17
-rw-r--r-- | Source/JavaScriptCore/jit/ExecutableAllocator.h | 102
-rw-r--r-- | Source/JavaScriptCore/jit/HostCallReturnValue.h | 4
-rw-r--r-- | Source/JavaScriptCore/jit/JIT.cpp | 27
-rw-r--r-- | Source/JavaScriptCore/jit/JIT.h | 3
-rw-r--r-- | Source/JavaScriptCore/jit/JITArithmetic.cpp | 2
-rw-r--r-- | Source/JavaScriptCore/jit/JITCode.h | 2
-rw-r--r-- | Source/JavaScriptCore/jit/JITDriver.h | 12
-rw-r--r-- | Source/JavaScriptCore/jit/JITOpcodes.cpp | 3
-rw-r--r-- | Source/JavaScriptCore/jit/JITOpcodes32_64.cpp | 1
-rw-r--r-- | Source/JavaScriptCore/jit/JITPropertyAccess.cpp | 2
-rw-r--r-- | Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp | 2
-rw-r--r-- | Source/JavaScriptCore/jit/JITStubs.cpp | 67
-rw-r--r-- | Source/JavaScriptCore/jit/JITStubs.h | 2
-rw-r--r-- | Source/JavaScriptCore/jit/ThunkGenerators.cpp | 2
15 files changed, 85 insertions, 163 deletions
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
index e30c892e3..79399196e 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
+++ b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
@@ -261,23 +261,6 @@ void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSe
 #endif

-#if CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT)
-
-__asm void ExecutableAllocator::cacheFlush(void* code, size_t size)
-{
-    ARM
-    push {r7}
-    add r1, r1, r0
-    mov r7, #0xf0000
-    add r7, r7, #0x2
-    mov r2, #0x0
-    svc #0x0
-    pop {r7}
-    bx lr
-}
-
-#endif
-
 }

 #endif // HAVE(ASSEMBLER)

diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.h b/Source/JavaScriptCore/jit/ExecutableAllocator.h
index c1edc9752..8cd5cba07 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocator.h
+++ b/Source/JavaScriptCore/jit/ExecutableAllocator.h
@@ -139,108 +139,6 @@ public:
     static void makeExecutable(void*, size_t) {}
 #endif
-#if CPU(X86) || CPU(X86_64)
-    static void cacheFlush(void*, size_t)
-    {
-    }
-#elif CPU(MIPS)
-    static void cacheFlush(void* code, size_t size)
-    {
-#if GCC_VERSION_AT_LEAST(4, 3, 0)
-#if WTF_MIPS_ISA_REV(2) && !GCC_VERSION_AT_LEAST(4, 4, 3)
-        int lineSize;
-        asm("rdhwr %0, $1" : "=r" (lineSize));
-        //
-        // Modify "start" and "end" to avoid GCC 4.3.0-4.4.2 bug in
-        // mips_expand_synci_loop that may execute synci one more time.
-        // "start" points to the fisrt byte of the cache line.
-        // "end" points to the last byte of the line before the last cache line.
-        // Because size is always a multiple of 4, this is safe to set
-        // "end" to the last byte.
-        //
-        intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
-        intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
-        __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
-#else
-        intptr_t end = reinterpret_cast<intptr_t>(code) + size;
-        __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
-#endif
-#else
-        _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
-#endif
-    }
-#elif CPU(ARM_THUMB2) && OS(IOS)
-    static void cacheFlush(void* code, size_t size)
-    {
-        sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
-    }
-#elif CPU(ARM_THUMB2) && OS(LINUX)
-    static void cacheFlush(void* code, size_t size)
-    {
-        asm volatile (
-            "push {r7}\n"
-            "mov r0, %0\n"
-            "mov r1, %1\n"
-            "movw r7, #0x2\n"
-            "movt r7, #0xf\n"
-            "movs r2, #0x0\n"
-            "svc 0x0\n"
-            "pop {r7}\n"
-            :
-            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
-            : "r0", "r1", "r2");
-    }
-#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT)
-    static __asm void cacheFlush(void* code, size_t size);
-#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(GCC)
-    static void cacheFlush(void* code, size_t size)
-    {
-        uintptr_t currentPage = reinterpret_cast<uintptr_t>(code) & ~(pageSize() - 1);
-        uintptr_t lastPage = (reinterpret_cast<uintptr_t>(code) + size) & ~(pageSize() - 1);
-
-        do {
-            asm volatile (
-                "push {r7}\n"
-                "mov r0, %0\n"
-                "mov r1, %1\n"
-                "mov r7, #0xf0000\n"
-                "add r7, r7, #0x2\n"
-                "mov r2, #0x0\n"
-                "svc 0x0\n"
-                "pop {r7}\n"
-                :
-                : "r" (currentPage), "r" (currentPage + pageSize())
-                : "r0", "r1", "r2");
-            currentPage += pageSize();
-        } while (lastPage >= currentPage);
-    }
-#elif OS(WINCE)
-    static void cacheFlush(void* code, size_t size)
-    {
-        CacheRangeFlush(code, size, CACHE_SYNC_ALL);
-    }
-#elif CPU(SH4) && OS(LINUX)
-    static void cacheFlush(void* code, size_t size)
-    {
-#ifdef CACHEFLUSH_D_L2
-        syscall(__NR_cacheflush, reinterpret_cast<unsigned>(code), size, CACHEFLUSH_D_WB | CACHEFLUSH_I | CACHEFLUSH_D_L2);
-#else
-        syscall(__NR_cacheflush, reinterpret_cast<unsigned>(code), size, CACHEFLUSH_D_WB | CACHEFLUSH_I);
-#endif
-    }
-#elif OS(QNX)
-    static void cacheFlush(void* code, size_t size)
-    {
-#if !ENABLE(ASSEMBLER_WX_EXCLUSIVE)
-        msync(code, size, MS_INVALIDATE_ICACHE);
-#else
-        UNUSED_PARAM(code);
-        UNUSED_PARAM(size);
-#endif
-    }
-#else
-    #error "The cacheFlush support is missing on this platform."
-#endif

     static size_t committedByteCount();

 private:

diff --git a/Source/JavaScriptCore/jit/HostCallReturnValue.h b/Source/JavaScriptCore/jit/HostCallReturnValue.h
index 12fe10b10..fc9127faf 100644
--- a/Source/JavaScriptCore/jit/HostCallReturnValue.h
+++ b/Source/JavaScriptCore/jit/HostCallReturnValue.h
@@ -43,10 +43,10 @@

 namespace JSC {

-extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValue();
+extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValue() REFERENCED_FROM_ASM;

 // This is a public declaration only to convince CLANG not to elide it.
-extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValueWithExecState(ExecState*);
+extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValueWithExecState(ExecState*) REFERENCED_FROM_ASM;

 inline void initializeHostCallReturnValue()
 {

diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp
index c2aec549a..ff5615f44 100644
--- a/Source/JavaScriptCore/jit/JIT.cpp
+++ b/Source/JavaScriptCore/jit/JIT.cpp
@@ -96,7 +96,7 @@ JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
 #if ENABLE(DFG_JIT)
 void JIT::emitOptimizationCheck(OptimizationCheckKind kind)
 {
-    if (!shouldEmitProfiling())
+    if (!canBeOptimized())
         return;

     Jump skipOptimize = branchAdd32(Signed, TrustedImm32(kind == LoopOptimizationCheck ? Options::executionCounterIncrementForLoop : Options::executionCounterIncrementForReturn), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter()));
@@ -417,7 +417,7 @@ void JIT::privateCompileSlowCases()

 #if ENABLE(VALUE_PROFILER)
     RareCaseProfile* rareCaseProfile = 0;
-    if (m_canBeOptimized)
+    if (shouldEmitProfiling())
         rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);
 #endif

@@ -497,7 +497,7 @@ void JIT::privateCompileSlowCases()
         ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

 #if ENABLE(VALUE_PROFILER)
-        if (m_canBeOptimized)
+        if (shouldEmitProfiling())
            add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));
 #endif

@@ -565,7 +565,24 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
 #endif

 #if ENABLE(VALUE_PROFILER)
-    m_canBeOptimized = m_codeBlock->canCompileWithDFG();
+    DFG::CapabilityLevel level = m_codeBlock->canCompileWithDFG();
+    switch (level) {
+    case DFG::CannotCompile:
+        m_canBeOptimized = false;
+        m_shouldEmitProfiling = false;
+        break;
+    case DFG::ShouldProfile:
+        m_canBeOptimized = false;
+        m_shouldEmitProfiling = true;
+        break;
+    case DFG::CanCompile:
+        m_canBeOptimized = true;
+        m_shouldEmitProfiling = true;
+        break;
+    default:
+        ASSERT_NOT_REACHED();
+        break;
+    }
 #endif

     // Just add a little bit of randomness to the codegen
@@ -619,7 +636,7 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
     Label functionBody = label();

 #if ENABLE(VALUE_PROFILER)
-    if (m_canBeOptimized)
+    if (canBeOptimized())
         add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->m_executionEntryCount));
 #endif

diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h
index 6dc0137d9..d1143105a 100644
--- a/Source/JavaScriptCore/jit/JIT.h
+++ b/Source/JavaScriptCore/jit/JIT.h
@@ -836,7 +836,7 @@ namespace JSC {

 #if ENABLE(DFG_JIT)
         bool canBeOptimized() { return m_canBeOptimized; }
-        bool shouldEmitProfiling() { return m_canBeOptimized; }
+        bool shouldEmitProfiling() { return m_shouldEmitProfiling; }
 #else
         bool canBeOptimized() { return false; }
         // Enables use of value profiler with tiered compilation turned off,
@@ -885,6 +885,7 @@ namespace JSC {

 #if ENABLE(VALUE_PROFILER)
         bool m_canBeOptimized;
+        bool m_shouldEmitProfiling;
 #endif

     } JIT_CLASS_ALIGNMENT;

diff --git a/Source/JavaScriptCore/jit/JITArithmetic.cpp b/Source/JavaScriptCore/jit/JITArithmetic.cpp
index a9390e35f..b66e2cd07 100644
--- a/Source/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/Source/JavaScriptCore/jit/JITArithmetic.cpp
@@ -818,7 +818,7 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsign
     else {
         ASSERT(opcodeID == op_mul);
 #if ENABLE(VALUE_PROFILER)
-        if (m_canBeOptimized) {
+        if (shouldEmitProfiling()) {
             // We want to be able to measure if this is taking the slow case just
             // because of negative zero. If this produces positive zero, then we
             // don't want the slow case to be taken because that will throw off

diff --git a/Source/JavaScriptCore/jit/JITCode.h b/Source/JavaScriptCore/jit/JITCode.h
index 3ae5ff234..c85e02e80 100644
--- a/Source/JavaScriptCore/jit/JITCode.h
+++ b/Source/JavaScriptCore/jit/JITCode.h
@@ -107,7 +107,7 @@ namespace JSC {

         void* dataAddressAtOffset(size_t offset) const
         {
-            ASSERT(offset < size());
+            ASSERT(offset <= size()); // use <= instead of < because it is valid to ask for an address at the exclusive end of the code.
             return reinterpret_cast<char*>(m_ref.code().dataLocation()) + offset;
         }

diff --git a/Source/JavaScriptCore/jit/JITDriver.h b/Source/JavaScriptCore/jit/JITDriver.h
index 66cf51925..6c50f4bd1 100644
--- a/Source/JavaScriptCore/jit/JITDriver.h
+++ b/Source/JavaScriptCore/jit/JITDriver.h
@@ -38,8 +38,10 @@
 namespace JSC {

 template<typename CodeBlockType>
-inline bool jitCompileIfAppropriate(JSGlobalData& globalData, OwnPtr<CodeBlockType>& codeBlock, JITCode& jitCode, JITCode::JITType jitType, JITCompilationEffort effort)
+inline bool jitCompileIfAppropriate(ExecState* exec, OwnPtr<CodeBlockType>& codeBlock, JITCode& jitCode, JITCode::JITType jitType, JITCompilationEffort effort)
 {
+    JSGlobalData& globalData = exec->globalData();
+
     if (jitType == codeBlock->getJITType())
         return true;

@@ -52,7 +54,7 @@ inline bool jitCompileIfAppropriate(JSGlobalData& globalData, OwnPtr<CodeBlockTy

     bool dfgCompiled = false;
     if (jitType == JITCode::DFGJIT)
-        dfgCompiled = DFG::tryCompile(globalData, codeBlock.get(), jitCode);
+        dfgCompiled = DFG::tryCompile(exec, codeBlock.get(), jitCode);
     if (dfgCompiled) {
         if (codeBlock->alternative())
             codeBlock->alternative()->unlinkIncomingCalls();
@@ -73,8 +75,10 @@ inline bool jitCompileIfAppropriate(JSGlobalData& globalData, OwnPtr<CodeBlockTy
     return true;
 }

-inline bool jitCompileFunctionIfAppropriate(JSGlobalData& globalData, OwnPtr<FunctionCodeBlock>& codeBlock, JITCode& jitCode, MacroAssemblerCodePtr& jitCodeWithArityCheck, SharedSymbolTable*& symbolTable, JITCode::JITType jitType, JITCompilationEffort effort)
+inline bool jitCompileFunctionIfAppropriate(ExecState* exec, OwnPtr<FunctionCodeBlock>& codeBlock, JITCode& jitCode, MacroAssemblerCodePtr& jitCodeWithArityCheck, SharedSymbolTable*& symbolTable, JITCode::JITType jitType, JITCompilationEffort effort)
 {
+    JSGlobalData& globalData = exec->globalData();
+
     if (jitType == codeBlock->getJITType())
         return true;

@@ -88,7 +92,7 @@ inline bool jitCompileFunctionIfAppropriate(JSGlobalData& globalData, OwnPtr<Fun

     bool dfgCompiled = false;
     if (jitType == JITCode::DFGJIT)
-        dfgCompiled = DFG::tryCompileFunction(globalData, codeBlock.get(), jitCode, jitCodeWithArityCheck);
+        dfgCompiled = DFG::tryCompileFunction(exec, codeBlock.get(), jitCode, jitCodeWithArityCheck);
     if (dfgCompiled) {
         if (codeBlock->alternative())
             codeBlock->alternative()->unlinkIncomingCalls();

diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp
index f43e98c45..d458f7fb5 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp
@@ -1526,6 +1526,7 @@ void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
     neg32(regT1);
     signExtend32ToPtr(regT1, regT1);
     loadPtr(BaseIndex(callFrameRegister, regT1, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0);
+    emitValueProfilingSite();
     emitPutVirtualRegister(dst, regT0);
 }

@@ -1548,7 +1549,7 @@ void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vecto
     JITStubCall stubCall(this, cti_op_get_by_val);
     stubCall.addArgument(arguments, regT2);
     stubCall.addArgument(property, regT2);
-    stubCall.call(dst);
+    stubCall.callWithValueProfiling(dst);
 }

 #endif // USE(JSVALUE64)

diff --git a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
index c9f8922fa..5643fe9f3 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -1636,6 +1636,7 @@ void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
     neg32(regT2);
     loadPtr(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0);
     loadPtr(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT1);
+    emitValueProfilingSite();
     emitStore(dst, regT1, regT0);
 }

diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
index 8c7148c9d..5d39735af 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -513,7 +513,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
     // If we succeed in all of our checks, and the code was optimizable, then make sure we
     // decrement the rare case counter.
 #if ENABLE(VALUE_PROFILER)
-    if (m_codeBlock->canCompileWithDFG()) {
+    if (m_codeBlock->canCompileWithDFG() >= DFG::ShouldProfile) {
         sub32(
             TrustedImm32(1),
             AbsoluteAddress(&m_codeBlock->rareCaseProfileForBytecodeOffset(stubInfo->bytecodeIndex)->m_counter));

diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
index 550ad0b2e..bd57484c4 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
@@ -479,7 +479,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
     // If we succeed in all of our checks, and the code was optimizable, then make sure we
     // decrement the rare case counter.
 #if ENABLE(VALUE_PROFILER)
-    if (m_codeBlock->canCompileWithDFG()) {
+    if (m_codeBlock->canCompileWithDFG() >= DFG::ShouldProfile) {
         sub32(
             TrustedImm32(1),
             AbsoluteAddress(&m_codeBlock->rareCaseProfileForBytecodeOffset(stubInfo->bytecodeIndex)->m_counter));

diff --git a/Source/JavaScriptCore/jit/JITStubs.cpp b/Source/JavaScriptCore/jit/JITStubs.cpp
index a6d6be106..e75f2825c 100644
--- a/Source/JavaScriptCore/jit/JITStubs.cpp
+++ b/Source/JavaScriptCore/jit/JITStubs.cpp
@@ -54,6 +54,7 @@
 #include "JSPropertyNameIterator.h"
 #include "JSStaticScopeObject.h"
 #include "JSString.h"
+#include "NameInstance.h"
 #include "ObjectPrototype.h"
 #include "Operations.h"
 #include "Parser.h"
@@ -2447,7 +2448,13 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val)
         CHECK_FOR_EXCEPTION();
         return JSValue::encode(result);
     }
-
+
+    if (isName(subscript)) {
+        JSValue result = baseValue.get(callFrame, jsCast<NameInstance*>(subscript.asCell())->privateName());
+        CHECK_FOR_EXCEPTION();
+        return JSValue::encode(result);
+    }
+
     Identifier property(callFrame, subscript.toString(callFrame)->value(callFrame));
     JSValue result = baseValue.get(callFrame, property);
     CHECK_FOR_EXCEPTION_AT_END();
@@ -2474,7 +2481,9 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_string)
             if (!isJSString(baseValue))
                 ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val));
         }
-    } else {
+    } else if (isName(subscript))
+        result = baseValue.get(callFrame, jsCast<NameInstance*>(subscript.asCell())->privateName());
+    else {
         Identifier property(callFrame, subscript.toString(callFrame)->value(callFrame));
         result = baseValue.get(callFrame, property);
     }
@@ -2520,6 +2529,9 @@ DEFINE_STUB_FUNCTION(void, op_put_by_val)
             JSArray::putByIndex(jsArray, callFrame, i, value, callFrame->codeBlock()->isStrictMode());
         } else
             baseValue.putByIndex(callFrame, i, value, callFrame->codeBlock()->isStrictMode());
+    } else if (isName(subscript)) {
+        PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
+        baseValue.put(callFrame, jsCast<NameInstance*>(subscript.asCell())->privateName(), value, slot);
     } else {
         Identifier property(callFrame, subscript.toString(callFrame)->value(callFrame));
         if (!stackFrame.globalData->exception) { // Don't put to an object if toString threw an exception.
@@ -2759,9 +2771,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_not)

     JSValue src = stackFrame.args[0].jsValue();

-    CallFrame* callFrame = stackFrame.callFrame;
-
-    JSValue result = jsBoolean(!src.toBoolean(callFrame));
+    JSValue result = jsBoolean(!src.toBoolean());
     CHECK_FOR_EXCEPTION_AT_END();
     return JSValue::encode(result);
 }
@@ -2772,9 +2782,7 @@ DEFINE_STUB_FUNCTION(int, op_jtrue)

     JSValue src1 = stackFrame.args[0].jsValue();

-    CallFrame* callFrame = stackFrame.callFrame;
-
-    bool result = src1.toBoolean(callFrame);
+    bool result = src1.toBoolean();
     CHECK_FOR_EXCEPTION_AT_END();
     return result;
 }
@@ -3245,6 +3253,9 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_in)
     if (propName.getUInt32(i))
         return JSValue::encode(jsBoolean(baseObj->hasProperty(callFrame, i)));

+    if (isName(propName))
+        return JSValue::encode(jsBoolean(baseObj->hasProperty(callFrame, jsCast<NameInstance*>(propName.asCell())->privateName())));
+
     Identifier property(callFrame, propName.toString(callFrame)->value(callFrame));
     CHECK_FOR_EXCEPTION();
     return JSValue::encode(jsBoolean(baseObj->hasProperty(callFrame, property)));
@@ -3357,6 +3368,8 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_del_by_val)
     uint32_t i;
     if (subscript.getUInt32(i))
         result = baseObj->methodTable()->deletePropertyByIndex(baseObj, callFrame, i);
+    else if (isName(subscript))
+        result = baseObj->methodTable()->deleteProperty(baseObj, callFrame, jsCast<NameInstance*>(subscript.asCell())->privateName());
     else {
         CHECK_FOR_EXCEPTION();
         Identifier property(callFrame, subscript.toString(callFrame)->value(callFrame));
@@ -3445,27 +3458,31 @@ MacroAssemblerCodeRef JITThunks::ctiStub(JSGlobalData* globalData, ThunkGenerato

 NativeExecutable* JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function, NativeFunction constructor)
 {
-    HostFunctionStubMap::AddResult result = m_hostFunctionStubMap->add(function, PassWeak<NativeExecutable>());
-    if (!result.iterator->second)
-        result.iterator->second = PassWeak<NativeExecutable>(NativeExecutable::create(*globalData, JIT::compileCTINativeCall(globalData, function), function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct()), constructor, NoIntrinsic));
-    return result.iterator->second.get();
+    if (NativeExecutable* nativeExecutable = m_hostFunctionStubMap->get(function))
+        return nativeExecutable;
+
+    NativeExecutable* nativeExecutable = NativeExecutable::create(*globalData, JIT::compileCTINativeCall(globalData, function), function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct()), constructor, NoIntrinsic);
+    weakAdd(*m_hostFunctionStubMap, function, PassWeak<NativeExecutable>(nativeExecutable));
+    return nativeExecutable;
 }

 NativeExecutable* JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function, ThunkGenerator generator, Intrinsic intrinsic)
 {
-    HostFunctionStubMap::AddResult entry = m_hostFunctionStubMap->add(function, PassWeak<NativeExecutable>());
-    if (!entry.iterator->second) {
-        MacroAssemblerCodeRef code;
-        if (generator) {
-            if (globalData->canUseJIT())
-                code = generator(globalData);
-            else
-                code = MacroAssemblerCodeRef();
-        } else
-            code = JIT::compileCTINativeCall(globalData, function);
-        entry.iterator->second = PassWeak<NativeExecutable>(NativeExecutable::create(*globalData, code, function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct()), callHostFunctionAsConstructor, intrinsic));
-    }
-    return entry.iterator->second.get();
+    if (NativeExecutable* nativeExecutable = m_hostFunctionStubMap->get(function))
+        return nativeExecutable;
+
+    MacroAssemblerCodeRef code;
+    if (generator) {
+        if (globalData->canUseJIT())
+            code = generator(globalData);
+        else
+            code = MacroAssemblerCodeRef();
+    } else
+        code = JIT::compileCTINativeCall(globalData, function);
+
+    NativeExecutable* nativeExecutable = NativeExecutable::create(*globalData, code, function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct()), callHostFunctionAsConstructor, intrinsic);
+    weakAdd(*m_hostFunctionStubMap, function, PassWeak<NativeExecutable>(nativeExecutable));
+    return nativeExecutable;
 }

 void JITThunks::clearHostFunctionStubs()

diff --git a/Source/JavaScriptCore/jit/JITStubs.h b/Source/JavaScriptCore/jit/JITStubs.h
index 786353df5..664338fd3 100644
--- a/Source/JavaScriptCore/jit/JITStubs.h
+++ b/Source/JavaScriptCore/jit/JITStubs.h
@@ -463,7 +463,7 @@ extern "C" {
     void* JIT_STUB cti_register_file_check(STUB_ARGS_DECLARATION);
     void* JIT_STUB cti_vm_lazyLinkCall(STUB_ARGS_DECLARATION);
     void* JIT_STUB cti_vm_lazyLinkConstruct(STUB_ARGS_DECLARATION);
-    void* JIT_STUB cti_vm_throw(STUB_ARGS_DECLARATION);
+    void* JIT_STUB cti_vm_throw(STUB_ARGS_DECLARATION) REFERENCED_FROM_ASM;
 } // extern "C"

 #endif // ENABLE(JIT)

diff --git a/Source/JavaScriptCore/jit/ThunkGenerators.cpp b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
index 371aff2f9..e46ba809c 100644
--- a/Source/JavaScriptCore/jit/ThunkGenerators.cpp
+++ b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
@@ -118,7 +118,7 @@ enum MathThunkCallingConvention { };
 typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);

 extern "C" {
-double jsRound(double);
+double jsRound(double) REFERENCED_FROM_ASM;
 double jsRound(double d)
 {
     double integer = ceil(d);