author     Simon Hausmann <simon.hausmann@nokia.com>   2012-09-11 19:54:20 +0200
committer  Simon Hausmann <simon.hausmann@nokia.com>   2012-09-11 19:54:20 +0200
commit     88a04ac016f57c2d78e714682445dff2e7db4ade (patch)
tree       a48ca81ee3b29953121308168db22532d5b57fe2 /Source/JavaScriptCore
parent     284837daa07b29d6a63a748544a90b1f5842ac5c (diff)
download   qtwebkit-88a04ac016f57c2d78e714682445dff2e7db4ade.tar.gz
Imported WebKit commit 42d95198c30c2d1a94a5081181aad0b2be7c316c (http://svn.webkit.org/repository/webkit/trunk@128206)
This includes the rewrite of the configure part of the build system, which should fix the QtQuick2 detection and allow for further simplifications in the future.
Diffstat (limited to 'Source/JavaScriptCore')
58 files changed, 1405 insertions, 1088 deletions
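
[Editor's note] The first hunk below swaps an intermediate String built via String::number() for a direct StringBuilder::appendNumber() call. As a rough standalone sketch of the before/after call shapes — plain C++ with a hypothetical Builder type standing in for WTF::StringBuilder, not WebKit's real classes:

    #include <iostream>
    #include <string>

    // Hypothetical stand-in for WTF::StringBuilder. WTF's appendNumber()
    // formats the digits straight into the builder's buffer rather than
    // going through a separately allocated string first.
    struct Builder {
        std::string buffer;
        void append(char c)               { buffer.push_back(c); }
        void append(const std::string& s) { buffer += s; }
        void appendNumber(int n)          { buffer += std::to_string(n); }
    };

    int main()
    {
        Builder builder;
        int count = 3;
        builder.append('#');
        // Before: String levelStr = String::number(count); builder.append(levelStr);
        builder.appendNumber(count); // after: no named temporary needed
        builder.append(' ');
        std::cout << builder.buffer << "\n"; // prints "#3 "
        return 0;
    }
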
diff --git a/Source/JavaScriptCore/API/JSContextRef.cpp b/Source/JavaScriptCore/API/JSContextRef.cpp index 7c815355b..e2a102948 100644 --- a/Source/JavaScriptCore/API/JSContextRef.cpp +++ b/Source/JavaScriptCore/API/JSContextRef.cpp @@ -185,9 +185,7 @@ JSStringRef JSContextCreateBacktrace(JSContextRef ctx, unsigned maxStackSize) intptr_t sourceID; String urlString; JSValue function; - - String levelStr = String::number(count); - + exec->interpreter()->retrieveLastCaller(callFrame, signedLineNumber, sourceID, urlString, function); if (function) @@ -202,7 +200,7 @@ JSStringRef JSContextCreateBacktrace(JSContextRef ctx, unsigned maxStackSize) if (!builder.isEmpty()) builder.append('\n'); builder.append('#'); - builder.append(levelStr); + builder.appendNumber(count); builder.append(' '); builder.append(functionName); builder.appendLiteral("() at "); diff --git a/Source/JavaScriptCore/CMakeLists.txt b/Source/JavaScriptCore/CMakeLists.txt index 22d571933..2065c5b39 100644 --- a/Source/JavaScriptCore/CMakeLists.txt +++ b/Source/JavaScriptCore/CMakeLists.txt @@ -119,6 +119,7 @@ SET(JavaScriptCore_SOURCES heap/MarkedBlock.cpp heap/MarkedSpace.cpp heap/MarkStack.cpp + heap/SlotVisitor.cpp heap/WeakSet.cpp heap/WeakHandleOwner.cpp heap/WeakBlock.cpp @@ -331,6 +332,7 @@ IF (ENABLE_LLINT) ADD_SOURCE_DEPENDENCIES(${JAVASCRIPTCORE_DIR}/llint/LLIntOffsetsExtractor.cpp ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/LLIntDesiredOffsets.h) ADD_EXECUTABLE(LLIntOffsetsExtractor ${JAVASCRIPTCORE_DIR}/llint/LLIntOffsetsExtractor.cpp) + TARGET_LINK_LIBRARIES(LLIntOffsetsExtractor ${WTF_LIBRARY_NAME}) ADD_CUSTOM_COMMAND( OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/LLIntAssembly.h @@ -407,10 +409,6 @@ TARGET_LINK_LIBRARIES(${JavaScriptCore_LIBRARY_NAME} ${JavaScriptCore_LIBRARIES} SET_TARGET_PROPERTIES(${JavaScriptCore_LIBRARY_NAME} PROPERTIES FOLDER "JavaScriptCore") SET_TARGET_PROPERTIES(${JavaScriptCore_LIBRARY_NAME} PROPERTIES LINK_INTERFACE_LIBRARIES "") -IF (JavaScriptCore_LINK_FLAGS) - ADD_TARGET_PROPERTIES(${JavaScriptCore_LIBRARY_NAME} LINK_FLAGS "${JavaScriptCore_LINK_FLAGS}") -ENDIF () - IF (SHARED_CORE) SET_TARGET_PROPERTIES(${JavaScriptCore_LIBRARY_NAME} PROPERTIES VERSION ${PROJECT_VERSION} SOVERSION ${PROJECT_VERSION_MAJOR}) INSTALL(TARGETS ${JavaScriptCore_LIBRARY_NAME} DESTINATION "${LIB_INSTALL_DIR}") diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog index 83cae4a31..e92f02211 100644 --- a/Source/JavaScriptCore/ChangeLog +++ b/Source/JavaScriptCore/ChangeLog @@ -1,3 +1,351 @@ +2012-09-11 Raphael Kubo da Costa <rakuco@webkit.org> + + [EFL] Rewrite the EFL-related Find modules + https://bugs.webkit.org/show_bug.cgi?id=95237 + + Reviewed by Kenneth Rohde Christiansen. + + * CMakeLists.txt: Stop setting the LINK_FLAGS property. + * PlatformEfl.cmake: Ditto. + * shell/PlatformEfl.cmake: Ditto. + +2012-09-11 Raphael Kubo da Costa <rakuco@webkit.org> + + [EFL] Unreviewed build fix after r128065. + + * CMakeLists.txt: Link against WTF for FastMalloc symbols, which + are needed when building with SYSTEM_MALLOC off. + +2012-09-10 Mark Hahnenberg <mhahnenberg@apple.com> + + Remove m_classInfo from JSCell + https://bugs.webkit.org/show_bug.cgi?id=96311 + + Reviewed by Oliver Hunt. + + Now that no one is using the ClassInfo in JSCell, we can remove it for the greater good. This is a 1.5% win on v8v7 and + a 1.7% win on kraken, and is an overall performance progression. 
+
+    * dfg/DFGSpeculativeJIT.h:
+    (JSC::DFG::SpeculativeJIT::emitAllocateBasicJSObject): Had to rearrange the order of when we take things off the free list
+    and when we store the Structure in the object because we would clobber the free list otherwise. This made it not okay for
+    the structure argument and the scratch register to alias one another. Also removed the store of the ClassInfo pointer in the
+    object. Yay!
+    (SpeculativeJIT):
+    * dfg/DFGSpeculativeJIT32_64.cpp: Since it's no longer okay for the scratch register and structure register to alias
+    one another as stated above, had to add an extra temporary for passing the Structure.
+    (JSC::DFG::SpeculativeJIT::compile):
+    * dfg/DFGSpeculativeJIT64.cpp: Ditto.
+    (JSC::DFG::SpeculativeJIT::compile):
+    * jit/JITInlineMethods.h:
+    (JSC::JIT::emitAllocateBasicJSObject): Similar changes to DFG's inline allocation except that it removed the object from
+    the free list first, so no changes were necessary there.
+    * llint/LowLevelInterpreter.asm: Change the constants for amount of inline storage to match PropertyOffset.h and remove
+    the store of the ClassInfo pointer during inline allocation.
+    * llint/LowLevelInterpreter32_64.asm:
+    * llint/LowLevelInterpreter64.asm:
+    * runtime/JSCell.h: Remove the m_classInfo field and associated methods.
+    (JSCell):
+    * runtime/JSObject.h:
+    (JSObject):
+    * runtime/PropertyOffset.h: Expand the number of inline storage properties to take up the extra space that we're freeing
+    with the removal of the ClassInfo pointer.
+    (JSC):
+    * runtime/Structure.h:
+    (JSC):
+    (JSC::JSCell::JSCell):
+    (JSC::JSCell::finishCreation):
+
+2012-09-10 Geoffrey Garen <ggaren@apple.com>
+
+    Added large allocation support to MarkedSpace
+    https://bugs.webkit.org/show_bug.cgi?id=96214
+
+    Originally reviewed by Oliver Hunt, then I added a design revision
+    suggested by Phil Pizlo.
+
+    I expanded the imprecise size classes to cover up to 32KB, then added
+    an mmap-based allocator for everything bigger. There's a lot of tuning
+    we could do in these size classes, but currently they're almost
+    completely unused, so I haven't done any tuning.
+
+    Subtle point: the large allocator is a degenerate case of our free list
+    logic. Its list only ever contains zero or one items.
+
+    * heap/Heap.h:
+    (JSC::Heap::allocateStructure): Pipe in size information.
+
+    * heap/MarkedAllocator.cpp:
+    (JSC::MarkedAllocator::tryAllocateHelper): Handle the case where we
+    find a free item in the sweep list but the item isn't big enough. This
+    can happen in the large allocator because it mixes sizes.
+
+    (JSC::MarkedAllocator::tryAllocate):
+    (JSC::MarkedAllocator::allocateSlowCase): More piping.
+
+    (JSC::MarkedAllocator::allocateBlock): Handle the oversize case.
+
+    (JSC::MarkedAllocator::addBlock): I moved the call to didAddBlock here
+    because it made more sense.
+
+    * heap/MarkedAllocator.h:
+    (MarkedAllocator):
+    (JSC::MarkedAllocator::allocate):
+    * heap/MarkedSpace.cpp:
+    (JSC::MarkedSpace::MarkedSpace):
+    (JSC::MarkedSpace::resetAllocators):
+    (JSC::MarkedSpace::canonicalizeCellLivenessData):
+    (JSC::MarkedSpace::isPagedOut):
+    (JSC::MarkedSpace::freeBlock):
+    * heap/MarkedSpace.h:
+    (MarkedSpace):
+    (JSC::MarkedSpace::allocatorFor):
+    (JSC::MarkedSpace::destructorAllocatorFor):
+    (JSC::MarkedSpace::allocateWithoutDestructor):
+    (JSC::MarkedSpace::allocateWithDestructor):
+    (JSC::MarkedSpace::allocateStructure):
+    (JSC::MarkedSpace::forEachBlock):
+    * runtime/Structure.h:
+    (JSC::Structure): More piping.
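
[Editor's note] To make the size-class scheme described in the entry above concrete, here is a minimal sketch of how an allocation request might be routed. The cutoff constants are invented for illustration; JSC's real tuning and allocator types live in MarkedSpace.h:

    #include <cassert>
    #include <cstddef>

    // Invented cutoffs; the entry above only says the imprecise classes
    // were expanded to cover up to 32KB.
    const size_t preciseCutoff   = 256;
    const size_t impreciseCutoff = 32 * 1024;

    enum AllocatorKind { Precise, Imprecise, Oversize };

    AllocatorKind allocatorFor(size_t bytes)
    {
        if (bytes <= preciseCutoff)
            return Precise;   // small, tightly packed size classes
        if (bytes <= impreciseCutoff)
            return Imprecise; // coarser classes, some internal waste
        // Oversize allocations get their own (mmap-style) block; this
        // allocator's free list is degenerate, holding zero or one items.
        return Oversize;
    }

    int main()
    {
        assert(allocatorFor(64) == Precise);
        assert(allocatorFor(4096) == Imprecise);
        assert(allocatorFor(64 * 1024) == Oversize);
        return 0;
    }
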
+
+2012-09-10 Geoffrey Garen <ggaren@apple.com>
+
+    Try to fix the Windows (32-bit) build.
+
+    * jit/JITOpcodes.cpp:
+    (JSC::JIT::emit_op_tear_off_arguments):
+    * jit/JITOpcodes32_64.cpp:
+    (JSC::JIT::emit_op_tear_off_arguments): Get operands 1 and 2, not 1 and 1. :(
+
+    Also took this opportunity to rename to indicate that these values are
+    not destinations anymore.
+
+2012-09-10 Geoffrey Garen <ggaren@apple.com>
+
+    DFG misses arguments tear-off for function.arguments if 'arguments' is used
+    https://bugs.webkit.org/show_bug.cgi?id=96227
+
+    Reviewed by Gavin Barraclough.
+
+    We've decided not to allow function.arguments to alias the local
+    'arguments' object, or a local var or function named 'arguments'.
+    Aliasing complicates the implementation (cf. this bug) and can produce
+    surprising behavior for web programmers.
+
+    Eliminating the aliasing has the side-effect of fixing this bug.
+
+    The compatibility story: function.arguments is deprecated, was never
+    specified, and throws an exception in strict mode, so we expect it to
+    disappear over time. Firefox does not alias to 'arguments'; Chrome
+    does, but not if you use eval or with; IE does; Safari did.
+
+    * dfg/DFGByteCodeParser.cpp: Noticed a little cleanup while verifying
+    this code. Use the CodeBlock method for better encapsulation.
+
+    * interpreter/Interpreter.cpp:
+    (JSC::Interpreter::retrieveArgumentsFromVMCode): Behavior change: don't
+    alias.
+
+    * tests/mozilla/js1_4/Functions/function-001.js:
+    (TestFunction_4): Updated test expectations for changed behavior.
+
+2012-09-10 Filip Pizlo <fpizlo@apple.com>
+
+    offlineasm has some impossible to implement, and unused, instructions
+    https://bugs.webkit.org/show_bug.cgi?id=96310
+
+    Reviewed by Mark Hahnenberg.
+
+    * offlineasm/armv7.rb:
+    * offlineasm/instructions.rb:
+    * offlineasm/x86.rb:
+
+2012-09-09 Geoffrey Garen <ggaren@apple.com>
+
+    Refactored op_tear_off* to support activations that don't allocate space for 'arguments'
+    https://bugs.webkit.org/show_bug.cgi?id=96231
+
+    Reviewed by Gavin Barraclough.
+
+    This is a step toward smaller activations.
+
+    As a side-effect, this patch eliminates a load and branch from the hot path
+    of activation tear-off by moving it to the cold path of arguments tear-off. Our
+    optimizing assumptions are that activations are common and that reifying the
+    arguments object is less common.
+
+    * bytecode/CodeBlock.cpp:
+    (JSC::CodeBlock::dump):
+    * bytecode/Opcode.h:
+    (JSC::padOpcodeName): Updated for new opcode lengths.
+
+    * bytecompiler/BytecodeGenerator.cpp:
+    (JSC::BytecodeGenerator::BytecodeGenerator):
+    (JSC::BytecodeGenerator::addConstantValue): Added support for JSValue()
+    in the bytecode, which we use when we have 'arguments' but no activation.
+
+    (JSC::BytecodeGenerator::emitReturn): Always emit tear_off_arguments
+    if we've allocated the arguments registers. This allows tear_off_activation
+    not to worry about the arguments object anymore.
+
+    Also, pass the activation and arguments values directly to these opcodes
+    instead of requiring the opcodes to infer the values through special
+    registers. This gives us more flexibility to move or eliminate registers.
+
+    * dfg/DFGArgumentsSimplificationPhase.cpp:
+    (JSC::DFG::ArgumentsSimplificationPhase::run):
+    * dfg/DFGByteCodeParser.cpp:
+    (JSC::DFG::ByteCodeParser::parseBlock):
+    * dfg/DFGNode.h:
+    (Node): Updated for new opcode lengths.
+
+    * dfg/DFGOperations.cpp: Activation tear-off doesn't worry about the
+    arguments object anymore.
If 'arguments' is in use and reified, it's + responsible for aliasing back to the activation object in tear_off_arguments. + + * dfg/DFGOperations.h: + * dfg/DFGSpeculativeJIT.h: + (JSC::DFG::SpeculativeJIT::callOperation): + (SpeculativeJIT): + * dfg/DFGSpeculativeJIT32_64.cpp: + (JSC::DFG::SpeculativeJIT::compile): + * dfg/DFGSpeculativeJIT64.cpp: + (JSC::DFG::SpeculativeJIT::compile): Don't pass the arguments object to + activation tear-off; do pass the activation object to arguments tear-off. + + * interpreter/Interpreter.cpp: + (JSC::Interpreter::privateExecute): Ditto. + + * jit/JITOpcodes.cpp: + (JSC::JIT::emit_op_tear_off_activation): + (JSC::JIT::emit_op_tear_off_arguments): + * jit/JITOpcodes32_64.cpp: + (JSC::JIT::emit_op_tear_off_activation): + (JSC::JIT::emit_op_tear_off_arguments): + * jit/JITStubs.cpp: + (JSC::DEFINE_STUB_FUNCTION): + * llint/LLIntSlowPaths.cpp: + (JSC::LLInt::LLINT_SLOW_PATH_DECL): + * llint/LowLevelInterpreter32_64.asm: + * llint/LowLevelInterpreter64.asm: Same change in a few more execution engines. + +2012-09-10 Patrick Gansterer <paroga@webkit.org> + + [JSC] Use StringBuilder::appendNumber() instead of String::number() + https://bugs.webkit.org/show_bug.cgi?id=96236 + + Reviewed by Benjamin Poulain. + + * API/JSContextRef.cpp: + (JSContextCreateBacktrace): + +2012-09-06 Mark Hahnenberg <mhahnenberg@apple.com> + + Combine MarkStack and SlotVisitor into single class + https://bugs.webkit.org/show_bug.cgi?id=96043 + + Reviewed by Geoff Garen. + + Move all of MarkStack into SlotVisitor. The remaining stuff in MarkStack.cpp actually has to do + with MarkStack management/allocation. Cleaned up a few of the header files while I was at it. + + * CMakeLists.txt: + * GNUmakefile.list.am: + * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj: + * JavaScriptCore.xcodeproj/project.pbxproj: + * Target.pri: + * bytecode/CodeBlock.cpp: + * dfg/DFGCommon.h: + * heap/GCThreadSharedData.cpp: + * heap/GCThreadSharedData.h: + (GCThreadSharedData): + * heap/HeapRootVisitor.h: + * heap/MarkStack.cpp: + (JSC): + * heap/MarkStack.h: + (JSC): + (MarkStackSegment): + (JSC::MarkStackSegment::data): + (JSC::MarkStackSegment::capacityFromSize): + (JSC::MarkStackSegment::sizeFromCapacity): + (MarkStackSegmentAllocator): + (MarkStackArray): + * heap/MarkStackInlineMethods.h: + (JSC::MarkStackArray::postIncTop): + (JSC): + (JSC::MarkStackArray::preDecTop): + (JSC::MarkStackArray::setTopForFullSegment): + (JSC::MarkStackArray::setTopForEmptySegment): + (JSC::MarkStackArray::top): + (JSC::MarkStackArray::validatePrevious): + (JSC::MarkStackArray::append): + (JSC::MarkStackArray::canRemoveLast): + (JSC::MarkStackArray::removeLast): + (JSC::MarkStackArray::isEmpty): + (JSC::MarkStackArray::size): + * heap/SlotVisitor.cpp: Added. 
+ (JSC): + (JSC::SlotVisitor::SlotVisitor): + (JSC::SlotVisitor::~SlotVisitor): + (JSC::SlotVisitor::setup): + (JSC::SlotVisitor::reset): + (JSC::SlotVisitor::append): + (JSC::visitChildren): + (JSC::SlotVisitor::donateKnownParallel): + (JSC::SlotVisitor::drain): + (JSC::SlotVisitor::drainFromShared): + (JSC::SlotVisitor::mergeOpaqueRoots): + (JSC::SlotVisitor::startCopying): + (JSC::SlotVisitor::allocateNewSpaceSlow): + (JSC::SlotVisitor::allocateNewSpaceOrPin): + (JSC::JSString::tryHashConstLock): + (JSC::JSString::releaseHashConstLock): + (JSC::JSString::shouldTryHashConst): + (JSC::SlotVisitor::internalAppend): + (JSC::SlotVisitor::copyAndAppend): + (JSC::SlotVisitor::doneCopying): + (JSC::SlotVisitor::harvestWeakReferences): + (JSC::SlotVisitor::finalizeUnconditionalFinalizers): + (JSC::SlotVisitor::validate): + * heap/SlotVisitor.h: + (JSC): + (SlotVisitor): + (JSC::SlotVisitor::sharedData): + (JSC::SlotVisitor::isEmpty): + (JSC::SlotVisitor::visitCount): + (JSC::SlotVisitor::resetChildCount): + (JSC::SlotVisitor::childCount): + (JSC::SlotVisitor::incrementChildCount): + (ParallelModeEnabler): + (JSC::ParallelModeEnabler::ParallelModeEnabler): + (JSC::ParallelModeEnabler::~ParallelModeEnabler): + * heap/SlotVisitorInlineMethods.h: + (JSC::SlotVisitor::append): + (JSC): + (JSC::SlotVisitor::appendUnbarrieredPointer): + (JSC::SlotVisitor::appendUnbarrieredValue): + (JSC::SlotVisitor::internalAppend): + (JSC::SlotVisitor::addWeakReferenceHarvester): + (JSC::SlotVisitor::addUnconditionalFinalizer): + (JSC::SlotVisitor::addOpaqueRoot): + (JSC::SlotVisitor::containsOpaqueRoot): + (JSC::SlotVisitor::opaqueRootCount): + (JSC::SlotVisitor::mergeOpaqueRootsIfNecessary): + (JSC::SlotVisitor::mergeOpaqueRootsIfProfitable): + (JSC::SlotVisitor::donate): + (JSC::SlotVisitor::donateAndDrain): + * jit/JITWriteBarrier.h: + (JSC::SlotVisitor::append): + * jit/JumpReplacementWatchpoint.cpp: + * runtime/JSCell.h: + * runtime/Structure.h: + (JSC::SlotVisitor::internalAppend): + * runtime/WriteBarrier.h: + (JSC): + (JSC::SlotVisitor::append): + (JSC::SlotVisitor::appendValues): + * yarr/YarrJIT.cpp: + 2012-09-10 Hojong Han <hojong.han@samsung.com> [EFL] JIT memory usage is not retrieved diff --git a/Source/JavaScriptCore/GNUmakefile.list.am b/Source/JavaScriptCore/GNUmakefile.list.am index dd584f747..9d8161fdd 100644 --- a/Source/JavaScriptCore/GNUmakefile.list.am +++ b/Source/JavaScriptCore/GNUmakefile.list.am @@ -266,6 +266,7 @@ javascriptcore_sources += \ Source/JavaScriptCore/heap/HeapTimer.cpp \ Source/JavaScriptCore/heap/IncrementalSweeper.h \ Source/JavaScriptCore/heap/IncrementalSweeper.cpp \ + Source/JavaScriptCore/heap/SlotVisitor.cpp \ Source/JavaScriptCore/heap/SlotVisitor.h \ Source/JavaScriptCore/heap/SlotVisitorInlineMethods.h \ Source/JavaScriptCore/heap/HandleStack.cpp \ diff --git a/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def b/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def index aa7ec78df..843bd1475 100755 --- a/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def +++ b/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def @@ -63,7 +63,7 @@ EXPORTS ?addSlowCase@Identifier@JSC@@CA?AV?$PassRefPtr@VStringImpl@WTF@@@WTF@@PAVExecState@2@PAVStringImpl@4@@Z ?addSlowCase@Identifier@JSC@@CA?AV?$PassRefPtr@VStringImpl@WTF@@@WTF@@PAVJSGlobalData@2@PAVStringImpl@4@@Z ?addStaticGlobals@JSGlobalObject@JSC@@IAEXPAUGlobalPropertyInfo@12@H@Z - ?allocateSlowCase@MarkedAllocator@JSC@@AAEPAXXZ 
+ ?allocateSlowCase@MarkedAllocator@JSC@@AAEPAXI@Z ?append@StringBuilder@WTF@@QAEXPBEI@Z ?append@StringBuilder@WTF@@QAEXPB_WI@Z ?appendNumber@StringBuilder@WTF@@QAEXH@Z @@ -365,7 +365,7 @@ EXPORTS ?unlock@Mutex@WTF@@QAEXXZ ?unlockAtomicallyInitializedStaticMutex@WTF@@YAXXZ ?unprotect@Heap@JSC@@QAE_NVJSValue@2@@Z - ?validate@MarkStack@JSC@@KAXPAVJSCell@2@@Z + ?validate@SlotVisitor@JSC@@CAXPAVJSCell@2@@Z ?visitChildren@JSGlobalObject@JSC@@SAXPAVJSCell@2@AAVSlotVisitor@2@@Z ?visitChildren@JSGlobalThis@JSC@@KAXPAVJSCell@2@AAVSlotVisitor@2@@Z ?visitChildren@JSObject@JSC@@SAXPAVJSCell@2@AAVSlotVisitor@2@@Z diff --git a/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj b/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj index a7f7cf9af..43fdfb21a 100644 --- a/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj +++ b/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj @@ -2386,6 +2386,10 @@ >
</File>
<File
+ RelativePath="..\..\heap\SlotVisitor.cpp"
+ >
+ </File>
+ <File
RelativePath="..\..\heap\Strong.h"
>
</File>
diff --git a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj b/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj index 948cecc11..7d180ebae 100644 --- a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj +++ b/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj @@ -693,6 +693,8 @@ C21122E115DD9AB300790E3A /* GCThreadSharedData.cpp in Sources */ = {isa = PBXBuildFile; fileRef = C21122DE15DD9AB300790E3A /* GCThreadSharedData.cpp */; }; C21122E215DD9AB300790E3A /* GCThreadSharedData.h in Headers */ = {isa = PBXBuildFile; fileRef = C21122DF15DD9AB300790E3A /* GCThreadSharedData.h */; settings = {ATTRIBUTES = (Private, ); }; }; C21122E315DD9AB300790E3A /* MarkStackInlineMethods.h in Headers */ = {isa = PBXBuildFile; fileRef = C21122E015DD9AB300790E3A /* MarkStackInlineMethods.h */; settings = {ATTRIBUTES = (Private, ); }; }; + C2160FE715F7E95E00942DFC /* SlotVisitorInlineMethods.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FCB408515C0A3C30048932B /* SlotVisitorInlineMethods.h */; settings = {ATTRIBUTES = (Private, ); }; }; + C225494315F7DBAA0065E898 /* SlotVisitor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = C225494215F7DBAA0065E898 /* SlotVisitor.cpp */; }; C22B31B9140577D700DB475A /* SamplingCounter.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F77008E1402FDD60078EB39 /* SamplingCounter.h */; settings = {ATTRIBUTES = (Private, ); }; }; C240305514B404E60079EB64 /* CopiedSpace.cpp in Sources */ = {isa = PBXBuildFile; fileRef = C240305314B404C90079EB64 /* CopiedSpace.cpp */; }; C25F8BCD157544A900245B71 /* IncrementalSweeper.cpp in Sources */ = {isa = PBXBuildFile; fileRef = C25F8BCB157544A900245B71 /* IncrementalSweeper.cpp */; }; @@ -1455,6 +1457,7 @@ C21122DE15DD9AB300790E3A /* GCThreadSharedData.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = GCThreadSharedData.cpp; sourceTree = "<group>"; }; C21122DF15DD9AB300790E3A /* GCThreadSharedData.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GCThreadSharedData.h; sourceTree = "<group>"; }; C21122E015DD9AB300790E3A /* MarkStackInlineMethods.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MarkStackInlineMethods.h; sourceTree = "<group>"; }; + C225494215F7DBAA0065E898 /* SlotVisitor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = SlotVisitor.cpp; sourceTree = "<group>"; }; C240305314B404C90079EB64 /* CopiedSpace.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CopiedSpace.cpp; sourceTree = "<group>"; }; C25F8BCB157544A900245B71 /* IncrementalSweeper.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = IncrementalSweeper.cpp; sourceTree = "<group>"; }; C25F8BCC157544A900245B71 /* IncrementalSweeper.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = IncrementalSweeper.h; sourceTree = "<group>"; }; @@ -1784,6 +1787,7 @@ 142E312A134FF0A600AFADB5 /* heap */ = { isa = PBXGroup; children = ( + C225494215F7DBAA0065E898 /* SlotVisitor.cpp */, C21122DE15DD9AB300790E3A /* GCThreadSharedData.cpp */, C21122DF15DD9AB300790E3A /* GCThreadSharedData.h */, C21122E015DD9AB300790E3A /* MarkStackInlineMethods.h */, @@ -2527,6 +2531,7 @@ C2B916C214DA014E00CBAC86 /* MarkedAllocator.h in Headers */, FE20CE9E15F04A9500DF3430 /* LLIntCLoop.h in Headers */, C21122E215DD9AB300790E3A /* 
GCThreadSharedData.h in Headers */, + C2160FE715F7E95E00942DFC /* SlotVisitorInlineMethods.h in Headers */, C2E526BE1590EF000054E48D /* HeapTimer.h in Headers */, C21122E315DD9AB300790E3A /* MarkStackInlineMethods.h in Headers */, C25F8BCE157544A900245B71 /* IncrementalSweeper.h in Headers */, @@ -3503,6 +3508,7 @@ 14874AE515EBDE4A002E3587 /* JSScope.cpp in Sources */, 1442566115EDE98D0066A49B /* JSWithScope.cpp in Sources */, FE20CE9D15F04A9500DF3430 /* LLIntCLoop.cpp in Sources */, + C225494315F7DBAA0065E898 /* SlotVisitor.cpp in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; diff --git a/Source/JavaScriptCore/PlatformEfl.cmake b/Source/JavaScriptCore/PlatformEfl.cmake index 78a3a1f17..3b4115514 100644 --- a/Source/JavaScriptCore/PlatformEfl.cmake +++ b/Source/JavaScriptCore/PlatformEfl.cmake @@ -18,7 +18,3 @@ IF (ENABLE_GLIB_SUPPORT) ${JAVASCRIPTCORE_DIR}/wtf/gobject ) ENDIF () - -LIST(APPEND JavaScriptCore_LINK_FLAGS - ${ECORE_LDFLAGS} -) diff --git a/Source/JavaScriptCore/Target.pri b/Source/JavaScriptCore/Target.pri index bcbc0363a..336d8008b 100644 --- a/Source/JavaScriptCore/Target.pri +++ b/Source/JavaScriptCore/Target.pri @@ -89,6 +89,7 @@ SOURCES += \ heap/MarkedAllocator.cpp \ heap/MarkedBlock.cpp \ heap/MarkedSpace.cpp \ + heap/SlotVisitor.cpp \ heap/VTableSpectrum.cpp \ heap/WriteBarrierSupport.cpp \ debugger/DebuggerActivation.cpp \ diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp index f15e5b0dd..d79a37668 100644 --- a/Source/JavaScriptCore/bytecode/CodeBlock.cpp +++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp @@ -45,6 +45,7 @@ #include "LowLevelInterpreter.h" #include "MethodCallLinkStatus.h" #include "RepatchBuffer.h" +#include "SlotVisitorInlineMethods.h" #include <stdio.h> #include <wtf/StringExtras.h> #include <wtf/UnusedParam.h> @@ -1406,14 +1407,14 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& } case op_tear_off_activation: { int r0 = (++it)->u.operand; - int r1 = (++it)->u.operand; - dataLog("[%4d] tear_off_activation\t %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data()); + dataLog("[%4d] tear_off_activation\t %s", location, registerName(exec, r0).data()); dumpBytecodeCommentAndNewLine(location); break; } case op_tear_off_arguments: { int r0 = (++it)->u.operand; - dataLog("[%4d] tear_off_arguments %s", location, registerName(exec, r0).data()); + int r1 = (++it)->u.operand; + dataLog("[%4d] tear_off_arguments %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data()); dumpBytecodeCommentAndNewLine(location); break; } diff --git a/Source/JavaScriptCore/bytecode/Opcode.h b/Source/JavaScriptCore/bytecode/Opcode.h index a63cccaec..5cbae8a09 100644 --- a/Source/JavaScriptCore/bytecode/Opcode.h +++ b/Source/JavaScriptCore/bytecode/Opcode.h @@ -174,8 +174,8 @@ namespace JSC { macro(op_call, 6) \ macro(op_call_eval, 6) \ macro(op_call_varargs, 5) \ - macro(op_tear_off_activation, 3) \ - macro(op_tear_off_arguments, 2) \ + macro(op_tear_off_activation, 2) \ + macro(op_tear_off_arguments, 3) \ macro(op_ret, 2) \ macro(op_call_put_result, 3) /* has value profiling */ \ macro(op_ret_object_or_this, 3) \ diff --git a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp index 52b576da2..82f9d6f60 100644 --- a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp +++ b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp @@ -272,6 +272,7 @@ 
BytecodeGenerator::BytecodeGenerator(ProgramNode* programNode, JSScope* scope, S , m_scopeNode(programNode) , m_codeBlock(codeBlock) , m_thisRegister(CallFrame::thisArgumentOffset()) + , m_emptyValueRegister(0) , m_finallyDepth(0) , m_dynamicScopeDepth(0) , m_baseScopeDepth(0) @@ -353,6 +354,7 @@ BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, JSScope* sc , m_scopeNode(functionBody) , m_codeBlock(codeBlock) , m_activationRegister(0) + , m_emptyValueRegister(0) , m_finallyDepth(0) , m_dynamicScopeDepth(0) , m_baseScopeDepth(0) @@ -386,8 +388,6 @@ BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, JSScope* sc m_codeBlock->setActivationRegister(m_activationRegister->index()); } - // Both op_tear_off_activation and op_tear_off_arguments tear off the 'arguments' - // object, if created. if (m_codeBlock->needsFullScopeChain() || functionBody->usesArguments()) { RegisterID* unmodifiedArgumentsRegister = addVar(); // Anonymous, so it can't be modified by user code. RegisterID* argumentsRegister = addVar(propertyNames().arguments, false); // Can be changed by assigning to 'arguments'. @@ -526,6 +526,7 @@ BytecodeGenerator::BytecodeGenerator(EvalNode* evalNode, JSScope* scope, SymbolT , m_scopeNode(evalNode) , m_codeBlock(codeBlock) , m_thisRegister(CallFrame::thisArgumentOffset()) + , m_emptyValueRegister(0) , m_finallyDepth(0) , m_dynamicScopeDepth(0) , m_baseScopeDepth(codeBlock->baseScopeDepth()) @@ -1111,18 +1112,33 @@ unsigned BytecodeGenerator::addConstant(const Identifier& ident) return result.iterator->second; } +// We can't hash JSValue(), so we use a dedicated data member to cache it. +RegisterID* BytecodeGenerator::addConstantEmptyValue() +{ + if (!m_emptyValueRegister) { + int index = m_nextConstantOffset; + m_constantPoolRegisters.append(FirstConstantRegisterIndex + m_nextConstantOffset); + ++m_nextConstantOffset; + m_codeBlock->addConstant(JSValue()); + m_emptyValueRegister = &m_constantPoolRegisters[index]; + } + + return m_emptyValueRegister; +} + RegisterID* BytecodeGenerator::addConstantValue(JSValue v) { - int index = m_nextConstantOffset; + if (!v) + return addConstantEmptyValue(); + int index = m_nextConstantOffset; JSValueMap::AddResult result = m_jsValueMap.add(JSValue::encode(v), m_nextConstantOffset); if (result.isNewEntry) { m_constantPoolRegisters.append(FirstConstantRegisterIndex + m_nextConstantOffset); ++m_nextConstantOffset; - m_codeBlock->addConstant(JSValue(v)); + m_codeBlock->addConstant(v); } else index = result.iterator->second; - return &m_constantPoolRegisters[index]; } @@ -2046,10 +2062,12 @@ RegisterID* BytecodeGenerator::emitReturn(RegisterID* src) if (m_codeBlock->needsFullScopeChain()) { emitOpcode(op_tear_off_activation); instructions().append(m_activationRegister->index()); - instructions().append(m_codeBlock->argumentsRegister()); - } else if (m_codeBlock->usesArguments() && m_codeBlock->numParameters() != 1 && !m_codeBlock->isStrictMode()) { + } + + if (m_codeBlock->usesArguments() && m_codeBlock->numParameters() != 1 && !m_codeBlock->isStrictMode()) { emitOpcode(op_tear_off_arguments); instructions().append(m_codeBlock->argumentsRegister()); + instructions().append(m_activationRegister ? 
m_activationRegister->index() : emitLoad(0, JSValue())->index()); } // Constructors use op_ret_object_or_this to check the result is an diff --git a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h index 037a2ce25..28a806eb3 100644 --- a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h +++ b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h @@ -637,6 +637,7 @@ namespace JSC { unsigned addConstant(const Identifier&); RegisterID* addConstantValue(JSValue); + RegisterID* addConstantEmptyValue(); unsigned addRegExp(RegExp*); unsigned addConstantBuffer(unsigned length); @@ -713,6 +714,7 @@ namespace JSC { RegisterID m_thisRegister; RegisterID m_calleeRegister; RegisterID* m_activationRegister; + RegisterID* m_emptyValueRegister; SegmentedVector<RegisterID, 32> m_constantPoolRegisters; SegmentedVector<RegisterID, 32> m_calleeRegisters; SegmentedVector<RegisterID, 32> m_parameters; diff --git a/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp b/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp index 640a0a966..ab0da20c7 100644 --- a/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp @@ -616,6 +616,7 @@ public: node.setOpAndDefaultFlags(Nop); m_graph.clearAndDerefChild1(node); + m_graph.clearAndDerefChild2(node); node.setRefCount(0); break; } diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp index b96b8d9a3..201c42e71 100644 --- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp +++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp @@ -2855,13 +2855,13 @@ bool ByteCodeParser::parseBlock(unsigned limit) } case op_tear_off_activation: { - addToGraph(TearOffActivation, OpInfo(unmodifiedArgumentsRegister(currentInstruction[2].u.operand)), get(currentInstruction[1].u.operand), get(currentInstruction[2].u.operand)); + addToGraph(TearOffActivation, get(currentInstruction[1].u.operand)); NEXT_OPCODE(op_tear_off_activation); } - + case op_tear_off_arguments: { m_graph.m_hasArguments = true; - addToGraph(TearOffArguments, get(unmodifiedArgumentsRegister(currentInstruction[1].u.operand))); + addToGraph(TearOffArguments, get(unmodifiedArgumentsRegister(currentInstruction[1].u.operand)), get(currentInstruction[2].u.operand)); NEXT_OPCODE(op_tear_off_arguments); } @@ -3191,7 +3191,7 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry( inlineCallFrame.capturedVars.set(i); } - if (codeBlock->usesArguments() || codeBlock->needsActivation()) { + if (codeBlock->argumentsAreCaptured()) { for (int i = argumentCountIncludingThis; i--;) inlineCallFrame.capturedVars.set(argumentToOperand(i) + inlineCallFrame.stackOffset); } diff --git a/Source/JavaScriptCore/dfg/DFGCommon.h b/Source/JavaScriptCore/dfg/DFGCommon.h index ddbefd2d5..c3726ed85 100644 --- a/Source/JavaScriptCore/dfg/DFGCommon.h +++ b/Source/JavaScriptCore/dfg/DFGCommon.h @@ -31,6 +31,7 @@ #if ENABLE(DFG_JIT) #include "CodeOrigin.h" +#include "Options.h" #include "VirtualRegister.h" /* DFG_ENABLE() - turn on a specific features in the DFG JIT */ diff --git a/Source/JavaScriptCore/dfg/DFGNode.h b/Source/JavaScriptCore/dfg/DFGNode.h index fb31f935e..0a51b71d7 100644 --- a/Source/JavaScriptCore/dfg/DFGNode.h +++ b/Source/JavaScriptCore/dfg/DFGNode.h @@ -337,12 +337,6 @@ struct Node { return variableAccessData()->local(); } - VirtualRegister unmodifiedArgumentsRegister() - { - ASSERT(op() == TearOffActivation); - return 
static_cast<VirtualRegister>(m_opInfo); - } - VirtualRegister unlinkedLocal() { ASSERT(op() == GetLocalUnlinked); diff --git a/Source/JavaScriptCore/dfg/DFGOperations.cpp b/Source/JavaScriptCore/dfg/DFGOperations.cpp index 824a0a37a..b5bfd2334 100644 --- a/Source/JavaScriptCore/dfg/DFGOperations.cpp +++ b/Source/JavaScriptCore/dfg/DFGOperations.cpp @@ -1165,40 +1165,28 @@ JSCell* DFG_OPERATION operationCreateInlinedArguments( return result; } -void DFG_OPERATION operationTearOffActivation(ExecState* exec, JSCell* activationCell, int32_t unmodifiedArgumentsRegister) +void DFG_OPERATION operationTearOffActivation(ExecState* exec, JSCell* activationCell) { JSGlobalData& globalData = exec->globalData(); NativeCallFrameTracer tracer(&globalData, exec); - if (!activationCell) { - if (JSValue v = exec->uncheckedR(unmodifiedArgumentsRegister).jsValue()) { - if (!exec->codeBlock()->isStrictMode()) - asArguments(v)->tearOff(exec); - } - return; - } - JSActivation* activation = jsCast<JSActivation*>(activationCell); - activation->tearOff(exec->globalData()); - if (JSValue v = exec->uncheckedR(unmodifiedArgumentsRegister).jsValue()) - asArguments(v)->didTearOffActivation(exec->globalData(), activation); + jsCast<JSActivation*>(activationCell)->tearOff(exec->globalData()); } - -void DFG_OPERATION operationTearOffArguments(ExecState* exec, JSCell* argumentsCell) +void DFG_OPERATION operationTearOffArguments(ExecState* exec, JSCell* argumentsCell, JSCell* activationCell) { ASSERT(exec->codeBlock()->usesArguments()); - ASSERT(!exec->codeBlock()->needsFullScopeChain()); - asArguments(argumentsCell)->tearOff(exec); + if (activationCell) { + jsCast<Arguments*>(argumentsCell)->didTearOffActivation(exec->globalData(), jsCast<JSActivation*>(activationCell)); + return; + } + jsCast<Arguments*>(argumentsCell)->tearOff(exec); } void DFG_OPERATION operationTearOffInlinedArguments( - ExecState* exec, JSCell* argumentsCell, InlineCallFrame* inlineCallFrame) -{ - // This should only be called when the inline code block uses arguments but does not - // need a full scope chain. We could assert it, except that the assertion would be - // rather expensive and may cause side effects that would greatly diverge debug-mode - // behavior from release-mode behavior, since getting the code block of an inline - // call frame implies call frame reification. - asArguments(argumentsCell)->tearOff(exec, inlineCallFrame); + ExecState* exec, JSCell* argumentsCell, JSCell* activationCell, InlineCallFrame* inlineCallFrame) +{ + ASSERT_UNUSED(activationCell, !activationCell); // Currently, we don't inline functions with activations. 
+ jsCast<Arguments*>(argumentsCell)->tearOff(exec, inlineCallFrame); } EncodedJSValue DFG_OPERATION operationGetArgumentsLength(ExecState* exec, int32_t argumentsRegister) diff --git a/Source/JavaScriptCore/dfg/DFGOperations.h b/Source/JavaScriptCore/dfg/DFGOperations.h index 82babe875..aa52703c7 100644 --- a/Source/JavaScriptCore/dfg/DFGOperations.h +++ b/Source/JavaScriptCore/dfg/DFGOperations.h @@ -98,8 +98,10 @@ typedef size_t DFG_OPERATION (*S_DFGOperation_J)(EncodedJSValue); typedef void DFG_OPERATION (*V_DFGOperation_EAZJ)(ExecState*, JSArray*, int32_t, EncodedJSValue); typedef void DFG_OPERATION (*V_DFGOperation_EC)(ExecState*, JSCell*); typedef void DFG_OPERATION (*V_DFGOperation_ECIcf)(ExecState*, JSCell*, InlineCallFrame*); +typedef void DFG_OPERATION (*V_DFGOperation_ECCIcf)(ExecState*, JSCell*, JSCell*, InlineCallFrame*); typedef void DFG_OPERATION (*V_DFGOperation_ECJJ)(ExecState*, JSCell*, EncodedJSValue, EncodedJSValue); typedef void DFG_OPERATION (*V_DFGOperation_ECZ)(ExecState*, JSCell*, int32_t); +typedef void DFG_OPERATION (*V_DFGOperation_ECC)(ExecState*, JSCell*, JSCell*); typedef void DFG_OPERATION (*V_DFGOperation_EJCI)(ExecState*, EncodedJSValue, JSCell*, Identifier*); typedef void DFG_OPERATION (*V_DFGOperation_EJJJ)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue); typedef void DFG_OPERATION (*V_DFGOperation_EJPP)(ExecState*, EncodedJSValue, EncodedJSValue, void*); @@ -173,9 +175,9 @@ char* DFG_OPERATION operationLinkConstruct(ExecState*) WTF_INTERNAL; JSCell* DFG_OPERATION operationCreateActivation(ExecState*) WTF_INTERNAL; JSCell* DFG_OPERATION operationCreateArguments(ExecState*) WTF_INTERNAL; JSCell* DFG_OPERATION operationCreateInlinedArguments(ExecState*, InlineCallFrame*) WTF_INTERNAL; -void DFG_OPERATION operationTearOffActivation(ExecState*, JSCell*, int32_t unmodifiedArgumentsRegister) WTF_INTERNAL; -void DFG_OPERATION operationTearOffArguments(ExecState*, JSCell*) WTF_INTERNAL; -void DFG_OPERATION operationTearOffInlinedArguments(ExecState*, JSCell*, InlineCallFrame*) WTF_INTERNAL; +void DFG_OPERATION operationTearOffActivation(ExecState*, JSCell*) WTF_INTERNAL; +void DFG_OPERATION operationTearOffArguments(ExecState*, JSCell*, JSCell*) WTF_INTERNAL; +void DFG_OPERATION operationTearOffInlinedArguments(ExecState*, JSCell*, JSCell*, InlineCallFrame*) WTF_INTERNAL; EncodedJSValue DFG_OPERATION operationGetArgumentsLength(ExecState*, int32_t) WTF_INTERNAL; EncodedJSValue DFG_OPERATION operationGetInlinedArgumentByVal(ExecState*, int32_t, InlineCallFrame*, int32_t) WTF_INTERNAL; EncodedJSValue DFG_OPERATION operationGetArgumentByVal(ExecState*, int32_t, int32_t) WTF_INTERNAL; diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h index f7b125e1b..06a8d9e31 100644 --- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h +++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h @@ -1353,6 +1353,11 @@ public: m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(arg2)); return appendCallWithExceptionCheck(operation); } + JITCompiler::Call callOperation(V_DFGOperation_ECCIcf operation, GPRReg arg1, GPRReg arg2, InlineCallFrame* arg3) + { + m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(arg3)); + return appendCallWithExceptionCheck(operation); + } JITCompiler::Call callOperation(V_DFGOperation_EJPP operation, GPRReg arg1, GPRReg arg2, void* pointer) { m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(pointer)); @@ -1388,6 +1393,11 @@ public: m_jit.setupArgumentsWithExecState(arg1, 
TrustedImm32(arg2)); return appendCallWithExceptionCheck(operation); } + JITCompiler::Call callOperation(V_DFGOperation_ECC operation, GPRReg arg1, GPRReg arg2) + { + m_jit.setupArgumentsWithExecState(arg1, arg2); + return appendCallWithExceptionCheck(operation); + } JITCompiler::Call callOperation(V_DFGOperation_W operation, WatchpointSet* watchpointSet) { m_jit.setupArguments(TrustedImmPtr(watchpointSet)); @@ -1651,6 +1661,11 @@ public: m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(inlineCallFrame)); return appendCallWithExceptionCheck(operation); } + JITCompiler::Call callOperation(V_DFGOperation_ECCIcf operation, GPRReg arg1, GPRReg arg2, InlineCallFrame* inlineCallFrame) + { + m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(inlineCallFrame)); + return appendCallWithExceptionCheck(operation); + } JITCompiler::Call callOperation(V_DFGOperation_EJPP operation, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2, void* pointer) { m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2, TrustedImmPtr(pointer)); @@ -1671,6 +1686,11 @@ public: m_jit.setupArgumentsWithExecState(arg1, TrustedImm32(arg2)); return appendCallWithExceptionCheck(operation); } + JITCompiler::Call callOperation(V_DFGOperation_ECC operation, GPRReg arg1, GPRReg arg2) + { + m_jit.setupArgumentsWithExecState(arg1, arg2); + return appendCallWithExceptionCheck(operation); + } JITCompiler::Call callOperation(V_DFGOperation_EPZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload) { m_jit.setupArgumentsWithExecState(arg1, arg2, EABI_32BIT_DUMMY_ARG arg3Payload, arg3Tag); @@ -1696,6 +1716,11 @@ public: { return callOperation(operation, arg1, arg2); } + template<typename FunctionType, typename ArgumentType1, typename ArgumentType2, typename ArgumentType3> + JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2, ArgumentType3 arg3) + { + return callOperation(operation, arg1, arg2, arg3); + } template<typename FunctionType, typename ArgumentType1, typename ArgumentType2, typename ArgumentType3, typename ArgumentType4> JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2, ArgumentType3 arg3, ArgumentType4 arg4) { @@ -2136,7 +2161,9 @@ public: void compileNewFunctionNoCheck(Node&); void compileNewFunctionExpression(Node&); bool compileRegExpExec(Node&); - + + // It is NOT okay for the structure and the scratch register to be the same thing because if they are then the Structure will + // get clobbered. template <typename ClassType, bool destructor, typename StructureType> void emitAllocateBasicJSObject(StructureType structure, GPRReg resultGPR, GPRReg scratchGPR, MacroAssembler::JumpList& slowPath) { @@ -2151,24 +2178,16 @@ public: // The object is half-allocated: we have what we know is a fresh object, but // it's still on the GC's free list. - - // Ditch the structure by placing it into the structure slot, so that we can reuse - // scratchGPR. - m_jit.storePtr(structure, MacroAssembler::Address(resultGPR, JSObject::structureOffset())); - - // Now that we have scratchGPR back, remove the object from the free list m_jit.loadPtr(MacroAssembler::Address(resultGPR), scratchGPR); m_jit.storePtr(scratchGPR, &allocator->m_freeList.head); - - // Initialize the object's classInfo pointer - m_jit.storePtr(MacroAssembler::TrustedImmPtr(&ClassType::s_info), MacroAssembler::Address(resultGPR, JSCell::classInfoOffset())); + + // Initialize the object's Structure. 
+ m_jit.storePtr(structure, MacroAssembler::Address(resultGPR, JSCell::structureOffset())); // Initialize the object's property storage pointer. m_jit.storePtr(MacroAssembler::TrustedImmPtr(0), MacroAssembler::Address(resultGPR, ClassType::offsetOfOutOfLineStorage())); } - // It is acceptable to have structure be equal to scratch, so long as you're fine - // with the structure GPR being clobbered. template<typename T> void emitAllocateJSFinalObject(T structure, GPRReg resultGPR, GPRReg scratchGPR, MacroAssembler::JumpList& slowPath) { diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp index 34b8dae46..38889fd05 100644 --- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp +++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp @@ -3296,18 +3296,20 @@ void SpeculativeJIT::compile(Node& node) SpeculateCellOperand callee(this, node.child1()); GPRTemporary result(this); + GPRTemporary structure(this); GPRTemporary scratch(this); GPRReg calleeGPR = callee.gpr(); GPRReg resultGPR = result.gpr(); + GPRReg structureGPR = structure.gpr(); GPRReg scratchGPR = scratch.gpr(); // Load the inheritorID. If the inheritorID is not set, go to slow path. - m_jit.loadPtr(MacroAssembler::Address(calleeGPR, JSFunction::offsetOfCachedInheritorID()), scratchGPR); + m_jit.loadPtr(MacroAssembler::Address(calleeGPR, JSFunction::offsetOfCachedInheritorID()), structureGPR); MacroAssembler::JumpList slowPath; - slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, scratchGPR)); + slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, structureGPR)); - emitAllocateJSFinalObject(scratchGPR, resultGPR, scratchGPR, slowPath); + emitAllocateJSFinalObject(structureGPR, resultGPR, scratchGPR, slowPath); addSlowPathGenerator(slowPathCall(slowPath, this, operationCreateThis, resultGPR, calleeGPR)); @@ -4023,43 +4025,39 @@ void SpeculativeJIT::compile(Node& node) case TearOffActivation: { JSValueOperand activationValue(this, node.child1()); - JSValueOperand argumentsValue(this, node.child2()); GPRReg activationValueTagGPR = activationValue.tagGPR(); GPRReg activationValuePayloadGPR = activationValue.payloadGPR(); - GPRReg argumentsValueTagGPR = argumentsValue.tagGPR(); - - JITCompiler::JumpList created; - created.append(m_jit.branch32(JITCompiler::NotEqual, activationValueTagGPR, TrustedImm32(JSValue::EmptyValueTag))); - created.append(m_jit.branch32(JITCompiler::NotEqual, argumentsValueTagGPR, TrustedImm32(JSValue::EmptyValueTag))); - + + JITCompiler::Jump created = m_jit.branch32(JITCompiler::NotEqual, activationValueTagGPR, TrustedImm32(JSValue::EmptyValueTag)); + addSlowPathGenerator( slowPathCall( - created, this, operationTearOffActivation, NoResult, activationValuePayloadGPR, - static_cast<int32_t>(node.unmodifiedArgumentsRegister()))); + created, this, operationTearOffActivation, NoResult, activationValuePayloadGPR)); noResult(m_compileIndex); break; } case TearOffArguments: { - JSValueOperand argumentsValue(this, node.child1()); - GPRReg argumentsValueTagGPR = argumentsValue.tagGPR(); - GPRReg argumentsValuePayloadGPR = argumentsValue.payloadGPR(); + JSValueOperand unmodifiedArgumentsValue(this, node.child1()); + JSValueOperand activationValue(this, node.child2()); + GPRReg unmodifiedArgumentsValuePayloadGPR = unmodifiedArgumentsValue.payloadGPR(); + GPRReg activationValuePayloadGPR = activationValue.payloadGPR(); - JITCompiler::Jump created = m_jit.branch32( - JITCompiler::NotEqual, argumentsValueTagGPR, 
TrustedImm32(JSValue::EmptyValueTag)); + JITCompiler::Jump created = m_jit.branchTest32( + JITCompiler::NonZero, unmodifiedArgumentsValuePayloadGPR); if (node.codeOrigin.inlineCallFrame) { addSlowPathGenerator( slowPathCall( created, this, operationTearOffInlinedArguments, NoResult, - argumentsValuePayloadGPR, node.codeOrigin.inlineCallFrame)); + unmodifiedArgumentsValuePayloadGPR, activationValuePayloadGPR, node.codeOrigin.inlineCallFrame)); } else { addSlowPathGenerator( slowPathCall( created, this, operationTearOffArguments, NoResult, - argumentsValuePayloadGPR)); + unmodifiedArgumentsValuePayloadGPR, activationValuePayloadGPR)); } noResult(m_compileIndex); diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp index a1ac899a2..0435df930 100644 --- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp +++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp @@ -3289,18 +3289,20 @@ void SpeculativeJIT::compile(Node& node) SpeculateCellOperand callee(this, node.child1()); GPRTemporary result(this); + GPRTemporary structure(this); GPRTemporary scratch(this); GPRReg calleeGPR = callee.gpr(); GPRReg resultGPR = result.gpr(); + GPRReg structureGPR = structure.gpr(); GPRReg scratchGPR = scratch.gpr(); // Load the inheritorID. If the inheritorID is not set, go to slow path. - m_jit.loadPtr(MacroAssembler::Address(calleeGPR, JSFunction::offsetOfCachedInheritorID()), scratchGPR); + m_jit.loadPtr(MacroAssembler::Address(calleeGPR, JSFunction::offsetOfCachedInheritorID()), structureGPR); MacroAssembler::JumpList slowPath; - slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, scratchGPR)); + slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, structureGPR)); - emitAllocateJSFinalObject(scratchGPR, resultGPR, scratchGPR, slowPath); + emitAllocateJSFinalObject(structureGPR, resultGPR, scratchGPR, slowPath); addSlowPathGenerator(slowPathCall(slowPath, this, operationCreateThis, resultGPR, calleeGPR)); @@ -3972,43 +3974,40 @@ void SpeculativeJIT::compile(Node& node) cellResult(resultGPR, m_compileIndex); break; } - + case TearOffActivation: { ASSERT(!node.codeOrigin.inlineCallFrame); JSValueOperand activationValue(this, node.child1()); - JSValueOperand argumentsValue(this, node.child2()); GPRReg activationValueGPR = activationValue.gpr(); - GPRReg argumentsValueGPR = argumentsValue.gpr(); - - JITCompiler::JumpList created; - created.append(m_jit.branchTestPtr(JITCompiler::NonZero, activationValueGPR)); - created.append(m_jit.branchTestPtr(JITCompiler::NonZero, argumentsValueGPR)); + + JITCompiler::Jump created = m_jit.branchTestPtr(JITCompiler::NonZero, activationValueGPR); addSlowPathGenerator( slowPathCall( - created, this, operationTearOffActivation, NoResult, activationValueGPR, - static_cast<int32_t>(node.unmodifiedArgumentsRegister()))); + created, this, operationTearOffActivation, NoResult, activationValueGPR)); noResult(m_compileIndex); break; } - + case TearOffArguments: { - JSValueOperand argumentsValue(this, node.child1()); - GPRReg argumentsValueGPR = argumentsValue.gpr(); - - JITCompiler::Jump created = m_jit.branchTestPtr(JITCompiler::NonZero, argumentsValueGPR); - + JSValueOperand unmodifiedArgumentsValue(this, node.child1()); + JSValueOperand activationValue(this, node.child2()); + GPRReg unmodifiedArgumentsValueGPR = unmodifiedArgumentsValue.gpr(); + GPRReg activationValueGPR = activationValue.gpr(); + + JITCompiler::Jump created = m_jit.branchTestPtr(JITCompiler::NonZero, unmodifiedArgumentsValueGPR); + if 
(node.codeOrigin.inlineCallFrame) { addSlowPathGenerator( slowPathCall( created, this, operationTearOffInlinedArguments, NoResult, - argumentsValueGPR, node.codeOrigin.inlineCallFrame)); + unmodifiedArgumentsValueGPR, activationValueGPR, node.codeOrigin.inlineCallFrame)); } else { addSlowPathGenerator( slowPathCall( - created, this, operationTearOffArguments, NoResult, argumentsValueGPR)); + created, this, operationTearOffArguments, NoResult, unmodifiedArgumentsValueGPR, activationValueGPR)); } noResult(m_compileIndex); diff --git a/Source/JavaScriptCore/heap/GCThreadSharedData.cpp b/Source/JavaScriptCore/heap/GCThreadSharedData.cpp index 82c52d22e..23a6b97a1 100644 --- a/Source/JavaScriptCore/heap/GCThreadSharedData.cpp +++ b/Source/JavaScriptCore/heap/GCThreadSharedData.cpp @@ -29,6 +29,7 @@ #include "JSGlobalData.h" #include "MarkStack.h" #include "SlotVisitor.h" +#include "SlotVisitorInlineMethods.h" #include <wtf/MainThread.h> namespace JSC { diff --git a/Source/JavaScriptCore/heap/GCThreadSharedData.h b/Source/JavaScriptCore/heap/GCThreadSharedData.h index 8868b440c..3f09a2820 100644 --- a/Source/JavaScriptCore/heap/GCThreadSharedData.h +++ b/Source/JavaScriptCore/heap/GCThreadSharedData.h @@ -26,7 +26,12 @@ #ifndef GCThreadSharedData_h #define GCThreadSharedData_h +#include "ListableHandler.h" #include "MarkStack.h" +#include "UnconditionalFinalizer.h" +#include "WeakReferenceHarvester.h" +#include <wtf/HashSet.h> +#include <wtf/Threading.h> #include <wtf/Vector.h> namespace JSC { @@ -48,7 +53,6 @@ public: #endif private: - friend class MarkStack; friend class SlotVisitor; #if ENABLE(PARALLEL_GC) @@ -64,7 +68,7 @@ private: bool m_shouldHashConst; Vector<ThreadIdentifier> m_markingThreads; - Vector<MarkStack*> m_markingThreadsMarkStack; + Vector<SlotVisitor*> m_markingThreadsMarkStack; Mutex m_markingLock; ThreadCondition m_markingCondition; diff --git a/Source/JavaScriptCore/heap/Heap.h b/Source/JavaScriptCore/heap/Heap.h index 69aa97e33..e48386791 100644 --- a/Source/JavaScriptCore/heap/Heap.h +++ b/Source/JavaScriptCore/heap/Heap.h @@ -185,7 +185,7 @@ namespace JSC { void* allocateWithDestructor(size_t); void* allocateWithoutDestructor(size_t); - void* allocateStructure(); + void* allocateStructure(size_t); static const size_t minExtraCost = 256; static const size_t maxExtraCost = 1024 * 1024; @@ -372,9 +372,9 @@ namespace JSC { return m_objectSpace.allocateWithoutDestructor(bytes); } - inline void* Heap::allocateStructure() + inline void* Heap::allocateStructure(size_t bytes) { - return m_objectSpace.allocateStructure(); + return m_objectSpace.allocateStructure(bytes); } inline CheckedBoolean Heap::tryAllocateStorage(size_t bytes, void** outPtr) diff --git a/Source/JavaScriptCore/heap/HeapRootVisitor.h b/Source/JavaScriptCore/heap/HeapRootVisitor.h index 76c97290a..9849d7c39 100644 --- a/Source/JavaScriptCore/heap/HeapRootVisitor.h +++ b/Source/JavaScriptCore/heap/HeapRootVisitor.h @@ -27,6 +27,7 @@ #define HeapRootVisitor_h #include "SlotVisitor.h" +#include "SlotVisitorInlineMethods.h" namespace JSC { diff --git a/Source/JavaScriptCore/heap/MarkStack.cpp b/Source/JavaScriptCore/heap/MarkStack.cpp index 9a4a01f04..582439fd2 100644 --- a/Source/JavaScriptCore/heap/MarkStack.cpp +++ b/Source/JavaScriptCore/heap/MarkStack.cpp @@ -223,402 +223,4 @@ void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other, size_t idleThread append(other.removeLast()); } -MarkStack::MarkStack(GCThreadSharedData& shared) - : m_stack(shared.m_segmentAllocator) -#if !ASSERT_DISABLED - , 
m_isCheckingForDefaultMarkViolation(false) - , m_isDraining(false) -#endif - , m_visitCount(0) - , m_isInParallelMode(false) - , m_shared(shared) - , m_shouldHashConst(false) -{ -} - -MarkStack::~MarkStack() -{ - ASSERT(m_stack.isEmpty()); -} - -void MarkStack::setup() -{ - m_shared.m_shouldHashConst = m_shared.m_globalData->haveEnoughNewStringsToHashConst(); - m_shouldHashConst = m_shared.m_shouldHashConst; -#if ENABLE(PARALLEL_GC) - for (unsigned i = 0; i < m_shared.m_markingThreadsMarkStack.size(); ++i) - m_shared.m_markingThreadsMarkStack[i]->m_shouldHashConst = m_shared.m_shouldHashConst; -#endif -} - -void MarkStack::reset() -{ - m_visitCount = 0; - ASSERT(m_stack.isEmpty()); -#if ENABLE(PARALLEL_GC) - ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now. -#else - m_opaqueRoots.clear(); -#endif - if (m_shouldHashConst) { - m_uniqueStrings.clear(); - m_shouldHashConst = false; - } -} - -void MarkStack::append(ConservativeRoots& conservativeRoots) -{ - JSCell** roots = conservativeRoots.roots(); - size_t size = conservativeRoots.size(); - for (size_t i = 0; i < size; ++i) - internalAppend(roots[i]); -} - -ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell) -{ -#if ENABLE(SIMPLE_HEAP_PROFILING) - m_visitedTypeCounts.count(cell); -#endif - - ASSERT(Heap::isMarked(cell)); - - if (isJSString(cell)) { - JSString::visitChildren(const_cast<JSCell*>(cell), visitor); - return; - } - - if (isJSFinalObject(cell)) { - JSFinalObject::visitChildren(const_cast<JSCell*>(cell), visitor); - return; - } - - if (isJSArray(cell)) { - JSArray::visitChildren(const_cast<JSCell*>(cell), visitor); - return; - } - - cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), visitor); -} - -void SlotVisitor::donateKnownParallel() -{ - // NOTE: Because we re-try often, we can afford to be conservative, and - // assume that donating is not profitable. - - // Avoid locking when a thread reaches a dead end in the object graph. - if (m_stack.size() < 2) - return; - - // If there's already some shared work queued up, be conservative and assume - // that donating more is not profitable. - if (m_shared.m_sharedMarkStack.size()) - return; - - // If we're contending on the lock, be conservative and assume that another - // thread is already donating. - MutexTryLocker locker(m_shared.m_markingLock); - if (!locker.locked()) - return; - - // Otherwise, assume that a thread will go idle soon, and donate. 
- m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack); - - if (m_shared.m_numberOfActiveParallelMarkers < Options::numberOfGCMarkers()) - m_shared.m_markingCondition.broadcast(); -} - -void SlotVisitor::drain() -{ - ASSERT(m_isInParallelMode); - -#if ENABLE(PARALLEL_GC) - if (Options::numberOfGCMarkers() > 1) { - while (!m_stack.isEmpty()) { - m_stack.refill(); - for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); m_stack.canRemoveLast() && countdown--;) - visitChildren(*this, m_stack.removeLast()); - donateKnownParallel(); - } - - mergeOpaqueRootsIfNecessary(); - return; - } -#endif - - while (!m_stack.isEmpty()) { - m_stack.refill(); - while (m_stack.canRemoveLast()) - visitChildren(*this, m_stack.removeLast()); - } -} - -void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode) -{ - ASSERT(m_isInParallelMode); - - ASSERT(Options::numberOfGCMarkers()); - - bool shouldBeParallel; - -#if ENABLE(PARALLEL_GC) - shouldBeParallel = Options::numberOfGCMarkers() > 1; -#else - ASSERT(Options::numberOfGCMarkers() == 1); - shouldBeParallel = false; -#endif - - if (!shouldBeParallel) { - // This call should be a no-op. - ASSERT_UNUSED(sharedDrainMode, sharedDrainMode == MasterDrain); - ASSERT(m_stack.isEmpty()); - ASSERT(m_shared.m_sharedMarkStack.isEmpty()); - return; - } - -#if ENABLE(PARALLEL_GC) - { - MutexLocker locker(m_shared.m_markingLock); - m_shared.m_numberOfActiveParallelMarkers++; - } - while (true) { - { - MutexLocker locker(m_shared.m_markingLock); - m_shared.m_numberOfActiveParallelMarkers--; - - // How we wait differs depending on drain mode. - if (sharedDrainMode == MasterDrain) { - // Wait until either termination is reached, or until there is some work - // for us to do. - while (true) { - // Did we reach termination? - if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) { - // Let any sleeping slaves know it's time for them to give their private CopiedBlocks back - m_shared.m_markingCondition.broadcast(); - return; - } - - // Is there work to be done? - if (!m_shared.m_sharedMarkStack.isEmpty()) - break; - - // Otherwise wait. - m_shared.m_markingCondition.wait(m_shared.m_markingLock); - } - } else { - ASSERT(sharedDrainMode == SlaveDrain); - - // Did we detect termination? If so, let the master know. - if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) - m_shared.m_markingCondition.broadcast(); - - while (m_shared.m_sharedMarkStack.isEmpty() && !m_shared.m_parallelMarkersShouldExit) { - if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) - doneCopying(); - m_shared.m_markingCondition.wait(m_shared.m_markingLock); - } - - // Is the VM exiting? If so, exit this thread. - if (m_shared.m_parallelMarkersShouldExit) { - doneCopying(); - return; - } - } - - size_t idleThreadCount = Options::numberOfGCMarkers() - m_shared.m_numberOfActiveParallelMarkers; - m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack, idleThreadCount); - m_shared.m_numberOfActiveParallelMarkers++; - } - - drain(); - } -#endif -} - -void MarkStack::mergeOpaqueRoots() -{ - ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty. 
- { - MutexLocker locker(m_shared.m_opaqueRootsLock); - HashSet<void*>::iterator begin = m_opaqueRoots.begin(); - HashSet<void*>::iterator end = m_opaqueRoots.end(); - for (HashSet<void*>::iterator iter = begin; iter != end; ++iter) - m_shared.m_opaqueRoots.add(*iter); - } - m_opaqueRoots.clear(); -} - -void SlotVisitor::startCopying() -{ - ASSERT(!m_copiedAllocator.isValid()); -} - -void* SlotVisitor::allocateNewSpaceSlow(size_t bytes) -{ - m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock()); - m_copiedAllocator.setCurrentBlock(m_shared.m_copiedSpace->allocateBlockForCopyingPhase()); - - void* result = 0; - CheckedBoolean didSucceed = m_copiedAllocator.tryAllocate(bytes, &result); - ASSERT(didSucceed); - return result; -} - -void* SlotVisitor::allocateNewSpaceOrPin(void* ptr, size_t bytes) -{ - if (!checkIfShouldCopyAndPinOtherwise(ptr, bytes)) - return 0; - - return allocateNewSpace(bytes); -} - -ALWAYS_INLINE bool JSString::tryHashConstLock() -{ -#if ENABLE(PARALLEL_GC) - unsigned currentFlags = m_flags; - - if (currentFlags & HashConstLock) - return false; - - unsigned newFlags = currentFlags | HashConstLock; - - if (!WTF::weakCompareAndSwap(&m_flags, currentFlags, newFlags)) - return false; - - WTF::memoryBarrierAfterLock(); - return true; -#else - if (isHashConstSingleton()) - return false; - - m_flags |= HashConstLock; - - return true; -#endif -} - -ALWAYS_INLINE void JSString::releaseHashConstLock() -{ -#if ENABLE(PARALLEL_GC) - WTF::memoryBarrierBeforeUnlock(); -#endif - m_flags &= ~HashConstLock; -} - -ALWAYS_INLINE bool JSString::shouldTryHashConst() -{ - return ((length() > 1) && !isRope() && !isHashConstSingleton()); -} - -ALWAYS_INLINE void MarkStack::internalAppend(JSValue* slot) -{ - // This internalAppend is only intended for visits to object and array backing stores. - // as it can change the JSValue pointed to be the argument when the original JSValue - // is a string that contains the same contents as another string. 
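// The comment above is the heart of the hash-const optimization this function
// implements: when two live strings have equal contents, later slots are
// redirected to the first string seen so the duplicate can die. A simplified
// single-threaded sketch of that redirection, with std::string standing in
// for StringImpl and no locking or GC concerns:
#include <string>
#include <unordered_map>

struct Str { std::string contents; bool isSingleton = false; };

void appendStringSketch(Str** slot, std::unordered_map<std::string, Str*>& uniqueStrings)
{
    Str* string = *slot;
    auto result = uniqueStrings.insert({string->contents, string});
    if (result.second) {
        string->isSingleton = true;    // first occurrence becomes canonical
        return;
    }
    if (result.first->second != string)
        *slot = result.first->second;  // redirect the slot, as internalAppend does
}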
- - ASSERT(slot); - JSValue value = *slot; - ASSERT(value); - if (!value.isCell()) - return; - - JSCell* cell = value.asCell(); - if (!cell) - return; - - if (m_shouldHashConst && cell->isString()) { - JSString* string = jsCast<JSString*>(cell); - if (string->shouldTryHashConst() && string->tryHashConstLock()) { - UniqueStringMap::AddResult addResult = m_uniqueStrings.add(string->string().impl(), value); - if (addResult.isNewEntry) - string->setHashConstSingleton(); - else { - JSValue existingJSValue = addResult.iterator->second; - if (value != existingJSValue) - jsCast<JSString*>(existingJSValue.asCell())->clearHashConstSingleton(); - *slot = existingJSValue; - string->releaseHashConstLock(); - return; - } - string->releaseHashConstLock(); - } - } - - internalAppend(cell); -} - -void SlotVisitor::copyAndAppend(void** ptr, size_t bytes, JSValue* values, unsigned length) -{ - void* oldPtr = *ptr; - void* newPtr = allocateNewSpaceOrPin(oldPtr, bytes); - if (newPtr) { - size_t jsValuesOffset = static_cast<size_t>(reinterpret_cast<char*>(values) - static_cast<char*>(oldPtr)); - - JSValue* newValues = reinterpret_cast_ptr<JSValue*>(static_cast<char*>(newPtr) + jsValuesOffset); - for (unsigned i = 0; i < length; i++) { - JSValue& value = values[i]; - newValues[i] = value; - if (!value) - continue; - internalAppend(&newValues[i]); - } - - memcpy(newPtr, oldPtr, jsValuesOffset); - *ptr = newPtr; - } else - append(values, length); -} - -void SlotVisitor::doneCopying() -{ - if (!m_copiedAllocator.isValid()) - return; - - m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock()); -} - -void SlotVisitor::harvestWeakReferences() -{ - for (WeakReferenceHarvester* current = m_shared.m_weakReferenceHarvesters.head(); current; current = current->next()) - current->visitWeakReferences(*this); -} - -void SlotVisitor::finalizeUnconditionalFinalizers() -{ - while (m_shared.m_unconditionalFinalizers.hasNext()) - m_shared.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally(); -} - -#if ENABLE(GC_VALIDATION) -void MarkStack::validate(JSCell* cell) -{ - if (!cell) { - dataLog("cell is NULL\n"); - CRASH(); - } - - if (!cell->structure()) { - dataLog("cell at %p has a null structure\n" , cell); - CRASH(); - } - - // Both the cell's structure, and the cell's structure's structure should be the Structure Structure. - // I hate this sentence. 
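// A toy model of the invariant validate() checks just below — every Structure
// is itself a cell, and a well-formed cell's structure and that structure's
// structure must report the same class — not JSC's real object layout:
#include <cassert>
#include <cstdio>
#include <cstdlib>

struct ClassInfo { const char* className; };
struct Cell {
    Cell* structure;
    const ClassInfo* classInfo;
};

void validateToy(const Cell* cell)
{
    assert(cell && cell->structure && cell->structure->structure);
    const Cell* structure = cell->structure;
    const Cell* meta = structure->structure;
    if (meta->classInfo != structure->classInfo) {
        std::printf("parent structure <%s> doesn't match cell's structure <%s>\n",
                    meta->classInfo->className, structure->classInfo->className);
        std::abort();
    }
}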
- if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo()) { - const char* parentClassName = 0; - const char* ourClassName = 0; - if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo()) - parentClassName = cell->structure()->structure()->JSCell::classInfo()->className; - if (cell->structure()->JSCell::classInfo()) - ourClassName = cell->structure()->JSCell::classInfo()->className; - dataLog("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n", - cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName); - CRASH(); - } -} -#else -void MarkStack::validate(JSCell*) -{ -} -#endif - } // namespace JSC diff --git a/Source/JavaScriptCore/heap/MarkStack.h b/Source/JavaScriptCore/heap/MarkStack.h index 54ae1cb02..0245e4be5 100644 --- a/Source/JavaScriptCore/heap/MarkStack.h +++ b/Source/JavaScriptCore/heap/MarkStack.h @@ -26,25 +26,6 @@ #ifndef MarkStack_h #define MarkStack_h -#include "CopiedSpace.h" -#include "HandleTypes.h" -#include "JSValue.h" -#include "Options.h" -#include "Register.h" -#include "UnconditionalFinalizer.h" -#include "VTableSpectrum.h" -#include "WeakReferenceHarvester.h" -#include <wtf/DataLog.h> -#include <wtf/Forward.h> -#include <wtf/HashMap.h> -#include <wtf/HashSet.h> -#include <wtf/Noncopyable.h> -#include <wtf/OSAllocator.h> -#include <wtf/PageBlock.h> -#include <wtf/TCSpinLock.h> -#include <wtf/text/StringHash.h> -#include <wtf/Vector.h> - #if ENABLE(OBJECT_MARK_LOGGING) #define MARK_LOG_MESSAGE0(message) dataLog(message) #define MARK_LOG_MESSAGE1(message, arg1) dataLog(message, arg1) @@ -69,276 +50,86 @@ #define MARK_LOG_CHILD(visitor, child) do { } while (false) #endif -namespace JSC { - - class ConservativeRoots; - class JSGlobalData; - class MarkStack; - class GCThreadSharedData; - class ParallelModeEnabler; - class Register; - class SlotVisitor; - template<typename T> class WriteBarrierBase; - template<typename T> class JITWriteBarrier; - - struct MarkStackSegment { - MarkStackSegment* m_previous; -#if !ASSERT_DISABLED - size_t m_top; -#endif - - const JSCell** data() - { - return bitwise_cast<const JSCell**>(this + 1); - } - - static size_t capacityFromSize(size_t size) - { - return (size - sizeof(MarkStackSegment)) / sizeof(const JSCell*); - } - - static size_t sizeFromCapacity(size_t capacity) - { - return sizeof(MarkStackSegment) + capacity * sizeof(const JSCell*); - } - }; - - class MarkStackSegmentAllocator { - public: - MarkStackSegmentAllocator(); - ~MarkStackSegmentAllocator(); - - MarkStackSegment* allocate(); - void release(MarkStackSegment*); - - void shrinkReserve(); - - private: - SpinLock m_lock; - MarkStackSegment* m_nextFreeSegment; - }; - - class MarkStackArray { - public: - MarkStackArray(MarkStackSegmentAllocator&); - ~MarkStackArray(); - - void append(const JSCell*); - - bool canRemoveLast(); - const JSCell* removeLast(); - bool refill(); - - bool isEmpty(); - - void donateSomeCellsTo(MarkStackArray& other); - - void stealSomeCellsFrom(MarkStackArray& other, size_t idleThreadCount); - - size_t size(); - - private: - MarkStackSegment* m_topSegment; - - JS_EXPORT_PRIVATE void expand(); - - MarkStackSegmentAllocator& m_allocator; - - size_t m_segmentCapacity; - size_t m_top; - size_t m_numberOfPreviousSegments; - - size_t postIncTop() - { - size_t result = m_top++; - ASSERT(result == m_topSegment->m_top++); - return result; - } - - size_t preDecTop() - { - size_t result = --m_top; - ASSERT(result == 
--m_topSegment->m_top); - return result; - } - - void setTopForFullSegment() - { - ASSERT(m_topSegment->m_top == m_segmentCapacity); - m_top = m_segmentCapacity; - } - - void setTopForEmptySegment() - { - ASSERT(!m_topSegment->m_top); - m_top = 0; - } - - size_t top() - { - ASSERT(m_top == m_topSegment->m_top); - return m_top; - } - -#if ASSERT_DISABLED - void validatePrevious() { } -#else - void validatePrevious() - { - unsigned count = 0; - for (MarkStackSegment* current = m_topSegment->m_previous; current; current = current->m_previous) - count++; - ASSERT(count == m_numberOfPreviousSegments); - } -#endif - }; - - class MarkStack { - WTF_MAKE_NONCOPYABLE(MarkStack); - friend class HeapRootVisitor; // Allowed to mark a JSValue* or JSCell** directly. - - public: - MarkStack(GCThreadSharedData&); - ~MarkStack(); - - void append(ConservativeRoots&); - - template<typename T> void append(JITWriteBarrier<T>*); - template<typename T> void append(WriteBarrierBase<T>*); - void appendValues(WriteBarrierBase<Unknown>*, size_t count); - - template<typename T> - void appendUnbarrieredPointer(T**); - void appendUnbarrieredValue(JSValue*); - - void addOpaqueRoot(void*); - bool containsOpaqueRoot(void*); - int opaqueRootCount(); - - GCThreadSharedData& sharedData() { return m_shared; } - bool isEmpty() { return m_stack.isEmpty(); } - - void setup(); - void reset(); - - size_t visitCount() const { return m_visitCount; } - -#if ENABLE(SIMPLE_HEAP_PROFILING) - VTableSpectrum m_visitedTypeCounts; -#endif - - void addWeakReferenceHarvester(WeakReferenceHarvester*); - void addUnconditionalFinalizer(UnconditionalFinalizer*); - -#if ENABLE(OBJECT_MARK_LOGGING) - inline void resetChildCount() { m_logChildCount = 0; } - inline unsigned childCount() { return m_logChildCount; } - inline void incrementChildCount() { m_logChildCount++; } -#endif +#include <wtf/StdLibExtras.h> +#include <wtf/TCSpinLock.h> - protected: - JS_EXPORT_PRIVATE static void validate(JSCell*); +namespace JSC { - void append(JSValue*); - void append(JSValue*, size_t count); - void append(JSCell**); +class JSCell; - void internalAppend(JSCell*); - void internalAppend(JSValue); - void internalAppend(JSValue*); - - JS_EXPORT_PRIVATE void mergeOpaqueRoots(); - - void mergeOpaqueRootsIfNecessary() - { - if (m_opaqueRoots.isEmpty()) - return; - mergeOpaqueRoots(); - } - - void mergeOpaqueRootsIfProfitable() - { - if (static_cast<unsigned>(m_opaqueRoots.size()) < Options::opaqueRootMergeThreshold()) - return; - mergeOpaqueRoots(); - } - - MarkStackArray m_stack; - HashSet<void*> m_opaqueRoots; // Handle-owning data structures not visible to the garbage collector. 
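// The arithmetic behind the MarkStackArray bookkeeping being moved out of
// this header: only the top segment is partially full, so the total size is
// the top index plus one full capacity per previous segment. A minimal
// sketch tracking indices only, without real segment storage:
#include <cstddef>

struct SegmentedStackSize {
    std::size_t top = 0;               // m_top: index into the top segment
    std::size_t segmentCapacity;       // m_segmentCapacity
    std::size_t previousSegments = 0;  // m_numberOfPreviousSegments

    explicit SegmentedStackSize(std::size_t capacity) : segmentCapacity(capacity) {}

    // append(): roll over into a fresh segment when the top one fills up.
    void push()
    {
        if (top == segmentCapacity) {
            ++previousSegments;
            top = 0;
        }
        ++top;
    }

    // MarkStackArray::size() in the patch computes exactly this.
    std::size_t size() const { return top + segmentCapacity * previousSegments; }
};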
- +struct MarkStackSegment { + MarkStackSegment* m_previous; #if !ASSERT_DISABLED - public: - bool m_isCheckingForDefaultMarkViolation; - bool m_isDraining; + size_t m_top; #endif - protected: - friend class ParallelModeEnabler; - size_t m_visitCount; - bool m_isInParallelMode; - - GCThreadSharedData& m_shared; - - bool m_shouldHashConst; // Local per-thread copy of shared flag for performance reasons - typedef HashMap<StringImpl*, JSValue> UniqueStringMap; - UniqueStringMap m_uniqueStrings; - -#if ENABLE(OBJECT_MARK_LOGGING) - unsigned m_logChildCount; -#endif - }; - - inline void MarkStackArray::append(const JSCell* cell) + const JSCell** data() { - if (m_top == m_segmentCapacity) - expand(); - m_topSegment->data()[postIncTop()] = cell; + return bitwise_cast<const JSCell**>(this + 1); } - - inline bool MarkStackArray::canRemoveLast() + + static size_t capacityFromSize(size_t size) { - return !!m_top; + return (size - sizeof(MarkStackSegment)) / sizeof(const JSCell*); } - - inline const JSCell* MarkStackArray::removeLast() + + static size_t sizeFromCapacity(size_t capacity) { - return m_topSegment->data()[preDecTop()]; + return sizeof(MarkStackSegment) + capacity * sizeof(const JSCell*); } +}; - inline bool MarkStackArray::isEmpty() - { - if (m_top) - return false; - if (m_topSegment->m_previous) { - ASSERT(m_topSegment->m_previous->m_top == m_segmentCapacity); - return false; - } - return true; - } +class MarkStackSegmentAllocator { +public: + MarkStackSegmentAllocator(); + ~MarkStackSegmentAllocator(); + + MarkStackSegment* allocate(); + void release(MarkStackSegment*); + + void shrinkReserve(); + +private: + SpinLock m_lock; + MarkStackSegment* m_nextFreeSegment; +}; - inline size_t MarkStackArray::size() - { - return m_top + m_segmentCapacity * m_numberOfPreviousSegments; - } +class MarkStackArray { +public: + MarkStackArray(MarkStackSegmentAllocator&); + ~MarkStackArray(); - class ParallelModeEnabler { - public: - ParallelModeEnabler(MarkStack& stack) - : m_stack(stack) - { - ASSERT(!m_stack.m_isInParallelMode); - m_stack.m_isInParallelMode = true; - } - - ~ParallelModeEnabler() - { - ASSERT(m_stack.m_isInParallelMode); - m_stack.m_isInParallelMode = false; - } - - private: - MarkStack& m_stack; - }; + void append(const JSCell*); + + bool canRemoveLast(); + const JSCell* removeLast(); + bool refill(); + + void donateSomeCellsTo(MarkStackArray& other); + void stealSomeCellsFrom(MarkStackArray& other, size_t idleThreadCount); + + size_t size(); + bool isEmpty(); + +private: + JS_EXPORT_PRIVATE void expand(); + + size_t postIncTop(); + size_t preDecTop(); + void setTopForFullSegment(); + void setTopForEmptySegment(); + size_t top(); + + void validatePrevious(); + + MarkStackSegment* m_topSegment; + MarkStackSegmentAllocator& m_allocator; + + size_t m_segmentCapacity; + size_t m_top; + size_t m_numberOfPreviousSegments; + +}; } // namespace JSC diff --git a/Source/JavaScriptCore/heap/MarkStackInlineMethods.h b/Source/JavaScriptCore/heap/MarkStackInlineMethods.h index 031dfff39..d3276d7fa 100644 --- a/Source/JavaScriptCore/heap/MarkStackInlineMethods.h +++ b/Source/JavaScriptCore/heap/MarkStackInlineMethods.h @@ -31,94 +31,81 @@ namespace JSC { -ALWAYS_INLINE void MarkStack::append(JSValue* slot, size_t count) +inline size_t MarkStackArray::postIncTop() { - for (size_t i = 0; i < count; ++i) { - JSValue& value = slot[i]; - internalAppend(value); - } + size_t result = m_top++; + ASSERT(result == m_topSegment->m_top++); + return result; } - -template<typename T> -inline void 
MarkStack::appendUnbarrieredPointer(T** slot) + +inline size_t MarkStackArray::preDecTop() { - ASSERT(slot); - JSCell* cell = *slot; - internalAppend(cell); + size_t result = --m_top; + ASSERT(result == --m_topSegment->m_top); + return result; } - -ALWAYS_INLINE void MarkStack::append(JSValue* slot) + +inline void MarkStackArray::setTopForFullSegment() { - ASSERT(slot); - internalAppend(*slot); + ASSERT(m_topSegment->m_top == m_segmentCapacity); + m_top = m_segmentCapacity; } -ALWAYS_INLINE void MarkStack::appendUnbarrieredValue(JSValue* slot) +inline void MarkStackArray::setTopForEmptySegment() { - ASSERT(slot); - internalAppend(*slot); + ASSERT(!m_topSegment->m_top); + m_top = 0; } -ALWAYS_INLINE void MarkStack::append(JSCell** slot) +inline size_t MarkStackArray::top() { - ASSERT(slot); - internalAppend(*slot); + ASSERT(m_top == m_topSegment->m_top); + return m_top; } -ALWAYS_INLINE void MarkStack::internalAppend(JSValue value) +#if ASSERT_DISABLED +inline void MarkStackArray::validatePrevious() { } +#else +inline void MarkStackArray::validatePrevious() { - if (!value || !value.isCell()) - return; - internalAppend(value.asCell()); + unsigned count = 0; + for (MarkStackSegment* current = m_topSegment->m_previous; current; current = current->m_previous) + count++; + ASSERT(count == m_numberOfPreviousSegments); } +#endif -inline void MarkStack::addWeakReferenceHarvester(WeakReferenceHarvester* weakReferenceHarvester) +inline void MarkStackArray::append(const JSCell* cell) { - m_shared.m_weakReferenceHarvesters.addThreadSafe(weakReferenceHarvester); + if (m_top == m_segmentCapacity) + expand(); + m_topSegment->data()[postIncTop()] = cell; } -inline void MarkStack::addUnconditionalFinalizer(UnconditionalFinalizer* unconditionalFinalizer) +inline bool MarkStackArray::canRemoveLast() { - m_shared.m_unconditionalFinalizers.addThreadSafe(unconditionalFinalizer); + return !!m_top; } -inline void MarkStack::addOpaqueRoot(void* root) +inline const JSCell* MarkStackArray::removeLast() { -#if ENABLE(PARALLEL_GC) - if (Options::numberOfGCMarkers() == 1) { - // Put directly into the shared HashSet. - m_shared.m_opaqueRoots.add(root); - return; - } - // Put into the local set, but merge with the shared one every once in - // a while to make sure that the local sets don't grow too large. 
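// A sketch of the merge-if-profitable policy described above: opaque roots
// accumulate in a per-thread set and are flushed to the shared set only once
// the local set passes a threshold (Options::opaqueRootMergeThreshold() in
// the patch; a made-up constant here), so the shared lock is taken rarely.
#include <cstddef>
#include <unordered_set>

const std::size_t kMergeThreshold = 1000; // assumed value, for illustration

struct OpaqueRoots {
    std::unordered_set<void*> local;
    std::unordered_set<void*>* shared = nullptr;

    void add(void* root)
    {
        if (local.size() >= kMergeThreshold)
            merge();
        local.insert(root);
    }

    void merge() // the real code holds m_shared.m_opaqueRootsLock here
    {
        shared->insert(local.begin(), local.end());
        local.clear();
    }
};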
- mergeOpaqueRootsIfProfitable(); - m_opaqueRoots.add(root); -#else - m_opaqueRoots.add(root); -#endif + return m_topSegment->data()[preDecTop()]; } -inline bool MarkStack::containsOpaqueRoot(void* root) +inline bool MarkStackArray::isEmpty() { - ASSERT(!m_isInParallelMode); -#if ENABLE(PARALLEL_GC) - ASSERT(m_opaqueRoots.isEmpty()); - return m_shared.m_opaqueRoots.contains(root); -#else - return m_opaqueRoots.contains(root); -#endif + if (m_top) + return false; + if (m_topSegment->m_previous) { + ASSERT(m_topSegment->m_previous->m_top == m_segmentCapacity); + return false; + } + return true; } -inline int MarkStack::opaqueRootCount() +inline size_t MarkStackArray::size() { - ASSERT(!m_isInParallelMode); -#if ENABLE(PARALLEL_GC) - ASSERT(m_opaqueRoots.isEmpty()); - return m_shared.m_opaqueRoots.size(); -#else - return m_opaqueRoots.size(); -#endif + return m_top + m_segmentCapacity * m_numberOfPreviousSegments; } } // namespace JSC diff --git a/Source/JavaScriptCore/heap/MarkedAllocator.cpp b/Source/JavaScriptCore/heap/MarkedAllocator.cpp index 20b556969..ab37ead4c 100644 --- a/Source/JavaScriptCore/heap/MarkedAllocator.cpp +++ b/Source/JavaScriptCore/heap/MarkedAllocator.cpp @@ -27,7 +27,7 @@ bool MarkedAllocator::isPagedOut(double deadline) return false; } -inline void* MarkedAllocator::tryAllocateHelper() +inline void* MarkedAllocator::tryAllocateHelper(size_t bytes) { if (!m_freeList.head) { if (m_onlyContainsStructures && !m_heap->isSafeToSweepStructures()) { @@ -42,12 +42,20 @@ inline void* MarkedAllocator::tryAllocateHelper() } for (MarkedBlock*& block = m_blocksToSweep; block; block = block->next()) { - m_freeList = block->sweep(MarkedBlock::SweepToFreeList); - if (m_freeList.head) { - m_currentBlock = block; - break; + MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList); + if (!freeList.head) { + block->didConsumeFreeList(); + continue; } - block->didConsumeFreeList(); + + if (bytes > block->cellSize()) { + block->zapFreeList(freeList); + continue; + } + + m_currentBlock = block; + m_freeList = freeList; + break; } if (!m_freeList.head) { @@ -62,16 +70,16 @@ inline void* MarkedAllocator::tryAllocateHelper() return head; } -inline void* MarkedAllocator::tryAllocate() +inline void* MarkedAllocator::tryAllocate(size_t bytes) { ASSERT(!m_heap->isBusy()); m_heap->m_operationInProgress = Allocation; - void* result = tryAllocateHelper(); + void* result = tryAllocateHelper(bytes); m_heap->m_operationInProgress = NoOperation; return result; } -void* MarkedAllocator::allocateSlowCase() +void* MarkedAllocator::allocateSlowCase(size_t bytes) { ASSERT(m_heap->globalData()->apiLock().currentThreadIsHoldingLock()); #if COLLECT_ON_EVERY_ALLOCATION @@ -82,7 +90,7 @@ void* MarkedAllocator::allocateSlowCase() ASSERT(!m_freeList.head); m_heap->didAllocate(m_freeList.bytes); - void* result = tryAllocate(); + void* result = tryAllocate(bytes); if (LIKELY(result != 0)) return result; @@ -90,27 +98,39 @@ void* MarkedAllocator::allocateSlowCase() if (m_heap->shouldCollect()) { m_heap->collect(Heap::DoNotSweep); - result = tryAllocate(); + result = tryAllocate(bytes); if (result) return result; } ASSERT(!m_heap->shouldCollect()); - MarkedBlock* block = allocateBlock(); + MarkedBlock* block = allocateBlock(bytes); ASSERT(block); addBlock(block); - result = tryAllocate(); + result = tryAllocate(bytes); ASSERT(result); return result; } -MarkedBlock* MarkedAllocator::allocateBlock() +MarkedBlock* MarkedAllocator::allocateBlock(size_t bytes) { - MarkedBlock* block = 
MarkedBlock::create(m_heap->blockAllocator().allocate(), m_heap, m_cellSize, m_cellsNeedDestruction, m_onlyContainsStructures); - m_markedSpace->didAddBlock(block); - return block; + size_t minBlockSize = MarkedBlock::blockSize; + size_t minAllocationSize = WTF::roundUpToMultipleOf(WTF::pageSize(), sizeof(MarkedBlock) + bytes); + size_t blockSize = std::max(minBlockSize, minAllocationSize); + + size_t cellSize = m_cellSize ? m_cellSize : WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(bytes); + + if (blockSize == MarkedBlock::blockSize) { + PageAllocationAligned allocation = m_heap->blockAllocator().allocate(); + return MarkedBlock::create(allocation, m_heap, cellSize, m_cellsNeedDestruction, m_onlyContainsStructures); + } + + PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, MarkedBlock::blockSize, OSAllocator::JSGCHeapPages); + if (!static_cast<bool>(allocation)) + CRASH(); + return MarkedBlock::create(allocation, m_heap, cellSize, m_cellsNeedDestruction, m_onlyContainsStructures); } void MarkedAllocator::addBlock(MarkedBlock* block) @@ -121,6 +141,7 @@ void MarkedAllocator::addBlock(MarkedBlock* block) m_blockList.append(block); m_blocksToSweep = m_currentBlock = block; m_freeList = block->sweep(MarkedBlock::SweepToFreeList); + m_markedSpace->didAddBlock(block); } void MarkedAllocator::removeBlock(MarkedBlock* block) diff --git a/Source/JavaScriptCore/heap/MarkedAllocator.h b/Source/JavaScriptCore/heap/MarkedAllocator.h index c1c431194..7273c13e4 100644 --- a/Source/JavaScriptCore/heap/MarkedAllocator.h +++ b/Source/JavaScriptCore/heap/MarkedAllocator.h @@ -25,7 +25,7 @@ public: size_t cellSize() { return m_cellSize; } bool cellsNeedDestruction() { return m_cellsNeedDestruction; } bool onlyContainsStructures() { return m_onlyContainsStructures; } - void* allocate(); + void* allocate(size_t); Heap* heap() { return m_heap; } template<typename Functor> void forEachBlock(Functor&); @@ -39,10 +39,10 @@ public: private: friend class LLIntOffsetsExtractor; - JS_EXPORT_PRIVATE void* allocateSlowCase(); - void* tryAllocate(); - void* tryAllocateHelper(); - MarkedBlock* allocateBlock(); + JS_EXPORT_PRIVATE void* allocateSlowCase(size_t); + void* tryAllocate(size_t); + void* tryAllocateHelper(size_t); + MarkedBlock* allocateBlock(size_t); MarkedBlock::FreeList m_freeList; MarkedBlock* m_currentBlock; @@ -75,12 +75,11 @@ inline void MarkedAllocator::init(Heap* heap, MarkedSpace* markedSpace, size_t c m_onlyContainsStructures = onlyContainsStructures; } -inline void* MarkedAllocator::allocate() +inline void* MarkedAllocator::allocate(size_t bytes) { MarkedBlock::FreeCell* head = m_freeList.head; - // This is a light-weight fast path to cover the most common case. 
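// The "light-weight fast path" named in the comment above, as a standalone
// sketch: pop the free-list head if there is one, otherwise take the slow
// path that sweeps, collects, or allocates a new block (elided here).
#include <cstddef>

struct FreeCell { FreeCell* next; };

struct AllocatorSketch {
    FreeCell* head = nullptr;

    void* allocateSlowCase(std::size_t) { return nullptr; } // elided: sweep/collect/new block

    void* allocate(std::size_t bytes)
    {
        FreeCell* cell = head;
        if (!cell)                  // UNLIKELY in the real code
            return allocateSlowCase(bytes);
        head = cell->next;
        return cell;
    }
};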
if (UNLIKELY(!head)) - return allocateSlowCase(); + return allocateSlowCase(bytes); m_freeList.head = head->next; return head; diff --git a/Source/JavaScriptCore/heap/MarkedSpace.cpp b/Source/JavaScriptCore/heap/MarkedSpace.cpp index 68b059c36..689e5f9ab 100644 --- a/Source/JavaScriptCore/heap/MarkedSpace.cpp +++ b/Source/JavaScriptCore/heap/MarkedSpace.cpp @@ -90,6 +90,7 @@ MarkedSpace::MarkedSpace(Heap* heap) destructorAllocatorFor(cellSize).init(heap, this, cellSize, true, false); } + m_largeAllocator.init(heap, this, 0, true, false); m_structureAllocator.init(heap, this, WTF::roundUpToMultipleOf(32, sizeof(Structure)), true, true); } @@ -127,6 +128,7 @@ void MarkedSpace::resetAllocators() destructorAllocatorFor(cellSize).reset(); } + m_largeAllocator.reset(); m_structureAllocator.reset(); } @@ -153,6 +155,7 @@ void MarkedSpace::canonicalizeCellLivenessData() destructorAllocatorFor(cellSize).zapFreeList(); } + m_largeAllocator.zapFreeList(); m_structureAllocator.zapFreeList(); } @@ -168,6 +171,9 @@ bool MarkedSpace::isPagedOut(double deadline) return true; } + if (m_largeAllocator.isPagedOut(deadline)) + return true; + if (m_structureAllocator.isPagedOut(deadline)) return true; @@ -178,7 +184,12 @@ void MarkedSpace::freeBlock(MarkedBlock* block) { allocatorFor(block).removeBlock(block); m_blocks.remove(block); - m_heap->blockAllocator().deallocate(MarkedBlock::destroy(block)); + if (block->capacity() == MarkedBlock::blockSize) { + m_heap->blockAllocator().deallocate(MarkedBlock::destroy(block)); + return; + } + + MarkedBlock::destroy(block).deallocate(); } void MarkedSpace::freeOrShrinkBlock(MarkedBlock* block) diff --git a/Source/JavaScriptCore/heap/MarkedSpace.h b/Source/JavaScriptCore/heap/MarkedSpace.h index d5dae3584..03679d9d3 100644 --- a/Source/JavaScriptCore/heap/MarkedSpace.h +++ b/Source/JavaScriptCore/heap/MarkedSpace.h @@ -80,7 +80,7 @@ public: MarkedAllocator& destructorAllocatorFor(size_t); void* allocateWithDestructor(size_t); void* allocateWithoutDestructor(size_t); - void* allocateStructure(); + void* allocateStructure(size_t); void resetAllocators(); @@ -115,15 +115,15 @@ public: private: friend class LLIntOffsetsExtractor; - - // [ 32... 256 ] + + // [ 32... 512 ] static const size_t preciseStep = MarkedBlock::atomSize; - static const size_t preciseCutoff = 256; + static const size_t preciseCutoff = 512; static const size_t preciseCount = preciseCutoff / preciseStep; - // [ 512... 2048 ] - static const size_t impreciseStep = preciseCutoff; - static const size_t impreciseCutoff = maxCellSize; + // [ 1024... 
blockSize ] + static const size_t impreciseStep = 2 * preciseCutoff; + static const size_t impreciseCutoff = MarkedBlock::blockSize / 2; static const size_t impreciseCount = impreciseCutoff / impreciseStep; struct Subspace { @@ -133,6 +133,7 @@ private: Subspace m_destructorSpace; Subspace m_normalSpace; + MarkedAllocator m_largeAllocator; MarkedAllocator m_structureAllocator; Heap* m_heap; @@ -162,10 +163,12 @@ inline MarkedAllocator& MarkedSpace::firstAllocator() inline MarkedAllocator& MarkedSpace::allocatorFor(size_t bytes) { - ASSERT(bytes && bytes <= maxCellSize); + ASSERT(bytes); if (bytes <= preciseCutoff) return m_normalSpace.preciseAllocators[(bytes - 1) / preciseStep]; - return m_normalSpace.impreciseAllocators[(bytes - 1) / impreciseStep]; + if (bytes <= impreciseCutoff) + return m_normalSpace.impreciseAllocators[(bytes - 1) / impreciseStep]; + return m_largeAllocator; } inline MarkedAllocator& MarkedSpace::allocatorFor(MarkedBlock* block) @@ -181,25 +184,27 @@ inline MarkedAllocator& MarkedSpace::allocatorFor(MarkedBlock* block) inline MarkedAllocator& MarkedSpace::destructorAllocatorFor(size_t bytes) { - ASSERT(bytes && bytes <= maxCellSize); + ASSERT(bytes); if (bytes <= preciseCutoff) return m_destructorSpace.preciseAllocators[(bytes - 1) / preciseStep]; - return m_destructorSpace.impreciseAllocators[(bytes - 1) / impreciseStep]; + if (bytes <= impreciseCutoff) + return m_normalSpace.impreciseAllocators[(bytes - 1) / impreciseStep]; + return m_largeAllocator; } inline void* MarkedSpace::allocateWithoutDestructor(size_t bytes) { - return allocatorFor(bytes).allocate(); + return allocatorFor(bytes).allocate(bytes); } inline void* MarkedSpace::allocateWithDestructor(size_t bytes) { - return destructorAllocatorFor(bytes).allocate(); + return destructorAllocatorFor(bytes).allocate(bytes); } -inline void* MarkedSpace::allocateStructure() +inline void* MarkedSpace::allocateStructure(size_t bytes) { - return m_structureAllocator.allocate(); + return m_structureAllocator.allocate(bytes); } template <typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachBlock(Functor& functor) @@ -214,6 +219,7 @@ template <typename Functor> inline typename Functor::ReturnType MarkedSpace::for m_destructorSpace.impreciseAllocators[i].forEachBlock(functor); } + m_largeAllocator.forEachBlock(functor); m_structureAllocator.forEachBlock(functor); return functor.returnValue(); diff --git a/Source/JavaScriptCore/heap/SlotVisitor.cpp b/Source/JavaScriptCore/heap/SlotVisitor.cpp new file mode 100644 index 000000000..0f003e79d --- /dev/null +++ b/Source/JavaScriptCore/heap/SlotVisitor.cpp @@ -0,0 +1,412 @@ +#include "config.h" +#include "SlotVisitor.h" + +#include "ConservativeRoots.h" +#include "CopiedSpace.h" +#include "CopiedSpaceInlineMethods.h" +#include "JSArray.h" +#include "JSGlobalData.h" +#include "JSObject.h" +#include "JSString.h" + +namespace JSC { + +SlotVisitor::SlotVisitor(GCThreadSharedData& shared) + : m_stack(shared.m_segmentAllocator) + , m_visitCount(0) + , m_isInParallelMode(false) + , m_shared(shared) + , m_shouldHashConst(false) +#if !ASSERT_DISABLED + , m_isCheckingForDefaultMarkViolation(false) + , m_isDraining(false) +#endif +{ +} + +SlotVisitor::~SlotVisitor() +{ + ASSERT(m_stack.isEmpty()); +} + +void SlotVisitor::setup() +{ + m_shared.m_shouldHashConst = m_shared.m_globalData->haveEnoughNewStringsToHashConst(); + m_shouldHashConst = m_shared.m_shouldHashConst; +#if ENABLE(PARALLEL_GC) + for (unsigned i = 0; i < m_shared.m_markingThreadsMarkStack.size(); ++i) + 
m_shared.m_markingThreadsMarkStack[i]->m_shouldHashConst = m_shared.m_shouldHashConst; +#endif +} + +void SlotVisitor::reset() +{ + m_visitCount = 0; + ASSERT(m_stack.isEmpty()); +#if ENABLE(PARALLEL_GC) + ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now. +#else + m_opaqueRoots.clear(); +#endif + if (m_shouldHashConst) { + m_uniqueStrings.clear(); + m_shouldHashConst = false; + } +} + +void SlotVisitor::append(ConservativeRoots& conservativeRoots) +{ + JSCell** roots = conservativeRoots.roots(); + size_t size = conservativeRoots.size(); + for (size_t i = 0; i < size; ++i) + internalAppend(roots[i]); +} + +ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell) +{ +#if ENABLE(SIMPLE_HEAP_PROFILING) + m_visitedTypeCounts.count(cell); +#endif + + ASSERT(Heap::isMarked(cell)); + + if (isJSString(cell)) { + JSString::visitChildren(const_cast<JSCell*>(cell), visitor); + return; + } + + if (isJSFinalObject(cell)) { + JSFinalObject::visitChildren(const_cast<JSCell*>(cell), visitor); + return; + } + + if (isJSArray(cell)) { + JSArray::visitChildren(const_cast<JSCell*>(cell), visitor); + return; + } + + cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), visitor); +} + +void SlotVisitor::donateKnownParallel() +{ + // NOTE: Because we re-try often, we can afford to be conservative, and + // assume that donating is not profitable. + + // Avoid locking when a thread reaches a dead end in the object graph. + if (m_stack.size() < 2) + return; + + // If there's already some shared work queued up, be conservative and assume + // that donating more is not profitable. + if (m_shared.m_sharedMarkStack.size()) + return; + + // If we're contending on the lock, be conservative and assume that another + // thread is already donating. + MutexTryLocker locker(m_shared.m_markingLock); + if (!locker.locked()) + return; + + // Otherwise, assume that a thread will go idle soon, and donate. + m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack); + + if (m_shared.m_numberOfActiveParallelMarkers < Options::numberOfGCMarkers()) + m_shared.m_markingCondition.broadcast(); +} + +void SlotVisitor::drain() +{ + ASSERT(m_isInParallelMode); + +#if ENABLE(PARALLEL_GC) + if (Options::numberOfGCMarkers() > 1) { + while (!m_stack.isEmpty()) { + m_stack.refill(); + for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); m_stack.canRemoveLast() && countdown--;) + visitChildren(*this, m_stack.removeLast()); + donateKnownParallel(); + } + + mergeOpaqueRootsIfNecessary(); + return; + } +#endif + + while (!m_stack.isEmpty()) { + m_stack.refill(); + while (m_stack.canRemoveLast()) + visitChildren(*this, m_stack.removeLast()); + } +} + +void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode) +{ + ASSERT(m_isInParallelMode); + + ASSERT(Options::numberOfGCMarkers()); + + bool shouldBeParallel; + +#if ENABLE(PARALLEL_GC) + shouldBeParallel = Options::numberOfGCMarkers() > 1; +#else + ASSERT(Options::numberOfGCMarkers() == 1); + shouldBeParallel = false; +#endif + + if (!shouldBeParallel) { + // This call should be a no-op. + ASSERT_UNUSED(sharedDrainMode, sharedDrainMode == MasterDrain); + ASSERT(m_stack.isEmpty()); + ASSERT(m_shared.m_sharedMarkStack.isEmpty()); + return; + } + +#if ENABLE(PARALLEL_GC) + { + MutexLocker locker(m_shared.m_markingLock); + m_shared.m_numberOfActiveParallelMarkers++; + } + while (true) { + { + MutexLocker locker(m_shared.m_markingLock); + m_shared.m_numberOfActiveParallelMarkers--; + + // How we wait differs depending on drain mode. 
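// The waiting protocol that follows can be illustrated standalone: marking
// terminates when no marker is active and the shared stack is empty; the
// master waits for work or for that state, and slaves broadcast when they
// detect it. A sketch of the master's wait with std:: primitives (the
// Shared type is an assumption, not the patch's GCThreadSharedData):
#include <condition_variable>
#include <deque>
#include <mutex>

struct Shared {
    std::mutex lock;
    std::condition_variable condition;
    std::deque<const void*> stack;   // m_sharedMarkStack
    unsigned active = 0;             // m_numberOfActiveParallelMarkers
};

// Returns false once termination is reached, true when there is work.
bool masterWaitForWork(Shared& shared)
{
    std::unique_lock<std::mutex> locker(shared.lock);
    --shared.active;                         // this marker is now idle
    while (shared.stack.empty()) {
        if (!shared.active) {                // termination: nobody is working
            shared.condition.notify_all();   // wake sleeping slaves to exit
            return false;
        }
        shared.condition.wait(locker);
    }
    ++shared.active;                         // claim work and keep draining
    return true;
}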
+ if (sharedDrainMode == MasterDrain) { + // Wait until either termination is reached, or until there is some work + // for us to do. + while (true) { + // Did we reach termination? + if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) { + // Let any sleeping slaves know it's time for them to give their private CopiedBlocks back + m_shared.m_markingCondition.broadcast(); + return; + } + + // Is there work to be done? + if (!m_shared.m_sharedMarkStack.isEmpty()) + break; + + // Otherwise wait. + m_shared.m_markingCondition.wait(m_shared.m_markingLock); + } + } else { + ASSERT(sharedDrainMode == SlaveDrain); + + // Did we detect termination? If so, let the master know. + if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) + m_shared.m_markingCondition.broadcast(); + + while (m_shared.m_sharedMarkStack.isEmpty() && !m_shared.m_parallelMarkersShouldExit) { + if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) + doneCopying(); + m_shared.m_markingCondition.wait(m_shared.m_markingLock); + } + + // Is the VM exiting? If so, exit this thread. + if (m_shared.m_parallelMarkersShouldExit) { + doneCopying(); + return; + } + } + + size_t idleThreadCount = Options::numberOfGCMarkers() - m_shared.m_numberOfActiveParallelMarkers; + m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack, idleThreadCount); + m_shared.m_numberOfActiveParallelMarkers++; + } + + drain(); + } +#endif +} + +void SlotVisitor::mergeOpaqueRoots() +{ + ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty. + { + MutexLocker locker(m_shared.m_opaqueRootsLock); + HashSet<void*>::iterator begin = m_opaqueRoots.begin(); + HashSet<void*>::iterator end = m_opaqueRoots.end(); + for (HashSet<void*>::iterator iter = begin; iter != end; ++iter) + m_shared.m_opaqueRoots.add(*iter); + } + m_opaqueRoots.clear(); +} + +void SlotVisitor::startCopying() +{ + ASSERT(!m_copiedAllocator.isValid()); +} + +void* SlotVisitor::allocateNewSpaceSlow(size_t bytes) +{ + m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock()); + m_copiedAllocator.setCurrentBlock(m_shared.m_copiedSpace->allocateBlockForCopyingPhase()); + + void* result = 0; + CheckedBoolean didSucceed = m_copiedAllocator.tryAllocate(bytes, &result); + ASSERT(didSucceed); + return result; +} + +void* SlotVisitor::allocateNewSpaceOrPin(void* ptr, size_t bytes) +{ + if (!checkIfShouldCopyAndPinOtherwise(ptr, bytes)) + return 0; + + return allocateNewSpace(bytes); +} + +ALWAYS_INLINE bool JSString::tryHashConstLock() +{ +#if ENABLE(PARALLEL_GC) + unsigned currentFlags = m_flags; + + if (currentFlags & HashConstLock) + return false; + + unsigned newFlags = currentFlags | HashConstLock; + + if (!WTF::weakCompareAndSwap(&m_flags, currentFlags, newFlags)) + return false; + + WTF::memoryBarrierAfterLock(); + return true; +#else + if (isHashConstSingleton()) + return false; + + m_flags |= HashConstLock; + + return true; +#endif +} + +ALWAYS_INLINE void JSString::releaseHashConstLock() +{ +#if ENABLE(PARALLEL_GC) + WTF::memoryBarrierBeforeUnlock(); +#endif + m_flags &= ~HashConstLock; +} + +ALWAYS_INLINE bool JSString::shouldTryHashConst() +{ + return ((length() > 1) && !isRope() && !isHashConstSingleton()); +} + +ALWAYS_INLINE void SlotVisitor::internalAppend(JSValue* slot) +{ + // This internalAppend is only intended for visits to object and array backing stores. 
+ // as it can change the JSValue pointed to be the argument when the original JSValue + // is a string that contains the same contents as another string. + + ASSERT(slot); + JSValue value = *slot; + ASSERT(value); + if (!value.isCell()) + return; + + JSCell* cell = value.asCell(); + if (!cell) + return; + + if (m_shouldHashConst && cell->isString()) { + JSString* string = jsCast<JSString*>(cell); + if (string->shouldTryHashConst() && string->tryHashConstLock()) { + UniqueStringMap::AddResult addResult = m_uniqueStrings.add(string->string().impl(), value); + if (addResult.isNewEntry) + string->setHashConstSingleton(); + else { + JSValue existingJSValue = addResult.iterator->second; + if (value != existingJSValue) + jsCast<JSString*>(existingJSValue.asCell())->clearHashConstSingleton(); + *slot = existingJSValue; + string->releaseHashConstLock(); + return; + } + string->releaseHashConstLock(); + } + } + + internalAppend(cell); +} + +void SlotVisitor::copyAndAppend(void** ptr, size_t bytes, JSValue* values, unsigned length) +{ + void* oldPtr = *ptr; + void* newPtr = allocateNewSpaceOrPin(oldPtr, bytes); + if (newPtr) { + size_t jsValuesOffset = static_cast<size_t>(reinterpret_cast<char*>(values) - static_cast<char*>(oldPtr)); + + JSValue* newValues = reinterpret_cast_ptr<JSValue*>(static_cast<char*>(newPtr) + jsValuesOffset); + for (unsigned i = 0; i < length; i++) { + JSValue& value = values[i]; + newValues[i] = value; + if (!value) + continue; + internalAppend(&newValues[i]); + } + + memcpy(newPtr, oldPtr, jsValuesOffset); + *ptr = newPtr; + } else + append(values, length); +} + +void SlotVisitor::doneCopying() +{ + if (!m_copiedAllocator.isValid()) + return; + + m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock()); +} + +void SlotVisitor::harvestWeakReferences() +{ + for (WeakReferenceHarvester* current = m_shared.m_weakReferenceHarvesters.head(); current; current = current->next()) + current->visitWeakReferences(*this); +} + +void SlotVisitor::finalizeUnconditionalFinalizers() +{ + while (m_shared.m_unconditionalFinalizers.hasNext()) + m_shared.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally(); +} + +#if ENABLE(GC_VALIDATION) +void SlotVisitor::validate(JSCell* cell) +{ + if (!cell) { + dataLog("cell is NULL\n"); + CRASH(); + } + + if (!cell->structure()) { + dataLog("cell at %p has a null structure\n" , cell); + CRASH(); + } + + // Both the cell's structure, and the cell's structure's structure should be the Structure Structure. + // I hate this sentence. 
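// Backing up to SlotVisitor::copyAndAppend above — the relocation pattern it
// implements can be sketched standalone: copy the payload into new space,
// re-visit each value at its new address (which may redirect hash-consted
// strings), then copy the header and repoint the owner. 'visitSlot' and the
// allocator callback are stand-ins for internalAppend and
// allocateNewSpaceOrPin:
#include <cstddef>
#include <cstring>

using Value = void*;
void visitSlot(Value*) {} // stand-in for internalAppend(JSValue*)

void copyAndAppendSketch(void** ptr, std::size_t bytes, Value* values, unsigned length,
                         void* (*allocateNew)(std::size_t))
{
    char* oldPtr = static_cast<char*>(*ptr);
    char* newPtr = static_cast<char*>(allocateNew(bytes));
    if (!newPtr)
        return; // pinned in place; the real code appends the values as-is

    std::size_t offset = reinterpret_cast<char*>(values) - oldPtr;
    Value* newValues = reinterpret_cast<Value*>(newPtr + offset);
    for (unsigned i = 0; i < length; i++) {
        newValues[i] = values[i];
        if (newValues[i])
            visitSlot(&newValues[i]); // may rewrite the slot it points at
    }
    std::memcpy(newPtr, oldPtr, offset); // header portion before the values
    *ptr = newPtr;
}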
+ if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo()) { + const char* parentClassName = 0; + const char* ourClassName = 0; + if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo()) + parentClassName = cell->structure()->structure()->JSCell::classInfo()->className; + if (cell->structure()->JSCell::classInfo()) + ourClassName = cell->structure()->JSCell::classInfo()->className; + dataLog("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n", + cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName); + CRASH(); + } +} +#else +void SlotVisitor::validate(JSCell*) +{ +} +#endif + +} // namespace JSC diff --git a/Source/JavaScriptCore/heap/SlotVisitor.h b/Source/JavaScriptCore/heap/SlotVisitor.h index 6364b23e4..230ed3334 100644 --- a/Source/JavaScriptCore/heap/SlotVisitor.h +++ b/Source/JavaScriptCore/heap/SlotVisitor.h @@ -27,35 +27,52 @@ #define SlotVisitor_h #include "CopiedSpace.h" -#include "MarkStack.h" +#include "HandleTypes.h" #include "MarkStackInlineMethods.h" +#include <wtf/text/StringHash.h> + namespace JSC { -class Heap; +class ConservativeRoots; class GCThreadSharedData; +class Heap; +template<typename T> class WriteBarrierBase; +template<typename T> class JITWriteBarrier; + +class SlotVisitor { + WTF_MAKE_NONCOPYABLE(SlotVisitor); + friend class HeapRootVisitor; // Allowed to mark a JSValue* or JSCell** directly. -class SlotVisitor : public MarkStack { - friend class HeapRootVisitor; public: SlotVisitor(GCThreadSharedData&); + ~SlotVisitor(); - void donate() - { - ASSERT(m_isInParallelMode); - if (Options::numberOfGCMarkers() == 1) - return; - - donateKnownParallel(); - } + void append(ConservativeRoots&); - void drain(); + template<typename T> void append(JITWriteBarrier<T>*); + template<typename T> void append(WriteBarrierBase<T>*); + void appendValues(WriteBarrierBase<Unknown>*, size_t count); - void donateAndDrain() - { - donate(); - drain(); - } + template<typename T> + void appendUnbarrieredPointer(T**); + void appendUnbarrieredValue(JSValue*); + + void addOpaqueRoot(void*); + bool containsOpaqueRoot(void*); + int opaqueRootCount(); + + GCThreadSharedData& sharedData() { return m_shared; } + bool isEmpty() { return m_stack.isEmpty(); } + + void setup(); + void reset(); + + size_t visitCount() const { return m_visitCount; } + + void donate(); + void drain(); + void donateAndDrain(); enum SharedDrainMode { SlaveDrain, MasterDrain }; void drainFromShared(SharedDrainMode); @@ -78,19 +95,84 @@ public: void doneCopying(); +#if ENABLE(SIMPLE_HEAP_PROFILING) + VTableSpectrum m_visitedTypeCounts; +#endif + + void addWeakReferenceHarvester(WeakReferenceHarvester*); + void addUnconditionalFinalizer(UnconditionalFinalizer*); + +#if ENABLE(OBJECT_MARK_LOGGING) + inline void resetChildCount() { m_logChildCount = 0; } + inline unsigned childCount() { return m_logChildCount; } + inline void incrementChildCount() { m_logChildCount++; } +#endif + private: + friend class ParallelModeEnabler; + + JS_EXPORT_PRIVATE static void validate(JSCell*); + + void append(JSValue*); + void append(JSValue*, size_t count); + void append(JSCell**); + + void internalAppend(JSCell*); + void internalAppend(JSValue); + void internalAppend(JSValue*); + + JS_EXPORT_PRIVATE void mergeOpaqueRoots(); + void mergeOpaqueRootsIfNecessary(); + void mergeOpaqueRootsIfProfitable(); + void* allocateNewSpaceOrPin(void*, size_t); void* allocateNewSpaceSlow(size_t); void 
donateKnownParallel(); + MarkStackArray m_stack; + HashSet<void*> m_opaqueRoots; // Handle-owning data structures not visible to the garbage collector. + + size_t m_visitCount; + bool m_isInParallelMode; + + GCThreadSharedData& m_shared; + + bool m_shouldHashConst; // Local per-thread copy of shared flag for performance reasons + typedef HashMap<StringImpl*, JSValue> UniqueStringMap; + UniqueStringMap m_uniqueStrings; + +#if ENABLE(OBJECT_MARK_LOGGING) + unsigned m_logChildCount; +#endif + CopiedAllocator m_copiedAllocator; + +public: +#if !ASSERT_DISABLED + bool m_isCheckingForDefaultMarkViolation; + bool m_isDraining; +#endif }; -inline SlotVisitor::SlotVisitor(GCThreadSharedData& shared) - : MarkStack(shared) -{ -} +class ParallelModeEnabler { +public: + ParallelModeEnabler(SlotVisitor& stack) + : m_stack(stack) + { + ASSERT(!m_stack.m_isInParallelMode); + m_stack.m_isInParallelMode = true; + } + + ~ParallelModeEnabler() + { + ASSERT(m_stack.m_isInParallelMode); + m_stack.m_isInParallelMode = false; + } + +private: + SlotVisitor& m_stack; +}; } // namespace JSC diff --git a/Source/JavaScriptCore/heap/SlotVisitorInlineMethods.h b/Source/JavaScriptCore/heap/SlotVisitorInlineMethods.h index f02564e10..540da3bc4 100644 --- a/Source/JavaScriptCore/heap/SlotVisitorInlineMethods.h +++ b/Source/JavaScriptCore/heap/SlotVisitorInlineMethods.h @@ -27,10 +27,115 @@ #define SlotVisitorInlineMethods_h #include "CopiedSpaceInlineMethods.h" +#include "Options.h" #include "SlotVisitor.h" namespace JSC { +ALWAYS_INLINE void SlotVisitor::append(JSValue* slot, size_t count) +{ + for (size_t i = 0; i < count; ++i) { + JSValue& value = slot[i]; + internalAppend(value); + } +} + +template<typename T> +inline void SlotVisitor::appendUnbarrieredPointer(T** slot) +{ + ASSERT(slot); + JSCell* cell = *slot; + internalAppend(cell); +} + +ALWAYS_INLINE void SlotVisitor::append(JSValue* slot) +{ + ASSERT(slot); + internalAppend(*slot); +} + +ALWAYS_INLINE void SlotVisitor::appendUnbarrieredValue(JSValue* slot) +{ + ASSERT(slot); + internalAppend(*slot); +} + +ALWAYS_INLINE void SlotVisitor::append(JSCell** slot) +{ + ASSERT(slot); + internalAppend(*slot); +} + +ALWAYS_INLINE void SlotVisitor::internalAppend(JSValue value) +{ + if (!value || !value.isCell()) + return; + internalAppend(value.asCell()); +} + +inline void SlotVisitor::addWeakReferenceHarvester(WeakReferenceHarvester* weakReferenceHarvester) +{ + m_shared.m_weakReferenceHarvesters.addThreadSafe(weakReferenceHarvester); +} + +inline void SlotVisitor::addUnconditionalFinalizer(UnconditionalFinalizer* unconditionalFinalizer) +{ + m_shared.m_unconditionalFinalizers.addThreadSafe(unconditionalFinalizer); +} + +inline void SlotVisitor::addOpaqueRoot(void* root) +{ +#if ENABLE(PARALLEL_GC) + if (Options::numberOfGCMarkers() == 1) { + // Put directly into the shared HashSet. + m_shared.m_opaqueRoots.add(root); + return; + } + // Put into the local set, but merge with the shared one every once in + // a while to make sure that the local sets don't grow too large. 
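// The ParallelModeEnabler moved into SlotVisitor.h above is a plain RAII
// flag guard; the same pattern in a generic, self-contained form:
#include <cassert>

class FlagScope {
public:
    explicit FlagScope(bool& flag) : m_flag(flag)
    {
        assert(!m_flag); // must not already be in the guarded mode
        m_flag = true;
    }
    ~FlagScope()
    {
        assert(m_flag);
        m_flag = false;
    }
    FlagScope(const FlagScope&) = delete;
    FlagScope& operator=(const FlagScope&) = delete;
private:
    bool& m_flag;
};

// Usage mirrors the GC: parallel mode holds for the dynamic extent of a drain.
// bool inParallelMode = false;
// { FlagScope parallelMode(inParallelMode); /* donateAndDrain(); */ }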
+ mergeOpaqueRootsIfProfitable(); + m_opaqueRoots.add(root); +#else + m_opaqueRoots.add(root); +#endif +} + +inline bool SlotVisitor::containsOpaqueRoot(void* root) +{ + ASSERT(!m_isInParallelMode); +#if ENABLE(PARALLEL_GC) + ASSERT(m_opaqueRoots.isEmpty()); + return m_shared.m_opaqueRoots.contains(root); +#else + return m_opaqueRoots.contains(root); +#endif +} + +inline int SlotVisitor::opaqueRootCount() +{ + ASSERT(!m_isInParallelMode); +#if ENABLE(PARALLEL_GC) + ASSERT(m_opaqueRoots.isEmpty()); + return m_shared.m_opaqueRoots.size(); +#else + return m_opaqueRoots.size(); +#endif +} + +inline void SlotVisitor::mergeOpaqueRootsIfNecessary() +{ + if (m_opaqueRoots.isEmpty()) + return; + mergeOpaqueRoots(); +} + +inline void SlotVisitor::mergeOpaqueRootsIfProfitable() +{ + if (static_cast<unsigned>(m_opaqueRoots.size()) < Options::opaqueRootMergeThreshold()) + return; + mergeOpaqueRoots(); +} + ALWAYS_INLINE bool SlotVisitor::checkIfShouldCopyAndPinOtherwise(void* oldPtr, size_t bytes) { if (CopiedSpace::isOversize(bytes)) { @@ -55,6 +160,21 @@ ALWAYS_INLINE void* SlotVisitor::allocateNewSpace(size_t bytes) return result; } +inline void SlotVisitor::donate() +{ + ASSERT(m_isInParallelMode); + if (Options::numberOfGCMarkers() == 1) + return; + + donateKnownParallel(); +} + +inline void SlotVisitor::donateAndDrain() +{ + donate(); + drain(); +} + } // namespace JSC #endif // SlotVisitorInlineMethods_h diff --git a/Source/JavaScriptCore/interpreter/Interpreter.cpp b/Source/JavaScriptCore/interpreter/Interpreter.cpp index 87b77d639..358a24096 100644 --- a/Source/JavaScriptCore/interpreter/Interpreter.cpp +++ b/Source/JavaScriptCore/interpreter/Interpreter.cpp @@ -4475,50 +4475,43 @@ skip_id_custom_self: goto vm_throw; } DEFINE_OPCODE(op_tear_off_activation) { - /* tear_off_activation activation(r) arguments(r) + /* tear_off_activation activation(r) Copy locals and named parameters from the register file to the heap. - Point the bindings in 'activation' and 'arguments' to this new backing - store. (Note that 'arguments' may not have been created. If created, - 'arguments' already holds a copy of any extra / unnamed parameters.) + Point the bindings in 'activation' to this new backing store. This opcode appears before op_ret in functions that require full scope chains. */ int activation = vPC[1].u.operand; - int arguments = vPC[2].u.operand; ASSERT(codeBlock->needsFullScopeChain()); JSValue activationValue = callFrame->r(activation).jsValue(); - if (activationValue) { + if (activationValue) asActivation(activationValue)->tearOff(*globalData); - if (JSValue argumentsValue = callFrame->r(unmodifiedArgumentsRegister(arguments)).jsValue()) - asArguments(argumentsValue)->didTearOffActivation(*globalData, asActivation(activationValue)); - } else if (JSValue argumentsValue = callFrame->r(unmodifiedArgumentsRegister(arguments)).jsValue()) { - if (!codeBlock->isStrictMode()) - asArguments(argumentsValue)->tearOff(callFrame); - } - vPC += OPCODE_LENGTH(op_tear_off_activation); NEXT_INSTRUCTION(); } DEFINE_OPCODE(op_tear_off_arguments) { - /* tear_off_arguments arguments(r) + /* tear_off_arguments arguments(r) activation(r) Copy named parameters from the register file to the heap. Point the - bindings in 'arguments' to this new backing store. (Note that - 'arguments' may not have been created. If created, 'arguments' already - holds a copy of any extra / unnamed parameters.) + bindings in 'arguments' to this new backing store. 
(If 'activation' + was also copied to the heap, 'arguments' will point to its storage.) This opcode appears before op_ret in functions that don't require full scope chains, but do use 'arguments'. */ - int src1 = vPC[1].u.operand; - ASSERT(!codeBlock->needsFullScopeChain() && codeBlock->ownerExecutable()->usesArguments()); - - if (JSValue arguments = callFrame->r(unmodifiedArgumentsRegister(src1)).jsValue()) - asArguments(arguments)->tearOff(callFrame); + int arguments = vPC[1].u.operand; + int activation = vPC[2].u.operand; + ASSERT(codeBlock->usesArguments()); + if (JSValue argumentsValue = callFrame->r(unmodifiedArgumentsRegister(arguments)).jsValue()) { + if (JSValue activationValue = callFrame->r(activation).jsValue()) + asArguments(argumentsValue)->didTearOffActivation(callFrame->globalData(), asActivation(activationValue)); + else + asArguments(argumentsValue)->tearOff(callFrame); + } vPC += OPCODE_LENGTH(op_tear_off_arguments); NEXT_INSTRUCTION(); @@ -5112,19 +5105,6 @@ JSValue Interpreter::retrieveArgumentsFromVMCode(CallFrame* callFrame, JSFunctio if (!functionCallFrame) return jsNull(); - CodeBlock* codeBlock = functionCallFrame->someCodeBlockForPossiblyInlinedCode(); - if (codeBlock->usesArguments()) { - ASSERT(codeBlock->codeType() == FunctionCode); - int argumentsRegister = codeBlock->argumentsRegister(); - int realArgumentsRegister = unmodifiedArgumentsRegister(argumentsRegister); - if (JSValue arguments = functionCallFrame->uncheckedR(argumentsRegister).jsValue()) - return arguments; - JSValue arguments = JSValue(Arguments::create(callFrame->globalData(), functionCallFrame)); - functionCallFrame->r(argumentsRegister) = arguments; - functionCallFrame->r(realArgumentsRegister) = arguments; - return arguments; - } - Arguments* arguments = Arguments::create(functionCallFrame->globalData(), functionCallFrame); arguments->tearOff(functionCallFrame); return JSValue(arguments); diff --git a/Source/JavaScriptCore/jit/JITInlineMethods.h b/Source/JavaScriptCore/jit/JITInlineMethods.h index e68ecbe78..3f32597fa 100644 --- a/Source/JavaScriptCore/jit/JITInlineMethods.h +++ b/Source/JavaScriptCore/jit/JITInlineMethods.h @@ -422,9 +422,6 @@ template <typename ClassType, bool destructor, typename StructureType> inline vo // initialize the object's structure storePtr(structure, Address(result, JSCell::structureOffset())); - // initialize the object's classInfo pointer - storePtr(TrustedImmPtr(&ClassType::s_info), Address(result, JSCell::classInfoOffset())); - // initialize the object's property storage pointer storePtr(TrustedImmPtr(0), Address(result, ClassType::offsetOfOutOfLineStorage())); } diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp index 9b7dc634f..f859f8b93 100644 --- a/Source/JavaScriptCore/jit/JITOpcodes.cpp +++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp @@ -559,25 +559,23 @@ void JIT::emit_op_construct(Instruction* currentInstruction) void JIT::emit_op_tear_off_activation(Instruction* currentInstruction) { - unsigned activation = currentInstruction[1].u.operand; - unsigned arguments = currentInstruction[2].u.operand; - Jump activationCreated = branchTestPtr(NonZero, addressFor(activation)); - Jump argumentsNotCreated = branchTestPtr(Zero, addressFor(arguments)); - activationCreated.link(this); + int activation = currentInstruction[1].u.operand; + Jump activationNotCreated = branchTestPtr(Zero, addressFor(activation)); JITStubCall stubCall(this, cti_op_tear_off_activation); stubCall.addArgument(activation, regT2); - 
stubCall.addArgument(unmodifiedArgumentsRegister(arguments), regT2); stubCall.call(); - argumentsNotCreated.link(this); + activationNotCreated.link(this); } void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction) { - unsigned dst = currentInstruction[1].u.operand; + int arguments = currentInstruction[1].u.operand; + int activation = currentInstruction[2].u.operand; - Jump argsNotCreated = branchTestPtr(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(dst)))); + Jump argsNotCreated = branchTestPtr(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(arguments)))); JITStubCall stubCall(this, cti_op_tear_off_arguments); - stubCall.addArgument(unmodifiedArgumentsRegister(dst), regT2); + stubCall.addArgument(unmodifiedArgumentsRegister(arguments), regT2); + stubCall.addArgument(activation, regT2); stubCall.call(); argsNotCreated.link(this); } diff --git a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp index c94f5d910..adfb57341 100644 --- a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp +++ b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp @@ -703,24 +703,22 @@ void JIT::emit_op_is_string(Instruction* currentInstruction) void JIT::emit_op_tear_off_activation(Instruction* currentInstruction) { unsigned activation = currentInstruction[1].u.operand; - unsigned arguments = currentInstruction[2].u.operand; - Jump activationCreated = branch32(NotEqual, tagFor(activation), TrustedImm32(JSValue::EmptyValueTag)); - Jump argumentsNotCreated = branch32(Equal, tagFor(arguments), TrustedImm32(JSValue::EmptyValueTag)); - activationCreated.link(this); + Jump activationNotCreated = branch32(Equal, tagFor(activation), TrustedImm32(JSValue::EmptyValueTag)); JITStubCall stubCall(this, cti_op_tear_off_activation); - stubCall.addArgument(currentInstruction[1].u.operand); - stubCall.addArgument(unmodifiedArgumentsRegister(currentInstruction[2].u.operand)); + stubCall.addArgument(activation); stubCall.call(); - argumentsNotCreated.link(this); + activationNotCreated.link(this); } void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction) { - int dst = currentInstruction[1].u.operand; + int arguments = currentInstruction[1].u.operand; + int activation = currentInstruction[2].u.operand; - Jump argsNotCreated = branch32(Equal, tagFor(unmodifiedArgumentsRegister(dst)), TrustedImm32(JSValue::EmptyValueTag)); + Jump argsNotCreated = branch32(Equal, tagFor(unmodifiedArgumentsRegister(arguments)), TrustedImm32(JSValue::EmptyValueTag)); JITStubCall stubCall(this, cti_op_tear_off_arguments); - stubCall.addArgument(unmodifiedArgumentsRegister(dst)); + stubCall.addArgument(unmodifiedArgumentsRegister(arguments)); + stubCall.addArgument(activation); stubCall.call(); argsNotCreated.link(this); } diff --git a/Source/JavaScriptCore/jit/JITStubs.cpp b/Source/JavaScriptCore/jit/JITStubs.cpp index 8fc395a63..5fad9c8d7 100644 --- a/Source/JavaScriptCore/jit/JITStubs.cpp +++ b/Source/JavaScriptCore/jit/JITStubs.cpp @@ -2311,20 +2311,8 @@ DEFINE_STUB_FUNCTION(void, op_tear_off_activation) { STUB_INIT_STACK_FRAME(stackFrame); - CallFrame* callFrame = stackFrame.callFrame; - ASSERT(callFrame->codeBlock()->needsFullScopeChain()); - JSValue activationValue = stackFrame.args[0].jsValue(); - if (!activationValue) { - if (JSValue v = stackFrame.args[1].jsValue()) { - if (!callFrame->codeBlock()->isStrictMode()) - asArguments(v)->tearOff(callFrame); - } - return; - } - JSActivation* activation = 
asActivation(stackFrame.args[0].jsValue()); - activation->tearOff(*stackFrame.globalData); - if (JSValue v = stackFrame.args[1].jsValue()) - asArguments(v)->didTearOffActivation(*stackFrame.globalData, activation); + ASSERT(stackFrame.callFrame->codeBlock()->needsFullScopeChain()); + jsCast<JSActivation*>(stackFrame.args[0].jsValue())->tearOff(*stackFrame.globalData); } DEFINE_STUB_FUNCTION(void, op_tear_off_arguments) @@ -2332,8 +2320,13 @@ DEFINE_STUB_FUNCTION(void, op_tear_off_arguments) STUB_INIT_STACK_FRAME(stackFrame); CallFrame* callFrame = stackFrame.callFrame; - ASSERT(callFrame->codeBlock()->usesArguments() && !callFrame->codeBlock()->needsFullScopeChain()); - asArguments(stackFrame.args[0].jsValue())->tearOff(callFrame); + ASSERT(callFrame->codeBlock()->usesArguments()); + Arguments* arguments = jsCast<Arguments*>(stackFrame.args[0].jsValue()); + if (JSValue activationValue = stackFrame.args[1].jsValue()) { + arguments->didTearOffActivation(callFrame->globalData(), jsCast<JSActivation*>(activationValue)); + return; + } + arguments->tearOff(callFrame); } DEFINE_STUB_FUNCTION(void, op_profile_will_call) diff --git a/Source/JavaScriptCore/jit/JITWriteBarrier.h b/Source/JavaScriptCore/jit/JITWriteBarrier.h index 81a3653a0..ee73b702f 100644 --- a/Source/JavaScriptCore/jit/JITWriteBarrier.h +++ b/Source/JavaScriptCore/jit/JITWriteBarrier.h @@ -29,7 +29,7 @@ #if ENABLE(JIT) #include "MacroAssembler.h" -#include "MarkStack.h" +#include "SlotVisitor.h" #include "WriteBarrier.h" namespace JSC { @@ -135,7 +135,7 @@ public: } }; -template<typename T> inline void MarkStack::append(JITWriteBarrier<T>* slot) +template<typename T> inline void SlotVisitor::append(JITWriteBarrier<T>* slot) { internalAppend(slot->get()); } diff --git a/Source/JavaScriptCore/jit/JumpReplacementWatchpoint.cpp b/Source/JavaScriptCore/jit/JumpReplacementWatchpoint.cpp index 00311dab4..26eae57be 100644 --- a/Source/JavaScriptCore/jit/JumpReplacementWatchpoint.cpp +++ b/Source/JavaScriptCore/jit/JumpReplacementWatchpoint.cpp @@ -29,6 +29,7 @@ #if ENABLE(JIT) #include "LinkBuffer.h" +#include "Options.h" namespace JSC { diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp index 1a34a09d4..fa50fedb6 100644 --- a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp +++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp @@ -1454,26 +1454,19 @@ LLINT_SLOW_PATH_DECL(slow_path_tear_off_activation) { LLINT_BEGIN(); ASSERT(exec->codeBlock()->needsFullScopeChain()); - JSValue activationValue = LLINT_OP(1).jsValue(); - if (!activationValue) { - if (JSValue v = exec->uncheckedR(unmodifiedArgumentsRegister(pc[2].u.operand)).jsValue()) { - if (!exec->codeBlock()->isStrictMode()) - asArguments(v)->tearOff(exec); - } - LLINT_END(); - } - JSActivation* activation = asActivation(activationValue); - activation->tearOff(globalData); - if (JSValue v = exec->uncheckedR(unmodifiedArgumentsRegister(pc[2].u.operand)).jsValue()) - asArguments(v)->didTearOffActivation(globalData, activation); + jsCast<JSActivation*>(LLINT_OP(1).jsValue())->tearOff(globalData); LLINT_END(); } LLINT_SLOW_PATH_DECL(slow_path_tear_off_arguments) { LLINT_BEGIN(); - ASSERT(exec->codeBlock()->usesArguments() && !exec->codeBlock()->needsFullScopeChain()); - asArguments(exec->uncheckedR(unmodifiedArgumentsRegister(pc[1].u.operand)).jsValue())->tearOff(exec); + ASSERT(exec->codeBlock()->usesArguments()); + Arguments* arguments = jsCast<Arguments*>(exec->uncheckedR(unmodifiedArgumentsRegister(pc[1].u.operand)).jsValue()); + if 
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
index db4b71dfd..4c590a1c3 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
@@ -86,9 +86,9 @@ const HashFlags8BitBuffer = 64
 
 # Property storage constants
 if JSVALUE64
-    const InlineStorageCapacity = 5
-else
     const InlineStorageCapacity = 6
+else
+    const InlineStorageCapacity = 7
 end
 
 # Allocation constants
@@ -310,7 +310,7 @@ macro functionInitialization(profileArgSkip)
 .stackHeightOK:
 end
 
-macro allocateBasicJSObject(sizeClassIndex, classInfoOffset, structure, result, scratch1, scratch2, slowCase)
+macro allocateBasicJSObject(sizeClassIndex, structure, result, scratch1, scratch2, slowCase)
     if ALWAYS_ALLOCATE_SLOW
         jmp slowCase
     else
@@ -338,8 +338,6 @@ macro allocateBasicJSObject(sizeClassIndex, classInfoOffset, structure, result,
         storep scratch2, offsetOfMySizeClass + offsetOfFirstFreeCell[scratch1]
 
         # Initialize the object.
-        loadp classInfoOffset[scratch1], scratch2
-        storep scratch2, [result]
         storep structure, JSCell::m_structure[result]
         storep 0, JSObject::m_outOfLineStorage[result]
     end
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
index b011c5425..103a3f978 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
@@ -352,7 +352,7 @@ _llint_op_create_this:
     loadp Callee[cfr], t0
     loadp JSFunction::m_cachedInheritorID[t0], t2
     btpz t2, .opCreateThisSlow
-    allocateBasicJSObject(JSFinalObjectSizeClassIndex, JSGlobalData::jsFinalObjectClassInfo, t2, t0, t1, t3, .opCreateThisSlow)
+    allocateBasicJSObject(JSFinalObjectSizeClassIndex, t2, t0, t1, t3, .opCreateThisSlow)
     loadi 4[PC], t1
     storei CellTag, TagOffset[cfr, t1, 8]
     storei t0, PayloadOffset[cfr, t1, 8]
@@ -384,7 +384,7 @@ _llint_op_new_object:
     loadp CodeBlock[cfr], t0
     loadp CodeBlock::m_globalObject[t0], t0
     loadp JSGlobalObject::m_emptyObjectStructure[t0], t1
-    allocateBasicJSObject(JSFinalObjectSizeClassIndex, JSGlobalData::jsFinalObjectClassInfo, t1, t0, t2, t3, .opNewObjectSlow)
+    allocateBasicJSObject(JSFinalObjectSizeClassIndex, t1, t0, t2, t3, .opNewObjectSlow)
     loadi 4[PC], t1
     storei CellTag, TagOffset[cfr, t1, 8]
     storei t0, PayloadOffset[cfr, t1, 8]
@@ -1639,13 +1639,10 @@ end
 _llint_op_tear_off_activation:
     traceExecution()
     loadi 4[PC], t0
-    loadi 8[PC], t1
-    bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffActivationCreated
-    bieq TagOffset[cfr, t1, 8], EmptyValueTag, .opTearOffActivationNotCreated
-.opTearOffActivationCreated:
+    bieq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffActivationNotCreated
     callSlowPath(_llint_slow_path_tear_off_activation)
 .opTearOffActivationNotCreated:
-    dispatch(3)
+    dispatch(2)
 
 
 _llint_op_tear_off_arguments:
@@ -1655,7 +1652,7 @@ _llint_op_tear_off_arguments:
     traceExecution()
     loadi 4[PC], t0
     bieq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffArgumentsNotCreated
     callSlowPath(_llint_slow_path_tear_off_arguments)
 .opTearOffArgumentsNotCreated:
-    dispatch(2)
+    dispatch(3)
 
 
 _llint_op_ret:
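A note on the InlineStorageCapacity change above: the constants rise by one (5 to 6 on JSVALUE64, 6 to 7 on JSVALUE32_64, matching INLINE_STORAGE_CAPACITY in the PropertyOffset.h hunk further down) because the object header loses the classInfo pointer, and on 32-bit also the m_padding word removed in the JSObject.h hunk below. A back-of-the-envelope check, assuming a 64-byte allocation size class for JSFinalObject and 8-byte JSValue slots (both assumptions for illustration, not figures stated in this diff):

```cpp
#include <cstddef>

// Assumed 64-byte size class for JSFinalObject; JSValue slots are 8 bytes
// under both value encodings.
constexpr std::size_t cellSize = 64;
constexpr std::size_t slotSize = 8;

// 64-bit header: was classInfo + structure + outOfLineStorage (3 pointers),
// now structure + outOfLineStorage (2 pointers).
static_assert((cellSize - 3 * 8) / slotSize == 5, "old JSVALUE64 capacity");
static_assert((cellSize - 2 * 8) / slotSize == 6, "new JSVALUE64 capacity");

// 32-bit header: was classInfo + structure + outOfLineStorage + m_padding
// (4 x 4 bytes), now structure + outOfLineStorage (2 x 4 bytes).
static_assert((cellSize - 4 * 4) / slotSize == 6, "old JSVALUE32_64 capacity");
static_assert((cellSize - 2 * 4) / slotSize == 7, "new JSVALUE32_64 capacity");
```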
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
@@ -236,7 +236,7 @@ _llint_op_create_this:
     loadp Callee[cfr], t0
     loadp JSFunction::m_cachedInheritorID[t0], t2
     btpz t2, .opCreateThisSlow
-    allocateBasicJSObject(JSFinalObjectSizeClassIndex, JSGlobalData::jsFinalObjectClassInfo, t2, t0, t1, t3, .opCreateThisSlow)
+    allocateBasicJSObject(JSFinalObjectSizeClassIndex, t2, t0, t1, t3, .opCreateThisSlow)
     loadis 8[PB, PC, 8], t1
     storep t0, [cfr, t1, 8]
     dispatch(2)
@@ -267,7 +267,7 @@ _llint_op_new_object:
     loadp CodeBlock[cfr], t0
     loadp CodeBlock::m_globalObject[t0], t0
     loadp JSGlobalObject::m_emptyObjectStructure[t0], t1
-    allocateBasicJSObject(JSFinalObjectSizeClassIndex, JSGlobalData::jsFinalObjectClassInfo, t1, t0, t2, t3, .opNewObjectSlow)
+    allocateBasicJSObject(JSFinalObjectSizeClassIndex, t1, t0, t2, t3, .opNewObjectSlow)
     loadis 8[PB, PC, 8], t1
     storep t0, [cfr, t1, 8]
     dispatch(2)
@@ -1483,13 +1483,10 @@ end
 _llint_op_tear_off_activation:
     traceExecution()
     loadis 8[PB, PC, 8], t0
-    loadis 16[PB, PC, 8], t1
-    btpnz [cfr, t0, 8], .opTearOffActivationCreated
-    btpz [cfr, t1, 8], .opTearOffActivationNotCreated
-.opTearOffActivationCreated:
+    btpz [cfr, t0, 8], .opTearOffActivationNotCreated
     callSlowPath(_llint_slow_path_tear_off_activation)
 .opTearOffActivationNotCreated:
-    dispatch(3)
+    dispatch(2)
 
 
 _llint_op_tear_off_arguments:
@@ -1499,7 +1496,7 @@ _llint_op_tear_off_arguments:
     traceExecution()
     loadis 8[PB, PC, 8], t0
     btpz [cfr, t0, 8], .opTearOffArgumentsNotCreated
     callSlowPath(_llint_slow_path_tear_off_arguments)
 .opTearOffArgumentsNotCreated:
-    dispatch(2)
+    dispatch(3)
 
 
 _llint_op_ret:
diff --git a/Source/JavaScriptCore/offlineasm/armv7.rb b/Source/JavaScriptCore/offlineasm/armv7.rb
index 6595fdc71..eb9472af3 100644
--- a/Source/JavaScriptCore/offlineasm/armv7.rb
+++ b/Source/JavaScriptCore/offlineasm/armv7.rb
@@ -527,8 +527,8 @@ def armV7LowerMisplacedAddresses(list)
                                        node.opcode,
                                        armV7AsRegisters(newList, postInstructions, node.operands, "i"),
                                        annotation)
-        when "bbeq", "bbneq", "bba", "bbaeq", "bbb", "bbbeq", "btbo", "btbz", "btbnz", "tbz", "tbnz",
-            "tbo", "cbeq", "cbneq", "cba", "cbaeq", "cbb", "cbbeq"
+        when "bbeq", "bbneq", "bba", "bbaeq", "bbb", "bbbeq", "btbz", "btbnz", "tbz", "tbnz",
+            "cbeq", "cbneq", "cba", "cbaeq", "cbb", "cbbeq"
             newList << Instruction.new(node.codeOrigin,
                                        node.opcode,
                                        armV7AsRegisters(newList, postInstructions, node.operands, "b"),
@@ -579,8 +579,8 @@ def armV7LowerRegisterReuse(list)
         case node.opcode
         when "cieq", "cineq", "cia", "ciaeq", "cib", "cibeq", "cigt", "cigteq", "cilt", "cilteq",
             "cpeq", "cpneq", "cpa", "cpaeq", "cpb", "cpbeq", "cpgt", "cpgteq", "cplt", "cplteq",
-            "tio", "tis", "tiz", "tinz", "tbo", "tbs", "tbz", "tbnz", "tpo", "tps", "tpz", "tpnz",
-            "cbeq", "cbneq", "cba", "cbaeq", "cbb", "cbbeq", "cbgt", "cbgteq", "cblt", "cblteq"
+            "tis", "tiz", "tinz", "tbs", "tbz", "tbnz", "tps", "tpz", "tpnz", "cbeq", "cbneq",
+            "cba", "cbaeq", "cbb", "cbbeq", "cbgt", "cbgteq", "cblt", "cblteq"
             if node.operands.size == 2
                 if node.operands[0] == node.operands[1]
                     tmp = Tmp.new(node.codeOrigin, :gpr)
@@ -940,9 +940,6 @@ class Instruction
         when "btinz", "btpnz", "btbnz"
             emitArmV7Test(operands)
             $asm.puts "bne #{operands[-1].asmLabel}"
-        when "btio", "btpo", "btbo"
-            emitArmV7Test(operands)
-            $asm.puts "bvs #{operands[-1].asmLabel}"
         when "btis", "btps", "btbs"
             emitArmV7Test(operands)
             $asm.puts "bmi #{operands[-1].asmLabel}"
@@ -982,8 +979,6 @@ class Instruction
             emitArmV7Compare(operands, "lt")
         when "cilteq", "cplteq", "cblteq"
             emitArmV7Compare(operands, "le")
-        when "tio", "tbo", "tpo"
-            emitArmV7TestSet(operands, "vs")
         when "tis", "tbs", "tps"
             emitArmV7TestSet(operands, "mi")
         when "tiz", "tbz", "tpz"
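The offlineasm hunks here and in the two files that follow retire the overflow-flag opcode family: the trailing "o" in btio, btpo, btbo, tio, tpo, and tbo stood for "overflow", lowered to bvs on ARMv7 and to jo/seto on x86. Removing them from the master list in instructions.rb as well suggests nothing in the interpreter emitted them any longer. For orientation, a portable C++ rendering of the condition those opcodes observed (using a GCC/Clang builtin):

```cpp
#include <climits>
#include <cstdio>

int main()
{
    int sum;
    // __builtin_add_overflow reports the same condition that x86 "jo"
    // (jump on overflow) and ARM "bvs" (branch on V set) test after an add.
    if (__builtin_add_overflow(INT_MAX, 1, &sum))
        std::puts("signed overflow detected");
}
```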
diff --git a/Source/JavaScriptCore/offlineasm/instructions.rb b/Source/JavaScriptCore/offlineasm/instructions.rb
index 211c10933..d046bee6f 100644
--- a/Source/JavaScriptCore/offlineasm/instructions.rb
+++ b/Source/JavaScriptCore/offlineasm/instructions.rb
@@ -107,11 +107,9 @@ MACRO_INSTRUCTIONS =
      "bbgteq",
      "bblt",
      "bblteq",
-     "btio",
      "btis",
      "btiz",
      "btinz",
-     "btbo",
      "btbs",
      "btbz",
      "btbnz",
@@ -155,15 +153,12 @@ MACRO_INSTRUCTIONS =
      "cigteq",
      "cilt",
      "cilteq",
-     "tio",
      "tis",
      "tiz",
      "tinz",
-     "tbo",
      "tbs",
      "tbz",
      "tbnz",
-     "tpo",
      "tps",
      "tpz",
      "tpnz",
@@ -197,7 +192,6 @@ MACRO_INSTRUCTIONS =
      "cplt",
      "cplteq",
      "storep",
-     "btpo",
      "btps",
      "btpz",
      "btpnz",
diff --git a/Source/JavaScriptCore/offlineasm/x86.rb b/Source/JavaScriptCore/offlineasm/x86.rb
index cebc83326..033c200d7 100644
--- a/Source/JavaScriptCore/offlineasm/x86.rb
+++ b/Source/JavaScriptCore/offlineasm/x86.rb
@@ -835,10 +835,6 @@ class Instruction
             handleX86IntBranch("jl", :byte)
         when "bblteq"
             handleX86IntBranch("jlteq", :byte)
-        when "btio"
-            handleX86BranchTest("jo", :int)
-        when "btpo"
-            handleX86BranchTest("jo", :ptr)
         when "btis"
             handleX86BranchTest("js", :int)
         when "btps"
@@ -851,8 +847,6 @@ class Instruction
             handleX86BranchTest("jnz", :int)
         when "btpnz"
             handleX86BranchTest("jnz", :ptr)
-        when "btbo"
-            handleX86BranchTest("jo", :byte)
         when "btbs"
             handleX86BranchTest("js", :byte)
         when "btbz"
@@ -967,24 +961,18 @@ class Instruction
             handleX86IntCompareSet("setle", :byte)
         when "cplteq"
             handleX86IntCompareSet("setle", :ptr)
-        when "tio"
-            handleX86SetTest("seto", :int)
         when "tis"
             handleX86SetTest("sets", :int)
         when "tiz"
             handleX86SetTest("setz", :int)
         when "tinz"
             handleX86SetTest("setnz", :int)
-        when "tpo"
-            handleX86SetTest("seto", :ptr)
         when "tps"
             handleX86SetTest("sets", :ptr)
         when "tpz"
             handleX86SetTest("setz", :ptr)
         when "tpnz"
             handleX86SetTest("setnz", :ptr)
-        when "tbo"
-            handleX86SetTest("seto", :byte)
         when "tbs"
             handleX86SetTest("sets", :byte)
         when "tbz"
diff --git a/Source/JavaScriptCore/runtime/JSCell.h b/Source/JavaScriptCore/runtime/JSCell.h
index ef06b1ecf..a63e08e33 100644
--- a/Source/JavaScriptCore/runtime/JSCell.h
+++ b/Source/JavaScriptCore/runtime/JSCell.h
@@ -30,6 +30,7 @@
 #include "JSLock.h"
 #include "JSValueInlineMethods.h"
 #include "SlotVisitor.h"
+#include "SlotVisitorInlineMethods.h"
 #include "WriteBarrier.h"
 #include <wtf/Noncopyable.h>
 
@@ -108,7 +109,6 @@ namespace JSC {
         // Object operations, with the toObject operation included.
         const ClassInfo* classInfo() const;
-        const ClassInfo* validatedClassInfo() const;
         const MethodTable* methodTable() const;
         static void put(JSCell*, ExecState*, PropertyName, JSValue, PutPropertySlot&);
         static void putByIndex(JSCell*, ExecState*, unsigned propertyName, JSValue, bool shouldThrow);
@@ -133,11 +133,6 @@ namespace JSC {
             return OBJECT_OFFSETOF(JSCell, m_structure);
         }
 
-        static ptrdiff_t classInfoOffset()
-        {
-            return OBJECT_OFFSETOF(JSCell, m_classInfo);
-        }
-
         void* structureAddress()
         {
             return &m_structure;
@@ -170,7 +165,6 @@ namespace JSC {
 
     private:
         friend class LLIntOffsetsExtractor;
-        const ClassInfo* m_classInfo;
         WriteBarrier<Structure> m_structure;
     };
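Context for the JSCell.h hunk above: with the cached m_classInfo member (and its classInfoOffset() accessor) gone, the surviving classInfo() getter has to read through the Structure, which already owns the authoritative pointer; the deleted validatedClassInfo() existed only to assert that the cache and the Structure agreed. A toy sketch of the layout change, not JSC's real declarations:

```cpp
struct ClassInfo { const char* className; };

struct Structure {
    const ClassInfo* m_classInfo;
    const ClassInfo* classInfo() const { return m_classInfo; }
};

struct JSCell {
    // Before: const ClassInfo* m_classInfo;  // cached copy, now removed
    Structure* m_structure;

    // One extra load per query, but every cell is a pointer smaller, and
    // the cache can never disagree with the Structure (which is exactly
    // what the deleted validatedClassInfo() assert used to check).
    const ClassInfo* classInfo() const { return m_structure->classInfo(); }
};
```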
diff --git a/Source/JavaScriptCore/runtime/JSObject.h b/Source/JavaScriptCore/runtime/JSObject.h
index f4b847b4c..5e2c12f2f 100644
--- a/Source/JavaScriptCore/runtime/JSObject.h
+++ b/Source/JavaScriptCore/runtime/JSObject.h
@@ -346,9 +346,6 @@ namespace JSC {
         Structure* createInheritorID(JSGlobalData&);
 
         StorageBarrier m_outOfLineStorage;
-#if USE(JSVALUE32_64)
-        void* m_padding;
-#endif
     };
diff --git a/Source/JavaScriptCore/runtime/PropertyOffset.h b/Source/JavaScriptCore/runtime/PropertyOffset.h
index 511c5e334..aa82eb468 100644
--- a/Source/JavaScriptCore/runtime/PropertyOffset.h
+++ b/Source/JavaScriptCore/runtime/PropertyOffset.h
@@ -34,9 +34,9 @@ namespace JSC {
 
 #if USE(JSVALUE32_64)
-#define INLINE_STORAGE_CAPACITY 6
+#define INLINE_STORAGE_CAPACITY 7
 #else
-#define INLINE_STORAGE_CAPACITY 5
+#define INLINE_STORAGE_CAPACITY 6
 #endif
 
 typedef int PropertyOffset;
diff --git a/Source/JavaScriptCore/runtime/Structure.h b/Source/JavaScriptCore/runtime/Structure.h
index 57368bee8..73ec0789e 100644
--- a/Source/JavaScriptCore/runtime/Structure.h
+++ b/Source/JavaScriptCore/runtime/Structure.h
@@ -460,7 +460,7 @@ namespace JSC {
         ASSERT(!heap.globalData()->isInitializingObject());
         heap.globalData()->setInitializingObjectClass(&Structure::s_info);
 #endif
-        JSCell* result = static_cast<JSCell*>(heap.allocateStructure());
+        JSCell* result = static_cast<JSCell*>(heap.allocateStructure(sizeof(Structure)));
         result->clearStructure();
         return result;
     }
@@ -554,17 +554,7 @@ namespace JSC {
         m_structure.set(globalData, this, structure);
     }
 
-    inline const ClassInfo* JSCell::validatedClassInfo() const
-    {
-#if ENABLE(GC_VALIDATION)
-        ASSERT(m_structure.unvalidatedGet()->classInfo() == m_classInfo);
-#else
-        ASSERT(m_structure->classInfo() == m_classInfo);
-#endif
-        return m_classInfo;
-    }
-
-    ALWAYS_INLINE void MarkStack::internalAppend(JSCell* cell)
+    ALWAYS_INLINE void SlotVisitor::internalAppend(JSCell* cell)
     {
         ASSERT(!m_isCheckingForDefaultMarkViolation);
         if (!cell)
@@ -603,8 +593,7 @@ namespace JSC {
     }
 
     inline JSCell::JSCell(JSGlobalData& globalData, Structure* structure)
-        : m_classInfo(structure->classInfo())
-        , m_structure(globalData, this, structure)
+        : m_structure(globalData, this, structure)
     {
     }
 
@@ -616,7 +605,6 @@ namespace JSC {
         if (structure)
 #endif
             m_structure.setEarlyValue(globalData, this, structure);
-        m_classInfo = structure->classInfo();
         // Very first set of allocations won't have a real structure.
         ASSERT(m_structure || !globalData.structureStructure);
     }
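The MarkStack to SlotVisitor renames in the Structure.h hunk above (and in JITWriteBarrier.h earlier and WriteBarrier.h below) keep the same shape: the public append() overloads remain thin wrappers that funnel into internalAppend() on the visitor. A skeletal restatement with toy declarations, just to show the call graph:

```cpp
class JSCell;

template<typename T>
class WriteBarrierBase {
public:
    JSCell** slot(); // location of the barriered pointer (toy signature)
};

class SlotVisitor {
public:
    // Public overloads all funnel into internalAppend(), as in the
    // Structure.h hunk above and the WriteBarrier.h hunk below.
    template<typename T>
    void append(WriteBarrierBase<T>* slot) { internalAppend(*slot->slot()); }

private:
    void internalAppend(JSCell*); // null-checks, marks, pushes children
};
```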
diff --git a/Source/JavaScriptCore/runtime/WriteBarrier.h b/Source/JavaScriptCore/runtime/WriteBarrier.h
index 9784a921e..ef8c3aff8 100644
--- a/Source/JavaScriptCore/runtime/WriteBarrier.h
+++ b/Source/JavaScriptCore/runtime/WriteBarrier.h
@@ -225,14 +225,14 @@ template <typename U, typename V> inline bool operator==(const WriteBarrierBase<
     return lhs.get() == rhs.get();
 }
 
-// MarkStack functions
+// SlotVisitor functions
 
-template<typename T> inline void MarkStack::append(WriteBarrierBase<T>* slot)
+template<typename T> inline void SlotVisitor::append(WriteBarrierBase<T>* slot)
 {
     internalAppend(*slot->slot());
 }
 
-ALWAYS_INLINE void MarkStack::appendValues(WriteBarrierBase<Unknown>* barriers, size_t count)
+ALWAYS_INLINE void SlotVisitor::appendValues(WriteBarrierBase<Unknown>* barriers, size_t count)
 {
     append(barriers->slot(), count);
 }
diff --git a/Source/JavaScriptCore/shell/PlatformEfl.cmake b/Source/JavaScriptCore/shell/PlatformEfl.cmake
index 53755727b..22fe20a8e 100644
--- a/Source/JavaScriptCore/shell/PlatformEfl.cmake
+++ b/Source/JavaScriptCore/shell/PlatformEfl.cmake
@@ -3,7 +3,3 @@ LIST(APPEND JSC_LIBRARIES
     ${ECORE_LIBRARIES}
     ${CMAKE_DL_LIBS}
 )
-
-LIST(APPEND JSC_LINK_FLAGS
-    ${ECORE_LDFLAGS}
-)
diff --git a/Source/JavaScriptCore/tests/mozilla/js1_4/Functions/function-001.js b/Source/JavaScriptCore/tests/mozilla/js1_4/Functions/function-001.js
index c1e6a3df1..fd2ceb0c3 100644
--- a/Source/JavaScriptCore/tests/mozilla/js1_4/Functions/function-001.js
+++ b/Source/JavaScriptCore/tests/mozilla/js1_4/Functions/function-001.js
@@ -82,7 +82,7 @@
                 SECTION,
                 "return function.arguments when function contains an arguments property",
                 "PASS",
-                TestFunction_4( "F", "A", "I", "L" ) +"");
+                TestFunction_4( "P", "A", "S", "S" ) +"");
 
     test();
 
@@ -100,7 +100,7 @@
     }
 
     function TestFunction_4( a, b, c, d, e ) {
-        var arguments = "PASS";
-        return TestFunction_4.arguments;
+        var arguments = "FAIL";
+        return Array.prototype.join.call(TestFunction_4.arguments, "");
     }
diff --git a/Source/JavaScriptCore/yarr/YarrJIT.cpp b/Source/JavaScriptCore/yarr/YarrJIT.cpp
index ee6e39753..ce84e2c74 100644
--- a/Source/JavaScriptCore/yarr/YarrJIT.cpp
+++ b/Source/JavaScriptCore/yarr/YarrJIT.cpp
@@ -28,6 +28,7 @@
 #include <wtf/ASCIICType.h>
 
 #include "LinkBuffer.h"
+#include "Options.h"
 #include "Yarr.h"
 #include "YarrCanonicalizeUCS2.h"