| field | value | date |
|---|---|---|
| author | Simon Hausmann <simon.hausmann@digia.com> | 2012-11-07 11:22:47 +0100 |
| committer | Simon Hausmann <simon.hausmann@digia.com> | 2012-11-07 11:22:47 +0100 |
| commit | cfd86b747d32ac22246a1aa908eaa720c63a88c1 (patch) | |
| tree | 24d68c6f61c464ecba1e05670b80390ea3b0e50c /Source/JavaScriptCore | |
| parent | 69d7c744c9de19d152dbe2d8e46eb7dfd4511d1a (diff) | |
| download | qtwebkit-cfd86b747d32ac22246a1aa908eaa720c63a88c1.tar.gz | |
Imported WebKit commit 20271caf2e2c016d5cef40184cddeefeac4f1876 (http://svn.webkit.org/repository/webkit/trunk@133733)
New snapshot that contains all previous fixes as well as a build fix for the latest QtMultimedia API changes.
Diffstat (limited to 'Source/JavaScriptCore')
147 files changed, 7895 insertions, 4838 deletions
diff --git a/Source/JavaScriptCore/API/JSStringRef.cpp b/Source/JavaScriptCore/API/JSStringRef.cpp
index da1a3057a..7f2168dc2 100644
--- a/Source/JavaScriptCore/API/JSStringRef.cpp
+++ b/Source/JavaScriptCore/API/JSStringRef.cpp
@@ -47,9 +47,10 @@ JSStringRef JSStringCreateWithUTF8CString(const char* string)
         Vector<UChar, 1024> buffer(length);
         UChar* p = buffer.data();
         bool sourceIsAllASCII;
+        const LChar* stringStart = reinterpret_cast<const LChar*>(string);
         if (conversionOK == convertUTF8ToUTF16(&string, string + length, &p, p + length, &sourceIsAllASCII)) {
             if (sourceIsAllASCII)
-                return OpaqueJSString::create(reinterpret_cast<const LChar*>(string), length).leakRef();
+                return OpaqueJSString::create(stringStart, length).leakRef();
             return OpaqueJSString::create(buffer.data(), p - buffer.data()).leakRef();
         }
     }
diff --git a/Source/JavaScriptCore/API/JSStringRefCF.cpp b/Source/JavaScriptCore/API/JSStringRefCF.cpp
index 69cf3f8c4..fd72a593c 100644
--- a/Source/JavaScriptCore/API/JSStringRefCF.cpp
+++ b/Source/JavaScriptCore/API/JSStringRefCF.cpp
@@ -41,6 +41,12 @@ JSStringRef JSStringCreateWithCFString(CFStringRef string)
     // it can hold. (<rdar://problem/6806478>)
     size_t length = CFStringGetLength(string);
     if (length) {
+        Vector<LChar, 1024> lcharBuffer(length);
+        CFIndex usedBufferLength;
+        CFIndex convertedSize = CFStringGetBytes(string, CFRangeMake(0, length), kCFStringEncodingISOLatin1, 0, false, lcharBuffer.data(), length, &usedBufferLength);
+        if (static_cast<size_t>(convertedSize) == length && static_cast<size_t>(usedBufferLength) == length)
+            return OpaqueJSString::create(lcharBuffer.data(), length).leakRef();
+
         OwnArrayPtr<UniChar> buffer = adoptArrayPtr(new UniChar[length]);
         CFStringGetCharacters(string, CFRangeMake(0, length), buffer.get());
         COMPILE_ASSERT(sizeof(UniChar) == sizeof(UChar), unichar_and_uchar_must_be_same_size);
diff --git a/Source/JavaScriptCore/API/JSValueRef.cpp b/Source/JavaScriptCore/API/JSValueRef.cpp
index de84508c1..5ff7c03c6 100644
--- a/Source/JavaScriptCore/API/JSValueRef.cpp
+++ b/Source/JavaScriptCore/API/JSValueRef.cpp
@@ -217,7 +217,7 @@ JSValueRef JSValueMakeNumber(JSContextRef ctx, double value)
     // generated internally to JavaScriptCore naturally have that representation,
     // but an external NaN might not.
     if (isnan(value))
-        value = std::numeric_limits<double>::quiet_NaN();
+        value = QNaN;
     return toRef(exec, jsNumber(value));
 }
 
@@ -282,7 +282,7 @@ double JSValueToNumber(JSContextRef ctx, JSValueRef value, JSValueRef* exception
         if (exception)
             *exception = toRef(exec, exec->exception());
         exec->clearException();
-        number = std::numeric_limits<double>::quiet_NaN();
+        number = QNaN;
     }
     return number;
 }
diff --git a/Source/JavaScriptCore/API/tests/minidom.c b/Source/JavaScriptCore/API/tests/minidom.c
index bd3e119e5..43ae2c1a8 100644
--- a/Source/JavaScriptCore/API/tests/minidom.c
+++ b/Source/JavaScriptCore/API/tests/minidom.c
@@ -30,7 +30,6 @@
 #include "JSStringRef.h"
 #include <stdio.h>
 #include <stdlib.h>
-#include <wtf/Platform.h>
 #include <wtf/Assertions.h>
 #include <wtf/UnusedParam.h>
 
diff --git a/Source/JavaScriptCore/API/tests/testapi.c b/Source/JavaScriptCore/API/tests/testapi.c
index b52a2b440..c2400f7ec 100644
--- a/Source/JavaScriptCore/API/tests/testapi.c
+++ b/Source/JavaScriptCore/API/tests/testapi.c
@@ -29,7 +29,6 @@
 #include "JSObjectRefPrivate.h"
 #include <math.h>
 #define ASSERT_DISABLED 0
-#include <wtf/Platform.h>
 #include <wtf/Assertions.h>
 #include <wtf/UnusedParam.h>
 
diff --git a/Source/JavaScriptCore/CMakeLists.txt b/Source/JavaScriptCore/CMakeLists.txt
index c706f65e9..393db67c3 100644
--- a/Source/JavaScriptCore/CMakeLists.txt
+++ b/Source/JavaScriptCore/CMakeLists.txt
@@ -49,8 +49,6 @@ SET(JavaScriptCore_SOURCES
     bytecode/GetByIdStatus.cpp
     bytecode/JumpTable.cpp
     bytecode/LazyOperandValueProfile.cpp
-    bytecode/MethodCallLinkInfo.cpp
-    bytecode/MethodCallLinkStatus.cpp
     bytecode/MethodOfGettingAValueProfile.cpp
     bytecode/Opcode.cpp
     bytecode/PolymorphicPutByIdList.cpp
@@ -61,6 +59,7 @@ SET(JavaScriptCore_SOURCES
     bytecode/SpecialPointer.cpp
     bytecode/StructureStubClearingWatchpoint.cpp
     bytecode/StructureStubInfo.cpp
+    bytecode/UnlinkedCodeBlock.cpp
     bytecode/Watchpoint.cpp
 
     bytecompiler/BytecodeGenerator.cpp
@@ -176,6 +175,7 @@ SET(JavaScriptCore_SOURCES
     runtime/BooleanObject.cpp
     runtime/BooleanPrototype.cpp
     runtime/CallData.cpp
+    runtime/CodeCache.cpp
     runtime/CommonIdentifiers.cpp
     runtime/Completion.cpp
     runtime/ConstructData.cpp
diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog
index b2c4299ce..8479ac599 100644
--- a/Source/JavaScriptCore/ChangeLog
+++ b/Source/JavaScriptCore/ChangeLog
@@ -1,3 +1,570 @@
+2012-11-06  Oliver Hunt  <oliver@apple.com>
+
+        Reduce parser overhead in JSC
+        https://bugs.webkit.org/show_bug.cgi?id=101127
+
+        Reviewed by Filip Pizlo.
+
+        An exciting journey into the world of architecture in which our hero
+        adds yet another layer to JSC codegeneration.
+
+        This patch adds a marginally more compact form of bytecode that is
+        free from any data specific to a given execution context, and that
+        does not store any data structures necessary for execution. To actually
+        execute this UnlinkedBytecode we still need to instantiate a real
+        CodeBlock, but this is a much faster linear time operation than any
+        of the earlier parsing or code generation passes.
+
+        As the unlinked code is context free we can then simply use a cache
+        from source to unlinked code mapping to completely avoid all of the
+        old parser overhead. The cache is currently very simple and memory
+        heavy, using the complete source text as a key (rather than SourceCode
+        or equivalent), and a random eviction policy.
+
+        This seems to produce a substantial win when loading identical content
+        in different contexts.
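The caching idea above can be sketched in a few lines. This is only an illustration of a bounded map keyed by the complete source text with random eviction; the class and member names below are hypothetical and are not the actual JSC CodeCache API.

```cpp
// Hypothetical sketch of a source-text-keyed cache with random eviction.
#include <cstdlib>
#include <string>
#include <unordered_map>
#include <vector>

struct UnlinkedCode { /* context-free bytecode would live here */ };

class SourceCodeCache {
public:
    explicit SourceCodeCache(size_t capacity) : m_capacity(capacity) { }

    // Returns the cached entry for this exact source text, or null on a miss.
    UnlinkedCode* lookup(const std::string& source)
    {
        auto it = m_map.find(source);
        return it == m_map.end() ? nullptr : &it->second;
    }

    // Stores a freshly generated entry, evicting a random victim when full.
    UnlinkedCode* add(const std::string& source, UnlinkedCode code)
    {
        if (m_map.size() >= m_capacity && !m_keys.empty())
            evictRandomEntry();
        m_keys.push_back(source);
        return &(m_map[source] = code);
    }

private:
    void evictRandomEntry()
    {
        size_t victim = static_cast<size_t>(std::rand()) % m_keys.size();
        m_map.erase(m_keys[victim]);
        m_keys[victim] = m_keys.back();
        m_keys.pop_back();
    }

    size_t m_capacity;                                    // maximum number of entries
    std::unordered_map<std::string, UnlinkedCode> m_map;  // full source text -> unlinked code
    std::vector<std::string> m_keys;                      // keys, for picking a random victim
};
```

The intended call pattern is lookup() first and add() only on a miss, which also keeps the key vector free of duplicates; keying on the full source text is what makes the cache memory heavy, as the entry above notes.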
+ + * API/tests/testapi.c: + (main): + * CMakeLists.txt: + * GNUmakefile.list.am: + * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj: + * JavaScriptCore.xcodeproj/project.pbxproj: + * bytecode/CodeBlock.cpp: + * bytecode/CodeBlock.h: + Moved a number of fields, and a bunch of logic to UnlinkedCodeBlock.h/cpp + * bytecode/Opcode.h: + Added a global const init no op instruction needed to get correct + behaviour without any associated semantics. + * bytecode/UnlinkedCodeBlock.cpp: Added. + * bytecode/UnlinkedCodeBlock.h: Added. + A fairly shallow, GC allocated version of the old CodeBlock + classes with a 32bit instruction size, and just metadata + size tracking. + * bytecompiler/BytecodeGenerator.cpp: + * bytecompiler/BytecodeGenerator.h: + Replace direct access to m_symbolTable with access through + symbolTable(). ProgramCode no longer has a symbol table at + all so some previously unconditional (and pointless) uses + of symbolTable get null checks. + A few other changes to deal with type changes due to us generating + unlinked code (eg. pointer free, so profile indices rather than + pointers). + * dfg/DFGByteCodeParser.cpp: + * dfg/DFGCapabilities.h: + Support global_init_nop + * interpreter/Interpreter.cpp: + Now get the ProgramExecutable to initialise new global properties + before starting execution. + * jit/JIT.cpp: + * jit/JITDriver.h: + * jit/JITStubs.cpp: + * llint/LLIntData.cpp: + * llint/LLIntSlowPaths.cpp: + * llint/LowLevelInterpreter.asm: + * llint/LowLevelInterpreter32_64.asm: + * llint/LowLevelInterpreter64.asm: + Adding init_global_const_nop everywhere else + * parser/Parser.h: + * parser/ParserModes.h: Added. + * parser/ParserTokens.h: + Parser no longer needs a global object or callframe to function + * runtime/CodeCache.cpp: Added. + * runtime/CodeCache.h: Added. + A simple, random eviction, Source->UnlinkedCode cache + * runtime/Executable.cpp: + * runtime/Executable.h: + Executables now reference their unlinked counterparts, and + request code specifically for the target global object. + * runtime/JSGlobalData.cpp: + * runtime/JSGlobalData.h: + GlobalData now owns a CodeCache and a set of new structures + for the unlinked code types. + * runtime/JSGlobalObject.cpp: + * runtime/JSGlobalObject.h: + Utility functions used by executables to perform compilation + + * runtime/JSType.h: + Add new JSTypes for unlinked code + +2012-11-06 Michael Saboff <msaboff@apple.com> + + JSStringCreateWithCFString() Should create an 8 bit String if possible + https://bugs.webkit.org/show_bug.cgi?id=101104 + + Reviewed by Darin Adler. + + Try converting the CFString to an 8 bit string using CFStringGetBytes(..., + kCFStringEncodingISOLatin1, ...) and return the 8 bit string if successful. + If not proceed with 16 bit conversion. + + * API/JSStringRefCF.cpp: + (JSStringCreateWithCFString): + +2012-11-06 Oliver Hunt <oliver@apple.com> + + Reduce direct m_symbolTable usage in CodeBlock + https://bugs.webkit.org/show_bug.cgi?id=101391 + + Reviewed by Sam Weinig. + + Simple refactoring. + + * bytecode/CodeBlock.cpp: + (JSC::CodeBlock::dump): + (JSC::CodeBlock::dumpStatistics): + (JSC::CodeBlock::nameForRegister): + * bytecode/CodeBlock.h: + (JSC::CodeBlock::isCaptured): + +2012-11-06 Michael Saboff <msaboff@apple.com> + + Lexer::scanRegExp, create 8 bit pattern and flag Identifiers from 16 bit source when possible + https://bugs.webkit.org/show_bug.cgi?id=101013 + + Reviewed by Darin Adler. 
+ + Changed scanRegExp so that it will create 8 bit identifiers from 8 bit sources and from 16 bit sources + whan all the characters are 8 bit. Using two templated helpers, the "is all 8 bit" check is only performed + on 16 bit sources. The first helper is orCharacter() that will accumulate the or value of all characters + only for 16 bit sources. Replaced the helper Lexer::makeIdentifierSameType() with Lexer::makeRightSizedIdentifier(). + + * parser/Lexer.cpp: + (JSC::orCharacter<LChar>): Explicit template that serves as a placeholder. + (JSC::orCharacter<UChar>): Explicit template that actually or accumulates characters. + (JSC::Lexer::scanRegExp): + * parser/Lexer.h: + (Lexer): + (JSC::Lexer::makeRightSizedIdentifier<LChar>): New template that always creates an 8 bit Identifier. + (JSC::Lexer::makeRightSizedIdentifier<UChar>): New template that creates an 8 bit Identifier for 8 bit + data in a 16 bit source. + +2012-11-06 Filip Pizlo <fpizlo@apple.com> + + Indentation of JSCell.h is wrong + https://bugs.webkit.org/show_bug.cgi?id=101379 + + Rubber stamped by Alexey Proskuryakov. + + Just removed four spaces on a bunch of lines. + + * runtime/JSCell.h: + +2012-11-05 Filip Pizlo <fpizlo@apple.com> + + Indentation of JSObject.h is wrong + https://bugs.webkit.org/show_bug.cgi?id=101313 + + Rubber stamped by Alexey Proskuryakov. + + Just unindented code, since namespace bodies shouldn't be indented. + + * runtime/JSObject.h: + +2012-11-05 Filip Pizlo <fpizlo@apple.com> + + Indentation of JSArray.h is wrong + https://bugs.webkit.org/show_bug.cgi?id=101314 + + Rubber stamped by Alexey Proskuryakov. + + Just removing the indentation inside the namespace body. + + * runtime/JSArray.h: + +2012-11-05 Filip Pizlo <fpizlo@apple.com> + + DFG should not fall down to patchable GetById just because a prototype had things added to it + https://bugs.webkit.org/show_bug.cgi?id=101299 + + Reviewed by Geoffrey Garen. + + This looks like a slight win on V8v7 and SunSpider. + + * bytecode/DFGExitProfile.h: + (JSC::DFG::exitKindToString): + * dfg/DFGSpeculativeJIT64.cpp: + (JSC::DFG::SpeculativeJIT::compile): + +2012-11-05 Filip Pizlo <fpizlo@apple.com> + + Get rid of method_check + https://bugs.webkit.org/show_bug.cgi?id=101147 + + Reviewed by Geoffrey Garen. + + op_method_check no longer buys us anything, since get_by_id proto caching + gives just as much profiling information and the DFG inlines monomorphic + proto accesses anyway. + + This also has the potential for a speed-up since it makes parsing of + profiling data easier. No longer do we have to deal with the confusion of + the get_by_id portion of a method_check appearing monomorphic even though + we're really dealing with a bimorphic access (method_check specializes for + one case and get_by_id for another). + + This looks like a 1% speed-up on both SunSpider and V8v7. + + * CMakeLists.txt: + * GNUmakefile.list.am: + * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj: + * JavaScriptCore.xcodeproj/project.pbxproj: + * Target.pri: + * bytecode/CodeBlock.cpp: + (JSC::CodeBlock::printGetByIdCacheStatus): + (JSC::CodeBlock::dump): + (JSC::CodeBlock::finalizeUnconditionally): + (JSC::CodeBlock::shrinkToFit): + (JSC::CodeBlock::unlinkCalls): + * bytecode/CodeBlock.h: + (JSC::CodeBlock::getCallLinkInfo): + (JSC::CodeBlock::callLinkInfo): + (CodeBlock): + * bytecode/GetByIdStatus.cpp: + (JSC::GetByIdStatus::computeFromLLInt): + * bytecode/MethodCallLinkInfo.cpp: Removed. + * bytecode/MethodCallLinkInfo.h: Removed. 
+ * bytecode/MethodCallLinkStatus.cpp: Removed. + * bytecode/MethodCallLinkStatus.h: Removed. + * bytecode/Opcode.h: + (JSC): + (JSC::padOpcodeName): + * bytecompiler/BytecodeGenerator.cpp: + (JSC): + * bytecompiler/BytecodeGenerator.h: + (BytecodeGenerator): + * bytecompiler/NodesCodegen.cpp: + (JSC::FunctionCallDotNode::emitBytecode): + * dfg/DFGByteCodeParser.cpp: + (JSC::DFG::ByteCodeParser::parseBlock): + * dfg/DFGCapabilities.h: + (JSC::DFG::canCompileOpcode): + * jit/JIT.cpp: + (JSC::JIT::privateCompileMainPass): + (JSC::JIT::privateCompileSlowCases): + (JSC::PropertyStubCompilationInfo::copyToStubInfo): + (JSC::JIT::privateCompile): + * jit/JIT.h: + (JSC::PropertyStubCompilationInfo::slowCaseInfo): + (PropertyStubCompilationInfo): + (JSC): + (JIT): + * jit/JITPropertyAccess.cpp: + (JSC): + (JSC::JIT::emitSlow_op_get_by_id): + (JSC::JIT::compileGetByIdSlowCase): + * jit/JITPropertyAccess32_64.cpp: + (JSC): + (JSC::JIT::compileGetByIdSlowCase): + * jit/JITStubs.cpp: + (JSC): + * jit/JITStubs.h: + * llint/LowLevelInterpreter.asm: + +2012-11-05 Yuqiang Xian <yuqiang.xian@intel.com> + + Refactor LLInt64 to distinguish the pointer operations from the 64-bit integer operations + https://bugs.webkit.org/show_bug.cgi?id=100321 + + Reviewed by Filip Pizlo. + + We have refactored the MacroAssembler and JIT compilers to distinguish + the pointer operations from the 64-bit integer operations (see bug #99154). + Now we want to do the similar work for LLInt, and the goal is same as + the one mentioned in 99154. + + This is the second part of the modification: in the low level interpreter, + changing the operations on 64-bit integers to use the "<foo>q" instructions. + This also removes some unused/meaningless "<foo>p" instructions. + + * llint/LowLevelInterpreter.asm: + * llint/LowLevelInterpreter.cpp: + (JSC::CLoop::execute): + * llint/LowLevelInterpreter64.asm: + * offlineasm/armv7.rb: + * offlineasm/cloop.rb: + * offlineasm/instructions.rb: + * offlineasm/x86.rb: + +2012-11-05 Filip Pizlo <fpizlo@apple.com> + + Prototype chain caching should check that the path from the base object to the slot base involves prototype hops only + https://bugs.webkit.org/show_bug.cgi?id=101276 + + Reviewed by Gavin Barraclough. + + Changed normalizePrototypeChain() to report an invalid prototype chain if any object is a proxy. + This catches cases where our prototype chain checks would have been insufficient to guard against + newly introduced properties, despecialized properties, or deleted properties in the chain of + objects involved in the access. + + * dfg/DFGRepatch.cpp: + (JSC::DFG::tryCacheGetByID): + (JSC::DFG::tryBuildGetByIDProtoList): + (JSC::DFG::tryCachePutByID): + (JSC::DFG::tryBuildPutByIdList): + * jit/JITStubs.cpp: + (JSC::JITThunks::tryCachePutByID): + (JSC::JITThunks::tryCacheGetByID): + (JSC::DEFINE_STUB_FUNCTION): + * llint/LLIntSlowPaths.cpp: + (JSC::LLInt::LLINT_SLOW_PATH_DECL): + * runtime/Operations.h: + (JSC): + (JSC::normalizePrototypeChain): + +2012-11-05 Dima Gorbik <dgorbik@apple.com> + + Back out controversial changes from Bug 98665. + https://bugs.webkit.org/show_bug.cgi?id=101244 + + Reviewed by David Kilzer. + + Backing out changes from Bug 98665 until further discussions take place on rules for including Platform.h in Assertions.h. + + * API/tests/minidom.c: + * API/tests/testapi.c: + +2012-11-04 Filip Pizlo <fpizlo@apple.com> + + Reduce the verbosity of referring to QNaN in JavaScriptCore + https://bugs.webkit.org/show_bug.cgi?id=101174 + + Reviewed by Geoffrey Garen. 
+ + Introduces a #define QNaN in JSValue.h, and replaces all previous uses of + std::numeric_limits<double>::quiet_NaN() with QNaN. + + * API/JSValueRef.cpp: + (JSValueMakeNumber): + (JSValueToNumber): + * dfg/DFGSpeculativeJIT.cpp: + (JSC::DFG::SpeculativeJIT::compileGetByValOnFloatTypedArray): + * jit/JITPropertyAccess.cpp: + (JSC::JIT::emitFloatTypedArrayGetByVal): + * runtime/CachedTranscendentalFunction.h: + (JSC::CachedTranscendentalFunction::initialize): + * runtime/DateConstructor.cpp: + (JSC::constructDate): + * runtime/DateInstanceCache.h: + (JSC::DateInstanceData::DateInstanceData): + (JSC::DateInstanceCache::reset): + * runtime/ExceptionHelpers.cpp: + (JSC::InterruptedExecutionError::defaultValue): + (JSC::TerminatedExecutionError::defaultValue): + * runtime/JSCell.h: + (JSC::JSValue::getPrimitiveNumber): + * runtime/JSDateMath.cpp: + (JSC::parseDateFromNullTerminatedCharacters): + * runtime/JSGlobalData.cpp: + (JSC::JSGlobalData::JSGlobalData): + (JSC::JSGlobalData::resetDateCache): + * runtime/JSGlobalObjectFunctions.cpp: + (JSC::parseInt): + (JSC::jsStrDecimalLiteral): + (JSC::toDouble): + (JSC::jsToNumber): + (JSC::parseFloat): + * runtime/JSValue.cpp: + (JSC::JSValue::toNumberSlowCase): + * runtime/JSValue.h: + (JSC): + * runtime/JSValueInlineMethods.h: + (JSC::jsNaN): + * runtime/MathObject.cpp: + (JSC::mathProtoFuncMax): + (JSC::mathProtoFuncMin): + +2012-11-03 Filip Pizlo <fpizlo@apple.com> + + Baseline JIT should use structure watchpoints whenever possible + https://bugs.webkit.org/show_bug.cgi?id=101146 + + Reviewed by Sam Weinig. + + No speed-up yet except on toy programs. I think that it will start to show + speed-ups with https://bugs.webkit.org/show_bug.cgi?id=101147, which this is + a step towards. + + * jit/JIT.h: + (JIT): + * jit/JITPropertyAccess.cpp: + (JSC::JIT::privateCompilePutByIdTransition): + (JSC::JIT::privateCompileGetByIdProto): + (JSC::JIT::privateCompileGetByIdProtoList): + (JSC::JIT::privateCompileGetByIdChainList): + (JSC::JIT::privateCompileGetByIdChain): + (JSC::JIT::addStructureTransitionCheck): + (JSC): + (JSC::JIT::testPrototype): + * jit/JITPropertyAccess32_64.cpp: + (JSC::JIT::privateCompilePutByIdTransition): + (JSC::JIT::privateCompileGetByIdProto): + (JSC::JIT::privateCompileGetByIdProtoList): + (JSC::JIT::privateCompileGetByIdChainList): + (JSC::JIT::privateCompileGetByIdChain): + +2012-11-04 Csaba Osztrogonác <ossy@webkit.org> + + [Qt] udis86_itab.c is always regenerated + https://bugs.webkit.org/show_bug.cgi?id=100756 + + Reviewed by Simon Hausmann. + + * DerivedSources.pri: Generate sources to the generated directory. + * disassembler/udis86/differences.txt: + * disassembler/udis86/itab.py: Add --outputDir option. + (UdItabGenerator.__init__): + (genItabH): + (genItabC): + (main): + +2012-11-02 Filip Pizlo <fpizlo@apple.com> + + LLInt 32-bit put_by_val ArrayStorage case should use the right register (t3, not t2) for the index in the publicLength updating path + https://bugs.webkit.org/show_bug.cgi?id=101118 + + Reviewed by Gavin Barraclough. + + * llint/LowLevelInterpreter32_64.asm: + +2012-11-02 Filip Pizlo <fpizlo@apple.com> + + DFG::Node::converToStructureTransitionWatchpoint should take kindly to ArrayifyToStructure + https://bugs.webkit.org/show_bug.cgi?id=101117 + + Reviewed by Gavin Barraclough. 
+ + We have logic to convert ArrayifyToStructure to StructureTransitionWatchpoint, which is awesome, except + that previously convertToStructureTransitionWatchpoint was (a) asserting that it never saw an + ArrayifyToStructure and (b) would incorrectly create a ForwardStructureTransitionWatchpoint if it did. + + * dfg/DFGNode.h: + (JSC::DFG::Node::convertToStructureTransitionWatchpoint): + +2012-11-02 Filip Pizlo <fpizlo@apple.com> + + DFG::SpeculativeJIT::typedArrayDescriptor should use the Float64Array descriptor for Float64Arrays + https://bugs.webkit.org/show_bug.cgi?id=101114 + + Reviewed by Gavin Barraclough. + + As in https://bugs.webkit.org/show_bug.cgi?id=101112, this was only wrong when Float64Array descriptors + hadn't been initialized yet. That happens rarely, but when it does happen, we would crash. + + This would also become much more wrong if we ever put type size info (num bytes, etc) in the descriptor + and used that directly. So it's good to fix it. + + * dfg/DFGSpeculativeJIT.cpp: + (JSC::DFG::SpeculativeJIT::typedArrayDescriptor): + +2012-11-02 Filip Pizlo <fpizlo@apple.com> + + JIT::privateCompileGetByVal should use the uint8ClampedArrayDescriptor for compiling accesses to Uint8ClampedArrays + https://bugs.webkit.org/show_bug.cgi?id=101112 + + Reviewed by Gavin Barraclough. + + The only reason why the code was wrong to use uint8ArrayDescriptor instead is that if we're just using + Uint8ClampedArrays then the descriptor for Uint8Array may not have been initialized. + + * jit/JITPropertyAccess.cpp: + (JSC::JIT::privateCompileGetByVal): + +2012-11-02 Mark Hahnenberg <mhahnenberg@apple.com> + + MarkedBlocks should use something other than the mark bits to indicate liveness for newly allocated objects + https://bugs.webkit.org/show_bug.cgi?id=100877 + + Reviewed by Filip Pizlo. + + Currently when we canonicalize cell liveness data in MarkedBlocks, we set the mark bit for every cell in the + block except for those in the free list. This allows us to consider objects that were allocated since the + previous collection to be considered live until they have a chance to be properly marked by the collector. + + If we want to use the mark bits to signify other types of information, e.g. using sticky mark bits for generational + collection, we will have to keep track of newly allocated objects in a different fashion when we canonicalize cell liveness. + + One method would be to allocate a separate set of bits while canonicalizing liveness data. These bits would + track the newly allocated objects in the block separately from those objects who had already been marked. We would + then check these bits, along with the mark bits, when determining liveness. + + * heap/Heap.h: + (Heap): + (JSC::Heap::isLive): We now check for the presence of the newlyAllocated Bitmap. + (JSC): + * heap/MarkedBlock.cpp: + (JSC::MarkedBlock::specializedSweep): We clear the newlyAllocated Bitmap if we're creating a free list. This + will happen if we canonicalize liveness data for some other reason than collection (e.g. forEachCell) and + then start allocating again. + (JSC::SetNewlyAllocatedFunctor::SetNewlyAllocatedFunctor): + (SetNewlyAllocatedFunctor): + (JSC::SetNewlyAllocatedFunctor::operator()): We set the newlyAllocated bits for all the objects + that aren't already marked. We undo the bits for the objects in the free list later in canonicalizeCellLivenessData. + (JSC::MarkedBlock::canonicalizeCellLivenessData): We should never have a FreeListed block with a newlyAllocated Bitmap. 
+ We allocate the new Bitmap, set the bits for all the objects that aren't already marked, and then unset all of the + bits for the items currently in the FreeList. + * heap/MarkedBlock.h: + (JSC::MarkedBlock::clearMarks): We clear the newlyAllocated bitmap if it exists because at this point we don't need it + any more. + (JSC::MarkedBlock::isEmpty): If we have some objects that are newlyAllocated, we are not empty. + (JSC::MarkedBlock::isNewlyAllocated): + (JSC): + (JSC::MarkedBlock::setNewlyAllocated): + (JSC::MarkedBlock::clearNewlyAllocated): + (JSC::MarkedBlock::isLive): We now check the newlyAllocated Bitmap, if it exists, when determining liveness of a cell in + a block that is Marked. + * heap/WeakBlock.cpp: + (JSC::WeakBlock::visit): We need to make sure we don't finalize objects that are in the newlyAllocated Bitmap. + (JSC::WeakBlock::reap): Ditto. + +2012-11-02 Filip Pizlo <fpizlo@apple.com> + + JIT::privateCompileGetByVal should use MacroAssemblerCodePtr::createFromExecutableAddress like JIT::privateCompilePutByVal + https://bugs.webkit.org/show_bug.cgi?id=101109 + + Reviewed by Gavin Barraclough. + + This fixes crashes on ARMv7 resulting from the return address already being tagged with the THUMB2 bit. + + * jit/JITPropertyAccess.cpp: + (JSC::JIT::privateCompileGetByVal): + +2012-11-02 Simon Fraser <simon.fraser@apple.com> + + Enable SUBPIXEL_LAYOUT on Mac + https://bugs.webkit.org/show_bug.cgi?id=101076 + + Reviewed by Dave Hyatt. + + Define ENABLE_SUBPIXEL_LAYOUT and include it in FEATURE_DEFINES. + + * Configurations/FeatureDefines.xcconfig: + +2012-11-02 Michael Saboff <msaboff@apple.com> + + RegExp.prototype.toString Should Produce an 8 bit JSString if possible. + https://bugs.webkit.org/show_bug.cgi?id=101003 + + Reviewed by Geoffrey Garen. + + Took the logic of regExpObjectSource() and created two templated helpers that uses the + source character type when appending to the StringBuilder. + + * runtime/RegExpObject.cpp: + (JSC::appendLineTerminatorEscape): Checks line terminate type to come up with escaped version. + (JSC::regExpObjectSourceInternal): Templated version of original. + (JSC::regExpObjectSource): Wrapper function. + +2012-11-02 Adam Barth <abarth@webkit.org> + + ENABLE(UNDO_MANAGER) is disabled everywhere and is not under active development + https://bugs.webkit.org/show_bug.cgi?id=100711 + + Reviewed by Eric Seidel. + + * Configurations/FeatureDefines.xcconfig: + 2012-11-02 Simon Hausmann <simon.hausmann@digia.com> [Qt] Fix build on Windows when Qt is configured with -release @@ -13,12 +580,610 @@ * LLIntOffsetsExtractor.pro: -2012-10-25 Simon Hausmann <simon.hausmann@digia.com> +2012-11-01 Mark Lam <mark.lam@apple.com> + + A llint workaround for a toolchain issue. + https://bugs.webkit.org/show_bug.cgi?id=101012. + + Reviewed by Michael Saboff. + + * llint/LowLevelInterpreter.asm: + - use a local label to workaround the toolchain issue with undeclared + global labels. + +2012-11-01 Oliver Hunt <oliver@apple.com> + + Remove GlobalObject constant register that is typically unused + https://bugs.webkit.org/show_bug.cgi?id=101005 + + Reviewed by Geoffrey Garen. + + The GlobalObject constant register is frequently allocated even when it + is not used, it is also getting in the way of some other optimisations. 
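As a rough sketch of the change just described (handing out the GlobalObject constant register lazily instead of reserving it for every code block), consider the following; the names are hypothetical and this is not the actual BytecodeGenerator code.

```cpp
// Hypothetical sketch: the global-object constant register is allocated only
// on first request, so code blocks that never ask for it never pay for it.
class ConstantAllocator {
public:
    int globalObjectRegister()
    {
        if (m_globalObjectRegister < 0)
            m_globalObjectRegister = m_nextConstantIndex++; // allocated lazily
        return m_globalObjectRegister;
    }

    bool usesGlobalObjectRegister() const { return m_globalObjectRegister >= 0; }

private:
    int m_globalObjectRegister { -1 }; // -1 means "never requested"
    int m_nextConstantIndex { 0 };
};
```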
+ + * bytecode/CodeBlock.cpp: + (JSC::CodeBlock::CodeBlock): + * bytecode/CodeBlock.h: + (CodeBlock): + * bytecompiler/BytecodeGenerator.cpp: + (JSC::BytecodeGenerator::BytecodeGenerator): + * dfg/DFGByteCodeParser.cpp: + (JSC::DFG::ByteCodeParser::parseResolveOperations): + +2012-10-31 Filip Pizlo <fpizlo@apple.com> + + DFG optimized string access code should be enabled + https://bugs.webkit.org/show_bug.cgi?id=100825 + + Reviewed by Oliver Hunt. + + - Removes prediction checks from the parser. + + - Fixes the handling of array mode refinement for strings. I.e. we don't do + any refinement - we already know it's going to be a string. We could + revisit this in the future, but for now the DFG lacks the ability to + handle any array modes other than Array::String for string intrinsics, so + this is as good as it gets. + + - Removes uses of isBlahSpeculation for checking if a mode is already + checked. isBlahSpeculation implicitly checks if the SpeculatedType is not + BOTTOM ("empty"), which breaks for checking if a mode is already checked + since a mode may already be "checked" in the sense that we've proven that + the code is unreachable. + + ~1% speed-up on V8v7, mostly from a speed-up on crypto, which uses string + intrinsics in one of the hot functions. + + * bytecode/SpeculatedType.h: + (JSC::speculationChecked): + (JSC): + * dfg/DFGArrayMode.cpp: + (JSC::DFG::ArrayMode::alreadyChecked): + * dfg/DFGByteCodeParser.cpp: + (JSC::DFG::ByteCodeParser::handleIntrinsic): + * dfg/DFGFixupPhase.cpp: + (JSC::DFG::FixupPhase::fixupNode): + * dfg/DFGSpeculativeJIT.cpp: + (JSC::DFG::SpeculativeJIT::compileGetCharCodeAt): + +2012-10-31 Filip Pizlo <fpizlo@apple.com> + + Sparse array size threshold should be increased to 100000 + https://bugs.webkit.org/show_bug.cgi?id=100827 + + Reviewed by Oliver Hunt. + + This enables the use of contiguous arrays in programs that previously + couldn't use them. And I so far can't see any examples of this being + a downside. To the extent that there is a downside, it ought to be + addressed by GC: https://bugs.webkit.org/show_bug.cgi?id=100828 + + * runtime/ArrayConventions.h: + (JSC): + +2012-10-31 Mark Lam <mark.lam@apple.com> + + C++ llint 64-bit backend needs to zero extend results of int32 operations. + https://bugs.webkit.org/show_bug.cgi?id=100899. + + Reviewed by Filip Pizlo. + + llint asm instructions ending in "i" for a 64-bit machine expects the + high 32-bit of registers to be zero'ed out when a 32-bit instruction + writes into a register. Fixed the C++ llint to honor this. + + Fixed the index register used in BaseIndex addressing to be of size + intptr_t as expected. + + Updated CLoopRegister to handle different endiannesss configurations. + + * llint/LowLevelInterpreter.cpp: + (JSC::CLoopRegister::clearHighWord): + - new method to clear the high 32-bit of a 64-bit register. + It's a no-op for the 32-bit build. + (CLoopRegister): + - CLoopRegister now takes care of packing and byte endianness order. + (JSC::CLoop::execute): - Added an assert. + * offlineasm/cloop.rb: + - Add calls to clearHighWord() wherever needed. + +2012-10-31 Mark Lam <mark.lam@apple.com> + + A JSC printf (support for %J+s and %b). + https://bugs.webkit.org/show_bug.cgi?id=100566. + + Reviewed by Michael Saboff. + + Added VMInspector::printf(), fprintf(), sprintf(), and snprintf(). + - %b prints ints as boolean TRUE (non-zero) or FALSE (zero). + - %Js prints a WTF::String* like a %s prints a char*. + Also works for 16bit WTF::Strings (prints wchar_t* using %S). 
+ - '+' is a modifier meaning 'use verbose mode', and %J+s is an example + of its use. + + * JavaScriptCore.xcodeproj/project.pbxproj: + * interpreter/VMInspector.cpp: + (FormatPrinter): + (JSC::FormatPrinter::~FormatPrinter): + (JSC::FormatPrinter::print): + (JSC::FormatPrinter::printArg): + (JSC::FormatPrinter::printWTFString): + (JSC::FileFormatPrinter::FileFormatPrinter): + (JSC::FileFormatPrinter::printArg): + (JSC::StringFormatPrinter::StringFormatPrinter): + (JSC::StringFormatPrinter::printArg): + (JSC::StringNFormatPrinter::StringNFormatPrinter): + (JSC::StringNFormatPrinter::printArg): + (JSC::VMInspector::fprintf): + (JSC::VMInspector::printf): + (JSC::VMInspector::sprintf): + (JSC::VMInspector::snprintf): + * interpreter/VMInspector.h: + (VMInspector): + +2012-10-31 Mark Lam <mark.lam@apple.com> + + 64-bit llint PC offset can be negative: using an unsigned shift is a bug. + https://bugs.webkit.org/show_bug.cgi?id=100896. + + Reviewed by Filip Pizlo. + + Fixed the PC offset divisions in the 64-bit llint asm to use rshift instead of urshift. + + * llint/LowLevelInterpreter64.asm: + +2012-10-30 Yuqiang Xian <yuqiang.xian@intel.com> + + glsl-function-atan.html WebGL conformance test fails after https://bugs.webkit.org/show_bug.cgi?id=99154 + https://bugs.webkit.org/show_bug.cgi?id=100789 + + Reviewed by Filip Pizlo. + + We accidently missed a bitwise double to int64 conversion. + + * dfg/DFGSpeculativeJIT.h: + (JSC::DFG::SpeculativeJIT::silentFill): + +2012-10-30 Joseph Pecoraro <pecoraro@apple.com> + + [Mac] Sync up FeatureDefine Configuration Files + https://bugs.webkit.org/show_bug.cgi?id=100171 + + Reviewed by David Kilzer. + + Follow up to better coordinate with iOS feature defines. Make: + + - ENABLE_FILTERS always on + - ENABLE_INPUT_* iphonesimulator values point to the iphoneos values + + * Configurations/FeatureDefines.xcconfig: + +2012-10-30 Joseph Pecoraro <pecoraro@apple.com> + + [Mac] Sync up FeatureDefine Configuration Files + https://bugs.webkit.org/show_bug.cgi?id=100171 + + Reviewed by David Kilzer. + + Ensure an identical FeatureDefine files across all projects. Changes: + + - ENABLE_CSS_BOX_DECORATION_BREAK should be in all + - ENABLE_PDFKIT_PLUGIN should be in all + - ENABLE_RESOLUTION_MEDIA_QUERY should be in all + - ENABLE_ENCRYPTED_MEDIA should be in all + - ENABLE_HIDDEN_PAGE_DOM_TIMER_THROTTLING with corrected value + - Some alphabetical ordering cleanup + + * Configurations/FeatureDefines.xcconfig: + +2012-10-30 Mark Hahnenberg <mhahnenberg@apple.com> + + Arrays can change IndexingType in the middle of sorting + https://bugs.webkit.org/show_bug.cgi?id=100773 + + Reviewed by Filip Pizlo. + + Instead of giving up, we just fetch the appropriate vector based on the current + IndexingType of the array. + + * runtime/JSArray.cpp: + (JSC::JSArray::sortVector): + * runtime/JSObject.h: + (JSObject): + (JSC::JSObject::currentIndexingData): + (JSC::JSObject::currentRelevantLength): + +2012-10-29 Anders Carlsson <andersca@apple.com> + + Build WebKit as C++11 on Mac + https://bugs.webkit.org/show_bug.cgi?id=100720 + + Reviewed by Daniel Bates. + + * Configurations/Base.xcconfig: + Add CLANG_CXX_LANGUAGE_STANDARD=gnu++0x. 
+ + * bytecompiler/BytecodeGenerator.cpp: + (JSC::BytecodeGenerator::generate): + (JSC::BytecodeGenerator::pushFinallyContext): + (JSC::BytecodeGenerator::beginSwitch): + * llint/LLIntOffsetsExtractor.cpp: + * runtime/Identifier.cpp: + (JSC::Identifier::add8): + * runtime/Identifier.h: + (JSC::Identifier::add): + * runtime/JSONObject.cpp: + (JSC::appendStringToStringBuilder): + * runtime/StringPrototype.cpp: + (JSC::replaceUsingStringSearch): + Add static_casts to prevent implicit type conversions in non-constant initializer lists. + +2012-10-28 Mark Rowe <mrowe@apple.com> + + Simplify Xcode configuration settings that used to vary between OS versions. + + Reviewed by Dan Bernstein. + + * Configurations/Base.xcconfig: + * Configurations/DebugRelease.xcconfig: + * Configurations/JavaScriptCore.xcconfig: + +2012-10-28 Mark Rowe <mrowe@apple.com> + + Remove references to unsupported OS and Xcode versions. + + Reviewed by Anders Carlsson. + + * Configurations/Base.xcconfig: + * Configurations/CompilerVersion.xcconfig: Removed. + * Configurations/DebugRelease.xcconfig: + * Configurations/Version.xcconfig: + * JavaScriptCore.xcodeproj/project.pbxproj: + +2012-10-29 Michael Saboff <msaboff@apple.com> + + Non-special escape character sequences cause JSC::Lexer::parseString to create 16 bit strings + https://bugs.webkit.org/show_bug.cgi?id=100576 + + Reviewed by Darin Adler. + + Changed singleEscape() processing to be based on a lookup of a static table. The table + covers ASCII characters SPACE through DEL. If a character can be a single character escape, + then the table provides the non-zero result of that escape. Updated the result of + singleEscape to be an LChar to make the table as small as possible. + Added a new test fast/js/normal-character-escapes-in-string-literals.html to validated + the behavior. + + * parser/Lexer.cpp: + (JSC::singleEscape): + (JSC::Lexer::parseString): + (JSC::Lexer::parseStringSlowCase): + +2012-10-29 Enrica Casucci <enrica@apple.com> + + Add ENABLE_USERSELECT_ALL feature flag. + https://bugs.webkit.org/show_bug.cgi?id=100559 + + Reviewed by Eric Seidel. + + * Configurations/FeatureDefines.xcconfig: + +2012-10-28 Filip Pizlo <fpizlo@apple.com> + + DFG should be able to emit effectful structure checks + https://bugs.webkit.org/show_bug.cgi?id=99260 + + Reviewed by Oliver Hunt. + + This change allows us to find out if an array access that has gone polymorphic + is operating over known structures - i.e. the primordial array structures of the + global object that the code block containing the array access belongs to. We + term this state "OriginalArray" for short. The fact that the access has gone + polymorphic means that the array profile will not be able to report the set of + structures it had seen - but if it can tell us that all of the structures were + primordial then it just so happens that we can deduce what the structure set + would have been by just querying the code block's global object. This allows us + to emit an ArrayifyToStructure instead of an Arrayify if we find that we need to + do conversions. The fast path of an ArrayifyToStructure is exactly like the fast + path of a CheckStructure and is mostly subject to the same optimizations. It + also burns one fewer registers. + + Essentially the notion of OriginalArray is a super cheap way of getting the + array profile to tell us a structure set instead of a singleton structure. + Currently, the array profile can only tell us the structure seen at an array + access if there was exactly one structure. 
If there were multiple structures, it + won't tell us anything other than the array modes and other auxiliary profiling + data (whether there were stores to holes, for example). With OriginalArray, we + cheaply get a structure set if all of the structures were primordial for the + code block's global object, since in that case the array mode set (ArrayModes) + can directly tell us the structure set. In the future, we might consider adding + complete structure sets to the array profiles, but I suspect that we would hit + diminishing returns if we did so - it would only help if we have array accesses + that are both polymorphic and are cross-global-object accesses (rare) or if the + arrays had named properties or other structure transitions that are unrelated to + indexing type (also rare). + + This also does away with Arrayify (and the new ArrayifyToStructure) returning + the butterfly pointer. This turns out to be faster and easier to CSE. + + And, this also changes constant folding to be able to eliminate CheckStructure, + ForwardCheckStructure, and ArrayifyToStructure in addition to being able to + transform them into structure transition watchpoints. This is great for + ArrayifyToStructure because then CSE and CFA know that there is no side effect. + Converting CheckStructure and ForwardCheckStructure to also behave this way is + just a matter of elegance. + + This has no performance impact right now. It's intended to alleviate some of the + regressions seen in the early implementation of + https://bugs.webkit.org/show_bug.cgi?id=98606. + + * bytecode/ArrayProfile.cpp: + (JSC::ArrayProfile::computeUpdatedPrediction): + * bytecode/ArrayProfile.h: + (JSC): + (JSC::ArrayProfile::ArrayProfile): + (ArrayProfile): + (JSC::ArrayProfile::usesOriginalArrayStructures): + * bytecode/CodeBlock.cpp: + (JSC::CodeBlock::updateAllPredictionsAndCountLiveness): + * dfg/DFGAbstractState.cpp: + (JSC::DFG::AbstractState::execute): + * dfg/DFGArrayMode.cpp: + (JSC::DFG::ArrayMode::fromObserved): + (JSC::DFG::ArrayMode::alreadyChecked): + (JSC::DFG::arrayClassToString): + * dfg/DFGArrayMode.h: + (JSC::DFG::ArrayMode::withProfile): + (JSC::DFG::ArrayMode::isJSArray): + (ArrayMode): + (JSC::DFG::ArrayMode::isJSArrayWithOriginalStructure): + (JSC::DFG::ArrayMode::supportsLength): + (JSC::DFG::ArrayMode::arrayModesWithIndexingShape): + * dfg/DFGByteCodeParser.cpp: + (JSC::DFG::ByteCodeParser::getArrayMode): + (JSC::DFG::ByteCodeParser::getArrayModeAndEmitChecks): + (JSC::DFG::ByteCodeParser::handleGetByOffset): + * dfg/DFGCSEPhase.cpp: + (JSC::DFG::CSEPhase::checkStructureElimination): + (JSC::DFG::CSEPhase::structureTransitionWatchpointElimination): + (JSC::DFG::CSEPhase::getPropertyStorageLoadElimination): + (JSC::DFG::CSEPhase::checkArrayElimination): + (JSC::DFG::CSEPhase::getScopeRegistersLoadElimination): + * dfg/DFGConstantFoldingPhase.cpp: + (JSC::DFG::ConstantFoldingPhase::foldConstants): + * dfg/DFGFixupPhase.cpp: + (JSC::DFG::FixupPhase::fixupNode): + (JSC::DFG::FixupPhase::checkArray): + * dfg/DFGNode.h: + (JSC::DFG::Node::hasStructure): + (JSC::DFG::Node::hasArrayMode): + (JSC::DFG::Node::arrayMode): + * dfg/DFGNodeType.h: + (DFG): + * dfg/DFGPredictionPropagationPhase.cpp: + (JSC::DFG::PredictionPropagationPhase::propagate): + * dfg/DFGSpeculativeJIT.cpp: + (JSC::DFG::SpeculativeJIT::jumpSlowForUnwantedArrayMode): + (JSC::DFG::SpeculativeJIT::arrayify): + * dfg/DFGSpeculativeJIT.h: + (SpeculativeJIT): + * dfg/DFGSpeculativeJIT32_64.cpp: + (JSC::DFG::SpeculativeJIT::compile): + * 
dfg/DFGSpeculativeJIT64.cpp: + (JSC::DFG::SpeculativeJIT::compile): + * runtime/JSGlobalObject.h: + (JSC::JSGlobalObject::isOriginalArrayStructure): + * runtime/Structure.cpp: + (JSC::Structure::nonPropertyTransition): + +2012-10-28 Filip Pizlo <fpizlo@apple.com> + + There should not be blind spots in array length array profiling + https://bugs.webkit.org/show_bug.cgi?id=100620 + + Reviewed by Oliver Hunt. + + I don't think this has any performance impact. But it's good to not have random + programs occasionally emit a GetById for array length accesses. + + * jit/JITPropertyAccess.cpp: + (JSC::JIT::compileGetByIdHotPath): + (JSC::JIT::privateCompilePatchGetArrayLength): + * jit/JITPropertyAccess32_64.cpp: + (JSC::JIT::compileGetByIdHotPath): + (JSC::JIT::privateCompilePatchGetArrayLength): + +2012-10-28 Filip Pizlo <fpizlo@apple.com> + + Unreviewed, make always-true enum-to-int comparisons use casts. + + * dfg/DFGFPRInfo.h: + (JSC::DFG::FPRInfo::debugName): + * dfg/DFGGPRInfo.h: + (JSC::DFG::JSValueSource::tagGPR): + (JSC::DFG::GPRInfo::toIndex): + (JSC::DFG::GPRInfo::debugName): + * runtime/JSTypeInfo.h: + (JSC::TypeInfo::TypeInfo): + +2012-10-27 Filip Pizlo <fpizlo@apple.com> + + OSR exit compilation should defend against argument recoveries from code blocks that are no longer on the inline stack + https://bugs.webkit.org/show_bug.cgi?id=100601 + + Reviewed by Oliver Hunt. + + This happened to me while I was fixing bugs for https://bugs.webkit.org/show_bug.cgi?id=100599. + I'm not sure how to reproduce this. + + * dfg/DFGAssemblyHelpers.h: + (JSC::DFG::AssemblyHelpers::baselineCodeBlockFor): + (AssemblyHelpers): + * dfg/DFGOSRExitCompiler32_64.cpp: + (JSC::DFG::OSRExitCompiler::compileExit): + * dfg/DFGOSRExitCompiler64.cpp: + (JSC::DFG::OSRExitCompiler::compileExit): + +2012-10-27 Filip Pizlo <fpizlo@apple.com> + + DFG::Array::Mode needs to be cleaned up + https://bugs.webkit.org/show_bug.cgi?id=100599 + + Reviewed by Oliver Hunt. + + Turn the previous massive Array::Mode enum into a class that contains four + fields, the type, whether it's a JSArray, the level of speculation, and the + kind of conversion to perform. + + No performance or behavioral change. 
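The four-field ArrayMode described above lends itself to being packed into a single word. The sketch below shows the general shape of such a class; the field layout, enum values, and names are hypothetical and do not mirror the actual DFG::ArrayMode bit assignment.

```cpp
// Hypothetical sketch: pack type, array class, speculation level, and
// conversion kind into one 32-bit word so a mode can be stored and compared
// as a single integer.
#include <cstdint>

enum class ArrayType : uint8_t { SelectUsingPredictions, String, Contiguous, ArrayStorage };
enum class ArrayClass : uint8_t { Array, OriginalArray, NonArray, PossiblyArray };
enum class Speculation : uint8_t { InBounds, ToHole, OutOfBounds };
enum class Conversion : uint8_t { AsIs, Convert };

class ArrayMode {
public:
    ArrayMode(ArrayType type, ArrayClass arrayClass, Speculation speculation, Conversion conversion)
        : m_word(static_cast<uint32_t>(type)
            | (static_cast<uint32_t>(arrayClass) << 8)
            | (static_cast<uint32_t>(speculation) << 16)
            | (static_cast<uint32_t>(conversion) << 24))
    {
    }

    ArrayType type() const { return static_cast<ArrayType>(m_word & 0xff); }
    ArrayClass arrayClass() const { return static_cast<ArrayClass>((m_word >> 8) & 0xff); }
    Speculation speculation() const { return static_cast<Speculation>((m_word >> 16) & 0xff); }
    Conversion conversion() const { return static_cast<Conversion>((m_word >> 24) & 0xff); }

    uint32_t asWord() const { return m_word; }
    static ArrayMode fromWord(uint32_t word) { return ArrayMode(word); }

    bool operator==(const ArrayMode& other) const { return m_word == other.m_word; }
    bool operator!=(const ArrayMode& other) const { return m_word != other.m_word; }

private:
    explicit ArrayMode(uint32_t word) : m_word(word) { }

    uint32_t m_word;
};
```

Packing into one word is what makes asWord()/fromWord() round-tripping and cheap equality checks possible, which matches the accessors listed in the entry above.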
+ + * dfg/DFGAbstractState.cpp: + (JSC::DFG::AbstractState::execute): + * dfg/DFGArgumentsSimplificationPhase.cpp: + (JSC::DFG::ArgumentsSimplificationPhase::run): + * dfg/DFGArrayMode.cpp: + (JSC::DFG::ArrayMode::fromObserved): + (JSC::DFG::ArrayMode::refine): + (JSC::DFG::ArrayMode::alreadyChecked): + (JSC::DFG::arrayTypeToString): + (JSC::DFG::arrayClassToString): + (DFG): + (JSC::DFG::arraySpeculationToString): + (JSC::DFG::arrayConversionToString): + (JSC::DFG::ArrayMode::toString): + * dfg/DFGArrayMode.h: + (DFG): + (ArrayMode): + (JSC::DFG::ArrayMode::ArrayMode): + (JSC::DFG::ArrayMode::type): + (JSC::DFG::ArrayMode::arrayClass): + (JSC::DFG::ArrayMode::speculation): + (JSC::DFG::ArrayMode::conversion): + (JSC::DFG::ArrayMode::asWord): + (JSC::DFG::ArrayMode::fromWord): + (JSC::DFG::ArrayMode::withSpeculation): + (JSC::DFG::ArrayMode::usesButterfly): + (JSC::DFG::ArrayMode::isJSArray): + (JSC::DFG::ArrayMode::isInBounds): + (JSC::DFG::ArrayMode::mayStoreToHole): + (JSC::DFG::ArrayMode::isOutOfBounds): + (JSC::DFG::ArrayMode::isSlowPut): + (JSC::DFG::ArrayMode::canCSEStorage): + (JSC::DFG::ArrayMode::lengthNeedsStorage): + (JSC::DFG::ArrayMode::modeForPut): + (JSC::DFG::ArrayMode::isSpecific): + (JSC::DFG::ArrayMode::supportsLength): + (JSC::DFG::ArrayMode::benefitsFromStructureCheck): + (JSC::DFG::ArrayMode::doesConversion): + (JSC::DFG::ArrayMode::arrayModesThatPassFiltering): + (JSC::DFG::ArrayMode::operator==): + (JSC::DFG::ArrayMode::operator!=): + (JSC::DFG::ArrayMode::arrayModesWithIndexingShape): + (JSC::DFG::canCSEStorage): + (JSC::DFG::lengthNeedsStorage): + * dfg/DFGByteCodeParser.cpp: + (JSC::DFG::ByteCodeParser::getArrayMode): + (JSC::DFG::ByteCodeParser::getArrayModeAndEmitChecks): + (JSC::DFG::ByteCodeParser::handleIntrinsic): + (JSC::DFG::ByteCodeParser::parseBlock): + * dfg/DFGCSEPhase.cpp: + (JSC::DFG::CSEPhase::getArrayLengthElimination): + (JSC::DFG::CSEPhase::checkArrayElimination): + (JSC::DFG::CSEPhase::getIndexedPropertyStorageLoadElimination): + (JSC::DFG::CSEPhase::performNodeCSE): + * dfg/DFGConstantFoldingPhase.cpp: + (JSC::DFG::ConstantFoldingPhase::foldConstants): + * dfg/DFGFixupPhase.cpp: + (JSC::DFG::FixupPhase::fixupNode): + (JSC::DFG::FixupPhase::checkArray): + (JSC::DFG::FixupPhase::blessArrayOperation): + * dfg/DFGGraph.cpp: + (JSC::DFG::Graph::dump): + * dfg/DFGGraph.h: + (JSC::DFG::Graph::byValIsPure): + * dfg/DFGNode.h: + (JSC::DFG::Node::arrayMode): + (JSC::DFG::Node::setArrayMode): + * dfg/DFGSpeculativeJIT.cpp: + (JSC::DFG::SpeculativeJIT::typedArrayDescriptor): + (JSC::DFG::SpeculativeJIT::jumpSlowForUnwantedArrayMode): + (JSC::DFG::SpeculativeJIT::checkArray): + (JSC::DFG::SpeculativeJIT::arrayify): + (JSC::DFG::SpeculativeJIT::compileGetByValOnString): + (JSC::DFG::SpeculativeJIT::compileGetByValOnIntTypedArray): + (JSC::DFG::SpeculativeJIT::compileGetByValOnFloatTypedArray): + (JSC::DFG::SpeculativeJIT::compilePutByValForFloatTypedArray): + (JSC::DFG::SpeculativeJIT::compileGetIndexedPropertyStorage): + (JSC::DFG::SpeculativeJIT::compileGetByValOnArguments): + (JSC::DFG::SpeculativeJIT::compileGetArgumentsLength): + (JSC::DFG::SpeculativeJIT::compileGetArrayLength): + (JSC::DFG::SpeculativeJIT::temporaryRegisterForPutByVal): + * dfg/DFGSpeculativeJIT.h: + (JSC::DFG::SpeculativeJIT::putByValWillNeedExtraRegister): + (SpeculativeJIT): + * dfg/DFGSpeculativeJIT32_64.cpp: + (JSC::DFG::SpeculativeJIT::compile): + * dfg/DFGSpeculativeJIT64.cpp: + (JSC::DFG::SpeculativeJIT::compile): + +2012-10-27 Dan Bernstein <mitz@apple.com> + + 
REAL_PLATFORM_NAME build setting is no longer needed + https://bugs.webkit.org/show_bug.cgi?id=100587 + + Reviewed by Mark Rowe. + + Removed the definition of REAL_PLATFORM_NAME and replaced references to it with references + to PLATFORM_NAME. + + * Configurations/Base.xcconfig: + * Configurations/CompilerVersion.xcconfig: + * Configurations/DebugRelease.xcconfig: + * Configurations/FeatureDefines.xcconfig: + * Configurations/JSC.xcconfig: + * Configurations/JavaScriptCore.xcconfig: + * Configurations/ToolExecutable.xcconfig: + +2012-10-25 Filip Pizlo <fpizlo@apple.com> + + Forward OSR calculation is wrong in the presence of multiple SetLocals, or a mix of SetLocals and Phantoms + https://bugs.webkit.org/show_bug.cgi?id=100461 + + Reviewed by Oliver Hunt and Gavin Barraclough. + + This does a couple of things. First, it removes the part of the change in r131822 that made the forward + OSR exit calculator capable of handling multiple SetLocals. That change was wrong, because it would + blindly assume that all SetLocals had the same ValueRecovery, and would ignore the possibility that if + there is no value recovery then a ForwardCheckStructure on the first SetLocal would not know how to + recover the state associated with the second SetLocal. Then, it introduces the invariant that any bytecode + op that decomposes into multiple SetLocals must first emit dead SetLocals as hints and then emit a second + set of SetLocals to actually do the setting of the locals. This means that if a ForwardCheckStructure (or + any other hoisted forward speculation) is inserted, it will always be inserted on the second set of + SetLocals (since hoisting only touches the live ones), at which point OSR will already know about the + mov hints implied by the first set of (dead) SetLocals. This gives us the behavior we wanted, namely, that + a ForwardCheckStructure applied to a variant set by a resolve_with_base-like operation can correctly do a + forward exit while also ensuring that prior to exiting we set the appropriate locals. + + * dfg/DFGByteCodeParser.cpp: + (JSC::DFG::ByteCodeParser::parseBlock): + * dfg/DFGOSRExit.cpp: + (JSC::DFG::OSRExit::OSRExit): + * dfg/DFGOSRExit.h: + (OSRExit): + * dfg/DFGOSRExitCompiler.cpp: + * dfg/DFGOSRExitCompiler32_64.cpp: + (JSC::DFG::OSRExitCompiler::compileExit): + * dfg/DFGOSRExitCompiler64.cpp: + (JSC::DFG::OSRExitCompiler::compileExit): + * dfg/DFGSpeculativeJIT.cpp: + (JSC::DFG::SpeculativeJIT::convertLastOSRExitToForward): + +2012-10-26 Simon Hausmann <simon.hausmann@digia.com> [Qt] Fix the LLInt build on Windows https://bugs.webkit.org/show_bug.cgi?id=97648 - Reviewed by NOBODY (OOPS!). + Reviewed by Tor Arne Vestbø. The main change for the port on Windows is changing the way offsets are extracted and the LLIntAssembly.h is generated to accomodate release and debug configurations. @@ -38,21 +1203,45 @@ In Target.pri we have to also make sure that those directories are in the include path according to the release or debug configuration. - Lastly a small tweak in the LLIntOffsetsExtractor build was needed to make sure that - we include JavaScriptCore/config.h instead of WTF/config.h, required to fix the build - issues originally pasted in bug #97648. + Lastly a small tweak - swapping WTF.pri and JSC.pri inclusions - in the + LLIntOffsetsExtractor build was needed to make sure that we include + JavaScriptCore/config.h instead of WTF/config.h, required to fix the + build issues originally pasted in bug #97648. 
* DerivedSources.pri: * JavaScriptCore.pro: * LLIntOffsetsExtractor.pro: * Target.pri: -2012-10-25 Simon Hausmann <simon.hausmann@digia.com> +2012-10-26 Gabor Ballabas <gaborb@inf.u-szeged.hu> + + [Qt] Enable JSC's disassembler on x86, x86_64 Linux + https://bugs.webkit.org/show_bug.cgi?id=100386 + + Reviewed by Simon Hausmann. + + It works fine on Linux x86, x86_64 just needs to be enabled in the + QtWebKit build system. + + * DerivedSources.pri: + * JavaScriptCore.pri: + * Target.pri: + +2012-10-26 Thiago Marcos P. Santos <thiago.santos@intel.com> + + Add feature flags for CSS Device Adaptation + https://bugs.webkit.org/show_bug.cgi?id=95960 + + Reviewed by Kenneth Rohde Christiansen. + + * Configurations/FeatureDefines.xcconfig: + +2012-10-26 Simon Hausmann <simon.hausmann@digia.com> [WIN] Make LLInt offsets extractor work on Windows https://bugs.webkit.org/show_bug.cgi?id=100369 - Reviewed by NOBODY (OOPS!). + Reviewed by Kenneth Rohde Christiansen. Open the input file explicitly in binary mode to prevent ruby/Windows from thinking that it's a text mode file that needs even new line conversions. The binary mode parameter is @@ -60,6 +1249,194 @@ * offlineasm/offsets.rb: +2012-10-25 Michael Saboff <msaboff@apple.com> + + SymbolTableIndexHashTraits::needsDestruction should be set to true + https://bugs.webkit.org/show_bug.cgi?id=100437 + + Reviewed by Mark Hahnenberg. + + For correctness, set SymbolTableIndexHashTraits::needsDestruction to true since SymbolTableEntry's do + need to have their destructor called due to the possibility of rare data. + + * runtime/SymbolTable.h: + (SymbolTableIndexHashTraits): + +2012-10-25 Filip Pizlo <fpizlo@apple.com> + + DFG Arrayify elimination should replace it with GetButterfly rather than Phantom + https://bugs.webkit.org/show_bug.cgi?id=100441 + + Reviewed by Oliver Hunt and Gavin Barraclough. + + Made array profiler's to-string helper behave correctly. + + Made Arrayify elimination do the right thing (convert to GetButterfly). + + Made CFA's interference analysis track clobbered array modes correctly, mostly by + simplifying the machinery. + + * bytecode/ArrayProfile.cpp: + (JSC::arrayModesToString): + * dfg/DFGAbstractState.cpp: + (JSC::DFG::AbstractState::execute): + * dfg/DFGAbstractValue.h: + (JSC::DFG::AbstractValue::clobberArrayModes): + (AbstractValue): + * dfg/DFGConstantFoldingPhase.cpp: + (JSC::DFG::ConstantFoldingPhase::foldConstants): + +2012-10-25 Filip Pizlo <fpizlo@apple.com> + + REGRESSION (r131793-r131826): Crash going to wikifonia.org + https://bugs.webkit.org/show_bug.cgi?id=100281 + + Reviewed by Oliver Hunt. + + Restore something that got lost in the resolve refactoring: the ability to give up on life if + we see a resolve of 'arguments'. + + * runtime/JSScope.cpp: + (JSC::JSScope::resolveContainingScopeInternal): + +2012-10-25 Dominik Röttsches <dominik.rottsches@intel.com> + + Conditionalize XHR timeout support + https://bugs.webkit.org/show_bug.cgi?id=100356 + + Reviewed by Adam Barth. + + Adding XHR_TIMEOUT feature to conditionalize this on ports without network backend support. + + * Configurations/FeatureDefines.xcconfig: + +2012-10-25 Michael Saboff <msaboff@apple.com> + + REGRESSION (r131836): failures in list styles tests on EFL, GTK + https://bugs.webkit.org/show_bug.cgi?id=99824 + + Reviewed by Oliver Hunt. + + Saved start of string since it is modified by call convertUTF8ToUTF16(). 
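The pointer pitfall behind this fix (also visible in the JSStringRef.cpp hunk at the top of this diff) is easy to reproduce in isolation. The converter below is a stand-in with a made-up body, not WTF's convertUTF8ToUTF16; the point is only that a routine which advances the source pointer it is handed forces the caller to capture the original start first.

```cpp
// Hypothetical illustration: the conversion routine advances *sourceStart,
// so any later use of the beginning of the string must be saved up front.
#include <cstddef>

static void convertAll(const char** sourceStart, const char* sourceEnd)
{
    while (*sourceStart < sourceEnd)
        ++*sourceStart; // stand-in for real per-character conversion work
}

size_t convertedLength(const char* string, size_t length)
{
    const char* stringStart = string;     // capture before the call mutates 'string'
    convertAll(&string, string + length); // 'string' now points one past the last byte read
    return static_cast<size_t>(string - stringStart);
}
```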
+ + * API/JSStringRef.cpp: + (JSStringCreateWithUTF8CString): + +2012-10-24 Filip Pizlo <fpizlo@apple.com> + + DFG NewArrayBuffer node should keep its data in a structure on the side to free up one of the opInfos + https://bugs.webkit.org/show_bug.cgi?id=100328 + + Reviewed by Oliver Hunt. + + * dfg/DFGByteCodeParser.cpp: + (JSC::DFG::ByteCodeParser::parseBlock): + * dfg/DFGGraph.h: + (Graph): + * dfg/DFGNode.h: + (NewArrayBufferData): + (DFG): + (JSC::DFG::Node::newArrayBufferData): + (Node): + (JSC::DFG::Node::startConstant): + (JSC::DFG::Node::numConstants): + +2012-10-25 Mark Lam <mark.lam@apple.com> + + Update the C++ llint to work with the latest op_resolve... changes. + https://bugs.webkit.org/show_bug.cgi?id=100345. + + Reviewed by Oliver Hunt. + + * llint/LowLevelInterpreter.cpp: + (JSC::CLoop::execute): + - emit opcode name as label when not using COMPUTED_GOTOs. The new op_resolve + opcodes have jumps to these labels. + - declare all opcode labels as UNUSED_LABEL()s to keep the compiler happy + for opcodes that are not referenced by anyone. + * offlineasm/asm.rb: + - strip llint_ prefix from opcode names used as labels. + +2012-10-24 Yuqiang Xian <yuqiang.xian@intel.com> + + Refactor LLInt64 to distinguish the pointer operations from the 64-bit integer operations + https://bugs.webkit.org/show_bug.cgi?id=100321 + + Reviewed by Filip Pizlo. + + We have refactored the MacroAssembler and JIT compilers to distinguish + the pointer operations from the 64-bit integer operations (see bug #99154). + Now we want to do the similar work for LLInt, and the goal is same as + the one mentioned in 99154. + + This is the first part of the modification: in the offline assembler, + adding the support of the "<foo>q" instructions which will be used for + 64-bit integer operations. + + * llint/LowLevelInterpreter.cpp: + (JSC::CLoop::execute): + * offlineasm/cloop.rb: + * offlineasm/instructions.rb: + * offlineasm/x86.rb: + +2012-10-24 Filip Pizlo <fpizlo@apple.com> + + DFG compileBlahBlahByVal methods for Contiguous and ArrayStorage have only one caller and should be removed + https://bugs.webkit.org/show_bug.cgi?id=100311 + + Reviewed by Mark Hahnenberg. + + Just trying to simplify things before I make them more complicated again. + + * dfg/DFGSpeculativeJIT.h: + (SpeculativeJIT): + (JSC::DFG::SpeculativeJIT::temporaryRegisterForPutByVal): + * dfg/DFGSpeculativeJIT32_64.cpp: + (DFG): + (JSC::DFG::SpeculativeJIT::compile): + * dfg/DFGSpeculativeJIT64.cpp: + (DFG): + (JSC::DFG::SpeculativeJIT::compile): + +2012-10-23 Andreas Kling <kling@webkit.org> + + CodeBlock: Give m_putToBaseOperations an inline capacity. + <http://webkit.org/b/100190> + <rdar://problem/12562466> + + Reviewed by Oliver Hunt. + + Since the CodeBlock constructor always inserts a single PutToBaseOperation, but there's no + guarantee that more will follow, give the m_putToBaseOperations vector an inline capacity of 1. + There are 4009 of these Vectors on Membuster3, and only 126 of them have more than a single entry. + + This change yields a 1.90MB reduction in memory usage. + + * bytecode/CodeBlock.h: + (CodeBlock): + +2012-10-23 Christophe Dumez <christophe.dumez@intel.com> + + Regression(r132143): Assertion hit in JSC::Interpreter::StackPolicy::StackPolicy(JSC::Interpreter&, const WTF::StackBounds&) + https://bugs.webkit.org/show_bug.cgi?id=100109 + + Reviewed by Oliver Hunt. + + Fix possible integer overflow in StackPolicy constructor by + using size_t type instead of int for stack sizes. 
The value + returned by StackBounds::size() is of type size_t but was + assigned to an int, which may overflow. + + * interpreter/Interpreter.cpp: + (JSC): + (JSC::Interpreter::StackPolicy::StackPolicy): + +2012-10-23 Carlos Garcia Campos <cgarcia@igalia.com> + + Unreviewed. Fix make distcheck. + + * GNUmakefile.list.am: Add missing header file. + 2012-10-23 Mark Lam <mark.lam@apple.com> Make topCallFrame reliable. @@ -729,7 +2106,7 @@ (JSC::JIT::emit_op_next_pname): (JSC::JIT::compileOpStrictEq): (JSC::JIT::emit_op_catch): - (JSC::JIT::emit_op_throw_reference_error): + (JSC::JIT::emit_op_throw_static_error): (JSC::JIT::emit_op_eq_null): (JSC::JIT::emit_op_neq_null): (JSC::JIT::emit_op_create_activation): diff --git a/Source/JavaScriptCore/Configurations/Base.xcconfig b/Source/JavaScriptCore/Configurations/Base.xcconfig index 47c8f7382..2de8597d7 100644 --- a/Source/JavaScriptCore/Configurations/Base.xcconfig +++ b/Source/JavaScriptCore/Configurations/Base.xcconfig @@ -21,11 +21,8 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include "CompilerVersion.xcconfig" - -COMPILER_SPECIFIC_WARNING_CFLAGS = $(COMPILER_SPECIFIC_WARNING_CFLAGS_$(TARGET_GCC_VERSION)); -COMPILER_SPECIFIC_WARNING_CFLAGS_LLVM_COMPILER = -Wexit-time-destructors -Wglobal-constructors -Wtautological-compare; - +CLANG_CXX_LANGUAGE_STANDARD = gnu++0x; +CLANG_CXX_LIBRARY = libc++; CLANG_WARN_CXX0X_EXTENSIONS = NO; DEBUG_INFORMATION_FORMAT = dwarf-with-dsym; GCC_C_LANGUAGE_STANDARD = gnu99; @@ -34,7 +31,7 @@ GCC_DYNAMIC_NO_PIC = NO; GCC_ENABLE_CPP_EXCEPTIONS = NO; GCC_ENABLE_CPP_RTTI = NO; GCC_ENABLE_OBJC_EXCEPTIONS = YES; -GCC_ENABLE_OBJC_GC = $(GCC_ENABLE_OBJC_GC_$(REAL_PLATFORM_NAME)); +GCC_ENABLE_OBJC_GC = $(GCC_ENABLE_OBJC_GC_$(PLATFORM_NAME)); GCC_ENABLE_OBJC_GC_iphoneos = NO; GCC_ENABLE_OBJC_GC_iphonesimulator = NO; GCC_ENABLE_OBJC_GC_macosx = supported; @@ -42,7 +39,7 @@ GCC_ENABLE_SYMBOL_SEPARATION = NO; GCC_FAST_OBJC_DISPATCH = YES; GCC_GENERATE_DEBUGGING_SYMBOLS = YES; GCC_INLINES_ARE_PRIVATE_EXTERN = YES; -GCC_MODEL_TUNING = $(GCC_MODEL_TUNING_$(REAL_PLATFORM_NAME)); +GCC_MODEL_TUNING = $(GCC_MODEL_TUNING_$(PLATFORM_NAME)); GCC_MODEL_TUNING_macosx = G5; GCC_OBJC_CALL_CXX_CDTORS = YES; GCC_PRECOMPILE_PREFIX_HEADER = YES; @@ -50,6 +47,7 @@ GCC_PREPROCESSOR_DEFINITIONS = $(DEBUG_DEFINES) HAVE_DTRACE=$(HAVE_DTRACE) WEBKI GCC_STRICT_ALIASING = YES; GCC_THREADSAFE_STATICS = NO; GCC_TREAT_WARNINGS_AS_ERRORS = YES; +GCC_VERSION = com.apple.compilers.llvm.clang.1_0; GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = NO; GCC_WARN_ABOUT_MISSING_NEWLINE = YES; GCC_WARN_ABOUT_MISSING_PROTOTYPES = YES; @@ -57,12 +55,12 @@ GCC_WARN_NON_VIRTUAL_DESTRUCTOR = YES; GCC_WARN_SIGN_COMPARE = YES; LINKER_DISPLAYS_MANGLED_NAMES = YES; PREBINDING = NO; -VALID_ARCHS = $(VALID_ARCHS_$(REAL_PLATFORM_NAME)); +VALID_ARCHS = $(VALID_ARCHS_$(PLATFORM_NAME)); VALID_ARCHS_iphoneos = $(ARCHS_STANDARD_32_BIT); VALID_ARCHS_iphonesimulator = $(ARCHS_STANDARD_32_BIT); VALID_ARCHS_macosx = i386 ppc x86_64 ppc64 $(ARCHS_UNIVERSAL_IPHONE_OS); -WARNING_CFLAGS_BASE = -Wall -Wextra -Wcast-qual -Wchar-subscripts -Wextra-tokens -Wformat=2 -Winit-self -Wmissing-format-attribute -Wmissing-noreturn -Wpacked -Wpointer-arith -Wredundant-decls -Wundef -Wwrite-strings; -WARNING_CFLAGS = $(WARNING_CFLAGS_$(REAL_PLATFORM_NAME)) $(COMPILER_SPECIFIC_WARNING_CFLAGS); +WARNING_CFLAGS_BASE = -Wall -Wextra -Wcast-qual -Wchar-subscripts -Wextra-tokens -Wformat=2 -Winit-self 
-Wmissing-format-attribute -Wmissing-noreturn -Wpacked -Wpointer-arith -Wredundant-decls -Wundef -Wwrite-strings -Wexit-time-destructors -Wglobal-constructors -Wtautological-compare; +WARNING_CFLAGS = $(WARNING_CFLAGS_$(PLATFORM_NAME)); WARNING_CFLAGS_iphoneos = $(WARNING_CFLAGS_BASE) -Wshorten-64-to-32; WARNING_CFLAGS_iphonesimulator = $(WARNING_CFLAGS_BASE) -Wshorten-64-to-32; WARNING_CFLAGS_macosx = $(WARNING_CFLAGS_macosx_$(CURRENT_ARCH)); @@ -70,22 +68,9 @@ WARNING_CFLAGS_macosx_ = $(WARNING_CFLAGS_BASE) -Wshorten-64-to-32; WARNING_CFLAGS_macosx_i386 = $(WARNING_CFLAGS_BASE) -Wshorten-64-to-32; WARNING_CFLAGS_macosx_ppc = $(WARNING_CFLAGS_BASE) -Wshorten-64-to-32; // FIXME: JavaScriptCore 64-bit builds should build with -Wshorten-64-to-32 -WARNING_CFLAGS_macosx_ppc64 = $(WARNING_CFLAGS_BASE); WARNING_CFLAGS_macosx_x86_64 = $(WARNING_CFLAGS_BASE); HEADER_SEARCH_PATHS = . icu "${BUILT_PRODUCTS_DIR}/usr/local/include" $(HEADER_SEARCH_PATHS); -CLANG_CXX_LIBRARY = $(CLANG_CXX_LIBRARY_$(TARGET_MAC_OS_X_VERSION_MAJOR)); -CLANG_CXX_LIBRARY_1060 = libstdc++; -CLANG_CXX_LIBRARY_1070 = libc++; -CLANG_CXX_LIBRARY_1080 = libc++; -CLANG_CXX_LIBRARY_1090 = libc++; - -REAL_PLATFORM_NAME = $(REAL_PLATFORM_NAME_$(PLATFORM_NAME)); -REAL_PLATFORM_NAME_ = $(REAL_PLATFORM_NAME_macosx); -REAL_PLATFORM_NAME_iphoneos = iphoneos; -REAL_PLATFORM_NAME_iphonesimulator = iphonesimulator; -REAL_PLATFORM_NAME_macosx = macosx; - TARGET_MAC_OS_X_VERSION_MAJOR = $(MAC_OS_X_VERSION_MAJOR); FRAMEWORK_SEARCH_PATHS = $(STAGED_FRAMEWORKS_SEARCH_PATH); @@ -93,12 +78,12 @@ FRAMEWORK_SEARCH_PATHS = $(STAGED_FRAMEWORKS_SEARCH_PATH); STAGED_FRAMEWORKS_SEARCH_PATH = $(STAGED_FRAMEWORKS_SEARCH_PATH_$(USE_STAGING_INSTALL_PATH)); STAGED_FRAMEWORKS_SEARCH_PATH_YES = $(NEXT_ROOT)$(SYSTEM_LIBRARY_DIR)/StagedFrameworks/Safari; -NORMAL_JAVASCRIPTCORE_FRAMEWORKS_DIR = $(NORMAL_JAVASCRIPTCORE_FRAMEWORKS_DIR_$(REAL_PLATFORM_NAME)); +NORMAL_JAVASCRIPTCORE_FRAMEWORKS_DIR = $(NORMAL_JAVASCRIPTCORE_FRAMEWORKS_DIR_$(PLATFORM_NAME)); NORMAL_JAVASCRIPTCORE_FRAMEWORKS_DIR_iphoneos = $(SYSTEM_LIBRARY_DIR)/PrivateFrameworks; NORMAL_JAVASCRIPTCORE_FRAMEWORKS_DIR_iphonesimulator = $(NORMAL_JAVASCRIPTCORE_FRAMEWORKS_DIR_iphoneos); NORMAL_JAVASCRIPTCORE_FRAMEWORKS_DIR_macosx = $(SYSTEM_LIBRARY_DIR)/Frameworks; -JAVASCRIPTCORE_FRAMEWORKS_DIR = $(JAVASCRIPTCORE_FRAMEWORKS_DIR_$(REAL_PLATFORM_NAME)); +JAVASCRIPTCORE_FRAMEWORKS_DIR = $(JAVASCRIPTCORE_FRAMEWORKS_DIR_$(PLATFORM_NAME)); JAVASCRIPTCORE_FRAMEWORKS_DIR_iphoneos = $(NORMAL_JAVASCRIPTCORE_FRAMEWORKS_DIR_iphoneos); JAVASCRIPTCORE_FRAMEWORKS_DIR_iphonesimulator = $(JAVASCRIPTCORE_FRAMEWORKS_DIR_iphoneos); @@ -128,7 +113,6 @@ DEAD_CODE_STRIPPING = $(DEAD_CODE_STRIPPING_$(CURRENT_VARIANT)); SECTORDER_FLAGS = -Wl,-order_file,JavaScriptCore.order; TARGETING_SAME_OS_X_VERSION = $(TARGETING_SAME_OS_X_VERSION_$(MAC_OS_X_VERSION_MAJOR)_$(TARGET_MAC_OS_X_VERSION_MAJOR)); -TARGETING_SAME_OS_X_VERSION_1060_1060 = YES; TARGETING_SAME_OS_X_VERSION_1070_1070 = YES; TARGETING_SAME_OS_X_VERSION_1080_1080 = YES; TARGETING_SAME_OS_X_VERSION_1090_1090 = YES; @@ -137,14 +121,4 @@ TARGETING_SAME_OS_X_VERSION_1090_1090 = YES; SDKROOT = $(SDKROOT_TARGETING_SAME_OS_X_VERSION_$(TARGETING_SAME_OS_X_VERSION)); SDKROOT_TARGETING_SAME_OS_X_VERSION_ = macosx; - -// HAVE_DTRACE is disabled on Leopard due to <rdar://problem/5628149> -HAVE_DTRACE = $(HAVE_DTRACE_$(REAL_PLATFORM_NAME)); -HAVE_DTRACE_iphoneos = 1; -HAVE_DTRACE_iphonesimulator = 1; -HAVE_DTRACE_macosx = $(HAVE_DTRACE_macosx_$(TARGET_MAC_OS_X_VERSION_MAJOR)); 
-HAVE_DTRACE_macosx_1050 = 0; -HAVE_DTRACE_macosx_1060 = 1; -HAVE_DTRACE_macosx_1070 = 1; -HAVE_DTRACE_macosx_1080 = 1; -HAVE_DTRACE_macosx_1090 = 1; +HAVE_DTRACE = 1; diff --git a/Source/JavaScriptCore/Configurations/CompilerVersion.xcconfig b/Source/JavaScriptCore/Configurations/CompilerVersion.xcconfig deleted file mode 100644 index 1d959fe9d..000000000 --- a/Source/JavaScriptCore/Configurations/CompilerVersion.xcconfig +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (C) 2009, 2010, 2011 Apple Inc. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -IS_XCODE_0400 = $(IS_XCODE_0400_$(XCODE_VERSION_MINOR)); -IS_XCODE_0400_0400 = YES; - -IS_XCODE_0400_OR_0410 = $(IS_XCODE_0400_OR_0410_$(XCODE_VERSION_MINOR)); -IS_XCODE_0400_OR_0410_0400 = YES; -IS_XCODE_0400_OR_0410_0410 = YES; - -// The version of the LLVM Compiler in Xcode 4.0 and earlier have difficulty compiling our code. -LLVM_COMPILER_UNSUITABLE_FOR_DEBUG_BUILDS = $(LLVM_COMPILER_UNSUITABLE_FOR_DEBUG_BUILDS_$(XCODE_VERSION_MAJOR)); -LLVM_COMPILER_UNSUITABLE_FOR_DEBUG_BUILDS_0300 = YES; -LLVM_COMPILER_UNSUITABLE_FOR_DEBUG_BUILDS_0400 = $(LLVM_COMPILER_UNSUITABLE_FOR_DEBUG_BUILDS_IS_XCODE_0400_$(IS_XCODE_0400)); -LLVM_COMPILER_UNSUITABLE_FOR_DEBUG_BUILDS_IS_XCODE_0400_YES = YES; - -// The version of the LLVM Compiler in Xcode 4.1 and earlier do not generate fast enough code. 
-LLVM_COMPILER_UNSUITABLE_FOR_OPTIMIZED_BUILDS = $(LLVM_COMPILER_UNSUITABLE_FOR_OPTIMIZED_BUILDS_$(XCODE_VERSION_MAJOR)); -LLVM_COMPILER_UNSUITABLE_FOR_OPTIMIZED_BUILDS_0300 = YES; -LLVM_COMPILER_UNSUITABLE_FOR_OPTIMIZED_BUILDS_0400 = $(LLVM_COMPILER_UNSUITABLE_FOR_OPTIMIZED_BUILDS_IS_XCODE_0400_OR_0410_$(IS_XCODE_0400_OR_0410)); -LLVM_COMPILER_UNSUITABLE_FOR_OPTIMIZED_BUILDS_IS_XCODE_0400_OR_0410_YES = YES; - -LLVM_COMPILER_SUITABLE_FOR_DEBUG_BUILDS = $(LLVM_COMPILER_SUITABLE_FOR_DEBUG_BUILDS_$(LLVM_COMPILER_UNSUITABLE_FOR_DEBUG_BUILDS)); -LLVM_COMPILER_SUITABLE_FOR_DEBUG_BUILDS_ = YES; -LLVM_COMPILER_SUITABLE_FOR_DEBUG_BUILDS_YES = NO; - -LLVM_COMPILER_SUITABLE_FOR_OPTIMIZED_BUILDS = $(LLVM_COMPILER_SUITABLE_FOR_OPTIMIZED_BUILDS_$(LLVM_COMPILER_UNSUITABLE_FOR_OPTIMIZED_BUILDS)); -LLVM_COMPILER_SUITABLE_FOR_OPTIMIZED_BUILDS_ = YES; -LLVM_COMPILER_SUITABLE_FOR_OPTIMIZED_BUILDS_YES = NO; - - -// Use GCC 4.2 with Xcode 3.1, which includes GCC 4.2 but defaults to GCC 4.0. -// Note that Xcode versions as new as 3.1.2 use XCODE_VERSION_ACTUAL for the minor version -// number. Newer versions of Xcode use XCODE_VERSION_MINOR for the minor version, and -// XCODE_VERSION_ACTUAL for the full version number. -TARGET_GCC_VERSION = $(TARGET_GCC_VERSION_$(REAL_PLATFORM_NAME)); -TARGET_GCC_VERSION_iphoneos = LLVM_COMPILER; -TARGET_GCC_VERSION_iphonesimulator = $(TARGET_GCC_VERSION_iphoneos); -TARGET_GCC_VERSION_macosx = $(TARGET_GCC_VERSION_macosx_$(TARGET_MAC_OS_X_VERSION_MAJOR)); - -TARGET_GCC_VERSION_macosx_1050 = $(TARGET_GCC_VERSION_macosx_1050_$(XCODE_VERSION_MINOR)); -TARGET_GCC_VERSION_macosx_1050_ = $(TARGET_GCC_VERSION_macosx_1050_$(XCODE_VERSION_ACTUAL)); -TARGET_GCC_VERSION_macosx_1050_0310 = GCC_42; -TARGET_GCC_VERSION_macosx_1050_0320 = GCC_42; - -TARGET_GCC_VERSION_macosx_1060 = $(TARGET_GCC_VERSION_macosx_1060_AND_1070_$(CONFIGURATION)); -TARGET_GCC_VERSION_macosx_1070 = $(TARGET_GCC_VERSION_macosx_1060_AND_1070_$(CONFIGURATION)); -TARGET_GCC_VERSION_macosx_1060_AND_1070_Debug = $(TARGET_GCC_VERSION_macosx_USE_LLVM_COMPILER_$(LLVM_COMPILER_SUITABLE_FOR_DEBUG_BUILDS)); -TARGET_GCC_VERSION_macosx_1060_AND_1070_Release = $(TARGET_GCC_VERSION_macosx_USE_LLVM_COMPILER_$(LLVM_COMPILER_SUITABLE_FOR_OPTIMIZED_BUILDS)); -TARGET_GCC_VERSION_macosx_1060_AND_1070_Production = $(TARGET_GCC_VERSION_macosx_USE_LLVM_COMPILER_$(LLVM_COMPILER_SUITABLE_FOR_OPTIMIZED_BUILDS)); - -TARGET_GCC_VERSION_macosx_USE_LLVM_COMPILER_YES = LLVM_COMPILER; -TARGET_GCC_VERSION_macosx_USE_LLVM_COMPILER_NO = GCC_42; - -TARGET_GCC_VERSION_macosx_1080 = LLVM_COMPILER; -TARGET_GCC_VERSION_macosx_1090 = LLVM_COMPILER; - -GCC_VERSION = $(GCC_VERSION_$(TARGET_GCC_VERSION)); -GCC_VERSION_GCC_40 = 4.0; -GCC_VERSION_GCC_42 = 4.2; -GCC_VERSION_LLVM_GCC_42 = com.apple.compilers.llvmgcc42; -GCC_VERSION_LLVM_COMPILER = com.apple.compilers.llvm.clang.1_0; diff --git a/Source/JavaScriptCore/Configurations/DebugRelease.xcconfig b/Source/JavaScriptCore/Configurations/DebugRelease.xcconfig index 5fb574367..bf2c8d19a 100644 --- a/Source/JavaScriptCore/Configurations/DebugRelease.xcconfig +++ b/Source/JavaScriptCore/Configurations/DebugRelease.xcconfig @@ -23,24 +23,17 @@ #include "Base.xcconfig" -ARCHS = $(ARCHS_$(REAL_PLATFORM_NAME)); +ARCHS = $(ARCHS_$(PLATFORM_NAME)); ARCHS_iphoneos = $(ARCHS_UNIVERSAL_IPHONE_OS); ARCHS_iphonesimulator = $(NATIVE_ARCH); -ARCHS_macosx = $(ARCHS_macosx_$(TARGET_MAC_OS_X_VERSION_MAJOR)); -ARCHS_macosx_1050 = $(NATIVE_ARCH); -ARCHS_macosx_1060 = $(ARCHS_STANDARD_32_64_BIT); -ARCHS_macosx_1070 = 
$(ARCHS_STANDARD_32_64_BIT); -ARCHS_macosx_1080 = $(ARCHS_STANDARD_32_64_BIT); -ARCHS_macosx_1090 = $(ARCHS_STANDARD_32_64_BIT); +ARCHS_macosx = $(ARCHS_STANDARD_32_64_BIT); ONLY_ACTIVE_ARCH = YES; -MACOSX_DEPLOYMENT_TARGET = $(MACOSX_DEPLOYMENT_TARGET_$(REAL_PLATFORM_NAME)); +MACOSX_DEPLOYMENT_TARGET = $(MACOSX_DEPLOYMENT_TARGET_$(PLATFORM_NAME)); MACOSX_DEPLOYMENT_TARGET_iphoneos = 10.5; MACOSX_DEPLOYMENT_TARGET_iphonesimulator = 10.5; MACOSX_DEPLOYMENT_TARGET_macosx = $(MACOSX_DEPLOYMENT_TARGET_macosx_$(TARGET_MAC_OS_X_VERSION_MAJOR)); -MACOSX_DEPLOYMENT_TARGET_macosx_1050 = 10.5; -MACOSX_DEPLOYMENT_TARGET_macosx_1060 = 10.6; MACOSX_DEPLOYMENT_TARGET_macosx_1070 = 10.7; MACOSX_DEPLOYMENT_TARGET_macosx_1080 = 10.8; MACOSX_DEPLOYMENT_TARGET_macosx_1090 = 10.9; diff --git a/Source/JavaScriptCore/Configurations/FeatureDefines.xcconfig b/Source/JavaScriptCore/Configurations/FeatureDefines.xcconfig index 79a458eca..a4f8ca0c0 100644 --- a/Source/JavaScriptCore/Configurations/FeatureDefines.xcconfig +++ b/Source/JavaScriptCore/Configurations/FeatureDefines.xcconfig @@ -37,6 +37,9 @@ ENABLE_ANIMATION_API = ; ENABLE_BLOB = ENABLE_BLOB; ENABLE_CHANNEL_MESSAGING = ENABLE_CHANNEL_MESSAGING; ENABLE_CSP_NEXT = ; +ENABLE_CSS_BOX_DECORATION_BREAK = ENABLE_CSS_BOX_DECORATION_BREAK; +ENABLE_CSS_COMPOSITING = ENABLE_CSS_COMPOSITING; +ENABLE_CSS_DEVICE_ADAPTATION = ; ENABLE_CSS_EXCLUSIONS = ENABLE_CSS_EXCLUSIONS; ENABLE_CSS_FILTERS = ENABLE_CSS_FILTERS; ENABLE_CSS_HIERARCHIES = ; @@ -44,13 +47,12 @@ ENABLE_CSS_IMAGE_ORIENTATION = ; ENABLE_CSS_IMAGE_RESOLUTION = ; ENABLE_CSS_REGIONS = ENABLE_CSS_REGIONS; ENABLE_CSS_SHADERS = ENABLE_CSS_SHADERS; -ENABLE_CSS_COMPOSITING = ENABLE_CSS_COMPOSITING; ENABLE_CSS_STICKY_POSITION = ENABLE_CSS_STICKY_POSITION; ENABLE_CSS_VARIABLES = ; ENABLE_CSS3_CONDITIONAL_RULES = ; ENABLE_CSS3_TEXT = ; ENABLE_CUSTOM_SCHEME_HANDLER = ; -ENABLE_DASHBOARD_SUPPORT = $(ENABLE_DASHBOARD_SUPPORT_$(REAL_PLATFORM_NAME)); +ENABLE_DASHBOARD_SUPPORT = $(ENABLE_DASHBOARD_SUPPORT_$(PLATFORM_NAME)); ENABLE_DASHBOARD_SUPPORT_macosx = ENABLE_DASHBOARD_SUPPORT; ENABLE_DATALIST_ELEMENT = ; ENABLE_DATA_TRANSFER_ITEMS = ; @@ -59,47 +61,46 @@ ENABLE_DEVICE_ORIENTATION = ; ENABLE_DIALOG_ELEMENT = ; ENABLE_DIRECTORY_UPLOAD = ; ENABLE_DRAGGABLE_REGION = ; -ENABLE_ENCRYPTED_MEDIA = $(ENABLE_ENCRYPTED_MEDIA_$(REAL_PLATFORM_NAME)); +ENABLE_ENCRYPTED_MEDIA = $(ENABLE_ENCRYPTED_MEDIA_$(PLATFORM_NAME)); ENABLE_ENCRYPTED_MEDIA_macosx = $(ENABLE_ENCRYPTED_MEDIA_macosx_$(TARGET_MAC_OS_X_VERSION_MAJOR)); ENABLE_ENCRYPTED_MEDIA_macosx_1070 = ; ENABLE_ENCRYPTED_MEDIA_macosx_1080 = ; ENABLE_ENCRYPTED_MEDIA_macosx_1090 = ENABLE_ENCRYPTED_MEDIA; ENABLE_FILE_SYSTEM = ; -ENABLE_FILTERS = $(ENABLE_FILTERS_$(REAL_PLATFORM_NAME)); -ENABLE_FILTERS_macosx = ENABLE_FILTERS; +ENABLE_FILTERS = ENABLE_FILTERS; ENABLE_FULLSCREEN_API = ENABLE_FULLSCREEN_API; ENABLE_GAMEPAD = ; ENABLE_GEOLOCATION = ENABLE_GEOLOCATION; -ENABLE_HIDDEN_PAGE_DOM_TIMER_THROTTLING = $(HIDDEN_PAGE_DOM_TIMER_THROTTLING_$(REAL_PLATFORM_NAME)); -ENABLE_HIDDEN_PAGE_DOM_TIMER_THROTTLING_macosx = HIDDEN_PAGE_DOM_TIMER_THROTTLING; +ENABLE_HIDDEN_PAGE_DOM_TIMER_THROTTLING = $(ENABLE_HIDDEN_PAGE_DOM_TIMER_THROTTLING_$(PLATFORM_NAME)); +ENABLE_HIDDEN_PAGE_DOM_TIMER_THROTTLING_macosx = ENABLE_HIDDEN_PAGE_DOM_TIMER_THROTTLING; ENABLE_HIGH_DPI_CANVAS = ENABLE_HIGH_DPI_CANVAS; -ENABLE_ICONDATABASE = $(ENABLE_ICONDATABASE_$(REAL_PLATFORM_NAME)); +ENABLE_ICONDATABASE = $(ENABLE_ICONDATABASE_$(PLATFORM_NAME)); ENABLE_ICONDATABASE_macosx = ENABLE_ICONDATABASE; 
ENABLE_IFRAME_SEAMLESS = ENABLE_IFRAME_SEAMLESS; ENABLE_INDEXED_DATABASE = ; ENABLE_INPUT_SPEECH = ; ENABLE_INPUT_TYPE_COLOR = ; -ENABLE_INPUT_TYPE_DATE = $(ENABLE_INPUT_TYPE_DATE_$(REAL_PLATFORM_NAME)); +ENABLE_INPUT_TYPE_DATE = $(ENABLE_INPUT_TYPE_DATE_$(PLATFORM_NAME)); ENABLE_INPUT_TYPE_DATE_iphoneos = ENABLE_INPUT_TYPE_DATE; -ENABLE_INPUT_TYPE_DATE_iphonesimulator = ENABLE_INPUT_TYPE_DATE; -ENABLE_INPUT_TYPE_DATETIME = $(ENABLE_INPUT_TYPE_DATETIME_$(REAL_PLATFORM_NAME)); +ENABLE_INPUT_TYPE_DATE_iphonesimulator = $(ENABLE_INPUT_TYPE_DATE_iphoneos); +ENABLE_INPUT_TYPE_DATETIME = $(ENABLE_INPUT_TYPE_DATETIME_$(PLATFORM_NAME)); ENABLE_INPUT_TYPE_DATETIME_iphoneos = ENABLE_INPUT_TYPE_DATETIME; -ENABLE_INPUT_TYPE_DATETIME_iphonesimulator = ENABLE_INPUT_TYPE_DATETIME; -ENABLE_INPUT_TYPE_DATETIMELOCAL = $(ENABLE_INPUT_TYPE_DATETIMELOCAL_$(REAL_PLATFORM_NAME)); +ENABLE_INPUT_TYPE_DATETIME_iphonesimulator = $(ENABLE_INPUT_TYPE_DATETIME_iphoneos); +ENABLE_INPUT_TYPE_DATETIMELOCAL = $(ENABLE_INPUT_TYPE_DATETIMELOCAL_$(PLATFORM_NAME)); ENABLE_INPUT_TYPE_DATETIMELOCAL_iphoneos = ENABLE_INPUT_TYPE_DATETIMELOCAL; -ENABLE_INPUT_TYPE_DATETIMELOCAL_iphonesimulator = ENABLE_INPUT_TYPE_DATETIMELOCAL; -ENABLE_INPUT_TYPE_MONTH = $(ENABLE_INPUT_TYPE_MONTH_$(REAL_PLATFORM_NAME)); +ENABLE_INPUT_TYPE_DATETIMELOCAL_iphonesimulator = $(ENABLE_INPUT_TYPE_DATETIMELOCAL_iphoneos); +ENABLE_INPUT_TYPE_MONTH = $(ENABLE_INPUT_TYPE_MONTH_$(PLATFORM_NAME)); ENABLE_INPUT_TYPE_MONTH_iphoneos = ENABLE_INPUT_TYPE_MONTH; -ENABLE_INPUT_TYPE_MONTH_iphonesimulator = ENABLE_INPUT_TYPE_MONTH; -ENABLE_INPUT_TYPE_TIME = $(ENABLE_INPUT_TYPE_TIME_$(REAL_PLATFORM_NAME)); +ENABLE_INPUT_TYPE_MONTH_iphonesimulator = $(ENABLE_INPUT_TYPE_MONTH_iphoneos); +ENABLE_INPUT_TYPE_TIME = $(ENABLE_INPUT_TYPE_TIME_$(PLATFORM_NAME)); ENABLE_INPUT_TYPE_TIME_iphoneos = ENABLE_INPUT_TYPE_TIME; -ENABLE_INPUT_TYPE_TIME_iphonesimulator = ENABLE_INPUT_TYPE_TIME; -ENABLE_INPUT_TYPE_WEEK = $(ENABLE_INPUT_TYPE_WEEK_$(REAL_PLATFORM_NAME)); +ENABLE_INPUT_TYPE_TIME_iphonesimulator = $(ENABLE_INPUT_TYPE_TIME_iphoneos); +ENABLE_INPUT_TYPE_WEEK = $(ENABLE_INPUT_TYPE_WEEK_$(PLATFORM_NAME)); ENABLE_INPUT_TYPE_WEEK_iphoneos = ENABLE_INPUT_TYPE_WEEK; -ENABLE_INPUT_TYPE_WEEK_iphonesimulator = ENABLE_INPUT_TYPE_WEEK; +ENABLE_INPUT_TYPE_WEEK_iphonesimulator = $(ENABLE_INPUT_TYPE_WEEK_iphoneos); ENABLE_JAVASCRIPT_DEBUGGER = ENABLE_JAVASCRIPT_DEBUGGER; ENABLE_LEGACY_CSS_VENDOR_PREFIXES = ENABLE_LEGACY_CSS_VENDOR_PREFIXES; -ENABLE_LEGACY_NOTIFICATIONS = $(ENABLE_LEGACY_NOTIFICATIONS_$(REAL_PLATFORM_NAME)); +ENABLE_LEGACY_NOTIFICATIONS = $(ENABLE_LEGACY_NOTIFICATIONS_$(PLATFORM_NAME)); ENABLE_LEGACY_NOTIFICATIONS_macosx = $(ENABLE_LEGACY_NOTIFICATIONS_macosx_$(TARGET_MAC_OS_X_VERSION_MAJOR)); ENABLE_LEGACY_NOTIFICATIONS_macosx_1070 = ; ENABLE_LEGACY_NOTIFICATIONS_macosx_1080 = ENABLE_LEGACY_NOTIFICATIONS; @@ -116,36 +117,44 @@ ENABLE_MHTML = ; ENABLE_MICRODATA = ; ENABLE_MUTATION_OBSERVERS = ENABLE_MUTATION_OBSERVERS; ENABLE_NAVIGATOR_CONTENT_UTILS = ; -ENABLE_NOTIFICATIONS = $(ENABLE_NOTIFICATIONS_$(REAL_PLATFORM_NAME)); +ENABLE_NOTIFICATIONS = $(ENABLE_NOTIFICATIONS_$(PLATFORM_NAME)); ENABLE_NOTIFICATIONS_macosx = $(ENABLE_NOTIFICATIONS_macosx_$(TARGET_MAC_OS_X_VERSION_MAJOR)); ENABLE_NOTIFICATIONS_macosx_1070 = ; ENABLE_NOTIFICATIONS_macosx_1080 = ENABLE_NOTIFICATIONS; ENABLE_NOTIFICATIONS_macosx_1090 = ENABLE_NOTIFICATIONS; ENABLE_PAGE_VISIBILITY_API = ; +ENABLE_PDFKIT_PLUGIN = $(ENABLE_PDFKIT_PLUGIN_$(PLATFORM_NAME)); +ENABLE_PDFKIT_PLUGIN_macosx = 
$(ENABLE_PDFKIT_PLUGIN_macosx_$(TARGET_MAC_OS_X_VERSION_MAJOR)); +ENABLE_PDFKIT_PLUGIN_macosx_1070 = ; +ENABLE_PDFKIT_PLUGIN_macosx_1080 = ; +ENABLE_PDFKIT_PLUGIN_macosx_1090 = ENABLE_PDFKIT_PLUGIN; ENABLE_PROGRESS_ELEMENT = ENABLE_PROGRESS_ELEMENT; ENABLE_QUOTA = ; ENABLE_REQUEST_ANIMATION_FRAME = ENABLE_REQUEST_ANIMATION_FRAME; +ENABLE_RESOLUTION_MEDIA_QUERY = ; ENABLE_SCRIPTED_SPEECH = ; ENABLE_SHADOW_DOM = ; ENABLE_SHARED_WORKERS = ENABLE_SHARED_WORKERS; ENABLE_SQL_DATABASE = ENABLE_SQL_DATABASE; ENABLE_STYLE_SCOPED = ; +ENABLE_SUBPIXEL_LAYOUT = ENABLE_SUBPIXEL_LAYOUT; ENABLE_SVG = ENABLE_SVG; -ENABLE_SVG_DOM_OBJC_BINDINGS = $(ENABLE_SVG_DOM_OBJC_BINDINGS_$(REAL_PLATFORM_NAME)); +ENABLE_SVG_DOM_OBJC_BINDINGS = $(ENABLE_SVG_DOM_OBJC_BINDINGS_$(PLATFORM_NAME)); ENABLE_SVG_DOM_OBJC_BINDINGS_macosx = ENABLE_SVG_DOM_OBJC_BINDINGS; ENABLE_SVG_FONTS = ENABLE_SVG_FONTS; ENABLE_TEXT_AUTOSIZING = ; ENABLE_TEXT_NOTIFICATIONS_ONLY = ENABLE_TEXT_NOTIFICATIONS_ONLY; ENABLE_TOUCH_ICON_LOADING = ; -ENABLE_UNDO_MANAGER = ; +ENABLE_USERSELECT_ALL = ENABLE_USERSELECT_ALL; ENABLE_VIDEO = ENABLE_VIDEO; -ENABLE_VIDEO_TRACK = $(ENABLE_VIDEO_TRACK_$(REAL_PLATFORM_NAME)); +ENABLE_VIDEO_TRACK = $(ENABLE_VIDEO_TRACK_$(PLATFORM_NAME)); ENABLE_VIDEO_TRACK_macosx = ENABLE_VIDEO_TRACK; ENABLE_WEBGL = ENABLE_WEBGL; ENABLE_WEB_AUDIO = ENABLE_WEB_AUDIO; ENABLE_WEB_SOCKETS = ENABLE_WEB_SOCKETS; ENABLE_WEB_TIMING = ; ENABLE_WORKERS = ENABLE_WORKERS; +ENABLE_XHR_TIMEOUT = ENABLE_XHR_TIMEOUT; ENABLE_XSLT = ENABLE_XSLT; -FEATURE_DEFINES = $(ENABLE_3D_RENDERING) $(ENABLE_ACCELERATED_2D_CANVAS) $(ENABLE_ANIMATION_API) $(ENABLE_BLOB) $(ENABLE_CHANNEL_MESSAGING) $(ENABLE_CSP_NEXT) $(ENABLE_CSS_BOX_DECORATION_BREAK) $(ENABLE_CSS_EXCLUSIONS) $(ENABLE_CSS_FILTERS) $(ENABLE_CSS_HIERARCHIES) $(ENABLE_CSS_IMAGE_ORIENTATION) $(ENABLE_CSS_IMAGE_RESOLUTION) $(ENABLE_CSS_REGIONS) $(ENABLE_CSS_SHADERS) $(ENABLE_CSS_COMPOSITING) $(ENABLE_CSS_STICKY_POSITION) $(ENABLE_CSS_VARIABLES) $(ENABLE_CSS3_CONDITIONAL_RULES) $(ENABLE_CSS3_TEXT) $(ENABLE_CUSTOM_SCHEME_HANDLER) $(ENABLE_DASHBOARD_SUPPORT) $(ENABLE_DATALIST_ELEMENT) $(ENABLE_DATA_TRANSFER_ITEMS) $(ENABLE_DETAILS_ELEMENT) $(ENABLE_DEVICE_ORIENTATION) $(ENABLE_DIALOG_ELEMENT) $(ENABLE_DIRECTORY_UPLOAD) $(ENABLE_DRAGGABLE_REGION) $(ENABLE_ENCRYPTED_MEDIA) $(ENABLE_FILE_SYSTEM) $(ENABLE_FILTERS) $(ENABLE_FULLSCREEN_API) $(ENABLE_GAMEPAD) $(ENABLE_GEOLOCATION) $(ENABLE_HIDDEN_PAGE_DOM_TIMER_THROTTLING) $(ENABLE_HIGH_DPI_CANVAS) $(ENABLE_ICONDATABASE) $(ENABLE_IFRAME_SEAMLESS) $(ENABLE_INDEXED_DATABASE) $(ENABLE_INPUT_SPEECH) $(ENABLE_INPUT_TYPE_COLOR) $(ENABLE_INPUT_TYPE_DATE) $(ENABLE_INPUT_TYPE_DATETIME) $(ENABLE_INPUT_TYPE_DATETIMELOCAL) $(ENABLE_INPUT_TYPE_MONTH) $(ENABLE_INPUT_TYPE_TIME) $(ENABLE_INPUT_TYPE_WEEK) $(ENABLE_JAVASCRIPT_DEBUGGER) $(ENABLE_LEGACY_CSS_VENDOR_PREFIXES) $(ENABLE_LEGACY_NOTIFICATIONS) $(ENABLE_LEGACY_VENDOR_PREFIXES) $(ENABLE_LEGACY_WEB_AUDIO) $(ENABLE_LINK_PREFETCH) $(ENABLE_LINK_PRERENDER) $(ENABLE_MATHML) $(ENABLE_MEDIA_SOURCE) $(ENABLE_MEDIA_STATISTICS) $(ENABLE_METER_ELEMENT) $(ENABLE_MHTML) $(ENABLE_MICRODATA) $(ENABLE_MUTATION_OBSERVERS) $(ENABLE_NAVIGATOR_CONTENT_UTILS) $(ENABLE_NOTIFICATIONS) $(ENABLE_PAGE_VISIBILITY_API) $(ENABLE_PROGRESS_ELEMENT) $(ENABLE_QUOTA) $(ENABLE_REQUEST_ANIMATION_FRAME) $(ENABLE_SCRIPTED_SPEECH) $(ENABLE_SHADOW_DOM) $(ENABLE_SHARED_WORKERS) $(ENABLE_SQL_DATABASE) $(ENABLE_STYLE_SCOPED) $(ENABLE_SVG) $(ENABLE_SVG_DOM_OBJC_BINDINGS) $(ENABLE_SVG_FONTS) $(ENABLE_TEXT_AUTOSIZING) $(ENABLE_TEXT_NOTIFICATIONS_ONLY) 
$(ENABLE_TOUCH_ICON_LOADING) $(ENABLE_UNDO_MANAGER) $(ENABLE_VIDEO) $(ENABLE_VIDEO_TRACK) $(ENABLE_WEBGL) $(ENABLE_WEB_AUDIO) $(ENABLE_WEB_SOCKETS) $(ENABLE_WEB_TIMING) $(ENABLE_WORKERS) $(ENABLE_XSLT); +FEATURE_DEFINES = $(ENABLE_3D_RENDERING) $(ENABLE_ACCELERATED_2D_CANVAS) $(ENABLE_ANIMATION_API) $(ENABLE_BLOB) $(ENABLE_CHANNEL_MESSAGING) $(ENABLE_CSP_NEXT) $(ENABLE_CSS_BOX_DECORATION_BREAK) $(ENABLE_CSS_DEVICE_ADAPTATION) $(ENABLE_CSS_EXCLUSIONS) $(ENABLE_CSS_FILTERS) $(ENABLE_CSS_HIERARCHIES) $(ENABLE_CSS_IMAGE_ORIENTATION) $(ENABLE_CSS_IMAGE_RESOLUTION) $(ENABLE_CSS_REGIONS) $(ENABLE_CSS_SHADERS) $(ENABLE_CSS_COMPOSITING) $(ENABLE_CSS_STICKY_POSITION) $(ENABLE_CSS_VARIABLES) $(ENABLE_CSS3_CONDITIONAL_RULES) $(ENABLE_CSS3_TEXT) $(ENABLE_CUSTOM_SCHEME_HANDLER) $(ENABLE_DASHBOARD_SUPPORT) $(ENABLE_DATALIST_ELEMENT) $(ENABLE_DATA_TRANSFER_ITEMS) $(ENABLE_DETAILS_ELEMENT) $(ENABLE_DEVICE_ORIENTATION) $(ENABLE_DIALOG_ELEMENT) $(ENABLE_DIRECTORY_UPLOAD) $(ENABLE_DRAGGABLE_REGION) $(ENABLE_ENCRYPTED_MEDIA) $(ENABLE_FILE_SYSTEM) $(ENABLE_FILTERS) $(ENABLE_FULLSCREEN_API) $(ENABLE_GAMEPAD) $(ENABLE_GEOLOCATION) $(ENABLE_HIDDEN_PAGE_DOM_TIMER_THROTTLING) $(ENABLE_HIGH_DPI_CANVAS) $(ENABLE_ICONDATABASE) $(ENABLE_IFRAME_SEAMLESS) $(ENABLE_INDEXED_DATABASE) $(ENABLE_INPUT_SPEECH) $(ENABLE_INPUT_TYPE_COLOR) $(ENABLE_INPUT_TYPE_DATE) $(ENABLE_INPUT_TYPE_DATETIME) $(ENABLE_INPUT_TYPE_DATETIMELOCAL) $(ENABLE_INPUT_TYPE_MONTH) $(ENABLE_INPUT_TYPE_TIME) $(ENABLE_INPUT_TYPE_WEEK) $(ENABLE_JAVASCRIPT_DEBUGGER) $(ENABLE_LEGACY_CSS_VENDOR_PREFIXES) $(ENABLE_LEGACY_NOTIFICATIONS) $(ENABLE_LEGACY_VENDOR_PREFIXES) $(ENABLE_LEGACY_WEB_AUDIO) $(ENABLE_LINK_PREFETCH) $(ENABLE_LINK_PRERENDER) $(ENABLE_MATHML) $(ENABLE_MEDIA_SOURCE) $(ENABLE_MEDIA_STATISTICS) $(ENABLE_METER_ELEMENT) $(ENABLE_MHTML) $(ENABLE_MICRODATA) $(ENABLE_MUTATION_OBSERVERS) $(ENABLE_NAVIGATOR_CONTENT_UTILS) $(ENABLE_NOTIFICATIONS) $(ENABLE_PAGE_VISIBILITY_API) $(ENABLE_PDFKIT_PLUGIN) $(ENABLE_PROGRESS_ELEMENT) $(ENABLE_QUOTA) $(ENABLE_REQUEST_ANIMATION_FRAME) $(ENABLE_RESOLUTION_MEDIA_QUERY) $(ENABLE_SCRIPTED_SPEECH) $(ENABLE_SHADOW_DOM) $(ENABLE_SHARED_WORKERS) $(ENABLE_SQL_DATABASE) $(ENABLE_STYLE_SCOPED) $(ENABLE_SUBPIXEL_LAYOUT) $(ENABLE_SVG) $(ENABLE_SVG_DOM_OBJC_BINDINGS) $(ENABLE_SVG_FONTS) $(ENABLE_TEXT_AUTOSIZING) $(ENABLE_TEXT_NOTIFICATIONS_ONLY) $(ENABLE_TOUCH_ICON_LOADING) $(ENABLE_USERSELECT_ALL) $(ENABLE_VIDEO) $(ENABLE_VIDEO_TRACK) $(ENABLE_WEBGL) $(ENABLE_WEB_AUDIO) $(ENABLE_WEB_SOCKETS) $(ENABLE_WEB_TIMING) $(ENABLE_WORKERS) $(ENABLE_XHR_TIMEOUT) $(ENABLE_XSLT); diff --git a/Source/JavaScriptCore/Configurations/JSC.xcconfig b/Source/JavaScriptCore/Configurations/JSC.xcconfig index 8330d3f27..d596596e8 100644 --- a/Source/JavaScriptCore/Configurations/JSC.xcconfig +++ b/Source/JavaScriptCore/Configurations/JSC.xcconfig @@ -23,5 +23,5 @@ INSTALL_PATH = $(JAVASCRIPTCORE_FRAMEWORKS_DIR)/JavaScriptCore.framework/Versions/A/Resources PRODUCT_NAME = jsc; -CODE_SIGN_ENTITLEMENTS = $(CODE_SIGN_ENTITLEMENTS_$(REAL_PLATFORM_NAME)); +CODE_SIGN_ENTITLEMENTS = $(CODE_SIGN_ENTITLEMENTS_$(PLATFORM_NAME)); CODE_SIGN_ENTITLEMENTS_iphoneos = entitlements.plist; diff --git a/Source/JavaScriptCore/Configurations/JavaScriptCore.xcconfig b/Source/JavaScriptCore/Configurations/JavaScriptCore.xcconfig index 28ce60770..d5101d3b0 100644 --- a/Source/JavaScriptCore/Configurations/JavaScriptCore.xcconfig +++ b/Source/JavaScriptCore/Configurations/JavaScriptCore.xcconfig @@ -30,19 +30,15 @@ JSVALUE_MODEL_armv6 = 32_64; JSVALUE_MODEL_armv7 = 32_64; 
JSVALUE_MODEL_i386 = 32_64; JSVALUE_MODEL_ppc = 32_64; -JSVALUE_MODEL_ppc64 = 64; JSVALUE_MODEL_x86_64 = 64; // Prevent C++ standard library operator new, delete and their related exception types from being exported as weak symbols. OTHER_LDFLAGS_HIDE_SYMBOLS = -Wl,-unexported_symbol -Wl,__ZTISt9bad_alloc -Wl,-unexported_symbol -Wl,__ZTISt9exception -Wl,-unexported_symbol -Wl,__ZTSSt9bad_alloc -Wl,-unexported_symbol -Wl,__ZTSSt9exception -Wl,-unexported_symbol -Wl,__ZdlPvS_ -Wl,-unexported_symbol -Wl,__ZnwmPv -Wl,-all_load; OTHER_LDFLAGS_BASE = -lobjc -Wl,-Y,3 $(OTHER_LDFLAGS_HIDE_SYMBOLS); -OTHER_LDFLAGS = $(OTHER_LDFLAGS_$(REAL_PLATFORM_NAME)); +OTHER_LDFLAGS = $(OTHER_LDFLAGS_$(PLATFORM_NAME)); OTHER_LDFLAGS_iphoneos = $(OTHER_LDFLAGS_BASE); OTHER_LDFLAGS_iphonesimulator = $(OTHER_LDFLAGS_iphoneos); -OTHER_LDFLAGS_macosx = $(OTHER_LDFLAGS_BASE) -sub_library libobjc -framework CoreServices $(OTHER_LDFLAGS_macosx_$(TARGET_MAC_OS_X_VERSION_MAJOR)); -OTHER_LDFLAGS_macosx_1070 = -Xlinker -objc_gc_compaction; -OTHER_LDFLAGS_macosx_1080 = $(OTHER_LDFLAGS_macosx_1070); -OTHER_LDFLAGS_macosx_1090 = $(OTHER_LDFLAGS_macosx_1070); +OTHER_LDFLAGS_macosx = $(OTHER_LDFLAGS_BASE) -sub_library libobjc -framework CoreServices; GCC_PREFIX_HEADER = JavaScriptCorePrefix.h; GCC_SYMBOLS_PRIVATE_EXTERN = YES; HEADER_SEARCH_PATHS = "${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore" $(HEADER_SEARCH_PATHS); diff --git a/Source/JavaScriptCore/Configurations/ToolExecutable.xcconfig b/Source/JavaScriptCore/Configurations/ToolExecutable.xcconfig index 65f9ad5ac..bd733d4a2 100644 --- a/Source/JavaScriptCore/Configurations/ToolExecutable.xcconfig +++ b/Source/JavaScriptCore/Configurations/ToolExecutable.xcconfig @@ -21,7 +21,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -INSTALL_PATH = $(INSTALL_PATH_$(REAL_PLATFORM_NAME)); +INSTALL_PATH = $(INSTALL_PATH_$(PLATFORM_NAME)); INSTALL_PATH_iphoneos = $(JAVASCRIPTCORE_FRAMEWORKS_DIR)/JavaScriptCore.framework/Resources; INSTALL_PATH_iphonesimulator = $(INSTALL_PATH_iphoneos); INSTALL_PATH_macosx = $(JAVASCRIPTCORE_FRAMEWORKS_DIR)/JavaScriptCore.framework/Versions/A/Resources; diff --git a/Source/JavaScriptCore/Configurations/Version.xcconfig b/Source/JavaScriptCore/Configurations/Version.xcconfig index f0f18d9e1..3d24fe85a 100644 --- a/Source/JavaScriptCore/Configurations/Version.xcconfig +++ b/Source/JavaScriptCore/Configurations/Version.xcconfig @@ -22,7 +22,7 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. MAJOR_VERSION = 537; -MINOR_VERSION = 16; +MINOR_VERSION = 18; TINY_VERSION = 0; FULL_VERSION = $(MAJOR_VERSION).$(MINOR_VERSION); @@ -32,8 +32,6 @@ SHORT_VERSION_STRING = $(SHORT_VERSION_STRING_$(CONFIGURATION)) // The system version prefix is based on the current system version. 
SYSTEM_VERSION_PREFIX = $(SYSTEM_VERSION_PREFIX_$(TARGET_MAC_OS_X_VERSION_MAJOR)); -SYSTEM_VERSION_PREFIX_1050 = 5; -SYSTEM_VERSION_PREFIX_1060 = 6; SYSTEM_VERSION_PREFIX_1070 = 7; SYSTEM_VERSION_PREFIX_1080 = 8; SYSTEM_VERSION_PREFIX_1090 = 9; diff --git a/Source/JavaScriptCore/DerivedSources.pri b/Source/JavaScriptCore/DerivedSources.pri index cf44aefe1..03a935575 100644 --- a/Source/JavaScriptCore/DerivedSources.pri +++ b/Source/JavaScriptCore/DerivedSources.pri @@ -40,6 +40,13 @@ LLINT_DEPENDENCY = \ $$PWD/llint/LowLevelInterpreter64.asm \ $$LLINT_ASSEMBLER +DISASSEMBLER_FILES = \ + disassembler/udis86/optable.xml + +DISASSEMBLER_DEPENDENCY = \ + $$PWD/disassembler/udis86/ud_opcode.py \ + $$PWD/disassembler/udis86/ud_optable.py + # GENERATOR 1-A: LUT creator lut.output = ${QMAKE_FILE_BASE}.lut.h lut.input = LUT_FILES @@ -88,7 +95,7 @@ klgen.input = KEYWORDLUT_FILES klgen.commands = python $$klgen.script ${QMAKE_FILE_NAME} > ${QMAKE_FILE_OUT} GENERATORS += klgen -EXTRACTOR_BINARY = LLIntOffsetsExtractor$$EXEEXT +EXTRACTOR_BINARY = LLIntOffsetsExtractor$$BIN_EXTENSION DIRS = $$OUT_PWD $$OUT_PWD/debug $$OUT_PWD/release for(dir, DIRS) { file = $$dir/$$EXTRACTOR_BINARY @@ -104,3 +111,14 @@ if(linux-*|win32):!equals(QT_ARCH, "arm") { llint.commands = ruby $$llint.script $$LLINT_ASSEMBLER ${QMAKE_FILE_IN} ${QMAKE_FILE_OUT} GENERATORS += llint } + +linux-*:if(isEqual(QT_ARCH, "i386")|isEqual(QT_ARCH, "x86_64")) { + # GENERATOR: disassembler + disassembler.output = udis86_itab.c + disassembler.input = DISASSEMBLER_FILES + disassembler.script = $$PWD/disassembler/udis86/itab.py + disassembler.depends = $$DISASSEMBLER_DEPENDENCY + disassembler.commands = python $$disassembler.script ${QMAKE_FILE_NAME} --outputDir ${QMAKE_FUNC_FILE_OUT_PATH} + disassembler.CONFIG += no_link + GENERATORS += disassembler +} diff --git a/Source/JavaScriptCore/GNUmakefile.list.am b/Source/JavaScriptCore/GNUmakefile.list.am index 243894d39..d68a22b9f 100644 --- a/Source/JavaScriptCore/GNUmakefile.list.am +++ b/Source/JavaScriptCore/GNUmakefile.list.am @@ -114,10 +114,6 @@ javascriptcore_sources += \ Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp \ Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h \ Source/JavaScriptCore/bytecode/LineInfo.h \ - Source/JavaScriptCore/bytecode/MethodCallLinkInfo.cpp \ - Source/JavaScriptCore/bytecode/MethodCallLinkInfo.h \ - Source/JavaScriptCore/bytecode/MethodCallLinkStatus.cpp \ - Source/JavaScriptCore/bytecode/MethodCallLinkStatus.h \ Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp \ Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h \ Source/JavaScriptCore/bytecode/Opcode.cpp \ @@ -142,6 +138,8 @@ javascriptcore_sources += \ Source/JavaScriptCore/bytecode/StructureStubInfo.h \ Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.cpp \ Source/JavaScriptCore/bytecode/StructureStubClearingWatchpoint.h \ + Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp \ + Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h \ Source/JavaScriptCore/bytecode/ValueProfile.h \ Source/JavaScriptCore/bytecode/ValueRecovery.h \ Source/JavaScriptCore/bytecode/VirtualRegister.h \ @@ -377,6 +375,7 @@ javascriptcore_sources += \ Source/JavaScriptCore/interpreter/Interpreter.h \ Source/JavaScriptCore/interpreter/JSStack.cpp \ Source/JavaScriptCore/interpreter/JSStack.h \ + Source/JavaScriptCore/interpreter/JSStackInlines.h \ Source/JavaScriptCore/interpreter/Register.h \ Source/JavaScriptCore/interpreter/VMInspector.cpp \ 
Source/JavaScriptCore/interpreter/VMInspector.h \ @@ -447,6 +446,7 @@ javascriptcore_sources += \ Source/JavaScriptCore/parser/ParserTokens.h \ Source/JavaScriptCore/parser/Parser.cpp \ Source/JavaScriptCore/parser/Parser.h \ + Source/JavaScriptCore/parser/ParserModes.h \ Source/JavaScriptCore/parser/ResultType.h \ Source/JavaScriptCore/parser/SourceCode.h \ Source/JavaScriptCore/parser/SourceProvider.h \ @@ -487,6 +487,8 @@ javascriptcore_sources += \ Source/JavaScriptCore/runtime/CallData.cpp \ Source/JavaScriptCore/runtime/CallData.h \ Source/JavaScriptCore/runtime/ClassInfo.h \ + Source/JavaScriptCore/runtime/CodeCache.cpp \ + Source/JavaScriptCore/runtime/CodeCache.h \ Source/JavaScriptCore/runtime/CodeSpecializationKind.h \ Source/JavaScriptCore/runtime/CommonIdentifiers.cpp \ Source/JavaScriptCore/runtime/CommonIdentifiers.h \ diff --git a/Source/JavaScriptCore/JavaScriptCore.pri b/Source/JavaScriptCore/JavaScriptCore.pri index fad36c974..d465bcfea 100644 --- a/Source/JavaScriptCore/JavaScriptCore.pri +++ b/Source/JavaScriptCore/JavaScriptCore.pri @@ -38,3 +38,7 @@ wince* { INCLUDEPATH += $$QT.core.sources/../3rdparty/ce-compat INCLUDEPATH += $$SOURCE_DIR/os-win32 } + +linux-*:if(isEqual(QT_ARCH, "i386")|isEqual(QT_ARCH, "x86_64")) { + INCLUDEPATH += $$SOURCE_DIR/disassembler/udis86 +} diff --git a/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj b/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj index a21dcf1e8..b1567e2cd 100644 --- a/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj +++ b/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj @@ -638,6 +638,14 @@ >
</File>
<File
+ RelativePath="..\..\runtime\CodeCache.cpp"
+ >
+ </File>
+ <File
+ RelativePath="..\..\runtime\CodeCache.h"
+ >
+ </File>
+ <File
RelativePath="..\..\runtime\CommonIdentifiers.cpp"
>
</File>
@@ -1678,22 +1686,6 @@
>
</File>
<File
- RelativePath="..\..\bytecode\MethodCallLinkInfo.cpp"
- >
- </File>
- <File
- RelativePath="..\..\bytecode\MethodCallLinkInfo.h"
- >
- </File>
- <File
- RelativePath="..\..\bytecode\MethodCallLinkStatus.cpp"
- >
- </File>
- <File
- RelativePath="..\..\bytecode\MethodCallLinkStatus.h"
- >
- </File>
- <File
RelativePath="..\..\bytecode\MethodOfGettingAValueProfile.cpp"
>
</File>
@@ -1762,6 +1754,14 @@
>
</File>
<File
+ RelativePath="..\..\bytecode\UnlinkedCodeBlock.cpp"
+ >
+ </File>
+ <File
+ RelativePath="..\..\bytecode\UnlinkedCodeBlock.h"
+ >
+ </File>
+ <File
RelativePath="..\..\bytecode\ValueProfile.h"
>
</File>
@@ -2246,6 +2246,10 @@
>
</File>
<File
+ RelativePath="..\..\parser\ParserModes.h"
+ >
+ </File>
+ <File
RelativePath="..\..\parser\ParserArena.cpp"
>
</File>
diff --git a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj b/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj index 1cf109a0b..3cada1cd7 100644 --- a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj +++ b/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj @@ -69,8 +69,6 @@ 0F0B83AD14BCF60400885B4F /* LineInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0B83AC14BCF60200885B4F /* LineInfo.h */; settings = {ATTRIBUTES = (Private, ); }; }; 0F0B83B014BCF71600885B4F /* CallLinkInfo.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F0B83AE14BCF71400885B4F /* CallLinkInfo.cpp */; }; 0F0B83B114BCF71800885B4F /* CallLinkInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0B83AF14BCF71400885B4F /* CallLinkInfo.h */; settings = {ATTRIBUTES = (Private, ); }; }; - 0F0B83B414BCF86000885B4F /* MethodCallLinkInfo.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F0B83B214BCF85E00885B4F /* MethodCallLinkInfo.cpp */; }; - 0F0B83B514BCF86200885B4F /* MethodCallLinkInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0B83B314BCF85E00885B4F /* MethodCallLinkInfo.h */; settings = {ATTRIBUTES = (Private, ); }; }; 0F0B83B914BCF95F00885B4F /* CallReturnOffsetToBytecodeOffset.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0B83B814BCF95B00885B4F /* CallReturnOffsetToBytecodeOffset.h */; settings = {ATTRIBUTES = (Private, ); }; }; 0F0CD4C215F1A6070032F1C0 /* PutDirectIndexMode.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0CD4C015F1A6040032F1C0 /* PutDirectIndexMode.h */; settings = {ATTRIBUTES = (Private, ); }; }; 0F0CD4C415F6B6BB0032F1C0 /* SparseArrayValueMap.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F0CD4C315F6B6B50032F1C0 /* SparseArrayValueMap.cpp */; }; @@ -178,8 +176,6 @@ 0F93329E14CA7DC50085F3C6 /* CallLinkStatus.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F93329414CA7DC10085F3C6 /* CallLinkStatus.h */; settings = {ATTRIBUTES = (Private, ); }; }; 0F93329F14CA7DCA0085F3C6 /* GetByIdStatus.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F93329514CA7DC10085F3C6 /* GetByIdStatus.cpp */; }; 0F9332A014CA7DCD0085F3C6 /* GetByIdStatus.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F93329614CA7DC10085F3C6 /* GetByIdStatus.h */; settings = {ATTRIBUTES = (Private, ); }; }; - 0F9332A114CA7DD10085F3C6 /* MethodCallLinkStatus.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F93329714CA7DC10085F3C6 /* MethodCallLinkStatus.cpp */; }; - 0F9332A214CA7DD30085F3C6 /* MethodCallLinkStatus.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F93329814CA7DC10085F3C6 /* MethodCallLinkStatus.h */; settings = {ATTRIBUTES = (Private, ); }; }; 0F9332A314CA7DD70085F3C6 /* PutByIdStatus.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F93329914CA7DC10085F3C6 /* PutByIdStatus.cpp */; }; 0F9332A414CA7DD90085F3C6 /* PutByIdStatus.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F93329A14CA7DC10085F3C6 /* PutByIdStatus.h */; settings = {ATTRIBUTES = (Private, ); }; }; 0F9332A514CA7DDD0085F3C6 /* StructureSet.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F93329B14CA7DC10085F3C6 /* StructureSet.h */; settings = {ATTRIBUTES = (Private, ); }; }; @@ -584,12 +580,17 @@ A75706DE118A2BCF0057F88F /* JITArithmetic32_64.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A75706DD118A2BCF0057F88F /* JITArithmetic32_64.cpp */; }; A766B44F0EE8DCD1009518CA /* ExecutableAllocator.h in Headers */ = {isa = PBXBuildFile; fileRef = A7B48DB50EE74CFC00DCBDB6 /* ExecutableAllocator.h */; settings = {ATTRIBUTES = (Private, ); }; }; 
A76C51761182748D00715B05 /* JSInterfaceJIT.h in Headers */ = {isa = PBXBuildFile; fileRef = A76C51741182748D00715B05 /* JSInterfaceJIT.h */; settings = {ATTRIBUTES = (Private, ); }; }; + A76F279415F13C9600517D67 /* UnlinkedCodeBlock.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A79E781E15EECBA80047C855 /* UnlinkedCodeBlock.cpp */; }; A76F54A313B28AAB00EF2BCE /* JITWriteBarrier.h in Headers */ = {isa = PBXBuildFile; fileRef = A76F54A213B28AAB00EF2BCE /* JITWriteBarrier.h */; }; + A77F1821164088B200640A47 /* CodeCache.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A77F181F164088B200640A47 /* CodeCache.cpp */; }; + A77F1822164088B200640A47 /* CodeCache.h in Headers */ = {isa = PBXBuildFile; fileRef = A77F1820164088B200640A47 /* CodeCache.h */; settings = {ATTRIBUTES = (Private, ); }; }; + A77F1825164192C700640A47 /* ParserModes.h in Headers */ = {isa = PBXBuildFile; fileRef = A77F18241641925400640A47 /* ParserModes.h */; settings = {ATTRIBUTES = (Private, ); }; }; A784A26111D16622005776AC /* ASTBuilder.h in Headers */ = {isa = PBXBuildFile; fileRef = A7A7EE7411B98B8D0065A14F /* ASTBuilder.h */; }; A784A26411D16622005776AC /* SyntaxChecker.h in Headers */ = {isa = PBXBuildFile; fileRef = A7A7EE7711B98B8D0065A14F /* SyntaxChecker.h */; }; A7AFC17915F7EFE30048F57B /* ResolveOperation.h in Headers */ = {isa = PBXBuildFile; fileRef = A7AFC17715F7EFE30048F57B /* ResolveOperation.h */; settings = {ATTRIBUTES = (Private, ); }; }; A7B48F490EE8936F00DCBDB6 /* ExecutableAllocator.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A7B48DB60EE74CFC00DCBDB6 /* ExecutableAllocator.cpp */; }; A7B4ACAF1484C9CE00B38A36 /* JSExportMacros.h in Headers */ = {isa = PBXBuildFile; fileRef = A7B4ACAE1484C9CE00B38A36 /* JSExportMacros.h */; settings = {ATTRIBUTES = (Private, ); }; }; + A7B601821639FD2A00372BA3 /* UnlinkedCodeBlock.h in Headers */ = {isa = PBXBuildFile; fileRef = A79E781F15EECBA80047C855 /* UnlinkedCodeBlock.h */; settings = {ATTRIBUTES = (Private, ); }; }; A7C1E8E4112E72EF00A37F98 /* JITPropertyAccess32_64.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A7C1E8C8112E701C00A37F98 /* JITPropertyAccess32_64.cpp */; }; A7DCB97312E5193F00911940 /* WriteBarrier.h in Headers */ = {isa = PBXBuildFile; fileRef = A7DCB77912E3D90500911940 /* WriteBarrier.h */; settings = {ATTRIBUTES = (Private, ); }; }; A7E2EA6B0FB460CF00601F06 /* LiteralParser.h in Headers */ = {isa = PBXBuildFile; fileRef = A7E2EA690FB460CF00601F06 /* LiteralParser.h */; }; @@ -746,7 +747,7 @@ FE20CE9D15F04A9500DF3430 /* LLIntCLoop.cpp in Sources */ = {isa = PBXBuildFile; fileRef = FE20CE9B15F04A9500DF3430 /* LLIntCLoop.cpp */; }; FE20CE9E15F04A9500DF3430 /* LLIntCLoop.h in Headers */ = {isa = PBXBuildFile; fileRef = FE20CE9C15F04A9500DF3430 /* LLIntCLoop.h */; settings = {ATTRIBUTES = (Private, ); }; }; FE4A331F15BD2E07006F54F3 /* VMInspector.cpp in Sources */ = {isa = PBXBuildFile; fileRef = FE4A331D15BD2E07006F54F3 /* VMInspector.cpp */; }; - FE4A332015BD2E07006F54F3 /* VMInspector.h in Headers */ = {isa = PBXBuildFile; fileRef = FE4A331E15BD2E07006F54F3 /* VMInspector.h */; }; + FE4A332015BD2E07006F54F3 /* VMInspector.h in Headers */ = {isa = PBXBuildFile; fileRef = FE4A331E15BD2E07006F54F3 /* VMInspector.h */; settings = {ATTRIBUTES = (Private, ); }; }; FED287B215EC9A5700DA8161 /* LLIntOpcode.h in Headers */ = {isa = PBXBuildFile; fileRef = FED287B115EC9A5700DA8161 /* LLIntOpcode.h */; settings = {ATTRIBUTES = (Private, ); }; }; /* End PBXBuildFile section */ @@ -853,8 +854,6 @@ 0F0B83AC14BCF60200885B4F /* LineInfo.h */ = {isa 
= PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LineInfo.h; sourceTree = "<group>"; }; 0F0B83AE14BCF71400885B4F /* CallLinkInfo.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CallLinkInfo.cpp; sourceTree = "<group>"; }; 0F0B83AF14BCF71400885B4F /* CallLinkInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CallLinkInfo.h; sourceTree = "<group>"; }; - 0F0B83B214BCF85E00885B4F /* MethodCallLinkInfo.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MethodCallLinkInfo.cpp; sourceTree = "<group>"; }; - 0F0B83B314BCF85E00885B4F /* MethodCallLinkInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MethodCallLinkInfo.h; sourceTree = "<group>"; }; 0F0B83B814BCF95B00885B4F /* CallReturnOffsetToBytecodeOffset.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CallReturnOffsetToBytecodeOffset.h; sourceTree = "<group>"; }; 0F0CD4C015F1A6040032F1C0 /* PutDirectIndexMode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PutDirectIndexMode.h; sourceTree = "<group>"; }; 0F0CD4C315F6B6B50032F1C0 /* SparseArrayValueMap.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = SparseArrayValueMap.cpp; sourceTree = "<group>"; }; @@ -963,8 +962,6 @@ 0F93329414CA7DC10085F3C6 /* CallLinkStatus.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CallLinkStatus.h; sourceTree = "<group>"; }; 0F93329514CA7DC10085F3C6 /* GetByIdStatus.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = GetByIdStatus.cpp; sourceTree = "<group>"; }; 0F93329614CA7DC10085F3C6 /* GetByIdStatus.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GetByIdStatus.h; sourceTree = "<group>"; }; - 0F93329714CA7DC10085F3C6 /* MethodCallLinkStatus.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MethodCallLinkStatus.cpp; sourceTree = "<group>"; }; - 0F93329814CA7DC10085F3C6 /* MethodCallLinkStatus.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MethodCallLinkStatus.h; sourceTree = "<group>"; }; 0F93329914CA7DC10085F3C6 /* PutByIdStatus.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = PutByIdStatus.cpp; sourceTree = "<group>"; }; 0F93329A14CA7DC10085F3C6 /* PutByIdStatus.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PutByIdStatus.h; sourceTree = "<group>"; }; 0F93329B14CA7DC10085F3C6 /* StructureSet.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = StructureSet.h; sourceTree = "<group>"; }; @@ -1386,6 +1383,11 @@ A767FF9F14F4502900789059 /* JSCTypedArrayStubs.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSCTypedArrayStubs.h; sourceTree = "<group>"; }; A76C51741182748D00715B05 /* JSInterfaceJIT.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSInterfaceJIT.h; sourceTree = "<group>"; }; A76F54A213B28AAB00EF2BCE /* JITWriteBarrier.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JITWriteBarrier.h; sourceTree = 
"<group>"; }; + A77F181F164088B200640A47 /* CodeCache.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CodeCache.cpp; sourceTree = "<group>"; }; + A77F1820164088B200640A47 /* CodeCache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CodeCache.h; sourceTree = "<group>"; }; + A77F18241641925400640A47 /* ParserModes.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ParserModes.h; sourceTree = "<group>"; }; + A79E781E15EECBA80047C855 /* UnlinkedCodeBlock.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = UnlinkedCodeBlock.cpp; sourceTree = "<group>"; }; + A79E781F15EECBA80047C855 /* UnlinkedCodeBlock.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = UnlinkedCodeBlock.h; sourceTree = "<group>"; }; A79EDB0811531CD60019E912 /* JSObjectRefPrivate.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSObjectRefPrivate.h; sourceTree = "<group>"; }; A7A7EE7411B98B8D0065A14F /* ASTBuilder.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ASTBuilder.h; sourceTree = "<group>"; }; A7A7EE7711B98B8D0065A14F /* SyntaxChecker.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SyntaxChecker.h; sourceTree = "<group>"; }; @@ -1408,7 +1410,6 @@ A8A4748D151A8306004123FF /* libWTF.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libWTF.a; sourceTree = BUILT_PRODUCTS_DIR; }; A8E894310CD0602400367179 /* JSCallbackObjectFunctions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSCallbackObjectFunctions.h; sourceTree = "<group>"; }; A8E894330CD0603F00367179 /* JSGlobalObject.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSGlobalObject.h; sourceTree = "<group>"; }; - BC021BF1136900C300FC5467 /* CompilerVersion.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = CompilerVersion.xcconfig; sourceTree = "<group>"; }; BC021BF2136900C300FC5467 /* ToolExecutable.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = ToolExecutable.xcconfig; sourceTree = "<group>"; }; BC02E9040E1839DB000F9297 /* ErrorConstructor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ErrorConstructor.cpp; sourceTree = "<group>"; }; BC02E9050E1839DB000F9297 /* ErrorConstructor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ErrorConstructor.h; sourceTree = "<group>"; }; @@ -2001,7 +2002,6 @@ isa = PBXGroup; children = ( 1C9051450BA9E8A70081E9D0 /* Base.xcconfig */, - BC021BF1136900C300FC5467 /* CompilerVersion.xcconfig */, 1C9051440BA9E8A70081E9D0 /* DebugRelease.xcconfig */, 449097EE0F8F81B50076A327 /* FeatureDefines.xcconfig */, 5DAFD6CB146B686300FBEFB4 /* JSC.xcconfig */, @@ -2073,6 +2073,7 @@ E49DC15112EF272200184A1F /* SourceProviderCache.h */, E49DC14912EF261A00184A1F /* SourceProviderCacheItem.h */, A7A7EE7711B98B8D0065A14F /* SyntaxChecker.h */, + A77F18241641925400640A47 /* ParserModes.h */, ); path = parser; sourceTree = "<group>"; @@ -2307,6 +2308,8 @@ 1420BE7A10AA6DDB00F455D2 /* WeakRandom.h */, A7DCB77912E3D90500911940 /* WriteBarrier.h */, C2D58C3315912FEE0021A844 /* GCActivityCallback.cpp */, + 
A77F181F164088B200640A47 /* CodeCache.cpp */, + A77F1820164088B200640A47 /* CodeCache.h */, ); path = runtime; sourceTree = "<group>"; @@ -2520,14 +2523,6 @@ 0F0B83AF14BCF71400885B4F /* CallLinkInfo.h */, 0F93329314CA7DC10085F3C6 /* CallLinkStatus.cpp */, 0F93329414CA7DC10085F3C6 /* CallLinkStatus.h */, - 0F93329514CA7DC10085F3C6 /* GetByIdStatus.cpp */, - 0F93329614CA7DC10085F3C6 /* GetByIdStatus.h */, - 0F93329714CA7DC10085F3C6 /* MethodCallLinkStatus.cpp */, - 0F93329814CA7DC10085F3C6 /* MethodCallLinkStatus.h */, - 0F93329914CA7DC10085F3C6 /* PutByIdStatus.cpp */, - 0F93329A14CA7DC10085F3C6 /* PutByIdStatus.h */, - A7AFC17715F7EFE30048F57B /* ResolveOperation.h */, - 0F93329B14CA7DC10085F3C6 /* StructureSet.h */, 0F0B83B814BCF95B00885B4F /* CallReturnOffsetToBytecodeOffset.h */, 969A07900ED1D3AE00F1F681 /* CodeBlock.cpp */, 969A07910ED1D3AE00F1F681 /* CodeBlock.h */, @@ -2551,10 +2546,6 @@ 0FB5467614F59AD1002C2989 /* LazyOperandValueProfile.h */, 0F0B83AC14BCF60200885B4F /* LineInfo.h */, 0F0FC45814BD15F100B81154 /* LLIntCallLinkInfo.h */, - 0F0B83B214BCF85E00885B4F /* MethodCallLinkInfo.cpp */, - 0F0B83B314BCF85E00885B4F /* MethodCallLinkInfo.h */, - 0F93329714CA7DC10085F3C6 /* MethodCallLinkStatus.cpp */, - 0F93329814CA7DC10085F3C6 /* MethodCallLinkStatus.h */, 0FB5467C14F5CFD3002C2989 /* MethodOfGettingAValueProfile.cpp */, 0FB5467A14F5C7D4002C2989 /* MethodOfGettingAValueProfile.h */, 969A07940ED1D3AE00F1F681 /* Opcode.cpp */, @@ -2567,6 +2558,7 @@ 0F9FC8C114E1B5FB00D52AE0 /* PutKind.h */, 0FF4276E159275D2004CB9FF /* ResolveGlobalStatus.cpp */, 0FF4276F159275D2004CB9FF /* ResolveGlobalStatus.h */, + A7AFC17715F7EFE30048F57B /* ResolveOperation.h */, 1429D8830ED21C3D00B89619 /* SamplingTool.cpp */, 1429D8840ED21C3D00B89619 /* SamplingTool.h */, 0F5541AF1613C1FB00CE3E25 /* SpecialPointer.cpp */, @@ -2574,6 +2566,7 @@ 0FD82E84141F3FDA00179C94 /* SpeculatedType.cpp */, 0FD82E4F141DAEA100179C94 /* SpeculatedType.h */, 0F93329B14CA7DC10085F3C6 /* StructureSet.h */, + 0F93329514CA7DC10085F3C6 /* GetByIdStatus.cpp */, 0F766D3615AE4A1A008F363E /* StructureStubClearingWatchpoint.cpp */, 0F766D3715AE4A1A008F363E /* StructureStubClearingWatchpoint.h */, BCCF0D0B0EF0B8A500413C8F /* StructureStubInfo.cpp */, @@ -2583,6 +2576,12 @@ 0F426A461460CBAB00131F8F /* VirtualRegister.h */, 0F919D2215853CDE004A4E7D /* Watchpoint.cpp */, 0F919D2315853CDE004A4E7D /* Watchpoint.h */, + 0F93329614CA7DC10085F3C6 /* GetByIdStatus.h */, + 0F93329914CA7DC10085F3C6 /* PutByIdStatus.cpp */, + 0F93329A14CA7DC10085F3C6 /* PutByIdStatus.h */, + 0F93329B14CA7DC10085F3C6 /* StructureSet.h */, + A79E781E15EECBA80047C855 /* UnlinkedCodeBlock.cpp */, + A79E781F15EECBA80047C855 /* UnlinkedCodeBlock.h */, ); path = bytecode; sourceTree = "<group>"; @@ -2615,6 +2614,7 @@ 86ADD1450FDDEA980006EEC2 /* ARMv7Assembler.h in Headers */, C2EAD2FC14F0249800A4B159 /* CopiedAllocator.h in Headers */, C2B916C214DA014E00CBAC86 /* MarkedAllocator.h in Headers */, + FE4A332015BD2E07006F54F3 /* VMInspector.h in Headers */, C2239D1816262BDD005AC5FD /* CopyVisitor.h in Headers */, C2239D1916262BDD005AC5FD /* CopyVisitorInlineMethods.h in Headers */, C24D31E3161CD695002AA4DB /* HeapStatistics.h in Headers */, @@ -2900,7 +2900,6 @@ 0F0B83AB14BCF5BB00885B4F /* ExpressionRangeInfo.h in Headers */, 0F0B83AD14BCF60400885B4F /* LineInfo.h in Headers */, 0F0B83B114BCF71800885B4F /* CallLinkInfo.h in Headers */, - 0F0B83B514BCF86200885B4F /* MethodCallLinkInfo.h in Headers */, 0F0B83B914BCF95F00885B4F /* CallReturnOffsetToBytecodeOffset.h in Headers */, 
0F0FC45A14BD15F500B81154 /* LLIntCallLinkInfo.h in Headers */, 0F21C26814BE5F6800ADC64B /* JITDriver.h in Headers */, @@ -2914,7 +2913,6 @@ 0F7B294D14C3CD4C007C3DB1 /* DFGCommon.h in Headers */, 0F93329E14CA7DC50085F3C6 /* CallLinkStatus.h in Headers */, 0F9332A014CA7DCD0085F3C6 /* GetByIdStatus.h in Headers */, - 0F9332A214CA7DD30085F3C6 /* MethodCallLinkStatus.h in Headers */, 0F9332A414CA7DD90085F3C6 /* PutByIdStatus.h in Headers */, 0F9332A514CA7DDD0085F3C6 /* StructureSet.h in Headers */, 0F55F0F514D1063C00AC7649 /* AbstractPC.h in Headers */, @@ -2984,7 +2982,6 @@ 0F766D3915AE4A1F008F363E /* StructureStubClearingWatchpoint.h in Headers */, 0F766D4415B2A3C0008F363E /* DFGRegisterSet.h in Headers */, 0F766D4615B3701F008F363E /* DFGScratchRegisterAllocator.h in Headers */, - FE4A332015BD2E07006F54F3 /* VMInspector.h in Headers */, 0F63943F15C75F19006A597C /* DFGStructureCheckHoistingPhase.h in Headers */, 0F63945515D07057006A597C /* ArrayProfile.h in Headers */, 0F63948515E4811B006A597C /* DFGArrayMode.h in Headers */, @@ -3009,8 +3006,11 @@ 862553D216136E1A009F17D0 /* JSProxy.h in Headers */, 0F5541B21613C1FB00CE3E25 /* SpecialPointer.h in Headers */, 0FEB3ECD16237F4D00AB67AD /* TypedArrayDescriptor.h in Headers */, - 0F256C361627B0AD007F2783 /* DFGCallArrayAllocatorSlowPathGenerator.h in Headers */, + 0F256C361627B0AD007F2783 /* DFGCallArrayAllocatorSlowPathGenerator.h in Headers */, C2239D1B16262BDD005AC5FD /* GCThread.h in Headers */, + A7B601821639FD2A00372BA3 /* UnlinkedCodeBlock.h in Headers */, + A77F1822164088B200640A47 /* CodeCache.h in Headers */, + A77F1825164192C700640A47 /* ParserModes.h in Headers */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -3096,8 +3096,6 @@ 932F5B3F0822A1C700736975 /* Headers */, 932F5B910822A1C700736975 /* Sources */, 932F5BD20822A1C700736975 /* Frameworks */, - 9319586B09D9F91A00A56FD4 /* Check For Global Initializers */, - 933457200EBFDC3F00B80894 /* Check For Exit Time Destructors */, 5D29D8BE0E9860B400C3D2D0 /* Check For Weak VTables and Externals */, 3713F014142905240036387F /* Check For Inappropriate Objective-C Class Names */, ); @@ -3292,36 +3290,6 @@ shellPath = /bin/sh; shellScript = "mkdir -p \"${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore/docs\"\ncd \"${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\"\n\n/bin/ln -sfh \"${SRCROOT}\" JavaScriptCore\nexport JavaScriptCore=\"JavaScriptCore\"\nexport BUILT_PRODUCTS_DIR=\"../..\"\n\nmake --no-builtin-rules -f \"JavaScriptCore/DerivedSources.make\" -j `/usr/sbin/sysctl -n hw.ncpu` || exit 1\n\n/usr/bin/env ruby JavaScriptCore/offlineasm/asm.rb JavaScriptCore/llint/LowLevelInterpreter.asm ${BUILT_PRODUCTS_DIR}/JSCLLIntOffsetsExtractor LLIntAssembly.h || exit 1\n"; }; - 9319586B09D9F91A00A56FD4 /* Check For Global Initializers */ = { - isa = PBXShellScriptBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - inputPaths = ( - "$(TARGET_BUILD_DIR)/$(EXECUTABLE_PATH)", - ); - name = "Check For Global Initializers"; - outputPaths = ( - ); - runOnlyForDeploymentPostprocessing = 0; - shellPath = /bin/sh; - shellScript = "if [ \"${TARGET_GCC_VERSION}\" = \"LLVM_COMPILER\" ]; then\n exit 0;\nfi\n\nif [ -f ../../Tools/Scripts/check-for-global-initializers ]; then\n ../../Tools/Scripts/check-for-global-initializers || exit $?\nfi"; - }; - 933457200EBFDC3F00B80894 /* Check For Exit Time Destructors */ = { - isa = PBXShellScriptBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - inputPaths = ( - "$(TARGET_BUILD_DIR)/$(EXECUTABLE_PATH)", - ); - name = "Check For Exit Time 
Destructors"; - outputPaths = ( - ); - runOnlyForDeploymentPostprocessing = 0; - shellPath = /bin/sh; - shellScript = "if [ \"${TARGET_GCC_VERSION}\" = \"LLVM_COMPILER\" ]; then\n exit 0;\nfi\n\nif [ -f ../../Tools/Scripts/check-for-exit-time-destructors ]; then\n ../../Tools/Scripts/check-for-exit-time-destructors || exit $?\nfi"; - }; /* End PBXShellScriptBuildPhase section */ /* Begin PBXSourcesBuildPhase section */ @@ -3541,10 +3509,8 @@ 0F0B839A14BCF45D00885B4F /* LLIntEntrypoints.cpp in Sources */, 0F0B839C14BCF46300885B4F /* LLIntThunks.cpp in Sources */, 0F0B83B014BCF71600885B4F /* CallLinkInfo.cpp in Sources */, - 0F0B83B414BCF86000885B4F /* MethodCallLinkInfo.cpp in Sources */, 0F93329D14CA7DC30085F3C6 /* CallLinkStatus.cpp in Sources */, 0F93329F14CA7DCA0085F3C6 /* GetByIdStatus.cpp in Sources */, - 0F9332A114CA7DD10085F3C6 /* MethodCallLinkStatus.cpp in Sources */, 0F9332A314CA7DD70085F3C6 /* PutByIdStatus.cpp in Sources */, 0F55F0F414D1063900AC7649 /* AbstractPC.cpp in Sources */, 86B5826714D2796C00A9C306 /* CodeProfile.cpp in Sources */, @@ -3621,6 +3587,8 @@ C24D31E2161CD695002AA4DB /* HeapStatistics.cpp in Sources */, C2239D1716262BDD005AC5FD /* CopyVisitor.cpp in Sources */, C2239D1A16262BDD005AC5FD /* GCThread.cpp in Sources */, + A76F279415F13C9600517D67 /* UnlinkedCodeBlock.cpp in Sources */, + A77F1821164088B200640A47 /* CodeCache.cpp in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; diff --git a/Source/JavaScriptCore/LLIntOffsetsExtractor.pro b/Source/JavaScriptCore/LLIntOffsetsExtractor.pro index 1fc6c1d7c..ae635c1b5 100644 --- a/Source/JavaScriptCore/LLIntOffsetsExtractor.pro +++ b/Source/JavaScriptCore/LLIntOffsetsExtractor.pro @@ -22,18 +22,14 @@ LIBS = defineTest(addIncludePaths) { # Just needed for include paths - include(../WTF/WTF.pri) include(JavaScriptCore.pri) + include(../WTF/WTF.pri) export(INCLUDEPATH) } addIncludePaths() -# To make sure we include JavaScriptCore/config.h and not -# the WTF one. 
-INCLUDEPATH = $$PWD $$INCLUDEPATH - LLINT_DEPENDENCY = \ $$PWD/llint/LowLevelInterpreter.asm \ $$PWD/llint/LowLevelInterpreter32_64.asm \ diff --git a/Source/JavaScriptCore/Target.pri b/Source/JavaScriptCore/Target.pri index ffd295a41..b0fcc16e7 100644 --- a/Source/JavaScriptCore/Target.pri +++ b/Source/JavaScriptCore/Target.pri @@ -30,8 +30,7 @@ include(yarr/yarr.pri) INSTALLDEPS += all -CONFIG(debug, debug|release): INCLUDEPATH += $$JAVASCRIPTCORE_GENERATED_SOURCES_DIR\debug -CONFIG(release, debug|release): INCLUDEPATH += $$JAVASCRIPTCORE_GENERATED_SOURCES_DIR\release +debug_and_release: INCLUDEPATH += $$JAVASCRIPTCORE_GENERATED_SOURCES_DIR/$$activeBuildConfig() SOURCES += \ API/JSBase.cpp \ @@ -60,8 +59,6 @@ SOURCES += \ bytecode/GetByIdStatus.cpp \ bytecode/JumpTable.cpp \ bytecode/LazyOperandValueProfile.cpp \ - bytecode/MethodCallLinkInfo.cpp \ - bytecode/MethodCallLinkStatus.cpp \ bytecode/MethodOfGettingAValueProfile.cpp \ bytecode/Opcode.cpp \ bytecode/PolymorphicPutByIdList.cpp \ @@ -72,6 +69,7 @@ SOURCES += \ bytecode/SpeculatedType.cpp \ bytecode/StructureStubClearingWatchpoint.cpp \ bytecode/StructureStubInfo.cpp \ + bytecode/UnlinkedCodeBlock.cpp \ bytecode/Watchpoint.cpp \ bytecompiler/BytecodeGenerator.cpp \ bytecompiler/NodesCodegen.cpp \ @@ -187,6 +185,7 @@ SOURCES += \ runtime/BooleanObject.cpp \ runtime/BooleanPrototype.cpp \ runtime/CallData.cpp \ + runtime/CodeCache.cpp \ runtime/CommonIdentifiers.cpp \ runtime/Completion.cpp \ runtime/ConstructData.cpp \ @@ -276,6 +275,18 @@ SOURCES += \ tools/CodeProfiling.cpp \ yarr/YarrJIT.cpp \ +linux-*:if(isEqual(QT_ARCH, "i386")|isEqual(QT_ARCH, "x86_64")) { + SOURCES += \ + disassembler/UDis86Disassembler.cpp \ + disassembler/udis86/udis86.c \ + disassembler/udis86/udis86_decode.c \ + disassembler/udis86/udis86_input.c \ + disassembler/udis86/udis86_itab_holder.c \ + disassembler/udis86/udis86_syn-att.c \ + disassembler/udis86/udis86_syn-intel.c \ + disassembler/udis86/udis86_syn.c \ +} + HEADERS += $$files(*.h, true) *sh4* { diff --git a/Source/JavaScriptCore/bytecode/ArrayProfile.cpp b/Source/JavaScriptCore/bytecode/ArrayProfile.cpp index de7f67887..5a87380fd 100644 --- a/Source/JavaScriptCore/bytecode/ArrayProfile.cpp +++ b/Source/JavaScriptCore/bytecode/ArrayProfile.cpp @@ -26,6 +26,7 @@ #include "config.h" #include "ArrayProfile.h" +#include "CodeBlock.h" #include <wtf/StringExtras.h> namespace JSC { @@ -38,14 +39,14 @@ const char* arrayModesToString(ArrayModes arrayModes) if (arrayModes == ALL_ARRAY_MODES) return "TOP"; - bool isNonArray = !!(arrayModes & NonArray); - bool isNonArrayWithContiguous = !!(arrayModes & NonArrayWithContiguous); - bool isNonArrayWithArrayStorage = !!(arrayModes & NonArrayWithArrayStorage); - bool isNonArrayWithSlowPutArrayStorage = !!(arrayModes & NonArrayWithSlowPutArrayStorage); - bool isArray = !!(arrayModes & ArrayClass); - bool isArrayWithContiguous = !!(arrayModes & ArrayWithContiguous); - bool isArrayWithArrayStorage = !!(arrayModes & ArrayWithArrayStorage); - bool isArrayWithSlowPutArrayStorage = !!(arrayModes & ArrayWithSlowPutArrayStorage); + bool isNonArray = !!(arrayModes & asArrayModes(NonArray)); + bool isNonArrayWithContiguous = !!(arrayModes & asArrayModes(NonArrayWithContiguous)); + bool isNonArrayWithArrayStorage = !!(arrayModes & asArrayModes(NonArrayWithArrayStorage)); + bool isNonArrayWithSlowPutArrayStorage = !!(arrayModes & asArrayModes(NonArrayWithSlowPutArrayStorage)); + bool isArray = !!(arrayModes & asArrayModes(ArrayClass)); + bool isArrayWithContiguous = 
!!(arrayModes & asArrayModes(ArrayWithContiguous)); + bool isArrayWithArrayStorage = !!(arrayModes & asArrayModes(ArrayWithArrayStorage)); + bool isArrayWithSlowPutArrayStorage = !!(arrayModes & asArrayModes(ArrayWithSlowPutArrayStorage)); static char result[256]; snprintf( @@ -64,12 +65,14 @@ const char* arrayModesToString(ArrayModes arrayModes) return result; } -void ArrayProfile::computeUpdatedPrediction(OperationInProgress operation) +void ArrayProfile::computeUpdatedPrediction(CodeBlock* codeBlock, OperationInProgress operation) { if (m_lastSeenStructure) { m_observedArrayModes |= arrayModeFromStructure(m_lastSeenStructure); m_mayInterceptIndexedAccesses |= m_lastSeenStructure->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero(); + if (!codeBlock->globalObject()->isOriginalArrayStructure(m_lastSeenStructure)) + m_usesOriginalArrayStructures = false; if (!m_structureIsPolymorphic) { if (!m_expectedStructure) m_expectedStructure = m_lastSeenStructure; diff --git a/Source/JavaScriptCore/bytecode/ArrayProfile.h b/Source/JavaScriptCore/bytecode/ArrayProfile.h index ffc136258..376684fc1 100644 --- a/Source/JavaScriptCore/bytecode/ArrayProfile.h +++ b/Source/JavaScriptCore/bytecode/ArrayProfile.h @@ -33,6 +33,7 @@ namespace JSC { +class CodeBlock; class LLIntOffsetsExtractor; // This is a bitfield where each bit represents an IndexingType that we have seen. @@ -87,6 +88,7 @@ public: , m_structureIsPolymorphic(false) , m_mayStoreToHole(false) , m_mayInterceptIndexedAccesses(false) + , m_usesOriginalArrayStructures(true) , m_observedArrayModes(0) { } @@ -98,6 +100,7 @@ public: , m_structureIsPolymorphic(false) , m_mayStoreToHole(false) , m_mayInterceptIndexedAccesses(false) + , m_usesOriginalArrayStructures(true) , m_observedArrayModes(0) { } @@ -113,7 +116,7 @@ public: m_lastSeenStructure = structure; } - void computeUpdatedPrediction(OperationInProgress operation = NoOperation); + void computeUpdatedPrediction(CodeBlock*, OperationInProgress = NoOperation); Structure* expectedStructure() const { return m_expectedStructure; } bool structureIsPolymorphic() const @@ -129,6 +132,8 @@ public: bool mayStoreToHole() const { return m_mayStoreToHole; } + bool usesOriginalArrayStructures() const { return m_usesOriginalArrayStructures; } + private: friend class LLIntOffsetsExtractor; @@ -138,6 +143,7 @@ private: bool m_structureIsPolymorphic; bool m_mayStoreToHole; // This flag may become overloaded to indicate other special cases that were encountered during array access, as it depends on indexing type. Since we currently have basically just one indexing type (two variants of ArrayStorage), this flag for now just means exactly what its name implies. 
bool m_mayInterceptIndexedAccesses; + bool m_usesOriginalArrayStructures; ArrayModes m_observedArrayModes; }; diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp index 7f86186a0..ceae3fcb2 100644 --- a/Source/JavaScriptCore/bytecode/CodeBlock.cpp +++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp @@ -43,7 +43,6 @@ #include "JSNameScope.h" #include "JSValue.h" #include "LowLevelInterpreter.h" -#include "MethodCallLinkStatus.h" #include "RepatchBuffer.h" #include "SlotVisitorInlineMethods.h" #include <stdio.h> @@ -283,9 +282,6 @@ void CodeBlock::printGetByIdCacheStatus(ExecState* exec, int location) { Instruction* instruction = instructions().begin() + location; - if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_method_check) - instruction++; - Identifier& ident = identifier(instruction[3].u.operand); UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations. @@ -496,8 +492,8 @@ void CodeBlock::dump(ExecState* exec) static_cast<unsigned long>(instructions().size() * sizeof(Instruction)), this, codeTypeToString(codeType()), m_numParameters, m_numCalleeRegisters, m_numVars); - if (m_symbolTable->captureCount()) - dataLog("; %d captured var(s)", m_symbolTable->captureCount()); + if (symbolTable()->captureCount()) + dataLog("; %d captured var(s)", symbolTable()->captureCount()); if (usesArguments()) { dataLog( "; uses arguments, in r%d, r%d", @@ -531,13 +527,13 @@ void CodeBlock::dump(ExecState* exec) } while (i < m_constantRegisters.size()); } - if (m_rareData && !m_rareData->m_regexps.isEmpty()) { + if (size_t count = m_unlinkedCode->numberOfRegExps()) { dataLog("\nm_regexps:\n"); size_t i = 0; do { - dataLog(" re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_rareData->m_regexps[i].get()).utf8().data()); + dataLog(" re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_unlinkedCode->regexp(i)).utf8().data()); ++i; - } while (i < m_rareData->m_regexps.size()); + } while (i < count); } #if ENABLE(JIT) @@ -671,7 +667,7 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& int dst = (++it)->u.operand; int argv = (++it)->u.operand; int argc = (++it)->u.operand; - dataLog("[%4d] new_array_buffer %s, %d, %d", location, registerName(exec, dst).data(), argv, argc); + dataLog("[%4d] new_array_buffer\t %s, %d, %d", location, registerName(exec, dst).data(), argv, argc); dumpBytecodeCommentAndNewLine(location); break; } @@ -679,7 +675,7 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& int r0 = (++it)->u.operand; int re0 = (++it)->u.operand; dataLog("[%4d] new_regexp\t %s, ", location, registerName(exec, r0).data()); - if (r0 >=0 && r0 < (int)numberOfRegExps()) + if (r0 >=0 && r0 < (int)m_unlinkedCode->numberOfRegExps()) dataLog("%s", regexpName(re0, regexp(re0)).data()); else dataLog("bad_regexp(%d)", re0); @@ -889,11 +885,22 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& it++; break; } + case op_init_global_const_nop: { + dataLog("[%4d] init_global_const_nop\t", location); + dumpBytecodeCommentAndNewLine(location); + it++; + it++; + it++; + it++; + break; + } case op_init_global_const: { WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer; int r0 = (++it)->u.operand; dataLog("[%4d] init_global_const\t g%d(%p), %s", location, m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(exec, r0).data()); dumpBytecodeCommentAndNewLine(location); + 
it++; + it++; break; } case op_init_global_const_check: { @@ -1017,42 +1024,6 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& dumpBytecodeCommentAndNewLine(location); break; } - case op_method_check: { - dataLog("[%4d] method_check", location); -#if ENABLE(JIT) - if (numberOfMethodCallLinkInfos()) { - MethodCallLinkInfo& methodCall = getMethodCallLinkInfo(location); - dataLog(" jit("); - if (!methodCall.seen) - dataLog("not seen"); - else { - // Use the fact that MethodCallLinkStatus already does smart things - // for decoding seen method calls. - MethodCallLinkStatus status = MethodCallLinkStatus::computeFor(this, location); - if (!status) - dataLog("not set"); - else { - dataLog("function = %p (executable = ", status.function()); - JSCell* functionAsCell = getJSFunction(status.function()); - if (functionAsCell) - dataLog("%p", jsCast<JSFunction*>(functionAsCell)->executable()); - else - dataLog("N/A"); - dataLog("), struct = %p", status.structure()); - if (status.needsPrototypeCheck()) - dataLog(", prototype = %p, struct = %p", status.prototype(), status.prototypeStructure()); - } - } - dataLog(")"); - } -#endif - dumpBytecodeCommentAndNewLine(location); - ++it; - printGetByIdOp(exec, location, it); - printGetByIdCacheStatus(exec, location); - dataLog("\n"); - break; - } case op_del_by_id: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; @@ -1431,9 +1402,10 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& dumpBytecodeCommentAndNewLine(location); break; } - case op_throw_reference_error: { + case op_throw_static_error: { int k0 = (++it)->u.operand; - dataLog("[%4d] throw_reference_error\t %s", location, constantName(exec, k0, getConstant(k0)).data()); + int k1 = (++it)->u.operand; + dataLog("[%4d] throw_static_error\t %s, %s", location, constantName(exec, k0, getConstant(k0)).data(), k1 ? 
"true" : "false"); dumpBytecodeCommentAndNewLine(location); break; } @@ -1531,9 +1503,9 @@ void CodeBlock::dumpStatistics() FOR_EACH_MEMBER_VECTOR(GET_STATS) #undef GET_STATS - if (!codeBlock->m_symbolTable.isEmpty()) { + if (codeBlock->symbolTable() && !codeBlock->symbolTable()->isEmpty()) { symbolTableIsNotEmpty++; - symbolTableTotalSize += (codeBlock->m_symbolTable.capacity() * (sizeof(SymbolTable::KeyType) + sizeof(SymbolTable::MappedType))); + symbolTableTotalSize += (codeBlock->symbolTable()->capacity() * (sizeof(SymbolTable::KeyType) + sizeof(SymbolTable::MappedType))); } if (codeBlock->m_rareData) { @@ -1601,34 +1573,26 @@ CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other) , m_numCalleeRegisters(other.m_numCalleeRegisters) , m_numVars(other.m_numVars) , m_isConstructor(other.m_isConstructor) + , m_unlinkedCode(*other.m_globalData, other.m_ownerExecutable.get(), other.m_unlinkedCode.get()) , m_ownerExecutable(*other.m_globalData, other.m_ownerExecutable.get(), other.m_ownerExecutable.get()) , m_globalData(other.m_globalData) , m_instructions(other.m_instructions) , m_thisRegister(other.m_thisRegister) , m_argumentsRegister(other.m_argumentsRegister) , m_activationRegister(other.m_activationRegister) - , m_globalObjectConstant(other.m_globalObjectConstant) - , m_needsFullScopeChain(other.m_needsFullScopeChain) - , m_usesEval(other.m_usesEval) - , m_isNumericCompareFunction(other.m_isNumericCompareFunction) , m_isStrictMode(other.m_isStrictMode) - , m_codeType(other.m_codeType) , m_source(other.m_source) , m_sourceOffset(other.m_sourceOffset) #if ENABLE(VALUE_PROFILER) , m_executionEntryCount(0) #endif - , m_jumpTargets(other.m_jumpTargets) - , m_loopTargets(other.m_loopTargets) , m_identifiers(other.m_identifiers) , m_constantRegisters(other.m_constantRegisters) , m_functionDecls(other.m_functionDecls) , m_functionExprs(other.m_functionExprs) - , m_symbolTable(*other.m_globalData, other.m_ownerExecutable.get(), other.symbolTable()) , m_osrExitCounter(0) , m_optimizationDelayCounter(0) , m_reoptimizationRetryCounter(0) - , m_lineInfo(other.m_lineInfo) , m_resolveOperations(other.m_resolveOperations) , m_putToBaseOperations(other.m_putToBaseOperations) #if ENABLE(BYTECODE_COMMENTS) @@ -1646,36 +1610,31 @@ CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other) createRareDataIfNecessary(); m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers; - m_rareData->m_regexps = other.m_rareData->m_regexps; m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers; m_rareData->m_immediateSwitchJumpTables = other.m_rareData->m_immediateSwitchJumpTables; m_rareData->m_characterSwitchJumpTables = other.m_rareData->m_characterSwitchJumpTables; m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables; - m_rareData->m_expressionInfo = other.m_rareData->m_expressionInfo; } } -CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, JSGlobalObject *globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, bool isConstructor, PassOwnPtr<CodeBlock> alternative) +CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, unsigned baseScopeDepth, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, PassOwnPtr<CodeBlock> alternative) : m_globalObject(globalObject->globalData(), ownerExecutable, globalObject) , m_heap(&m_globalObject->globalData().heap) - , m_numCalleeRegisters(0) - , m_numVars(0) - , m_isConstructor(isConstructor) 
- , m_numParameters(0) + , m_numCalleeRegisters(unlinkedCodeBlock->m_numCalleeRegisters) + , m_numVars(unlinkedCodeBlock->m_numVars) + , m_isConstructor(unlinkedCodeBlock->isConstructor()) + , m_unlinkedCode(globalObject->globalData(), ownerExecutable, unlinkedCodeBlock) , m_ownerExecutable(globalObject->globalData(), ownerExecutable, ownerExecutable) - , m_globalData(0) - , m_argumentsRegister(-1) - , m_needsFullScopeChain(ownerExecutable->needsActivation()) - , m_usesEval(ownerExecutable->usesEval()) - , m_isNumericCompareFunction(false) - , m_isStrictMode(ownerExecutable->isStrictMode()) - , m_codeType(codeType) + , m_globalData(unlinkedCodeBlock->globalData()) + , m_thisRegister(unlinkedCodeBlock->thisRegister()) + , m_argumentsRegister(unlinkedCodeBlock->argumentsRegister()) + , m_activationRegister(unlinkedCodeBlock->activationRegister()) + , m_isStrictMode(unlinkedCodeBlock->isStrictMode()) , m_source(sourceProvider) , m_sourceOffset(sourceOffset) #if ENABLE(VALUE_PROFILER) , m_executionEntryCount(0) #endif - , m_symbolTable(globalObject->globalData(), ownerExecutable, SharedSymbolTable::create(globalObject->globalData())) , m_alternative(alternative) , m_osrExitCounter(0) , m_optimizationDelayCounter(0) @@ -1684,7 +1643,10 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, JSGlo , m_bytecodeCommentIterator(0) #endif { + m_globalData->startedCompiling(this); + ASSERT(m_source); + setNumParameters(unlinkedCodeBlock->numParameters()); optimizeAfterWarmUp(); jitAfterWarmUp(); @@ -1692,9 +1654,208 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, JSGlo #if DUMP_CODE_BLOCK_STATISTICS liveCodeBlockSet.add(this); #endif - // We have a stub putToBase operation to allow resolve_base to - // remain branchless - m_putToBaseOperations.append(PutToBaseOperation(isStrictMode())); + setIdentifiers(unlinkedCodeBlock->identifiers()); + setConstantRegisters(unlinkedCodeBlock->constantRegisters()); + + m_functionDecls.grow(unlinkedCodeBlock->numberOfFunctionDecls()); + for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) { + UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i); + unsigned lineCount = unlinkedExecutable->lineCount(); + unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset(); + unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset(); + unsigned sourceLength = unlinkedExecutable->sourceLength(); + SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine); + FunctionExecutable* executable = FunctionExecutable::create(*m_globalData, code, unlinkedExecutable, firstLine, firstLine + lineCount); + m_functionDecls[i].set(*m_globalData, ownerExecutable, executable); + } + + m_functionExprs.grow(unlinkedCodeBlock->numberOfFunctionExprs()); + for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) { + UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i); + unsigned lineCount = unlinkedExecutable->lineCount(); + unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset(); + unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset(); + unsigned sourceLength = unlinkedExecutable->sourceLength(); + SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine); + FunctionExecutable* executable = FunctionExecutable::create(*m_globalData, code, unlinkedExecutable, firstLine, firstLine + lineCount); + 
m_functionExprs[i].set(*m_globalData, ownerExecutable, executable); + } + + if (unlinkedCodeBlock->hasRareData()) { + createRareDataIfNecessary(); + if (size_t count = unlinkedCodeBlock->constantBufferCount()) { + m_rareData->m_constantBuffers.grow(count); + for (size_t i = 0; i < count; i++) { + const UnlinkedCodeBlock::ConstantBuffer& buffer = unlinkedCodeBlock->constantBuffer(i); + m_rareData->m_constantBuffers[i] = buffer; + } + } + if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) { + m_rareData->m_exceptionHandlers.grow(count); + for (size_t i = 0; i < count; i++) { + const UnlinkedHandlerInfo& handler = unlinkedCodeBlock->exceptionHandler(i); + m_rareData->m_exceptionHandlers[i].start = handler.start; + m_rareData->m_exceptionHandlers[i].end = handler.end; + m_rareData->m_exceptionHandlers[i].target = handler.target; + m_rareData->m_exceptionHandlers[i].scopeDepth = handler.scopeDepth + baseScopeDepth; +#if ENABLE(JIT) && ENABLE(LLINT) + m_rareData->m_exceptionHandlers[i].nativeCode = CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(llint_op_catch))); +#endif + } + } + + if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) { + m_rareData->m_stringSwitchJumpTables.grow(count); + for (size_t i = 0; i < count; i++) { + UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin(); + UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end(); + for (; ptr != end; ++ptr) { + OffsetLocation offset; + offset.branchOffset = ptr->value; + m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset); + } + } + } + + if (size_t count = unlinkedCodeBlock->numberOfImmediateSwitchJumpTables()) { + m_rareData->m_immediateSwitchJumpTables.grow(count); + for (size_t i = 0; i < count; i++) { + UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->immediateSwitchJumpTable(i); + SimpleJumpTable& destTable = m_rareData->m_immediateSwitchJumpTables[i]; + destTable.branchOffsets = sourceTable.branchOffsets; + destTable.min = sourceTable.min; + } + } + + if (size_t count = unlinkedCodeBlock->numberOfCharacterSwitchJumpTables()) { + m_rareData->m_characterSwitchJumpTables.grow(count); + for (size_t i = 0; i < count; i++) { + UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->characterSwitchJumpTable(i); + SimpleJumpTable& destTable = m_rareData->m_characterSwitchJumpTables[i]; + destTable.branchOffsets = sourceTable.branchOffsets; + destTable.min = sourceTable.min; + } + } + } + + // Allocate metadata buffers for the bytecode +#if ENABLE(LLINT) + if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos()) + m_llintCallLinkInfos.grow(size); +#endif +#if ENABLE(DFG_JIT) + if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles()) + m_arrayProfiles.grow(size); + if (size_t size = unlinkedCodeBlock->numberOfValueProfiles()) + m_valueProfiles.grow(size); +#endif + if (size_t size = unlinkedCodeBlock->numberOfResolveOperations()) + m_resolveOperations.grow(size); + size_t putToBaseCount = unlinkedCodeBlock->numberOfPutToBaseOperations(); + m_putToBaseOperations.reserveCapacity(putToBaseCount); + for (size_t i = 0; i < putToBaseCount; ++i) + m_putToBaseOperations.append(PutToBaseOperation(isStrictMode())); + + ASSERT(m_putToBaseOperations.capacity() == putToBaseCount); + + // Copy and translate the UnlinkedInstructions + size_t instructionCount = unlinkedCodeBlock->instructions().size(); 
+ UnlinkedInstruction* pc = unlinkedCodeBlock->instructions().data(); + Vector<Instruction> instructions(instructionCount); + for (size_t i = 0; i < unlinkedCodeBlock->instructions().size(); ) { + unsigned opLength = opcodeLength(pc[i].u.opcode); + instructions[i] = globalData()->interpreter->getOpcode(pc[i].u.opcode); + for (size_t j = 1; j < opLength; ++j) { + if (sizeof(int32_t) != sizeof(intptr_t)) + instructions[i + j].u.pointer = 0; + instructions[i + j].u.operand = pc[i + j].u.operand; + } + switch (pc[i].u.opcode) { +#if ENABLE(DFG_JIT) + case op_get_by_val: + case op_get_argument_by_val: { + int arrayProfileIndex = pc[i + opLength - 2].u.operand; + m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); + + instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex]; + // fallthrough + } + case op_convert_this: + case op_resolve: + case op_resolve_base: + case op_resolve_with_base: + case op_resolve_with_this: + case op_get_by_id: + case op_call_put_result: { + ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand]; + ASSERT(profile->m_bytecodeOffset == -1); + profile->m_bytecodeOffset = i; + instructions[i + opLength - 1] = profile; + break; + } + case op_put_by_val: { + int arrayProfileIndex = pc[i + opLength - 1].u.operand; + m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); + instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex]; + break; + } + + case op_call: + case op_call_eval: { + int arrayProfileIndex = pc[i + opLength - 1].u.operand; + m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); + instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex]; + // fallthrough + } +#endif +#if ENABLE(LLINT) + case op_construct: + instructions[i + 4] = &m_llintCallLinkInfos[pc[i + 4].u.operand]; + break; +#endif + case op_get_by_id_out_of_line: + case op_get_by_id_self: + case op_get_by_id_proto: + case op_get_by_id_chain: + case op_get_by_id_getter_self: + case op_get_by_id_getter_proto: + case op_get_by_id_getter_chain: + case op_get_by_id_custom_self: + case op_get_by_id_custom_proto: + case op_get_by_id_custom_chain: + case op_get_by_id_generic: + case op_get_array_length: + case op_get_string_length: + CRASH(); + + case op_init_global_const_nop: { + ASSERT(codeType() == GlobalCode); + Identifier ident = identifier(pc[i + 4].u.operand); + SymbolTableEntry entry = globalObject->symbolTable()->get(ident.impl()); + if (entry.isNull()) + break; + + if (entry.couldBeWatched()) { + instructions[i + 0] = globalData()->interpreter->getOpcode(op_init_global_const_check); + instructions[i + 1] = &globalObject->registerAt(entry.getIndex()); + instructions[i + 3] = entry.addressOfIsWatched(); + break; + } + + instructions[i + 0] = globalData()->interpreter->getOpcode(op_init_global_const); + instructions[i + 1] = &globalObject->registerAt(entry.getIndex()); + break; + } + default: + break; + } + i += opLength; + } + m_instructions = WTF::RefCountedArray<Instruction>(instructions); + + if (BytecodeGenerator::dumpsGeneratedCode()) + dump(m_globalObject->globalExec()); + m_globalData->finishedCompiling(this); } CodeBlock::~CodeBlock() @@ -1745,15 +1906,6 @@ void CodeBlock::setNumParameters(int newValue) #endif } -void CodeBlock::addParameter() -{ - m_numParameters++; - -#if ENABLE(VALUE_PROFILER) - m_argumentValueProfiles.append(ValueProfile()); -#endif -} - void CodeBlock::visitStructures(SlotVisitor& visitor, Instruction* vPC) { Interpreter* interpreter = m_globalData->interpreter; @@ -1836,6 +1988,8 @@ void 
CodeBlock::visitAggregate(SlotVisitor& visitor) if (!!m_alternative) m_alternative->visitAggregate(visitor); + visitor.append(&m_unlinkedCode); + // There are three things that may use unconditional finalizers: lazy bytecode freeing, // inline cache clearing, and jettisoning. The probability of us wanting to do at // least one of those things is probably quite close to 1. So we add one no matter what @@ -1950,8 +2104,9 @@ void CodeBlock::finalizeUnconditionally() #if ENABLE(LLINT) Interpreter* interpreter = m_globalData->interpreter; if (!!numberOfInstructions()) { - for (size_t size = m_propertyAccessInstructions.size(), i = 0; i < size; ++i) { - Instruction* curInstruction = &instructions()[m_propertyAccessInstructions[i]]; + const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions(); + for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) { + Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]]; switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) { case op_get_by_id: case op_get_by_id_out_of_line: @@ -2061,36 +2216,6 @@ void CodeBlock::finalizeUnconditionally() resetStubInternal(repatchBuffer, stubInfo); } - - for (size_t size = m_methodCallLinkInfos.size(), i = 0; i < size; ++i) { - if (!m_methodCallLinkInfos[i].cachedStructure) - continue; - - ASSERT(m_methodCallLinkInfos[i].seenOnce()); - ASSERT(!!m_methodCallLinkInfos[i].cachedPrototypeStructure); - - if (!Heap::isMarked(m_methodCallLinkInfos[i].cachedStructure.get()) - || !Heap::isMarked(m_methodCallLinkInfos[i].cachedPrototypeStructure.get()) - || !Heap::isMarked(m_methodCallLinkInfos[i].cachedFunction.get()) - || !Heap::isMarked(m_methodCallLinkInfos[i].cachedPrototype.get())) { - if (verboseUnlinking) - dataLog("Clearing method call in %p.\n", this); - m_methodCallLinkInfos[i].reset(repatchBuffer, getJITType()); - - StructureStubInfo& stubInfo = getStubInfo(m_methodCallLinkInfos[i].bytecodeIndex); - - AccessType accessType = static_cast<AccessType>(stubInfo.accessType); - - if (accessType != access_unset) { - ASSERT(isGetByIdAccess(accessType)); - if (getJITCode().jitType() == JITCode::DFGJIT) - DFG::dfgResetGetByID(repatchBuffer, stubInfo); - else - JIT::resetPatchGetById(repatchBuffer, &stubInfo); - stubInfo.reset(); - } - } - } } #endif } @@ -2133,14 +2258,9 @@ void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor) { visitor.append(&m_globalObject); visitor.append(&m_ownerExecutable); - visitor.append(&m_symbolTable); - if (m_rareData) { + visitor.append(&m_unlinkedCode); + if (m_rareData) m_rareData->m_evalCodeCache.visitAggregate(visitor); - size_t regExpCount = m_rareData->m_regexps.size(); - WriteBarrier<RegExp>* regexps = m_rareData->m_regexps.data(); - for (size_t i = 0; i < regExpCount; i++) - visitor.append(regexps + i); - } visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size()); for (size_t i = 0; i < m_functionExprs.size(); ++i) visitor.append(&m_functionExprs[i]); @@ -2267,76 +2387,27 @@ HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset) int CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset) { ASSERT(bytecodeOffset < instructions().size()); - - Vector<LineInfo>& lineInfo = m_lineInfo; - - int low = 0; - int high = lineInfo.size(); - while (low < high) { - int mid = low + (high - low) / 2; - if (lineInfo[mid].instructionOffset <= bytecodeOffset) - low = mid + 1; - else - high = mid; - } - - if (!low) - return m_ownerExecutable->source().firstLine(); - 
return lineInfo[low - 1].lineNumber; + return m_ownerExecutable->lineNo() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset); } void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset) { - ASSERT(bytecodeOffset < instructions().size()); - - if (!m_rareData) { - startOffset = 0; - endOffset = 0; - divot = 0; - return; - } - - Vector<ExpressionRangeInfo>& expressionInfo = m_rareData->m_expressionInfo; - - int low = 0; - int high = expressionInfo.size(); - while (low < high) { - int mid = low + (high - low) / 2; - if (expressionInfo[mid].instructionOffset <= bytecodeOffset) - low = mid + 1; - else - high = mid; - } - - ASSERT(low); - if (!low) { - startOffset = 0; - endOffset = 0; - divot = 0; - return; - } - - startOffset = expressionInfo[low - 1].startOffset; - endOffset = expressionInfo[low - 1].endOffset; - divot = expressionInfo[low - 1].divotPoint + m_sourceOffset; - return; + m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset); + divot += m_sourceOffset; } void CodeBlock::shrinkToFit(ShrinkMode shrinkMode) { - m_propertyAccessInstructions.shrinkToFit(); #if ENABLE(LLINT) m_llintCallLinkInfos.shrinkToFit(); #endif #if ENABLE(JIT) m_structureStubInfos.shrinkToFit(); m_callLinkInfos.shrinkToFit(); - m_methodCallLinkInfos.shrinkToFit(); #endif #if ENABLE(VALUE_PROFILER) if (shrinkMode == EarlyShrink) m_argumentValueProfiles.shrinkToFit(); - m_valueProfiles.shrinkToFit(); m_rareCaseProfiles.shrinkToFit(); m_specialFastCaseProfiles.shrinkToFit(); #endif @@ -2348,15 +2419,11 @@ void CodeBlock::shrinkToFit(ShrinkMode shrinkMode) m_constantRegisters.shrinkToFit(); } // else don't shrink these, because we would have already pointed pointers into these tables. - m_resolveOperations.shrinkToFit(); - m_lineInfo.shrinkToFit(); if (m_rareData) { m_rareData->m_exceptionHandlers.shrinkToFit(); - m_rareData->m_regexps.shrinkToFit(); m_rareData->m_immediateSwitchJumpTables.shrinkToFit(); m_rareData->m_characterSwitchJumpTables.shrinkToFit(); m_rareData->m_stringSwitchJumpTables.shrinkToFit(); - m_rareData->m_expressionInfo.shrinkToFit(); #if ENABLE(JIT) m_rareData->m_callReturnIndexVector.shrinkToFit(); #endif @@ -2384,7 +2451,7 @@ void CodeBlock::createActivation(CallFrame* callFrame) ASSERT(codeType() == FunctionCode); ASSERT(needsFullScopeChain()); ASSERT(!callFrame->uncheckedR(activationRegister()).jsValue()); - JSActivation* activation = JSActivation::create(callFrame->globalData(), callFrame, static_cast<FunctionExecutable*>(ownerExecutable())); + JSActivation* activation = JSActivation::create(callFrame->globalData(), callFrame, this); callFrame->uncheckedR(activationRegister()) = JSValue(activation); callFrame->setScope(activation); } @@ -2410,7 +2477,7 @@ void CodeBlock::unlinkCalls() m_llintCallLinkInfos[i].unlink(); } #endif - if (!(m_callLinkInfos.size() || m_methodCallLinkInfos.size())) + if (!m_callLinkInfos.size()) return; if (!m_globalData->canUseJIT()) return; @@ -2725,7 +2792,7 @@ void CodeBlock::updateAllPredictionsAndCountLiveness( // site also has a value profile site - so we already know whether or not it's // live. 
for (unsigned i = m_arrayProfiles.size(); i--;) - m_arrayProfiles[i].computeUpdatedPrediction(operation); + m_arrayProfiles[i].computeUpdatedPrediction(this, operation); } void CodeBlock::updateAllPredictions(OperationInProgress operation) @@ -2879,8 +2946,8 @@ bool CodeBlock::usesOpcode(OpcodeID opcodeID) String CodeBlock::nameForRegister(int registerNumber) { - SymbolTable::iterator end = m_symbolTable->end(); - for (SymbolTable::iterator ptr = m_symbolTable->begin(); ptr != end; ++ptr) { + SymbolTable::iterator end = symbolTable()->end(); + for (SymbolTable::iterator ptr = symbolTable()->begin(); ptr != end; ++ptr) { if (ptr->value.getIndex() == registerNumber) return String(ptr->key); } diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.h b/Source/JavaScriptCore/bytecode/CodeBlock.h index fe588c787..a28064940 100644 --- a/Source/JavaScriptCore/bytecode/CodeBlock.h +++ b/Source/JavaScriptCore/bytecode/CodeBlock.h @@ -50,7 +50,6 @@ #include "ExecutionCounter.h" #include "ExpressionRangeInfo.h" #include "HandlerInfo.h" -#include "MethodCallLinkInfo.h" #include "Options.h" #include "Instruction.h" #include "JITCode.h" @@ -121,7 +120,7 @@ namespace JSC { protected: CodeBlock(CopyParsedBlockTag, CodeBlock& other); - CodeBlock(ScriptExecutable* ownerExecutable, CodeType, JSGlobalObject*, PassRefPtr<SourceProvider>, unsigned sourceOffset, bool isConstructor, PassOwnPtr<CodeBlock> alternative); + CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSGlobalObject*, unsigned baseScopeDepth, PassRefPtr<SourceProvider>, unsigned sourceOffset, PassOwnPtr<CodeBlock> alternative); WriteBarrier<JSGlobalObject> m_globalObject; Heap* m_heap; @@ -131,7 +130,6 @@ namespace JSC { int numParameters() const { return m_numParameters; } void setNumParameters(int newValue); - void addParameter(); int* addressOfNumParameters() { return &m_numParameters; } static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); } @@ -249,16 +247,6 @@ namespace JSC { { return *(binarySearch<CallLinkInfo, unsigned, getCallLinkInfoBytecodeIndex>(m_callLinkInfos.begin(), m_callLinkInfos.size(), bytecodeIndex)); } - - MethodCallLinkInfo& getMethodCallLinkInfo(ReturnAddressPtr returnAddress) - { - return *(binarySearch<MethodCallLinkInfo, void*, getMethodCallLinkInfoReturnLocation>(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), returnAddress.value())); - } - - MethodCallLinkInfo& getMethodCallLinkInfo(unsigned bytecodeIndex) - { - return *(binarySearch<MethodCallLinkInfo, unsigned, getMethodCallLinkInfoBytecodeIndex>(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), bytecodeIndex)); - } #endif // ENABLE(JIT) #if ENABLE(LLINT) @@ -445,8 +433,7 @@ namespace JSC { return static_cast<Instruction*>(returnAddress) - instructions().begin(); } - void setIsNumericCompareFunction(bool isNumericCompareFunction) { m_isNumericCompareFunction = isNumericCompareFunction; } - bool isNumericCompareFunction() { return m_isNumericCompareFunction; } + bool isNumericCompareFunction() { return m_unlinkedCode->isNumericCompareFunction(); } unsigned numberOfInstructions() const { return m_instructions.size(); } RefCountedArray<Instruction>& instructions() { return m_instructions; } @@ -535,10 +522,8 @@ namespace JSC { void setThisRegister(int thisRegister) { m_thisRegister = thisRegister; } int thisRegister() const { return m_thisRegister; } - void setNeedsFullScopeChain(bool needsFullScopeChain) { m_needsFullScopeChain = needsFullScopeChain; } - bool needsFullScopeChain() const { 
return m_needsFullScopeChain; } - void setUsesEval(bool usesEval) { m_usesEval = usesEval; } - bool usesEval() const { return m_usesEval; } + bool needsFullScopeChain() const { return m_unlinkedCode->needsFullScopeChain(); } + bool usesEval() const { return m_unlinkedCode->usesEval(); } void setArgumentsRegister(int argumentsRegister) { @@ -600,37 +585,28 @@ namespace JSC { if (usesArguments() && operand == unmodifiedArgumentsRegister(argumentsRegister())) return true; - return operand >= m_symbolTable->captureStart() - && operand < m_symbolTable->captureEnd(); + // We're in global code so there are no locals to capture + if (!symbolTable()) + return false; + + return operand >= symbolTable()->captureStart() + && operand < symbolTable()->captureEnd(); } - CodeType codeType() const { return m_codeType; } + CodeType codeType() const { return m_unlinkedCode->codeType(); } SourceProvider* source() const { return m_source.get(); } unsigned sourceOffset() const { return m_sourceOffset; } - size_t numberOfJumpTargets() const { return m_jumpTargets.size(); } - void addJumpTarget(unsigned jumpTarget) { m_jumpTargets.append(jumpTarget); } - unsigned jumpTarget(int index) const { return m_jumpTargets[index]; } - unsigned lastJumpTarget() const { return m_jumpTargets.last(); } + size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); } + unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); } void createActivation(CallFrame*); void clearEvalCache(); String nameForRegister(int registerNumber); - - void addPropertyAccessInstruction(unsigned propertyAccessInstruction) - { - m_propertyAccessInstructions.append(propertyAccessInstruction); - } -#if ENABLE(LLINT) - LLIntCallLinkInfo* addLLIntCallLinkInfo() - { - m_llintCallLinkInfos.append(LLIntCallLinkInfo()); - return &m_llintCallLinkInfos.last(); - } -#endif + #if ENABLE(JIT) void setNumberOfStructureStubInfos(size_t size) { m_structureStubInfos.grow(size); } size_t numberOfStructureStubInfos() const { return m_structureStubInfos.size(); } @@ -643,10 +619,6 @@ namespace JSC { void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.grow(size); } size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); } CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; } - - void addMethodCallLinkInfos(unsigned n) { ASSERT(m_globalData->canUseJIT()); m_methodCallLinkInfos.grow(n); } - MethodCallLinkInfo& methodCallLinkInfo(int index) { return m_methodCallLinkInfos[index]; } - size_t numberOfMethodCallLinkInfos() { return m_methodCallLinkInfos.size(); } #endif #if ENABLE(VALUE_PROFILER) @@ -662,14 +634,7 @@ namespace JSC { ASSERT(result->m_bytecodeOffset == -1); return result; } - - ValueProfile* addValueProfile(int bytecodeOffset) - { - ASSERT(bytecodeOffset != -1); - ASSERT(m_valueProfiles.isEmpty() || m_valueProfiles.last().m_bytecodeOffset < bytecodeOffset); - m_valueProfiles.append(ValueProfile(bytecodeOffset)); - return &m_valueProfiles.last(); - } + unsigned numberOfValueProfiles() { return m_valueProfiles.size(); } ValueProfile* valueProfile(int index) { @@ -795,25 +760,24 @@ namespace JSC { // Exception handling support size_t numberOfExceptionHandlers() const { return m_rareData ? 
m_rareData->m_exceptionHandlers.size() : 0; } - void addExceptionHandler(const HandlerInfo& hanler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(hanler); } - HandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; } - - void addExpressionInfo(const ExpressionRangeInfo& expressionInfo) + void allocateHandlers(const Vector<UnlinkedHandlerInfo>& unlinkedHandlers) { + size_t count = unlinkedHandlers.size(); + if (!count) + return; createRareDataIfNecessary(); - m_rareData->m_expressionInfo.append(expressionInfo); - } - - void addLineInfo(unsigned bytecodeOffset, int lineNo) - { - Vector<LineInfo>& lineInfo = m_lineInfo; - if (!lineInfo.size() || lineInfo.last().lineNumber != lineNo) { - LineInfo info = { bytecodeOffset, lineNo }; - lineInfo.append(info); + m_rareData->m_exceptionHandlers.resize(count); + for (size_t i = 0; i < count; ++i) { + m_rareData->m_exceptionHandlers[i].start = unlinkedHandlers[i].start; + m_rareData->m_exceptionHandlers[i].end = unlinkedHandlers[i].end; + m_rareData->m_exceptionHandlers[i].target = unlinkedHandlers[i].target; + m_rareData->m_exceptionHandlers[i].scopeDepth = unlinkedHandlers[i].scopeDepth; } + } + HandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; } - bool hasExpressionInfo() { return m_rareData && m_rareData->m_expressionInfo.size(); } + bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); } #if ENABLE(JIT) Vector<CallReturnOffsetToBytecodeOffset>& callReturnIndexVector() @@ -888,6 +852,8 @@ namespace JSC { m_constantRegisters.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), v); return result; } + + unsigned addOrFindConstant(JSValue); WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; } ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; } @@ -911,20 +877,7 @@ namespace JSC { } FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); } - unsigned addRegExp(RegExp* r) - { - createRareDataIfNecessary(); - unsigned size = m_rareData->m_regexps.size(); - m_rareData->m_regexps.append(WriteBarrier<RegExp>(*m_globalData, ownerExecutable(), r)); - return size; - } - unsigned numberOfRegExps() const - { - if (!m_rareData) - return 0; - return m_rareData->m_regexps.size(); - } - RegExp* regexp(int index) const { ASSERT(m_rareData); return m_rareData->m_regexps[index].get(); } + RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); } unsigned numberOfConstantBuffers() const { @@ -939,10 +892,6 @@ namespace JSC { m_rareData->m_constantBuffers.append(buffer); return size; } - unsigned addConstantBuffer(unsigned length) - { - return addConstantBuffer(Vector<JSValue>(length)); - } Vector<JSValue>& constantBufferAsVector(unsigned index) { @@ -979,7 +928,7 @@ namespace JSC { StringJumpTable& stringSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; } - SharedSymbolTable* symbolTable() { return m_symbolTable.get(); } + SharedSymbolTable* symbolTable() const { return m_unlinkedCode->symbolTable(); } EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; } @@ -1216,9 +1165,6 @@ namespace JSC { int m_numVars; bool m_isConstructor; - int globalObjectConstant() const { return m_globalObjectConstant; } - void setGlobalObjectConstant(int 
globalRegister) { m_globalObjectConstant = globalRegister; } - protected: #if ENABLE(JIT) virtual bool jitCompileImpl(ExecState*) = 0; @@ -1226,6 +1172,8 @@ namespace JSC { virtual void visitWeakReferences(SlotVisitor&); virtual void finalizeUnconditionally(); + UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); } + private: friend class DFGCodeBlocks; @@ -1237,7 +1185,21 @@ namespace JSC { #if ENABLE(VALUE_PROFILER) void updateAllPredictionsAndCountLiveness(OperationInProgress, unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles); #endif - + + void setIdentifiers(const Vector<Identifier>& identifiers) + { + ASSERT(m_identifiers.isEmpty()); + m_identifiers.appendVector(identifiers); + } + + void setConstantRegisters(const Vector<WriteBarrier<Unknown> >& constants) + { + size_t count = constants.size(); + m_constantRegisters.resize(count); + for (size_t i = 0; i < count; i++) + m_constantRegisters[i].set(*m_globalData, ownerExecutable(), constants[i].get()); + } + void dump(ExecState*, const Vector<Instruction>::const_iterator& begin, Vector<Instruction>::const_iterator&); CString registerName(ExecState*, int r) const; @@ -1287,30 +1249,21 @@ namespace JSC { #if ENABLE(JIT) void resetStubInternal(RepatchBuffer&, StructureStubInfo&); #endif - + WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode; int m_numParameters; - WriteBarrier<ScriptExecutable> m_ownerExecutable; JSGlobalData* m_globalData; RefCountedArray<Instruction> m_instructions; - int m_thisRegister; int m_argumentsRegister; int m_activationRegister; - int m_globalObjectConstant; - bool m_needsFullScopeChain; - bool m_usesEval; - bool m_isNumericCompareFunction; bool m_isStrictMode; - CodeType m_codeType; - RefPtr<SourceProvider> m_source; unsigned m_sourceOffset; - Vector<unsigned> m_propertyAccessInstructions; #if ENABLE(LLINT) SegmentedVector<LLIntCallLinkInfo, 8> m_llintCallLinkInfos; SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo> > m_incomingLLIntCalls; @@ -1319,7 +1272,6 @@ namespace JSC { Vector<StructureStubInfo> m_structureStubInfos; Vector<ByValInfo> m_byValInfos; Vector<CallLinkInfo> m_callLinkInfos; - Vector<MethodCallLinkInfo> m_methodCallLinkInfos; JITCode m_jitCode; MacroAssemblerCodePtr m_jitCodeWithArityCheck; SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo> > m_incomingCalls; @@ -1382,18 +1334,15 @@ namespace JSC { unsigned m_executionEntryCount; #endif - Vector<unsigned> m_jumpTargets; - Vector<unsigned> m_loopTargets; - // Constant Pool Vector<Identifier> m_identifiers; COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown); + // TODO: This could just be a pointer to m_unlinkedCodeBlock's data, but the DFG mutates + // it, so we're stuck with it for now. 
Vector<WriteBarrier<Unknown> > m_constantRegisters; Vector<WriteBarrier<FunctionExecutable> > m_functionDecls; Vector<WriteBarrier<FunctionExecutable> > m_functionExprs; - WriteBarrier<SharedSymbolTable> m_symbolTable; - OwnPtr<CodeBlock> m_alternative; ExecutionCounter m_llintExecuteCounter; @@ -1404,22 +1353,14 @@ namespace JSC { uint16_t m_optimizationDelayCounter; uint16_t m_reoptimizationRetryCounter; - Vector<LineInfo> m_lineInfo; -#if ENABLE(BYTECODE_COMMENTS) - Vector<Comment> m_bytecodeComments; - size_t m_bytecodeCommentIterator; -#endif Vector<ResolveOperations> m_resolveOperations; - Vector<PutToBaseOperation> m_putToBaseOperations; + Vector<PutToBaseOperation, 1> m_putToBaseOperations; struct RareData { WTF_MAKE_FAST_ALLOCATED; public: Vector<HandlerInfo> m_exceptionHandlers; - // Rare Constants - Vector<WriteBarrier<RegExp> > m_regexps; - // Buffers used for large array literals Vector<Vector<JSValue> > m_constantBuffers; @@ -1430,9 +1371,6 @@ namespace JSC { EvalCodeCache m_evalCodeCache; - // Expression info - present if debugging. - Vector<ExpressionRangeInfo> m_expressionInfo; - // Line info - present if profiling or debugging. #if ENABLE(JIT) Vector<CallReturnOffsetToBytecodeOffset> m_callReturnIndexVector; #endif @@ -1460,8 +1398,8 @@ namespace JSC { { } - GlobalCodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, PassOwnPtr<CodeBlock> alternative) - : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, false, alternative) + GlobalCodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, unsigned baseScopeDepth, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, PassOwnPtr<CodeBlock> alternative) + : CodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, baseScopeDepth, sourceProvider, sourceOffset, alternative) { } }; @@ -1473,11 +1411,11 @@ namespace JSC { { } - ProgramCodeBlock(ProgramExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, PassOwnPtr<CodeBlock> alternative) - : GlobalCodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, 0, alternative) + ProgramCodeBlock(ProgramExecutable* ownerExecutable, UnlinkedProgramCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, PassOwnPtr<CodeBlock> alternative) + : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, 0, sourceProvider, 0, alternative) { } - + #if ENABLE(JIT) protected: virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex); @@ -1492,26 +1430,16 @@ namespace JSC { public: EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other) : GlobalCodeBlock(CopyParsedBlock, other) - , m_baseScopeDepth(other.m_baseScopeDepth) - , m_variables(other.m_variables) { } - EvalCodeBlock(EvalExecutable* ownerExecutable, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, int baseScopeDepth, PassOwnPtr<CodeBlock> alternative) - : GlobalCodeBlock(ownerExecutable, EvalCode, globalObject, sourceProvider, 0, alternative) - , m_baseScopeDepth(baseScopeDepth) + EvalCodeBlock(EvalExecutable* ownerExecutable, UnlinkedEvalCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, int baseScopeDepth, PassOwnPtr<CodeBlock> alternative) + : GlobalCodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, 
baseScopeDepth, sourceProvider, 0, alternative) { } - int baseScopeDepth() const { return m_baseScopeDepth; } - - const Identifier& variable(unsigned index) { return m_variables[index]; } - unsigned numVariables() { return m_variables.size(); } - void adoptVariables(Vector<Identifier>& variables) - { - ASSERT(m_variables.isEmpty()); - m_variables.swap(variables); - } + const Identifier& variable(unsigned index) { return unlinkedEvalCodeBlock()->variable(index); } + unsigned numVariables() { return unlinkedEvalCodeBlock()->numVariables(); } #if ENABLE(JIT) protected: @@ -1523,8 +1451,7 @@ namespace JSC { #endif private: - int m_baseScopeDepth; - Vector<Identifier> m_variables; + UnlinkedEvalCodeBlock* unlinkedEvalCodeBlock() const { return jsCast<UnlinkedEvalCodeBlock*>(unlinkedCodeBlock()); } }; class FunctionCodeBlock : public CodeBlock { @@ -1534,8 +1461,8 @@ namespace JSC { { } - FunctionCodeBlock(FunctionExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, bool isConstructor, PassOwnPtr<CodeBlock> alternative = nullptr) - : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, isConstructor, alternative) + FunctionCodeBlock(FunctionExecutable* ownerExecutable, UnlinkedFunctionCodeBlock* unlinkedCodeBlock, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, PassOwnPtr<CodeBlock> alternative = nullptr) + : CodeBlock(ownerExecutable, unlinkedCodeBlock, globalObject, 0, sourceProvider, sourceOffset, alternative) { } diff --git a/Source/JavaScriptCore/bytecode/DFGExitProfile.h b/Source/JavaScriptCore/bytecode/DFGExitProfile.h index 57fb06bda..60d313ad4 100644 --- a/Source/JavaScriptCore/bytecode/DFGExitProfile.h +++ b/Source/JavaScriptCore/bytecode/DFGExitProfile.h @@ -36,6 +36,7 @@ enum ExitKind { ExitKindUnset, BadType, // We exited because a type prediction was wrong. BadCache, // We exited because an inline cache was wrong. + BadWeakConstantCache, // We exited because a cache on a weak constant (usually a prototype) was wrong. BadIndexingType, // We exited because an indexing type was wrong. Overflow, // We exited because of overflow. NegativeZero, // We exited because we encountered negative zero. @@ -55,6 +56,8 @@ inline const char* exitKindToString(ExitKind kind) return "BadType"; case BadCache: return "BadCache"; + case BadWeakConstantCache: + return "BadWeakConstantCache"; case Overflow: return "Overflow"; case NegativeZero: diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp index e44568a26..605a81c2f 100644 --- a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp +++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp @@ -41,9 +41,6 @@ GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned #if ENABLE(LLINT) Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex; - if (instruction[0].u.opcode == LLInt::getOpcode(llint_op_method_check)) - instruction++; - if (instruction[0].u.opcode == LLInt::getOpcode(llint_op_get_array_length)) return GetByIdStatus(NoInformation, false); diff --git a/Source/JavaScriptCore/bytecode/MethodCallLinkInfo.cpp b/Source/JavaScriptCore/bytecode/MethodCallLinkInfo.cpp deleted file mode 100644 index 1fcf5850f..000000000 --- a/Source/JavaScriptCore/bytecode/MethodCallLinkInfo.cpp +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (C) 2012 Apple Inc. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "config.h" -#include "MethodCallLinkInfo.h" - -#if ENABLE(JIT) - -#include "JITStubs.h" -#include "RepatchBuffer.h" - -namespace JSC { - -void MethodCallLinkInfo::reset(RepatchBuffer& repatchBuffer, JITCode::JITType jitType) -{ - cachedStructure.clearToMaxUnsigned(); - cachedPrototype.clear(); - cachedPrototypeStructure.clearToMaxUnsigned(); - cachedFunction.clear(); - - ASSERT_UNUSED(jitType, jitType == JITCode::BaselineJIT); - - repatchBuffer.relink(callReturnLocation, cti_op_get_by_id_method_check); -} - -} // namespace JSC - -#endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/bytecode/MethodCallLinkInfo.h b/Source/JavaScriptCore/bytecode/MethodCallLinkInfo.h deleted file mode 100644 index 2243bc24e..000000000 --- a/Source/JavaScriptCore/bytecode/MethodCallLinkInfo.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (C) 2012 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef MethodCallLinkInfo_h -#define MethodCallLinkInfo_h - -#include "CodeLocation.h" -#include "JITCode.h" -#include "JITWriteBarrier.h" -#include <wtf/Platform.h> - -namespace JSC { - -#if ENABLE(JIT) - -class RepatchBuffer; - -struct MethodCallLinkInfo { - MethodCallLinkInfo() - : seen(false) - { - } - - bool seenOnce() - { - return seen; - } - - void setSeen() - { - seen = true; - } - - void reset(RepatchBuffer&, JITCode::JITType); - - unsigned bytecodeIndex; - CodeLocationCall callReturnLocation; - JITWriteBarrier<Structure> cachedStructure; - JITWriteBarrier<Structure> cachedPrototypeStructure; - // We'd like this to actually be JSFunction, but InternalFunction and JSFunction - // don't have a common parent class and we allow specialisation on both - JITWriteBarrier<JSObject> cachedFunction; - JITWriteBarrier<JSObject> cachedPrototype; - bool seen; -}; - -inline void* getMethodCallLinkInfoReturnLocation(MethodCallLinkInfo* methodCallLinkInfo) -{ - return methodCallLinkInfo->callReturnLocation.executableAddress(); -} - -inline unsigned getMethodCallLinkInfoBytecodeIndex(MethodCallLinkInfo* methodCallLinkInfo) -{ - return methodCallLinkInfo->bytecodeIndex; -} - -#endif // ENABLE(JIT) - -} // namespace JSC - -#endif // MethodCallLinkInfo_h diff --git a/Source/JavaScriptCore/bytecode/MethodCallLinkStatus.cpp b/Source/JavaScriptCore/bytecode/MethodCallLinkStatus.cpp deleted file mode 100644 index 795b41b69..000000000 --- a/Source/JavaScriptCore/bytecode/MethodCallLinkStatus.cpp +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright (C) 2012 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "config.h" -#include "MethodCallLinkStatus.h" - -#include "CodeBlock.h" - -namespace JSC { - -MethodCallLinkStatus MethodCallLinkStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex) -{ - UNUSED_PARAM(profiledBlock); - UNUSED_PARAM(bytecodeIndex); -#if ENABLE(JIT) && ENABLE(VALUE_PROFILER) - // NOTE: This does not have an LLInt fall-back because LLInt does not do any method - // call link caching. 
- if (!profiledBlock->numberOfMethodCallLinkInfos()) - return MethodCallLinkStatus(); - - MethodCallLinkInfo& methodCall = profiledBlock->getMethodCallLinkInfo(bytecodeIndex); - - if (!methodCall.seen || !methodCall.cachedStructure) - return MethodCallLinkStatus(); - - if (methodCall.cachedPrototype.get() == profiledBlock->globalObject()->methodCallDummy()) { - return MethodCallLinkStatus( - methodCall.cachedStructure.get(), - 0, - methodCall.cachedFunction.get(), - 0); - } - - return MethodCallLinkStatus( - methodCall.cachedStructure.get(), - methodCall.cachedPrototypeStructure.get(), - methodCall.cachedFunction.get(), - methodCall.cachedPrototype.get()); -#else // ENABLE(JIT) - return MethodCallLinkStatus(); -#endif // ENABLE(JIT) -} - -} // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/MethodCallLinkStatus.h b/Source/JavaScriptCore/bytecode/MethodCallLinkStatus.h deleted file mode 100644 index c3d11a1d8..000000000 --- a/Source/JavaScriptCore/bytecode/MethodCallLinkStatus.h +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (C) 2012 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef MethodCallLinkStatus_h -#define MethodCallLinkStatus_h - -namespace JSC { - -class CodeBlock; -class JSObject; -class Structure; - -class MethodCallLinkStatus { -public: - MethodCallLinkStatus() - : m_structure(0) - , m_prototypeStructure(0) - , m_function(0) - , m_prototype(0) - { - } - - MethodCallLinkStatus( - Structure* structure, - Structure* prototypeStructure, - JSObject* function, - JSObject* prototype) - : m_structure(structure) - , m_prototypeStructure(prototypeStructure) - , m_function(function) - , m_prototype(prototype) - { - if (!m_function) { - ASSERT(!m_structure); - ASSERT(!m_prototypeStructure); - ASSERT(!m_prototype); - } else - ASSERT(m_structure); - - ASSERT(!m_prototype == !m_prototypeStructure); - } - - static MethodCallLinkStatus computeFor(CodeBlock*, unsigned bytecodeIndex); - - bool isSet() const { return !!m_function; } - bool operator!() const { return !m_function; } - - bool needsPrototypeCheck() const { return !!m_prototype; } - - Structure* structure() { return m_structure; } - Structure* prototypeStructure() { return m_prototypeStructure; } - JSObject* function() const { return m_function; } - JSObject* prototype() const { return m_prototype; } - -private: - Structure* m_structure; - Structure* m_prototypeStructure; - JSObject* m_function; - JSObject* m_prototype; -}; - -} // namespace JSC - -#endif // MethodCallLinkStatus_h - diff --git a/Source/JavaScriptCore/bytecode/Opcode.h b/Source/JavaScriptCore/bytecode/Opcode.h index 3ce56c80e..8979d0b7b 100644 --- a/Source/JavaScriptCore/bytecode/Opcode.h +++ b/Source/JavaScriptCore/bytecode/Opcode.h @@ -118,7 +118,8 @@ namespace JSC { macro(op_put_to_base, 5) \ macro(op_put_to_base_variable, 5) \ \ - macro(op_init_global_const, 3) \ + macro(op_init_global_const_nop, 5) \ + macro(op_init_global_const, 5) \ macro(op_init_global_const_check, 5) \ macro(op_get_by_id, 9) /* has value profiling */ \ macro(op_get_by_id_out_of_line, 9) /* has value profiling */ \ @@ -190,7 +191,6 @@ namespace JSC { macro(op_ret, 2) \ macro(op_call_put_result, 3) /* has value profiling */ \ macro(op_ret_object_or_this, 3) \ - macro(op_method_check, 1) \ \ macro(op_construct, 6) \ macro(op_strcat, 4) \ @@ -205,7 +205,7 @@ namespace JSC { \ macro(op_catch, 2) \ macro(op_throw, 2) \ - macro(op_throw_reference_error, 2) \ + macro(op_throw_static_error, 3) \ \ macro(op_debug, 5) \ macro(op_profile_will_call, 2) \ diff --git a/Source/JavaScriptCore/bytecode/SpeculatedType.h b/Source/JavaScriptCore/bytecode/SpeculatedType.h index 9d2c61ae8..09ba9fdfa 100644 --- a/Source/JavaScriptCore/bytecode/SpeculatedType.h +++ b/Source/JavaScriptCore/bytecode/SpeculatedType.h @@ -284,6 +284,11 @@ inline bool mergeSpeculation(T& left, SpeculatedType right) return result; } +inline bool speculationChecked(SpeculatedType actual, SpeculatedType desired) +{ + return (actual | desired) == desired; +} + SpeculatedType speculationFromClassInfo(const ClassInfo*); SpeculatedType speculationFromStructure(Structure*); SpeculatedType speculationFromCell(JSCell*); diff --git a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp new file mode 100644 index 000000000..8aa48404a --- /dev/null +++ b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp @@ -0,0 +1,294 @@ +/* + * Copyright (C) 2012 Apple Inc. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" + +#include "UnlinkedCodeBlock.h" + +#include "BytecodeGenerator.h" +#include "ClassInfo.h" +#include "CodeCache.h" +#include "Executable.h" +#include "JSString.h" +#include "SourceProvider.h" +#include "Structure.h" +#include "SymbolTable.h" + +namespace JSC { + +const ClassInfo UnlinkedFunctionExecutable::s_info = { "UnlinkedFunctionExecutable", 0, 0, 0, CREATE_METHOD_TABLE(UnlinkedFunctionExecutable) }; +const ClassInfo UnlinkedCodeBlock::s_info = { "UnlinkedCodeBlock", 0, 0, 0, CREATE_METHOD_TABLE(UnlinkedCodeBlock) }; +const ClassInfo UnlinkedGlobalCodeBlock::s_info = { "UnlinkedGlobalCodeBlock", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(UnlinkedGlobalCodeBlock) }; +const ClassInfo UnlinkedProgramCodeBlock::s_info = { "UnlinkedProgramCodeBlock", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(UnlinkedProgramCodeBlock) }; +const ClassInfo UnlinkedEvalCodeBlock::s_info = { "UnlinkedEvalCodeBlock", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(UnlinkedEvalCodeBlock) }; +const ClassInfo UnlinkedFunctionCodeBlock::s_info = { "UnlinkedFunctionCodeBlock", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(UnlinkedFunctionCodeBlock) }; + +unsigned UnlinkedCodeBlock::addOrFindConstant(JSValue v) +{ + unsigned numberOfConstants = numberOfConstantRegisters(); + for (unsigned i = 0; i < numberOfConstants; ++i) { + if (getConstant(FirstConstantRegisterIndex + i) == v) + return i; + } + return addConstant(v); +} + +UnlinkedFunctionExecutable::UnlinkedFunctionExecutable(JSGlobalData* globalData, Structure* structure, const SourceCode& source, FunctionBodyNode* node) + : Base(*globalData, structure) + , m_numCapturedVariables(node->capturedVariableCount()) + , m_forceUsesArguments(node->usesArguments()) + , m_isInStrictContext(node->isStrictMode()) + , m_hasCapturedVariables(node->hasCapturedVariables()) + , m_name(node->ident()) + , m_inferredName(node->inferredName()) + , m_parameters(node->parameters()) + , m_firstLineOffset(node->firstLine() - source.firstLine()) + , m_lineCount(node->lastLine() - node->firstLine()) + , m_startOffset(node->source().startOffset() - source.startOffset()) + , m_sourceLength(node->source().length()) + , m_features(node->features()) + , m_functionNameIsInScopeToggle(node->functionNameIsInScopeToggle()) +{ +} + +void UnlinkedFunctionExecutable::visitChildren(JSCell* cell, SlotVisitor& visitor) +{ + UnlinkedFunctionExecutable* thisObject = 
jsCast<UnlinkedFunctionExecutable*>(cell); + ASSERT_GC_OBJECT_INHERITS(thisObject, &s_info); + COMPILE_ASSERT(StructureFlags & OverridesVisitChildren, OverridesVisitChildrenWithoutSettingFlag); + ASSERT(thisObject->structure()->typeInfo().overridesVisitChildren()); + Base::visitChildren(thisObject, visitor); + visitor.append(&thisObject->m_codeBlockForCall); + visitor.append(&thisObject->m_codeBlockForConstruct); + visitor.append(&thisObject->m_nameValue); + visitor.append(&thisObject->m_symbolTableForCall); + visitor.append(&thisObject->m_symbolTableForConstruct); +} + +FunctionExecutable* UnlinkedFunctionExecutable::link(JSGlobalData& globalData, const SourceCode& source, size_t lineOffset, size_t sourceOffset) +{ + unsigned firstLine = lineOffset + m_firstLineOffset; + unsigned startOffset = sourceOffset + m_startOffset; + SourceCode code(source.provider(), startOffset, startOffset + m_sourceLength, firstLine); + return FunctionExecutable::create(globalData, code, this, firstLine, firstLine + m_lineCount); +} + +UnlinkedFunctionExecutable* UnlinkedFunctionExecutable::fromGlobalCode(const Identifier& name, ExecState* exec, Debugger*, const SourceCode& source, JSObject** exception) +{ + ParserError error; + CodeCache* codeCache = exec->globalData().codeCache(); + UnlinkedFunctionExecutable* executable = codeCache->getFunctionExecutableFromGlobalCode(exec->globalData(), name, source, error); + if (error.m_type != ParserError::ErrorNone) { + *exception = error.toErrorObject(exec->lexicalGlobalObject(), source); + return 0; + } + + return executable; +} + +UnlinkedFunctionCodeBlock* UnlinkedFunctionExecutable::codeBlockFor(JSGlobalData& globalData, const SourceCode& source, CodeSpecializationKind specializationKind, DebuggerMode debuggerMode, ProfilerMode profilerMode, ParserError& error) +{ + switch (specializationKind) { + case CodeForCall: + if (m_codeBlockForCall) + return m_codeBlockForCall.get(); + break; + case CodeForConstruct: + if (m_codeBlockForConstruct) + return m_codeBlockForConstruct.get(); + break; + } + + UnlinkedFunctionCodeBlock* result = globalData.codeCache()->getFunctionCodeBlock(globalData, this, source, specializationKind, debuggerMode, profilerMode, error); + + if (error.m_type != ParserError::ErrorNone) + return 0; + + switch (specializationKind) { + case CodeForCall: + m_codeBlockForCall.set(globalData, this, result); + m_symbolTableForCall.set(globalData, this, result->symbolTable()); + break; + case CodeForConstruct: + m_codeBlockForConstruct.set(globalData, this, result); + m_symbolTableForConstruct.set(globalData, this, result->symbolTable()); + break; + } + return result; +} + +String UnlinkedFunctionExecutable::paramString() const +{ + FunctionParameters& parameters = *m_parameters; + StringBuilder builder; + for (size_t pos = 0; pos < parameters.size(); ++pos) { + if (!builder.isEmpty()) + builder.appendLiteral(", "); + builder.append(parameters[pos].string()); + } + return builder.toString(); +} + +UnlinkedCodeBlock::UnlinkedCodeBlock(JSGlobalData* globalData, Structure* structure, CodeType codeType, const ExecutableInfo& info) + : Base(*globalData, structure) + , m_numVars(0) + , m_numCalleeRegisters(0) + , m_numParameters(0) + , m_globalData(globalData) + , m_argumentsRegister(-1) + , m_needsFullScopeChain(info.m_needsActivation) + , m_usesEval(info.m_usesEval) + , m_isNumericCompareFunction(false) + , m_isStrictMode(info.m_isStrictMode) + , m_isConstructor(info.m_isConstructor) + , m_hasCapturedVariables(false) + , m_firstLine(0) + , m_lineCount(0) + , 
m_features(0) + , m_codeType(codeType) + , m_resolveOperationCount(0) + , m_putToBaseOperationCount(1) + , m_arrayProfileCount(0) + , m_valueProfileCount(0) + , m_llintCallLinkInfoCount(0) +#if ENABLE(BYTECODE_COMMENTS) + , m_bytecodeCommentIterator(0) +#endif +{ + +} + +void UnlinkedCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor) +{ + UnlinkedCodeBlock* thisObject = jsCast<UnlinkedCodeBlock*>(cell); + ASSERT_GC_OBJECT_INHERITS(thisObject, &s_info); + COMPILE_ASSERT(StructureFlags & OverridesVisitChildren, OverridesVisitChildrenWithoutSettingFlag); + ASSERT(thisObject->structure()->typeInfo().overridesVisitChildren()); + Base::visitChildren(thisObject, visitor); + visitor.append(&thisObject->m_symbolTable); + for (FunctionExpressionVector::iterator ptr = thisObject->m_functionDecls.begin(), end = thisObject->m_functionDecls.end(); ptr != end; ++ptr) + visitor.append(ptr); + for (FunctionExpressionVector::iterator ptr = thisObject->m_functionExprs.begin(), end = thisObject->m_functionExprs.end(); ptr != end; ++ptr) + visitor.append(ptr); + visitor.appendValues(thisObject->m_constantRegisters.data(), thisObject->m_constantRegisters.size()); + if (thisObject->m_rareData) { + for (size_t i = 0, end = thisObject->m_rareData->m_regexps.size(); i != end; i++) + visitor.append(&thisObject->m_rareData->m_regexps[i]); + } +} + +int UnlinkedCodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset) +{ + ASSERT(bytecodeOffset < instructions().size()); + Vector<LineInfo>& lineInfo = m_lineInfo; + + int low = 0; + int high = lineInfo.size(); + while (low < high) { + int mid = low + (high - low) / 2; + if (lineInfo[mid].instructionOffset <= bytecodeOffset) + low = mid + 1; + else + high = mid; + } + + if (!low) + return 0; + return lineInfo[low - 1].lineNumber; +} + +void UnlinkedCodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset) +{ + ASSERT(bytecodeOffset < instructions().size()); + + if (!m_expressionInfo.size()) { + startOffset = 0; + endOffset = 0; + divot = 0; + return; + } + + Vector<ExpressionRangeInfo>& expressionInfo = m_expressionInfo; + + int low = 0; + int high = expressionInfo.size(); + while (low < high) { + int mid = low + (high - low) / 2; + if (expressionInfo[mid].instructionOffset <= bytecodeOffset) + low = mid + 1; + else + high = mid; + } + + ASSERT(low); + if (!low) { + startOffset = 0; + endOffset = 0; + divot = 0; + return; + } + + startOffset = expressionInfo[low - 1].startOffset; + endOffset = expressionInfo[low - 1].endOffset; + divot = expressionInfo[low - 1].divotPoint; +} + +void UnlinkedProgramCodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor) +{ + UnlinkedProgramCodeBlock* thisObject = jsCast<UnlinkedProgramCodeBlock*>(cell); + ASSERT_GC_OBJECT_INHERITS(thisObject, &s_info); + COMPILE_ASSERT(StructureFlags & OverridesVisitChildren, OverridesVisitChildrenWithoutSettingFlag); + ASSERT(thisObject->structure()->typeInfo().overridesVisitChildren()); + Base::visitChildren(thisObject, visitor); + for (size_t i = 0, end = thisObject->m_functionDeclarations.size(); i != end; i++) + visitor.append(&thisObject->m_functionDeclarations[i].second); +} + +UnlinkedCodeBlock::~UnlinkedCodeBlock() +{ +} + +void UnlinkedProgramCodeBlock::destroy(JSCell* cell) +{ + jsCast<UnlinkedProgramCodeBlock*>(cell)->~UnlinkedProgramCodeBlock(); +} + +void UnlinkedEvalCodeBlock::destroy(JSCell* cell) +{ + jsCast<UnlinkedEvalCodeBlock*>(cell)->~UnlinkedEvalCodeBlock(); +} + +void 
UnlinkedFunctionCodeBlock::destroy(JSCell* cell) +{ + jsCast<UnlinkedFunctionCodeBlock*>(cell)->~UnlinkedFunctionCodeBlock(); +} + +void UnlinkedFunctionExecutable::destroy(JSCell* cell) +{ + jsCast<UnlinkedFunctionExecutable*>(cell)->~UnlinkedFunctionExecutable(); +} + +} + diff --git a/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h new file mode 100644 index 000000000..bf3f5fdff --- /dev/null +++ b/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h @@ -0,0 +1,700 @@ +/* + * Copyright (C) 2012 Apple Inc. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef UnlinkedCodeBlock_h +#define UnlinkedCodeBlock_h + +#include "BytecodeConventions.h" +#include "CodeSpecializationKind.h" +#include "CodeType.h" +#include "ExpressionRangeInfo.h" +#include "Identifier.h" +#include "JSCell.h" +#include "LineInfo.h" +#include "Nodes.h" +#include "RegExp.h" +#include "SpecialPointer.h" + +#include <wtf/RefCountedArray.h> +#include <wtf/Vector.h> + +namespace JSC { + +class Debugger; +class FunctionBodyNode; +class FunctionExecutable; +class FunctionParameters; +struct ParserError; +class ScriptExecutable; +class SourceCode; +class SourceProvider; +class SharedSymbolTable; +class UnlinkedCodeBlock; +class UnlinkedFunctionCodeBlock; + +typedef unsigned UnlinkedValueProfile; +typedef unsigned UnlinkedArrayProfile; +typedef unsigned UnlinkedLLIntCallLinkInfo; + +struct ExecutableInfo { + ExecutableInfo(bool needsActivation, bool usesEval, bool isStrictMode, bool isConstructor) + : m_needsActivation(needsActivation) + , m_usesEval(usesEval) + , m_isStrictMode(isStrictMode) + , m_isConstructor(isConstructor) + { + } + bool m_needsActivation; + bool m_usesEval; + bool m_isStrictMode; + bool m_isConstructor; +}; + +class UnlinkedFunctionExecutable : public JSCell { +public: + friend class CodeCache; + typedef JSCell Base; + static UnlinkedFunctionExecutable* create(JSGlobalData* globalData, const SourceCode& source, FunctionBodyNode* node) + { + UnlinkedFunctionExecutable* instance = new (NotNull, allocateCell<UnlinkedFunctionExecutable>(globalData->heap)) UnlinkedFunctionExecutable(globalData, globalData->unlinkedFunctionExecutableStructure.get(), source, node); + instance->finishCreation(*globalData); + return instance; + } + + const Identifier& name() const { return m_name; } + const Identifier& inferredName() const { return m_inferredName; } + JSString* nameValue() const { return m_nameValue.get(); } + SharedSymbolTable* symbolTable(CodeSpecializationKind kind) + { + return (kind == CodeForCall) ? 
m_symbolTableForCall.get() : m_symbolTableForConstruct.get(); + } + size_t parameterCount() const { return m_parameters->size(); } + bool isInStrictContext() const { return m_isInStrictContext; } + FunctionNameIsInScopeToggle functionNameIsInScopeToggle() const { return m_functionNameIsInScopeToggle; } + + unsigned firstLineOffset() const { return m_firstLineOffset; } + unsigned lineCount() const { return m_lineCount; } + unsigned startOffset() const { return m_startOffset; } + unsigned sourceLength() { return m_sourceLength; } + + String paramString() const; + + UnlinkedFunctionCodeBlock* codeBlockFor(JSGlobalData&, const SourceCode&, CodeSpecializationKind, DebuggerMode, ProfilerMode, ParserError&); + + static UnlinkedFunctionExecutable* fromGlobalCode(const Identifier&, ExecState*, Debugger*, const SourceCode&, JSObject** exception); + + FunctionExecutable* link(JSGlobalData&, const SourceCode&, size_t lineOffset, size_t sourceOffset); + + void clearCode() + { + m_symbolTableForCall.clear(); + m_symbolTableForConstruct.clear(); + m_codeBlockForCall.clear(); + m_codeBlockForConstruct.clear(); + } + + FunctionParameters* parameters() { return m_parameters.get(); } + + void recordParse(CodeFeatures features, bool hasCapturedVariables, int firstLine, int lastLine) + { + m_features = features; + m_hasCapturedVariables = hasCapturedVariables; + m_lineCount = lastLine - firstLine; + } + + bool forceUsesArguments() const { return m_forceUsesArguments; } + + CodeFeatures features() const { return m_features; } + bool hasCapturedVariables() const { return m_hasCapturedVariables; } + + static const bool needsDestruction = true; + static const bool hasImmortalStructure = true; + static void destroy(JSCell*); + +private: + UnlinkedFunctionExecutable(JSGlobalData*, Structure*, const SourceCode&, FunctionBodyNode*); + WriteBarrier<UnlinkedFunctionCodeBlock> m_codeBlockForCall; + WriteBarrier<UnlinkedFunctionCodeBlock> m_codeBlockForConstruct; + + unsigned m_numCapturedVariables : 29; + bool m_forceUsesArguments : 1; + bool m_isInStrictContext : 1; + bool m_hasCapturedVariables : 1; + + Identifier m_name; + Identifier m_inferredName; + WriteBarrier<JSString> m_nameValue; + WriteBarrier<SharedSymbolTable> m_symbolTableForCall; + WriteBarrier<SharedSymbolTable> m_symbolTableForConstruct; + RefPtr<FunctionParameters> m_parameters; + unsigned m_firstLineOffset; + unsigned m_lineCount; + unsigned m_startOffset; + unsigned m_sourceLength; + + CodeFeatures m_features; + + FunctionNameIsInScopeToggle m_functionNameIsInScopeToggle; + +protected: + void finishCreation(JSGlobalData& globalData) + { + Base::finishCreation(globalData); + m_nameValue.set(globalData, this, jsString(&globalData, name().string())); + } + + static void visitChildren(JSCell*, SlotVisitor&); + +public: + static Structure* createStructure(JSGlobalData& globalData, JSGlobalObject* globalObject, JSValue proto) + { + return Structure::create(globalData, globalObject, proto, TypeInfo(UnlinkedFunctionExecutableType, StructureFlags), &s_info); + } + + static const unsigned StructureFlags = OverridesVisitChildren | JSCell::StructureFlags; + + static const ClassInfo s_info; +}; + +struct UnlinkedStringJumpTable { + typedef HashMap<RefPtr<StringImpl>, int32_t> StringOffsetTable; + StringOffsetTable offsetTable; + + inline int32_t offsetForValue(StringImpl* value, int32_t defaultOffset) + { + StringOffsetTable::const_iterator end = offsetTable.end(); + StringOffsetTable::const_iterator loc = offsetTable.find(value); + if (loc == end) + return 
defaultOffset; + return loc->value; + } + +}; + +struct UnlinkedSimpleJumpTable { + Vector<int32_t> branchOffsets; + int32_t min; + + int32_t offsetForValue(int32_t value, int32_t defaultOffset); + void add(int32_t key, int32_t offset) + { + if (!branchOffsets[key]) + branchOffsets[key] = offset; + } +}; + +struct UnlinkedHandlerInfo { + uint32_t start; + uint32_t end; + uint32_t target; + uint32_t scopeDepth; +}; + +struct UnlinkedInstruction { + UnlinkedInstruction() { u.operand = 0; } + UnlinkedInstruction(OpcodeID opcode) { u.opcode = opcode; } + UnlinkedInstruction(int operand) { u.operand = operand; } + union { + OpcodeID opcode; + int32_t operand; + } u; +}; + +class UnlinkedCodeBlock : public JSCell { +public: + typedef JSCell Base; + static const bool needsDestruction = true; + static const bool hasImmortalStructure = true; + + enum { CallFunction, ApplyFunction }; + + bool isConstructor() const { return m_isConstructor; } + bool isStrictMode() const { return m_isStrictMode; } + bool usesEval() const { return m_usesEval; } + + bool needsFullScopeChain() const { return m_needsFullScopeChain; } + void setNeedsFullScopeChain(bool needsFullScopeChain) { m_needsFullScopeChain = needsFullScopeChain; } + + void addExpressionInfo(const ExpressionRangeInfo& expressionInfo) + { + m_expressionInfo.append(expressionInfo); + } + + void addLineInfo(unsigned bytecodeOffset, int lineNo) + { + Vector<LineInfo>& lineInfo = m_lineInfo; + if (!lineInfo.size() || lineInfo.last().lineNumber != lineNo) { + LineInfo info = { bytecodeOffset, lineNo }; + lineInfo.append(info); + } + } + + bool hasExpressionInfo() { return m_expressionInfo.size(); } + + // Special registers + void setThisRegister(int thisRegister) { m_thisRegister = thisRegister; } + void setActivationRegister(int activationRegister) { m_activationRegister = activationRegister; } + + void setArgumentsRegister(int argumentsRegister) { m_argumentsRegister = argumentsRegister; } + bool usesArguments() const { return m_argumentsRegister != -1; } + int argumentsRegister() const { return m_argumentsRegister; } + + // Parameter information + void setNumParameters(int newValue) { m_numParameters = newValue; } + void addParameter() { m_numParameters++; } + unsigned numParameters() const { return m_numParameters; } + + unsigned addRegExp(RegExp* r) + { + createRareDataIfNecessary(); + unsigned size = m_rareData->m_regexps.size(); + m_rareData->m_regexps.append(WriteBarrier<RegExp>(*m_globalData, this, r)); + return size; + } + unsigned numberOfRegExps() const + { + if (!m_rareData) + return 0; + return m_rareData->m_regexps.size(); + } + RegExp* regexp(int index) const { ASSERT(m_rareData); return m_rareData->m_regexps[index].get(); } + + // Constant Pools + + size_t numberOfIdentifiers() const { return m_identifiers.size(); } + void addIdentifier(const Identifier& i) { return m_identifiers.append(i); } + const Identifier& identifier(int index) const { return m_identifiers[index]; } + const Vector<Identifier>& identifiers() const { return m_identifiers; } + + size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); } + unsigned addConstant(JSValue v) + { + unsigned result = m_constantRegisters.size(); + m_constantRegisters.append(WriteBarrier<Unknown>()); + m_constantRegisters.last().set(*m_globalData, this, v); + return result; + } + unsigned addOrFindConstant(JSValue); + const Vector<WriteBarrier<Unknown> >& constantRegisters() { return m_constantRegisters; } + const WriteBarrier<Unknown>& constantRegister(int index) const { 
return m_constantRegisters[index - FirstConstantRegisterIndex]; } + ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; } + ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); } + + // Jumps + size_t numberOfJumpTargets() const { return m_jumpTargets.size(); } + void addJumpTarget(unsigned jumpTarget) { m_jumpTargets.append(jumpTarget); } + unsigned jumpTarget(int index) const { return m_jumpTargets[index]; } + unsigned lastJumpTarget() const { return m_jumpTargets.last(); } + + void setIsNumericCompareFunction(bool isNumericCompareFunction) { m_isNumericCompareFunction = isNumericCompareFunction; } + bool isNumericCompareFunction() const { return m_isNumericCompareFunction; } + + void shrinkToFit() + { + m_jumpTargets.shrinkToFit(); + m_identifiers.shrinkToFit(); + m_constantRegisters.shrinkToFit(); + m_functionDecls.shrinkToFit(); + m_functionExprs.shrinkToFit(); + m_lineInfo.shrinkToFit(); + m_propertyAccessInstructions.shrinkToFit(); + m_expressionInfo.shrinkToFit(); + +#if ENABLE(BYTECODE_COMMENTS) + m_bytecodeComments.shrinkToFit(); +#endif + if (m_rareData) { + m_rareData->m_exceptionHandlers.shrinkToFit(); + m_rareData->m_regexps.shrinkToFit(); + m_rareData->m_constantBuffers.shrinkToFit(); + m_rareData->m_immediateSwitchJumpTables.shrinkToFit(); + m_rareData->m_characterSwitchJumpTables.shrinkToFit(); + m_rareData->m_stringSwitchJumpTables.shrinkToFit(); + } + } + + unsigned numberOfInstructions() const { return m_unlinkedInstructions.size(); } + RefCountedArray<UnlinkedInstruction>& instructions() { return m_unlinkedInstructions; } + const RefCountedArray<UnlinkedInstruction>& instructions() const { return m_unlinkedInstructions; } + + int m_numVars; + int m_numCapturedVars; + int m_numCalleeRegisters; + + // Jump Tables + + size_t numberOfImmediateSwitchJumpTables() const { return m_rareData ? m_rareData->m_immediateSwitchJumpTables.size() : 0; } + UnlinkedSimpleJumpTable& addImmediateSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_immediateSwitchJumpTables.append(UnlinkedSimpleJumpTable()); return m_rareData->m_immediateSwitchJumpTables.last(); } + UnlinkedSimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; } + + size_t numberOfCharacterSwitchJumpTables() const { return m_rareData ? m_rareData->m_characterSwitchJumpTables.size() : 0; } + UnlinkedSimpleJumpTable& addCharacterSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_characterSwitchJumpTables.append(UnlinkedSimpleJumpTable()); return m_rareData->m_characterSwitchJumpTables.last(); } + UnlinkedSimpleJumpTable& characterSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; } + + size_t numberOfStringSwitchJumpTables() const { return m_rareData ? 
m_rareData->m_stringSwitchJumpTables.size() : 0; } + UnlinkedStringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(UnlinkedStringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); } + UnlinkedStringJumpTable& stringSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; } + + unsigned addFunctionDecl(UnlinkedFunctionExecutable* n) + { + unsigned size = m_functionDecls.size(); + m_functionDecls.append(WriteBarrier<UnlinkedFunctionExecutable>()); + m_functionDecls.last().set(*m_globalData, this, n); + return size; + } + UnlinkedFunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); } + size_t numberOfFunctionDecls() { return m_functionDecls.size(); } + unsigned addFunctionExpr(UnlinkedFunctionExecutable* n) + { + unsigned size = m_functionExprs.size(); + m_functionExprs.append(WriteBarrier<UnlinkedFunctionExecutable>()); + m_functionExprs.last().set(*m_globalData, this, n); + return size; + } + UnlinkedFunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); } + size_t numberOfFunctionExprs() { return m_functionExprs.size(); } + + // Exception handling support + size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; } + void addExceptionHandler(const UnlinkedHandlerInfo& hanler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(hanler); } + UnlinkedHandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; } + + SharedSymbolTable* symbolTable() const { return m_symbolTable.get(); } + + JSGlobalData* globalData() const { return m_globalData; } + + unsigned addResolve() { return m_resolveOperationCount++; } + unsigned numberOfResolveOperations() const { return m_resolveOperationCount; } + unsigned addPutToBase() { return m_putToBaseOperationCount++; } + unsigned numberOfPutToBaseOperations() const { return m_putToBaseOperationCount; } + + UnlinkedArrayProfile addArrayProfile() { return m_arrayProfileCount++; } + unsigned numberOfArrayProfiles() { return m_arrayProfileCount; } + UnlinkedValueProfile addValueProfile() { return m_valueProfileCount++; } + unsigned numberOfValueProfiles() { return m_valueProfileCount; } + + UnlinkedLLIntCallLinkInfo addLLIntCallLinkInfo() { return m_llintCallLinkInfoCount++; } + unsigned numberOfLLintCallLinkInfos() { return m_llintCallLinkInfoCount; } + + CodeType codeType() const { return m_codeType; } + + int thisRegister() const { return m_thisRegister; } + int activationRegister() const { return m_activationRegister; } + + + void addPropertyAccessInstruction(unsigned propertyAccessInstruction) + { + m_propertyAccessInstructions.append(propertyAccessInstruction); + } + + size_t numberOfPropertyAccessInstructions() const { return m_propertyAccessInstructions.size(); } + const Vector<unsigned>& propertyAccessInstructions() const { return m_propertyAccessInstructions; } + + typedef Vector<JSValue> ConstantBuffer; + + size_t constantBufferCount() { ASSERT(m_rareData); return m_rareData->m_constantBuffers.size(); } + unsigned addConstantBuffer(unsigned length) + { + createRareDataIfNecessary(); + unsigned size = m_rareData->m_constantBuffers.size(); + m_rareData->m_constantBuffers.append(Vector<JSValue>(length)); + return size; + } + + const ConstantBuffer& constantBuffer(unsigned index) const + { + ASSERT(m_rareData); + return 
m_rareData->m_constantBuffers[index]; + } + + ConstantBuffer& constantBuffer(unsigned index) + { + ASSERT(m_rareData); + return m_rareData->m_constantBuffers[index]; + } + + bool hasRareData() const { return m_rareData; } + + int lineNumberForBytecodeOffset(unsigned bytecodeOffset); + + void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset); + + void recordParse(CodeFeatures features, bool hasCapturedVariables, unsigned firstLine, unsigned lineCount) + { + m_features = features; + m_hasCapturedVariables = hasCapturedVariables; + m_firstLine = firstLine; + m_lineCount = lineCount; + } + + CodeFeatures codeFeatures() const { return m_features; } + bool hasCapturedVariables() const { return m_hasCapturedVariables; } + unsigned firstLine() const { return m_firstLine; } + unsigned lineCount() const { return m_lineCount; } + +protected: + UnlinkedCodeBlock(JSGlobalData*, Structure*, CodeType, const ExecutableInfo&); + ~UnlinkedCodeBlock(); + + void finishCreation(JSGlobalData& globalData) + { + Base::finishCreation(globalData); + if (codeType() == GlobalCode) + return; + m_symbolTable.set(globalData, this, SharedSymbolTable::create(globalData)); + } + +private: + + void createRareDataIfNecessary() + { + if (!m_rareData) + m_rareData = adoptPtr(new RareData); + } + + RefCountedArray<UnlinkedInstruction> m_unlinkedInstructions; + + int m_numParameters; + JSGlobalData* m_globalData; + + int m_thisRegister; + int m_argumentsRegister; + int m_activationRegister; + + bool m_needsFullScopeChain : 1; + bool m_usesEval : 1; + bool m_isNumericCompareFunction : 1; + bool m_isStrictMode : 1; + bool m_isConstructor : 1; + bool m_hasCapturedVariables : 1; + unsigned m_firstLine; + unsigned m_lineCount; + + CodeFeatures m_features; + CodeType m_codeType; + + Vector<unsigned> m_jumpTargets; + + // Constant Pools + Vector<Identifier> m_identifiers; + Vector<WriteBarrier<Unknown> > m_constantRegisters; + typedef Vector<WriteBarrier<UnlinkedFunctionExecutable> > FunctionExpressionVector; + FunctionExpressionVector m_functionDecls; + FunctionExpressionVector m_functionExprs; + + WriteBarrier<SharedSymbolTable> m_symbolTable; + + Vector<LineInfo> m_lineInfo; + + Vector<unsigned> m_propertyAccessInstructions; + +#if ENABLE(BYTECODE_COMMENTS) + Vector<Comment> m_bytecodeComments; + size_t m_bytecodeCommentIterator; +#endif + + unsigned m_resolveOperationCount; + unsigned m_putToBaseOperationCount; + unsigned m_arrayProfileCount; + unsigned m_valueProfileCount; + unsigned m_llintCallLinkInfoCount; + +public: + struct RareData { + WTF_MAKE_FAST_ALLOCATED; + public: + Vector<UnlinkedHandlerInfo> m_exceptionHandlers; + + // Rare Constants + Vector<WriteBarrier<RegExp> > m_regexps; + + // Buffers used for large array literals + Vector<ConstantBuffer> m_constantBuffers; + + // Jump Tables + Vector<UnlinkedSimpleJumpTable> m_immediateSwitchJumpTables; + Vector<UnlinkedSimpleJumpTable> m_characterSwitchJumpTables; + Vector<UnlinkedStringJumpTable> m_stringSwitchJumpTables; + + // Expression info - present if debugging. 
+ }; + +private: + OwnPtr<RareData> m_rareData; + Vector<ExpressionRangeInfo> m_expressionInfo; + +protected: + + static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags; + static void visitChildren(JSCell*, SlotVisitor&); + +public: + static const ClassInfo s_info; +}; + +class UnlinkedGlobalCodeBlock : public UnlinkedCodeBlock { +public: + typedef UnlinkedCodeBlock Base; + +protected: + UnlinkedGlobalCodeBlock(JSGlobalData* globalData, Structure* structure, CodeType codeType, const ExecutableInfo& info) + : Base(globalData, structure, codeType, info) + { + } + + static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags; + + static const ClassInfo s_info; +}; + +class UnlinkedProgramCodeBlock : public UnlinkedGlobalCodeBlock { +private: + friend class CodeCache; + static UnlinkedProgramCodeBlock* create(JSGlobalData* globalData, const ExecutableInfo& info) + { + UnlinkedProgramCodeBlock* instance = new (NotNull, allocateCell<UnlinkedProgramCodeBlock>(globalData->heap)) UnlinkedProgramCodeBlock(globalData, globalData->unlinkedProgramCodeBlockStructure.get(), info); + instance->finishCreation(*globalData); + return instance; + } + +public: + typedef UnlinkedGlobalCodeBlock Base; + static void destroy(JSCell*); + + void addFunctionDeclaration(JSGlobalData& globalData, const Identifier& name, UnlinkedFunctionExecutable* functionExecutable) + { + m_functionDeclarations.append(std::make_pair(name, WriteBarrier<UnlinkedFunctionExecutable>(globalData, this, functionExecutable))); + } + + void addVariableDeclaration(const Identifier& name, bool isConstant) + { + m_varDeclarations.append(std::make_pair(name, isConstant)); + } + + typedef Vector<std::pair<Identifier, bool> > VariableDeclations; + typedef Vector<std::pair<Identifier, WriteBarrier<UnlinkedFunctionExecutable> > > FunctionDeclations; + + const VariableDeclations& variableDeclarations() const { return m_varDeclarations; } + const FunctionDeclations& functionDeclarations() const { return m_functionDeclarations; } + + static void visitChildren(JSCell*, SlotVisitor&); + +private: + UnlinkedProgramCodeBlock(JSGlobalData* globalData, Structure* structure, const ExecutableInfo& info) + : Base(globalData, structure, GlobalCode, info) + { + } + + VariableDeclations m_varDeclarations; + FunctionDeclations m_functionDeclarations; + +public: + static Structure* createStructure(JSGlobalData& globalData, JSGlobalObject* globalObject, JSValue proto) + { + return Structure::create(globalData, globalObject, proto, TypeInfo(UnlinkedProgramCodeBlockType, StructureFlags), &s_info); + } + + static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags; + + static const ClassInfo s_info; +}; + +class UnlinkedEvalCodeBlock : public UnlinkedGlobalCodeBlock { +private: + friend class CodeCache; + + static UnlinkedEvalCodeBlock* create(JSGlobalData* globalData, const ExecutableInfo& info) + { + UnlinkedEvalCodeBlock* instance = new (NotNull, allocateCell<UnlinkedEvalCodeBlock>(globalData->heap)) UnlinkedEvalCodeBlock(globalData, globalData->unlinkedEvalCodeBlockStructure.get(), info); + instance->finishCreation(*globalData); + return instance; + } + +public: + typedef UnlinkedGlobalCodeBlock Base; + static void destroy(JSCell*); + + const Identifier& variable(unsigned index) { return m_variables[index]; } + unsigned numVariables() { return m_variables.size(); } + void adoptVariables(Vector<Identifier>& variables) + { + ASSERT(m_variables.isEmpty()); + m_variables.swap(variables); + } 
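(Illustration, not part of the patch.) The two global code block subclasses above deliberately hold only context-free data: UnlinkedProgramCodeBlock records (name, flag) pairs for its declarations instead of mutating a global object at compile time, and UnlinkedEvalCodeBlock takes ownership of its variable list by swapping. A rough usage sketch against the accessors declared above; the block instances and globalData are assumed to already exist (creation goes through the CodeCache), and the Identifier construction is illustrative only:

    // Program code: declarations are recorded now and replayed against a real
    // global object later, when the unlinked block is linked for execution.
    programBlock->addVariableDeclaration(Identifier(&globalData, "x"), /* isConstant */ false);
    for (size_t i = 0; i < programBlock->variableDeclarations().size(); ++i) {
        const Identifier& name = programBlock->variableDeclarations()[i].first;
        bool isConstant = programBlock->variableDeclarations()[i].second;
        // ... a linker/interpreter would define 'name' on a concrete JSGlobalObject here ...
        UNUSED_PARAM(name);
        UNUSED_PARAM(isConstant);
    }

    // Eval code: the variable list is swapped in wholesale, not copied.
    Vector<Identifier> evalVariables;
    evalVariables.append(Identifier(&globalData, "y"));
    evalBlock->adoptVariables(evalVariables); // 'evalVariables' is empty afterwards
    ASSERT(evalBlock->numVariables() == 1);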
+ +private: + UnlinkedEvalCodeBlock(JSGlobalData* globalData, Structure* structure, const ExecutableInfo& info) + : Base(globalData, structure, EvalCode, info) + { + } + + Vector<Identifier> m_variables; + +public: + static Structure* createStructure(JSGlobalData& globalData, JSGlobalObject* globalObject, JSValue proto) + { + return Structure::create(globalData, globalObject, proto, TypeInfo(UnlinkedEvalCodeBlockType, StructureFlags), &s_info); + } + + static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags; + + static const ClassInfo s_info; +}; + +class UnlinkedFunctionCodeBlock : public UnlinkedCodeBlock { +private: + friend class CodeCache; + + static UnlinkedFunctionCodeBlock* create(JSGlobalData* globalData, CodeType codeType, const ExecutableInfo& info) + { + UnlinkedFunctionCodeBlock* instance = new (NotNull, allocateCell<UnlinkedFunctionCodeBlock>(globalData->heap)) UnlinkedFunctionCodeBlock(globalData, globalData->unlinkedFunctionCodeBlockStructure.get(), codeType, info); + instance->finishCreation(*globalData); + return instance; + } + +public: + typedef UnlinkedCodeBlock Base; + static void destroy(JSCell*); + +private: + UnlinkedFunctionCodeBlock(JSGlobalData* globalData, Structure* structure, CodeType codeType, const ExecutableInfo& info) + : Base(globalData, structure, codeType, info) + { + } + +public: + static Structure* createStructure(JSGlobalData& globalData, JSGlobalObject* globalObject, JSValue proto) + { + return Structure::create(globalData, globalObject, proto, TypeInfo(UnlinkedFunctionCodeBlockType, StructureFlags), &s_info); + } + + static const unsigned StructureFlags = OverridesVisitChildren | Base::StructureFlags; + + static const ClassInfo s_info; +}; + +} + +#endif // UnlinkedCodeBlock_h diff --git a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp index 228277328..b11872551 100644 --- a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp +++ b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp @@ -158,7 +158,7 @@ bool BytecodeGenerator::dumpsGeneratedCode() return s_dumpsGeneratedCode; } -JSObject* BytecodeGenerator::generate() +ParserError BytecodeGenerator::generate() { SamplingRegion samplingRegion("Bytecode Generation"); @@ -169,36 +169,21 @@ JSObject* BytecodeGenerator::generate() for (unsigned i = 0; i < m_tryRanges.size(); ++i) { TryRange& range = m_tryRanges[i]; ASSERT(range.tryData->targetScopeDepth != UINT_MAX); - HandlerInfo info = { - range.start->bind(0, 0), range.end->bind(0, 0), - range.tryData->target->bind(0, 0), range.tryData->targetScopeDepth -#if ENABLE(JIT) - , -#if ENABLE(LLINT) - CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(llint_op_catch))) -#else - CodeLocationLabel() -#endif -#endif + UnlinkedHandlerInfo info = { + static_cast<uint32_t>(range.start->bind(0, 0)), static_cast<uint32_t>(range.end->bind(0, 0)), + static_cast<uint32_t>(range.tryData->target->bind(0, 0)), + range.tryData->targetScopeDepth }; m_codeBlock->addExceptionHandler(info); } - m_codeBlock->instructions() = RefCountedArray<Instruction>(m_instructions); - - if (s_dumpsGeneratedCode) - m_codeBlock->dump(m_scope->globalObject()->globalExec()); - -#ifdef NDEBUG - if ((m_codeType == FunctionCode && !m_codeBlock->needsFullScopeChain() && !m_codeBlock->usesArguments()) || m_codeType == EvalCode) - symbolTable().clear(); -#endif + m_codeBlock->instructions() = RefCountedArray<UnlinkedInstruction>(m_instructions); - 
m_codeBlock->shrinkToFit(CodeBlock::EarlyShrink); + m_codeBlock->shrinkToFit(); if (m_expressionTooDeep) - return createOutOfMemoryError(m_scope->globalObject()); - return 0; + return ParserError::OutOfMemory; + return ParserError::ErrorNone; } bool BytecodeGenerator::addVar(const Identifier& ident, bool isConstant, RegisterID*& r0) @@ -216,51 +201,32 @@ bool BytecodeGenerator::addVar(const Identifier& ident, bool isConstant, Registe return true; } -int BytecodeGenerator::addGlobalVar( - const Identifier& ident, ConstantMode constantMode, FunctionMode functionMode) -{ - UNUSED_PARAM(functionMode); - int index = symbolTable().size(); - SymbolTableEntry newEntry(index, (constantMode == IsConstant) ? ReadOnly : 0); - if (functionMode == IsFunctionToSpecialize) - newEntry.attemptToWatch(); - SymbolTable::AddResult result = symbolTable().add(ident.impl(), newEntry); - if (!result.isNewEntry) { - result.iterator->value.notifyWrite(); - index = result.iterator->value.getIndex(); - } - return index; -} - void BytecodeGenerator::preserveLastVar() { if ((m_firstConstantIndex = m_calleeRegisters.size()) != 0) m_lastVar = &m_calleeRegisters.last(); } -BytecodeGenerator::BytecodeGenerator(ProgramNode* programNode, JSScope* scope, SharedSymbolTable* symbolTable, ProgramCodeBlock* codeBlock, CompilationKind compilationKind) - : m_shouldEmitDebugHooks(scope->globalObject()->debugger()) - , m_shouldEmitProfileHooks(scope->globalObject()->globalObjectMethodTable()->supportsProfiling(scope->globalObject())) - , m_shouldEmitRichSourceInfo(scope->globalObject()->globalObjectMethodTable()->supportsRichSourceInfo(scope->globalObject())) - , m_scope(*scope->globalData(), scope) - , m_symbolTable(symbolTable) +BytecodeGenerator::BytecodeGenerator(JSGlobalData& globalData, ProgramNode* programNode, UnlinkedProgramCodeBlock* codeBlock, DebuggerMode debuggerMode, ProfilerMode profilerMode) + : m_shouldEmitDebugHooks(debuggerMode == DebuggerOn) + , m_shouldEmitProfileHooks(profilerMode == ProfilerOn) #if ENABLE(BYTECODE_COMMENTS) , m_currentCommentString(0) #endif + , m_symbolTable(0) , m_scopeNode(programNode) - , m_codeBlock(codeBlock) + , m_codeBlock(globalData, codeBlock) , m_thisRegister(CallFrame::thisArgumentOffset()) , m_emptyValueRegister(0) , m_finallyDepth(0) , m_dynamicScopeDepth(0) - , m_baseScopeDepth(0) , m_codeType(GlobalCode) , m_nextConstantOffset(0) , m_globalConstantIndex(0) , m_hasCreatedActivation(true) , m_firstLazyFunction(0) , m_lastLazyFunction(0) - , m_globalData(scope->globalData()) + , m_globalData(&globalData) , m_lastOpcodeID(op_end) #ifndef NDEBUG , m_lastOpcodePosition(0) @@ -269,82 +235,48 @@ BytecodeGenerator::BytecodeGenerator(ProgramNode* programNode, JSScope* scope, S , m_usesExceptions(false) , m_expressionTooDeep(false) { - m_globalData->startedCompiling(m_codeBlock); - m_codeBlock->setGlobalObjectConstant(emitLoad(0, JSValue(m_codeBlock->globalObject()))->index()); - if (m_shouldEmitDebugHooks) m_codeBlock->setNeedsFullScopeChain(true); - codeBlock->setGlobalData(m_globalData); - symbolTable->setUsesNonStrictEval(codeBlock->usesEval() && !codeBlock->isStrictMode()); m_codeBlock->setNumParameters(1); // Allocate space for "this" prependComment("entering Program block"); emitOpcode(op_enter); - // FIXME: Move code that modifies the global object to Interpreter::execute. 
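(Illustration, not part of the patch.) The generate() hunk above now reports failure through a ParserError value rather than by building a JSObject* error, since the unlinked layer has no global object to allocate the error in. A minimal sketch of the caller-side pattern, modelled on the fromGlobalCode() change earlier in this patch; the generator, exec, and source variables are assumed, and the throwError() call is illustrative:

    // Sketch, not the actual call site: 'generator' owns a BytecodeGenerator,
    // 'exec' is an ExecState*, 'source' is the SourceCode being compiled.
    ParserError error = generator->generate();
    if (error.m_type != ParserError::ErrorNone) {
        // Only now, with a lexical global object in hand, is a concrete error object built.
        JSObject* errorObject = error.toErrorObject(exec->lexicalGlobalObject(), source);
        throwError(exec, errorObject);
        return 0;
    }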
- - if (compilationKind == OptimizingCompilation) - return; - - JSGlobalObject* globalObject = scope->globalObject(); - ExecState* exec = globalObject->globalExec(); - - BatchedTransitionOptimizer optimizer(*m_globalData, globalObject); - const VarStack& varStack = programNode->varStack(); const FunctionStack& functionStack = programNode->functionStack(); - size_t newGlobals = varStack.size() + functionStack.size(); - if (!newGlobals) - return; - globalObject->addRegisters(newGlobals); - for (size_t i = 0; i < functionStack.size(); ++i) { FunctionBodyNode* function = functionStack[i]; - bool propertyDidExist = - globalObject->removeDirect(*m_globalData, function->ident()); // Newly declared functions overwrite existing properties. - - JSValue value = JSFunction::create(exec, FunctionExecutable::create(*m_globalData, function), scope); - int index = addGlobalVar( - function->ident(), IsVariable, - !propertyDidExist ? IsFunctionToSpecialize : NotFunctionOrNotSpecializable); - globalObject->registerAt(index).set(*m_globalData, globalObject, value); + UnlinkedFunctionExecutable* unlinkedFunction = makeFunction(function); + codeBlock->addFunctionDeclaration(*m_globalData, function->ident(), unlinkedFunction); } - for (size_t i = 0; i < varStack.size(); ++i) { - if (globalObject->hasProperty(exec, *varStack[i].first)) - continue; - addGlobalVar( - *varStack[i].first, - (varStack[i].second & DeclarationStacks::IsConstant) ? IsConstant : IsVariable, - NotFunctionOrNotSpecializable); - } + for (size_t i = 0; i < varStack.size(); ++i) + codeBlock->addVariableDeclaration(*varStack[i].first, !!(varStack[i].second & DeclarationStacks::IsConstant)); + } -BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, JSScope* scope, SharedSymbolTable* symbolTable, CodeBlock* codeBlock, CompilationKind) - : m_shouldEmitDebugHooks(scope->globalObject()->debugger()) - , m_shouldEmitProfileHooks(scope->globalObject()->globalObjectMethodTable()->supportsProfiling(scope->globalObject())) - , m_shouldEmitRichSourceInfo(scope->globalObject()->globalObjectMethodTable()->supportsRichSourceInfo(scope->globalObject())) - , m_scope(*scope->globalData(), scope) - , m_symbolTable(symbolTable) +BytecodeGenerator::BytecodeGenerator(JSGlobalData& globalData, FunctionBodyNode* functionBody, UnlinkedFunctionCodeBlock* codeBlock, DebuggerMode debuggerMode, ProfilerMode profilerMode) + : m_shouldEmitDebugHooks(debuggerMode == DebuggerOn) + , m_shouldEmitProfileHooks(profilerMode == ProfilerOn) + , m_symbolTable(codeBlock->symbolTable()) #if ENABLE(BYTECODE_COMMENTS) , m_currentCommentString(0) #endif , m_scopeNode(functionBody) - , m_codeBlock(codeBlock) + , m_codeBlock(globalData, codeBlock) , m_activationRegister(0) , m_emptyValueRegister(0) , m_finallyDepth(0) , m_dynamicScopeDepth(0) - , m_baseScopeDepth(0) , m_codeType(FunctionCode) , m_nextConstantOffset(0) , m_globalConstantIndex(0) , m_hasCreatedActivation(false) , m_firstLazyFunction(0) , m_lastLazyFunction(0) - , m_globalData(scope->globalData()) + , m_globalData(&globalData) , m_lastOpcodeID(op_end) #ifndef NDEBUG , m_lastOpcodePosition(0) @@ -353,15 +285,11 @@ BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, JSScope* sc , m_usesExceptions(false) , m_expressionTooDeep(false) { - m_globalData->startedCompiling(m_codeBlock); - m_codeBlock->setGlobalObjectConstant(emitLoad(0, JSValue(m_codeBlock->globalObject()))->index()); - if (m_shouldEmitDebugHooks) m_codeBlock->setNeedsFullScopeChain(true); - codeBlock->setGlobalData(m_globalData); - 
symbolTable->setUsesNonStrictEval(codeBlock->usesEval() && !codeBlock->isStrictMode()); - symbolTable->setParameterCountIncludingThis(functionBody->parameters()->size() + 1); + m_symbolTable->setUsesNonStrictEval(codeBlock->usesEval() && !codeBlock->isStrictMode()); + m_symbolTable->setParameterCountIncludingThis(functionBody->parameters()->size() + 1); prependComment("entering Function block"); emitOpcode(op_enter); @@ -372,7 +300,7 @@ BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, JSScope* sc m_codeBlock->setActivationRegister(m_activationRegister->index()); } - symbolTable->setCaptureStart(m_codeBlock->m_numVars); + m_symbolTable->setCaptureStart(m_codeBlock->m_numVars); if (functionBody->usesArguments() || codeBlock->usesEval() || m_shouldEmitDebugHooks) { // May reify arguments object. RegisterID* unmodifiedArgumentsRegister = addVar(); // Anonymous, so it can't be modified by user code. @@ -422,7 +350,7 @@ BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, JSScope* sc } if (capturesAnyArgumentByName && !codeBlock->isStrictMode()) { - size_t parameterCount = symbolTable->parameterCount(); + size_t parameterCount = m_symbolTable->parameterCount(); OwnArrayPtr<SlowArgument> slowArguments = adoptArrayPtr(new SlowArgument[parameterCount]); for (size_t i = 0; i < parameterCount; ++i) { if (!capturedArguments[i]) { @@ -433,7 +361,7 @@ BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, JSScope* sc slowArguments[i].status = SlowArgument::Captured; slowArguments[i].index = capturedArguments[i]->index(); } - symbolTable->setSlowArguments(slowArguments.release()); + m_symbolTable->setSlowArguments(slowArguments.release()); } RegisterID* calleeRegister = resolveCallee(functionBody); // May push to the scope chain and/or add a captured var. 
@@ -474,7 +402,7 @@ BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, JSScope* sc instructions().append(m_activationRegister->index()); } - symbolTable->setCaptureEnd(codeBlock->m_numVars); + m_symbolTable->setCaptureEnd(codeBlock->m_numVars); m_firstLazyFunction = codeBlock->m_numVars; for (size_t i = 0; i < functionStack.size(); ++i) { @@ -502,7 +430,7 @@ BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, JSScope* sc } if (shouldCaptureAllTheThings) - symbolTable->setCaptureEnd(codeBlock->m_numVars); + m_symbolTable->setCaptureEnd(codeBlock->m_numVars); FunctionParameters& parameters = *functionBody->parameters(); m_parameters.grow(parameters.size() + 1); // reserve space for "this" @@ -532,35 +460,32 @@ BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, JSScope* sc emitOpcode(op_create_this); instructions().append(m_thisRegister.index()); } else if (!codeBlock->isStrictMode() && (functionBody->usesThis() || codeBlock->usesEval() || m_shouldEmitDebugHooks)) { - ValueProfile* profile = emitProfiledOpcode(op_convert_this); + UnlinkedValueProfile profile = emitProfiledOpcode(op_convert_this); instructions().append(m_thisRegister.index()); instructions().append(profile); } } -BytecodeGenerator::BytecodeGenerator(EvalNode* evalNode, JSScope* scope, SharedSymbolTable* symbolTable, EvalCodeBlock* codeBlock, CompilationKind) - : m_shouldEmitDebugHooks(scope->globalObject()->debugger()) - , m_shouldEmitProfileHooks(scope->globalObject()->globalObjectMethodTable()->supportsProfiling(scope->globalObject())) - , m_shouldEmitRichSourceInfo(scope->globalObject()->globalObjectMethodTable()->supportsRichSourceInfo(scope->globalObject())) - , m_scope(*scope->globalData(), scope) - , m_symbolTable(symbolTable) +BytecodeGenerator::BytecodeGenerator(JSGlobalData& globalData, EvalNode* evalNode, UnlinkedEvalCodeBlock* codeBlock, DebuggerMode debuggerMode, ProfilerMode profilerMode) + : m_shouldEmitDebugHooks(debuggerMode == DebuggerOn) + , m_shouldEmitProfileHooks(profilerMode == ProfilerOn) + , m_symbolTable(codeBlock->symbolTable()) #if ENABLE(BYTECODE_COMMENTS) , m_currentCommentString(0) #endif , m_scopeNode(evalNode) - , m_codeBlock(codeBlock) + , m_codeBlock(globalData, codeBlock) , m_thisRegister(CallFrame::thisArgumentOffset()) , m_emptyValueRegister(0) , m_finallyDepth(0) , m_dynamicScopeDepth(0) - , m_baseScopeDepth(codeBlock->baseScopeDepth()) , m_codeType(EvalCode) , m_nextConstantOffset(0) , m_globalConstantIndex(0) , m_hasCreatedActivation(true) , m_firstLazyFunction(0) , m_lastLazyFunction(0) - , m_globalData(scope->globalData()) + , m_globalData(&globalData) , m_lastOpcodeID(op_end) #ifndef NDEBUG , m_lastOpcodePosition(0) @@ -569,14 +494,9 @@ BytecodeGenerator::BytecodeGenerator(EvalNode* evalNode, JSScope* scope, SharedS , m_usesExceptions(false) , m_expressionTooDeep(false) { - m_globalData->startedCompiling(m_codeBlock); - m_codeBlock->setGlobalObjectConstant(emitLoad(0, JSValue(m_codeBlock->globalObject()))->index()); - - if (m_shouldEmitDebugHooks || m_baseScopeDepth) - m_codeBlock->setNeedsFullScopeChain(true); + m_codeBlock->setNeedsFullScopeChain(true); - codeBlock->setGlobalData(m_globalData); - symbolTable->setUsesNonStrictEval(codeBlock->usesEval() && !codeBlock->isStrictMode()); + m_symbolTable->setUsesNonStrictEval(codeBlock->usesEval() && !codeBlock->isStrictMode()); m_codeBlock->setNumParameters(1); prependComment("entering Eval block"); @@ -584,7 +504,7 @@ BytecodeGenerator::BytecodeGenerator(EvalNode* evalNode, JSScope* 
scope, SharedS const DeclarationStacks::FunctionStack& functionStack = evalNode->functionStack(); for (size_t i = 0; i < functionStack.size(); ++i) - m_codeBlock->addFunctionDecl(FunctionExecutable::create(*m_globalData, functionStack[i])); + m_codeBlock->addFunctionDecl(makeFunction(functionStack[i])); const DeclarationStacks::VarStack& varStack = evalNode->varStack(); unsigned numVariables = varStack.size(); @@ -598,7 +518,6 @@ BytecodeGenerator::BytecodeGenerator(EvalNode* evalNode, JSScope* scope, SharedS BytecodeGenerator::~BytecodeGenerator() { - m_globalData->finishedCompiling(m_codeBlock); } RegisterID* BytecodeGenerator::emitInitLazyRegister(RegisterID* reg) @@ -621,17 +540,6 @@ RegisterID* BytecodeGenerator::resolveCallee(FunctionBodyNode* functionBodyNode) instructions().append(addConstant(functionBodyNode->ident())); instructions().append(m_calleeRegister.index()); instructions().append(ReadOnly | DontDelete); - - // Put a mirror object in compilation scope, so compile-time variable resolution sees the property name we'll see at runtime. - m_scope.set(*globalData(), - JSNameScope::create( - m_scope->globalObject()->globalExec(), - functionBodyNode->ident(), - jsUndefined(), - ReadOnly | DontDelete, - m_scope.get() - ) - ); return 0; } @@ -775,7 +683,7 @@ void BytecodeGenerator::emitOpcode(OpcodeID opcodeID) m_lastOpcodePosition = opcodePosition; #endif emitComment(); - instructions().append(globalData()->interpreter->getOpcode(opcodeID)); + instructions().append(opcodeID); m_lastOpcodeID = opcodeID; } @@ -799,21 +707,21 @@ void BytecodeGenerator::prependComment(const char* string) } #endif -ArrayProfile* BytecodeGenerator::newArrayProfile() +UnlinkedArrayProfile BytecodeGenerator::newArrayProfile() { #if ENABLE(VALUE_PROFILER) - return m_codeBlock->addArrayProfile(instructions().size()); + return m_codeBlock->addArrayProfile(); #else return 0; #endif } -ValueProfile* BytecodeGenerator::emitProfiledOpcode(OpcodeID opcodeID) +UnlinkedValueProfile BytecodeGenerator::emitProfiledOpcode(OpcodeID opcodeID) { #if ENABLE(VALUE_PROFILER) - ValueProfile* result = m_codeBlock->addValueProfile(instructions().size()); + UnlinkedValueProfile result = m_codeBlock->addValueProfile(); #else - ValueProfile* result = 0; + UnlinkedValueProfile result = 0; #endif emitOpcode(opcodeID); return result; @@ -843,14 +751,6 @@ void BytecodeGenerator::retrieveLastUnaryOp(int& dstIndex, int& srcIndex) srcIndex = instructions().at(size - 1).u.operand; } -void BytecodeGenerator::retrieveLastUnaryOp(WriteBarrier<Unknown>*& dstPointer, int& srcIndex) -{ - ASSERT(instructions().size() >= 3); - size_t size = instructions().size(); - dstPointer = instructions().at(size - 2).u.registerPointer; - srcIndex = instructions().at(size - 1).u.operand; -} - void ALWAYS_INLINE BytecodeGenerator::rewindBinaryOp() { ASSERT(instructions().size() >= 4); @@ -1339,7 +1239,7 @@ ResolveResult BytecodeGenerator::resolve(const Identifier& property) return ResolveResult::registerResolve(thisRegister(), ResolveResult::ReadOnlyFlag); // Check if the property should be allocated in a register. 
- if (m_codeType != GlobalCode && shouldOptimizeLocals()) { + if (m_codeType != GlobalCode && shouldOptimizeLocals() && m_symbolTable) { SymbolTableEntry entry = symbolTable().get(property.impl()); if (!entry.isNull()) { if (property == propertyNames().arguments) @@ -1355,7 +1255,7 @@ ResolveResult BytecodeGenerator::resolve(const Identifier& property) ResolveResult BytecodeGenerator::resolveConstDecl(const Identifier& property) { // Register-allocated const declarations. - if (m_codeType != EvalCode && m_codeType != GlobalCode) { + if (m_codeType != EvalCode && m_codeType != GlobalCode && m_symbolTable) { SymbolTableEntry entry = symbolTable().get(property.impl()); if (!entry.isNull()) { unsigned flags = entry.isReadOnly() ? ResolveResult::ReadOnlyFlag : 0; @@ -1397,7 +1297,7 @@ RegisterID* BytecodeGenerator::emitResolve(RegisterID* dst, const ResolveResult& if (resolveResult.isRegister()) return emitGetLocalVar(dst, resolveResult, property); - ValueProfile* profile = emitProfiledOpcode(op_resolve); + UnlinkedValueProfile profile = emitProfiledOpcode(op_resolve); instructions().append(dst->index()); instructions().append(addConstant(property)); instructions().append(getResolveOperations(property)); @@ -1409,7 +1309,7 @@ RegisterID* BytecodeGenerator::emitResolveBase(RegisterID* dst, const ResolveRes { ASSERT_UNUSED(resolveResult, !resolveResult.isRegister()); // We can't optimise at all :-( - ValueProfile* profile = emitProfiledOpcode(op_resolve_base); + UnlinkedValueProfile profile = emitProfiledOpcode(op_resolve_base); instructions().append(dst->index()); instructions().append(addConstant(property)); instructions().append(false); @@ -1423,7 +1323,7 @@ RegisterID* BytecodeGenerator::emitResolveBaseForPut(RegisterID* dst, const Reso { ASSERT_UNUSED(resolveResult, !resolveResult.isRegister()); // We can't optimise at all :-( - ValueProfile* profile = emitProfiledOpcode(op_resolve_base); + UnlinkedValueProfile profile = emitProfiledOpcode(op_resolve_base); instructions().append(dst->index()); instructions().append(addConstant(property)); instructions().append(m_codeBlock->isStrictMode()); @@ -1438,7 +1338,7 @@ RegisterID* BytecodeGenerator::emitResolveBaseForPut(RegisterID* dst, const Reso RegisterID* BytecodeGenerator::emitResolveWithBaseForPut(RegisterID* baseDst, RegisterID* propDst, const ResolveResult& resolveResult, const Identifier& property, NonlocalResolveInfo& verifier) { ASSERT_UNUSED(resolveResult, !resolveResult.isRegister()); - ValueProfile* profile = emitProfiledOpcode(op_resolve_with_base); + UnlinkedValueProfile profile = emitProfiledOpcode(op_resolve_with_base); instructions().append(baseDst->index()); instructions().append(propDst->index()); instructions().append(addConstant(property)); @@ -1458,7 +1358,7 @@ RegisterID* BytecodeGenerator::emitResolveWithThis(RegisterID* baseDst, Register return baseDst; } - ValueProfile* profile = emitProfiledOpcode(op_resolve_with_this); + UnlinkedValueProfile profile = emitProfiledOpcode(op_resolve_with_this); instructions().append(baseDst->index()); instructions().append(propDst->index()); instructions().append(addConstant(property)); @@ -1485,36 +1385,19 @@ RegisterID* BytecodeGenerator::emitGetLocalVar(RegisterID* dst, const ResolveRes RegisterID* BytecodeGenerator::emitInitGlobalConst(const Identifier& identifier, RegisterID* value) { ASSERT(m_codeType == GlobalCode); - JSGlobalObject* globalObject = m_codeBlock->globalObject(); - SymbolTableEntry entry = globalObject->symbolTable()->get(identifier.impl()); - if (entry.isNull()) - 
return 0; - - if (entry.couldBeWatched()) { - emitOpcode(op_init_global_const_check); - instructions().append(&globalObject->registerAt(entry.getIndex())); - instructions().append(value->index()); - instructions().append(entry.addressOfIsWatched()); - instructions().append(addConstant(identifier)); - return value; - } - - emitOpcode(op_init_global_const); - instructions().append(&globalObject->registerAt(entry.getIndex())); + emitOpcode(op_init_global_const_nop); + instructions().append(0); instructions().append(value->index()); + instructions().append(0); + instructions().append(addConstant(identifier)); return value; } -void BytecodeGenerator::emitMethodCheck() -{ - emitOpcode(op_method_check); -} - RegisterID* BytecodeGenerator::emitGetById(RegisterID* dst, RegisterID* base, const Identifier& property) { m_codeBlock->addPropertyAccessInstruction(instructions().size()); - ValueProfile* profile = emitProfiledOpcode(op_get_by_id); + UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_id); instructions().append(dst->index()); instructions().append(base->index()); instructions().append(addConstant(property)); @@ -1600,8 +1483,8 @@ RegisterID* BytecodeGenerator::emitDeleteById(RegisterID* dst, RegisterID* base, RegisterID* BytecodeGenerator::emitGetArgumentByVal(RegisterID* dst, RegisterID* base, RegisterID* property) { - ArrayProfile* arrayProfile = newArrayProfile(); - ValueProfile* profile = emitProfiledOpcode(op_get_argument_by_val); + UnlinkedArrayProfile arrayProfile = newArrayProfile(); + UnlinkedValueProfile profile = emitProfiledOpcode(op_get_argument_by_val); instructions().append(dst->index()); ASSERT(base->index() == m_codeBlock->argumentsRegister()); instructions().append(base->index()); @@ -1626,8 +1509,8 @@ RegisterID* BytecodeGenerator::emitGetByVal(RegisterID* dst, RegisterID* base, R return dst; } } - ArrayProfile* arrayProfile = newArrayProfile(); - ValueProfile* profile = emitProfiledOpcode(op_get_by_val); + UnlinkedArrayProfile arrayProfile = newArrayProfile(); + UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_val); instructions().append(dst->index()); instructions().append(base->index()); instructions().append(property->index()); @@ -1638,7 +1521,7 @@ RegisterID* BytecodeGenerator::emitGetByVal(RegisterID* dst, RegisterID* base, R RegisterID* BytecodeGenerator::emitPutByVal(RegisterID* base, RegisterID* property, RegisterID* value) { - ArrayProfile* arrayProfile = newArrayProfile(); + UnlinkedArrayProfile arrayProfile = newArrayProfile(); emitOpcode(op_put_by_val); instructions().append(base->index()); instructions().append(property->index()); @@ -1708,7 +1591,7 @@ RegisterID* BytecodeGenerator::emitNewArray(RegisterID* dst, ElementNode* elemen if (!hadVariableExpression) { ASSERT(length == checkLength); unsigned constantBufferIndex = addConstantBuffer(length); - JSValue* constantBuffer = m_codeBlock->constantBuffer(constantBufferIndex); + JSValue* constantBuffer = m_codeBlock->constantBuffer(constantBufferIndex).data(); unsigned index = 0; for (ElementNode* n = elements; index < length; n = n->next()) { if (n->value()->isNumber()) @@ -1744,14 +1627,14 @@ RegisterID* BytecodeGenerator::emitNewArray(RegisterID* dst, ElementNode* elemen RegisterID* BytecodeGenerator::emitNewFunction(RegisterID* dst, FunctionBodyNode* function) { - return emitNewFunctionInternal(dst, m_codeBlock->addFunctionDecl(FunctionExecutable::create(*m_globalData, function)), false); + return emitNewFunctionInternal(dst, m_codeBlock->addFunctionDecl(makeFunction(function)), false); 
} RegisterID* BytecodeGenerator::emitLazyNewFunction(RegisterID* dst, FunctionBodyNode* function) { FunctionOffsetMap::AddResult ptr = m_functionOffsets.add(function, 0); if (ptr.isNewEntry) - ptr.iterator->value = m_codeBlock->addFunctionDecl(FunctionExecutable::create(*m_globalData, function)); + ptr.iterator->value = m_codeBlock->addFunctionDecl(makeFunction(function)); return emitNewFunctionInternal(dst, ptr.iterator->value, true); } @@ -1776,7 +1659,7 @@ RegisterID* BytecodeGenerator::emitNewRegExp(RegisterID* dst, RegExp* regExp) RegisterID* BytecodeGenerator::emitNewFunctionExpression(RegisterID* r0, FuncExprNode* n) { FunctionBodyNode* function = n->body(); - unsigned index = m_codeBlock->addFunctionExpr(FunctionExecutable::create(*m_globalData, function)); + unsigned index = m_codeBlock->addFunctionExpr(makeFunction(function)); createActivationIfNecessary(); emitOpcode(op_new_func_exp); @@ -1927,7 +1810,7 @@ RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, Regi expectedFunction = emitExpectedFunctionSnippet(dst, func, expectedFunction, callArguments, done.get()); // Emit call. - ArrayProfile* arrayProfile = newArrayProfile(); + UnlinkedArrayProfile arrayProfile = newArrayProfile(); emitOpcode(opcodeID); instructions().append(func->index()); // func instructions().append(callArguments.argumentCountIncludingThis()); // argCount @@ -1939,7 +1822,7 @@ RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, Regi #endif instructions().append(arrayProfile); if (dst != ignoredResult()) { - ValueProfile* profile = emitProfiledOpcode(op_call_put_result); + UnlinkedValueProfile profile = emitProfiledOpcode(op_call_put_result); instructions().append(dst->index()); // dst instructions().append(profile); } @@ -1972,7 +1855,7 @@ RegisterID* BytecodeGenerator::emitCallVarargs(RegisterID* dst, RegisterID* func instructions().append(arguments->index()); instructions().append(firstFreeRegister->index()); if (dst != ignoredResult()) { - ValueProfile* profile = emitProfiledOpcode(op_call_put_result); + UnlinkedValueProfile profile = emitProfiledOpcode(op_call_put_result); instructions().append(dst->index()); instructions().append(profile); } @@ -2055,7 +1938,7 @@ RegisterID* BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func, #endif instructions().append(0); if (dst != ignoredResult()) { - ValueProfile* profile = emitProfiledOpcode(op_call_put_result); + UnlinkedValueProfile profile = emitProfiledOpcode(op_call_put_result); instructions().append(dst->index()); // dst instructions().append(profile); } @@ -2131,11 +2014,11 @@ void BytecodeGenerator::pushFinallyContext(StatementNode* finallyBlock) scope.isFinallyBlock = true; FinallyContext context = { finallyBlock, - m_scopeContextStack.size(), - m_switchContextStack.size(), - m_forInContextStack.size(), - m_tryContextStack.size(), - m_labelScopes.size(), + static_cast<unsigned>(m_scopeContextStack.size()), + static_cast<unsigned>(m_switchContextStack.size()), + static_cast<unsigned>(m_forInContextStack.size()), + static_cast<unsigned>(m_tryContextStack.size()), + static_cast<unsigned>(m_labelScopes.size()), m_finallyDepth, m_dynamicScopeDepth }; @@ -2434,7 +2317,7 @@ RegisterID* BytecodeGenerator::popTryAndEmitCatch(TryData* tryData, RegisterID* m_tryContextStack.removeLast(); emitLabel(tryRange.tryData->target.get()); - tryRange.tryData->targetScopeDepth = m_dynamicScopeDepth + m_baseScopeDepth; + tryRange.tryData->targetScopeDepth = m_dynamicScopeDepth; emitOpcode(op_catch); 
instructions().append(targetRegister->index()); @@ -2443,8 +2326,9 @@ RegisterID* BytecodeGenerator::popTryAndEmitCatch(TryData* tryData, RegisterID* void BytecodeGenerator::emitThrowReferenceError(const String& message) { - emitOpcode(op_throw_reference_error); + emitOpcode(op_throw_static_error); instructions().append(addConstantValue(jsString(globalData(), message))->index()); + instructions().append(true); } void BytecodeGenerator::emitPushNameScope(const Identifier& property, RegisterID* value, unsigned attributes) @@ -2462,7 +2346,7 @@ void BytecodeGenerator::emitPushNameScope(const Identifier& property, RegisterID void BytecodeGenerator::beginSwitch(RegisterID* scrutineeRegister, SwitchInfo::SwitchType type) { - SwitchInfo info = { instructions().size(), type }; + SwitchInfo info = { static_cast<uint32_t>(instructions().size()), type }; switch (type) { case SwitchInfo::SwitchImmediate: emitOpcode(op_switch_imm); @@ -2495,7 +2379,7 @@ static int32_t keyForImmediateSwitch(ExpressionNode* node, int32_t min, int32_t return key - min; } -static void prepareJumpTableForImmediateSwitch(SimpleJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes, int32_t min, int32_t max) +static void prepareJumpTableForImmediateSwitch(UnlinkedSimpleJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes, int32_t min, int32_t max) { jumpTable.min = min; jumpTable.branchOffsets.resize(max - min + 1); @@ -2521,7 +2405,7 @@ static int32_t keyForCharacterSwitch(ExpressionNode* node, int32_t min, int32_t return key - min; } -static void prepareJumpTableForCharacterSwitch(SimpleJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes, int32_t min, int32_t max) +static void prepareJumpTableForCharacterSwitch(UnlinkedSimpleJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes, int32_t min, int32_t max) { jumpTable.min = min; jumpTable.branchOffsets.resize(max - min + 1); @@ -2534,7 +2418,7 @@ static void prepareJumpTableForCharacterSwitch(SimpleJumpTable& jumpTable, int32 } } -static void prepareJumpTableForStringSwitch(StringJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes) +static void prepareJumpTableForStringSwitch(UnlinkedStringJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes) { for (uint32_t i = 0; i < clauseCount; ++i) { // We're emitting this after the clause labels should have been fixed, so @@ -2543,9 +2427,7 @@ static void prepareJumpTableForStringSwitch(StringJumpTable& jumpTable, int32_t ASSERT(nodes[i]->isString()); StringImpl* clause = static_cast<StringNode*>(nodes[i])->value().impl(); - OffsetLocation location; - location.branchOffset = labels[i]->bind(switchAddress, switchAddress + 3); - jumpTable.offsetTable.add(clause, location); + jumpTable.offsetTable.add(clause, labels[i]->bind(switchAddress, switchAddress + 3)); } } @@ -2557,20 +2439,20 @@ void BytecodeGenerator::endSwitch(uint32_t clauseCount, RefPtr<Label>* labels, E instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfImmediateSwitchJumpTables(); instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->bind(switchInfo.bytecodeOffset, switchInfo.bytecodeOffset + 3); - SimpleJumpTable& jumpTable = m_codeBlock->addImmediateSwitchJumpTable(); + UnlinkedSimpleJumpTable& 
jumpTable = m_codeBlock->addImmediateSwitchJumpTable(); prepareJumpTableForImmediateSwitch(jumpTable, switchInfo.bytecodeOffset, clauseCount, labels, nodes, min, max); } else if (switchInfo.switchType == SwitchInfo::SwitchCharacter) { instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfCharacterSwitchJumpTables(); instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->bind(switchInfo.bytecodeOffset, switchInfo.bytecodeOffset + 3); - SimpleJumpTable& jumpTable = m_codeBlock->addCharacterSwitchJumpTable(); + UnlinkedSimpleJumpTable& jumpTable = m_codeBlock->addCharacterSwitchJumpTable(); prepareJumpTableForCharacterSwitch(jumpTable, switchInfo.bytecodeOffset, clauseCount, labels, nodes, min, max); } else { ASSERT(switchInfo.switchType == SwitchInfo::SwitchString); instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfStringSwitchJumpTables(); instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->bind(switchInfo.bytecodeOffset, switchInfo.bytecodeOffset + 3); - StringJumpTable& jumpTable = m_codeBlock->addStringSwitchJumpTable(); + UnlinkedStringJumpTable& jumpTable = m_codeBlock->addStringSwitchJumpTable(); prepareJumpTableForStringSwitch(jumpTable, switchInfo.bytecodeOffset, clauseCount, labels, nodes); } } @@ -2602,9 +2484,9 @@ void BytecodeGenerator::emitReadOnlyExceptionIfNeeded() { if (!isStrictMode()) return; - - RefPtr<RegisterID> error = emitLoad(newTemporary(), JSValue(createTypeError(scope()->globalObject()->globalExec(), StrictModeReadonlyPropertyWriteError))); - emitThrow(error.get()); + emitOpcode(op_throw_static_error); + instructions().append(addConstantValue(jsString(globalData(), StrictModeReadonlyPropertyWriteError))->index()); + instructions().append(false); } } // namespace JSC diff --git a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h index 246530ab2..828726dee 100644 --- a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h +++ b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h @@ -41,6 +41,7 @@ #include "SymbolTable.h" #include "Debugger.h" #include "Nodes.h" +#include "UnlinkedCodeBlock.h" #include <wtf/PassRefPtr.h> #include <wtf/SegmentedVector.h> #include <wtf/Vector.h> @@ -211,18 +212,18 @@ namespace JSC { JS_EXPORT_PRIVATE static void setDumpsGeneratedCode(bool dumpsGeneratedCode); static bool dumpsGeneratedCode(); - BytecodeGenerator(ProgramNode*, JSScope*, SharedSymbolTable*, ProgramCodeBlock*, CompilationKind); - BytecodeGenerator(FunctionBodyNode*, JSScope*, SharedSymbolTable*, CodeBlock*, CompilationKind); - BytecodeGenerator(EvalNode*, JSScope*, SharedSymbolTable*, EvalCodeBlock*, CompilationKind); + BytecodeGenerator(JSGlobalData&, ProgramNode*, UnlinkedProgramCodeBlock*, DebuggerMode, ProfilerMode); + BytecodeGenerator(JSGlobalData&, FunctionBodyNode*, UnlinkedFunctionCodeBlock*, DebuggerMode, ProfilerMode); + BytecodeGenerator(JSGlobalData&, EvalNode*, UnlinkedEvalCodeBlock*, DebuggerMode, ProfilerMode); ~BytecodeGenerator(); JSGlobalData* globalData() const { return m_globalData; } const CommonIdentifiers& propertyNames() const { return *m_globalData->propertyNames; } - bool isConstructor() { return m_codeBlock->m_isConstructor; } + bool isConstructor() { return m_codeBlock->isConstructor(); } - JSObject* generate(); + ParserError generate(); bool isArgumentNumber(const Identifier&, int); @@ -332,10 +333,7 @@ namespace JSC { void emitExpressionInfo(unsigned divot, unsigned startOffset, unsigned endOffset) { - if 
(!m_shouldEmitRichSourceInfo) - return; - - divot -= m_codeBlock->sourceOffset(); + divot -= m_scopeNode->source().startOffset(); if (divot > ExpressionRangeInfo::MaxDivot) { // Overflow has occurred, we can only give line number info for errors for this region divot = 0; @@ -421,8 +419,6 @@ namespace JSC { RegisterID* emitPutToBase(RegisterID* base, const Identifier&, RegisterID* value, NonlocalResolveInfo&); - void emitMethodCheck(); - RegisterID* emitGetById(RegisterID* dst, RegisterID* base, const Identifier& property); RegisterID* emitGetArgumentsLength(RegisterID* dst, RegisterID* base); RegisterID* emitPutById(RegisterID* base, const Identifier& property, RegisterID* value); @@ -510,8 +506,6 @@ namespace JSC { bool shouldEmitProfileHooks() { return m_shouldEmitProfileHooks; } bool isStrictMode() const { return m_codeBlock->isStrictMode(); } - - JSScope* scope() const { return m_scope.get(); } private: friend class Label; @@ -529,11 +523,10 @@ namespace JSC { #endif void emitOpcode(OpcodeID); - ArrayProfile* newArrayProfile(); - ValueProfile* emitProfiledOpcode(OpcodeID); + UnlinkedArrayProfile newArrayProfile(); + UnlinkedValueProfile emitProfiledOpcode(OpcodeID); void retrieveLastBinaryOp(int& dstIndex, int& src1Index, int& src2Index); void retrieveLastUnaryOp(int& dstIndex, int& srcIndex); - void retrieveLastUnaryOp(WriteBarrier<Unknown>*& dstPointer, int& srcIndex); ALWAYS_INLINE void rewindBinaryOp(); ALWAYS_INLINE void rewindUnaryOp(); @@ -576,10 +569,6 @@ namespace JSC { } // Returns the index of the added var. - enum ConstantMode { IsConstant, IsVariable }; - enum FunctionMode { IsFunctionToSpecialize, NotFunctionOrNotSpecializable }; - int addGlobalVar(const Identifier&, ConstantMode, FunctionMode); - void addParameter(const Identifier&, int parameterIndex); RegisterID* resolveCallee(FunctionBodyNode*); void addCallee(FunctionBodyNode*, RegisterID*); @@ -606,17 +595,22 @@ namespace JSC { unsigned addConstantBuffer(unsigned length); + UnlinkedFunctionExecutable* makeFunction(FunctionBodyNode* body) + { + return UnlinkedFunctionExecutable::create(m_globalData, m_scopeNode->source(), body); + } + JSString* addStringConstant(const Identifier&); void addLineInfo(unsigned lineNo) { - m_codeBlock->addLineInfo(instructions().size(), lineNo); + m_codeBlock->addLineInfo(instructions().size(), lineNo - m_scopeNode->firstLine()); } RegisterID* emitInitLazyRegister(RegisterID*); public: - Vector<Instruction>& instructions() { return m_instructions; } + Vector<UnlinkedInstruction>& instructions() { return m_instructions; } SharedSymbolTable& symbolTable() { return *m_symbolTable; } #if ENABLE(BYTECODE_COMMENTS) @@ -654,13 +648,11 @@ namespace JSC { void createActivationIfNecessary(); RegisterID* createLazyRegisterIfNecessary(RegisterID*); - Vector<Instruction> m_instructions; + Vector<UnlinkedInstruction> m_instructions; bool m_shouldEmitDebugHooks; bool m_shouldEmitProfileHooks; - bool m_shouldEmitRichSourceInfo; - Strong<JSScope> m_scope; SharedSymbolTable* m_symbolTable; #if ENABLE(BYTECODE_COMMENTS) @@ -669,7 +661,7 @@ namespace JSC { #endif ScopeNode* m_scopeNode; - CodeBlock* m_codeBlock; + Strong<UnlinkedCodeBlock> m_codeBlock; // Some of these objects keep pointers to one another. They are arranged // to ensure a sane destruction order that avoids references to freed memory. 
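The reworked header above keeps only context-free state: the three constructors take a JSGlobalData plus an UnlinkedProgramCodeBlock, UnlinkedFunctionCodeBlock or UnlinkedEvalCodeBlock instead of a JSScope and a linked CodeBlock, instructions are recorded as UnlinkedInstruction values, and generate() reports failure through a ParserError value rather than a JSObject*. A minimal sketch of how a caller might drive the eval constructor is below; only the constructor and generate() signatures come from these hunks, while the wrapper itself, the DebuggerOff/ProfilerOff enumerators and the ParserError::ErrorNone check are assumptions.

#include "BytecodeGenerator.h"
#include "UnlinkedCodeBlock.h"

namespace JSC {

// Hypothetical helper, not WebKit API: fills a context-free UnlinkedEvalCodeBlock
// for an already-parsed eval node.
static bool sketchGenerateUnlinkedEvalCode(JSGlobalData& globalData, EvalNode* evalNode, UnlinkedEvalCodeBlock* codeBlock)
{
    // No JSScope parameter any more: everything the generator records
    // (UnlinkedInstructions, unlinked value/array profiles, unlinked jump tables)
    // lands in the unlinked code block.
    // DebuggerOff/ProfilerOff are assumed counterparts of the DebuggerOn/ProfilerOn
    // comparisons seen in the constructor above.
    BytecodeGenerator generator(globalData, evalNode, codeBlock, DebuggerOff, ProfilerOff);

    // generate() now returns a ParserError instead of a JSObject* exception.
    ParserError error = generator.generate();

    // The m_type/ErrorNone spelling is assumed; the diff only shows the return type.
    return error.m_type == ParserError::ErrorNone;
}

} // namespace JSC

The program and function constructors declared alongside it follow the same shape.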
@@ -687,7 +679,6 @@ namespace JSC { RefPtr<RegisterID> m_lastVar; int m_finallyDepth; int m_dynamicScopeDepth; - int m_baseScopeDepth; CodeType m_codeType; Vector<ControlFlowContext> m_scopeContextStack; diff --git a/Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp b/Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp index 68811955f..85da1f4eb 100644 --- a/Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp +++ b/Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp @@ -469,7 +469,6 @@ RegisterID* FunctionCallDotNode::emitBytecode(BytecodeGenerator& generator, Regi CallArguments callArguments(generator, m_args); generator.emitNode(callArguments.thisRegister(), m_base); generator.emitExpressionInfo(divot() - m_subexpressionDivotOffset, startOffset() - m_subexpressionDivotOffset, m_subexpressionEndOffset); - generator.emitMethodCheck(); generator.emitGetById(function.get(), callArguments.thisRegister(), m_ident); return generator.emitCall(generator.finalDestinationOrIgnored(dst, function.get()), function.get(), NoExpectedFunction, callArguments, divot(), startOffset(), endOffset()); } diff --git a/Source/JavaScriptCore/debugger/Debugger.cpp b/Source/JavaScriptCore/debugger/Debugger.cpp index 3731b0473..b14729146 100644 --- a/Source/JavaScriptCore/debugger/Debugger.cpp +++ b/Source/JavaScriptCore/debugger/Debugger.cpp @@ -80,6 +80,7 @@ inline void Recompiler::operator()(JSCell* cell) ExecState* exec = function->scope()->globalObject()->JSGlobalObject::globalExec(); executable->clearCodeIfNotCompiling(); + executable->clearUnlinkedCodeIfNotCompiling(); if (m_debugger == function->scope()->globalObject()->debugger()) m_sourceProviders.add(executable->source().provider(), exec); } diff --git a/Source/JavaScriptCore/dfg/DFGAbstractState.cpp b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp index 58ff7d23c..e518c24a8 100644 --- a/Source/JavaScriptCore/dfg/DFGAbstractState.cpp +++ b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp @@ -839,7 +839,7 @@ bool AbstractState::execute(unsigned indexInBlock) case GetByVal: { node.setCanExit(true); - switch (node.arrayMode()) { + switch (node.arrayMode().type()) { case Array::SelectUsingPredictions: case Array::Unprofiled: ASSERT_NOT_REACHED(); @@ -859,18 +859,12 @@ bool AbstractState::execute(unsigned indexInBlock) forNode(node.child2()).filter(SpecInt32); forNode(nodeIndex).makeTop(); break; - case IN_BOUNDS_CONTIGUOUS_MODES: - case IN_BOUNDS_ARRAY_STORAGE_MODES: + case Array::Contiguous: + case Array::ArrayStorage: + case Array::SlowPutArrayStorage: forNode(node.child2()).filter(SpecInt32); - forNode(nodeIndex).makeTop(); - break; - case OUT_OF_BOUNDS_CONTIGUOUS_MODES: - case OUT_OF_BOUNDS_ARRAY_STORAGE_MODES: - case SLOW_PUT_ARRAY_STORAGE_MODES: - case ALL_EFFECTFUL_MODES: - forNode(node.child1()).filter(SpecCell); - forNode(node.child2()).filter(SpecInt32); - clobberWorld(node.codeOrigin, indexInBlock); + if (node.arrayMode().isOutOfBounds()) + clobberWorld(node.codeOrigin, indexInBlock); forNode(nodeIndex).makeTop(); break; case Array::Int8Array: @@ -925,31 +919,32 @@ bool AbstractState::execute(unsigned indexInBlock) Edge child1 = m_graph.varArgChild(node, 0); Edge child2 = m_graph.varArgChild(node, 1); Edge child3 = m_graph.varArgChild(node, 2); - switch (modeForPut(node.arrayMode())) { + switch (node.arrayMode().modeForPut().type()) { case Array::ForceExit: m_isValid = false; break; case Array::Generic: clobberWorld(node.codeOrigin, indexInBlock); break; - case IN_BOUNDS_CONTIGUOUS_MODES: - case CONTIGUOUS_TO_TAIL_MODES: - case 
IN_BOUNDS_ARRAY_STORAGE_MODES: + case Array::Contiguous: + case Array::ArrayStorage: + forNode(child1).filter(SpecCell); forNode(child2).filter(SpecInt32); + if (node.arrayMode().isOutOfBounds()) + clobberWorld(node.codeOrigin, indexInBlock); break; - case OUT_OF_BOUNDS_CONTIGUOUS_MODES: - case ARRAY_STORAGE_TO_HOLE_MODES: - case OUT_OF_BOUNDS_ARRAY_STORAGE_MODES: - case SLOW_PUT_ARRAY_STORAGE_MODES: - case ALL_EFFECTFUL_MODES: + case Array::SlowPutArrayStorage: forNode(child1).filter(SpecCell); forNode(child2).filter(SpecInt32); - clobberWorld(node.codeOrigin, indexInBlock); + if (node.arrayMode().mayStoreToHole()) + clobberWorld(node.codeOrigin, indexInBlock); break; case Array::Arguments: + forNode(child1).filter(SpecCell); forNode(child2).filter(SpecInt32); break; case Array::Int8Array: + forNode(child1).filter(SpecCell); forNode(child2).filter(SpecInt32); if (m_graph[child3].shouldSpeculateInteger()) forNode(child3).filter(SpecInt32); @@ -957,6 +952,7 @@ bool AbstractState::execute(unsigned indexInBlock) forNode(child3).filter(SpecNumber); break; case Array::Int16Array: + forNode(child1).filter(SpecCell); forNode(child2).filter(SpecInt32); if (m_graph[child3].shouldSpeculateInteger()) forNode(child3).filter(SpecInt32); @@ -964,6 +960,7 @@ bool AbstractState::execute(unsigned indexInBlock) forNode(child3).filter(SpecNumber); break; case Array::Int32Array: + forNode(child1).filter(SpecCell); forNode(child2).filter(SpecInt32); if (m_graph[child3].shouldSpeculateInteger()) forNode(child3).filter(SpecInt32); @@ -971,6 +968,7 @@ bool AbstractState::execute(unsigned indexInBlock) forNode(child3).filter(SpecNumber); break; case Array::Uint8Array: + forNode(child1).filter(SpecCell); forNode(child2).filter(SpecInt32); if (m_graph[child3].shouldSpeculateInteger()) forNode(child3).filter(SpecInt32); @@ -978,6 +976,7 @@ bool AbstractState::execute(unsigned indexInBlock) forNode(child3).filter(SpecNumber); break; case Array::Uint8ClampedArray: + forNode(child1).filter(SpecCell); forNode(child2).filter(SpecInt32); if (m_graph[child3].shouldSpeculateInteger()) forNode(child3).filter(SpecInt32); @@ -985,6 +984,7 @@ bool AbstractState::execute(unsigned indexInBlock) forNode(child3).filter(SpecNumber); break; case Array::Uint16Array: + forNode(child1).filter(SpecCell); forNode(child2).filter(SpecInt32); if (m_graph[child3].shouldSpeculateInteger()) forNode(child3).filter(SpecInt32); @@ -992,6 +992,7 @@ bool AbstractState::execute(unsigned indexInBlock) forNode(child3).filter(SpecNumber); break; case Array::Uint32Array: + forNode(child1).filter(SpecCell); forNode(child2).filter(SpecInt32); if (m_graph[child3].shouldSpeculateInteger()) forNode(child3).filter(SpecInt32); @@ -999,15 +1000,17 @@ bool AbstractState::execute(unsigned indexInBlock) forNode(child3).filter(SpecNumber); break; case Array::Float32Array: + forNode(child1).filter(SpecCell); forNode(child2).filter(SpecInt32); forNode(child3).filter(SpecNumber); break; case Array::Float64Array: + forNode(child1).filter(SpecCell); forNode(child2).filter(SpecInt32); forNode(child3).filter(SpecNumber); break; default: - ASSERT_NOT_REACHED(); + CRASH(); break; } break; @@ -1324,7 +1327,8 @@ bool AbstractState::execute(unsigned indexInBlock) // the futurePossibleStructure set then the constant folding phase should // turn this into a watchpoint instead. 
StructureSet& set = node.structureSet(); - if (value.m_futurePossibleStructure.isSubsetOf(set)) + if (value.m_futurePossibleStructure.isSubsetOf(set) + || value.m_currentKnownStructure.isSubsetOf(set)) m_foundConstants = true; node.setCanExit( !value.m_currentKnownStructure.isSubsetOf(set) @@ -1365,23 +1369,24 @@ bool AbstractState::execute(unsigned indexInBlock) case GetButterfly: case AllocatePropertyStorage: case ReallocatePropertyStorage: - node.setCanExit(false); + node.setCanExit(!isCellSpeculation(forNode(node.child1()).m_type)); forNode(node.child1()).filter(SpecCell); forNode(nodeIndex).clear(); // The result is not a JS value. break; case CheckArray: { - if (modeAlreadyChecked(forNode(node.child1()), node.arrayMode())) { + if (node.arrayMode().alreadyChecked(forNode(node.child1()))) { m_foundConstants = true; node.setCanExit(false); break; } node.setCanExit(true); // Lies, but this is followed by operations (like GetByVal) that always exit, so there is no point in us trying to be clever here. - switch (node.arrayMode()) { + switch (node.arrayMode().type()) { case Array::String: forNode(node.child1()).filter(SpecString); break; - case ALL_CONTIGUOUS_MODES: - case ALL_ARRAY_STORAGE_MODES: + case Array::Contiguous: + case Array::ArrayStorage: + case Array::SlowPutArrayStorage: // This doesn't filter anything meaningful right now. We may want to add // CFA tracking of array mode speculations, but we don't have that, yet. forNode(node.child1()).filter(SpecCell); @@ -1420,33 +1425,40 @@ bool AbstractState::execute(unsigned indexInBlock) ASSERT_NOT_REACHED(); break; } - forNode(node.child1()).filterArrayModes(arrayModesFor(node.arrayMode())); + forNode(node.child1()).filterArrayModes(node.arrayMode().arrayModesThatPassFiltering()); + m_haveStructures = true; break; } case Arrayify: { - if (modeAlreadyChecked(forNode(node.child1()), node.arrayMode())) { + if (node.arrayMode().alreadyChecked(forNode(node.child1()))) { m_foundConstants = true; node.setCanExit(false); break; } - switch (node.arrayMode()) { - case ALL_EFFECTFUL_MODES: - node.setCanExit(true); - forNode(node.child1()).filter(SpecCell); - if (node.child2()) - forNode(node.child2()).filter(SpecInt32); - forNode(nodeIndex).clear(); - clobberStructures(indexInBlock); - forNode(node.child1()).filterArrayModes(arrayModesFor(node.arrayMode())); - break; - default: - CRASH(); - break; - } + ASSERT(node.arrayMode().conversion() == Array::Convert); + node.setCanExit(true); + forNode(node.child1()).filter(SpecCell); + if (node.child2()) + forNode(node.child2()).filter(SpecInt32); + clobberStructures(indexInBlock); + forNode(node.child1()).filterArrayModes(node.arrayMode().arrayModesThatPassFiltering()); + m_haveStructures = true; + break; + } + case ArrayifyToStructure: { + AbstractValue& value = forNode(node.child1()); + StructureSet set = node.structure(); + if (value.m_futurePossibleStructure.isSubsetOf(set) + || value.m_currentKnownStructure.isSubsetOf(set)) + m_foundConstants = true; + node.setCanExit(true); + clobberStructures(indexInBlock); + value.filter(set); + m_haveStructures = true; break; } case GetIndexedPropertyStorage: { - switch (node.arrayMode()) { + switch (node.arrayMode().type()) { case Array::String: // Strings are weird - we may spec fail if the string was a rope. That is of course // stupid, and we should fix that, but for now let's at least be honest about it. 
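The abstract interpreter changes above replace the macro-generated case lists (IN_BOUNDS_CONTIGUOUS_MODES, OUT_OF_BOUNDS_ARRAY_STORAGE_MODES and friends) with one case per storage type plus queries such as isOutOfBounds() and mayStoreToHole(). The following self-contained toy, which uses invented Toy:: names rather than the real DFG types, shows the shape of that refactoring: the speculation stays orthogonal to the storage type, so a single Contiguous/ArrayStorage case can decide whether to clobber the abstract heap with one flag test instead of four dedicated enumerators.

#include <cstdio>

namespace Toy {

namespace Array {
enum Type { Contiguous, ArrayStorage, SlowPutArrayStorage };
enum Speculation { InBounds, ToHole, OutOfBounds };
} // namespace Array

class Mode {
public:
    Mode(Array::Type type, Array::Speculation speculation)
        : m_type(type)
        , m_speculation(speculation)
    {
    }

    Array::Type type() const { return m_type; }
    bool isOutOfBounds() const { return m_speculation == Array::OutOfBounds; }
    bool mayStoreToHole() const { return m_speculation != Array::InBounds; }

private:
    Array::Type m_type;
    Array::Speculation m_speculation;
};

// Mirrors the shape of the PutByVal case above: one case per storage type,
// with the side-effect decision made by a flag rather than by extra enumerators.
static const char* effectOfPut(const Mode& mode)
{
    switch (mode.type()) {
    case Array::Contiguous:
    case Array::ArrayStorage:
        return mode.isOutOfBounds() ? "clobbers abstract heap" : "in-bounds store";
    case Array::SlowPutArrayStorage:
        return mode.mayStoreToHole() ? "clobbers abstract heap" : "in-bounds store";
    }
    return "unreachable";
}

} // namespace Toy

int main()
{
    Toy::Mode outOfBounds(Toy::Array::Contiguous, Toy::Array::OutOfBounds);
    Toy::Mode inBounds(Toy::Array::ArrayStorage, Toy::Array::InBounds);
    std::printf("%s / %s\n", Toy::effectOfPut(outOfBounds), Toy::effectOfPut(inBounds));
    return 0;
}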
@@ -1460,13 +1472,13 @@ bool AbstractState::execute(unsigned indexInBlock) break; } case GetByOffset: - node.setCanExit(false); + node.setCanExit(!isCellSpeculation(forNode(node.child1()).m_type)); forNode(node.child1()).filter(SpecCell); forNode(nodeIndex).makeTop(); break; case PutByOffset: - node.setCanExit(false); + node.setCanExit(!isCellSpeculation(forNode(node.child1()).m_type)); forNode(node.child1()).filter(SpecCell); break; diff --git a/Source/JavaScriptCore/dfg/DFGAbstractValue.h b/Source/JavaScriptCore/dfg/DFGAbstractValue.h index 5382cd3ad..c198b5e52 100644 --- a/Source/JavaScriptCore/dfg/DFGAbstractValue.h +++ b/Source/JavaScriptCore/dfg/DFGAbstractValue.h @@ -458,29 +458,11 @@ struct AbstractValue { private: void clobberArrayModes() { - if (m_arrayModes == ALL_ARRAY_MODES) - return; - - if (LIKELY(m_arrayModes & asArrayModes(NonArray))) - m_arrayModes = ALL_ARRAY_MODES; - else - clobberArrayModesSlow(); + // FIXME: We could make this try to predict the set of array modes that this object + // could have in the future. For now, just do the simple thing. + m_arrayModes = ALL_ARRAY_MODES; } - void clobberArrayModesSlow() - { - if (m_arrayModes & asArrayModes(ArrayClass)) - m_arrayModes = ALL_ARRAY_MODES; - else if (m_arrayModes & asArrayModes(NonArrayWithContiguous)) - m_arrayModes |= asArrayModes(NonArrayWithArrayStorage) | asArrayModes(NonArrayWithSlowPutArrayStorage); - else if (m_arrayModes & asArrayModes(ArrayWithContiguous)) - m_arrayModes |= asArrayModes(ArrayWithArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage); - else if (m_arrayModes & asArrayModes(NonArrayWithArrayStorage)) - m_arrayModes |= asArrayModes(NonArrayWithSlowPutArrayStorage); - else if (m_arrayModes & asArrayModes(ArrayWithArrayStorage)) - m_arrayModes |= asArrayModes(ArrayWithArrayStorage); - } - void setFuturePossibleStructure(Structure* structure) { if (structure->transitionWatchpointSetIsStillValid()) diff --git a/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp b/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp index bb61a59e6..00b1109f6 100644 --- a/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp @@ -273,7 +273,7 @@ public: } case GetByVal: { - if (node.arrayMode() != Array::Arguments) { + if (node.arrayMode().type() != Array::Arguments) { observeBadArgumentsUses(node); break; } @@ -287,7 +287,7 @@ public: } case GetArrayLength: { - if (node.arrayMode() != Array::Arguments) { + if (node.arrayMode().type() != Array::Arguments) { observeBadArgumentsUses(node); break; } @@ -476,7 +476,7 @@ public: } case GetByVal: { - if (node.arrayMode() != Array::Arguments) + if (node.arrayMode().type() != Array::Arguments) break; // This can be simplified to GetMyArgumentByVal if we know that @@ -499,7 +499,7 @@ public: } case GetArrayLength: { - if (node.arrayMode() != Array::Arguments) + if (node.arrayMode().type() != Array::Arguments) break; if (!isOKToOptimize(m_graph[node.child1()])) diff --git a/Source/JavaScriptCore/dfg/DFGArrayMode.cpp b/Source/JavaScriptCore/dfg/DFGArrayMode.cpp index a3aafde01..699902a16 100644 --- a/Source/JavaScriptCore/dfg/DFGArrayMode.cpp +++ b/Source/JavaScriptCore/dfg/DFGArrayMode.cpp @@ -32,126 +32,126 @@ namespace JSC { namespace DFG { -Array::Mode fromObserved(ArrayProfile* profile, Array::Action action, bool makeSafe) +ArrayMode ArrayMode::fromObserved(ArrayProfile* profile, Array::Action action, bool makeSafe) { switch (profile->observedArrayModes()) { case 0: - 
return Array::Unprofiled; + return ArrayMode(Array::Unprofiled); case asArrayModes(NonArray): if (action == Array::Write && !profile->mayInterceptIndexedAccesses()) - return Array::ToContiguous; // FIXME: we don't know whether to go to contiguous or array storage. We're making a static guess here. In future we should use exit profiling for this. - return Array::SelectUsingPredictions; + return ArrayMode(Array::Contiguous, Array::NonArray, Array::OutOfBounds, Array::Convert); // FIXME: we don't know whether to go to contiguous or array storage. We're making a static guess here. In future we should use exit profiling for this. + return ArrayMode(Array::SelectUsingPredictions); case asArrayModes(NonArrayWithContiguous): - return makeSafe ? Array::ContiguousOutOfBounds : (profile->mayStoreToHole() ? Array::ContiguousToTail : Array::Contiguous); + return ArrayMode(Array::Contiguous, Array::NonArray, Array::AsIs).withProfile(profile, makeSafe); case asArrayModes(ArrayWithContiguous): - return makeSafe ? Array::ArrayWithContiguousOutOfBounds : (profile->mayStoreToHole() ? Array::ArrayWithContiguousToTail : Array::ArrayWithContiguous); + return ArrayMode(Array::Contiguous, Array::Array, Array::AsIs).withProfile(profile, makeSafe); case asArrayModes(NonArrayWithContiguous) | asArrayModes(ArrayWithContiguous): - return makeSafe ? Array::PossiblyArrayWithContiguousOutOfBounds : (profile->mayStoreToHole() ? Array::PossiblyArrayWithContiguousToTail : Array::PossiblyArrayWithContiguous); + return ArrayMode(Array::Contiguous, Array::PossiblyArray, Array::AsIs).withProfile(profile, makeSafe); case asArrayModes(NonArrayWithArrayStorage): - return makeSafe ? Array::ArrayStorageOutOfBounds : (profile->mayStoreToHole() ? Array::ArrayStorageToHole : Array::ArrayStorage); + return ArrayMode(Array::ArrayStorage, Array::NonArray, Array::AsIs).withProfile(profile, makeSafe); case asArrayModes(NonArrayWithSlowPutArrayStorage): case asArrayModes(NonArrayWithArrayStorage) | asArrayModes(NonArrayWithSlowPutArrayStorage): - return Array::SlowPutArrayStorage; + return ArrayMode(Array::SlowPutArrayStorage, Array::NonArray, Array::AsIs).withProfile(profile, makeSafe); case asArrayModes(ArrayWithArrayStorage): - return makeSafe ? Array::ArrayWithArrayStorageOutOfBounds : (profile->mayStoreToHole() ? Array::ArrayWithArrayStorageToHole : Array::ArrayWithArrayStorage); + return ArrayMode(Array::ArrayStorage, Array::Array, Array::AsIs).withProfile(profile, makeSafe); case asArrayModes(ArrayWithSlowPutArrayStorage): case asArrayModes(ArrayWithArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage): - return Array::ArrayWithSlowPutArrayStorage; + return ArrayMode(Array::SlowPutArrayStorage, Array::Array, Array::AsIs).withProfile(profile, makeSafe); case asArrayModes(NonArrayWithArrayStorage) | asArrayModes(ArrayWithArrayStorage): - return makeSafe ? Array::PossiblyArrayWithArrayStorageOutOfBounds : (profile->mayStoreToHole() ? 
Array::PossiblyArrayWithArrayStorageToHole : Array::PossiblyArrayWithArrayStorage); + return ArrayMode(Array::ArrayStorage, Array::PossiblyArray, Array::AsIs).withProfile(profile, makeSafe); case asArrayModes(NonArrayWithSlowPutArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage): case asArrayModes(NonArrayWithArrayStorage) | asArrayModes(ArrayWithArrayStorage) | asArrayModes(NonArrayWithSlowPutArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage): - return Array::PossiblyArrayWithSlowPutArrayStorage; + return ArrayMode(Array::SlowPutArrayStorage, Array::PossiblyArray, Array::AsIs).withProfile(profile, makeSafe); case asArrayModes(NonArrayWithContiguous) | asArrayModes(NonArrayWithArrayStorage): - return Array::ToArrayStorage; + return ArrayMode(Array::ArrayStorage, Array::NonArray, Array::Convert).withProfile(profile, makeSafe); case asArrayModes(ArrayWithContiguous) | asArrayModes(ArrayWithArrayStorage): - return Array::ArrayToArrayStorage; + return ArrayMode(Array::ArrayStorage, Array::Array, Array::Convert).withProfile(profile, makeSafe); case asArrayModes(NonArrayWithContiguous) | asArrayModes(NonArrayWithArrayStorage) | asArrayModes(ArrayWithContiguous) | asArrayModes(ArrayWithArrayStorage): - return Array::PossiblyArrayToArrayStorage; + return ArrayMode(Array::ArrayStorage, Array::PossiblyArray, Array::Convert).withProfile(profile, makeSafe); case asArrayModes(NonArray) | asArrayModes(NonArrayWithContiguous): if (action == Array::Write && !profile->mayInterceptIndexedAccesses()) - return Array::ToContiguous; - return Array::SelectUsingPredictions; + return ArrayMode(Array::Contiguous, Array::NonArray, Array::OutOfBounds, Array::Convert); + return ArrayMode(Array::SelectUsingPredictions); case asArrayModes(NonArray) | asArrayModes(NonArrayWithContiguous) | asArrayModes(NonArrayWithArrayStorage): case asArrayModes(NonArray) | asArrayModes(NonArrayWithArrayStorage): if (action == Array::Write && !profile->mayInterceptIndexedAccesses()) - return Array::ToArrayStorage; - return Array::SelectUsingPredictions; + return ArrayMode(Array::ArrayStorage, Array::NonArray, Array::OutOfBounds, Array::Convert); + return ArrayMode(Array::SelectUsingPredictions); case asArrayModes(NonArray) | asArrayModes(NonArrayWithSlowPutArrayStorage): case asArrayModes(NonArray) | asArrayModes(NonArrayWithArrayStorage) | asArrayModes(NonArrayWithSlowPutArrayStorage): if (action == Array::Write && !profile->mayInterceptIndexedAccesses()) - return Array::ToSlowPutArrayStorage; - return Array::SelectUsingPredictions; + return ArrayMode(Array::SlowPutArrayStorage, Array::NonArray, Array::OutOfBounds, Array::Convert); + return ArrayMode(Array::SelectUsingPredictions); default: // We know that this is possibly a kind of array for which, though there is no // useful data in the array profile, we may be able to extract useful data from // the value profiles of the inputs. Hence, we leave it as undecided, and let // the predictions propagator decide later. - return Array::SelectUsingPredictions; + return ArrayMode(Array::SelectUsingPredictions); } } -Array::Mode refineArrayMode(Array::Mode arrayMode, SpeculatedType base, SpeculatedType index) +ArrayMode ArrayMode::refine(SpeculatedType base, SpeculatedType index) const { if (!base || !index) { // It can be that we had a legitimate arrayMode but no incoming predictions. That'll // happen if we inlined code based on, say, a global variable watchpoint, but later // realized that the callsite could not have possibly executed. 
It may be worthwhile // to fix that, but for now I'm leaving it as-is. - return Array::ForceExit; + return ArrayMode(Array::ForceExit); } if (!isInt32Speculation(index) || !isCellSpeculation(base)) - return Array::Generic; + return ArrayMode(Array::Generic); - if (arrayMode == Array::Unprofiled) { + if (type() == Array::Unprofiled) { // If the indexing type wasn't recorded in the array profile but the values are // base=cell property=int, then we know that this access didn't execute. - return Array::ForceExit; + return ArrayMode(Array::ForceExit); } - if (arrayMode != Array::SelectUsingPredictions) - return arrayMode; + if (type() != Array::SelectUsingPredictions) + return *this; if (isStringSpeculation(base)) - return Array::String; + return ArrayMode(Array::String); if (isArgumentsSpeculation(base)) - return Array::Arguments; + return ArrayMode(Array::Arguments); if (isInt8ArraySpeculation(base)) - return Array::Int8Array; + return ArrayMode(Array::Int8Array); if (isInt16ArraySpeculation(base)) - return Array::Int16Array; + return ArrayMode(Array::Int16Array); if (isInt32ArraySpeculation(base)) - return Array::Int32Array; + return ArrayMode(Array::Int32Array); if (isUint8ArraySpeculation(base)) - return Array::Uint8Array; + return ArrayMode(Array::Uint8Array); if (isUint8ClampedArraySpeculation(base)) - return Array::Uint8ClampedArray; + return ArrayMode(Array::Uint8ClampedArray); if (isUint16ArraySpeculation(base)) - return Array::Uint16Array; + return ArrayMode(Array::Uint16Array); if (isUint32ArraySpeculation(base)) - return Array::Uint32Array; + return ArrayMode(Array::Uint32Array); if (isFloat32ArraySpeculation(base)) - return Array::Float32Array; + return ArrayMode(Array::Float32Array); if (isFloat64ArraySpeculation(base)) - return Array::Float64Array; + return ArrayMode(Array::Float64Array); - return Array::Generic; + return ArrayMode(Array::Generic); } -bool modeAlreadyChecked(AbstractValue& value, Array::Mode arrayMode) +bool ArrayMode::alreadyChecked(AbstractValue& value) const { - switch (arrayMode) { + switch (type()) { case Array::Generic: return true; @@ -159,109 +159,89 @@ bool modeAlreadyChecked(AbstractValue& value, Array::Mode arrayMode) return false; case Array::String: - return isStringSpeculation(value.m_type); + return speculationChecked(value.m_type, SpecString); case Array::Contiguous: - case Array::ContiguousToTail: - case Array::ContiguousOutOfBounds: - case Array::PossiblyArrayWithContiguous: - case Array::PossiblyArrayWithContiguousToTail: - case Array::PossiblyArrayWithContiguousOutOfBounds: - case Array::ToContiguous: + if (isJSArray()) { + if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(ArrayWithContiguous))) + return true; + return value.m_currentKnownStructure.hasSingleton() + && hasContiguous(value.m_currentKnownStructure.singleton()->indexingType()) + && (value.m_currentKnownStructure.singleton()->indexingType() & IsArray); + } if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(NonArrayWithContiguous) | asArrayModes(ArrayWithContiguous))) return true; return value.m_currentKnownStructure.hasSingleton() && hasContiguous(value.m_currentKnownStructure.singleton()->indexingType()); - case Array::ArrayWithContiguous: - case Array::ArrayWithContiguousToTail: - case Array::ArrayWithContiguousOutOfBounds: - if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(ArrayWithContiguous))) - return true; - return value.m_currentKnownStructure.hasSingleton() - && hasContiguous(value.m_currentKnownStructure.singleton()->indexingType()) - 
&& (value.m_currentKnownStructure.singleton()->indexingType() & IsArray); - case Array::ArrayStorage: - case Array::ArrayStorageToHole: - case Array::ArrayStorageOutOfBounds: - case Array::PossiblyArrayWithArrayStorage: - case Array::PossiblyArrayWithArrayStorageToHole: - case Array::PossiblyArrayWithArrayStorageOutOfBounds: - case Array::ToArrayStorage: - case Array::PossiblyArrayToArrayStorage: + if (isJSArray()) { + if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(ArrayWithArrayStorage))) + return true; + return value.m_currentKnownStructure.hasSingleton() + && hasFastArrayStorage(value.m_currentKnownStructure.singleton()->indexingType()) + && (value.m_currentKnownStructure.singleton()->indexingType() & IsArray); + } if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(NonArrayWithArrayStorage) | asArrayModes(ArrayWithArrayStorage))) return true; return value.m_currentKnownStructure.hasSingleton() && hasFastArrayStorage(value.m_currentKnownStructure.singleton()->indexingType()); case Array::SlowPutArrayStorage: - case Array::PossiblyArrayWithSlowPutArrayStorage: - case Array::ToSlowPutArrayStorage: + if (isJSArray()) { + if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(ArrayWithArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage))) + return true; + return value.m_currentKnownStructure.hasSingleton() + && hasArrayStorage(value.m_currentKnownStructure.singleton()->indexingType()) + && (value.m_currentKnownStructure.singleton()->indexingType() & IsArray); + } if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(NonArrayWithArrayStorage) | asArrayModes(ArrayWithArrayStorage) | asArrayModes(NonArrayWithSlowPutArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage))) return true; return value.m_currentKnownStructure.hasSingleton() && hasArrayStorage(value.m_currentKnownStructure.singleton()->indexingType()); - case Array::ArrayWithArrayStorage: - case Array::ArrayWithArrayStorageToHole: - case Array::ArrayWithArrayStorageOutOfBounds: - case Array::ArrayToArrayStorage: - if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(ArrayWithArrayStorage))) - return true; - return value.m_currentKnownStructure.hasSingleton() - && hasFastArrayStorage(value.m_currentKnownStructure.singleton()->indexingType()) - && (value.m_currentKnownStructure.singleton()->indexingType() & IsArray); - - case Array::ArrayWithSlowPutArrayStorage: - if (arrayModesAlreadyChecked(value.m_arrayModes, asArrayModes(ArrayWithArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage))) - return true; - return value.m_currentKnownStructure.hasSingleton() - && hasArrayStorage(value.m_currentKnownStructure.singleton()->indexingType()) - && (value.m_currentKnownStructure.singleton()->indexingType() & IsArray); - case Array::Arguments: - return isArgumentsSpeculation(value.m_type); + return speculationChecked(value.m_type, SpecArguments); case Array::Int8Array: - return isInt8ArraySpeculation(value.m_type); + return speculationChecked(value.m_type, SpecInt8Array); case Array::Int16Array: - return isInt16ArraySpeculation(value.m_type); + return speculationChecked(value.m_type, SpecInt16Array); case Array::Int32Array: - return isInt32ArraySpeculation(value.m_type); + return speculationChecked(value.m_type, SpecInt32Array); case Array::Uint8Array: - return isUint8ArraySpeculation(value.m_type); + return speculationChecked(value.m_type, SpecUint8Array); case Array::Uint8ClampedArray: - return isUint8ClampedArraySpeculation(value.m_type); + return speculationChecked(value.m_type, 
SpecUint8ClampedArray); case Array::Uint16Array: - return isUint16ArraySpeculation(value.m_type); + return speculationChecked(value.m_type, SpecUint16Array); case Array::Uint32Array: - return isUint32ArraySpeculation(value.m_type); + return speculationChecked(value.m_type, SpecUint32Array); case Array::Float32Array: - return isFloat32ArraySpeculation(value.m_type); + return speculationChecked(value.m_type, SpecFloat32Array); case Array::Float64Array: - return isFloat64ArraySpeculation(value.m_type); + return speculationChecked(value.m_type, SpecFloat64Array); case Array::SelectUsingPredictions: case Array::Unprofiled: break; } - ASSERT_NOT_REACHED(); + CRASH(); return false; } -const char* modeToString(Array::Mode mode) +const char* arrayTypeToString(Array::Type type) { - switch (mode) { + switch (type) { case Array::SelectUsingPredictions: return "SelectUsingPredictions"; case Array::Unprofiled: @@ -274,56 +254,10 @@ const char* modeToString(Array::Mode mode) return "String"; case Array::Contiguous: return "Contiguous"; - case Array::ContiguousToTail: - return "ContiguousToTail"; - case Array::ContiguousOutOfBounds: - return "ContiguousOutOfBounds"; - case Array::ArrayWithContiguous: - return "ArrayWithContiguous"; - case Array::ArrayWithContiguousToTail: - return "ArrayWithContiguousToTail"; - case Array::ArrayWithContiguousOutOfBounds: - return "ArrayWithContiguousOutOfBounds"; - case Array::PossiblyArrayWithContiguous: - return "PossiblyArrayWithContiguous"; - case Array::PossiblyArrayWithContiguousToTail: - return "PossiblyArrayWithContiguousToTail"; - case Array::PossiblyArrayWithContiguousOutOfBounds: - return "PossiblyArrayWithContiguousOutOfBounds"; case Array::ArrayStorage: return "ArrayStorage"; - case Array::ArrayStorageToHole: - return "ArrayStorageToHole"; case Array::SlowPutArrayStorage: return "SlowPutArrayStorage"; - case Array::ArrayStorageOutOfBounds: - return "ArrayStorageOutOfBounds"; - case Array::ArrayWithArrayStorage: - return "ArrayWithArrayStorage"; - case Array::ArrayWithArrayStorageToHole: - return "ArrayWithArrayStorageToHole"; - case Array::ArrayWithSlowPutArrayStorage: - return "ArrayWithSlowPutArrayStorage"; - case Array::ArrayWithArrayStorageOutOfBounds: - return "ArrayWithArrayStorageOutOfBounds"; - case Array::PossiblyArrayWithArrayStorage: - return "PossiblyArrayWithArrayStorage"; - case Array::PossiblyArrayWithArrayStorageToHole: - return "PossiblyArrayWithArrayStorageToHole"; - case Array::PossiblyArrayWithSlowPutArrayStorage: - return "PossiblyArrayWithSlowPutArrayStorage"; - case Array::PossiblyArrayWithArrayStorageOutOfBounds: - return "PossiblyArrayWithArrayStorageOutOfBounds"; - case Array::ToContiguous: - return "ToContiguous"; - case Array::ToArrayStorage: - return "ToArrayStorage"; - case Array::ToSlowPutArrayStorage: - return "ToSlowPutArrayStorage"; - case Array::ArrayToArrayStorage: - return "ArrayToArrayStorage"; - case Array::PossiblyArrayToArrayStorage: - return "PossiblyArrayToArrayStorage"; case Array::Arguments: return "Arguments"; case Array::Int8Array: @@ -353,6 +287,55 @@ const char* modeToString(Array::Mode mode) } } +const char* arrayClassToString(Array::Class arrayClass) +{ + switch (arrayClass) { + case Array::Array: + return "Array"; + case Array::OriginalArray: + return "OriginalArray"; + case Array::NonArray: + return "NonArray"; + case Array::PossiblyArray: + return "PossiblyArray"; + default: + return "Unknown!"; + } +} + +const char* arraySpeculationToString(Array::Speculation speculation) +{ + switch (speculation) { + 
case Array::InBounds: + return "InBounds"; + case Array::ToHole: + return "ToHole"; + case Array::OutOfBounds: + return "OutOfBounds"; + default: + return "Unknown!"; + } +} + +const char* arrayConversionToString(Array::Conversion conversion) +{ + switch (conversion) { + case Array::AsIs: + return "AsIs"; + case Array::Convert: + return "Convert"; + default: + return "Unknown!"; + } +} + +const char* ArrayMode::toString() const +{ + static char buffer[256]; + snprintf(buffer, sizeof(buffer), "%s%s%s%s", arrayTypeToString(type()), arrayClassToString(arrayClass()), arraySpeculationToString(speculation()), arrayConversionToString(conversion())); + return buffer; +} + } } // namespace JSC::DFG #endif // ENABLE(DFG_JIT) diff --git a/Source/JavaScriptCore/dfg/DFGArrayMode.h b/Source/JavaScriptCore/dfg/DFGArrayMode.h index a1cd74114..615965c92 100644 --- a/Source/JavaScriptCore/dfg/DFGArrayMode.h +++ b/Source/JavaScriptCore/dfg/DFGArrayMode.h @@ -46,42 +46,16 @@ enum Action { Write }; -enum Mode { +enum Type { SelectUsingPredictions, // Implies that we need predictions to decide. We will never get to the backend in this mode. Unprofiled, // Implies that array profiling didn't see anything. But that could be because the operands didn't comply with basic type assumptions (base is cell, property is int). This either becomes Generic or ForceExit depending on value profiling. ForceExit, // Implies that we have no idea how to execute this operation, so we should just give up. Generic, String, - // Modes of conventional indexed storage where the check is non side-effecting. Contiguous, - ContiguousToTail, - ContiguousOutOfBounds, - ArrayWithContiguous, - ArrayWithContiguousToTail, - ArrayWithContiguousOutOfBounds, - PossiblyArrayWithContiguous, - PossiblyArrayWithContiguousToTail, - PossiblyArrayWithContiguousOutOfBounds, ArrayStorage, - ArrayStorageToHole, SlowPutArrayStorage, - ArrayStorageOutOfBounds, - ArrayWithArrayStorage, - ArrayWithArrayStorageToHole, - ArrayWithSlowPutArrayStorage, - ArrayWithArrayStorageOutOfBounds, - PossiblyArrayWithArrayStorage, - PossiblyArrayWithArrayStorageToHole, - PossiblyArrayWithSlowPutArrayStorage, - PossiblyArrayWithArrayStorageOutOfBounds, - - // Modes of conventional indexed storage where the check is side-effecting. - ToContiguous, - ToArrayStorage, - ArrayToArrayStorage, - PossiblyArrayToArrayStorage, - ToSlowPutArrayStorage, Arguments, Int8Array, @@ -94,304 +68,311 @@ enum Mode { Float32Array, Float64Array }; -} // namespace Array - -// Helpers for 'case' statements. For example, saying "case AllArrayStorageModes:" -// is the same as having multiple case statements listing off all of the modes that -// have the word "ArrayStorage" in them. - -// First: helpers for non-side-effecting checks. 
-#define NON_ARRAY_CONTIGUOUS_MODES \ - Array::Contiguous: \ - case Array::ContiguousToTail: \ - case Array::ContiguousOutOfBounds: \ - case Array::PossiblyArrayWithContiguous: \ - case Array::PossiblyArrayWithContiguousToTail: \ - case Array::PossiblyArrayWithContiguousOutOfBounds -#define ARRAY_WITH_CONTIGUOUS_MODES \ - Array::ArrayWithContiguous: \ - case Array::ArrayWithContiguousToTail: \ - case Array::ArrayWithContiguousOutOfBounds -#define ALL_CONTIGUOUS_MODES \ - NON_ARRAY_CONTIGUOUS_MODES: \ - case ARRAY_WITH_CONTIGUOUS_MODES -#define IN_BOUNDS_CONTIGUOUS_MODES \ - Array::Contiguous: \ - case Array::ArrayWithContiguous: \ - case Array::PossiblyArrayWithContiguous -#define CONTIGUOUS_TO_TAIL_MODES \ - Array::ContiguousToTail: \ - case Array::ArrayWithContiguousToTail: \ - case Array::PossiblyArrayWithContiguousToTail -#define OUT_OF_BOUNDS_CONTIGUOUS_MODES \ - Array::ContiguousOutOfBounds: \ - case Array::ArrayWithContiguousOutOfBounds: \ - case Array::PossiblyArrayWithContiguousOutOfBounds -#define NON_ARRAY_ARRAY_STORAGE_MODES \ - Array::ArrayStorage: \ - case Array::ArrayStorageToHole: \ - case Array::SlowPutArrayStorage: \ - case Array::ArrayStorageOutOfBounds: \ - case Array::PossiblyArrayWithArrayStorage: \ - case Array::PossiblyArrayWithArrayStorageToHole: \ - case Array::PossiblyArrayWithSlowPutArrayStorage: \ - case Array::PossiblyArrayWithArrayStorageOutOfBounds -#define ARRAY_WITH_ARRAY_STORAGE_MODES \ - Array::ArrayWithArrayStorage: \ - case Array::ArrayWithArrayStorageToHole: \ - case Array::ArrayWithSlowPutArrayStorage: \ - case Array::ArrayWithArrayStorageOutOfBounds -#define ALL_ARRAY_STORAGE_MODES \ - NON_ARRAY_ARRAY_STORAGE_MODES: \ - case ARRAY_WITH_ARRAY_STORAGE_MODES -#define IN_BOUNDS_ARRAY_STORAGE_MODES \ - Array::ArrayStorage: \ - case Array::ArrayWithArrayStorage: \ - case Array::PossiblyArrayWithArrayStorage -#define ARRAY_STORAGE_TO_HOLE_MODES \ - Array::ArrayStorageToHole: \ - case Array::ArrayWithArrayStorageToHole: \ - case Array::PossiblyArrayWithArrayStorageToHole -#define SLOW_PUT_ARRAY_STORAGE_MODES \ - Array::SlowPutArrayStorage: \ - case Array::ArrayWithSlowPutArrayStorage: \ - case Array::PossiblyArrayWithSlowPutArrayStorage -#define OUT_OF_BOUNDS_ARRAY_STORAGE_MODES \ - Array::ArrayStorageOutOfBounds: \ - case Array::ArrayWithArrayStorageOutOfBounds: \ - case Array::PossiblyArrayWithArrayStorageOutOfBounds -// Next: helpers for side-effecting checks. -#define NON_ARRAY_EFFECTFUL_MODES \ - Array::ToContiguous: \ - case Array::ToArrayStorage: \ - case Array::ToSlowPutArrayStorage: \ - case Array::PossiblyArrayToArrayStorage -#define ARRAY_EFFECTFUL_MODES \ - Array::ArrayToArrayStorage -#define ALL_EFFECTFUL_CONTIGUOUS_MODES \ - Array::ToContiguous -#define ALL_EFFECTFUL_ARRAY_STORAGE_MODES \ - Array::ToArrayStorage: \ - case Array::ToSlowPutArrayStorage: \ - case Array::ArrayToArrayStorage: \ - case Array::PossiblyArrayToArrayStorage -#define SLOW_PUT_EFFECTFUL_ARRAY_STORAGE_MODES \ - Array::ToSlowPutArrayStorage -#define ALL_EFFECTFUL_MODES \ - ALL_EFFECTFUL_CONTIGUOUS_MODES: \ - case ALL_EFFECTFUL_ARRAY_STORAGE_MODES - -Array::Mode fromObserved(ArrayProfile*, Array::Action, bool makeSafe); - -Array::Mode refineArrayMode(Array::Mode, SpeculatedType base, SpeculatedType index); +enum Class { + NonArray, // Definitely some object that is not a JSArray. + Array, // Definitely a JSArray, and may or may not have custom properties or have undergone some other bizarre transitions. 
+ OriginalArray, // Definitely a JSArray, and still has one of the primordial JSArray structures for the global object that this code block (possibly inlined code block) belongs to. + PossiblyArray // Some object that may or may not be a JSArray. +}; -bool modeAlreadyChecked(AbstractValue&, Array::Mode); +enum Speculation { + InBounds, + ToHole, + OutOfBounds +}; -const char* modeToString(Array::Mode); +enum Conversion { + AsIs, + Convert +}; +} // namespace Array -inline bool modeUsesButterfly(Array::Mode arrayMode) -{ - switch (arrayMode) { - case ALL_CONTIGUOUS_MODES: - case ALL_ARRAY_STORAGE_MODES: - case ALL_EFFECTFUL_MODES: - return true; - default: - return false; +const char* arrayTypeToString(Array::Type); +const char* arrayClassToString(Array::Class); +const char* arraySpeculationToString(Array::Speculation); +const char* arrayConversionToString(Array::Conversion); + +class ArrayMode { +public: + ArrayMode() + { + u.asBytes.type = Array::SelectUsingPredictions; + u.asBytes.arrayClass = Array::NonArray; + u.asBytes.speculation = Array::InBounds; + u.asBytes.conversion = Array::AsIs; } -} - -inline bool modeIsJSArray(Array::Mode arrayMode) -{ - switch (arrayMode) { - case ARRAY_WITH_CONTIGUOUS_MODES: - case ARRAY_WITH_ARRAY_STORAGE_MODES: - case ARRAY_EFFECTFUL_MODES: - return true; - default: - return false; + + explicit ArrayMode(Array::Type type) + { + u.asBytes.type = type; + u.asBytes.arrayClass = Array::NonArray; + u.asBytes.speculation = Array::InBounds; + u.asBytes.conversion = Array::AsIs; } -} - -inline bool isInBoundsAccess(Array::Mode arrayMode) -{ - switch (arrayMode) { - case IN_BOUNDS_CONTIGUOUS_MODES: - case CONTIGUOUS_TO_TAIL_MODES: - case ARRAY_STORAGE_TO_HOLE_MODES: - case IN_BOUNDS_ARRAY_STORAGE_MODES: - return true; - default: - return false; + + ArrayMode(Array::Type type, Array::Class arrayClass, Array::Speculation speculation, Array::Conversion conversion) + { + u.asBytes.type = type; + u.asBytes.arrayClass = arrayClass; + u.asBytes.speculation = speculation; + u.asBytes.conversion = conversion; } -} - -inline bool isSlowPutAccess(Array::Mode arrayMode) -{ - switch (arrayMode) { - case SLOW_PUT_ARRAY_STORAGE_MODES: - case SLOW_PUT_EFFECTFUL_ARRAY_STORAGE_MODES: - return true; - default: - return false; + + ArrayMode(Array::Type type, Array::Class arrayClass, Array::Conversion conversion) + { + u.asBytes.type = type; + u.asBytes.arrayClass = arrayClass; + u.asBytes.speculation = Array::InBounds; + u.asBytes.conversion = conversion; } -} - -inline bool mayStoreToTail(Array::Mode arrayMode) -{ - switch (arrayMode) { - case CONTIGUOUS_TO_TAIL_MODES: - case OUT_OF_BOUNDS_CONTIGUOUS_MODES: - case ALL_EFFECTFUL_CONTIGUOUS_MODES: - return true; - default: - return false; + + Array::Type type() const { return static_cast<Array::Type>(u.asBytes.type); } + Array::Class arrayClass() const { return static_cast<Array::Class>(u.asBytes.arrayClass); } + Array::Speculation speculation() const { return static_cast<Array::Speculation>(u.asBytes.speculation); } + Array::Conversion conversion() const { return static_cast<Array::Conversion>(u.asBytes.conversion); } + + unsigned asWord() const { return u.asWord; } + + static ArrayMode fromWord(unsigned word) + { + return ArrayMode(word); } -} - -inline bool mayStoreToHole(Array::Mode arrayMode) -{ - switch (arrayMode) { - case ARRAY_STORAGE_TO_HOLE_MODES: - case OUT_OF_BOUNDS_ARRAY_STORAGE_MODES: - case SLOW_PUT_ARRAY_STORAGE_MODES: - case ALL_EFFECTFUL_ARRAY_STORAGE_MODES: - return true; - default: - return false; + + static 
ArrayMode fromObserved(ArrayProfile*, Array::Action, bool makeSafe); + + ArrayMode withSpeculation(Array::Speculation speculation) const + { + return ArrayMode(type(), arrayClass(), speculation, conversion()); } -} - -inline bool canCSEStorage(Array::Mode arrayMode) -{ - switch (arrayMode) { - case Array::SelectUsingPredictions: - case Array::Unprofiled: - case Array::ForceExit: - case Array::Generic: - case Array::Arguments: - return false; - default: - return true; + + ArrayMode withProfile(ArrayProfile* profile, bool makeSafe) const + { + Array::Speculation mySpeculation; + Array::Class myArrayClass; + + if (makeSafe) + mySpeculation = Array::OutOfBounds; + else if (profile->mayStoreToHole()) + mySpeculation = Array::ToHole; + else + mySpeculation = Array::InBounds; + + if (isJSArray()) { + if (profile->usesOriginalArrayStructures()) + myArrayClass = Array::OriginalArray; + else + myArrayClass = Array::Array; + } else + myArrayClass = arrayClass(); + + return ArrayMode(type(), myArrayClass, mySpeculation, conversion()); } -} - -inline bool lengthNeedsStorage(Array::Mode arrayMode) -{ - return modeIsJSArray(arrayMode); -} - -inline Array::Mode modeForPut(Array::Mode arrayMode) -{ - switch (arrayMode) { - case Array::String: - return Array::Generic; + + ArrayMode refine(SpeculatedType base, SpeculatedType index) const; + + bool alreadyChecked(AbstractValue&) const; + + const char* toString() const; + + bool usesButterfly() const + { + switch (type()) { + case Array::Contiguous: + case Array::ArrayStorage: + case Array::SlowPutArrayStorage: + return true; + default: + return false; + } + } + + bool isJSArray() const + { + switch (arrayClass()) { + case Array::Array: + case Array::OriginalArray: + return true; + default: + return false; + } + } + + bool isJSArrayWithOriginalStructure() const + { + return arrayClass() == Array::OriginalArray; + } + + bool isInBounds() const + { + return speculation() == Array::InBounds; + } + + bool mayStoreToHole() const + { + return !isInBounds(); + } + + bool isOutOfBounds() const + { + return speculation() == Array::OutOfBounds; + } + + bool isSlowPut() const + { + return type() == Array::SlowPutArrayStorage; + } + + bool canCSEStorage() const + { + switch (type()) { + case Array::SelectUsingPredictions: + case Array::Unprofiled: + case Array::ForceExit: + case Array::Generic: + case Array::Arguments: + return false; + default: + return true; + } + } + + bool lengthNeedsStorage() const + { + return isJSArray(); + } + + ArrayMode modeForPut() const + { + switch (type()) { + case Array::String: + return ArrayMode(Array::Generic); #if USE(JSVALUE32_64) - case Array::Arguments: - return Array::Generic; + case Array::Arguments: + return ArrayMode(Array::Generic); #endif - default: - return arrayMode; + default: + return *this; + } } -} - -inline bool modeIsSpecific(Array::Mode mode) -{ - switch (mode) { - case Array::SelectUsingPredictions: - case Array::Unprofiled: - case Array::ForceExit: - case Array::Generic: - return false; - default: - return true; + + bool isSpecific() const + { + switch (type()) { + case Array::SelectUsingPredictions: + case Array::Unprofiled: + case Array::ForceExit: + case Array::Generic: + return false; + default: + return true; + } } -} - -inline bool modeSupportsLength(Array::Mode mode) -{ - switch (mode) { - case Array::SelectUsingPredictions: - case Array::Unprofiled: - case Array::ForceExit: - case Array::Generic: - case NON_ARRAY_CONTIGUOUS_MODES: - case NON_ARRAY_ARRAY_STORAGE_MODES: - case NON_ARRAY_EFFECTFUL_MODES: - 
return false; - default: - return true; + + bool supportsLength() const + { + switch (type()) { + case Array::SelectUsingPredictions: + case Array::Unprofiled: + case Array::ForceExit: + case Array::Generic: + return false; + case Array::Contiguous: + case Array::ArrayStorage: + case Array::SlowPutArrayStorage: + return isJSArray(); + default: + return true; + } } -} - -inline bool benefitsFromStructureCheck(Array::Mode mode) -{ - switch (mode) { - case ALL_EFFECTFUL_MODES: - case Array::SelectUsingPredictions: - case Array::Unprofiled: - case Array::ForceExit: - case Array::Generic: - return false; - default: - return true; + + bool benefitsFromStructureCheck() const + { + switch (type()) { + case Array::SelectUsingPredictions: + case Array::Unprofiled: + case Array::ForceExit: + case Array::Generic: + return false; + default: + return conversion() == Array::AsIs; + } } -} + + bool doesConversion() const + { + return conversion() == Array::Convert; + } + + ArrayModes arrayModesThatPassFiltering() const + { + switch (type()) { + case Array::Generic: + return ALL_ARRAY_MODES; + case Array::Contiguous: + return arrayModesWithIndexingShape(ContiguousShape); + case Array::ArrayStorage: + return arrayModesWithIndexingShape(ArrayStorageShape); + case Array::SlowPutArrayStorage: + return arrayModesWithIndexingShape(SlowPutArrayStorageShape); + default: + return asArrayModes(NonArray); + } + } + + bool operator==(const ArrayMode& other) const + { + return type() == other.type() + && arrayClass() == other.arrayClass() + && speculation() == other.speculation() + && conversion() == other.conversion(); + } + + bool operator!=(const ArrayMode& other) const + { + return !(*this == other); + } +private: + explicit ArrayMode(unsigned word) + { + u.asWord = word; + } + + ArrayModes arrayModesWithIndexingShape(IndexingType shape) const + { + switch (arrayClass()) { + case Array::NonArray: + return asArrayModes(shape); + case Array::Array: + case Array::OriginalArray: + return asArrayModes(shape | IsArray); + case Array::PossiblyArray: + return asArrayModes(shape) | asArrayModes(shape | IsArray); + default: + // This is only necessary for C++ compilers that don't understand enums. + return 0; + } + } + + union { + struct { + uint8_t type; + uint8_t arrayClass; + uint8_t speculation; + uint8_t conversion; + } asBytes; + unsigned asWord; + } u; +}; -inline bool isEffectful(Array::Mode mode) +static inline bool canCSEStorage(const ArrayMode& arrayMode) { - switch (mode) { - case ALL_EFFECTFUL_MODES: - return true; - default: - return false; - } + return arrayMode.canCSEStorage(); } -// This returns the set of array modes that will pass filtering of a CheckArray or -// Arrayify with the given mode. 
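A minimal standalone sketch of the word-packing idea behind the ArrayMode class introduced above: four byte-sized properties round-tripped through a single 32-bit word, so a complete mode still fits in a Node's OpInfo slot. Everything below is illustrative only and not code from this commit; the type names are hypothetical stand-ins, and explicit shifts are used in place of the union that the patch itself relies on.

    // Illustration only (hypothetical names): same round-trip as ArrayMode::asWord()/fromWord().
    #include <cassert>
    #include <cstdint>

    enum class Type : uint8_t { Contiguous, ArrayStorage, SlowPutArrayStorage };
    enum class Class : uint8_t { NonArray, Array, OriginalArray, PossiblyArray };
    enum class Speculation : uint8_t { InBounds, ToHole, OutOfBounds };
    enum class Conversion : uint8_t { AsIs, Convert };

    struct PackedArrayMode {
        Type type; Class arrayClass; Speculation speculation; Conversion conversion;

        // Pack the four bytes into one word, low byte first.
        uint32_t asWord() const
        {
            return uint32_t(type)
                | (uint32_t(arrayClass) << 8)
                | (uint32_t(speculation) << 16)
                | (uint32_t(conversion) << 24);
        }

        // Recover the four fields from a previously packed word.
        static PackedArrayMode fromWord(uint32_t word)
        {
            return { Type(word & 0xff), Class((word >> 8) & 0xff),
                     Speculation((word >> 16) & 0xff), Conversion((word >> 24) & 0xff) };
        }
    };

    int main()
    {
        PackedArrayMode mode { Type::ArrayStorage, Class::OriginalArray, Speculation::OutOfBounds, Conversion::AsIs };
        PackedArrayMode back = PackedArrayMode::fromWord(mode.asWord());
        assert(back.type == Type::ArrayStorage && back.arrayClass == Class::OriginalArray
            && back.speculation == Speculation::OutOfBounds && back.conversion == Conversion::AsIs);
        return 0;
    }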
-inline ArrayModes arrayModesFor(Array::Mode arrayMode) +static inline bool lengthNeedsStorage(const ArrayMode& arrayMode) { - switch (arrayMode) { - case Array::Generic: - return ALL_ARRAY_MODES; - case Array::Contiguous: - case Array::ContiguousToTail: - case Array::ContiguousOutOfBounds: - case Array::ToContiguous: - return asArrayModes(NonArrayWithContiguous); - case Array::PossiblyArrayWithContiguous: - case Array::PossiblyArrayWithContiguousToTail: - case Array::PossiblyArrayWithContiguousOutOfBounds: - return asArrayModes(NonArrayWithContiguous) | asArrayModes(ArrayWithContiguous); - case ARRAY_WITH_CONTIGUOUS_MODES: - return asArrayModes(ArrayWithContiguous); - case Array::ArrayStorage: - case Array::ArrayStorageToHole: - case Array::ArrayStorageOutOfBounds: - case Array::ToArrayStorage: - return asArrayModes(NonArrayWithArrayStorage); - case Array::ToSlowPutArrayStorage: - case Array::SlowPutArrayStorage: - return asArrayModes(NonArrayWithArrayStorage) | asArrayModes(NonArrayWithSlowPutArrayStorage); - case Array::PossiblyArrayWithArrayStorage: - case Array::PossiblyArrayWithArrayStorageToHole: - case Array::PossiblyArrayWithArrayStorageOutOfBounds: - case Array::PossiblyArrayToArrayStorage: - return asArrayModes(NonArrayWithArrayStorage) | asArrayModes(ArrayWithArrayStorage); - case Array::PossiblyArrayWithSlowPutArrayStorage: - return asArrayModes(NonArrayWithArrayStorage) | asArrayModes(ArrayWithArrayStorage) | asArrayModes(NonArrayWithSlowPutArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage); - case Array::ArrayWithArrayStorage: - case Array::ArrayWithArrayStorageToHole: - case Array::ArrayWithArrayStorageOutOfBounds: - case Array::ArrayToArrayStorage: - return asArrayModes(ArrayWithArrayStorage); - case Array::ArrayWithSlowPutArrayStorage: - return asArrayModes(ArrayWithArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage); - default: - return asArrayModes(NonArray); - } + return arrayMode.lengthNeedsStorage(); } } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h index 953a743ff..75b9c7072 100644 --- a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h +++ b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h @@ -320,6 +320,13 @@ public: return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock()); } + CodeBlock* baselineCodeBlockFor(InlineCallFrame* inlineCallFrame) + { + if (!inlineCallFrame) + return baselineCodeBlock(); + return baselineCodeBlockForInlineCallFrame(inlineCallFrame); + } + CodeBlock* baselineCodeBlock() { return m_baselineCodeBlock; diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeCache.h b/Source/JavaScriptCore/dfg/DFGByteCodeCache.h index 6b9056e54..e1837b041 100644 --- a/Source/JavaScriptCore/dfg/DFGByteCodeCache.h +++ b/Source/JavaScriptCore/dfg/DFGByteCodeCache.h @@ -158,7 +158,7 @@ public: // Nope, so try to parse one. JSObject* exception; value.owned = true; - value.codeBlock = key.executable()->produceCodeBlockFor(scope, OptimizingCompilation, key.kind(), exception).leakPtr(); + value.codeBlock = key.executable()->produceCodeBlockFor(scope, key.kind(), exception).leakPtr(); } // Check if there is any reason to reject this from our cache. 
If so, then diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp index 36d18d7b3..70aa2b637 100644 --- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp +++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp @@ -35,7 +35,6 @@ #include "DFGByteCodeCache.h" #include "DFGCapabilities.h" #include "GetByIdStatus.h" -#include "MethodCallLinkStatus.h" #include "PutByIdStatus.h" #include "ResolveGlobalStatus.h" #include <wtf/HashMap.h> @@ -906,15 +905,15 @@ private: return getPrediction(m_graph.size(), m_currentProfilingIndex); } - Array::Mode getArrayMode(ArrayProfile* profile) + ArrayMode getArrayMode(ArrayProfile* profile) { - profile->computeUpdatedPrediction(); - return fromObserved(profile, Array::Read, false); + profile->computeUpdatedPrediction(m_inlineStackTop->m_codeBlock); + return ArrayMode::fromObserved(profile, Array::Read, false); } - Array::Mode getArrayModeAndEmitChecks(ArrayProfile* profile, Array::Action action, NodeIndex base) + ArrayMode getArrayModeAndEmitChecks(ArrayProfile* profile, Array::Action action, NodeIndex base) { - profile->computeUpdatedPrediction(); + profile->computeUpdatedPrediction(m_inlineStackTop->m_codeBlock); #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE) if (m_inlineStackTop->m_profiledBlock->numberOfRareCaseProfiles()) @@ -926,9 +925,9 @@ private: m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, OutOfBounds); - Array::Mode result = fromObserved(profile, action, makeSafe); + ArrayMode result = ArrayMode::fromObserved(profile, action, makeSafe); - if (profile->hasDefiniteStructure() && benefitsFromStructureCheck(result)) + if (profile->hasDefiniteStructure() && result.benefitsFromStructureCheck()) addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(profile->expectedStructure())), base); return result; @@ -1649,16 +1648,11 @@ bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrins if (argumentCountIncludingThis != 2) return false; - Array::Mode arrayMode = getArrayMode(m_currentInstruction[5].u.arrayProfile); - switch (arrayMode) { - case Array::ArrayWithArrayStorageToHole: - ASSERT_NOT_REACHED(); - - case Array::ArrayWithContiguous: - case Array::ArrayWithContiguousOutOfBounds: - case Array::ArrayWithArrayStorage: - case Array::ArrayWithArrayStorageOutOfBounds: { - NodeIndex arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1))); + ArrayMode arrayMode = getArrayMode(m_currentInstruction[5].u.arrayProfile); + switch (arrayMode.type()) { + case Array::Contiguous: + case Array::ArrayStorage: { + NodeIndex arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1))); if (usesResult) set(resultOperand, arrayPush); @@ -1674,16 +1668,11 @@ bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrins if (argumentCountIncludingThis != 1) return false; - Array::Mode arrayMode = getArrayMode(m_currentInstruction[5].u.arrayProfile); - switch (arrayMode) { - case Array::ArrayWithArrayStorageToHole: - ASSERT_NOT_REACHED(); - - case Array::ArrayWithContiguous: - case Array::ArrayWithContiguousOutOfBounds: - case Array::ArrayWithArrayStorage: - case Array::ArrayWithArrayStorageOutOfBounds: { - NodeIndex arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode), OpInfo(prediction), 
get(registerOffset + argumentToOperand(0))); + ArrayMode arrayMode = getArrayMode(m_currentInstruction[5].u.arrayProfile); + switch (arrayMode.type()) { + case Array::Contiguous: + case Array::ArrayStorage: { + NodeIndex arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(registerOffset + argumentToOperand(0))); if (usesResult) set(resultOperand, arrayPop); return true; @@ -1699,9 +1688,6 @@ bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrins return false; int thisOperand = registerOffset + argumentToOperand(0); - if (!(m_graph[get(thisOperand)].prediction() & SpecString)) - return false; - int indexOperand = registerOffset + argumentToOperand(1); NodeIndex charCode = addToGraph(StringCharCodeAt, OpInfo(Array::String), get(thisOperand), getToInt32(indexOperand)); @@ -1715,9 +1701,6 @@ bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrins return false; int thisOperand = registerOffset + argumentToOperand(0); - if (!(m_graph[get(thisOperand)].prediction() & SpecString)) - return false; - int indexOperand = registerOffset + argumentToOperand(1); NodeIndex charCode = addToGraph(StringCharAt, OpInfo(Array::String), get(thisOperand), getToInt32(indexOperand)); @@ -1793,6 +1776,10 @@ NodeIndex ByteCodeParser::handleGetByOffset(SpeculatedType prediction, NodeIndex propertyStorage = base; else propertyStorage = addToGraph(GetButterfly, base); + // FIXME: It would be far more efficient for load elimination (and safer from + // an OSR standpoint) if GetByOffset also referenced the object we were loading + // from, and if we could load eliminate a GetByOffset even if the butterfly + // had changed. That would be a great success. NodeIndex getByOffset = addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage); StorageAccessData storageAccessData; @@ -1888,12 +1875,12 @@ bool ByteCodeParser::parseResolveOperations(SpeculatedType prediction, unsigned while (resolvingBase) { switch (pc->m_operation) { case ResolveOperation::ReturnGlobalObjectAsBase: - *base = get(m_codeBlock->globalObjectConstant()); + *base = cellConstant(globalObject); ASSERT(!value); return true; case ResolveOperation::SetBaseToGlobal: - *base = get(m_codeBlock->globalObjectConstant()); + *base = cellConstant(globalObject); setBase = true; resolvingBase = false; ++pc; @@ -2150,7 +2137,11 @@ bool ByteCodeParser::parseBlock(unsigned limit) case op_new_array_buffer: { int startConstant = currentInstruction[2].u.operand; int numConstants = currentInstruction[3].u.operand; - set(currentInstruction[1].u.operand, addToGraph(NewArrayBuffer, OpInfo(m_inlineStackTop->m_constantBufferRemap[startConstant]), OpInfo(numConstants))); + NewArrayBufferData data; + data.startConstant = m_inlineStackTop->m_constantBufferRemap[startConstant]; + data.numConstants = numConstants; + m_graph.m_newArrayBufferData.append(data); + set(currentInstruction[1].u.operand, addToGraph(NewArrayBuffer, OpInfo(&m_graph.m_newArrayBufferData.last()))); NEXT_OPCODE(op_new_array_buffer); } @@ -2472,9 +2463,9 @@ bool ByteCodeParser::parseBlock(unsigned limit) SpeculatedType prediction = getPrediction(); NodeIndex base = get(currentInstruction[2].u.operand); - Array::Mode arrayMode = getArrayModeAndEmitChecks(currentInstruction[4].u.arrayProfile, Array::Read, base); + ArrayMode arrayMode = getArrayModeAndEmitChecks(currentInstruction[4].u.arrayProfile, Array::Read, base); NodeIndex property = get(currentInstruction[3].u.operand); - NodeIndex 
getByVal = addToGraph(GetByVal, OpInfo(arrayMode), OpInfo(prediction), base, property); + NodeIndex getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property); set(currentInstruction[1].u.operand, getByVal); NEXT_OPCODE(op_get_by_val); @@ -2483,7 +2474,7 @@ bool ByteCodeParser::parseBlock(unsigned limit) case op_put_by_val: { NodeIndex base = get(currentInstruction[1].u.operand); - Array::Mode arrayMode = getArrayModeAndEmitChecks(currentInstruction[4].u.arrayProfile, Array::Write, base); + ArrayMode arrayMode = getArrayModeAndEmitChecks(currentInstruction[4].u.arrayProfile, Array::Write, base); NodeIndex property = get(currentInstruction[2].u.operand); NodeIndex value = get(currentInstruction[3].u.operand); @@ -2492,53 +2483,11 @@ bool ByteCodeParser::parseBlock(unsigned limit) addVarArgChild(property); addVarArgChild(value); addVarArgChild(NoNode); // Leave room for property storage. - addToGraph(Node::VarArg, PutByVal, OpInfo(arrayMode), OpInfo(0)); + addToGraph(Node::VarArg, PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0)); NEXT_OPCODE(op_put_by_val); } - case op_method_check: { - m_currentProfilingIndex += OPCODE_LENGTH(op_method_check); - Instruction* getInstruction = currentInstruction + OPCODE_LENGTH(op_method_check); - - SpeculatedType prediction = getPrediction(); - - ASSERT(interpreter->getOpcodeID(getInstruction->u.opcode) == op_get_by_id - || interpreter->getOpcodeID(getInstruction->u.opcode) == op_get_by_id_out_of_line); - - NodeIndex base = get(getInstruction[2].u.operand); - unsigned identifier = m_inlineStackTop->m_identifierRemap[getInstruction[3].u.operand]; - - // Check if the method_check was monomorphic. If so, emit a CheckXYZMethod - // node, which is a lot more efficient. - GetByIdStatus getByIdStatus = GetByIdStatus::computeFor( - m_inlineStackTop->m_profiledBlock, - m_currentIndex, - m_codeBlock->identifier(identifier)); - MethodCallLinkStatus methodCallStatus = MethodCallLinkStatus::computeFor( - m_inlineStackTop->m_profiledBlock, m_currentIndex); - - if (methodCallStatus.isSet() - && !getByIdStatus.wasSeenInJIT() - && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) { - // It's monomorphic as far as we can tell, since the method_check was linked - // but the slow path (i.e. the normal get_by_id) never fired. 
- - addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCallStatus.structure())), base); - if (methodCallStatus.needsPrototypeCheck()) { - addStructureTransitionCheck( - methodCallStatus.prototype(), methodCallStatus.prototypeStructure()); - addToGraph(Phantom, base); - } - set(getInstruction[1].u.operand, cellConstant(methodCallStatus.function())); - } else { - handleGetById( - getInstruction[1].u.operand, prediction, base, identifier, getByIdStatus); - } - - m_currentIndex += OPCODE_LENGTH(op_method_check) + OPCODE_LENGTH(op_get_by_id); - continue; - } case op_get_by_id: case op_get_by_id_out_of_line: case op_get_array_length: { @@ -2665,6 +2614,10 @@ bool ByteCodeParser::parseBlock(unsigned limit) NEXT_OPCODE(op_put_by_id); } + case op_init_global_const_nop: { + NEXT_OPCODE(op_init_global_const_nop); + } + case op_init_global_const: { NodeIndex value = get(currentInstruction[2].u.operand); addToGraph( @@ -2902,10 +2855,10 @@ bool ByteCodeParser::parseBlock(unsigned limit) addToGraph(Throw, get(currentInstruction[1].u.operand)); LAST_OPCODE(op_throw); - case op_throw_reference_error: + case op_throw_static_error: flushArgumentsAndCapturedVariables(); addToGraph(ThrowReferenceError); - LAST_OPCODE(op_throw_reference_error); + LAST_OPCODE(op_throw_static_error); case op_call: handleCall(interpreter, currentInstruction, Call, CodeForCall); @@ -3104,6 +3057,14 @@ bool ByteCodeParser::parseBlock(unsigned limit) NodeIndex base = 0; NodeIndex value = 0; if (parseResolveOperations(prediction, identifier, operations, putToBaseOperation, &base, &value)) { + // First create OSR hints only. + set(baseDst, base); + set(valueDst, value); + + // If we try to hoist structure checks into here, then we're guaranteed that they will occur + // *after* we have already set up the values for OSR. + + // Then do the real SetLocals. set(baseDst, base); set(valueDst, value); } else { @@ -3124,6 +3085,14 @@ bool ByteCodeParser::parseBlock(unsigned limit) NodeIndex base = 0; NodeIndex value = 0; if (parseResolveOperations(prediction, identifier, operations, 0, &base, &value)) { + // First create OSR hints only. + set(baseDst, base); + set(valueDst, value); + + // If we try to hoist structure checks into here, then we're guaranteed that they will occur + // *after* we have already set up the values for OSR. + + // Then do the real SetLocals. set(baseDst, base); set(valueDst, value); } else { @@ -3632,7 +3601,7 @@ void ByteCodeParser::parseCodeBlock() dataLog("Parsing code block %p. codeType = %s, captureCount = %u, needsFullScopeChain = %s, needsActivation = %s, isStrictMode = %s\n", codeBlock, codeTypeToString(codeBlock->codeType()), - codeBlock->symbolTable()->captureCount(), + codeBlock->symbolTable() ? 
codeBlock->symbolTable()->captureCount() : 0, codeBlock->needsFullScopeChain()?"true":"false", codeBlock->ownerExecutable()->needsActivation()?"true":"false", codeBlock->ownerExecutable()->isStrictMode()?"true":"false"); diff --git a/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp b/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp index 185332921..19051c174 100644 --- a/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp @@ -176,13 +176,8 @@ private: case PutByVal: if (!m_graph.byValIsPure(node)) return NoNode; - switch (node.arrayMode()) { - case CONTIGUOUS_TO_TAIL_MODES: - case ARRAY_STORAGE_TO_HOLE_MODES: + if (node.arrayMode().mayStoreToHole()) return NoNode; - default: - break; - } break; default: @@ -438,6 +433,12 @@ private: } return false; + case Arrayify: + case ArrayifyToStructure: + // We could check if the arrayification could affect our structures. + // But that seems like it would take Effort. + return false; + default: if (m_graph.clobbersWorld(index)) return false; @@ -489,6 +490,12 @@ private: return true; break; + case Arrayify: + case ArrayifyToStructure: + // We could check if the arrayification could affect our structures. + // But that seems like it would take Effort. + return false; + default: if (m_graph.clobbersWorld(index)) return false; @@ -663,7 +670,6 @@ private: case AllocatePropertyStorage: case ReallocatePropertyStorage: - case Arrayify: // If we can cheaply prove this is a change to our object's storage, we // can optimize and use its result. if (node.child1() == child1) @@ -689,6 +695,12 @@ private: } return NoNode; + case Arrayify: + case ArrayifyToStructure: + // We could check if the arrayification could affect our butterfly. + // But that seems like it would take Effort. + return NoNode; + default: if (m_graph.clobbersWorld(index)) return NoNode; @@ -698,7 +710,7 @@ private: return NoNode; } - bool checkArrayElimination(NodeIndex child1, Array::Mode arrayMode) + bool checkArrayElimination(NodeIndex child1, ArrayMode arrayMode) { for (unsigned i = m_indexInBlock; i--;) { NodeIndex index = m_currentBlock->at(i); @@ -720,6 +732,12 @@ private: return true; break; + case Arrayify: + case ArrayifyToStructure: + // We could check if the arrayification could affect our array. + // But that seems like it would take Effort. 
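// (Editorial note, not part of the patch: the conservative "return false" that follows exists
// because Arrayify/ArrayifyToStructure may convert the object's indexing storage, which can
// reallocate the butterfly. Any structure, storage, or array-shape fact learned before such a
// conversion cannot safely be reused after it, so the CSE pass simply refuses to eliminate
// across these nodes rather than trying to prove the conversion harmless.)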
+ return false; + default: if (m_graph.clobbersWorld(index)) return false; @@ -729,7 +747,7 @@ private: return false; } - NodeIndex getIndexedPropertyStorageLoadElimination(NodeIndex child1, Array::Mode arrayMode) + NodeIndex getIndexedPropertyStorageLoadElimination(NodeIndex child1, ArrayMode arrayMode) { for (unsigned i = m_indexInBlock; i--;) { NodeIndex index = m_currentBlock->at(i); @@ -788,8 +806,6 @@ private: } return NoNode; } - - NodeIndex getLocalLoadElimination(VirtualRegister local, NodeIndex& relevantLocalOp, bool careAboutClobbering) { @@ -1234,7 +1250,7 @@ private: case PutByVal: { Edge child1 = m_graph.varArgChild(node, 0); Edge child2 = m_graph.varArgChild(node, 1); - if (canCSEStorage(node.arrayMode())) { + if (node.arrayMode().canCSEStorage()) { NodeIndex nodeIndex = getByValLoadElimination(child1.index(), child2.index()); if (nodeIndex == NoNode) break; diff --git a/Source/JavaScriptCore/dfg/DFGCapabilities.h b/Source/JavaScriptCore/dfg/DFGCapabilities.h index e80cc28ae..1f9778efe 100644 --- a/Source/JavaScriptCore/dfg/DFGCapabilities.h +++ b/Source/JavaScriptCore/dfg/DFGCapabilities.h @@ -157,7 +157,6 @@ inline CapabilityLevel canCompileOpcode(OpcodeID opcodeID, CodeBlock*, Instructi case op_nstricteq: case op_get_by_val: case op_put_by_val: - case op_method_check: case op_get_by_id: case op_get_by_id_out_of_line: case op_get_array_length: @@ -167,6 +166,7 @@ inline CapabilityLevel canCompileOpcode(OpcodeID opcodeID, CodeBlock*, Instructi case op_put_by_id_transition_direct_out_of_line: case op_put_by_id_transition_normal: case op_put_by_id_transition_normal_out_of_line: + case op_init_global_const_nop: case op_init_global_const: case op_init_global_const_check: case op_jmp: @@ -200,7 +200,7 @@ inline CapabilityLevel canCompileOpcode(OpcodeID opcodeID, CodeBlock*, Instructi case op_strcat: case op_to_primitive: case op_throw: - case op_throw_reference_error: + case op_throw_static_error: case op_call: case op_construct: case op_new_regexp: diff --git a/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp index 25915cfd4..43aa2c007 100644 --- a/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp @@ -92,10 +92,22 @@ private: } case CheckStructure: - case ForwardCheckStructure: { + case ForwardCheckStructure: + case ArrayifyToStructure: { AbstractValue& value = m_state.forNode(node.child1()); + StructureSet set; + if (node.op() == ArrayifyToStructure) + set = node.structure(); + else + set = node.structureSet(); + if (value.m_currentKnownStructure.isSubsetOf(set)) { + ASSERT(node.refCount() == 1); + node.setOpAndDefaultFlags(Phantom); + eliminated = true; + break; + } StructureAbstractValue& structureValue = value.m_futurePossibleStructure; - if (structureValue.isSubsetOf(node.structureSet()) + if (structureValue.isSubsetOf(set) && structureValue.hasSingleton() && isCellSpeculation(value.m_type)) node.convertToStructureTransitionWatchpoint(structureValue.singleton()); @@ -104,7 +116,7 @@ private: case CheckArray: case Arrayify: { - if (!modeAlreadyChecked(m_state.forNode(node.child1()), node.arrayMode())) + if (!node.arrayMode().alreadyChecked(m_state.forNode(node.child1()))) break; ASSERT(node.refCount() == 1); node.setOpAndDefaultFlags(Phantom); diff --git a/Source/JavaScriptCore/dfg/DFGFPRInfo.h b/Source/JavaScriptCore/dfg/DFGFPRInfo.h index 5ee87bce1..d6a038a99 100644 --- a/Source/JavaScriptCore/dfg/DFGFPRInfo.h +++ 
b/Source/JavaScriptCore/dfg/DFGFPRInfo.h @@ -82,7 +82,7 @@ public: { ASSERT(reg != InvalidFPRReg); #if CPU(X86_64) - ASSERT(reg < 16); + ASSERT(static_cast<int>(reg) < 16); static const char* nameForRegister[16] = { "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", @@ -90,7 +90,7 @@ public: "xmm12", "xmm13", "xmm14", "xmm15" }; #elif CPU(X86) - ASSERT(reg < 8); + ASSERT(static_cast<int>(reg) < 8); static const char* nameForRegister[8] = { "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" diff --git a/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp index 5dcfe08a5..5a76aa8df 100644 --- a/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp @@ -86,14 +86,14 @@ private: ArrayProfile* arrayProfile = m_graph.baselineCodeBlockFor(nodePtr->codeOrigin)->getArrayProfile( nodePtr->codeOrigin.bytecodeIndex); - Array::Mode arrayMode = Array::SelectUsingPredictions; + ArrayMode arrayMode = ArrayMode(Array::SelectUsingPredictions); if (arrayProfile) { - arrayProfile->computeUpdatedPrediction(); - arrayMode = refineArrayMode( - fromObserved(arrayProfile, Array::Read, false), + arrayProfile->computeUpdatedPrediction(m_graph.baselineCodeBlockFor(node.codeOrigin)); + arrayMode = ArrayMode::fromObserved(arrayProfile, Array::Read, false); + arrayMode = arrayMode.refine( m_graph[node.child1()].prediction(), m_graph[m_compileIndex].prediction()); - if (modeSupportsLength(arrayMode) && arrayProfile->hasDefiniteStructure()) { + if (arrayMode.supportsLength() && arrayProfile->hasDefiniteStructure()) { m_graph.ref(nodePtr->child1()); Node checkStructure(CheckStructure, nodePtr->codeOrigin, OpInfo(m_graph.addStructureSet(arrayProfile->expectedStructure())), nodePtr->child1().index()); checkStructure.ref(); @@ -103,12 +103,11 @@ private: nodePtr = &m_graph[m_compileIndex]; } } else { - arrayMode = refineArrayMode( - arrayMode, + arrayMode = arrayMode.refine( m_graph[node.child1()].prediction(), m_graph[m_compileIndex].prediction()); } - if (!modeSupportsLength(arrayMode)) + if (!arrayMode.supportsLength()) break; nodePtr->setOp(GetArrayLength); ASSERT(nodePtr->flags() & NodeMustGenerate); @@ -125,21 +124,25 @@ private: break; } case GetIndexedPropertyStorage: { - ASSERT(canCSEStorage(node.arrayMode())); + ASSERT(node.arrayMode().canCSEStorage()); break; } - case GetByVal: - case StringCharAt: - case StringCharCodeAt: { + case GetByVal: { node.setArrayMode( - refineArrayMode( - node.arrayMode(), + node.arrayMode().refine( m_graph[node.child1()].prediction(), m_graph[node.child2()].prediction())); blessArrayOperation(node.child1(), node.child2(), 2); break; } + case StringCharAt: + case StringCharCodeAt: { + // Currently we have no good way of refining these. 
+ ASSERT(node.arrayMode() == ArrayMode(Array::String)); + blessArrayOperation(node.child1(), node.child2(), 2); + break; + } case ArrayPush: { blessArrayOperation(node.child1(), node.child2(), 2); @@ -323,8 +326,7 @@ private: Edge child3 = m_graph.varArgChild(node, 2); node.setArrayMode( - refineArrayMode( - node.arrayMode(), + node.arrayMode().refine( m_graph[child1].prediction(), m_graph[child2].prediction())); @@ -332,7 +334,7 @@ private: Node* nodePtr = &m_graph[m_compileIndex]; - switch (modeForPut(nodePtr->arrayMode())) { + switch (nodePtr->arrayMode().modeForPut().type()) { case Array::Int8Array: case Array::Int16Array: case Array::Int32Array: @@ -376,47 +378,66 @@ private: return nodeIndex; } - NodeIndex checkArray(Array::Mode arrayMode, CodeOrigin codeOrigin, NodeIndex array, NodeIndex index, bool (*storageCheck)(Array::Mode) = canCSEStorage, bool shouldGenerate = true) + NodeIndex checkArray(ArrayMode arrayMode, CodeOrigin codeOrigin, NodeIndex array, NodeIndex index, bool (*storageCheck)(const ArrayMode&) = canCSEStorage, bool shouldGenerate = true) { - ASSERT(modeIsSpecific(arrayMode)); + ASSERT(arrayMode.isSpecific()); m_graph.ref(array); - if (isEffectful(arrayMode)) { + if (arrayMode.doesConversion()) { if (index != NoNode) m_graph.ref(index); - Node arrayify(Arrayify, codeOrigin, OpInfo(arrayMode), array, index); - arrayify.ref(); // Once because it's used as a butterfly. - arrayify.ref(); // And twice because it's must-generate. - NodeIndex arrayifyIndex = m_graph.size(); - m_graph.append(arrayify); - m_insertionSet.append(m_indexInBlock, arrayifyIndex); - - ASSERT(shouldGenerate); - ASSERT(canCSEStorage(arrayMode)); - ASSERT(modeUsesButterfly(arrayMode)); - - if (!storageCheck(arrayMode)) - return NoNode; - return arrayifyIndex; + + Structure* structure = 0; + if (arrayMode.isJSArrayWithOriginalStructure()) { + JSGlobalObject* globalObject = m_graph.baselineCodeBlockFor(codeOrigin)->globalObject(); + switch (arrayMode.type()) { + case Array::Contiguous: + structure = globalObject->arrayStructure(); + if (structure->indexingType() != ArrayWithContiguous) + structure = 0; + break; + case Array::ArrayStorage: + structure = globalObject->arrayStructureWithArrayStorage(); + if (structure->indexingType() != ArrayWithArrayStorage) + structure = 0; + break; + default: + break; + } + } + + if (structure) { + Node arrayify(ArrayifyToStructure, codeOrigin, OpInfo(structure), OpInfo(arrayMode.asWord()), array, index); + arrayify.ref(); + NodeIndex arrayifyIndex = m_graph.size(); + m_graph.append(arrayify); + m_insertionSet.append(m_indexInBlock, arrayifyIndex); + } else { + Node arrayify(Arrayify, codeOrigin, OpInfo(arrayMode.asWord()), array, index); + arrayify.ref(); + NodeIndex arrayifyIndex = m_graph.size(); + m_graph.append(arrayify); + m_insertionSet.append(m_indexInBlock, arrayifyIndex); + } + } else { + Node checkArray(CheckArray, codeOrigin, OpInfo(arrayMode.asWord()), array); + checkArray.ref(); + NodeIndex checkArrayIndex = m_graph.size(); + m_graph.append(checkArray); + m_insertionSet.append(m_indexInBlock, checkArrayIndex); } - Node checkArray(CheckArray, codeOrigin, OpInfo(arrayMode), array); - checkArray.ref(); - NodeIndex checkArrayIndex = m_graph.size(); - m_graph.append(checkArray); - m_insertionSet.append(m_indexInBlock, checkArrayIndex); - if (!storageCheck(arrayMode)) return NoNode; if (shouldGenerate) m_graph.ref(array); - if (modeUsesButterfly(arrayMode)) + if (arrayMode.usesButterfly()) return addNode(Node(GetButterfly, codeOrigin, array), shouldGenerate); - 
return addNode(Node(GetIndexedPropertyStorage, codeOrigin, OpInfo(arrayMode), array), shouldGenerate); + return addNode(Node(GetIndexedPropertyStorage, codeOrigin, OpInfo(arrayMode.asWord()), array), shouldGenerate); } void blessArrayOperation(Edge base, Edge index, unsigned storageChildIdx) @@ -426,7 +447,7 @@ private: Node* nodePtr = &m_graph[m_compileIndex]; - switch (nodePtr->arrayMode()) { + switch (nodePtr->arrayMode().type()) { case Array::ForceExit: { Node forceExit(ForceOSRExit, nodePtr->codeOrigin); forceExit.ref(); diff --git a/Source/JavaScriptCore/dfg/DFGGPRInfo.h b/Source/JavaScriptCore/dfg/DFGGPRInfo.h index 498b116ec..3d07556cc 100644 --- a/Source/JavaScriptCore/dfg/DFGGPRInfo.h +++ b/Source/JavaScriptCore/dfg/DFGGPRInfo.h @@ -218,7 +218,7 @@ public: GPRReg tagGPR() const { - ASSERT(!isAddress() && m_baseOrTag != InvalidGPRReg); + ASSERT(!isAddress() && static_cast<GPRReg>(m_baseOrTag) != InvalidGPRReg); return static_cast<GPRReg>(m_baseOrTag); } @@ -290,7 +290,7 @@ public: static unsigned toIndex(GPRReg reg) { ASSERT(reg != InvalidGPRReg); - ASSERT(reg < 8); + ASSERT(static_cast<int>(reg) < 8); static const unsigned indexForRegister[8] = { 0, 2, 1, 3, InvalidIndex, InvalidIndex, 4, InvalidIndex }; unsigned result = indexForRegister[reg]; ASSERT(result != InvalidIndex); @@ -300,7 +300,7 @@ public: static const char* debugName(GPRReg reg) { ASSERT(reg != InvalidGPRReg); - ASSERT(reg < 8); + ASSERT(static_cast<int>(reg) < 8); static const char* nameForRegister[8] = { "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi", @@ -362,7 +362,7 @@ public: static unsigned toIndex(GPRReg reg) { ASSERT(reg != InvalidGPRReg); - ASSERT(reg < 16); + ASSERT(static_cast<int>(reg) < 16); static const unsigned indexForRegister[16] = { 0, 2, 1, 3, InvalidIndex, InvalidIndex, 5, 4, 6, 7, 8, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex }; unsigned result = indexForRegister[reg]; ASSERT(result != InvalidIndex); @@ -372,7 +372,7 @@ public: static const char* debugName(GPRReg reg) { ASSERT(reg != InvalidGPRReg); - ASSERT(reg < 16); + ASSERT(static_cast<int>(reg) < 16); static const char* nameForRegister[16] = { "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi", diff --git a/Source/JavaScriptCore/dfg/DFGGraph.cpp b/Source/JavaScriptCore/dfg/DFGGraph.cpp index b9a0db2a2..8e8817f81 100644 --- a/Source/JavaScriptCore/dfg/DFGGraph.cpp +++ b/Source/JavaScriptCore/dfg/DFGGraph.cpp @@ -226,7 +226,7 @@ void Graph::dump(const char* prefix, NodeIndex nodeIndex) hasPrinted = true; } if (node.hasArrayMode()) { - dataLog("%s%s", hasPrinted ? ", " : "", modeToString(node.arrayMode())); + dataLog("%s%s", hasPrinted ? ", " : "", node.arrayMode().toString()); hasPrinted = true; } if (node.hasVarNumber()) { diff --git a/Source/JavaScriptCore/dfg/DFGGraph.h b/Source/JavaScriptCore/dfg/DFGGraph.h index b2c754f85..9fbb2df07 100644 --- a/Source/JavaScriptCore/dfg/DFGGraph.h +++ b/Source/JavaScriptCore/dfg/DFGGraph.h @@ -490,14 +490,14 @@ public: // - and so on. 
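// (Editorial note, not part of the patch: the refactored byValIsPure() below reads the new
// ArrayMode bits directly: Generic accesses are never pure; Contiguous and ArrayStorage accesses
// are pure only while speculated in bounds; SlowPutArrayStorage is pure only if it cannot store
// to a hole; and String by-val is pure only for reads, since out-of-bounds or hole-writing
// accesses may consult the prototype chain and run arbitrary getters or setters.)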
bool byValIsPure(Node& node) { - switch (node.arrayMode()) { + switch (node.arrayMode().type()) { case Array::Generic: - case OUT_OF_BOUNDS_CONTIGUOUS_MODES: - case ARRAY_STORAGE_TO_HOLE_MODES: - case OUT_OF_BOUNDS_ARRAY_STORAGE_MODES: - case SLOW_PUT_ARRAY_STORAGE_MODES: - case ALL_EFFECTFUL_MODES: return false; + case Array::Contiguous: + case Array::ArrayStorage: + return !node.arrayMode().isOutOfBounds(); + case Array::SlowPutArrayStorage: + return !node.arrayMode().mayStoreToHole(); case Array::String: return node.op() == GetByVal; #if USE(JSVALUE32_64) @@ -689,6 +689,7 @@ public: SegmentedVector<ArgumentPosition, 8> m_argumentPositions; SegmentedVector<StructureSet, 16> m_structureSet; SegmentedVector<StructureTransitionData, 8> m_structureTransitionData; + SegmentedVector<NewArrayBufferData, 4> m_newArrayBufferData; bool m_hasArguments; HashSet<ExecutableBase*> m_executablesWhoseArgumentsEscaped; BitVector m_preservedVars; diff --git a/Source/JavaScriptCore/dfg/DFGNode.h b/Source/JavaScriptCore/dfg/DFGNode.h index 40b3ed7ec..e66629ec4 100644 --- a/Source/JavaScriptCore/dfg/DFGNode.h +++ b/Source/JavaScriptCore/dfg/DFGNode.h @@ -59,6 +59,11 @@ struct StructureTransitionData { } }; +struct NewArrayBufferData { + unsigned startConstant; + unsigned numConstants; +}; + // This type used in passing an immediate argument to Node constructor; // distinguishes an immediate value (typically an index into a CodeBlock data structure - // a constant index, argument, or identifier) from a NodeIndex. @@ -250,9 +255,9 @@ struct Node { void convertToStructureTransitionWatchpoint(Structure* structure) { - ASSERT(m_op == CheckStructure || m_op == ForwardCheckStructure); + ASSERT(m_op == CheckStructure || m_op == ForwardCheckStructure || m_op == ArrayifyToStructure); m_opInfo = bitwise_cast<uintptr_t>(structure); - if (m_op == CheckStructure) + if (m_op == CheckStructure || m_op == ArrayifyToStructure) m_op = StructureTransitionWatchpoint; else m_op = ForwardStructureTransitionWatchpoint; @@ -412,16 +417,20 @@ struct Node { return op() == NewArrayBuffer; } - unsigned startConstant() + NewArrayBufferData* newArrayBufferData() { ASSERT(hasConstantBuffer()); - return m_opInfo; + return reinterpret_cast<NewArrayBufferData*>(m_opInfo); + } + + unsigned startConstant() + { + return newArrayBufferData()->startConstant; } unsigned numConstants() { - ASSERT(hasConstantBuffer()); - return m_opInfo2; + return newArrayBufferData()->numConstants; } bool hasRegexpIndex() @@ -692,6 +701,7 @@ struct Node { switch (op()) { case StructureTransitionWatchpoint: case ForwardStructureTransitionWatchpoint: + case ArrayifyToStructure: return true; default: return false; @@ -750,6 +760,7 @@ struct Node { case StringCharCodeAt: case CheckArray: case Arrayify: + case ArrayifyToStructure: case ArrayPush: case ArrayPop: return true; @@ -758,18 +769,20 @@ struct Node { } } - Array::Mode arrayMode() + ArrayMode arrayMode() { ASSERT(hasArrayMode()); - return static_cast<Array::Mode>(m_opInfo); + if (op() == ArrayifyToStructure) + return ArrayMode::fromWord(m_opInfo2); + return ArrayMode::fromWord(m_opInfo); } - bool setArrayMode(Array::Mode arrayMode) + bool setArrayMode(ArrayMode arrayMode) { ASSERT(hasArrayMode()); if (this->arrayMode() == arrayMode) return false; - m_opInfo = arrayMode; + m_opInfo = arrayMode.asWord(); return true; } diff --git a/Source/JavaScriptCore/dfg/DFGNodeType.h b/Source/JavaScriptCore/dfg/DFGNodeType.h index 1d2460659..624b1ae75 100644 --- a/Source/JavaScriptCore/dfg/DFGNodeType.h +++ 
b/Source/JavaScriptCore/dfg/DFGNodeType.h @@ -139,7 +139,8 @@ namespace JSC { namespace DFG { macro(ReallocatePropertyStorage, NodeMustGenerate | NodeDoesNotExit | NodeResultStorage) \ macro(GetButterfly, NodeResultStorage) \ macro(CheckArray, NodeMustGenerate) \ - macro(Arrayify, NodeResultStorage | NodeMustGenerate | NodeClobbersWorld) \ + macro(Arrayify, NodeMustGenerate) \ + macro(ArrayifyToStructure, NodeMustGenerate) \ macro(GetIndexedPropertyStorage, NodeResultStorage) \ macro(GetByOffset, NodeResultJS) \ macro(PutByOffset, NodeMustGenerate) \ diff --git a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp index 6560088fd..b3701722e 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp +++ b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp @@ -45,9 +45,9 @@ OSRExit::OSRExit(ExitKind kind, JSValueSource jsValueSource, MethodOfGettingAVal , m_kind(kind) , m_count(0) , m_streamIndex(streamIndex) + , m_lastSetOperand(jit->m_lastSetOperand) { ASSERT(m_codeOrigin.isSet()); - m_setOperands.append(jit->m_lastSetOperand); } bool OSRExit::considerAddingAsFrequentExitSiteSlow(CodeBlock* dfgCodeBlock, CodeBlock* profiledCodeBlock) diff --git a/Source/JavaScriptCore/dfg/DFGOSRExit.h b/Source/JavaScriptCore/dfg/DFGOSRExit.h index 0ecefe386..cd2434c11 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExit.h +++ b/Source/JavaScriptCore/dfg/DFGOSRExit.h @@ -110,9 +110,9 @@ struct OSRExit { } unsigned m_streamIndex; - Vector<int, 1> m_setOperands; + int m_lastSetOperand; - Vector<RefPtr<ValueRecoveryOverride>, 1> m_valueRecoveryOverrides; + RefPtr<ValueRecoveryOverride> m_valueRecoveryOverride; private: bool considerAddingAsFrequentExitSiteSlow(CodeBlock* dfgCodeBlock, CodeBlock* profiledCodeBlock); diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp index 55a903c7a..2ce1c887b 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp +++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp @@ -70,10 +70,11 @@ void compileOSRExit(ExecState* exec) Operands<ValueRecovery> operands; codeBlock->variableEventStream().reconstruct(codeBlock, exit.m_codeOrigin, codeBlock->minifiedDFG(), exit.m_streamIndex, operands); - // There may be overrides, for forward speculations. - for (size_t i = 0; i < exit.m_valueRecoveryOverrides.size(); i++) - operands.setOperand(exit.m_valueRecoveryOverrides[i]->operand, exit.m_valueRecoveryOverrides[i]->recovery); - + // There may be an override, for forward speculations. + if (!!exit.m_valueRecoveryOverride) { + operands.setOperand( + exit.m_valueRecoveryOverride->operand, exit.m_valueRecoveryOverride->recovery); + } SpeculationRecovery* recovery = 0; if (exit.m_recoveryIndex) diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp index b64ce3fa1..df4f3c905 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp +++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp @@ -688,6 +688,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov } } + if (!m_jit.baselineCodeBlockFor(inlineCallFrame)->usesArguments()) + continue; int argumentsRegister = m_jit.argumentsRegisterFor(inlineCallFrame); if (didCreateArgumentsObject.add(inlineCallFrame).isNewEntry) { // We know this call frame optimized out an arguments object that @@ -732,9 +734,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov // 15) Load the result of the last bytecode operation into regT0. 
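// (Editorial note, not part of the patch: the per-exit vector of set operands collapses below to
// a single m_lastSetOperand; std::numeric_limits<int>::max() appears to act as the "no operand
// was set" sentinel, so the reload of the last result into regT0 is skipped in that case.)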
- for (size_t i = 0; i < exit.m_setOperands.size(); i++) { - m_jit.load32(AssemblyHelpers::payloadFor((VirtualRegister)exit.m_setOperands[i]), GPRInfo::cachedResultRegister); - m_jit.load32(AssemblyHelpers::tagFor((VirtualRegister)exit.m_setOperands[i]), GPRInfo::cachedResultRegister2); + if (exit.m_lastSetOperand != std::numeric_limits<int>::max()) { + m_jit.load32(AssemblyHelpers::payloadFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister); + m_jit.load32(AssemblyHelpers::tagFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister2); } // 16) Adjust the call frame pointer. diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp index 65b89a550..b278997ab 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp +++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp @@ -653,6 +653,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov } } + if (!m_jit.baselineCodeBlockFor(inlineCallFrame)->usesArguments()) + continue; int argumentsRegister = m_jit.argumentsRegisterFor(inlineCallFrame); if (didCreateArgumentsObject.add(inlineCallFrame).isNewEntry) { // We know this call frame optimized out an arguments object that @@ -681,9 +683,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov // 16) Load the result of the last bytecode operation into regT0. - for (size_t i = 0; i < exit.m_setOperands.size(); i++) - m_jit.load64(AssemblyHelpers::addressFor((VirtualRegister)exit.m_setOperands[i]), GPRInfo::cachedResultRegister); - + if (exit.m_lastSetOperand != std::numeric_limits<int>::max()) + m_jit.loadPtr(AssemblyHelpers::addressFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister); + // 17) Adjust the call frame pointer. if (exit.m_codeOrigin.inlineCallFrame) diff --git a/Source/JavaScriptCore/dfg/DFGOperations.cpp b/Source/JavaScriptCore/dfg/DFGOperations.cpp index 13e04388c..0e45e230c 100644 --- a/Source/JavaScriptCore/dfg/DFGOperations.cpp +++ b/Source/JavaScriptCore/dfg/DFGOperations.cpp @@ -1157,8 +1157,7 @@ JSCell* DFG_OPERATION operationCreateActivation(ExecState* exec) { JSGlobalData& globalData = exec->globalData(); NativeCallFrameTracer tracer(&globalData, exec); - JSActivation* activation = JSActivation::create( - globalData, exec, static_cast<FunctionExecutable*>(exec->codeBlock()->ownerExecutable())); + JSActivation* activation = JSActivation::create(globalData, exec, exec->codeBlock()); exec->setScope(activation); return activation; } diff --git a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp index fee7a3ca2..3e8ead5c6 100644 --- a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp @@ -635,7 +635,8 @@ private: case PhantomPutStructure: case PhantomArguments: case CheckArray: - case Arrayify: { + case Arrayify: + case ArrayifyToStructure: { // This node should never be visible at this stage of compilation. It is // inserted by fixup(), which follows this phase. 
ASSERT_NOT_REACHED(); diff --git a/Source/JavaScriptCore/dfg/DFGRepatch.cpp b/Source/JavaScriptCore/dfg/DFGRepatch.cpp index 531a525d5..7c15ef33e 100644 --- a/Source/JavaScriptCore/dfg/DFGRepatch.cpp +++ b/Source/JavaScriptCore/dfg/DFGRepatch.cpp @@ -324,7 +324,7 @@ static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier PropertyOffset offset = slot.cachedOffset(); size_t count = normalizePrototypeChain(exec, baseValue, slot.slotBase(), propertyName, offset); - if (!count) + if (count == InvalidPrototypeChain) return false; StructureChain* prototypeChain = structure->prototypeChain(exec); @@ -550,7 +550,7 @@ static bool tryBuildGetByIDProtoList(ExecState* exec, JSValue baseValue, const I PropertyOffset offset = slot.cachedOffset(); size_t count = normalizePrototypeChain(exec, baseValue, slot.slotBase(), propertyName, offset); - if (!count) + if (count == InvalidPrototypeChain) return false; Structure* structure = baseValue.asCell()->structure(); @@ -964,7 +964,8 @@ static bool tryCachePutByID(ExecState* exec, JSValue baseValue, const Identifier if (hasIndexingHeader(oldStructure->indexingType())) return false; - normalizePrototypeChain(exec, baseCell); + if (normalizePrototypeChain(exec, baseCell) == InvalidPrototypeChain) + return false; StructureChain* prototypeChain = structure->prototypeChain(exec); @@ -1035,7 +1036,8 @@ static bool tryBuildPutByIdList(ExecState* exec, JSValue baseValue, const Identi if (hasIndexingHeader(oldStructure->indexingType())) return false; - normalizePrototypeChain(exec, baseCell); + if (normalizePrototypeChain(exec, baseCell) == InvalidPrototypeChain) + return false; StructureChain* prototypeChain = structure->prototypeChain(exec); diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp index 7cb028388..6bedd6d68 100644 --- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp +++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp @@ -153,19 +153,37 @@ JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind) void SpeculativeJIT::convertLastOSRExitToForward(const ValueRecovery& valueRecovery) { -#if !ASSERT_DISABLED if (!valueRecovery) { // Check that the preceding node was a SetLocal with the same code origin. Node* setLocal = &at(m_jit.graph().m_blocks[m_block]->at(m_indexInBlock - 1)); - ASSERT(setLocal->op() == SetLocal); - ASSERT(setLocal->codeOrigin == at(m_compileIndex).codeOrigin); + ASSERT_UNUSED(setLocal, setLocal->op() == SetLocal); + ASSERT_UNUSED(setLocal, setLocal->codeOrigin == at(m_compileIndex).codeOrigin); + + // Find the next node. + unsigned indexInBlock = m_indexInBlock + 1; + Node* node = 0; + for (;;) { + if (indexInBlock == m_jit.graph().m_blocks[m_block]->size()) { + // This is an inline return. Give up and do a backwards speculation. This is safe + // because an inline return has its own bytecode index and it's always safe to + // reexecute that bytecode. 
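// (Editorial note, not part of the patch: when convertLastOSRExitToForward() is given no value
// recovery, the new code walks forward to the first node with a different code origin and simply
// retargets the last OSR exit at it; when a recovery is supplied, the exit additionally records
// the SetLocal's operand and a ValueRecoveryOverride so the value computed before the exit is
// restored correctly on the baseline side.)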
+ ASSERT(node->op() == Jump); + return; + } + node = &at(m_jit.graph().m_blocks[m_block]->at(indexInBlock)); + if (node->codeOrigin != at(m_compileIndex).codeOrigin) + break; + indexInBlock++; + } + + ASSERT(node->codeOrigin != at(m_compileIndex).codeOrigin); + OSRExit& exit = m_jit.codeBlock()->lastOSRExit(); + exit.m_codeOrigin = node->codeOrigin; + return; } -#endif unsigned setLocalIndexInBlock = m_indexInBlock + 1; - - OSRExit& exit = m_jit.codeBlock()->lastOSRExit(); - + Node* setLocal = &at(m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock)); bool hadInt32ToDouble = false; @@ -175,13 +193,11 @@ void SpeculativeJIT::convertLastOSRExitToForward(const ValueRecovery& valueRecov } if (setLocal->op() == Flush || setLocal->op() == Phantom) setLocal = &at(m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock)); - - if (!!valueRecovery) { - if (hadInt32ToDouble) - ASSERT(at(setLocal->child1()).child1() == m_compileIndex); - else - ASSERT(setLocal->child1() == m_compileIndex); - } + + if (hadInt32ToDouble) + ASSERT(at(setLocal->child1()).child1() == m_compileIndex); + else + ASSERT(setLocal->child1() == m_compileIndex); ASSERT(setLocal->op() == SetLocal); ASSERT(setLocal->codeOrigin == at(m_compileIndex).codeOrigin); @@ -190,34 +206,14 @@ void SpeculativeJIT::convertLastOSRExitToForward(const ValueRecovery& valueRecov // We're at an inlined return. Use a backward speculation instead. return; } - - exit.m_setOperands[0] = setLocal->local(); - while (nextNode->codeOrigin == at(m_compileIndex).codeOrigin) { - ++setLocalIndexInBlock; - Node* nextSetLocal = nextNode; - if (nextSetLocal->op() == Int32ToDouble) - nextSetLocal = &at(m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock)); - - if (nextSetLocal->op() == Flush || nextSetLocal->op() == Phantom) - nextSetLocal = &at(m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock)); - - nextNode = &at(m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock + 1)); - ASSERT(nextNode->op() != Jump || nextNode->codeOrigin != at(m_compileIndex).codeOrigin); - ASSERT(nextSetLocal->op() == SetLocal); - exit.m_setOperands.append(nextSetLocal->local()); - } - ASSERT(nextNode->codeOrigin != at(m_compileIndex).codeOrigin); - + + OSRExit& exit = m_jit.codeBlock()->lastOSRExit(); exit.m_codeOrigin = nextNode->codeOrigin; - if (!valueRecovery) - return; - - ASSERT(exit.m_setOperands.size() == 1); - for (size_t i = 0; i < exit.m_setOperands.size(); i++) - exit.m_valueRecoveryOverrides.append(adoptRef(new ValueRecoveryOverride(exit.m_setOperands[i], valueRecovery))); - + exit.m_lastSetOperand = setLocal->local(); + exit.m_valueRecoveryOverride = adoptRef( + new ValueRecoveryOverride(setLocal->local(), valueRecovery)); } JumpReplacementWatchpoint* SpeculativeJIT::forwardSpeculationWatchpoint(ExitKind kind) @@ -321,9 +317,9 @@ void SpeculativeJIT::clearGenerationInfo() m_fprs = RegisterBank<FPRInfo>(); } -const TypedArrayDescriptor* SpeculativeJIT::typedArrayDescriptor(Array::Mode arrayMode) +const TypedArrayDescriptor* SpeculativeJIT::typedArrayDescriptor(ArrayMode arrayMode) { - switch (arrayMode) { + switch (arrayMode.type()) { case Array::Int8Array: return &m_jit.globalData()->int8ArrayDescriptor(); case Array::Int16Array: @@ -341,62 +337,75 @@ const TypedArrayDescriptor* SpeculativeJIT::typedArrayDescriptor(Array::Mode arr case Array::Float32Array: return &m_jit.globalData()->float32ArrayDescriptor(); case Array::Float64Array: - return &m_jit.globalData()->float32ArrayDescriptor(); + return &m_jit.globalData()->float64ArrayDescriptor(); 
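[Editor's note] From here on, the patch replaces the bare Array::Mode enum and its macro-generated case lists (NON_ARRAY_CONTIGUOUS_MODES, ALL_EFFECTFUL_ARRAY_STORAGE_MODES, and so on) with an ArrayMode value object queried through methods such as type(), isJSArray(), isSlowPut(), isInBounds() and doesConversion(). The real JSC class packs its state differently; the following sketch only shows the shape of that API under an assumed flag-based layout:

    namespace sketch {

    enum class ArrayType {
        SelectUsingPredictions, String, Contiguous, ArrayStorage, SlowPutArrayStorage, Arguments
    };

    class ArrayMode {
    public:
        ArrayMode(ArrayType type, bool isJSArray, bool mayGoOutOfBounds, bool doesConversion)
            : m_type(type)
            , m_isJSArray(isJSArray)
            , m_mayGoOutOfBounds(mayGoOutOfBounds)
            , m_doesConversion(doesConversion)
        {
        }

        ArrayType type() const { return m_type; }
        bool isJSArray() const { return m_isJSArray; }
        bool isSlowPut() const { return m_type == ArrayType::SlowPutArrayStorage; }
        bool isInBounds() const { return !m_mayGoOutOfBounds; }
        bool isOutOfBounds() const { return m_mayGoOutOfBounds; }
        bool doesConversion() const { return m_doesConversion; }

        // Placeholder policy: any store that can land outside the checked bounds
        // may need the extra scratch register requested further down in this file.
        bool mayStoreToHole() const { return m_mayGoOutOfBounds; }

    private:
        ArrayType m_type;
        bool m_isJSArray;
        bool m_mayGoOutOfBounds;
        bool m_doesConversion;
    };

    } // namespace sketch

Call sites then read as arrayMode.isInBounds() rather than matching macro-expanded case lists, which is the pattern most of the remaining hunks in this file follow.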
default: return 0; } } -JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, Array::Mode arrayMode) +JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, bool invert) { JITCompiler::JumpList result; - switch (arrayMode) { - case NON_ARRAY_CONTIGUOUS_MODES: { + switch (arrayMode.type()) { + case Array::Contiguous: { + if (arrayMode.isJSArray()) { + m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR); + result.append( + m_jit.branch32( + invert ? MacroAssembler::Equal : MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ContiguousShape))); + break; + } m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR); result.append( - m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ContiguousShape))); + m_jit.branch32(invert ? MacroAssembler::Equal : MacroAssembler::NotEqual, tempGPR, TrustedImm32(ContiguousShape))); break; } - case ARRAY_WITH_CONTIGUOUS_MODES: { - m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR); - result.append( - m_jit.branch32( - MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ContiguousShape))); - break; - } - case NON_ARRAY_ARRAY_STORAGE_MODES: { + case Array::ArrayStorage: + case Array::SlowPutArrayStorage: { + if (arrayMode.isJSArray()) { + if (arrayMode.isSlowPut()) { + if (invert) { + JITCompiler::Jump slow = + m_jit.branchTest32( + MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)); + m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR); + m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR); + result.append( + m_jit.branch32( + MacroAssembler::BelowOrEqual, tempGPR, + TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape))); + + slow.link(&m_jit); + } + + result.append( + m_jit.branchTest32( + MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray))); + m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR); + m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR); + result.append( + m_jit.branch32( + MacroAssembler::Above, tempGPR, + TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape))); + break; + } + m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR); + result.append( + m_jit.branch32(invert ? MacroAssembler::Equal : MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape))); + break; + } m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR); - if (isSlowPutAccess(arrayMode)) { + if (arrayMode.isSlowPut()) { m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR); result.append( m_jit.branch32( - MacroAssembler::Above, tempGPR, + invert ? 
MacroAssembler::BelowOrEqual : MacroAssembler::Above, tempGPR, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape))); - } else { - result.append( - m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape))); + break; } - break; - } - case Array::ArrayWithArrayStorage: - case Array::ArrayWithArrayStorageToHole: - case Array::ArrayWithArrayStorageOutOfBounds: { - m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR); result.append( - m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape))); - break; - } - case Array::ArrayWithSlowPutArrayStorage: { - result.append( - m_jit.branchTest32( - MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray))); - m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR); - m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR); - result.append( - m_jit.branch32( - MacroAssembler::Above, tempGPR, - TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape))); + m_jit.branch32(invert ? MacroAssembler::Equal : MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape))); break; } default: @@ -409,28 +418,28 @@ JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGP void SpeculativeJIT::checkArray(Node& node) { - ASSERT(modeIsSpecific(node.arrayMode())); + ASSERT(node.arrayMode().isSpecific()); + ASSERT(!node.arrayMode().doesConversion()); SpeculateCellOperand base(this, node.child1()); GPRReg baseReg = base.gpr(); const TypedArrayDescriptor* result = typedArrayDescriptor(node.arrayMode()); - if (modeAlreadyChecked(m_state.forNode(node.child1()), node.arrayMode())) { + if (node.arrayMode().alreadyChecked(m_state.forNode(node.child1()))) { noResult(m_compileIndex); return; } const ClassInfo* expectedClassInfo = 0; - switch (node.arrayMode()) { + switch (node.arrayMode().type()) { case Array::String: expectedClassInfo = &JSString::s_info; break; - case NON_ARRAY_CONTIGUOUS_MODES: - case ARRAY_WITH_CONTIGUOUS_MODES: - case NON_ARRAY_ARRAY_STORAGE_MODES: - case ARRAY_WITH_ARRAY_STORAGE_MODES: { + case Array::Contiguous: + case Array::ArrayStorage: + case Array::SlowPutArrayStorage: { GPRTemporary temp(this); GPRReg tempGPR = temp.gpr(); m_jit.loadPtr( @@ -477,77 +486,63 @@ void SpeculativeJIT::checkArray(Node& node) void SpeculativeJIT::arrayify(Node& node, GPRReg baseReg, GPRReg propertyReg) { - Array::Mode desiredArrayMode; - - switch (node.arrayMode()) { - case Array::ToContiguous: - desiredArrayMode = Array::Contiguous; - break; - case Array::ToArrayStorage: - desiredArrayMode = Array::ArrayStorage; - break; - case Array::ToSlowPutArrayStorage: - desiredArrayMode = Array::SlowPutArrayStorage; - break; - case Array::ArrayToArrayStorage: - desiredArrayMode = Array::ArrayWithArrayStorage; - break; - case Array::PossiblyArrayToArrayStorage: - desiredArrayMode = Array::PossiblyArrayWithArrayStorage; - break; - default: - CRASH(); - desiredArrayMode = Array::ForceExit; - break; - } + ASSERT(node.arrayMode().doesConversion()); - GPRTemporary structure(this); GPRTemporary temp(this); - GPRReg structureGPR = structure.gpr(); + GPRTemporary structure; GPRReg tempGPR = temp.gpr(); - - m_jit.loadPtr( - MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR); + GPRReg structureGPR = InvalidGPRReg; - m_jit.load8( - MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), tempGPR); + if (node.op() != ArrayifyToStructure) { + GPRTemporary realStructure(this); + structure.adopt(realStructure); + structureGPR = structure.gpr(); + } // 
We can skip all that comes next if we already have array storage. - MacroAssembler::JumpList slowCases = - jumpSlowForUnwantedArrayMode(tempGPR, desiredArrayMode); + MacroAssembler::JumpList done; + + if (node.op() == ArrayifyToStructure) { + done.append(m_jit.branchWeakPtr( + JITCompiler::Equal, + JITCompiler::Address(baseReg, JSCell::structureOffset()), + node.structure())); + } else { + m_jit.loadPtr( + MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR); - m_jit.loadPtr( - MacroAssembler::Address(baseReg, JSObject::butterflyOffset()), tempGPR); + m_jit.load8( + MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), tempGPR); - MacroAssembler::Jump done = m_jit.jump(); + done = jumpSlowForUnwantedArrayMode(tempGPR, node.arrayMode(), true); + + // Next check that the object does not intercept indexed accesses. If it does, + // then this mode won't work. + speculationCheck( + BadIndexingType, JSValueSource::unboxedCell(baseReg), NoNode, + m_jit.branchTest8( + MacroAssembler::NonZero, + MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), + MacroAssembler::TrustedImm32(InterceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero))); + } - slowCases.link(&m_jit); - // If we're allegedly creating contiguous storage and the index is bogus, then // just don't. - if (node.arrayMode() == Array::ToContiguous && propertyReg != InvalidGPRReg) { + if (node.arrayMode().type() == Array::Contiguous && propertyReg != InvalidGPRReg) { speculationCheck( Uncountable, JSValueRegs(), NoNode, m_jit.branch32( MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(MIN_SPARSE_ARRAY_INDEX))); } - // Next check that the object does not intercept indexed accesses. If it does, - // then this mode won't work. - speculationCheck( - BadIndexingType, JSValueSource::unboxedCell(baseReg), NoNode, - m_jit.branchTest8( - MacroAssembler::NonZero, - MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), - MacroAssembler::TrustedImm32(InterceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero))); - // Now call out to create the array storage. silentSpillAllRegisters(tempGPR); - switch (node.arrayMode()) { - case ALL_EFFECTFUL_CONTIGUOUS_MODES: + switch (node.arrayMode().type()) { + case Array::Contiguous: callOperation(operationEnsureContiguous, tempGPR, baseReg); break; - case ALL_EFFECTFUL_ARRAY_STORAGE_MODES: + case Array::ArrayStorage: + case Array::SlowPutArrayStorage: callOperation(operationEnsureArrayStorage, tempGPR, baseReg); break; default: @@ -555,41 +550,42 @@ void SpeculativeJIT::arrayify(Node& node, GPRReg baseReg, GPRReg propertyReg) break; } silentFillAllRegisters(tempGPR); - - // Alas, we need to reload the structure because silent spilling does not save - // temporaries. Nor would it be useful for it to do so. Either way we're talking - // about a load. - m_jit.loadPtr( - MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR); - - // Finally, check that we have the kind of array storage that we wanted to get. - // Note that this is a backwards speculation check, which will result in the - // bytecode operation corresponding to this arrayification being reexecuted. - // That's fine, since arrayification is not user-visible. 
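[Editor's note] In prose, the arrayify() path above first tries to prove the object already has the wanted storage (or, for ArrayifyToStructure, the exact expected structure), refuses to create contiguous storage for an absurdly large index, and only then calls out to the ensure-storage operation and re-checks the result. A schematic, non-JIT rendering of that control flow; the types and threshold below are invented for illustration:

    #include <cstdint>

    namespace sketch {

    enum class Shape { Undecided, Contiguous, ArrayStorage };

    struct ObjectStub {
        Shape shape = Shape::Undecided;
    };

    constexpr uint32_t kMinSparseArrayIndex = 10000; // illustrative threshold

    inline bool arrayifyToContiguous(ObjectStub& object, uint32_t index)
    {
        if (object.shape == Shape::Contiguous)
            return true;                          // fast path: nothing to convert
        if (index >= kMinSparseArrayIndex)
            return false;                         // bogus index: don't build contiguous storage
        object.shape = Shape::Contiguous;         // stand-in for the ensure-storage call
        return object.shape == Shape::Contiguous; // backwards check after the call
    }

    } // namespace sketch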
- m_jit.load8( - MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), structureGPR); - speculationCheck( - BadIndexingType, JSValueSource::unboxedCell(baseReg), NoNode, - jumpSlowForUnwantedArrayMode(structureGPR, desiredArrayMode)); + + if (node.op() == ArrayifyToStructure) { + speculationCheck( + BadIndexingType, JSValueSource::unboxedCell(baseReg), NoNode, + m_jit.branchWeakPtr( + JITCompiler::NotEqual, + JITCompiler::Address(baseReg, JSCell::structureOffset()), + node.structure())); + } else { + // Alas, we need to reload the structure because silent spilling does not save + // temporaries. Nor would it be useful for it to do so. Either way we're talking + // about a load. + m_jit.loadPtr( + MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR); + + // Finally, check that we have the kind of array storage that we wanted to get. + // Note that this is a backwards speculation check, which will result in the + // bytecode operation corresponding to this arrayification being reexecuted. + // That's fine, since arrayification is not user-visible. + m_jit.load8( + MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), structureGPR); + speculationCheck( + BadIndexingType, JSValueSource::unboxedCell(baseReg), NoNode, + jumpSlowForUnwantedArrayMode(structureGPR, node.arrayMode())); + } done.link(&m_jit); - storageResult(tempGPR, m_compileIndex); + noResult(m_compileIndex); } void SpeculativeJIT::arrayify(Node& node) { - ASSERT(modeIsSpecific(node.arrayMode())); + ASSERT(node.arrayMode().isSpecific()); SpeculateCellOperand base(this, node.child1()); - if (modeAlreadyChecked(m_state.forNode(node.child1()), node.arrayMode())) { - GPRTemporary temp(this); - m_jit.loadPtr( - MacroAssembler::Address(base.gpr(), JSObject::butterflyOffset()), temp.gpr()); - storageResult(temp.gpr(), m_compileIndex); - return; - } - if (!node.child2()) { arrayify(node, base.gpr(), InvalidGPRReg); return; @@ -1803,7 +1799,6 @@ ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSo void SpeculativeJIT::compileGetCharCodeAt(Node& node) { - ASSERT(node.child3() == NoNode); SpeculateCellOperand string(this, node.child1()); SpeculateStrictInt32Operand index(this, node.child2()); StorageOperand storage(this, node.child3()); @@ -1812,12 +1807,7 @@ void SpeculativeJIT::compileGetCharCodeAt(Node& node) GPRReg indexReg = index.gpr(); GPRReg storageReg = storage.gpr(); - if (!isStringSpeculation(m_state.forNode(node.child1()).m_type)) { - ASSERT(!(at(node.child1()).prediction() & SpecString)); - terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode); - noResult(m_compileIndex); - return; - } + ASSERT(speculationChecked(m_state.forNode(node.child1()).m_type, SpecString)); // unsigned comparison so we can filter out negative indices and indices that are too large speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength()))); @@ -1851,7 +1841,7 @@ void SpeculativeJIT::compileGetByValOnString(Node& node) GPRReg propertyReg = property.gpr(); GPRReg storageReg = storage.gpr(); - ASSERT(modeAlreadyChecked(m_state.forNode(node.child1()), Array::String)); + ASSERT(ArrayMode(Array::String).alreadyChecked(m_state.forNode(node.child1()))); // unsigned comparison so we can filter out negative indices and indices that are too large speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, 
MacroAssembler::Address(baseReg, JSString::offsetOfLength()))); @@ -2260,7 +2250,7 @@ void SpeculativeJIT::compileGetByValOnIntTypedArray(const TypedArrayDescriptor& GPRTemporary result(this); GPRReg resultReg = result.gpr(); - ASSERT(modeAlreadyChecked(m_state.forNode(node.child1()), node.arrayMode())); + ASSERT(node.arrayMode().alreadyChecked(m_state.forNode(node.child1()))); speculationCheck( Uncountable, JSValueRegs(), NoNode, @@ -2410,7 +2400,7 @@ void SpeculativeJIT::compileGetByValOnFloatTypedArray(const TypedArrayDescriptor GPRReg propertyReg = property.gpr(); GPRReg storageReg = storage.gpr(); - ASSERT(modeAlreadyChecked(m_state.forNode(node.child1()), node.arrayMode())); + ASSERT(node.arrayMode().alreadyChecked(m_state.forNode(node.child1()))); FPRTemporary result(this); FPRReg resultReg = result.fpr(); @@ -2426,7 +2416,7 @@ void SpeculativeJIT::compileGetByValOnFloatTypedArray(const TypedArrayDescriptor case 8: { m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg); MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, resultReg, resultReg); - static const double NaN = std::numeric_limits<double>::quiet_NaN(); + static const double NaN = QNaN; m_jit.loadDouble(&NaN, resultReg); notNaN.link(&m_jit); break; @@ -2447,7 +2437,7 @@ void SpeculativeJIT::compilePutByValForFloatTypedArray(const TypedArrayDescripto SpeculateDoubleOperand valueOp(this, valueUse); - ASSERT_UNUSED(baseUse, modeAlreadyChecked(m_state.forNode(baseUse), node.arrayMode())); + ASSERT_UNUSED(baseUse, node.arrayMode().alreadyChecked(m_state.forNode(baseUse))); GPRTemporary result(this); @@ -3230,7 +3220,7 @@ void SpeculativeJIT::compileGetIndexedPropertyStorage(Node& node) const TypedArrayDescriptor* descriptor = typedArrayDescriptor(node.arrayMode()); - switch (node.arrayMode()) { + switch (node.arrayMode().type()) { case Array::String: m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), storageReg); @@ -3270,7 +3260,7 @@ void SpeculativeJIT::compileGetByValOnArguments(Node& node) if (!m_compileOkay) return; - ASSERT(modeAlreadyChecked(m_state.forNode(node.child1()), Array::Arguments)); + ASSERT(ArrayMode(Array::Arguments).alreadyChecked(m_state.forNode(node.child1()))); // Two really lame checks. 
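[Editor's note] One detail worth calling out from the Float64Array load above: a double that fails the x == x self-comparison is replaced with one canonical quiet NaN (the QNaN constant in the diff), so arbitrary NaN bit patterns never escape the typed-array read. A portable sketch of that purification:

    #include <limits>

    static double purifyNaN(double value)
    {
        if (value != value)                                   // only NaN compares unequal to itself
            return std::numeric_limits<double>::quiet_NaN();  // canonical quiet NaN
        return value;
    }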
speculationCheck( @@ -3327,7 +3317,7 @@ void SpeculativeJIT::compileGetArgumentsLength(Node& node) if (!m_compileOkay) return; - ASSERT(modeAlreadyChecked(m_state.forNode(node.child1()), Array::Arguments)); + ASSERT(ArrayMode(Array::Arguments).alreadyChecked(m_state.forNode(node.child1()))); speculationCheck( Uncountable, JSValueSource(), NoNode, @@ -3345,8 +3335,8 @@ void SpeculativeJIT::compileGetArrayLength(Node& node) { const TypedArrayDescriptor* descriptor = typedArrayDescriptor(node.arrayMode()); - switch (node.arrayMode()) { - case ARRAY_WITH_CONTIGUOUS_MODES: { + switch (node.arrayMode().type()) { + case Array::Contiguous: { StorageOperand storage(this, node.child2()); GPRTemporary result(this, storage); GPRReg storageReg = storage.gpr(); @@ -3356,8 +3346,8 @@ void SpeculativeJIT::compileGetArrayLength(Node& node) integerResult(resultReg, m_compileIndex); break; } - case ARRAY_WITH_ARRAY_STORAGE_MODES: - case ARRAY_EFFECTFUL_MODES: { + case Array::ArrayStorage: + case Array::SlowPutArrayStorage: { StorageOperand storage(this, node.child2()); GPRTemporary result(this, storage); GPRReg storageReg = storage.gpr(); @@ -3542,7 +3532,7 @@ void SpeculativeJIT::compileReallocatePropertyStorage(Node& node) storageResult(scratchGPR2, m_compileIndex); } -GPRReg SpeculativeJIT::temporaryRegisterForPutByVal(GPRTemporary& temporary, Array::Mode arrayMode) +GPRReg SpeculativeJIT::temporaryRegisterForPutByVal(GPRTemporary& temporary, ArrayMode arrayMode) { if (!putByValWillNeedExtraRegister(arrayMode)) return InvalidGPRReg; diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h index aadcdb06b..446ea7dbe 100644 --- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h +++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h @@ -543,7 +543,7 @@ public: m_jit.move(valueOfJSConstantAsImm64(plan.nodeIndex()), plan.gpr()); break; case SetDoubleConstant: - m_jit.move(Imm64(valueOfNumberConstant(plan.nodeIndex())), canTrample); + m_jit.move(Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(plan.nodeIndex()))), canTrample); m_jit.move64ToDouble(canTrample, plan.fpr()); break; case Load32PayloadBoxInt: @@ -2270,47 +2270,15 @@ public: void compileAllocatePropertyStorage(Node&); void compileReallocatePropertyStorage(Node&); -#if USE(JSVALUE64) - MacroAssembler::JumpList compileContiguousGetByVal(Node&, GPRReg baseReg, GPRReg propertyReg, GPRReg storageReg, GPRReg resultReg); - MacroAssembler::JumpList compileArrayStorageGetByVal(Node&, GPRReg baseReg, GPRReg propertyReg, GPRReg storageReg, GPRReg resultReg); -#else - MacroAssembler::JumpList compileContiguousGetByVal(Node&, GPRReg baseReg, GPRReg propertyReg, GPRReg storageReg, GPRReg resultTagReg, GPRReg resultPayloadReg); - MacroAssembler::JumpList compileArrayStorageGetByVal(Node&, GPRReg baseReg, GPRReg propertyReg, GPRReg storageReg, GPRReg resultTagReg, GPRReg resultPayloadReg); -#endif - - bool putByValWillNeedExtraRegister(Array::Mode arrayMode) + bool putByValWillNeedExtraRegister(ArrayMode arrayMode) { - switch (arrayMode) { - // For ArrayStorage, we need an extra reg for stores to holes except if - // we're in SlowPut mode. - case ARRAY_STORAGE_TO_HOLE_MODES: - case OUT_OF_BOUNDS_ARRAY_STORAGE_MODES: - case ALL_EFFECTFUL_ARRAY_STORAGE_MODES: - return true; - - // For Contiguous, we need an extra reg for any access that may store - // to the tail. 
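[Editor's note] The SetDoubleConstant fix above materializes a double constant by first moving its raw 64-bit pattern into a GPR and then transferring that register to an FPR. A portable sketch of the bit-preserving step; the real helper in the tree may differ in signature:

    #include <cstdint>
    #include <cstring>

    static int64_t reinterpretDoubleToInt64(double value)
    {
        static_assert(sizeof(int64_t) == sizeof(double), "double must be 64 bits wide");
        int64_t bits;
        std::memcpy(&bits, &value, sizeof(bits)); // copies the representation, no numeric conversion
        return bits;
    }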
- case CONTIGUOUS_TO_TAIL_MODES: - case OUT_OF_BOUNDS_CONTIGUOUS_MODES: - case ALL_EFFECTFUL_CONTIGUOUS_MODES: - return true; - - default: - return false; - } + return arrayMode.mayStoreToHole(); } - GPRReg temporaryRegisterForPutByVal(GPRTemporary&, Array::Mode); + GPRReg temporaryRegisterForPutByVal(GPRTemporary&, ArrayMode); GPRReg temporaryRegisterForPutByVal(GPRTemporary& temporary, Node& node) { return temporaryRegisterForPutByVal(temporary, node.arrayMode()); } -#if USE(JSVALUE64) - MacroAssembler::JumpList compileContiguousPutByVal(Node&, GPRReg baseReg, GPRReg propertyReg, GPRReg storageReg, GPRReg valueReg, GPRReg tempReg); - MacroAssembler::JumpList compileArrayStoragePutByVal(Node&, GPRReg baseReg, GPRReg propertyReg, GPRReg storageReg, GPRReg valueReg, GPRReg tempReg); -#else - MacroAssembler::JumpList compileContiguousPutByVal(Node&, GPRReg baseReg, GPRReg propertyReg, GPRReg storageReg, GPRReg valueTagReg, GPRReg valuePayloadReg); - MacroAssembler::JumpList compileArrayStoragePutByVal(Node&, GPRReg baseReg, GPRReg propertyReg, GPRReg storageReg, GPRReg valueTagReg, GPRReg valuePayloadReg); -#endif void compileGetCharCodeAt(Node&); void compileGetByValOnString(Node&); @@ -2445,9 +2413,9 @@ public: JumpReplacementWatchpoint* forwardSpeculationWatchpoint(ExitKind = UncountableWatchpoint); JumpReplacementWatchpoint* speculationWatchpointWithConditionalDirection(ExitKind, bool isForward); - const TypedArrayDescriptor* typedArrayDescriptor(Array::Mode); + const TypedArrayDescriptor* typedArrayDescriptor(ArrayMode); - JITCompiler::JumpList jumpSlowForUnwantedArrayMode(GPRReg tempWithIndexingTypeReg, Array::Mode arrayMode); + JITCompiler::JumpList jumpSlowForUnwantedArrayMode(GPRReg tempWithIndexingTypeReg, ArrayMode, bool invert = false); void checkArray(Node&); void arrayify(Node&, GPRReg baseReg, GPRReg propertyReg); void arrayify(Node&); diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp index ab089ba36..65fdf5593 100644 --- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp +++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp @@ -2046,113 +2046,6 @@ void SpeculativeJIT::emitBranch(Node& node) } } -MacroAssembler::JumpList SpeculativeJIT::compileContiguousGetByVal(Node&, GPRReg, GPRReg propertyReg, GPRReg storageReg, GPRReg resultTagReg, GPRReg resultPayloadReg) -{ - MacroAssembler::JumpList slowCases; - slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); - - m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagReg); - m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadReg); - slowCases.append(m_jit.branch32(MacroAssembler::Equal, resultTagReg, TrustedImm32(JSValue::EmptyValueTag))); - - return slowCases; -} - -MacroAssembler::JumpList SpeculativeJIT::compileArrayStorageGetByVal(Node&, GPRReg, GPRReg propertyReg, GPRReg storageReg, GPRReg resultTagReg, GPRReg resultPayloadReg) -{ - MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())); - - m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, 
u.asBits.tag)), resultTagReg); - m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadReg); - MacroAssembler::Jump hole = m_jit.branch32(MacroAssembler::Equal, resultTagReg, TrustedImm32(JSValue::EmptyValueTag)); - - MacroAssembler::JumpList slowCases; - slowCases.append(outOfBounds); - slowCases.append(hole); - return slowCases; -} - -MacroAssembler::JumpList SpeculativeJIT::compileContiguousPutByVal(Node& node, GPRReg, GPRReg propertyReg, GPRReg storageReg, GPRReg valueTagReg, GPRReg valuePayloadReg) -{ - Array::Mode arrayMode = node.arrayMode(); - - MacroAssembler::JumpList slowCases; - - if (!mayStoreToTail(arrayMode)) { - speculationCheck( - Uncountable, JSValueRegs(), NoNode, - m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); - } else { - MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())); - - slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()))); - - if (isInBoundsAccess(arrayMode)) - speculationCheck(Uncountable, JSValueRegs(), NoNode, slowCases); - - m_jit.add32(TrustedImm32(1), propertyReg); - m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())); - m_jit.sub32(TrustedImm32(1), propertyReg); - - inBounds.link(&m_jit); - } - - m_jit.store32(valueTagReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); - m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); - - if (isInBoundsAccess(arrayMode)) - return MacroAssembler::JumpList(); - return slowCases; -} - -MacroAssembler::JumpList SpeculativeJIT::compileArrayStoragePutByVal(Node& node, GPRReg, GPRReg propertyReg, GPRReg storageReg, GPRReg valueTagReg, GPRReg valuePayloadReg) -{ - Array::Mode arrayMode = node.arrayMode(); - - MacroAssembler::JumpList slowCases; - - MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())); - if (isInBoundsAccess(arrayMode)) - speculationCheck(OutOfBounds, JSValueRegs(), NoNode, beyondArrayBounds); - else - slowCases.append(beyondArrayBounds); - - // Check if we're writing to a hole; if so increment m_numValuesInVector. - if (!mayStoreToHole(arrayMode)) { - // This is uncountable because if we take this exit, then the baseline JIT - // will immediately count the hole store. So there is no need for exit - // profiling. 
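[Editor's note] The removed helper above (and the equivalent code re-inlined later in this patch) performs the ArrayStorage hole-store bookkeeping: a write into an empty slot bumps m_numValuesInVector and may also have to grow the public length. A scalar sketch of that bookkeeping with an invented layout; only the logic mirrors the JIT code:

    #include <cstdint>
    #include <vector>

    struct ArrayStorageStub {
        std::vector<bool> occupied;     // stands in for "tag != EmptyValueTag"
        uint32_t publicLength = 0;
        uint32_t numValuesInVector = 0;
    };

    // Returns false where the JIT would branch to its slow cases instead.
    inline bool putByIndexInVector(ArrayStorageStub& storage, uint32_t index)
    {
        if (index >= storage.occupied.size())
            return false;                          // beyond the vector length
        if (!storage.occupied[index]) {            // writing into a hole
            storage.numValuesInVector++;
            if (index >= storage.publicLength)
                storage.publicLength = index + 1;  // the write may grow the array
        }
        storage.occupied[index] = true;            // then store the value
        return true;
    }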
- speculationCheck( - Uncountable, JSValueRegs(), NoNode, - m_jit.branch32(MacroAssembler::Equal, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag))); - } else { - MacroAssembler::Jump notHoleValue = m_jit.branch32(MacroAssembler::NotEqual, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag)); - if (isSlowPutAccess(arrayMode)) { - // This is sort of strange. If we wanted to optimize this code path, we would invert - // the above branch. But it's simply not worth it since this only happens if we're - // already having a bad time. - slowCases.append(m_jit.jump()); - } else { - m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, ArrayStorage::numValuesInVectorOffset())); - - // If we're writing to a hole we might be growing the array; - MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset())); - m_jit.add32(TrustedImm32(1), propertyReg); - m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset())); - m_jit.sub32(TrustedImm32(1), propertyReg); - - lengthDoesNotNeedUpdate.link(&m_jit); - } - notHoleValue.link(&m_jit); - } - - // Store the value to the array. - m_jit.store32(valueTagReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); - m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); - - return slowCases; -} - void SpeculativeJIT::compile(Node& node) { NodeType op = node.op(); @@ -2646,13 +2539,14 @@ void SpeculativeJIT::compile(Node& node) break; } - case Arrayify: { + case Arrayify: + case ArrayifyToStructure: { arrayify(node); break; } case GetByVal: { - switch (node.arrayMode()) { + switch (node.arrayMode().type()) { case Array::SelectUsingPredictions: case Array::ForceExit: ASSERT_NOT_REACHED(); @@ -2673,29 +2567,28 @@ void SpeculativeJIT::compile(Node& node) jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex); break; } - case IN_BOUNDS_CONTIGUOUS_MODES: { - SpeculateStrictInt32Operand property(this, node.child2()); - StorageOperand storage(this, node.child3()); + case Array::Contiguous: { + if (node.arrayMode().isInBounds()) { + SpeculateStrictInt32Operand property(this, node.child2()); + StorageOperand storage(this, node.child3()); - GPRReg propertyReg = property.gpr(); - GPRReg storageReg = storage.gpr(); + GPRReg propertyReg = property.gpr(); + GPRReg storageReg = storage.gpr(); - if (!m_compileOkay) - return; + if (!m_compileOkay) + return; - speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); + speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); - GPRTemporary resultTag(this); - GPRTemporary resultPayload(this); - m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, 
MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag.gpr()); - speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Equal, resultTag.gpr(), TrustedImm32(JSValue::EmptyValueTag))); - m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload.gpr()); - jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex); - break; - } - case CONTIGUOUS_TO_TAIL_MODES: - case OUT_OF_BOUNDS_CONTIGUOUS_MODES: - case ALL_EFFECTFUL_CONTIGUOUS_MODES: { + GPRTemporary resultTag(this); + GPRTemporary resultPayload(this); + m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag.gpr()); + speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Equal, resultTag.gpr(), TrustedImm32(JSValue::EmptyValueTag))); + m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload.gpr()); + jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex); + break; + } + SpeculateCellOperand base(this, node.child1()); SpeculateStrictInt32Operand property(this, node.child2()); StorageOperand storage(this, node.child3()); @@ -2712,8 +2605,14 @@ void SpeculativeJIT::compile(Node& node) GPRReg resultTagReg = resultTag.gpr(); GPRReg resultPayloadReg = resultPayload.gpr(); - MacroAssembler::JumpList slowCases = - compileContiguousGetByVal(node, baseReg, propertyReg, storageReg, resultTagReg, resultPayloadReg); + MacroAssembler::JumpList slowCases; + + slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); + + m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagReg); + m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadReg); + slowCases.append(m_jit.branch32(MacroAssembler::Equal, resultTagReg, TrustedImm32(JSValue::EmptyValueTag))); + addSlowPathGenerator( slowPathCall( slowCases, this, operationGetByValArrayInt, @@ -2722,30 +2621,30 @@ void SpeculativeJIT::compile(Node& node) jsValueResult(resultTagReg, resultPayloadReg, m_compileIndex); break; } - case IN_BOUNDS_ARRAY_STORAGE_MODES: { - SpeculateStrictInt32Operand property(this, node.child2()); - StorageOperand storage(this, node.child3()); - GPRReg propertyReg = property.gpr(); - GPRReg storageReg = storage.gpr(); + case Array::ArrayStorage: + case Array::SlowPutArrayStorage: { + if (node.arrayMode().isInBounds()) { + SpeculateStrictInt32Operand property(this, node.child2()); + StorageOperand storage(this, node.child3()); + GPRReg propertyReg = property.gpr(); + GPRReg storageReg = storage.gpr(); - if (!m_compileOkay) - return; + if (!m_compileOkay) + return; - speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()))); + speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()))); - GPRTemporary resultTag(this); - GPRTemporary resultPayload(this); + GPRTemporary resultTag(this); + GPRTemporary 
resultPayload(this); - m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag.gpr()); - speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Equal, resultTag.gpr(), TrustedImm32(JSValue::EmptyValueTag))); - m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload.gpr()); + m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag.gpr()); + speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Equal, resultTag.gpr(), TrustedImm32(JSValue::EmptyValueTag))); + m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload.gpr()); - jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex); - break; - } - case OUT_OF_BOUNDS_ARRAY_STORAGE_MODES: - case SLOW_PUT_ARRAY_STORAGE_MODES: - case ALL_EFFECTFUL_ARRAY_STORAGE_MODES: { + jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex); + break; + } + SpeculateCellOperand base(this, node.child1()); SpeculateStrictInt32Operand property(this, node.child2()); StorageOperand storage(this, node.child3()); @@ -2829,10 +2728,10 @@ void SpeculativeJIT::compile(Node& node) Edge child3 = m_jit.graph().varArgChild(node, 2); Edge child4 = m_jit.graph().varArgChild(node, 3); - Array::Mode arrayMode = modeForPut(node.arrayMode()); + ArrayMode arrayMode = node.arrayMode().modeForPut(); bool alreadyHandled = false; - switch (arrayMode) { + switch (arrayMode.type()) { case Array::SelectUsingPredictions: case Array::ForceExit: ASSERT_NOT_REACHED(); @@ -2871,9 +2770,8 @@ void SpeculativeJIT::compile(Node& node) GPRReg baseReg = base.gpr(); GPRReg propertyReg = property.gpr(); - switch (arrayMode) { - case ALL_CONTIGUOUS_MODES: - case ALL_EFFECTFUL_CONTIGUOUS_MODES: { + switch (arrayMode.type()) { + case Array::Contiguous: { JSValueOperand value(this, child3); GPRReg valueTagReg = value.tagGPR(); @@ -2900,16 +2798,36 @@ void SpeculativeJIT::compile(Node& node) break; } - MacroAssembler::JumpList slowCases = - compileContiguousPutByVal( - node, baseReg, propertyReg, storageReg, valueTagReg, valuePayloadReg); + MacroAssembler::JumpList slowCases; + if (arrayMode.isInBounds()) { + speculationCheck( + Uncountable, JSValueRegs(), NoNode, + m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); + } else { + MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())); + + slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()))); + + if (!arrayMode.isOutOfBounds()) + speculationCheck(Uncountable, JSValueRegs(), NoNode, slowCases); + + m_jit.add32(TrustedImm32(1), propertyReg); + m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())); + m_jit.sub32(TrustedImm32(1), propertyReg); + + inBounds.link(&m_jit); + } + + m_jit.store32(valueTagReg, 
MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); + m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); + base.use(); property.use(); value.use(); storage.use(); - if (!slowCases.empty()) { + if (arrayMode.isOutOfBounds()) { addSlowPathGenerator( slowPathCall( slowCases, this, @@ -2921,8 +2839,8 @@ void SpeculativeJIT::compile(Node& node) break; } - case ALL_ARRAY_STORAGE_MODES: - case ALL_EFFECTFUL_ARRAY_STORAGE_MODES: { + case Array::ArrayStorage: + case Array::SlowPutArrayStorage: { JSValueOperand value(this, child3); GPRReg valueTagReg = value.tagGPR(); @@ -2950,9 +2868,46 @@ void SpeculativeJIT::compile(Node& node) break; } - MacroAssembler::JumpList slowCases = - compileArrayStoragePutByVal( - node, baseReg, propertyReg, storageReg, valueTagReg, valuePayloadReg); + MacroAssembler::JumpList slowCases; + + MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())); + if (!arrayMode.isOutOfBounds()) + speculationCheck(OutOfBounds, JSValueRegs(), NoNode, beyondArrayBounds); + else + slowCases.append(beyondArrayBounds); + + // Check if we're writing to a hole; if so increment m_numValuesInVector. + if (arrayMode.isInBounds()) { + // This is uncountable because if we take this exit, then the baseline JIT + // will immediately count the hole store. So there is no need for exit + // profiling. + speculationCheck( + Uncountable, JSValueRegs(), NoNode, + m_jit.branch32(MacroAssembler::Equal, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag))); + } else { + MacroAssembler::Jump notHoleValue = m_jit.branch32(MacroAssembler::NotEqual, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag)); + if (arrayMode.isSlowPut()) { + // This is sort of strange. If we wanted to optimize this code path, we would invert + // the above branch. But it's simply not worth it since this only happens if we're + // already having a bad time. + slowCases.append(m_jit.jump()); + } else { + m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, ArrayStorage::numValuesInVectorOffset())); + + // If we're writing to a hole we might be growing the array; + MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset())); + m_jit.add32(TrustedImm32(1), propertyReg); + m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset())); + m_jit.sub32(TrustedImm32(1), propertyReg); + + lengthDoesNotNeedUpdate.link(&m_jit); + } + notHoleValue.link(&m_jit); + } + + // Store the value to the array. 
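[Editor's note] For contrast with the array-storage path just above: the contiguous PutByVal branch earlier in this hunk keeps no hole count and only maintains the public length, appending when the index is still within the vector length. Reduced to scalar form, with the vector's size standing in for the vector length; the layout is invented:

    #include <cstdint>
    #include <vector>

    struct ContiguousStub {
        std::vector<int> slots;         // pre-sized to the vector length
        uint32_t publicLength = 0;
    };

    // Returns false where the JIT would take its slow path instead.
    inline bool putContiguous(ContiguousStub& butterfly, uint32_t index, int value, bool allowAppend)
    {
        if (index < butterfly.publicLength) {
            butterfly.slots[index] = value;        // strictly in-bounds store
            return true;
        }
        if (!allowAppend || index >= butterfly.slots.size())
            return false;                          // out of line: slow path
        butterfly.slots[index] = value;
        butterfly.publicLength = index + 1;        // storing past the end grows the length
        return true;
    }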
+ m_jit.store32(valueTagReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); + m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); base.use(); property.use(); @@ -3070,7 +3025,7 @@ void SpeculativeJIT::compile(Node& node) } case ArrayPush: { - ASSERT(modeIsJSArray(node.arrayMode())); + ASSERT(node.arrayMode().isJSArray()); SpeculateCellOperand base(this, node.child1()); JSValueOperand value(this, node.child2()); @@ -3089,9 +3044,8 @@ void SpeculativeJIT::compile(Node& node) StorageOperand storage(this, node.child3()); GPRReg storageGPR = storage.gpr(); - switch (node.arrayMode()) { - case Array::ArrayWithContiguous: - case Array::ArrayWithContiguousOutOfBounds: { + switch (node.arrayMode().type()) { + case Array::Contiguous: { m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR); MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength())); m_jit.store32(valueTagGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); @@ -3110,8 +3064,7 @@ void SpeculativeJIT::compile(Node& node) break; } - case Array::ArrayWithArrayStorage: - case Array::ArrayWithArrayStorageOutOfBounds: { + case Array::ArrayStorage: { m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR); // Refuse to handle bizarre lengths. @@ -3141,7 +3094,7 @@ void SpeculativeJIT::compile(Node& node) } case ArrayPop: { - ASSERT(modeIsJSArray(node.arrayMode())); + ASSERT(node.arrayMode().isJSArray()); SpeculateCellOperand base(this, node.child1()); StorageOperand storage(this, node.child2()); @@ -3153,9 +3106,8 @@ void SpeculativeJIT::compile(Node& node) GPRReg valuePayloadGPR = valuePayload.gpr(); GPRReg storageGPR = storage.gpr(); - switch (node.arrayMode()) { - case Array::ArrayWithContiguous: - case Array::ArrayWithContiguousOutOfBounds: { + switch (node.arrayMode().type()) { + case Array::Contiguous: { m_jit.load32( MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), valuePayloadGPR); MacroAssembler::Jump undefinedCase = @@ -3188,8 +3140,7 @@ void SpeculativeJIT::compile(Node& node) break; } - case Array::ArrayWithArrayStorage: - case Array::ArrayWithArrayStorageOutOfBounds: { + case Array::ArrayStorage: { GPRTemporary storageLength(this); GPRReg storageLengthGPR = storageLength.gpr(); diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp index f11fd1663..6c066c388 100644 --- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp +++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp @@ -2111,107 +2111,6 @@ void SpeculativeJIT::emitBranch(Node& node) } } -MacroAssembler::JumpList SpeculativeJIT::compileContiguousGetByVal(Node&, GPRReg, GPRReg propertyReg, GPRReg storageReg, GPRReg resultReg) -{ - MacroAssembler::JumpList slowCases; - slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); - - m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg); - 
slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, resultReg)); - - return slowCases; -} - -MacroAssembler::JumpList SpeculativeJIT::compileArrayStorageGetByVal(Node&, GPRReg, GPRReg propertyReg, GPRReg storageReg, GPRReg resultReg) -{ - MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())); - - m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), resultReg); - MacroAssembler::Jump hole = m_jit.branchTest64(MacroAssembler::Zero, resultReg); - - MacroAssembler::JumpList slowCases; - slowCases.append(outOfBounds); - slowCases.append(hole); - return slowCases; -} - -MacroAssembler::JumpList SpeculativeJIT::compileContiguousPutByVal(Node& node, GPRReg, GPRReg propertyReg, GPRReg storageReg, GPRReg valueReg, GPRReg tempReg) -{ - Array::Mode arrayMode = node.arrayMode(); - - MacroAssembler::JumpList slowCases; - - if (!mayStoreToTail(arrayMode)) { - speculationCheck( - Uncountable, JSValueRegs(), NoNode, - m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); - } else { - MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())); - - slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()))); - - if (isInBoundsAccess(arrayMode)) - speculationCheck(Uncountable, JSValueRegs(), NoNode, slowCases); - - m_jit.add32(TrustedImm32(1), propertyReg, tempReg); - m_jit.store32(tempReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())); - - inBounds.link(&m_jit); - } - - m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight)); - - if (isInBoundsAccess(arrayMode)) - return MacroAssembler::JumpList(); - return slowCases; -} - -MacroAssembler::JumpList SpeculativeJIT::compileArrayStoragePutByVal(Node& node, GPRReg, GPRReg propertyReg, GPRReg storageReg, GPRReg valueReg, GPRReg tempReg) -{ - Array::Mode arrayMode = node.arrayMode(); - - MacroAssembler::JumpList slowCases; - - MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())); - if (isInBoundsAccess(arrayMode)) - speculationCheck(OutOfBounds, JSValueRegs(), NoNode, beyondArrayBounds); - else - slowCases.append(beyondArrayBounds); - - // Check if we're writing to a hole; if so increment m_numValuesInVector. - if (!mayStoreToHole(arrayMode)) { - // This is uncountable because if we take this exit, then the baseline JIT - // will immediately count the hole store. So there is no need for exit - // profiling. - speculationCheck( - Uncountable, JSValueRegs(), NoNode, - m_jit.branchTest64(MacroAssembler::Zero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])))); - } else { - MacroAssembler::Jump notHoleValue = m_jit.branchTest64(MacroAssembler::NonZero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); - if (isSlowPutAccess(arrayMode)) { - // This is sort of strange. If we wanted to optimize this code path, we would invert - // the above branch. 
But it's simply not worth it since this only happens if we're - // already having a bad time. - slowCases.append(m_jit.jump()); - } else { - m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, ArrayStorage::numValuesInVectorOffset())); - - // If we're writing to a hole we might be growing the array; - MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset())); - m_jit.add32(TrustedImm32(1), propertyReg, tempReg); - m_jit.store32(tempReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset())); - - lengthDoesNotNeedUpdate.link(&m_jit); - } - notHoleValue.link(&m_jit); - } - - // Store the value to the array. - m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); - - return slowCases; -} - void SpeculativeJIT::compile(Node& node) { NodeType op = node.op(); @@ -2673,13 +2572,14 @@ void SpeculativeJIT::compile(Node& node) break; } - case Arrayify: { + case Arrayify: + case ArrayifyToStructure: { arrayify(node); break; } case GetByVal: { - switch (node.arrayMode()) { + switch (node.arrayMode().type()) { case Array::SelectUsingPredictions: case Array::ForceExit: ASSERT_NOT_REACHED(); @@ -2698,27 +2598,26 @@ void SpeculativeJIT::compile(Node& node) jsValueResult(result.gpr(), m_compileIndex); break; } - case IN_BOUNDS_CONTIGUOUS_MODES: { - SpeculateStrictInt32Operand property(this, node.child2()); - StorageOperand storage(this, node.child3()); - - GPRReg propertyReg = property.gpr(); - GPRReg storageReg = storage.gpr(); - - if (!m_compileOkay) - return; - - speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); + case Array::Contiguous: { + if (node.arrayMode().isInBounds()) { + SpeculateStrictInt32Operand property(this, node.child2()); + StorageOperand storage(this, node.child3()); + + GPRReg propertyReg = property.gpr(); + GPRReg storageReg = storage.gpr(); + + if (!m_compileOkay) + return; + + speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); + + GPRTemporary result(this); + m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), result.gpr()); + speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branchTest64(MacroAssembler::Zero, result.gpr())); + jsValueResult(result.gpr(), m_compileIndex); + break; + } - GPRTemporary result(this); - m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), result.gpr()); - speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branchTest64(MacroAssembler::Zero, result.gpr())); - jsValueResult(result.gpr(), m_compileIndex); - break; - } - case CONTIGUOUS_TO_TAIL_MODES: - case OUT_OF_BOUNDS_CONTIGUOUS_MODES: - case ALL_EFFECTFUL_CONTIGUOUS_MODES: { SpeculateCellOperand base(this, node.child1()); SpeculateStrictInt32Operand property(this, node.child2()); StorageOperand storage(this, node.child3()); @@ -2733,8 +2632,13 @@ void SpeculativeJIT::compile(Node& node) GPRTemporary result(this); GPRReg resultReg = result.gpr(); - MacroAssembler::JumpList slowCases = - compileContiguousGetByVal(node, baseReg, propertyReg, storageReg, resultReg); + MacroAssembler::JumpList slowCases; + + 
slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); + + m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg); + slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, resultReg)); + addSlowPathGenerator( slowPathCall( slowCases, this, operationGetByValArrayInt, @@ -2743,28 +2647,28 @@ void SpeculativeJIT::compile(Node& node) jsValueResult(resultReg, m_compileIndex); break; } - case IN_BOUNDS_ARRAY_STORAGE_MODES: { - SpeculateStrictInt32Operand property(this, node.child2()); - StorageOperand storage(this, node.child3()); + case Array::ArrayStorage: + case Array::SlowPutArrayStorage: { + if (node.arrayMode().isInBounds()) { + SpeculateStrictInt32Operand property(this, node.child2()); + StorageOperand storage(this, node.child3()); - GPRReg propertyReg = property.gpr(); - GPRReg storageReg = storage.gpr(); + GPRReg propertyReg = property.gpr(); + GPRReg storageReg = storage.gpr(); - if (!m_compileOkay) - return; + if (!m_compileOkay) + return; - speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()))); + speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()))); - GPRTemporary result(this); - m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), result.gpr()); - speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branchTest64(MacroAssembler::Zero, result.gpr())); + GPRTemporary result(this); + m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), result.gpr()); + speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branchTest64(MacroAssembler::Zero, result.gpr())); - jsValueResult(result.gpr(), m_compileIndex); - break; - } - case OUT_OF_BOUNDS_ARRAY_STORAGE_MODES: - case SLOW_PUT_ARRAY_STORAGE_MODES: - case ALL_EFFECTFUL_ARRAY_STORAGE_MODES: { + jsValueResult(result.gpr(), m_compileIndex); + break; + } + SpeculateCellOperand base(this, node.child1()); SpeculateStrictInt32Operand property(this, node.child2()); StorageOperand storage(this, node.child3()); @@ -2779,8 +2683,13 @@ void SpeculativeJIT::compile(Node& node) GPRTemporary result(this); GPRReg resultReg = result.gpr(); - MacroAssembler::JumpList slowCases = - compileArrayStorageGetByVal(node, baseReg, propertyReg, storageReg, resultReg); + MacroAssembler::JumpList slowCases; + + slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()))); + + m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), resultReg); + slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, resultReg)); + addSlowPathGenerator( slowPathCall( slowCases, this, operationGetByValArrayInt, @@ -2836,10 +2745,10 @@ void SpeculativeJIT::compile(Node& node) Edge child3 = m_jit.graph().varArgChild(node, 2); Edge child4 = m_jit.graph().varArgChild(node, 3); - Array::Mode arrayMode = modeForPut(node.arrayMode()); + ArrayMode arrayMode = node.arrayMode().modeForPut(); bool alreadyHandled = false; - switch (arrayMode) { + switch 
(arrayMode.type()) { case Array::SelectUsingPredictions: case Array::ForceExit: ASSERT_NOT_REACHED(); @@ -2879,9 +2788,8 @@ void SpeculativeJIT::compile(Node& node) GPRReg baseReg = base.gpr(); GPRReg propertyReg = property.gpr(); - switch (arrayMode) { - case ALL_CONTIGUOUS_MODES: - case ALL_EFFECTFUL_CONTIGUOUS_MODES: { + switch (arrayMode.type()) { + case Array::Contiguous: { JSValueOperand value(this, child3); GPRReg valueReg = value.gpr(); @@ -2910,16 +2818,34 @@ void SpeculativeJIT::compile(Node& node) GPRTemporary temporary; GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node); - MacroAssembler::JumpList slowCases = - compileContiguousPutByVal( - node, baseReg, propertyReg, storageReg, valueReg, temporaryReg); + MacroAssembler::JumpList slowCases; + + if (arrayMode.isInBounds()) { + speculationCheck( + Uncountable, JSValueRegs(), NoNode, + m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); + } else { + MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())); + + slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()))); + + if (!arrayMode.isOutOfBounds()) + speculationCheck(Uncountable, JSValueRegs(), NoNode, slowCases); + + m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg); + m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())); + + inBounds.link(&m_jit); + } + + m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight)); base.use(); property.use(); value.use(); storage.use(); - if (!slowCases.empty()) { + if (arrayMode.isOutOfBounds()) { addSlowPathGenerator( slowPathCall( slowCases, this, @@ -2931,8 +2857,8 @@ void SpeculativeJIT::compile(Node& node) break; } - case ALL_ARRAY_STORAGE_MODES: - case ALL_EFFECTFUL_ARRAY_STORAGE_MODES: { + case Array::ArrayStorage: + case Array::SlowPutArrayStorage: { JSValueOperand value(this, child3); GPRReg valueReg = value.gpr(); @@ -2961,9 +2887,44 @@ void SpeculativeJIT::compile(Node& node) GPRTemporary temporary; GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node); - MacroAssembler::JumpList slowCases = - compileArrayStoragePutByVal( - node, baseReg, propertyReg, storageReg, valueReg, temporaryReg); + MacroAssembler::JumpList slowCases; + + MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())); + if (!arrayMode.isOutOfBounds()) + speculationCheck(OutOfBounds, JSValueRegs(), NoNode, beyondArrayBounds); + else + slowCases.append(beyondArrayBounds); + + // Check if we're writing to a hole; if so increment m_numValuesInVector. + if (arrayMode.isInBounds()) { + // This is uncountable because if we take this exit, then the baseline JIT + // will immediately count the hole store. So there is no need for exit + // profiling. 
+ speculationCheck( + Uncountable, JSValueRegs(), NoNode, + m_jit.branchTest64(MacroAssembler::Zero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])))); + } else { + MacroAssembler::Jump notHoleValue = m_jit.branchTest64(MacroAssembler::NonZero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); + if (arrayMode.isSlowPut()) { + // This is sort of strange. If we wanted to optimize this code path, we would invert + // the above branch. But it's simply not worth it since this only happens if we're + // already having a bad time. + slowCases.append(m_jit.jump()); + } else { + m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, ArrayStorage::numValuesInVectorOffset())); + + // If we're writing to a hole we might be growing the array; + MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset())); + m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg); + m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset())); + + lengthDoesNotNeedUpdate.link(&m_jit); + } + notHoleValue.link(&m_jit); + } + + // Store the value to the array. + m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); base.use(); property.use(); @@ -3117,7 +3078,7 @@ void SpeculativeJIT::compile(Node& node) } case ArrayPush: { - ASSERT(modeIsJSArray(node.arrayMode())); + ASSERT(node.arrayMode().isJSArray()); SpeculateCellOperand base(this, node.child1()); JSValueOperand value(this, node.child2()); @@ -3135,9 +3096,8 @@ void SpeculativeJIT::compile(Node& node) StorageOperand storage(this, node.child3()); GPRReg storageGPR = storage.gpr(); - switch (node.arrayMode()) { - case Array::ArrayWithContiguous: - case Array::ArrayWithContiguousOutOfBounds: { + switch (node.arrayMode().type()) { + case Array::Contiguous: { m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR); MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength())); m_jit.store64(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight)); @@ -3154,8 +3114,7 @@ void SpeculativeJIT::compile(Node& node) break; } - case Array::ArrayWithArrayStorage: - case Array::ArrayWithArrayStorageOutOfBounds: { + case Array::ArrayStorage: { m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR); // Refuse to handle bizarre lengths. 
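A minimal plain-C++ sketch of the in-bounds fast path that the block above emits for ArrayPush on contiguous storage; the struct and helper below are hypothetical stand-ins for the real butterfly layout and only illustrate the logic (compare the public length against the vector length, store the value, bump the public length), with anything that does not fit going through operationArrayPush:

    #include <cstdint>

    struct ContiguousStorageSketch {
        uint32_t publicLength;   // what Butterfly::offsetOfPublicLength() points at
        uint32_t vectorLength;   // what Butterfly::offsetOfVectorLength() points at
        int64_t* vector;         // EncodedJSValue slots
    };

    // Returns true if the push stayed on the fast path; on false the caller
    // takes the slow path (the operationArrayPush call in the emitted code).
    inline bool tryFastContiguousPush(ContiguousStorageSketch& storage, int64_t encodedValue, uint32_t& newLength)
    {
        uint32_t length = storage.publicLength;
        if (length >= storage.vectorLength)      // branch32(AboveOrEqual, ...) above
            return false;
        storage.vector[length] = encodedValue;   // store64(valueGPR, BaseIndex(...))
        newLength = length + 1;                  // boxed as an int32 result in the JIT
        storage.publicLength = newLength;        // add32 + store32 of the public length
        return true;
    }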
@@ -3187,7 +3146,7 @@ void SpeculativeJIT::compile(Node& node) } case ArrayPop: { - ASSERT(modeIsJSArray(node.arrayMode())); + ASSERT(node.arrayMode().isJSArray()); SpeculateCellOperand base(this, node.child1()); StorageOperand storage(this, node.child2()); @@ -3199,9 +3158,8 @@ void SpeculativeJIT::compile(Node& node) GPRReg valueGPR = value.gpr(); GPRReg storageLengthGPR = storageLength.gpr(); - switch (node.arrayMode()) { - case Array::ArrayWithContiguous: - case Array::ArrayWithContiguousOutOfBounds: { + switch (node.arrayMode().type()) { + case Array::Contiguous: { m_jit.load32( MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR); MacroAssembler::Jump undefinedCase = @@ -3230,8 +3188,7 @@ void SpeculativeJIT::compile(Node& node) break; } - case Array::ArrayWithArrayStorage: - case Array::ArrayWithArrayStorageOutOfBounds: { + case Array::ArrayStorage: { m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR); JITCompiler::Jump undefinedCase = @@ -3873,9 +3830,15 @@ void SpeculativeJIT::compile(Node& node) ASSERT(node.structureSet().size()); + ExitKind exitKind; + if (m_jit.graph()[node.child1()].op() == WeakJSConstant) + exitKind = BadWeakConstantCache; + else + exitKind = BadCache; + if (node.structureSet().size() == 1) { speculationCheckWithConditionalDirection( - BadCache, JSValueRegs(base.gpr()), NoNode, + exitKind, JSValueRegs(base.gpr()), NoNode, m_jit.branchWeakPtr( JITCompiler::NotEqual, JITCompiler::Address(base.gpr(), JSCell::structureOffset()), @@ -3892,7 +3855,7 @@ void SpeculativeJIT::compile(Node& node) done.append(m_jit.branchWeakPtr(JITCompiler::Equal, structure.gpr(), node.structureSet()[i])); speculationCheckWithConditionalDirection( - BadCache, JSValueRegs(base.gpr()), NoNode, + exitKind, JSValueRegs(base.gpr()), NoNode, m_jit.branchWeakPtr( JITCompiler::NotEqual, structure.gpr(), node.structureSet().last()), node.op() == ForwardCheckStructure); @@ -3916,7 +3879,8 @@ void SpeculativeJIT::compile(Node& node) m_jit.addWeakReference(node.structure()); node.structure()->addTransitionWatchpoint( speculationWatchpointWithConditionalDirection( - BadCache, node.op() == ForwardStructureTransitionWatchpoint)); + m_jit.graph()[node.child1()].op() == WeakJSConstant ? BadWeakConstantCache : BadCache, + node.op() == ForwardStructureTransitionWatchpoint)); #if !ASSERT_DISABLED SpeculateCellOperand op1(this, node.child1()); diff --git a/Source/JavaScriptCore/disassembler/udis86/differences.txt b/Source/JavaScriptCore/disassembler/udis86/differences.txt index 3ef51efcf..dc225b6ff 100644 --- a/Source/JavaScriptCore/disassembler/udis86/differences.txt +++ b/Source/JavaScriptCore/disassembler/udis86/differences.txt @@ -20,3 +20,5 @@ here: - Made the code in udis86_syn.h use vsnprintf() instead of vsprintf(). - Fixed udis86_syn-att.c's jump destination printing to work correctly in 64-bit mode. + +- Add --outputDir option to itab.py. diff --git a/Source/JavaScriptCore/disassembler/udis86/itab.py b/Source/JavaScriptCore/disassembler/udis86/itab.py index 27fa9b3f3..07e20a6e1 100644 --- a/Source/JavaScriptCore/disassembler/udis86/itab.py +++ b/Source/JavaScriptCore/disassembler/udis86/itab.py @@ -23,6 +23,8 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+from optparse import OptionParser +import os import sys sys.path.append( '../scripts' ); @@ -194,10 +196,11 @@ class UdItabGenerator( ud_opcode.UdOpcodeTables ): MnemonicAliases = ( "invalid", "3dnow", "none", "db", "pause" ) - def __init__( self ): + def __init__( self, outputDir ): # first itab entry (0) is Invalid self.Itab.append( self.InvalidEntry ) self.MnemonicsTable.extend( self.MnemonicAliases ) + self.outputDir = outputDir def toGroupId( self, id ): return 0x8000 | id @@ -289,7 +292,7 @@ class UdItabGenerator( ud_opcode.UdOpcodeTables ): def genItabH( self ): - self.ItabH = open( "udis86_itab.h", "w" ) + self.ItabH = open( os.path.join(self.outputDir, "udis86_itab.h"), "w" ) # Generate Table Type Enumeration self.ItabH.write( "#ifndef UD_ITAB_H\n" ) @@ -328,7 +331,7 @@ class UdItabGenerator( ud_opcode.UdOpcodeTables ): def genItabC( self ): - self.ItabC = open( "udis86_itab.c", "w" ) + self.ItabC = open( os.path.join(self.outputDir, "udis86_itab.c"), "w" ) self.ItabC.write( "/* itab.c -- generated by itab.py, do no edit" ) self.ItabC.write( " */\n" ); self.ItabC.write( "#include \"udis86_decode.h\"\n\n" ); @@ -344,9 +347,12 @@ class UdItabGenerator( ud_opcode.UdOpcodeTables ): self.genItabH() def main(): - generator = UdItabGenerator() + parser = OptionParser() + parser.add_option("--outputDir", dest="outputDir", default="") + options, args = parser.parse_args() + generator = UdItabGenerator(os.path.normpath(options.outputDir)) optableXmlParser = ud_optable.UdOptableXmlParser() - optableXmlParser.parse( sys.argv[ 1 ], generator.addInsnDef ) + optableXmlParser.parse( args[ 0 ], generator.addInsnDef ) generator.genItab() diff --git a/Source/JavaScriptCore/heap/Heap.cpp b/Source/JavaScriptCore/heap/Heap.cpp index cd3393aa2..c455fc2b1 100644 --- a/Source/JavaScriptCore/heap/Heap.cpp +++ b/Source/JavaScriptCore/heap/Heap.cpp @@ -36,6 +36,7 @@ #include "JSLock.h" #include "JSONObject.h" #include "Tracing.h" +#include "UnlinkedCodeBlock.h" #include "WeakSetInlines.h" #include <algorithm> #include <wtf/RAMSize.h> @@ -484,13 +485,13 @@ void Heap::markRoots(bool fullGC) } } #endif - + if (m_globalData->codeBlocksBeingCompiled.size()) { GCPHASE(VisitActiveCodeBlock); for (size_t i = 0; i < m_globalData->codeBlocksBeingCompiled.size(); i++) m_globalData->codeBlocksBeingCompiled[i]->visitAggregate(visitor); } - + { GCPHASE(VisitMachineRoots); MARK_LOG_ROOT(visitor, "C++ Stack"); diff --git a/Source/JavaScriptCore/heap/Heap.h b/Source/JavaScriptCore/heap/Heap.h index 88dc201a4..51cebdc0e 100644 --- a/Source/JavaScriptCore/heap/Heap.h +++ b/Source/JavaScriptCore/heap/Heap.h @@ -86,6 +86,7 @@ namespace JSC { // our scan to run faster. 
static const unsigned s_timeCheckResolution = 16; + static bool isLive(const void*); static bool isMarked(const void*); static bool testAndSetMarked(const void*); static void setMarked(const void*); @@ -305,6 +306,11 @@ namespace JSC { return heap(v.asCell()); } + inline bool Heap::isLive(const void* cell) + { + return MarkedBlock::blockFor(cell)->isLiveCell(cell); + } + inline bool Heap::isMarked(const void* cell) { return MarkedBlock::blockFor(cell)->isMarked(cell); diff --git a/Source/JavaScriptCore/heap/MarkedAllocator.h b/Source/JavaScriptCore/heap/MarkedAllocator.h index 13bd8e493..867481fe3 100644 --- a/Source/JavaScriptCore/heap/MarkedAllocator.h +++ b/Source/JavaScriptCore/heap/MarkedAllocator.h @@ -74,10 +74,18 @@ inline void MarkedAllocator::init(Heap* heap, MarkedSpace* markedSpace, size_t c inline void* MarkedAllocator::allocate(size_t bytes) { MarkedBlock::FreeCell* head = m_freeList.head; - if (UNLIKELY(!head)) - return allocateSlowCase(bytes); + if (UNLIKELY(!head)) { + void* result = allocateSlowCase(bytes); +#ifndef NDEBUG + memset(result, 0xCD, bytes); +#endif + return result; + } m_freeList.head = head->next; +#ifndef NDEBUG + memset(head, 0xCD, bytes); +#endif return head; } diff --git a/Source/JavaScriptCore/heap/MarkedBlock.cpp b/Source/JavaScriptCore/heap/MarkedBlock.cpp index 70a24b6ae..9a036f87c 100644 --- a/Source/JavaScriptCore/heap/MarkedBlock.cpp +++ b/Source/JavaScriptCore/heap/MarkedBlock.cpp @@ -78,7 +78,7 @@ MarkedBlock::FreeList MarkedBlock::specializedSweep() FreeCell* head = 0; size_t count = 0; for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) { - if (blockState == Marked && m_marks.get(i)) + if (blockState == Marked && (m_marks.get(i) || (m_newlyAllocated && m_newlyAllocated->get(i)))) continue; JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]); @@ -94,6 +94,11 @@ MarkedBlock::FreeList MarkedBlock::specializedSweep() } } + // We only want to discard the newlyAllocated bits if we're creating a FreeList, + // otherwise we would lose information on what's currently alive. + if (sweepMode == SweepToFreeList && m_newlyAllocated) + m_newlyAllocated.clear(); + m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Marked); return FreeList(head, count * cellSize()); } @@ -138,12 +143,21 @@ MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode) return FreeList(); } -class SetAllMarksFunctor : public MarkedBlock::VoidFunctor { +class SetNewlyAllocatedFunctor : public MarkedBlock::VoidFunctor { public: + SetNewlyAllocatedFunctor(MarkedBlock* block) + : m_block(block) + { + } + void operator()(JSCell* cell) { - MarkedBlock::blockFor(cell)->setMarked(cell); + ASSERT(MarkedBlock::blockFor(cell) == m_block); + m_block->setNewlyAllocated(cell); } + +private: + MarkedBlock* m_block; }; void MarkedBlock::canonicalizeCellLivenessData(const FreeList& freeList) @@ -168,14 +182,17 @@ void MarkedBlock::canonicalizeCellLivenessData(const FreeList& freeList) // allocated from our free list are not currently marked, so we need another // way to tell what's live vs dead. 
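The comment above captures the reason for the new m_newlyAllocated bitmap: cells just handed out from the free list are not marked, so the block needs a second bit per cell to tell live from dead until the next collection canonicalizes the liveness data. A minimal sketch of the resulting liveness test for a block in the Marked state (illustrative only; MarkedBlock::isLive() below expresses the same condition on its own members):

    // BitmapType is any bitmap with a get(size_t) accessor, standing in for
    // WTF::Bitmap<atomsPerBlock> here.
    template<typename BitmapType>
    inline bool isLiveInMarkedState(const BitmapType& marks, const BitmapType* newlyAllocated, size_t atomNumber)
    {
        // Live if the collector marked it, or if it was allocated since the
        // last time the newly-allocated bits were discarded.
        return marks.get(atomNumber) || (newlyAllocated && newlyAllocated->get(atomNumber));
    }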
- SetAllMarksFunctor functor; + ASSERT(!m_newlyAllocated); + m_newlyAllocated = adoptPtr(new WTF::Bitmap<atomsPerBlock>()); + + SetNewlyAllocatedFunctor functor(this); forEachCell(functor); FreeCell* next; for (FreeCell* current = head; current; current = next) { next = current->next; reinterpret_cast<JSCell*>(current)->zap(); - clearMarked(current); + clearNewlyAllocated(current); } m_state = Marked; diff --git a/Source/JavaScriptCore/heap/MarkedBlock.h b/Source/JavaScriptCore/heap/MarkedBlock.h index 31bf60b9f..f2f2a720d 100644 --- a/Source/JavaScriptCore/heap/MarkedBlock.h +++ b/Source/JavaScriptCore/heap/MarkedBlock.h @@ -159,6 +159,10 @@ namespace JSC { void setMarked(const void*); void clearMarked(const void*); + bool isNewlyAllocated(const void*); + void setNewlyAllocated(const void*); + void clearNewlyAllocated(const void*); + bool needsSweeping(); #if ENABLE(GGC) @@ -218,6 +222,8 @@ namespace JSC { #else WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic> m_marks; #endif + OwnPtr<WTF::Bitmap<atomsPerBlock> > m_newlyAllocated; + DestructorType m_destructorType; MarkedAllocator* m_allocator; BlockState m_state; @@ -313,6 +319,7 @@ namespace JSC { ASSERT(m_state != New && m_state != FreeListed); m_marks.clearAll(); + m_newlyAllocated.clear(); // This will become true at the end of the mark phase. We set it now to // avoid an extra pass to do so later. @@ -326,7 +333,7 @@ namespace JSC { inline bool MarkedBlock::isEmpty() { - return m_marks.isEmpty() && m_weakSet.isEmpty(); + return m_marks.isEmpty() && m_weakSet.isEmpty() && (!m_newlyAllocated || m_newlyAllocated->isEmpty()); } inline size_t MarkedBlock::cellSize() @@ -375,6 +382,21 @@ namespace JSC { m_marks.clear(atomNumber(p)); } + inline bool MarkedBlock::isNewlyAllocated(const void* p) + { + return m_newlyAllocated->get(atomNumber(p)); + } + + inline void MarkedBlock::setNewlyAllocated(const void* p) + { + m_newlyAllocated->set(atomNumber(p)); + } + + inline void MarkedBlock::clearNewlyAllocated(const void* p) + { + m_newlyAllocated->clear(atomNumber(p)); + } + inline bool MarkedBlock::isLive(const JSCell* cell) { switch (m_state) { @@ -382,7 +404,7 @@ namespace JSC { return true; case Marked: - return m_marks.get(atomNumber(cell)); + return m_marks.get(atomNumber(cell)) || (m_newlyAllocated && isNewlyAllocated(cell)); case New: case FreeListed: diff --git a/Source/JavaScriptCore/heap/SlotVisitor.cpp b/Source/JavaScriptCore/heap/SlotVisitor.cpp index 7a30debda..3919705d0 100644 --- a/Source/JavaScriptCore/heap/SlotVisitor.cpp +++ b/Source/JavaScriptCore/heap/SlotVisitor.cpp @@ -294,6 +294,8 @@ ALWAYS_INLINE void SlotVisitor::internalAppend(JSValue* slot) if (!cell) return; + validate(cell); + if (m_shouldHashConst && cell->isString()) { JSString* string = jsCast<JSString*>(cell); if (string->shouldTryHashConst() && string->tryHashConstLock()) { @@ -355,6 +357,10 @@ void SlotVisitor::validate(JSCell* cell) cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName); CRASH(); } + + // Make sure we can walk the ClassInfo chain + const ClassInfo* info = cell->classInfo(); + do { } while ((info = info->parentClass)); } #else void SlotVisitor::validate(JSCell*) diff --git a/Source/JavaScriptCore/heap/WeakBlock.cpp b/Source/JavaScriptCore/heap/WeakBlock.cpp index 13039e702..99e306b85 100644 --- a/Source/JavaScriptCore/heap/WeakBlock.cpp +++ b/Source/JavaScriptCore/heap/WeakBlock.cpp @@ -102,7 +102,7 @@ void WeakBlock::visit(HeapRootVisitor& heapRootVisitor) continue; const JSValue& jsValue = 
weakImpl->jsValue(); - if (Heap::isMarked(jsValue.asCell())) + if (Heap::isLive(jsValue.asCell())) continue; WeakHandleOwner* weakHandleOwner = weakImpl->weakHandleOwner(); @@ -127,7 +127,7 @@ void WeakBlock::reap() if (weakImpl->state() > WeakImpl::Dead) continue; - if (Heap::isMarked(weakImpl->jsValue().asCell())) { + if (Heap::isLive(weakImpl->jsValue().asCell())) { ASSERT(weakImpl->state() == WeakImpl::Live); continue; } diff --git a/Source/JavaScriptCore/interpreter/Interpreter.cpp b/Source/JavaScriptCore/interpreter/Interpreter.cpp index 0d475b416..397ac8474 100644 --- a/Source/JavaScriptCore/interpreter/Interpreter.cpp +++ b/Source/JavaScriptCore/interpreter/Interpreter.cpp @@ -100,9 +100,9 @@ Interpreter::ErrorHandlingMode::~ErrorHandlingMode() // The Interpreter::StackPolicy class is used to compute a stack capacity // requirement to ensure that we have enough room on the native stack for: -// 1. the max cummulative stack used by the interpreter and all code +// 1. the max cumulative stack used by the interpreter and all code // paths sub of it up till leaf functions. -// 2. the max cummulative stack used by the interpreter before it reaches +// 2. the max cumulative stack used by the interpreter before it reaches // the next checkpoint (execute...() function) in the interpreter. // // The interpreter can be run on different threads and hence, different @@ -116,11 +116,11 @@ Interpreter::ErrorHandlingMode::~ErrorHandlingMode() Interpreter::StackPolicy::StackPolicy(Interpreter& interpreter, const StackBounds& stack) : m_interpreter(interpreter) { - int size = stack.size(); + const size_t size = stack.size(); - const int DEFAULT_REQUIRED_STACK = 1024 * 1024; - const int DEFAULT_MINIMUM_USEABLE_STACK = 128 * 1024; - const int DEFAULT_ERROR_MODE_REQUIRED_STACK = 32 * 1024; + const size_t DEFAULT_REQUIRED_STACK = 1024 * 1024; + const size_t DEFAULT_MINIMUM_USEABLE_STACK = 128 * 1024; + const size_t DEFAULT_ERROR_MODE_REQUIRED_STACK = 32 * 1024; // Here's the policy in a nutshell: // @@ -152,7 +152,7 @@ Interpreter::StackPolicy::StackPolicy(Interpreter& interpreter, const StackBound // ^ ^ // start current sp // - // This smaller requried capacity also means that we won't re-trigger + // This smaller required capacity also means that we won't re-trigger // a stack overflow for processing the exception caused by the original // StackOverflowError. // @@ -169,15 +169,16 @@ Interpreter::StackPolicy::StackPolicy(Interpreter& interpreter, const StackBound // The minimum useable capacity is DEFAULT_MINIMUM_USEABLE_STACK. // In this case, the requiredCapacity is whatever is left of the // total stack capacity after we have give JS its minimum stack - // i.e. requiredCapcity can even be 0 if there's not enough stack. + // i.e. requiredCapacity can even be 0 if there's not enough stack. // Policy 1: Normal mode: required = DEFAULT_REQUIRED_STACK. - // Policy 2: Erro mode: required = DEFAULT_ERROR_MODE_REQUIRED_STACK. - int requiredCapacity = !m_interpreter.m_errorHandlingModeReentry ? + // Policy 2: Error mode: required = DEFAULT_ERROR_MODE_REQUIRED_STACK. + size_t requiredCapacity = !m_interpreter.m_errorHandlingModeReentry ? DEFAULT_REQUIRED_STACK : DEFAULT_ERROR_MODE_REQUIRED_STACK; - int useableStack = size - requiredCapacity; + size_t useableStack = (requiredCapacity <= size) ? 
+ size - requiredCapacity : DEFAULT_MINIMUM_USEABLE_STACK; // Policy 3: Ensure the useable stack is not too small: if (useableStack < DEFAULT_MINIMUM_USEABLE_STACK) @@ -190,9 +191,8 @@ Interpreter::StackPolicy::StackPolicy(Interpreter& interpreter, const StackBound // Re-compute the requiredCapacity based on the adjusted useable stack // size: - // interpreter stack checks: requiredCapacity = size - useableStack; - ASSERT((requiredCapacity >= 0) && (requiredCapacity < size)); + ASSERT(requiredCapacity < size); m_requiredCapacity = requiredCapacity; } @@ -948,10 +948,13 @@ failedJSONP: // object. // Compile source to bytecode if necessary: - JSObject* error = program->compile(callFrame, scope); - if (error) + if (JSObject* error = program->initalizeGlobalProperties(globalData, callFrame, scope)) return checkedReturn(throwError(callFrame, error)); - CodeBlock* codeBlock = &program->generatedBytecode(); + + if (JSObject* error = program->compile(callFrame, scope)) + return checkedReturn(throwError(callFrame, error)); + + ProgramCodeBlock* codeBlock = &program->generatedBytecode(); // Push the call frame for this invocation: ASSERT(codeBlock->numParameters() == 1); // 1 parameter for 'this'. diff --git a/Source/JavaScriptCore/interpreter/VMInspector.cpp b/Source/JavaScriptCore/interpreter/VMInspector.cpp index 566d4e8e0..58bc15075 100644 --- a/Source/JavaScriptCore/interpreter/VMInspector.cpp +++ b/Source/JavaScriptCore/interpreter/VMInspector.cpp @@ -28,6 +28,10 @@ #if ENABLE(VMINSPECTOR) +#include <stdio.h> +#include <wtf/ASCIICType.h> +#include <wtf/text/WTFString.h> + namespace JSC { const char* VMInspector::getTypeName(JSValue value) @@ -105,6 +109,465 @@ int VMInspector::countFrames(CallFrame* frame) return count; } + +//============================================================================ +// class FormatPrinter +// - implements functionality to support fprintf. +// +// The FormatPrinter classes do the real formatting and printing. +// By default, the superclass FormatPrinter will print to stdout (printf). +// Each of the subclass will implement the other ...printf() options. +// The subclasses are: +// +// FileFormatPrinter - fprintf +// StringFormatPrinter - sprintf +// StringNFormatPrinter - snprintf + +class FormatPrinter { +public: + virtual ~FormatPrinter() { } + + void print(const char* format, va_list args); + +protected: + // Low level printers: + bool printArg(const char* format, ...); + virtual bool printArg(const char* format, va_list args); + + // JS type specific printers: + void printWTFString(va_list args, bool verbose); +}; + + +// The public print() function is the real workhorse behind the printf +// family of functions. print() deciphers the % formatting, translate them +// to primitive formats, and dispatches to underlying printArg() functions +// to do the printing. +// +// The non-public internal printArg() function is virtual and is responsible +// for handling the variations between printf, fprintf, sprintf, and snprintf. + +void FormatPrinter::print(const char* format, va_list args) +{ + const char* p = format; + const char* errorStr; + + // buffer is only used for 2 purposes: + // 1. To temporarily hold a copy of normal chars (not needing formatting) + // to be passed to printArg() and printed. + // + // The incoming format string may contain a string of normal chars much + // longer than 128, but we handle this by breaking them out to 128 chars + // fragments and printing each fragment before re-using the buffer to + // load up the next fragment. 
+ // + // 2. To hold a single "%..." format to be passed to printArg() to process + // a single va_arg. + + char buffer[129]; // 128 chars + null terminator. + char* end = &buffer[sizeof(buffer) - 1]; + const char* startOfFormatSpecifier = 0; + + while (true) { + char c = *p++; + char* curr = buffer; + + // Print leading normal chars: + while (c != '\0' && c != '%') { + *curr++ = c; + if (curr == end) { + // Out of buffer space. Flush the fragment, and start over. + *curr = '\0'; + bool success = printArg("%s", buffer); + if (!success) { + errorStr = buffer; + goto handleError; + } + curr = buffer; + } + c = *p++; + } + // If we have stuff in the buffer, flush the fragment: + if (curr != buffer) { + ASSERT(curr < end + 1); + *curr = '\0'; + bool success = printArg("%s", buffer); + if (!success) { + errorStr = buffer; + goto handleError; + } + } + + // End if there are not more chars to print: + if (c == '\0') + break; + + // If we get here, we've must have seen a '%': + startOfFormatSpecifier = p - 1; + ASSERT(*startOfFormatSpecifier == '%'); + c = *p++; + + // Check for "%%" case: + if (c == '%') { + bool success = printArg("%c", '%'); + if (!success) { + errorStr = p - 2; + goto handleError; + } + continue; + } + + // Check for JS (%J<x>) formatting extensions: + if (c == 'J') { + bool verbose = false; + + c = *p++; + if (UNLIKELY(c == '\0')) { + errorStr = p - 2; // Rewind to % in "%J\0" + goto handleError; + } + + if (c == '+') { + verbose = true; + c= *p++; + if (UNLIKELY(c == '\0')) { + errorStr = p - 3; // Rewind to % in "%J+\0" + goto handleError; + } + } + + switch (c) { + // %Js - WTF::String* + case 's': { + printWTFString(args, verbose); + continue; + } + } // END switch. + + // Check for non-JS extensions: + } else if (c == 'b') { + int value = va_arg(args, int); + printArg("%s", value ? "TRUE" : "FALSE"); + continue; + } + + // If we didn't handle the format in one of the above cases, + // rewind p and let the standard formatting check handle it + // if possible: + p = startOfFormatSpecifier; + ASSERT(*p == '%'); + + // Check for standard formatting: + // A format specifier always starts with a % and ends with some + // alphabet. We'll do the simple thing and scan until the next + // alphabet, or the end of string. + + // In the following, we're going to use buffer as storage for a copy + // of a single format specifier. Hence, conceptually, we can think of + // 'buffer' as synonymous with 'argFormat' here: + +#define ABORT_IF_FORMAT_TOO_LONG(curr) \ + do { \ + if (UNLIKELY(curr >= end)) \ + goto formatTooLong; \ + } while (false) + + curr = buffer; + *curr++ = *p++; // Output the first % in the format specifier. + c = *p++; // Grab the next char in the format specifier. + + // Checks for leading modifiers e.g. "%-d": + // 0, -, ' ', +, '\'' + if (c == '0' || c == '-' || c == ' ' || c == '+' || c == '\'' || c == '#') { + ABORT_IF_FORMAT_TOO_LONG(curr); + *curr++ = c; + c = *p++; + } + + // Checks for decimal digit field width modifiers e.g. "%2f": + while (c >= '0' && c <= '9') { + ABORT_IF_FORMAT_TOO_LONG(curr); + *curr++ = c; + c = *p++; + } + + // Checks for '.' e.g. "%2.f": + if (c == '.') { + ABORT_IF_FORMAT_TOO_LONG(curr); + *curr++ = c; + c = *p++; + + // Checks for decimal digit precision modifiers e.g. "%.2f": + while (c >= '0' && c <= '9') { + ABORT_IF_FORMAT_TOO_LONG(curr); + *curr++ = c; + c = *p++; + } + } + + // Checks for the modifier <m> where <m> can be: + // l, h, j, t, z + // e.g. 
"%ld" + if (c == 'l' || c == 'h' || c == 'j' || c == 't' || c == 'z' || c == 'L') { + ABORT_IF_FORMAT_TOO_LONG(curr); + *curr++ = c; + char prevChar = c; + c = *p++; + + // Checks for the modifier ll or hh in %<x><m>: + if ((prevChar == 'l' || prevChar == 'h') && c == prevChar) { + ABORT_IF_FORMAT_TOO_LONG(curr); + *curr++ = c; + c = *p++; + } + } + + // Checks for %<x> where <x> can be: + // d, i, n, o, u, x, X + // But hey, we're just going to do the simple thing and allow any + // alphabet. The user is expected to pass correct format specifiers. + // We won't do any format checking here. We'll just pass it on, and the + // underlying ...printf() implementation may do the needed checking + // at its discretion. + while (c != '\0' && !isASCIIAlpha(c)) { + ABORT_IF_FORMAT_TOO_LONG(curr); + *curr++ = c; + c = *p++; + } + + ABORT_IF_FORMAT_TOO_LONG(curr); + *curr++ = c; + if (c == '\0') { + // Uh oh. Bad format. We should have gotten an alphabet instead. + // Print the supposed format as a string instead: + errorStr = buffer; + goto handleError; + } + + // Otherwise, we have the alpha that terminates the format. + // Terminate the buffer (i.e. argFormat) string: + ASSERT(isASCIIAlpha(c)); + ABORT_IF_FORMAT_TOO_LONG(curr); + *curr = '\0'; + + bool success = printArg(buffer, args); + if (!success) { + errorStr = buffer; + goto handleError; + } + } +#undef ABORT_IF_FORMAT_TOO_LONG + + return; + +formatTooLong: + // Print the error string: + ASSERT(!!startOfFormatSpecifier); + p = startOfFormatSpecifier; + ASSERT(p >= format); + printArg("ERROR @ Format too long at \"%s\"\n", p); + return; + +handleError: + // We've got an error. Can't do any more work. Print an error message if + // possible and then just return. + + // The errorStr may be pointing into the middle of buffer, or the original + // format string. Move the string to buffer for consistency, and also so + // that we can strip it of newlines below. + if (errorStr != buffer) { + size_t length = strlen(errorStr); + if (length > sizeof(buffer) - 1) + length = sizeof(buffer) - 1; + memmove(buffer, errorStr, length); + buffer[length] = '\0'; // Terminate the moved error string. + } + // Strip the newlines: + char* cp = buffer; + while (*cp) { + if (*cp == '\n' || *cp == '\r') + *cp = ' '; + cp++; + } + // Print the error string: + printArg("ERROR @ \"%s\"\n", buffer); +} + + +bool FormatPrinter::printArg(const char* format, ...) +{ + va_list args; + va_start(args, format); + bool success = printArg(format, args); + va_end(args); + return success; +} + +bool FormatPrinter::printArg(const char* format, va_list args) +{ + int count = ::vprintf(format, args); + return (count >= 0); // Fail if less than 0 chars printed. +} + + +// %Js - WTF::String* +// verbose mode prints: WTF::String "<your string>" +void FormatPrinter::printWTFString(va_list args, bool verbose) +{ + const String* str = va_arg(args, const String*); + + // Print verbose header if appropriate: + if (verbose) + printArg("WTF::String \""); + + // Print the string itself: + if (!str->isEmpty()) { + if (str->is8Bit()) { + const LChar* chars = str->characters8(); + printArg("%s", reinterpret_cast<const char*>(chars)); + } else { + const UChar* chars = str->characters16(); + printArg("%S", reinterpret_cast<const wchar_t*>(chars)); + } + } + + // Print verbose footer if appropriate: + if (verbose) + printArg("\""); +} + + +//============================================================================ +// class FileFormatPrinter +// - implements functionality to support fprintf. 
+ +class FileFormatPrinter: public FormatPrinter { +public: + FileFormatPrinter(FILE*); +private: + virtual bool printArg(const char* format, va_list args); + + FILE* m_file; +}; + +FileFormatPrinter::FileFormatPrinter(FILE* file) + : m_file(file) +{ +} + +bool FileFormatPrinter::printArg(const char* format, va_list args) +{ + int count = ::vfprintf(m_file, format, args); + return (count >= 0); // Fail if less than 0 chars printed. +} + + +//============================================================================ +// class StringFormatPrinter +// - implements functionality to support sprintf. + +class StringFormatPrinter: public FormatPrinter { +public: + StringFormatPrinter(char* buffer); +private: + virtual bool printArg(const char* format, va_list args); + + char* m_buffer; +}; + +StringFormatPrinter::StringFormatPrinter(char* buffer) + : m_buffer(buffer) +{ +} + +bool StringFormatPrinter::printArg(const char* format, va_list args) +{ + int count = ::vsprintf(m_buffer, format, args); + m_buffer += count; + return (count >= 0); // Fail if less than 0 chars printed. +} + + +//============================================================================ +// class StringNFormatPrinter +// - implements functionality to support snprintf. + +class StringNFormatPrinter: public FormatPrinter { +public: + StringNFormatPrinter(char* buffer, size_t); +private: + virtual bool printArg(const char* format, va_list args); + + char* m_buffer; + size_t m_size; +}; + + +StringNFormatPrinter::StringNFormatPrinter(char* buffer, size_t size) + : m_buffer(buffer) + , m_size(size) +{ +} + +bool StringNFormatPrinter::printArg(const char* format, va_list args) +{ + if (m_size > 0) { + int count = ::vsnprintf(m_buffer, m_size, format, args); + + // According to vsnprintf specs, ... + bool success = (count >= 0); + if (static_cast<size_t>(count) >= m_size) { + // If count > size, then we didn't have enough buffer space. + count = m_size; + } + + // Adjust the buffer to what's left if appropriate: + if (success) { + m_buffer += count; + m_size -= count; + } + return success; + } + // No more room to print. Declare it a fail: + return false; +} + + +//============================================================================ +// VMInspector printf family of methods: + +void VMInspector::fprintf(FILE* file, const char* format, ...) +{ + va_list args; + va_start(args, format); + FileFormatPrinter(file).print(format, args); + va_end(args); +} + +void VMInspector::printf(const char* format, ...) +{ + va_list args; + va_start(args, format); + FormatPrinter().print(format, args); + va_end(args); +} + +void VMInspector::sprintf(char* buffer, const char* format, ...) +{ + va_list args; + va_start(args, format); + StringFormatPrinter(buffer).print(format, args); + va_end(args); +} + +void VMInspector::snprintf(char* buffer, size_t size, const char* format, ...) 
+{ + va_list args; + va_start(args, format); + StringNFormatPrinter(buffer, size).print(format, args); + va_end(args); +} + } // namespace JSC #endif // ENABLE(VMINSPECTOR) diff --git a/Source/JavaScriptCore/interpreter/VMInspector.h b/Source/JavaScriptCore/interpreter/VMInspector.h index 6806cafa1..81ca26c6f 100644 --- a/Source/JavaScriptCore/interpreter/VMInspector.h +++ b/Source/JavaScriptCore/interpreter/VMInspector.h @@ -28,23 +28,62 @@ #define ENABLE_VMINSPECTOR 0 -#if ENABLE(VMINSPECTOR) - #include "CallFrame.h" #include "JSValue.h" +#include <stdarg.h> +#include <stdio.h> +#include <wtf/text/WTFString.h> namespace JSC { +#if ENABLE(VMINSPECTOR) + class VMInspector { public: static JS_EXPORT_PRIVATE const char* getTypeName(JSValue); static JS_EXPORT_PRIVATE void dumpFrame0(CallFrame*); static JS_EXPORT_PRIVATE void dumpFrame(CallFrame*, const char* prefix = 0, const char* funcName = 0, const char* file = 0, int line = -1); static JS_EXPORT_PRIVATE int countFrames(CallFrame*); -}; -} // namespace JSC + // Special family of ...printf() functions that support, in addition to the + // standard % formats (e.g. %d, %s, etc), the following extra JSC formatting + // options, %J<x>, where <x> consists of: + // + // + - verbose mode modifier. + // Used in combination with other options. Must come after the %J. + // s - WTF::String* + // + // Examples of usage: + // + // WTF::String str("My WTF String"); + // + // // Printing the string. Will print: + // // The wtf string says: "My WTF String" and is NOT EMPTY. + // + // VMInspector::printf("The wtf string says: \"%Js\" and is %s\n", + // &str, str.isEmpty()?"EMPTY":"NOT EMPTY"); + // + // // Printing the string with verbose mode. Will print: + // // <WTF::String "My WTF String"> + // + // VMInspector::printf("<%J+s>\n", &str); + // + // Also added some convenience non-JS formats: + // + // %b - boolean (va_args will look for an int). + // Prints TRUE if non-zero, else prints FALSE. + // + // Caution: the user is expected to pass the correctly matched arguments + // to pair with the corresponding % fomatting. 
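A short usage sketch for the bounded-buffer variant declared below, following the same conventions as the printf examples above (only a sketch: the function name is hypothetical, it assumes VMINSPECTOR is enabled, and it passes the %Js argument as a WTF::String* exactly as printWTFString() expects):

    #if ENABLE(VMINSPECTOR)
    static void dumpCalleeName()
    {
        WTF::String name("Array.prototype.push");
        char buffer[64];
        // %Js consumes a WTF::String*; %b consumes an int printed as TRUE/FALSE.
        JSC::VMInspector::snprintf(buffer, sizeof(buffer),
            "callee \"%Js\" inlined: %b\n", &name, 0);
    }
    #endif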
+ + static JS_EXPORT_PRIVATE void fprintf(FILE*, const char* format, ...); + static JS_EXPORT_PRIVATE void printf(const char* format, ...); + static JS_EXPORT_PRIVATE void sprintf(char*, const char* format, ...); + static JS_EXPORT_PRIVATE void snprintf(char*, size_t, const char* format, ...); +}; #endif // ENABLE(VMINSPECTOR) +} // namespace JSC + #endif // VMInspector.h diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp index 2d2991b5f..3102c7693 100644 --- a/Source/JavaScriptCore/jit/JIT.cpp +++ b/Source/JavaScriptCore/jit/JIT.cpp @@ -301,7 +301,6 @@ void JIT::privateCompileMainPass() DEFINE_OP(op_loop_if_true) DEFINE_OP(op_loop_if_false) DEFINE_OP(op_lshift) - DEFINE_OP(op_method_check) DEFINE_OP(op_mod) DEFINE_OP(op_mov) DEFINE_OP(op_mul) @@ -336,6 +335,8 @@ void JIT::privateCompileMainPass() DEFINE_OP(op_put_by_index) DEFINE_OP(op_put_by_val) DEFINE_OP(op_put_getter_setter) + case op_init_global_const_nop: + NEXT_OPCODE(op_init_global_const_nop); DEFINE_OP(op_init_global_const) DEFINE_OP(op_init_global_const_check) @@ -372,7 +373,7 @@ void JIT::privateCompileMainPass() DEFINE_OP(op_tear_off_activation) DEFINE_OP(op_tear_off_arguments) DEFINE_OP(op_throw) - DEFINE_OP(op_throw_reference_error) + DEFINE_OP(op_throw_static_error) DEFINE_OP(op_to_jsnumber) DEFINE_OP(op_to_primitive) @@ -489,7 +490,6 @@ void JIT::privateCompileSlowCases() DEFINE_SLOWCASE_OP(op_loop_if_true) DEFINE_SLOWCASE_OP(op_loop_if_false) DEFINE_SLOWCASE_OP(op_lshift) - DEFINE_SLOWCASE_OP(op_method_check) DEFINE_SLOWCASE_OP(op_mod) DEFINE_SLOWCASE_OP(op_mul) DEFINE_SLOWCASE_OP(op_negate) @@ -569,13 +569,6 @@ ALWAYS_INLINE void PropertyStubCompilationInfo::copyToStubInfo(StructureStubInfo info.hotPathBegin = linkBuffer.locationOf(hotPathBegin); switch (m_type) { - case MethodCheck: { - CodeLocationDataLabelPtr structureToCompareLocation = linkBuffer.locationOf(methodCheckStructureToCompare); - info.patch.baseline.methodCheckProtoObj = MacroAssembler::differenceBetweenCodePtr(structureToCompareLocation, linkBuffer.locationOf(methodCheckProtoObj)); - info.patch.baseline.methodCheckProtoStructureToCompare = MacroAssembler::differenceBetweenCodePtr(structureToCompareLocation, linkBuffer.locationOf(methodCheckProtoStructureToCompare)); - info.patch.baseline.methodCheckPutFunction = MacroAssembler::differenceBetweenCodePtr(structureToCompareLocation, linkBuffer.locationOf(methodCheckPutFunction)); - // No break - fall through to GetById. 
- } case GetById: { CodeLocationLabel hotPathBeginLocation = linkBuffer.locationOf(hotPathBegin); info.patch.baseline.u.get.structureToCompare = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getStructureToCompare)); @@ -792,14 +785,6 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin); info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther); } - unsigned methodCallCount = m_methodCallCompilationInfo.size(); - m_codeBlock->addMethodCallLinkInfos(methodCallCount); - for (unsigned i = 0; i < methodCallCount; ++i) { - MethodCallLinkInfo& info = m_codeBlock->methodCallLinkInfo(i); - info.bytecodeIndex = m_methodCallCompilationInfo[i].bytecodeIndex; - info.cachedStructure.setLocation(patchBuffer.locationOf(m_methodCallCompilationInfo[i].structureToCompare)); - info.callReturnLocation = m_codeBlock->structureStubInfo(m_methodCallCompilationInfo[i].propertyAccessIndex).callReturnLocation; - } #if ENABLE(DFG_JIT) || ENABLE(LLINT) if (canBeOptimized() diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h index cd832c21f..dcf87d352 100644 --- a/Source/JavaScriptCore/jit/JIT.h +++ b/Source/JavaScriptCore/jit/JIT.h @@ -150,7 +150,7 @@ namespace JSC { enum PropertyStubPutById_T { PropertyStubPutById }; struct PropertyStubCompilationInfo { - enum Type { GetById, PutById, MethodCheck } m_type; + enum Type { GetById, PutById } m_type; unsigned bytecodeIndex; MacroAssembler::Call callReturnLocation; @@ -173,10 +173,6 @@ namespace JSC { MacroAssembler::DataLabel32 putDisplacementLabel1; MacroAssembler::DataLabel32 putDisplacementLabel2; #endif - MacroAssembler::DataLabelPtr methodCheckStructureToCompare; - MacroAssembler::DataLabelPtr methodCheckProtoObj; - MacroAssembler::DataLabelPtr methodCheckProtoStructureToCompare; - MacroAssembler::DataLabelPtr methodCheckPutFunction; #if !ASSERT_DISABLED PropertyStubCompilationInfo() @@ -241,7 +237,7 @@ namespace JSC { void slowCaseInfo(PropertyStubGetById_T, MacroAssembler::Label coldPathBegin, MacroAssembler::Call call) { - ASSERT(m_type == GetById || m_type == MethodCheck); + ASSERT(m_type == GetById); callReturnLocation = call; getColdPathBegin = coldPathBegin; } @@ -252,15 +248,6 @@ namespace JSC { callReturnLocation = call; } - void addMethodCheckInfo(MacroAssembler::DataLabelPtr structureToCompare, MacroAssembler::DataLabelPtr protoObj, MacroAssembler::DataLabelPtr protoStructureToCompare, MacroAssembler::DataLabelPtr putFunction) - { - m_type = MethodCheck; - methodCheckStructureToCompare = structureToCompare; - methodCheckProtoObj = protoObj; - methodCheckProtoStructureToCompare = protoStructureToCompare; - methodCheckPutFunction = putFunction; - } - void copyToStubInfo(StructureStubInfo& info, LinkBuffer &patchBuffer); }; @@ -291,18 +278,6 @@ namespace JSC { unsigned bytecodeIndex; }; - struct MethodCallCompilationInfo { - MethodCallCompilationInfo(unsigned bytecodeIndex, unsigned propertyAccessIndex) - : bytecodeIndex(bytecodeIndex) - , propertyAccessIndex(propertyAccessIndex) - { - } - - unsigned bytecodeIndex; - MacroAssembler::DataLabelPtr structureToCompare; - unsigned propertyAccessIndex; - }; - // Near calls can only be patched to other JIT code, regular calls can be patched to JIT code or relinked to stub functions. 
void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction); void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction); @@ -407,7 +382,6 @@ namespace JSC { static void resetPatchPutById(RepatchBuffer&, StructureStubInfo*); static void patchGetByIdSelf(CodeBlock*, StructureStubInfo*, Structure*, PropertyOffset cachedOffset, ReturnAddressPtr); static void patchPutByIdReplace(CodeBlock*, StructureStubInfo*, Structure*, PropertyOffset cachedOffset, ReturnAddressPtr, bool direct); - static void patchMethodCallProto(JSGlobalData&, CodeBlock*, MethodCallLinkInfo&, StructureStubInfo&, JSObject*, Structure*, JSObject*, ReturnAddressPtr); static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ReturnAddressPtr returnAddress) { @@ -428,6 +402,7 @@ namespace JSC { void privateCompileLinkPass(); void privateCompileSlowCases(); JITCode privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffort); + void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, ReturnAddressPtr, CallFrame*); void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset); void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, CallFrame*); @@ -466,7 +441,9 @@ namespace JSC { Jump emitJumpIfNotObject(RegisterID structureReg); Jump emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType); - void testPrototype(JSValue, JumpList& failureCases); + Jump addStructureTransitionCheck(JSCell*, Structure*, StructureStubInfo*, RegisterID scratch); + void addStructureTransitionCheck(JSCell*, Structure*, StructureStubInfo*, JumpList& failureCases, RegisterID scratch); + void testPrototype(JSValue, JumpList& failureCases, StructureStubInfo*); enum WriteBarrierMode { UnconditionalWriteBarrier, ShouldFilterImmediates }; // value register in write barrier is used before any scratch registers @@ -543,8 +520,8 @@ namespace JSC { void emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex); void emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterID tag); - void compileGetByIdHotPath(); - void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false); + void compileGetByIdHotPath(Identifier*); + void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier*, Vector<SlowCaseEntry>::iterator&); void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset); void compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset); void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset, FinalObjectMode = MayBeFinal); @@ -559,9 +536,6 @@ namespace JSC { // sequenceOpCall static const int sequenceOpCallInstructionSpace = 12; static const int sequenceOpCallConstantSpace = 2; - // sequenceMethodCheck - static const int sequenceMethodCheckInstructionSpace = 40; - static const int sequenceMethodCheckConstantSpace = 6; // sequenceGetByIdHotPath static 
const int sequenceGetByIdHotPathInstructionSpace = 36; static const int sequenceGetByIdHotPathConstantSpace = 4; @@ -575,9 +549,6 @@ namespace JSC { // sequenceOpCall static const int sequenceOpCallInstructionSpace = 12; static const int sequenceOpCallConstantSpace = 2; - // sequenceMethodCheck - static const int sequenceMethodCheckInstructionSpace = 40; - static const int sequenceMethodCheckConstantSpace = 6; // sequenceGetByIdHotPath static const int sequenceGetByIdHotPathInstructionSpace = 36; static const int sequenceGetByIdHotPathConstantSpace = 5; @@ -626,7 +597,7 @@ namespace JSC { void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase); void compileGetByIdHotPath(int baseVReg, Identifier*); - void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false); + void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier*, Vector<SlowCaseEntry>::iterator&); void compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset); void compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset); void compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch, FinalObjectMode = MayBeFinal); @@ -708,7 +679,6 @@ namespace JSC { void emit_op_loop_if_true(Instruction*); void emit_op_loop_if_false(Instruction*); void emit_op_lshift(Instruction*); - void emit_op_method_check(Instruction*); void emit_op_mod(Instruction*); void emit_op_mov(Instruction*); void emit_op_mul(Instruction*); @@ -761,7 +731,7 @@ namespace JSC { void emit_op_tear_off_activation(Instruction*); void emit_op_tear_off_arguments(Instruction*); void emit_op_throw(Instruction*); - void emit_op_throw_reference_error(Instruction*); + void emit_op_throw_static_error(Instruction*); void emit_op_to_jsnumber(Instruction*); void emit_op_to_primitive(Instruction*); void emit_op_unexpected_load(Instruction*); @@ -803,7 +773,6 @@ namespace JSC { void emitSlow_op_loop_if_true(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_loop_if_false(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_lshift(Instruction*, Vector<SlowCaseEntry>::iterator&); - void emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_negate(Instruction*, Vector<SlowCaseEntry>::iterator&); @@ -933,7 +902,6 @@ namespace JSC { Vector<PropertyStubCompilationInfo> m_propertyAccessCompilationInfo; Vector<ByValCompilationInfo> m_byValCompilationInfo; Vector<StructureStubCompilationInfo> m_callStructureStubCompilationInfo; - Vector<MethodCallCompilationInfo> m_methodCallCompilationInfo; Vector<JumpTable> m_jmpTable; unsigned m_bytecodeOffset; diff --git a/Source/JavaScriptCore/jit/JITDriver.h b/Source/JavaScriptCore/jit/JITDriver.h index 318b4e7aa..645c65b28 100644 --- a/Source/JavaScriptCore/jit/JITDriver.h +++ b/Source/JavaScriptCore/jit/JITDriver.h @@ -75,7 +75,7 @@ inline bool jitCompileIfAppropriate(ExecState* exec, OwnPtr<CodeBlockType>& code return true; } -inline bool jitCompileFunctionIfAppropriate(ExecState* exec, OwnPtr<FunctionCodeBlock>& codeBlock, JITCode& jitCode, MacroAssemblerCodePtr& jitCodeWithArityCheck, WriteBarrier<SharedSymbolTable>& 
symbolTable, JITCode::JITType jitType, unsigned bytecodeIndex, JITCompilationEffort effort) +inline bool jitCompileFunctionIfAppropriate(ExecState* exec, OwnPtr<FunctionCodeBlock>& codeBlock, JITCode& jitCode, MacroAssemblerCodePtr& jitCodeWithArityCheck, JITCode::JITType jitType, unsigned bytecodeIndex, JITCompilationEffort effort) { JSGlobalData& globalData = exec->globalData(); @@ -99,7 +99,6 @@ inline bool jitCompileFunctionIfAppropriate(ExecState* exec, OwnPtr<FunctionCode } else { if (codeBlock->alternative()) { codeBlock = static_pointer_cast<FunctionCodeBlock>(codeBlock->releaseAlternative()); - symbolTable.set(exec->globalData(), codeBlock->ownerExecutable(), codeBlock->symbolTable()); jitCode = oldJITCode; jitCodeWithArityCheck = oldJITCodeWithArityCheck; return false; diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp index 07c8ace2a..249dcbac9 100644 --- a/Source/JavaScriptCore/jit/JITOpcodes.cpp +++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp @@ -1069,13 +1069,14 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction) jump(regT0); } -void JIT::emit_op_throw_reference_error(Instruction* currentInstruction) +void JIT::emit_op_throw_static_error(Instruction* currentInstruction) { - JITStubCall stubCall(this, cti_op_throw_reference_error); + JITStubCall stubCall(this, cti_op_throw_static_error); if (!m_codeBlock->getConstant(currentInstruction[1].u.operand).isNumber()) stubCall.addArgument(TrustedImm64(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand)))); else stubCall.addArgument(Imm64(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand)))); + stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand)); stubCall.call(); } diff --git a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp index 44123be19..9c5d260ab 100644 --- a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp +++ b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp @@ -1401,12 +1401,13 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction) jump(regT0); } -void JIT::emit_op_throw_reference_error(Instruction* currentInstruction) +void JIT::emit_op_throw_static_error(Instruction* currentInstruction) { unsigned message = currentInstruction[1].u.operand; - JITStubCall stubCall(this, cti_op_throw_reference_error); + JITStubCall stubCall(this, cti_op_throw_static_error); stubCall.addArgument(m_codeBlock->getConstant(message)); + stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand)); stubCall.call(); } diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp index b7be821f6..6362598f4 100644 --- a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp +++ b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp @@ -440,71 +440,6 @@ void JIT::emit_op_del_by_id(Instruction* currentInstruction) stubCall.call(currentInstruction[1].u.operand); } -void JIT::emit_op_method_check(Instruction* currentInstruction) -{ - // Assert that the following instruction is a get_by_id. 
- ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id - || m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id_out_of_line); - - currentInstruction += OPCODE_LENGTH(op_method_check); - unsigned resultVReg = currentInstruction[1].u.operand; - unsigned baseVReg = currentInstruction[2].u.operand; - Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); - - emitGetVirtualRegister(baseVReg, regT0); - - // Do the method check - check the object & its prototype's structure inline (this is the common case). - m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_bytecodeOffset, m_propertyAccessCompilationInfo.size())); - MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last(); - - Jump notCell = emitJumpIfNotJSCell(regT0); - - BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck); - - Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), info.structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))); - DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(TrustedImmPtr(0), regT1); - Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, JSCell::structureOffset()), protoStructureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))); - - // This will be relinked to load the function without doing a load. - DataLabelPtr putFunction = moveWithPatch(TrustedImmPtr(0), regT0); - - END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck); - - Jump match = jump(); - - // Link the failure cases here. - notCell.link(this); - structureCheck.link(this); - protoStructureCheck.link(this); - - // Do a regular(ish) get_by_id (the slow case will be link to - // cti_op_get_by_id_method_check instead of cti_op_get_by_id. - compileGetByIdHotPath(baseVReg, ident); - - match.link(this); - emitValueProfilingSite(m_bytecodeOffset + OPCODE_LENGTH(op_method_check)); - emitPutVirtualRegister(resultVReg); - - // We've already generated the following get_by_id, so make sure it's skipped over. - m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id); - - m_propertyAccessCompilationInfo.last().addMethodCheckInfo(info.structureToCompare, protoObj, protoStructureToCompare, putFunction); -} - -void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) -{ - currentInstruction += OPCODE_LENGTH(op_method_check); - unsigned resultVReg = currentInstruction[1].u.operand; - unsigned baseVReg = currentInstruction[2].u.operand; - Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); - - compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true); - emitValueProfilingSite(m_bytecodeOffset + OPCODE_LENGTH(op_method_check)); - - // We've already generated the following get_by_id, so make sure it's skipped over. - m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id); -} - void JIT::emit_op_get_by_id(Instruction* currentInstruction) { unsigned resultVReg = currentInstruction[1].u.operand; @@ -517,7 +452,7 @@ void JIT::emit_op_get_by_id(Instruction* currentInstruction) emitPutVirtualRegister(resultVReg); } -void JIT::compileGetByIdHotPath(int baseVReg, Identifier*) +void JIT::compileGetByIdHotPath(int baseVReg, Identifier* ident) { // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched. 
// Additionally, for get_by_id we need patch the offset of the branch to the slow case (we patch this to jump @@ -525,6 +460,11 @@ void JIT::compileGetByIdHotPath(int baseVReg, Identifier*) // to jump back to if one of these trampolies finds a match. emitJumpSlowCaseIfNotJSCell(regT0, baseVReg); + + if (*ident == m_globalData->propertyNames->length && canBeOptimized()) { + loadPtr(Address(regT0, JSCell::structureOffset()), regT1); + emitArrayProfilingSiteForBytecodeIndex(regT1, regT2, m_bytecodeOffset); + } BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath); @@ -550,11 +490,11 @@ void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCase unsigned baseVReg = currentInstruction[2].u.operand; Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); - compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false); + compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter); emitValueProfilingSite(); } -void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck) +void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter) { // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset // so that we only need track one pointer into the slow case code - we track a pointer to the location @@ -568,7 +508,7 @@ void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase); Label coldPathBegin(this); - JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id); + JITStubCall stubCall(this, cti_op_get_by_id); stubCall.addArgument(regT0); stubCall.addArgument(TrustedImmPtr(ident)); Call call = stubCall.call(resultVReg); @@ -676,7 +616,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure failureCases.append(emitJumpIfNotJSCell(regT0)); failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure))); - testPrototype(oldStructure->storedPrototype(), failureCases); + testPrototype(oldStructure->storedPrototype(), failureCases, stubInfo); ASSERT(oldStructure->storedPrototype().isNull() || oldStructure->storedPrototype().asCell()->structure() == chain->head()->get()); @@ -684,7 +624,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure if (!direct) { for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) { ASSERT((*it)->storedPrototype().isNull() || (*it)->storedPrototype().asCell()->structure() == it[1].get()); - testPrototype((*it)->storedPrototype(), failureCases); + testPrototype((*it)->storedPrototype(), failureCases, stubInfo); } } @@ -788,7 +728,6 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress) // Check eax is an array loadPtr(Address(regT0, JSCell::structureOffset()), regT2); - emitArrayProfilingSiteForBytecodeIndex(regT2, regT1, stubInfo->bytecodeIndex); Jump failureCases1 = branchTest32(Zero, regT2, TrustedImm32(IsArray)); Jump failureCases2 = branchTest32(Zero, regT2, TrustedImm32(IndexingShapeMask)); @@ -837,8 +776,7 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str Jump failureCases1 = checkStructure(regT0, structure); // Check the prototype object's Structure had not changed. 
- move(TrustedImmPtr(protoObject), regT3); - Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure)); + Jump failureCases2 = addStructureTransitionCheck(protoObject, prototypeStructure, stubInfo, regT3); bool needsStubLink = false; @@ -867,7 +805,8 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str // Use the patch information to link the failure cases back to the original slow case routine. CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin); patchBuffer.link(failureCases1, slowCaseBegin); - patchBuffer.link(failureCases2, slowCaseBegin); + if (failureCases2.isSet()) + patchBuffer.link(failureCases2, slowCaseBegin); // On success return back to the hot patch code, at a point it will perform the store to dest for us. patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); @@ -972,8 +911,7 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi Jump failureCases1 = checkStructure(regT0, structure); // Check the prototype object's Structure had not changed. - move(TrustedImmPtr(protoObject), regT3); - Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure)); + Jump failureCases2 = addStructureTransitionCheck(protoObject, prototypeStructure, stubInfo, regT3); // Checks out okay! bool needsStubLink = false; @@ -1013,7 +951,8 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi // Use the patch information to link the failure cases back to the original slow case routine. CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures->list[currentIndex - 1].stubRoutine)); patchBuffer.link(failureCases1, lastProtoBegin); - patchBuffer.link(failureCases2, lastProtoBegin); + if (failureCases2.isSet()) + patchBuffer.link(failureCases2, lastProtoBegin); // On success return back to the hot patch code, at a point it will perform the store to dest for us. 
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); @@ -1050,7 +989,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi for (unsigned i = 0; i < count; ++i, ++it) { protoObject = asObject(currStructure->prototypeForLookup(callFrame)); currStructure = it->get(); - testPrototype(protoObject, bucketsOfFail); + testPrototype(protoObject, bucketsOfFail, stubInfo); } ASSERT(protoObject); @@ -1129,7 +1068,7 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str for (unsigned i = 0; i < count; ++i, ++it) { protoObject = asObject(currStructure->prototypeForLookup(callFrame)); currStructure = it->get(); - testPrototype(protoObject, bucketsOfFail); + testPrototype(protoObject, bucketsOfFail, stubInfo); } ASSERT(protoObject); @@ -1304,28 +1243,40 @@ void JIT::emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, #endif } -void JIT::testPrototype(JSValue prototype, JumpList& failureCases) +JIT::Jump JIT::addStructureTransitionCheck(JSCell* object, Structure* structure, StructureStubInfo* stubInfo, RegisterID scratch) { - if (prototype.isNull()) - return; - - ASSERT(prototype.isCell()); - move(TrustedImmPtr(prototype.asCell()), regT3); - failureCases.append(branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototype.asCell()->structure()))); + if (object->structure() == structure && structure->transitionWatchpointSetIsStillValid()) { + structure->addTransitionWatchpoint(stubInfo->addWatchpoint(m_codeBlock)); +#if !ASSERT_DISABLED + move(TrustedImmPtr(object), scratch); + Jump ok = branchPtr(Equal, Address(scratch, JSCell::structureOffset()), TrustedImmPtr(structure)); + breakpoint(); + ok.link(this); +#endif + Jump result; // Returning an unset jump this way because otherwise VC++ would complain. 
+ return result; + } + + move(TrustedImmPtr(object), scratch); + return branchPtr(NotEqual, Address(scratch, JSCell::structureOffset()), TrustedImmPtr(structure)); } -void JIT::patchMethodCallProto(JSGlobalData& globalData, CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, StructureStubInfo& stubInfo, JSObject* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress) +void JIT::addStructureTransitionCheck(JSCell* object, Structure* structure, StructureStubInfo* stubInfo, JumpList& failureCases, RegisterID scratch) { - RepatchBuffer repatchBuffer(codeBlock); - - CodeLocationDataLabelPtr structureLocation = methodCallLinkInfo.cachedStructure.location(); - methodCallLinkInfo.cachedStructure.set(globalData, structureLocation, codeBlock->ownerExecutable(), structure); + Jump failureCase = addStructureTransitionCheck(object, structure, stubInfo, scratch); + if (!failureCase.isSet()) + return; - Structure* prototypeStructure = proto->structure(); - methodCallLinkInfo.cachedPrototypeStructure.set(globalData, structureLocation.dataLabelPtrAtOffset(stubInfo.patch.baseline.methodCheckProtoStructureToCompare), codeBlock->ownerExecutable(), prototypeStructure); - methodCallLinkInfo.cachedPrototype.set(globalData, structureLocation.dataLabelPtrAtOffset(stubInfo.patch.baseline.methodCheckProtoObj), codeBlock->ownerExecutable(), proto); - methodCallLinkInfo.cachedFunction.set(globalData, structureLocation.dataLabelPtrAtOffset(stubInfo.patch.baseline.methodCheckPutFunction), codeBlock->ownerExecutable(), callee); - repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_method_check_update)); + failureCases.append(failureCase); +} + +void JIT::testPrototype(JSValue prototype, JumpList& failureCases, StructureStubInfo* stubInfo) +{ + if (prototype.isNull()) + return; + + ASSERT(prototype.isCell()); + addStructureTransitionCheck(prototype.asCell(), prototype.asCell()->structure(), stubInfo, failureCases, regT3); } bool JIT::isDirectPutById(StructureStubInfo* stubInfo) @@ -1377,9 +1328,11 @@ void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_globalData->int32ArrayDescriptor(), 4, SignedTypedArray); break; case JITUint8Array: - case JITUint8ClampedArray: slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_globalData->uint8ArrayDescriptor(), 1, UnsignedTypedArray); break; + case JITUint8ClampedArray: + slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_globalData->uint8ClampedArrayDescriptor(), 1, UnsignedTypedArray); + break; case JITUint16Array: slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_globalData->uint16ArrayDescriptor(), 2, UnsignedTypedArray); break; @@ -1400,8 +1353,8 @@ void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock); - patchBuffer.link(badType, CodeLocationLabel(returnAddress.value()).labelAtOffset(byValInfo->returnAddressToSlowPath)); - patchBuffer.link(slowCases, CodeLocationLabel(returnAddress.value()).labelAtOffset(byValInfo->returnAddressToSlowPath)); + patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath)); + patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath)); 
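The addStructureTransitionCheck / testPrototype rewrite above trades an always-emitted structure comparison for a choice: if the expected structure's transition watchpoint set is still valid, register a watchpoint on the stub and emit no code at all, returning an unset Jump; otherwise emit the compare as before. Callers then link the failure jump only when isSet() says one exists. A self-contained sketch of that control flow, with MaybeJump and StubStructure as illustrative stand-ins rather than JSC types:

#include <cstdio>

struct MaybeJump {
    bool emitted = false;                  // "unset" means no runtime check was generated
    bool isSet() const { return emitted; }
};

struct StubStructure {
    bool transitionWatchpointStillValid = true;
    int watchpoints = 0;
    void addTransitionWatchpoint() { ++watchpoints; }  // invalidation would discard the stub
};

// Mirrors the shape of the new helper: rely on a watchpoint when possible,
// otherwise hand back a set jump the caller must link to the slow case.
MaybeJump addStructureTransitionCheck(StubStructure& expected, bool objectHasExpectedStructure)
{
    if (objectHasExpectedStructure && expected.transitionWatchpointStillValid) {
        expected.addTransitionWatchpoint();
        return MaybeJump{};                // unset jump: nothing for the caller to link
    }
    return MaybeJump{true};                // set jump: an explicit check was "emitted"
}

int main()
{
    StubStructure s;
    MaybeJump failureCase = addStructureTransitionCheck(s, true);
    if (failureCase.isSet())
        std::puts("link failure case to the slow path");
    else
        std::puts("no check emitted; the watchpoint protects the stub");
}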
patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone)); @@ -1580,7 +1533,7 @@ JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badT case 8: { loadDouble(BaseIndex(base, property, TimesEight), fpRegT0); Jump notNaN = branchDouble(DoubleEqual, fpRegT0, fpRegT0); - static const double NaN = std::numeric_limits<double>::quiet_NaN(); + static const double NaN = QNaN; loadDouble(&NaN, fpRegT0); notNaN.link(this); break; diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp index 5d619b94b..939766f04 100644 --- a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp +++ b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp @@ -91,72 +91,6 @@ void JIT::emit_op_del_by_id(Instruction* currentInstruction) stubCall.call(dst); } -void JIT::emit_op_method_check(Instruction* currentInstruction) -{ - // Assert that the following instruction is a get_by_id. - ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id - || m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id_out_of_line); - - currentInstruction += OPCODE_LENGTH(op_method_check); - - // Do the method check - check the object & its prototype's structure inline (this is the common case). - m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_bytecodeOffset, m_propertyAccessCompilationInfo.size())); - MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last(); - - int dst = currentInstruction[1].u.operand; - int base = currentInstruction[2].u.operand; - - emitLoad(base, regT1, regT0); - emitJumpSlowCaseIfNotJSCell(base, regT1); - - BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck); - - Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), info.structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))); - DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(TrustedImmPtr(0), regT2); - Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, JSCell::structureOffset()), protoStructureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))); - - // This will be relinked to load the function without doing a load. - DataLabelPtr putFunction = moveWithPatch(TrustedImmPtr(0), regT0); - - END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck); - - move(TrustedImm32(JSValue::CellTag), regT1); - Jump match = jump(); - - // Link the failure cases here. - structureCheck.link(this); - protoStructureCheck.link(this); - - // Do a regular(ish) get_by_id (the slow case will be link to - // cti_op_get_by_id_method_check instead of cti_op_get_by_id. - compileGetByIdHotPath(); - - match.link(this); - emitValueProfilingSite(m_bytecodeOffset + OPCODE_LENGTH(op_method_check)); - emitStore(dst, regT1, regT0); - map(m_bytecodeOffset + OPCODE_LENGTH(op_method_check) + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0); - - // We've already generated the following get_by_id, so make sure it's skipped over. 
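In the emitFloatTypedArrayGetByVal hunk a few lines up, the freshly loaded double is compared with itself (a compare that fails only for NaN) and, on failure, replaced with the canonical quiet NaN that the QNaN constant now names, presumably so an arbitrary NaN bit pattern read out of a typed array cannot masquerade as a tagged value. The same purification step, sketched in plain C++:

#include <cmath>
#include <cstdio>
#include <limits>

// Canonicalize ("purify") a double loaded from raw storage: any NaN bit
// pattern is replaced with the single quiet NaN the engine expects.
double purifyNaN(double value)
{
    if (value != value)                                    // true only for NaN
        return std::numeric_limits<double>::quiet_NaN();   // the role QNaN plays in the patch
    return value;
}

int main()
{
    double weird = std::nan("0x1234");     // a NaN carrying an arbitrary payload
    double pure = purifyNaN(weird);
    std::printf("still NaN after purification: %d\n", pure != pure);
}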
- m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id); - - m_propertyAccessCompilationInfo.last().addMethodCheckInfo(info.structureToCompare, protoObj, protoStructureToCompare, putFunction); -} - -void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) -{ - currentInstruction += OPCODE_LENGTH(op_method_check); - - int dst = currentInstruction[1].u.operand; - int base = currentInstruction[2].u.operand; - int ident = currentInstruction[3].u.operand; - - compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true); - emitValueProfilingSite(m_bytecodeOffset + OPCODE_LENGTH(op_method_check)); - - // We've already generated the following get_by_id, so make sure it's skipped over. - m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id); -} - JIT::CodeRef JIT::stringGetByValStubGenerator(JSGlobalData* globalData) { JSInterfaceJIT jit; @@ -453,22 +387,28 @@ void JIT::emit_op_get_by_id(Instruction* currentInstruction) { int dst = currentInstruction[1].u.operand; int base = currentInstruction[2].u.operand; + Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); emitLoad(base, regT1, regT0); emitJumpSlowCaseIfNotJSCell(base, regT1); - compileGetByIdHotPath(); + compileGetByIdHotPath(ident); emitValueProfilingSite(); emitStore(dst, regT1, regT0); map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0); } -void JIT::compileGetByIdHotPath() +void JIT::compileGetByIdHotPath(Identifier* ident) { // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched. // Additionally, for get_by_id we need patch the offset of the branch to the slow case (we patch this to jump // to array-length / prototype access tranpolines, and finally we also the the property-map access offset as a label // to jump back to if one of these trampolies finds a match. + if (*ident == m_globalData->propertyNames->length && canBeOptimized()) { + loadPtr(Address(regT0, JSCell::structureOffset()), regT2); + emitArrayProfilingSiteForBytecodeIndex(regT2, regT3, m_bytecodeOffset); + } + BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath); Label hotPathBegin(this); @@ -498,7 +438,7 @@ void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCase emitValueProfilingSite(); } -void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck) +void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter) { // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset // so that we only need track one pointer into the slow case code - we track a pointer to the location @@ -511,7 +451,7 @@ void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<Sl BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase); Label coldPathBegin(this); - JITStubCall stubCall(this, isMethodCheck ? 
cti_op_get_by_id_method_check : cti_op_get_by_id); + JITStubCall stubCall(this, cti_op_get_by_id); stubCall.addArgument(regT1, regT0); stubCall.addArgument(TrustedImmPtr(ident)); Call call = stubCall.call(dst); @@ -624,12 +564,12 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure JumpList failureCases; failureCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag))); failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure))); - testPrototype(oldStructure->storedPrototype(), failureCases); + testPrototype(oldStructure->storedPrototype(), failureCases, stubInfo); if (!direct) { // Verify that nothing in the prototype chain has a setter for this property. for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) - testPrototype((*it)->storedPrototype(), failureCases); + testPrototype((*it)->storedPrototype(), failureCases, stubInfo); } // If we succeed in all of our checks, and the code was optimizable, then make sure we @@ -751,7 +691,6 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress) // Check for array loadPtr(Address(regT0, JSCell::structureOffset()), regT2); - emitArrayProfilingSiteForBytecodeIndex(regT2, regT3, stubInfo->bytecodeIndex); Jump failureCases1 = branchTest32(Zero, regT2, TrustedImm32(IsArray)); Jump failureCases2 = branchTest32(Zero, regT2, TrustedImm32(IndexingShapeMask)); @@ -802,8 +741,7 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str Jump failureCases1 = checkStructure(regT0, structure); // Check the prototype object's Structure had not changed. - move(TrustedImmPtr(protoObject), regT3); - Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure)); + Jump failureCases2 = addStructureTransitionCheck(protoObject, prototypeStructure, stubInfo, regT3); bool needsStubLink = false; // Checks out okay! @@ -833,7 +771,8 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str // Use the patch information to link the failure cases back to the original slow case routine. CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin); patchBuffer.link(failureCases1, slowCaseBegin); - patchBuffer.link(failureCases2, slowCaseBegin); + if (failureCases2.isSet()) + patchBuffer.link(failureCases2, slowCaseBegin); // On success return back to the hot patch code, at a point it will perform the store to dest for us. patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); @@ -942,8 +881,7 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi Jump failureCases1 = checkStructure(regT0, structure); // Check the prototype object's Structure had not changed. - move(TrustedImmPtr(protoObject), regT3); - Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure)); + Jump failureCases2 = addStructureTransitionCheck(protoObject, prototypeStructure, stubInfo, regT3); bool needsStubLink = false; bool isDirect = false; @@ -980,7 +918,8 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi // Use the patch information to link the failure cases back to the original slow case routine. 
CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures->list[currentIndex - 1].stubRoutine)); patchBuffer.link(failureCases1, lastProtoBegin); - patchBuffer.link(failureCases2, lastProtoBegin); + if (failureCases2.isSet()) + patchBuffer.link(failureCases2, lastProtoBegin); // On success return back to the hot patch code, at a point it will perform the store to dest for us. patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); @@ -1019,7 +958,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi for (unsigned i = 0; i < count; ++i, ++it) { protoObject = asObject(currStructure->prototypeForLookup(callFrame)); currStructure = it->get(); - testPrototype(protoObject, bucketsOfFail); + testPrototype(protoObject, bucketsOfFail, stubInfo); } ASSERT(protoObject); @@ -1098,7 +1037,7 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str for (unsigned i = 0; i < count; ++i, ++it) { protoObject = asObject(currStructure->prototypeForLookup(callFrame)); currStructure = it->get(); - testPrototype(protoObject, bucketsOfFail); + testPrototype(protoObject, bucketsOfFail, stubInfo); } ASSERT(protoObject); diff --git a/Source/JavaScriptCore/jit/JITStubs.cpp b/Source/JavaScriptCore/jit/JITStubs.cpp index ba8c76cfb..5ddb98dee 100644 --- a/Source/JavaScriptCore/jit/JITStubs.cpp +++ b/Source/JavaScriptCore/jit/JITStubs.cpp @@ -864,7 +864,10 @@ NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* co } // put_by_id_transition checks the prototype chain for setters. - normalizePrototypeChain(callFrame, baseCell); + if (normalizePrototypeChain(callFrame, baseCell) == InvalidPrototypeChain) { + ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? 
cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic)); + return; + } StructureChain* prototypeChain = structure->prototypeChain(callFrame); ASSERT(structure->previousID()->transitionWatchpointSetHasBeenInvalidated()); @@ -937,7 +940,7 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co if (slot.slotBase() == structure->prototypeForLookup(callFrame)) { ASSERT(slot.slotBase().isObject()); - + JSObject* slotBaseObject = asObject(slot.slotBase()); size_t offset = slot.cachedOffset(); @@ -958,7 +961,7 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co PropertyOffset offset = slot.cachedOffset(); size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase(), propertyName, offset); - if (!count) { + if (count == InvalidPrototypeChain) { stubInfo->accessType = access_get_by_id_generic; return; } @@ -1512,178 +1515,6 @@ DEFINE_STUB_FUNCTION(JSObject*, op_put_by_id_transition_realloc) return base; } -DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check) -{ - STUB_INIT_STACK_FRAME(stackFrame); - - CallFrame* callFrame = stackFrame.callFrame; - Identifier& ident = stackFrame.args[1].identifier(); - - CodeBlock* codeBlock = stackFrame.callFrame->codeBlock(); - MethodCallLinkInfo& methodCallLinkInfo = codeBlock->getMethodCallLinkInfo(STUB_RETURN_ADDRESS); - StructureStubInfo& stubInfo = codeBlock->getStubInfo(STUB_RETURN_ADDRESS); - AccessType accessType = static_cast<AccessType>(stubInfo.accessType); - - JSValue baseValue = stackFrame.args[0].jsValue(); - PropertySlot slot(baseValue); - JSValue result = baseValue.get(callFrame, ident, slot); - CHECK_FOR_EXCEPTION(); - - if (accessType != static_cast<AccessType>(stubInfo.accessType)) - return JSValue::encode(result); - - if (!methodCallLinkInfo.seenOnce()) { - methodCallLinkInfo.setSeen(); - return JSValue::encode(result); - } - - // If we successfully got something, then the base from which it is being accessed must - // be an object. (Assertion to ensure asObject() call below is safe, which comes after - // an isCacheable() chceck. - ASSERT(!slot.isCacheableValue() || slot.slotBase().isObject()); - - // Check that: - // * We're dealing with a JSCell, - // * the property is cachable, - // * it's not a dictionary - // * there is a function cached. - Structure* structure; - JSCell* specific; - JSObject* slotBaseObject; - if (baseValue.isCell() - && slot.isCacheableValue() - && !(structure = baseValue.asCell()->structure())->isUncacheableDictionary() - && (slotBaseObject = asObject(slot.slotBase()))->getPropertySpecificValue(callFrame, ident, specific) - && specific - ) { - - JSObject* callee = asObject(specific); - - // Since we're accessing a prototype in a loop, it's a good bet that it - // should not be treated as a dictionary. - if (slotBaseObject->structure()->isDictionary()) - slotBaseObject->flattenDictionaryObject(callFrame->globalData()); - - // The result fetched should always be the callee! - ASSERT(result == JSValue(callee)); - - // Check to see if the function is on the object's prototype. Patch up the code to optimize. - if (slot.slotBase() == structure->prototypeForLookup(callFrame)) { - JIT::patchMethodCallProto(callFrame->globalData(), codeBlock, methodCallLinkInfo, stubInfo, callee, structure, slotBaseObject, STUB_RETURN_ADDRESS); - return JSValue::encode(result); - } - - // Check to see if the function is on the object itself. 
- // Since we generate the method-check to check both the structure and a prototype-structure (since this - // is the common case) we have a problem - we need to patch the prototype structure check to do something - // useful. We could try to nop it out altogether, but that's a little messy, so lets do something simpler - // for now. For now it performs a check on a special object on the global object only used for this - // purpose. The object is in no way exposed, and as such the check will always pass. - if (slot.slotBase() == baseValue) { - JIT::patchMethodCallProto(callFrame->globalData(), codeBlock, methodCallLinkInfo, stubInfo, callee, structure, callFrame->scope()->globalObject()->methodCallDummy(), STUB_RETURN_ADDRESS); - return JSValue::encode(result); - } - } - - // Revert the get_by_id op back to being a regular get_by_id - allow it to cache like normal, if it needs to. - ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id)); - return JSValue::encode(result); -} - -DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check_update) -{ - STUB_INIT_STACK_FRAME(stackFrame); - - CallFrame* callFrame = stackFrame.callFrame; - Identifier& ident = stackFrame.args[1].identifier(); - - CodeBlock* codeBlock = stackFrame.callFrame->codeBlock(); - MethodCallLinkInfo& methodCallLinkInfo = codeBlock->getMethodCallLinkInfo(STUB_RETURN_ADDRESS); - StructureStubInfo& stubInfo = codeBlock->getStubInfo(STUB_RETURN_ADDRESS); - AccessType accessType = static_cast<AccessType>(stubInfo.accessType); - - JSValue baseValue = stackFrame.args[0].jsValue(); - PropertySlot slot(baseValue); - JSValue result = baseValue.get(callFrame, ident, slot); - CHECK_FOR_EXCEPTION(); - - if (accessType != static_cast<AccessType>(stubInfo.accessType)) - return JSValue::encode(result); - - ASSERT(methodCallLinkInfo.seenOnce()); - - // If we successfully got something, then the base from which it is being accessed must - // be an object. (Assertion to ensure asObject() call below is safe, which comes after - // an isCacheable() chceck. - ASSERT(!slot.isCacheableValue() || slot.slotBase().isObject()); - - // Check that: - // * We're dealing with a JSCell, - // * the property is cachable, - // * it's not a dictionary - // * there is a function cached. - Structure* structure; - JSCell* specific; - JSObject* slotBaseObject; - if (!(baseValue.isCell() - && slot.isCacheableValue() - && !(structure = baseValue.asCell()->structure())->isUncacheableDictionary() - && (slotBaseObject = asObject(slot.slotBase()))->getPropertySpecificValue(callFrame, ident, specific) - && specific - ) - || (slot.slotBase() != structure->prototypeForLookup(callFrame) - && slot.slotBase() != baseValue)) { - // Revert the get_by_id op back to being a regular get_by_id - allow it to cache like normal, if it needs to. - ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id)); - return JSValue::encode(result); - } - - // Now check if the situation has changed sufficiently that we should bail out of - // doing method_check optimizations entirely, or if it changed only slightly, in - // which case we can just repatch. 
- - JSValue proto = structure->prototypeForLookup(callFrame); - - bool previousWasProto = methodCallLinkInfo.cachedPrototype.get() != codeBlock->globalObject()->methodCallDummy(); - bool currentIsProto = slot.slotBase() == proto; - - JSObject* callee = asObject(specific); - - if (previousWasProto != currentIsProto - || !structure->transitivelyTransitionedFrom(methodCallLinkInfo.cachedStructure.get()) - || (previousWasProto && !slotBaseObject->structure()->transitivelyTransitionedFrom(methodCallLinkInfo.cachedPrototypeStructure.get())) - || specific != methodCallLinkInfo.cachedFunction.get()) { - ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id)); - return JSValue::encode(result); - } - - // It makes sense to simply repatch the method_check. - - // Since we're accessing a prototype in a loop, it's a good bet that it - // should not be treated as a dictionary. - if (slotBaseObject->structure()->isDictionary()) - slotBaseObject->flattenDictionaryObject(callFrame->globalData()); - - // The result fetched should always be the callee! - ASSERT(result == JSValue(callee)); - - // Check to see if the function is on the object's prototype. Patch up the code to optimize. - if (slot.slotBase() == proto) { - JIT::patchMethodCallProto(callFrame->globalData(), codeBlock, methodCallLinkInfo, stubInfo, callee, structure, slotBaseObject, STUB_RETURN_ADDRESS); - return JSValue::encode(result); - } - - ASSERT(slot.slotBase() == baseValue); - - // Since we generate the method-check to check both the structure and a prototype-structure (since this - // is the common case) we have a problem - we need to patch the prototype structure check to do something - // useful. We could try to nop it out altogether, but that's a little messy, so lets do something simpler - // for now. For now it performs a check on a special object on the global object only used for this - // purpose. The object is in no way exposed, and as such the check will always pass. 
- JIT::patchMethodCallProto(callFrame->globalData(), codeBlock, methodCallLinkInfo, stubInfo, callee, structure, callFrame->scope()->globalObject()->methodCallDummy(), STUB_RETURN_ADDRESS); - return JSValue::encode(result); -} - DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id) { STUB_INIT_STACK_FRAME(stackFrame); @@ -1873,7 +1704,13 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list) if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1)) ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full)); } - } else if (size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase(), propertyName, offset)) { + } else { + size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase(), propertyName, offset); + if (count == InvalidPrototypeChain) { + ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail)); + return JSValue::encode(result); + } + ASSERT(!baseValue.asCell()->structure()->isDictionary()); int listIndex; PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(callFrame->globalData(), codeBlock->ownerExecutable(), stubInfo, listIndex); @@ -1885,8 +1722,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list) if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1)) ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full)); } - } else - ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail)); + } return JSValue::encode(result); } @@ -2308,7 +2144,7 @@ DEFINE_STUB_FUNCTION(JSObject*, op_push_activation) { STUB_INIT_STACK_FRAME(stackFrame); - JSActivation* activation = JSActivation::create(stackFrame.callFrame->globalData(), stackFrame.callFrame, static_cast<FunctionExecutable*>(stackFrame.callFrame->codeBlock()->ownerExecutable())); + JSActivation* activation = JSActivation::create(stackFrame.callFrame->globalData(), stackFrame.callFrame, stackFrame.callFrame->codeBlock()); stackFrame.callFrame->setScope(activation); return activation; } @@ -3500,13 +3336,16 @@ DEFINE_STUB_FUNCTION(void, op_put_getter_setter) baseObj->putDirectAccessor(callFrame, stackFrame.args[1].identifier(), accessor, Accessor); } -DEFINE_STUB_FUNCTION(void, op_throw_reference_error) +DEFINE_STUB_FUNCTION(void, op_throw_static_error) { STUB_INIT_STACK_FRAME(stackFrame); CallFrame* callFrame = stackFrame.callFrame; String message = stackFrame.args[0].jsValue().toString(callFrame)->value(callFrame); - stackFrame.globalData->exception = createReferenceError(callFrame, message); + if (stackFrame.args[1].asInt32) + stackFrame.globalData->exception = createReferenceError(callFrame, message); + else + stackFrame.globalData->exception = createTypeError(callFrame, message); VM_THROW_EXCEPTION_AT_END(); } diff --git a/Source/JavaScriptCore/jit/JITStubs.h b/Source/JavaScriptCore/jit/JITStubs.h index 6e3141e22..5761236b1 100644 --- a/Source/JavaScriptCore/jit/JITStubs.h +++ b/Source/JavaScriptCore/jit/JITStubs.h @@ -365,8 +365,6 @@ extern "C" { EncodedJSValue JIT_STUB cti_op_get_by_id_custom_stub(STUB_ARGS_DECLARATION) WTF_INTERNAL; EncodedJSValue JIT_STUB cti_op_get_by_id_generic(STUB_ARGS_DECLARATION) WTF_INTERNAL; EncodedJSValue JIT_STUB cti_op_get_by_id_getter_stub(STUB_ARGS_DECLARATION) WTF_INTERNAL; - EncodedJSValue JIT_STUB cti_op_get_by_id_method_check(STUB_ARGS_DECLARATION) WTF_INTERNAL; - EncodedJSValue JIT_STUB 
cti_op_get_by_id_method_check_update(STUB_ARGS_DECLARATION) WTF_INTERNAL; EncodedJSValue JIT_STUB cti_op_get_by_id_proto_fail(STUB_ARGS_DECLARATION) WTF_INTERNAL; EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list(STUB_ARGS_DECLARATION) WTF_INTERNAL; EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list_full(STUB_ARGS_DECLARATION) WTF_INTERNAL; @@ -454,7 +452,7 @@ extern "C" { void JIT_STUB cti_op_init_global_const_check(STUB_ARGS_DECLARATION) WTF_INTERNAL; void JIT_STUB cti_op_tear_off_activation(STUB_ARGS_DECLARATION) WTF_INTERNAL; void JIT_STUB cti_op_tear_off_arguments(STUB_ARGS_DECLARATION) WTF_INTERNAL; - void JIT_STUB cti_op_throw_reference_error(STUB_ARGS_DECLARATION) WTF_INTERNAL; + void JIT_STUB cti_op_throw_static_error(STUB_ARGS_DECLARATION) WTF_INTERNAL; #if ENABLE(DFG_JIT) void JIT_STUB cti_optimize(STUB_ARGS_DECLARATION) WTF_INTERNAL; #endif diff --git a/Source/JavaScriptCore/llint/LLIntData.cpp b/Source/JavaScriptCore/llint/LLIntData.cpp index 8e2dacf4d..eec376b37 100644 --- a/Source/JavaScriptCore/llint/LLIntData.cpp +++ b/Source/JavaScriptCore/llint/LLIntData.cpp @@ -103,7 +103,7 @@ void Data::performAssertions(JSGlobalData& globalData) ASSERT(ValueNull == TagBitTypeOther); #endif ASSERT(StringType == 5); - ASSERT(ObjectType == 13); + ASSERT(ObjectType == 17); ASSERT(MasqueradesAsUndefined == 1); ASSERT(ImplementsHasInstance == 2); ASSERT(ImplementsDefaultHasInstance == 8); diff --git a/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp b/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp index cbfff29d6..0087fe5ff 100644 --- a/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp +++ b/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp @@ -53,7 +53,7 @@ namespace JSC { -#define OFFLINE_ASM_OFFSETOF(clazz, field) OBJECT_OFFSETOF(clazz, field) +#define OFFLINE_ASM_OFFSETOF(clazz, field) (static_cast<unsigned>(OBJECT_OFFSETOF(clazz, field))) class LLIntOffsetsExtractor { public: diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp index 74beae98a..ba44bf404 100644 --- a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp +++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp @@ -460,7 +460,7 @@ LLINT_SLOW_PATH_DECL(slow_path_create_activation) #if LLINT_SLOW_PATH_TRACING dataLog("Creating an activation, exec = %p!\n", exec); #endif - JSActivation* activation = JSActivation::create(globalData, exec, static_cast<FunctionExecutable*>(exec->codeBlock()->ownerExecutable())); + JSActivation* activation = JSActivation::create(globalData, exec, exec->codeBlock()); exec->setScope(activation); LLINT_RETURN(JSValue(activation)); } @@ -1001,32 +1001,32 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_id) // below may GC. 
pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id); - normalizePrototypeChain(exec, baseCell); - - ASSERT(structure->previousID()->isObject()); - pc[4].u.structure.set( - globalData, codeBlock->ownerExecutable(), structure->previousID()); - if (isInlineOffset(slot.cachedOffset())) - pc[5].u.operand = offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + JSObject::offsetOfInlineStorage(); - else - pc[5].u.operand = offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue); - pc[6].u.structure.set( - globalData, codeBlock->ownerExecutable(), structure); - StructureChain* chain = structure->prototypeChain(exec); - ASSERT(chain); - pc[7].u.structureChain.set( - globalData, codeBlock->ownerExecutable(), chain); - - if (pc[8].u.operand) { - if (isInlineOffset(slot.cachedOffset())) - pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_direct); - else - pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_direct_out_of_line); - } else { + if (normalizePrototypeChain(exec, baseCell) != InvalidPrototypeChain) { + ASSERT(structure->previousID()->isObject()); + pc[4].u.structure.set( + globalData, codeBlock->ownerExecutable(), structure->previousID()); if (isInlineOffset(slot.cachedOffset())) - pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_normal); + pc[5].u.operand = offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + JSObject::offsetOfInlineStorage(); else - pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_normal_out_of_line); + pc[5].u.operand = offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue); + pc[6].u.structure.set( + globalData, codeBlock->ownerExecutable(), structure); + StructureChain* chain = structure->prototypeChain(exec); + ASSERT(chain); + pc[7].u.structureChain.set( + globalData, codeBlock->ownerExecutable(), chain); + + if (pc[8].u.operand) { + if (isInlineOffset(slot.cachedOffset())) + pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_direct); + else + pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_direct_out_of_line); + } else { + if (isInlineOffset(slot.cachedOffset())) + pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_normal); + else + pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_normal_out_of_line); + } } } } else { @@ -1622,10 +1622,13 @@ LLINT_SLOW_PATH_DECL(slow_path_throw) LLINT_THROW(LLINT_OP_C(1).jsValue()); } -LLINT_SLOW_PATH_DECL(slow_path_throw_reference_error) +LLINT_SLOW_PATH_DECL(slow_path_throw_static_error) { LLINT_BEGIN(); - LLINT_THROW(createReferenceError(exec, LLINT_OP_C(1).jsValue().toString(exec)->value(exec))); + if (pc[2].u.operand) + LLINT_THROW(createReferenceError(exec, LLINT_OP_C(1).jsValue().toString(exec)->value(exec))); + else + LLINT_THROW(createTypeError(exec, LLINT_OP_C(1).jsValue().toString(exec)->value(exec))); } LLINT_SLOW_PATH_DECL(slow_path_debug) diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.h b/Source/JavaScriptCore/llint/LLIntSlowPaths.h index f78476841..99fbaccfa 100644 --- a/Source/JavaScriptCore/llint/LLIntSlowPaths.h +++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.h @@ -204,7 +204,7 @@ LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_push_with_scope); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_pop_scope); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_push_name_scope); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_throw); -LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_throw_reference_error); +LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_throw_static_error); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_debug); 
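Both the JIT stub changes and the LLInt slow_path_put_by_id hunk above now compare the result of normalizePrototypeChain against a named InvalidPrototypeChain sentinel instead of treating any false-y count as failure, so "this chain cannot be cached" is reported explicitly rather than being overloaded onto a count of zero. A small sketch of that pattern with hypothetical names (kInvalidChain and chainLength are illustrative):

#include <cstddef>
#include <cstdio>
#include <vector>

// A walk that either returns how many hops it covered, or an explicit
// "cannot cache this chain" sentinel that can never collide with a real count.
constexpr std::size_t kInvalidChain = static_cast<std::size_t>(-1);

std::size_t chainLength(const std::vector<bool>& prototypeIsCacheable)
{
    std::size_t count = 0;
    for (bool cacheable : prototypeIsCacheable) {
        if (!cacheable)
            return kInvalidChain;   // explicit failure
        ++count;
    }
    return count;                   // zero is a perfectly valid answer here
}

int main()
{
    std::size_t n = chainLength({});          // empty chain: count 0, still cacheable
    if (n == kInvalidChain)
        std::puts("fall back to the generic stub");
    else
        std::printf("cacheable chain of length %zu\n", n);
}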
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_profile_will_call); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_profile_did_call); diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm index 409ec4158..ba5b67df4 100644 --- a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm +++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm @@ -96,7 +96,7 @@ const SlowPutArrayStorageShape = 30 # Type constants. const StringType = 5 -const ObjectType = 13 +const ObjectType = 17 # Type flags constants. const MasqueradesAsUndefined = 1 @@ -151,11 +151,10 @@ else end # This must match wtf/Vector.h +const VectorSizeOffset = 0 if JSVALUE64 - const VectorSizeOffset = 0 const VectorBufferOffset = 8 else - const VectorSizeOffset = 0 const VectorBufferOffset = 4 end @@ -344,9 +343,9 @@ macro functionInitialization(profileArgSkip) addp t2, t3 .argumentProfileLoop: if JSVALUE64 - loadp ThisArgumentOffset + 8 - profileArgSkip * 8[cfr, t0], t2 + loadq ThisArgumentOffset + 8 - profileArgSkip * 8[cfr, t0], t2 subp sizeof ValueProfile, t3 - storep t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets[t3] + storeq t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets[t3] else loadi ThisArgumentOffset + TagOffset + 8 - profileArgSkip * 8[cfr, t0], t2 subp sizeof ValueProfile, t3 @@ -546,13 +545,13 @@ macro getPutToBaseOperationField(scratch, scratch1, fieldOffset, fieldGetter) end macro moveJSValueFromRegisterWithoutProfiling(value, destBuffer, destOffsetReg) - storep value, [destBuffer, destOffsetReg, 8] + storeq value, [destBuffer, destOffsetReg, 8] end macro moveJSValueFromRegistersWithoutProfiling(tag, payload, destBuffer, destOffsetReg) - storep tag, TagOffset[destBuffer, destOffsetReg, 8] - storep payload, PayloadOffset[destBuffer, destOffsetReg, 8] + storei tag, TagOffset[destBuffer, destOffsetReg, 8] + storei payload, PayloadOffset[destBuffer, destOffsetReg, 8] end macro putToBaseVariableBody(variableOffset, scratch1, scratch2, scratch3) @@ -613,15 +612,15 @@ end macro moveJSValue(sourceBuffer, sourceOffsetReg, destBuffer, destOffsetReg, profileOffset, scratchRegister) if JSVALUE64 - loadp [sourceBuffer, sourceOffsetReg, 8], scratchRegister - storep scratchRegister, [destBuffer, destOffsetReg, 8] + loadq [sourceBuffer, sourceOffsetReg, 8], scratchRegister + storeq scratchRegister, [destBuffer, destOffsetReg, 8] loadpFromInstruction(profileOffset, destOffsetReg) valueProfile(scratchRegister, destOffsetReg) else - loadp PayloadOffset[sourceBuffer, sourceOffsetReg, 8], scratchRegister - storep scratchRegister, PayloadOffset[destBuffer, destOffsetReg, 8] - loadp TagOffset[sourceBuffer, sourceOffsetReg, 8], sourceOffsetReg - storep sourceOffsetReg, TagOffset[destBuffer, destOffsetReg, 8] + loadi PayloadOffset[sourceBuffer, sourceOffsetReg, 8], scratchRegister + storei scratchRegister, PayloadOffset[destBuffer, destOffsetReg, 8] + loadi TagOffset[sourceBuffer, sourceOffsetReg, 8], sourceOffsetReg + storei sourceOffsetReg, TagOffset[destBuffer, destOffsetReg, 8] loadpFromInstruction(profileOffset, destOffsetReg) valueProfile(sourceOffsetReg, scratchRegister, destOffsetReg) end @@ -629,29 +628,29 @@ end macro moveJSValueFromSlot(slot, destBuffer, destOffsetReg, profileOffset, scratchRegister) if JSVALUE64 - loadp [slot], scratchRegister - storep scratchRegister, [destBuffer, destOffsetReg, 8] + loadq [slot], scratchRegister + storeq scratchRegister, [destBuffer, destOffsetReg, 8] loadpFromInstruction(profileOffset, destOffsetReg) 
valueProfile(scratchRegister, destOffsetReg) else - loadp PayloadOffset[slot], scratchRegister - storep scratchRegister, PayloadOffset[destBuffer, destOffsetReg, 8] - loadp TagOffset[slot], slot - storep slot, TagOffset[destBuffer, destOffsetReg, 8] + loadi PayloadOffset[slot], scratchRegister + storei scratchRegister, PayloadOffset[destBuffer, destOffsetReg, 8] + loadi TagOffset[slot], slot + storei slot, TagOffset[destBuffer, destOffsetReg, 8] loadpFromInstruction(profileOffset, destOffsetReg) valueProfile(slot, scratchRegister, destOffsetReg) end end macro moveJSValueFromRegister(value, destBuffer, destOffsetReg, profileOffset) - storep value, [destBuffer, destOffsetReg, 8] + storeq value, [destBuffer, destOffsetReg, 8] loadpFromInstruction(profileOffset, destOffsetReg) valueProfile(value, destOffsetReg) end macro moveJSValueFromRegisters(tag, payload, destBuffer, destOffsetReg, profileOffset) - storep tag, TagOffset[destBuffer, destOffsetReg, 8] - storep payload, PayloadOffset[destBuffer, destOffsetReg, 8] + storei tag, TagOffset[destBuffer, destOffsetReg, 8] + storei payload, PayloadOffset[destBuffer, destOffsetReg, 8] loadpFromInstruction(profileOffset, destOffsetReg) valueProfile(tag, payload, destOffsetReg) end @@ -662,7 +661,7 @@ _llint_op_resolve_global_property: loadp CodeBlock[cfr], t1 loadp CodeBlock::m_globalObject[t1], t1 loadp ResolveOperation::m_structure[t0], t2 - bpneq JSCell::m_structure[t1], t2, ._llint_op_resolve + bpneq JSCell::m_structure[t1], t2, .llint_op_resolve_local loadis ResolveOperation::m_offset[t0], t0 if JSVALUE64 loadPropertyAtVariableOffsetKnownNotInline(t0, t1, t2) @@ -746,8 +745,8 @@ _llint_op_resolve_scoped_var_with_top_scope_check: moveJSValue(t1, t2, cfr, t3, 4, t0) dispatch(5) -._llint_op_resolve: _llint_op_resolve: +.llint_op_resolve_local: traceExecution() getResolveOperation(3, t0, t1) btpz t0, .noInstructions @@ -781,7 +780,7 @@ _llint_op_resolve_base_to_global: dispatch(7) _llint_op_resolve_base_to_global_dynamic: - jmp ._llint_resolve_base + jmp _llint_op_resolve_base _llint_op_resolve_base_to_scope: traceExecution() @@ -828,7 +827,6 @@ _llint_op_resolve_base_to_scope_with_top_scope_check: end dispatch(7) -._llint_resolve_base: _llint_op_resolve_base: traceExecution() callSlowPath(_llint_slow_path_resolve_base) @@ -888,10 +886,11 @@ macro interpretResolveWithBase(opcodeLength, slowPath) # t1 now contains the index for the base register bineq t2, ResolveOperationSetBaseToScope, .notSetBaseToScope - storep t3, PayloadOffset[cfr, t1, 8] if JSVALUE64 + storeq t3, [cfr, t1, 8] else - storep CellTag, TagOffset[cfr, t1, 8] + storei t3, PayloadOffset[cfr, t1, 8] + storei CellTag, TagOffset[cfr, t1, 8] end jmp .haveSetBase @@ -899,10 +898,10 @@ macro interpretResolveWithBase(opcodeLength, slowPath) bineq t2, ResolveOperationSetBaseToUndefined, .notSetBaseToUndefined if JSVALUE64 - storep ValueUndefined, PayloadOffset[cfr, t1, 8] + storeq ValueUndefined, [cfr, t1, 8] else - storep 0, PayloadOffset[cfr, t1, 8] - storep UndefinedTag, TagOffset[cfr, t1, 8] + storei 0, PayloadOffset[cfr, t1, 8] + storei UndefinedTag, TagOffset[cfr, t1, 8] end jmp .haveSetBase @@ -910,10 +909,11 @@ macro interpretResolveWithBase(opcodeLength, slowPath) bineq t2, ResolveOperationSetBaseToGlobal, .slowPath loadp JSCell::m_structure[t3], t2 loadp Structure::m_globalObject[t2], t2 - storep t2, PayloadOffset[cfr, t1, 8] if JSVALUE64 + storeq t2, [cfr, t1, 8] else - storep CellTag, TagOffset[cfr, t1, 8] + storei t2, PayloadOffset[cfr, t1, 8] + storei CellTag, TagOffset[cfr, t1, 
8] end .haveSetBase: @@ -1204,12 +1204,6 @@ _llint_op_strcat: dispatch(4) -_llint_op_method_check: - traceExecution() - # We ignore method checks and use normal get_by_id optimizations. - dispatch(1) - - _llint_op_get_pnames: traceExecution() callSlowPath(_llint_slow_path_get_pnames) @@ -1240,10 +1234,10 @@ _llint_op_throw: dispatch(2) -_llint_op_throw_reference_error: +_llint_op_throw_static_error: traceExecution() - callSlowPath(_llint_slow_path_throw_reference_error) - dispatch(2) + callSlowPath(_llint_slow_path_throw_static_error) + dispatch(3) _llint_op_profile_will_call: @@ -1332,6 +1326,8 @@ _llint_op_put_by_id_replace: _llint_op_put_by_id_transition: notSupported() +_llint_op_init_global_const_nop: + dispatch(5) # Indicate the end of LLInt. _llint_end: diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp index ebfdadfdb..a9cb393b0 100644 --- a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp +++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp @@ -122,6 +122,98 @@ static double Ints2Double(uint32_t lo, uint32_t hi) //============================================================================ +// CLoopRegister is the storage for an emulated CPU register. +// It defines the policy of how ints smaller than intptr_t are packed into the +// pseudo register, as well as hides endianness differences. + +struct CLoopRegister { + union { + intptr_t i; + uintptr_t u; +#if USE(JSVALUE64) +#if CPU(BIG_ENDIAN) + struct { + int32_t i32padding; + int32_t i32; + }; + struct { + uint32_t u32padding; + uint32_t u32; + }; + struct { + int8_t i8padding[7]; + int8_t i8; + }; + struct { + uint8_t u8padding[7]; + uint8_t u8; + }; +#else // !CPU(BIG_ENDIAN) + struct { + int32_t i32; + int32_t i32padding; + }; + struct { + uint32_t u32; + uint32_t u32padding; + }; + struct { + int8_t i8; + int8_t i8padding[7]; + }; + struct { + uint8_t u8; + uint8_t u8padding[7]; + }; +#endif // !CPU(BIG_ENDIAN) +#else // !USE(JSVALUE64) + int32_t i32; + uint32_t u32; + +#if CPU(BIG_ENDIAN) + struct { + int8_t i8padding[3]; + int8_t i8; + }; + struct { + uint8_t u8padding[3]; + uint8_t u8; + }; + +#else // !CPU(BIG_ENDIAN) + struct { + int8_t i8; + int8_t i8padding[3]; + }; + struct { + uint8_t u8; + uint8_t u8padding[3]; + }; +#endif // !CPU(BIG_ENDIAN) +#endif // !USE(JSVALUE64) + + int8_t* i8p; + void* vp; + ExecState* execState; + void* instruction; + NativeFunction nativeFunc; +#if USE(JSVALUE64) + int64_t i64; + uint64_t u64; + EncodedJSValue encodedJSValue; + double castToDouble; +#endif + Opcode opcode; + }; + +#if USE(JSVALUE64) + inline void clearHighWord() { i32padding = 0; } +#else + inline void clearHighWord() { } +#endif +}; + +//============================================================================ // The llint C++ interpreter loop: // @@ -164,29 +256,12 @@ JSValue CLoop::execute(CallFrame* callFrame, OpcodeID bootstrapOpcodeId, ASSERT(callFrame->globalData().topCallFrame == callFrame); // Define the pseudo registers used by the LLINT C Loop backend: - union CLoopRegister { - intptr_t i; - uintptr_t u; - int32_t i32; - uint32_t u32; - int8_t i8; - uint8_t u8; - int8_t* i8p; - void* vp; - ExecState* execState; - void* instruction; - NativeFunction nativeFunc; -#if USE(JSVALUE64) - int64_t i64; - EncodedJSValue encodedJSValue; - double castToDouble; -#endif - Opcode opcode; - }; + ASSERT(sizeof(CLoopRegister) == sizeof(intptr_t)); + union CLoopDoubleRegister { double d; #if USE(JSVALUE64) - void* castToVoidPtr; + int64_t 
castToInt64; #endif }; @@ -311,6 +386,7 @@ JSValue CLoop::execute(CallFrame* callFrame, OpcodeID bootstrapOpcodeId, #define DEFINE_OPCODE(__opcode) \ case __opcode: \ + __opcode: \ RECORD_OPCODE_STATS(__opcode); // Dispatch to the current PC's bytecode: @@ -417,7 +493,7 @@ JSValue CLoop::execute(CallFrame* callFrame, OpcodeID bootstrapOpcodeId, // compiler on all such labels: #define LLINT_OPCODE_ENTRY(__opcode, length) \ UNUSED_LABEL(__opcode); - FOR_EACH_LLINT_NATIVE_HELPER(LLINT_OPCODE_ENTRY) + FOR_EACH_OPCODE_ID(LLINT_OPCODE_ENTRY); #undef LLINT_OPCODE_ENTRY diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm index d0072d714..ffb146247 100644 --- a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm +++ b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm @@ -984,7 +984,7 @@ _llint_op_init_global_const: writeBarrier(t2, t3) storei t2, TagOffset[t0] storei t3, PayloadOffset[t0] - dispatch(3) + dispatch(5) _llint_op_init_global_const_check: @@ -1320,8 +1320,8 @@ _llint_op_put_by_val: storeb 1, ArrayProfile::m_mayStoreToHole[t1] end addi 1, ArrayStorage::m_numValuesInVector[t0] - bib t2, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0], .opPutByValArrayStorageStoreResult - addi 1, t2, t1 + bib t3, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0], .opPutByValArrayStorageStoreResult + addi 1, t3, t1 storei t1, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0] jmp .opPutByValArrayStorageStoreResult diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm index 59fa18ccf..c9900b343 100644 --- a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm +++ b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm @@ -33,21 +33,29 @@ const ValueUndefined = TagBitTypeOther | TagBitUndefined const ValueNull = TagBitTypeOther # Utilities. +macro jumpToInstruction() + jmp [PB, PC, 8] +end + macro dispatch(advance) addp advance, PC - jmp [PB, PC, 8] + jumpToInstruction() end macro dispatchInt(advance) addi advance, PC - jmp [PB, PC, 8] + jumpToInstruction() +end + +macro dispatchIntIndirect(offset) + dispatchInt(offset * 8[PB, PC, 8]) end macro dispatchAfterCall() loadi ArgumentCount + TagOffset[cfr], PC loadp CodeBlock[cfr], PB loadp CodeBlock::m_instructions[PB], PB - jmp [PB, PC, 8] + jumpToInstruction() end macro cCall2(function, arg1, arg2) @@ -87,7 +95,7 @@ macro restoreStateAfterCCall() move t1, cfr move t3, PB subp PB, PC - urshiftp 3, PC + rshiftp 3, PC end macro callSlowPath(slowPath) @@ -137,24 +145,24 @@ end # Index and value must be different registers. Index may be clobbered. 
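The CLoopRegister union introduced above gives every narrower view (i32, u8, pointers, an Opcode) a slot inside one register-sized union, ordering the padding members by endianness so that the 32-bit view always aliases the least-significant half, with clearHighWord() zeroing the rest. A compilable sketch of the same layout trick, reduced to the 64-bit little-endian case; the real code selects the padding order with CPU(BIG_ENDIAN) and USE(JSVALUE64) checks, and this relies on the same union type punning the interpreter itself uses:

#include <cstdint>
#include <cstdio>

union PseudoRegister {
    intptr_t i;
    uint64_t u64;
    struct Halves {
        int32_t i32;          // least-significant half on little-endian
        int32_t i32padding;   // most-significant half; cleared by clearHighWord()
    } halves;
    void* vp;

    void clearHighWord() { halves.i32padding = 0; }
};

static_assert(sizeof(PseudoRegister) == 8, "must stay the size of a 64-bit register");

int main()
{
    PseudoRegister r;
    r.u64 = 0xdeadbeef00000007ull;
    r.clearHighWord();                     // keep only the 32-bit payload
    std::printf("u64 after clear: 0x%llx, i32 view: %d\n",
                static_cast<unsigned long long>(r.u64), r.halves.i32);
}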
macro loadConstantOrVariable(index, value) bpgteq index, FirstConstantRegisterIndex, .constant - loadp [cfr, index, 8], value + loadq [cfr, index, 8], value jmp .done .constant: loadp CodeBlock[cfr], value loadp CodeBlock::m_constantRegisters + VectorBufferOffset[value], value subp FirstConstantRegisterIndex, index - loadp [value, index, 8], value + loadq [value, index, 8], value .done: end macro loadConstantOrVariableInt32(index, value, slow) loadConstantOrVariable(index, value) - bpb value, tagTypeNumber, slow + bqb value, tagTypeNumber, slow end macro loadConstantOrVariableCell(index, value, slow) loadConstantOrVariable(index, value) - btpnz value, tagMask, slow + btqnz value, tagMask, slow end macro writeBarrier(value) @@ -163,7 +171,7 @@ end macro valueProfile(value, profile) if VALUE_PROFILER - storep value, ValueProfile::m_buckets[profile] + storeq value, ValueProfile::m_buckets[profile] end end @@ -200,7 +208,7 @@ _llint_op_enter: move ValueUndefined, t0 .opEnterLoop: subi 1, t2 - storep t0, [cfr, t2, 8] + storeq t0, [cfr, t2, 8] btinz t2, .opEnterLoop .opEnterDone: dispatch(1) @@ -208,8 +216,8 @@ _llint_op_enter: _llint_op_create_activation: traceExecution() - loadis 8[PB, PC, 8], t0 - bpneq [cfr, t0, 8], ValueEmpty, .opCreateActivationDone + loadisFromInstruction(1, t0) + bqneq [cfr, t0, 8], ValueEmpty, .opCreateActivationDone callSlowPath(_llint_slow_path_create_activation) .opCreateActivationDone: dispatch(2) @@ -217,15 +225,15 @@ _llint_op_create_activation: _llint_op_init_lazy_reg: traceExecution() - loadis 8[PB, PC, 8], t0 - storep ValueEmpty, [cfr, t0, 8] + loadisFromInstruction(1, t0) + storeq ValueEmpty, [cfr, t0, 8] dispatch(2) _llint_op_create_arguments: traceExecution() - loadis 8[PB, PC, 8], t0 - bpneq [cfr, t0, 8], ValueEmpty, .opCreateArgumentsDone + loadisFromInstruction(1, t0) + bqneq [cfr, t0, 8], ValueEmpty, .opCreateArgumentsDone callSlowPath(_llint_slow_path_create_arguments) .opCreateArgumentsDone: dispatch(2) @@ -237,8 +245,8 @@ _llint_op_create_this: loadp JSFunction::m_cachedInheritorID[t0], t2 btpz t2, .opCreateThisSlow allocateBasicJSObject(JSFinalObjectSizeClassIndex, t2, t0, t1, t3, .opCreateThisSlow) - loadis 8[PB, PC, 8], t1 - storep t0, [cfr, t1, 8] + loadisFromInstruction(1, t1) + storeq t0, [cfr, t1, 8] dispatch(2) .opCreateThisSlow: @@ -248,12 +256,12 @@ _llint_op_create_this: _llint_op_convert_this: traceExecution() - loadis 8[PB, PC, 8], t0 - loadp [cfr, t0, 8], t0 - btpnz t0, tagMask, .opConvertThisSlow + loadisFromInstruction(1, t0) + loadq [cfr, t0, 8], t0 + btqnz t0, tagMask, .opConvertThisSlow loadp JSCell::m_structure[t0], t0 bbb Structure::m_typeInfo + TypeInfo::m_type[t0], ObjectType, .opConvertThisSlow - loadp 16[PB, PC, 8], t1 + loadpFromInstruction(2, t1) valueProfile(t0, t1) dispatch(3) @@ -268,8 +276,8 @@ _llint_op_new_object: loadp CodeBlock::m_globalObject[t0], t0 loadp JSGlobalObject::m_emptyObjectStructure[t0], t1 allocateBasicJSObject(JSFinalObjectSizeClassIndex, t1, t0, t2, t3, .opNewObjectSlow) - loadis 8[PB, PC, 8], t1 - storep t0, [cfr, t1, 8] + loadisFromInstruction(1, t1) + storeq t0, [cfr, t1, 8] dispatch(2) .opNewObjectSlow: @@ -279,22 +287,22 @@ _llint_op_new_object: _llint_op_mov: traceExecution() - loadis 16[PB, PC, 8], t1 - loadis 8[PB, PC, 8], t0 + loadisFromInstruction(2, t1) + loadisFromInstruction(1, t0) loadConstantOrVariable(t1, t2) - storep t2, [cfr, t0, 8] + storeq t2, [cfr, t0, 8] dispatch(3) _llint_op_not: traceExecution() - loadis 16[PB, PC, 8], t0 - loadis 8[PB, PC, 8], t1 + loadisFromInstruction(2, t0) 
+ loadisFromInstruction(1, t1) loadConstantOrVariable(t0, t2) - xorp ValueFalse, t2 - btpnz t2, ~1, .opNotSlow - xorp ValueTrue, t2 - storep t2, [cfr, t1, 8] + xorq ValueFalse, t2 + btqnz t2, ~1, .opNotSlow + xorq ValueTrue, t2 + storeq t2, [cfr, t1, 8] dispatch(3) .opNotSlow: @@ -304,14 +312,14 @@ _llint_op_not: macro equalityComparison(integerComparison, slowPath) traceExecution() - loadis 24[PB, PC, 8], t0 - loadis 16[PB, PC, 8], t2 - loadis 8[PB, PC, 8], t3 + loadisFromInstruction(3, t0) + loadisFromInstruction(2, t2) + loadisFromInstruction(1, t3) loadConstantOrVariableInt32(t0, t1, .slow) loadConstantOrVariableInt32(t2, t0, .slow) integerComparison(t0, t1, t0) - orp ValueFalse, t0 - storep t0, [cfr, t3, 8] + orq ValueFalse, t0 + storeq t0, [cfr, t3, 8] dispatch(4) .slow: @@ -332,9 +340,9 @@ _llint_op_neq: macro equalNullComparison() - loadis 16[PB, PC, 8], t0 - loadp [cfr, t0, 8], t0 - btpnz t0, tagMask, .immediate + loadisFromInstruction(2, t0) + loadq [cfr, t0, 8], t0 + btqnz t0, tagMask, .immediate loadp JSCell::m_structure[t0], t2 btbnz Structure::m_typeInfo + TypeInfo::m_flags[t2], MasqueradesAsUndefined, .masqueradesAsUndefined move 0, t0 @@ -345,48 +353,48 @@ macro equalNullComparison() cpeq Structure::m_globalObject[t2], t0, t0 jmp .done .immediate: - andp ~TagBitUndefined, t0 - cpeq t0, ValueNull, t0 + andq ~TagBitUndefined, t0 + cqeq t0, ValueNull, t0 .done: end _llint_op_eq_null: traceExecution() equalNullComparison() - loadis 8[PB, PC, 8], t1 - orp ValueFalse, t0 - storep t0, [cfr, t1, 8] + loadisFromInstruction(1, t1) + orq ValueFalse, t0 + storeq t0, [cfr, t1, 8] dispatch(3) _llint_op_neq_null: traceExecution() equalNullComparison() - loadis 8[PB, PC, 8], t1 - xorp ValueTrue, t0 - storep t0, [cfr, t1, 8] + loadisFromInstruction(1, t1) + xorq ValueTrue, t0 + storeq t0, [cfr, t1, 8] dispatch(3) macro strictEq(equalityOperation, slowPath) traceExecution() - loadis 24[PB, PC, 8], t0 - loadis 16[PB, PC, 8], t2 + loadisFromInstruction(3, t0) + loadisFromInstruction(2, t2) loadConstantOrVariable(t0, t1) loadConstantOrVariable(t2, t0) move t0, t2 - orp t1, t2 - btpz t2, tagMask, .slow - bpaeq t0, tagTypeNumber, .leftOK - btpnz t0, tagTypeNumber, .slow + orq t1, t2 + btqz t2, tagMask, .slow + bqaeq t0, tagTypeNumber, .leftOK + btqnz t0, tagTypeNumber, .slow .leftOK: - bpaeq t1, tagTypeNumber, .rightOK - btpnz t1, tagTypeNumber, .slow + bqaeq t1, tagTypeNumber, .rightOK + btqnz t1, tagTypeNumber, .slow .rightOK: equalityOperation(t0, t1, t0) - loadis 8[PB, PC, 8], t1 - orp ValueFalse, t0 - storep t0, [cfr, t1, 8] + loadisFromInstruction(1, t1) + orq ValueFalse, t0 + storeq t0, [cfr, t1, 8] dispatch(4) .slow: @@ -396,24 +404,24 @@ end _llint_op_stricteq: strictEq( - macro (left, right, result) cpeq left, right, result end, + macro (left, right, result) cqeq left, right, result end, _llint_slow_path_stricteq) _llint_op_nstricteq: strictEq( - macro (left, right, result) cpneq left, right, result end, + macro (left, right, result) cqneq left, right, result end, _llint_slow_path_nstricteq) macro preOp(arithmeticOperation, slowPath) traceExecution() - loadis 8[PB, PC, 8], t0 - loadp [cfr, t0, 8], t1 - bpb t1, tagTypeNumber, .slow + loadisFromInstruction(1, t0) + loadq [cfr, t0, 8], t1 + bqb t1, tagTypeNumber, .slow arithmeticOperation(t1, .slow) - orp tagTypeNumber, t1 - storep t1, [cfr, t0, 8] + orq tagTypeNumber, t1 + storeq t1, [cfr, t0, 8] dispatch(2) .slow: @@ -435,16 +443,16 @@ _llint_op_pre_dec: macro postOp(arithmeticOperation, slowPath) traceExecution() - loadis 16[PB, PC, 8], 
t0 - loadis 8[PB, PC, 8], t1 - loadp [cfr, t0, 8], t2 + loadisFromInstruction(2, t0) + loadisFromInstruction(1, t1) + loadq [cfr, t0, 8], t2 bieq t0, t1, .done - bpb t2, tagTypeNumber, .slow + bqb t2, tagTypeNumber, .slow move t2, t3 arithmeticOperation(t3, .slow) - orp tagTypeNumber, t3 - storep t2, [cfr, t1, 8] - storep t3, [cfr, t0, 8] + orq tagTypeNumber, t3 + storeq t2, [cfr, t1, 8] + storeq t3, [cfr, t0, 8] .done: dispatch(3) @@ -467,13 +475,13 @@ _llint_op_post_dec: _llint_op_to_jsnumber: traceExecution() - loadis 16[PB, PC, 8], t0 - loadis 8[PB, PC, 8], t1 + loadisFromInstruction(2, t0) + loadisFromInstruction(1, t1) loadConstantOrVariable(t0, t2) - bpaeq t2, tagTypeNumber, .opToJsnumberIsImmediate - btpz t2, tagTypeNumber, .opToJsnumberSlow + bqaeq t2, tagTypeNumber, .opToJsnumberIsImmediate + btqz t2, tagTypeNumber, .opToJsnumberSlow .opToJsnumberIsImmediate: - storep t2, [cfr, t1, 8] + storeq t2, [cfr, t1, 8] dispatch(3) .opToJsnumberSlow: @@ -483,19 +491,19 @@ _llint_op_to_jsnumber: _llint_op_negate: traceExecution() - loadis 16[PB, PC, 8], t0 - loadis 8[PB, PC, 8], t1 + loadisFromInstruction(2, t0) + loadisFromInstruction(1, t1) loadConstantOrVariable(t0, t2) - bpb t2, tagTypeNumber, .opNegateNotInt + bqb t2, tagTypeNumber, .opNegateNotInt btiz t2, 0x7fffffff, .opNegateSlow negi t2 - orp tagTypeNumber, t2 - storep t2, [cfr, t1, 8] + orq tagTypeNumber, t2 + storeq t2, [cfr, t1, 8] dispatch(3) .opNegateNotInt: - btpz t2, tagTypeNumber, .opNegateSlow - xorp 0x8000000000000000, t2 - storep t2, [cfr, t1, 8] + btqz t2, tagTypeNumber, .opNegateSlow + xorq 0x8000000000000000, t2 + storeq t2, [cfr, t1, 8] dispatch(3) .opNegateSlow: @@ -504,47 +512,47 @@ _llint_op_negate: macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath) - loadis 24[PB, PC, 8], t0 - loadis 16[PB, PC, 8], t2 + loadisFromInstruction(3, t0) + loadisFromInstruction(2, t2) loadConstantOrVariable(t0, t1) loadConstantOrVariable(t2, t0) - bpb t0, tagTypeNumber, .op1NotInt - bpb t1, tagTypeNumber, .op2NotInt - loadis 8[PB, PC, 8], t2 + bqb t0, tagTypeNumber, .op1NotInt + bqb t1, tagTypeNumber, .op2NotInt + loadisFromInstruction(1, t2) integerOperationAndStore(t1, t0, .slow, t2) dispatch(5) .op1NotInt: # First operand is definitely not an int, the second operand could be anything. - btpz t0, tagTypeNumber, .slow - bpaeq t1, tagTypeNumber, .op1NotIntOp2Int - btpz t1, tagTypeNumber, .slow - addp tagTypeNumber, t1 - fp2d t1, ft1 + btqz t0, tagTypeNumber, .slow + bqaeq t1, tagTypeNumber, .op1NotIntOp2Int + btqz t1, tagTypeNumber, .slow + addq tagTypeNumber, t1 + fq2d t1, ft1 jmp .op1NotIntReady .op1NotIntOp2Int: ci2d t1, ft1 .op1NotIntReady: - loadis 8[PB, PC, 8], t2 - addp tagTypeNumber, t0 - fp2d t0, ft0 + loadisFromInstruction(1, t2) + addq tagTypeNumber, t0 + fq2d t0, ft0 doubleOperation(ft1, ft0) - fd2p ft0, t0 - subp tagTypeNumber, t0 - storep t0, [cfr, t2, 8] + fd2q ft0, t0 + subq tagTypeNumber, t0 + storeq t0, [cfr, t2, 8] dispatch(5) .op2NotInt: # First operand is definitely an int, the second is definitely not. 
- loadis 8[PB, PC, 8], t2 - btpz t1, tagTypeNumber, .slow + loadisFromInstruction(1, t2) + btqz t1, tagTypeNumber, .slow ci2d t0, ft0 - addp tagTypeNumber, t1 - fp2d t1, ft1 + addq tagTypeNumber, t1 + fq2d t1, ft1 doubleOperation(ft1, ft0) - fd2p ft0, t0 - subp tagTypeNumber, t0 - storep t0, [cfr, t2, 8] + fd2q ft0, t0 + subq tagTypeNumber, t0 + storeq t0, [cfr, t2, 8] dispatch(5) .slow: @@ -556,8 +564,8 @@ macro binaryOp(integerOperation, doubleOperation, slowPath) binaryOpCustomStore( macro (left, right, slow, index) integerOperation(left, right, slow) - orp tagTypeNumber, right - storep right, [cfr, index, 8] + orq tagTypeNumber, right + storeq right, [cfr, index, 8] end, doubleOperation, slowPath) end @@ -581,8 +589,8 @@ _llint_op_mul: bilt left, 0, slow bilt right, 0, slow .done: - orp tagTypeNumber, t3 - storep t3, [cfr, index, 8] + orq tagTypeNumber, t3 + storeq t3, [cfr, index, 8] end, macro (left, right) muld left, right end, _llint_slow_path_mul) @@ -613,24 +621,24 @@ _llint_op_div: cdqi idivi t3 btinz t1, slow - orp tagTypeNumber, t0 - storep t0, [cfr, index, 8] + orq tagTypeNumber, t0 + storeq t0, [cfr, index, 8] end, macro (left, right) divd left, right end, _llint_slow_path_div) macro bitOp(operation, slowPath, advance) - loadis 24[PB, PC, 8], t0 - loadis 16[PB, PC, 8], t2 - loadis 8[PB, PC, 8], t3 + loadisFromInstruction(3, t0) + loadisFromInstruction(2, t2) + loadisFromInstruction(1, t3) loadConstantOrVariable(t0, t1) loadConstantOrVariable(t2, t0) - bpb t0, tagTypeNumber, .slow - bpb t1, tagTypeNumber, .slow + bqb t0, tagTypeNumber, .slow + bqb t1, tagTypeNumber, .slow operation(t1, t0, .slow) - orp tagTypeNumber, t0 - storep t0, [cfr, t3, 8] + orq tagTypeNumber, t0 + storeq t0, [cfr, t3, 8] dispatch(advance) .slow: @@ -691,7 +699,7 @@ _llint_op_bitor: _llint_op_check_has_instance: traceExecution() - loadis 24[PB, PC, 8], t1 + loadisFromInstruction(3, t1) loadConstantOrVariableCell(t1, t0, .opCheckHasInstanceSlow) loadp JSCell::m_structure[t0], t0 btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsDefaultHasInstance, .opCheckHasInstanceSlow @@ -705,26 +713,26 @@ _llint_op_check_has_instance: _llint_op_instanceof: traceExecution() # Actually do the work. 
- loadis 24[PB, PC, 8], t0 - loadis 8[PB, PC, 8], t3 + loadisFromInstruction(3, t0) + loadisFromInstruction(1, t3) loadConstantOrVariableCell(t0, t1, .opInstanceofSlow) loadp JSCell::m_structure[t1], t2 bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opInstanceofSlow - loadis 16[PB, PC, 8], t0 + loadisFromInstruction(2, t0) loadConstantOrVariableCell(t0, t2, .opInstanceofSlow) # Register state: t1 = prototype, t2 = value move 1, t0 .opInstanceofLoop: loadp JSCell::m_structure[t2], t2 - loadp Structure::m_prototype[t2], t2 - bpeq t2, t1, .opInstanceofDone - btpz t2, tagMask, .opInstanceofLoop + loadq Structure::m_prototype[t2], t2 + bqeq t2, t1, .opInstanceofDone + btqz t2, tagMask, .opInstanceofLoop move 0, t0 .opInstanceofDone: - orp ValueFalse, t0 - storep t0, [cfr, t3, 8] + orq ValueFalse, t0 + storeq t0, [cfr, t3, 8] dispatch(4) .opInstanceofSlow: @@ -734,65 +742,65 @@ _llint_op_instanceof: _llint_op_is_undefined: traceExecution() - loadis 16[PB, PC, 8], t1 - loadis 8[PB, PC, 8], t2 + loadisFromInstruction(2, t1) + loadisFromInstruction(1, t2) loadConstantOrVariable(t1, t0) - btpz t0, tagMask, .opIsUndefinedCell - cpeq t0, ValueUndefined, t3 - orp ValueFalse, t3 - storep t3, [cfr, t2, 8] + btqz t0, tagMask, .opIsUndefinedCell + cqeq t0, ValueUndefined, t3 + orq ValueFalse, t3 + storeq t3, [cfr, t2, 8] dispatch(3) .opIsUndefinedCell: loadp JSCell::m_structure[t0], t0 btbnz Structure::m_typeInfo + TypeInfo::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined move ValueFalse, t1 - storep t1, [cfr, t2, 8] + storeq t1, [cfr, t2, 8] dispatch(3) .masqueradesAsUndefined: loadp CodeBlock[cfr], t1 loadp CodeBlock::m_globalObject[t1], t1 cpeq Structure::m_globalObject[t0], t1, t3 - orp ValueFalse, t3 - storep t3, [cfr, t2, 8] + orq ValueFalse, t3 + storeq t3, [cfr, t2, 8] dispatch(3) _llint_op_is_boolean: traceExecution() - loadis 16[PB, PC, 8], t1 - loadis 8[PB, PC, 8], t2 + loadisFromInstruction(2, t1) + loadisFromInstruction(1, t2) loadConstantOrVariable(t1, t0) - xorp ValueFalse, t0 - tpz t0, ~1, t0 - orp ValueFalse, t0 - storep t0, [cfr, t2, 8] + xorq ValueFalse, t0 + tqz t0, ~1, t0 + orq ValueFalse, t0 + storeq t0, [cfr, t2, 8] dispatch(3) _llint_op_is_number: traceExecution() - loadis 16[PB, PC, 8], t1 - loadis 8[PB, PC, 8], t2 + loadisFromInstruction(2, t1) + loadisFromInstruction(1, t2) loadConstantOrVariable(t1, t0) - tpnz t0, tagTypeNumber, t1 - orp ValueFalse, t1 - storep t1, [cfr, t2, 8] + tqnz t0, tagTypeNumber, t1 + orq ValueFalse, t1 + storeq t1, [cfr, t2, 8] dispatch(3) _llint_op_is_string: traceExecution() - loadis 16[PB, PC, 8], t1 - loadis 8[PB, PC, 8], t2 + loadisFromInstruction(2, t1) + loadisFromInstruction(1, t2) loadConstantOrVariable(t1, t0) - btpnz t0, tagMask, .opIsStringNotCell + btqnz t0, tagMask, .opIsStringNotCell loadp JSCell::m_structure[t0], t0 cbeq Structure::m_typeInfo + TypeInfo::m_type[t0], StringType, t1 - orp ValueFalse, t1 - storep t1, [cfr, t2, 8] + orq ValueFalse, t1 + storeq t1, [cfr, t2, 8] dispatch(3) .opIsStringNotCell: - storep ValueFalse, [cfr, t2, 8] + storeq ValueFalse, [cfr, t2, 8] dispatch(3) @@ -800,40 +808,40 @@ macro loadPropertyAtVariableOffsetKnownNotInline(propertyOffsetAsPointer, object assert(macro (ok) bigteq propertyOffsetAsPointer, firstOutOfLineOffset, ok end) negp propertyOffsetAsPointer loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage - loadp (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsPointer, 8], value + loadq (firstOutOfLineOffset - 2) * 8[objectAndStorage, 
propertyOffsetAsPointer, 8], value end macro loadPropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value) bilt propertyOffsetAsInt, firstOutOfLineOffset, .isInline loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage negi propertyOffsetAsInt - sxi2p propertyOffsetAsInt, propertyOffsetAsInt + sxi2q propertyOffsetAsInt, propertyOffsetAsInt jmp .ready .isInline: addp sizeof JSObject - (firstOutOfLineOffset - 2) * 8, objectAndStorage .ready: - loadp (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8], value + loadq (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8], value end _llint_op_init_global_const: traceExecution() - loadis 16[PB, PC, 8], t1 - loadp 8[PB, PC, 8], t0 + loadisFromInstruction(2, t1) + loadpFromInstruction(1, t0) loadConstantOrVariable(t1, t2) writeBarrier(t2) - storep t2, [t0] - dispatch(3) + storeq t2, [t0] + dispatch(5) _llint_op_init_global_const_check: traceExecution() - loadp 24[PB, PC, 8], t2 - loadis 16[PB, PC, 8], t1 - loadp 8[PB, PC, 8], t0 + loadpFromInstruction(3, t2) + loadisFromInstruction(2, t1) + loadpFromInstruction(1, t0) btbnz [t2], .opInitGlobalConstCheckSlow loadConstantOrVariable(t1, t2) writeBarrier(t2) - storep t2, [t0] + storeq t2, [t0] dispatch(5) .opInitGlobalConstCheckSlow: callSlowPath(_llint_slow_path_init_global_const_check) @@ -846,19 +854,19 @@ macro getById(getPropertyStorage) # ping-ponging is free. At best we get lucky and the get_by_id will continue # to take fast path on the new cache. At worst we take slow path, which is what # we would have been doing anyway. - loadis 16[PB, PC, 8], t0 - loadp 32[PB, PC, 8], t1 + loadisFromInstruction(2, t0) + loadpFromInstruction(4, t1) loadConstantOrVariableCell(t0, t3, .opGetByIdSlow) - loadis 40[PB, PC, 8], t2 + loadisFromInstruction(5, t2) getPropertyStorage( t3, t0, macro (propertyStorage, scratch) bpneq JSCell::m_structure[t3], t1, .opGetByIdSlow - loadis 8[PB, PC, 8], t1 - loadp [propertyStorage, t2], scratch - storep scratch, [cfr, t1, 8] - loadp 64[PB, PC, 8], t1 + loadisFromInstruction(1, t1) + loadq [propertyStorage, t2], scratch + storeq scratch, [cfr, t1, 8] + loadpFromInstruction(8, t1) valueProfile(scratch, t1) dispatch(9) end) @@ -878,21 +886,21 @@ _llint_op_get_by_id_out_of_line: _llint_op_get_array_length: traceExecution() - loadis 16[PB, PC, 8], t0 - loadp 32[PB, PC, 8], t1 + loadisFromInstruction(2, t0) + loadpFromInstruction(4, t1) loadConstantOrVariableCell(t0, t3, .opGetArrayLengthSlow) loadp JSCell::m_structure[t3], t2 arrayProfile(t2, t1, t0) btiz t2, IsArray, .opGetArrayLengthSlow btiz t2, IndexingShapeMask, .opGetArrayLengthSlow - loadis 8[PB, PC, 8], t1 - loadp 64[PB, PC, 8], t2 + loadisFromInstruction(1, t1) + loadpFromInstruction(8, t2) loadp JSObject::m_butterfly[t3], t0 loadi -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0], t0 bilt t0, 0, .opGetArrayLengthSlow - orp tagTypeNumber, t0 + orq tagTypeNumber, t0 valueProfile(t0, t2) - storep t0, [cfr, t1, 8] + storeq t0, [cfr, t1, 8] dispatch(9) .opGetArrayLengthSlow: @@ -902,13 +910,13 @@ _llint_op_get_array_length: _llint_op_get_arguments_length: traceExecution() - loadis 16[PB, PC, 8], t0 - loadis 8[PB, PC, 8], t1 - btpnz [cfr, t0, 8], .opGetArgumentsLengthSlow + loadisFromInstruction(2, t0) + loadisFromInstruction(1, t1) + btqnz [cfr, t0, 8], .opGetArgumentsLengthSlow loadi ArgumentCount + PayloadOffset[cfr], t2 subi 1, t2 - orp tagTypeNumber, t2 - storep t2, [cfr, t1, 8] + orq tagTypeNumber, t2 + storeq t2, [cfr, t1, 8] dispatch(4) 
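# A note on the loadisFromInstruction/loadpFromInstruction helpers that replace the
# raw "loadis 8[PB, PC, 8]"-style accesses throughout this hunk: judging from the
# operand-index rewrites (offset 24 -> index 3, 16 -> 2, 8 -> 1), they are presumably
# thin macros over the old explicit-offset form, roughly:
#
#     macro loadisFromInstruction(index, dest)
#         loadis index * 8[PB, PC, 8], dest
#     end
#
#     macro loadpFromInstruction(index, dest)
#         loadp index * 8[PB, PC, 8], dest
#     end
#
# (a sketch of the assumed definitions; the macros themselves are defined outside
# the lines shown in this hunk)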
.opGetArgumentsLengthSlow: @@ -918,19 +926,19 @@ _llint_op_get_arguments_length: macro putById(getPropertyStorage) traceExecution() - loadis 8[PB, PC, 8], t3 - loadp 32[PB, PC, 8], t1 + loadisFromInstruction(1, t3) + loadpFromInstruction(4, t1) loadConstantOrVariableCell(t3, t0, .opPutByIdSlow) - loadis 24[PB, PC, 8], t2 + loadisFromInstruction(3, t2) getPropertyStorage( t0, t3, macro (propertyStorage, scratch) bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow - loadis 40[PB, PC, 8], t1 + loadisFromInstruction(5, t1) loadConstantOrVariable(t2, scratch) writeBarrier(t0) - storep scratch, [propertyStorage, t1] + storeq scratch, [propertyStorage, t1] dispatch(9) end) end @@ -949,13 +957,13 @@ _llint_op_put_by_id_out_of_line: macro putByIdTransition(additionalChecks, getPropertyStorage) traceExecution() - loadis 8[PB, PC, 8], t3 - loadp 32[PB, PC, 8], t1 + loadisFromInstruction(1, t3) + loadpFromInstruction(4, t1) loadConstantOrVariableCell(t3, t0, .opPutByIdSlow) - loadis 24[PB, PC, 8], t2 + loadisFromInstruction(3, t2) bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow additionalChecks(t1, t3) - loadis 40[PB, PC, 8], t1 + loadisFromInstruction(5, t1) getPropertyStorage( t0, t3, @@ -963,8 +971,8 @@ macro putByIdTransition(additionalChecks, getPropertyStorage) addp t1, propertyStorage, t3 loadConstantOrVariable(t2, t1) writeBarrier(t1) - storep t1, [t3] - loadp 48[PB, PC, 8], t1 + storeq t1, [t3] + loadpFromInstruction(6, t1) storep t1, JSCell::m_structure[t0] dispatch(9) end) @@ -975,17 +983,17 @@ end macro structureChainChecks(oldStructure, scratch) const protoCell = oldStructure # Reusing the oldStructure register for the proto - loadp 56[PB, PC, 8], scratch + loadpFromInstruction(7, scratch) assert(macro (ok) btpnz scratch, ok end) loadp StructureChain::m_vector[scratch], scratch assert(macro (ok) btpnz scratch, ok end) - bpeq Structure::m_prototype[oldStructure], ValueNull, .done + bqeq Structure::m_prototype[oldStructure], ValueNull, .done .loop: - loadp Structure::m_prototype[oldStructure], protoCell + loadq Structure::m_prototype[oldStructure], protoCell loadp JSCell::m_structure[protoCell], oldStructure bpneq oldStructure, [scratch], .opPutByIdSlow addp 8, scratch - bpneq Structure::m_prototype[oldStructure], ValueNull, .loop + bqneq Structure::m_prototype[oldStructure], ValueNull, .loop .done: end @@ -1007,35 +1015,35 @@ _llint_op_put_by_id_transition_normal_out_of_line: _llint_op_get_by_val: traceExecution() - loadis 16[PB, PC, 8], t2 + loadisFromInstruction(2, t2) loadConstantOrVariableCell(t2, t0, .opGetByValSlow) loadp JSCell::m_structure[t0], t2 - loadp 32[PB, PC, 8], t3 + loadpFromInstruction(4, t3) arrayProfile(t2, t3, t1) - loadis 24[PB, PC, 8], t3 + loadisFromInstruction(3, t3) loadConstantOrVariableInt32(t3, t1, .opGetByValSlow) - sxi2p t1, t1 + sxi2q t1, t1 loadp JSObject::m_butterfly[t0], t3 andi IndexingShapeMask, t2 bineq t2, ContiguousShape, .opGetByValNotContiguous biaeq t1, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t3], .opGetByValSlow - loadis 8[PB, PC, 8], t0 - loadp [t3, t1, 8], t2 - btpz t2, .opGetByValSlow + loadisFromInstruction(1, t0) + loadq [t3, t1, 8], t2 + btqz t2, .opGetByValSlow jmp .opGetByValDone .opGetByValNotContiguous: subi ArrayStorageShape, t2 bia t2, SlowPutArrayStorageShape - ArrayStorageShape, .opGetByValSlow biaeq t1, -sizeof IndexingHeader + IndexingHeader::m_vectorLength[t3], .opGetByValSlow - loadis 8[PB, PC, 8], t0 - loadp ArrayStorage::m_vector[t3, t1, 8], t2 - btpz t2, .opGetByValSlow + loadisFromInstruction(1, t0) + loadq 
ArrayStorage::m_vector[t3, t1, 8], t2 + btqz t2, .opGetByValSlow .opGetByValDone: - storep t2, [cfr, t0, 8] - loadp 40[PB, PC, 8], t0 + storeq t2, [cfr, t0, 8] + loadpFromInstruction(5, t0) valueProfile(t2, t0) dispatch(6) @@ -1048,19 +1056,19 @@ _llint_op_get_argument_by_val: # FIXME: At some point we should array profile this. Right now it isn't necessary # since the DFG will never turn a get_argument_by_val into a GetByVal. traceExecution() - loadis 16[PB, PC, 8], t0 - loadis 24[PB, PC, 8], t1 - btpnz [cfr, t0, 8], .opGetArgumentByValSlow + loadisFromInstruction(2, t0) + loadisFromInstruction(3, t1) + btqnz [cfr, t0, 8], .opGetArgumentByValSlow loadConstantOrVariableInt32(t1, t2, .opGetArgumentByValSlow) addi 1, t2 loadi ArgumentCount + PayloadOffset[cfr], t1 biaeq t2, t1, .opGetArgumentByValSlow negi t2 - sxi2p t2, t2 - loadis 8[PB, PC, 8], t3 - loadp 32[PB, PC, 8], t1 - loadp ThisArgumentOffset[cfr, t2, 8], t0 - storep t0, [cfr, t3, 8] + sxi2q t2, t2 + loadisFromInstruction(1, t3) + loadpFromInstruction(4, t1) + loadq ThisArgumentOffset[cfr, t2, 8], t0 + storeq t0, [cfr, t3, 8] valueProfile(t0, t1) dispatch(6) @@ -1071,19 +1079,19 @@ _llint_op_get_argument_by_val: _llint_op_get_by_pname: traceExecution() - loadis 24[PB, PC, 8], t1 + loadisFromInstruction(3, t1) loadConstantOrVariable(t1, t0) - loadis 32[PB, PC, 8], t1 + loadisFromInstruction(4, t1) assertNotConstant(t1) - bpneq t0, [cfr, t1, 8], .opGetByPnameSlow - loadis 16[PB, PC, 8], t2 - loadis 40[PB, PC, 8], t3 + bqneq t0, [cfr, t1, 8], .opGetByPnameSlow + loadisFromInstruction(2, t2) + loadisFromInstruction(5, t3) loadConstantOrVariableCell(t2, t0, .opGetByPnameSlow) assertNotConstant(t3) - loadp [cfr, t3, 8], t1 + loadq [cfr, t3, 8], t1 loadp JSCell::m_structure[t0], t2 bpneq t2, JSPropertyNameIterator::m_cachedStructure[t1], .opGetByPnameSlow - loadis 48[PB, PC, 8], t3 + loadisFromInstruction(6, t3) loadi PayloadOffset[cfr, t3, 8], t3 subi 1, t3 biaeq t3, JSPropertyNameIterator::m_numCacheableSlots[t1], .opGetByPnameSlow @@ -1092,8 +1100,8 @@ _llint_op_get_by_pname: subi JSPropertyNameIterator::m_cachedStructureInlineCapacity[t1], t3 .opGetByPnameInlineProperty: loadPropertyAtVariableOffset(t3, t0, t0) - loadis 8[PB, PC, 8], t1 - storep t0, [cfr, t1, 8] + loadisFromInstruction(1, t1) + storeq t0, [cfr, t1, 8] dispatch(7) .opGetByPnameSlow: @@ -1103,30 +1111,30 @@ _llint_op_get_by_pname: _llint_op_put_by_val: traceExecution() - loadis 8[PB, PC, 8], t0 + loadisFromInstruction(1, t0) loadConstantOrVariableCell(t0, t1, .opPutByValSlow) loadp JSCell::m_structure[t1], t2 - loadp 32[PB, PC, 8], t3 + loadpFromInstruction(4, t3) arrayProfile(t2, t3, t0) - loadis 16[PB, PC, 8], t0 + loadisFromInstruction(2, t0) loadConstantOrVariableInt32(t0, t3, .opPutByValSlow) - sxi2p t3, t3 + sxi2q t3, t3 loadp JSObject::m_butterfly[t1], t0 andi IndexingShapeMask, t2 bineq t2, ContiguousShape, .opPutByValNotContiguous biaeq t3, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0], .opPutByValContiguousOutOfBounds .opPutByValContiguousStoreResult: - loadis 24[PB, PC, 8], t2 + loadisFromInstruction(3, t2) loadConstantOrVariable(t2, t1) writeBarrier(t1) - storep t1, [t0, t3, 8] + storeq t1, [t0, t3, 8] dispatch(5) .opPutByValContiguousOutOfBounds: biaeq t3, -sizeof IndexingHeader + IndexingHeader::m_vectorLength[t0], .opPutByValSlow if VALUE_PROFILER - loadp 32[PB, PC, 8], t2 + loadpFromInstruction(4, t2) storeb 1, ArrayProfile::m_mayStoreToHole[t2] end addi 1, t3, t2 @@ -1136,17 +1144,17 @@ _llint_op_put_by_val: .opPutByValNotContiguous: bineq 
t2, ArrayStorageShape, .opPutByValSlow biaeq t3, -sizeof IndexingHeader + IndexingHeader::m_vectorLength[t0], .opPutByValSlow - btpz ArrayStorage::m_vector[t0, t3, 8], .opPutByValArrayStorageEmpty + btqz ArrayStorage::m_vector[t0, t3, 8], .opPutByValArrayStorageEmpty .opPutByValArrayStorageStoreResult: - loadis 24[PB, PC, 8], t2 + loadisFromInstruction(3, t2) loadConstantOrVariable(t2, t1) writeBarrier(t1) - storep t1, ArrayStorage::m_vector[t0, t3, 8] + storeq t1, ArrayStorage::m_vector[t0, t3, 8] dispatch(5) .opPutByValArrayStorageEmpty: if VALUE_PROFILER - loadp 32[PB, PC, 8], t1 + loadpFromInstruction(4, t1) storeb 1, ArrayProfile::m_mayStoreToHole[t1] end addi 1, ArrayStorage::m_numValuesInVector[t0] @@ -1162,24 +1170,24 @@ _llint_op_put_by_val: _llint_op_loop: traceExecution() - dispatchInt(8[PB, PC, 8]) + dispatchIntIndirect(1) _llint_op_jmp: traceExecution() - dispatchInt(8[PB, PC, 8]) + dispatchIntIndirect(1) macro jumpTrueOrFalse(conditionOp, slow) - loadis 8[PB, PC, 8], t1 + loadisFromInstruction(1, t1) loadConstantOrVariable(t1, t0) - xorp ValueFalse, t0 - btpnz t0, -1, .slow + xorq ValueFalse, t0 + btqnz t0, -1, .slow conditionOp(t0, .target) dispatch(3) .target: - dispatchInt(16[PB, PC, 8]) + dispatchIntIndirect(2) .slow: callSlowPath(slow) @@ -1188,19 +1196,19 @@ end macro equalNull(cellHandler, immediateHandler) - loadis 8[PB, PC, 8], t0 + loadisFromInstruction(1, t0) assertNotConstant(t0) - loadp [cfr, t0, 8], t0 - btpnz t0, tagMask, .immediate + loadq [cfr, t0, 8], t0 + btqnz t0, tagMask, .immediate loadp JSCell::m_structure[t0], t2 cellHandler(t2, Structure::m_typeInfo + TypeInfo::m_flags[t2], .target) dispatch(3) .target: - dispatch(16[PB, PC, 8]) + dispatchIntIndirect(2) .immediate: - andp ~TagBitUndefined, t0 + andq ~TagBitUndefined, t0 immediateHandler(t0, .target) dispatch(3) end @@ -1215,7 +1223,7 @@ _llint_op_jeq_null: bpeq Structure::m_globalObject[structure], t0, target .notMasqueradesAsUndefined: end, - macro (value, target) bpeq value, ValueNull, target end) + macro (value, target) bqeq value, ValueNull, target end) _llint_op_jneq_null: @@ -1227,13 +1235,13 @@ _llint_op_jneq_null: loadp CodeBlock::m_globalObject[t0], t0 bpneq Structure::m_globalObject[structure], t0, target end, - macro (value, target) bpneq value, ValueNull, target end) + macro (value, target) bqneq value, ValueNull, target end) _llint_op_jneq_ptr: traceExecution() - loadis 8[PB, PC, 8], t0 - loadi 16[PB, PC, 8], t1 + loadisFromInstruction(1, t0) + loadisFromInstruction(2, t1) loadp CodeBlock[cfr], t2 loadp CodeBlock::m_globalObject[t2], t2 loadp JSGlobalObject::m_specialPointers[t2, t1, 8], t1 @@ -1241,44 +1249,44 @@ _llint_op_jneq_ptr: dispatch(4) .opJneqPtrTarget: - dispatchInt(24[PB, PC, 8]) + dispatchIntIndirect(3) macro compare(integerCompare, doubleCompare, slowPath) - loadis 8[PB, PC, 8], t2 - loadis 16[PB, PC, 8], t3 + loadisFromInstruction(1, t2) + loadisFromInstruction(2, t3) loadConstantOrVariable(t2, t0) loadConstantOrVariable(t3, t1) - bpb t0, tagTypeNumber, .op1NotInt - bpb t1, tagTypeNumber, .op2NotInt + bqb t0, tagTypeNumber, .op1NotInt + bqb t1, tagTypeNumber, .op2NotInt integerCompare(t0, t1, .jumpTarget) dispatch(4) .op1NotInt: - btpz t0, tagTypeNumber, .slow - bpb t1, tagTypeNumber, .op1NotIntOp2NotInt + btqz t0, tagTypeNumber, .slow + bqb t1, tagTypeNumber, .op1NotIntOp2NotInt ci2d t1, ft1 jmp .op1NotIntReady .op1NotIntOp2NotInt: - btpz t1, tagTypeNumber, .slow - addp tagTypeNumber, t1 - fp2d t1, ft1 + btqz t1, tagTypeNumber, .slow + addq tagTypeNumber, t1 + fq2d t1, ft1 
.op1NotIntReady: - addp tagTypeNumber, t0 - fp2d t0, ft0 + addq tagTypeNumber, t0 + fq2d t0, ft0 doubleCompare(ft0, ft1, .jumpTarget) dispatch(4) .op2NotInt: ci2d t0, ft0 - btpz t1, tagTypeNumber, .slow - addp tagTypeNumber, t1 - fp2d t1, ft1 + btqz t1, tagTypeNumber, .slow + addq tagTypeNumber, t1 + fq2d t1, ft1 doubleCompare(ft0, ft1, .jumpTarget) dispatch(4) .jumpTarget: - dispatchInt(24[PB, PC, 8]) + dispatchIntIndirect(3) .slow: callSlowPath(slowPath) @@ -1288,15 +1296,15 @@ end _llint_op_switch_imm: traceExecution() - loadis 24[PB, PC, 8], t2 - loadis 8[PB, PC, 8], t3 + loadisFromInstruction(3, t2) + loadisFromInstruction(1, t3) loadConstantOrVariable(t2, t1) loadp CodeBlock[cfr], t2 loadp CodeBlock::m_rareData[t2], t2 muli sizeof SimpleJumpTable, t3 # FIXME: would be nice to peephole this! loadp CodeBlock::RareData::m_immediateSwitchJumpTables + VectorBufferOffset[t2], t2 addp t3, t2 - bpb t1, tagTypeNumber, .opSwitchImmNotInt + bqb t1, tagTypeNumber, .opSwitchImmNotInt subi SimpleJumpTable::min[t2], t1 biaeq t1, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchImmFallThrough loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t3 @@ -1305,9 +1313,9 @@ _llint_op_switch_imm: dispatch(t1) .opSwitchImmNotInt: - btpnz t1, tagTypeNumber, .opSwitchImmSlow # Go slow if it's a double. + btqnz t1, tagTypeNumber, .opSwitchImmSlow # Go slow if it's a double. .opSwitchImmFallThrough: - dispatchInt(16[PB, PC, 8]) + dispatchIntIndirect(2) .opSwitchImmSlow: callSlowPath(_llint_slow_path_switch_imm) @@ -1316,15 +1324,15 @@ _llint_op_switch_imm: _llint_op_switch_char: traceExecution() - loadis 24[PB, PC, 8], t2 - loadis 8[PB, PC, 8], t3 + loadisFromInstruction(3, t2) + loadisFromInstruction(1, t3) loadConstantOrVariable(t2, t1) loadp CodeBlock[cfr], t2 loadp CodeBlock::m_rareData[t2], t2 muli sizeof SimpleJumpTable, t3 loadp CodeBlock::RareData::m_characterSwitchJumpTables + VectorBufferOffset[t2], t2 addp t3, t2 - btpnz t1, tagMask, .opSwitchCharFallThrough + btqnz t1, tagMask, .opSwitchCharFallThrough loadp JSCell::m_structure[t1], t0 bbneq Structure::m_typeInfo + TypeInfo::m_type[t0], StringType, .opSwitchCharFallThrough bineq JSString::m_length[t1], 1, .opSwitchCharFallThrough @@ -1345,7 +1353,7 @@ _llint_op_switch_char: dispatch(t1) .opSwitchCharFallThrough: - dispatchInt(16[PB, PC, 8]) + dispatchIntIndirect(2) .opSwitchOnRope: callSlowPath(_llint_slow_path_switch_char) @@ -1354,9 +1362,10 @@ _llint_op_switch_char: _llint_op_new_func: traceExecution() - btiz 24[PB, PC, 8], .opNewFuncUnchecked - loadis 8[PB, PC, 8], t1 - btpnz [cfr, t1, 8], .opNewFuncDone + loadisFromInstruction(3, t2) + btiz t2, .opNewFuncUnchecked + loadisFromInstruction(1, t1) + btqnz [cfr, t1, 8], .opNewFuncDone .opNewFuncUnchecked: callSlowPath(_llint_slow_path_new_func) .opNewFuncDone: @@ -1365,32 +1374,32 @@ _llint_op_new_func: macro arrayProfileForCall() if VALUE_PROFILER - loadis 24[PB, PC, 8], t3 - loadp ThisArgumentOffset[cfr, t3, 8], t0 - btpnz t0, tagMask, .done + loadisFromInstruction(3, t3) + loadq ThisArgumentOffset[cfr, t3, 8], t0 + btqnz t0, tagMask, .done loadp JSCell::m_structure[t0], t0 - loadp 40[PB, PC, 8], t1 + loadpFromInstruction(5, t1) storep t0, ArrayProfile::m_lastSeenStructure[t1] .done: end end macro doCall(slowPath) - loadis 8[PB, PC, 8], t0 - loadp 32[PB, PC, 8], t1 + loadisFromInstruction(1, t0) + loadpFromInstruction(4, t1) loadp LLIntCallLinkInfo::callee[t1], t2 loadConstantOrVariable(t0, t3) - bpneq t3, t2, .opCallSlow - loadis 24[PB, PC, 8], t3 + bqneq t3, t2, 
.opCallSlow + loadisFromInstruction(3, t3) addi 6, PC lshifti 3, t3 addp cfr, t3 loadp JSFunction::m_scope[t2], t0 - storep t2, Callee[t3] - storep t0, ScopeChain[t3] - loadis 16 - 48[PB, PC, 8], t2 + storeq t2, Callee[t3] + storeq t0, ScopeChain[t3] + loadisFromInstruction(-4, t2) storei PC, ArgumentCount + TagOffset[cfr] - storep cfr, CallerFrame[t3] + storeq cfr, CallerFrame[t3] storei t2, ArgumentCount + PayloadOffset[t3] move t3, cfr callTargetFunction(t1) @@ -1402,8 +1411,8 @@ end _llint_op_tear_off_activation: traceExecution() - loadis 8[PB, PC, 8], t0 - btpz [cfr, t0, 8], .opTearOffActivationNotCreated + loadisFromInstruction(1, t0) + btqz [cfr, t0, 8], .opTearOffActivationNotCreated callSlowPath(_llint_slow_path_tear_off_activation) .opTearOffActivationNotCreated: dispatch(2) @@ -1411,9 +1420,9 @@ _llint_op_tear_off_activation: _llint_op_tear_off_arguments: traceExecution() - loadis 8[PB, PC, 8], t0 + loadisFromInstruction(1, t0) subi 1, t0 # Get the unmodifiedArgumentsRegister - btpz [cfr, t0, 8], .opTearOffArgumentsNotCreated + btqz [cfr, t0, 8], .opTearOffArgumentsNotCreated callSlowPath(_llint_slow_path_tear_off_arguments) .opTearOffArgumentsNotCreated: dispatch(3) @@ -1422,15 +1431,15 @@ _llint_op_tear_off_arguments: _llint_op_ret: traceExecution() checkSwitchToJITForEpilogue() - loadis 8[PB, PC, 8], t2 + loadisFromInstruction(1, t2) loadConstantOrVariable(t2, t0) doReturn() _llint_op_call_put_result: - loadis 8[PB, PC, 8], t2 - loadp 16[PB, PC, 8], t3 - storep t0, [cfr, t2, 8] + loadisFromInstruction(1, t2) + loadpFromInstruction(2, t3) + storeq t0, [cfr, t2, 8] valueProfile(t0, t3) traceExecution() dispatch(3) @@ -1439,29 +1448,29 @@ _llint_op_call_put_result: _llint_op_ret_object_or_this: traceExecution() checkSwitchToJITForEpilogue() - loadis 8[PB, PC, 8], t2 + loadisFromInstruction(1, t2) loadConstantOrVariable(t2, t0) - btpnz t0, tagMask, .opRetObjectOrThisNotObject + btqnz t0, tagMask, .opRetObjectOrThisNotObject loadp JSCell::m_structure[t0], t2 bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opRetObjectOrThisNotObject doReturn() .opRetObjectOrThisNotObject: - loadis 16[PB, PC, 8], t2 + loadisFromInstruction(2, t2) loadConstantOrVariable(t2, t0) doReturn() _llint_op_to_primitive: traceExecution() - loadis 16[PB, PC, 8], t2 - loadis 8[PB, PC, 8], t3 + loadisFromInstruction(2, t2) + loadisFromInstruction(1, t3) loadConstantOrVariable(t2, t0) - btpnz t0, tagMask, .opToPrimitiveIsImm + btqnz t0, tagMask, .opToPrimitiveIsImm loadp JSCell::m_structure[t0], t2 bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .opToPrimitiveSlowCase .opToPrimitiveIsImm: - storep t0, [cfr, t3, 8] + storeq t0, [cfr, t3, 8] dispatch(3) .opToPrimitiveSlowCase: @@ -1471,38 +1480,38 @@ _llint_op_to_primitive: _llint_op_next_pname: traceExecution() - loadis 24[PB, PC, 8], t1 - loadis 32[PB, PC, 8], t2 + loadisFromInstruction(3, t1) + loadisFromInstruction(4, t2) assertNotConstant(t1) assertNotConstant(t2) loadi PayloadOffset[cfr, t1, 8], t0 bieq t0, PayloadOffset[cfr, t2, 8], .opNextPnameEnd - loadis 40[PB, PC, 8], t2 + loadisFromInstruction(5, t2) assertNotConstant(t2) loadp [cfr, t2, 8], t2 loadp JSPropertyNameIterator::m_jsStrings[t2], t3 - loadp [t3, t0, 8], t3 + loadq [t3, t0, 8], t3 addi 1, t0 storei t0, PayloadOffset[cfr, t1, 8] - loadis 8[PB, PC, 8], t1 - storep t3, [cfr, t1, 8] - loadis 16[PB, PC, 8], t3 + loadisFromInstruction(1, t1) + storeq t3, [cfr, t1, 8] + loadisFromInstruction(2, t3) assertNotConstant(t3) - loadp [cfr, t3, 8], t3 + loadq [cfr, t3, 8], t3 
loadp JSCell::m_structure[t3], t1 bpneq t1, JSPropertyNameIterator::m_cachedStructure[t2], .opNextPnameSlow loadp JSPropertyNameIterator::m_cachedPrototypeChain[t2], t0 loadp StructureChain::m_vector[t0], t0 btpz [t0], .opNextPnameTarget .opNextPnameCheckPrototypeLoop: - bpeq Structure::m_prototype[t1], ValueNull, .opNextPnameSlow - loadp Structure::m_prototype[t1], t2 + bqeq Structure::m_prototype[t1], ValueNull, .opNextPnameSlow + loadq Structure::m_prototype[t1], t2 loadp JSCell::m_structure[t2], t1 bpneq t1, [t0], .opNextPnameSlow addp 8, t0 btpnz [t0], .opNextPnameCheckPrototypeLoop .opNextPnameTarget: - dispatchInt(48[PB, PC, 8]) + dispatchIntIndirect(6) .opNextPnameEnd: dispatch(7) @@ -1525,11 +1534,11 @@ _llint_op_catch: loadp JITStackFrame::globalData[sp], t3 loadp JSGlobalData::targetInterpreterPCForThrow[t3], PC subp PB, PC - urshiftp 3, PC - loadp JSGlobalData::exception[t3], t0 - storep 0, JSGlobalData::exception[t3] - loadis 8[PB, PC, 8], t2 - storep t0, [cfr, t2, 8] + rshiftp 3, PC + loadq JSGlobalData::exception[t3], t0 + storeq 0, JSGlobalData::exception[t3] + loadisFromInstruction(1, t2) + storeq t0, [cfr, t2, 8] traceExecution() dispatch(2) @@ -1537,9 +1546,9 @@ _llint_op_catch: _llint_op_end: traceExecution() checkSwitchToJITForEpilogue() - loadis 8[PB, PC, 8], t0 + loadisFromInstruction(1, t0) assertNotConstant(t0) - loadp [cfr, t0, 8], t0 + loadq [cfr, t0, 8], t0 doReturn() @@ -1565,8 +1574,8 @@ macro nativeCallTrampoline(executableOffsetToFunction) loadp JITStackFrame::globalData + 8[sp], t0 storep cfr, JSGlobalData::topCallFrame[t0] loadp CallerFrame[cfr], t0 - loadp ScopeChain[t0], t1 - storep t1, ScopeChain[cfr] + loadq ScopeChain[t0], t1 + storeq t1, ScopeChain[cfr] peek 0, t1 storep t1, ReturnPC[cfr] move cfr, t5 # t5 = rdi @@ -1601,7 +1610,7 @@ macro nativeCallTrampoline(executableOffsetToFunction) error end - btpnz JSGlobalData::exception[t3], .exception + btqnz JSGlobalData::exception[t3], .exception ret .exception: preserveReturnAddressAfterCall(t1) diff --git a/Source/JavaScriptCore/offlineasm/armv7.rb b/Source/JavaScriptCore/offlineasm/armv7.rb index ab0496f71..078be8c0f 100644 --- a/Source/JavaScriptCore/offlineasm/armv7.rb +++ b/Source/JavaScriptCore/offlineasm/armv7.rb @@ -401,7 +401,7 @@ class Instruction $asm.puts "pop #{operands[0].armV7Operand}" when "push" $asm.puts "push #{operands[0].armV7Operand}" - when "move", "sxi2p", "zxi2p" + when "move" if operands[0].is_a? Immediate armV7MoveImmediate(operands[0].value, operands[1]) else diff --git a/Source/JavaScriptCore/offlineasm/asm.rb b/Source/JavaScriptCore/offlineasm/asm.rb index 4d44c5e91..bf2426399 100644 --- a/Source/JavaScriptCore/offlineasm/asm.rb +++ b/Source/JavaScriptCore/offlineasm/asm.rb @@ -182,7 +182,11 @@ class Assembler end def self.cLabelReference(labelName) - "#{labelName}" + if /\Allint_op_/.match(labelName) + "op_#{$~.post_match}" # strip opcodes of their llint_ prefix. 
+ else + "#{labelName}" + end end def self.cLocalLabelReference(labelName) diff --git a/Source/JavaScriptCore/offlineasm/cloop.rb b/Source/JavaScriptCore/offlineasm/cloop.rb index b3e319c4d..cbe7e2ca1 100644 --- a/Source/JavaScriptCore/offlineasm/cloop.rb +++ b/Source/JavaScriptCore/offlineasm/cloop.rb @@ -37,6 +37,8 @@ def cloopMapType(type) when :uint; ".u" when :int32; ".i32" when :uint32; ".u32" + when :int64; ".i64" + when :uint64; ".u64" when :int8; ".i8" when :uint8; ".u8" when :int8Ptr; ".i8p" @@ -44,7 +46,7 @@ def cloopMapType(type) when :nativeFunc; ".nativeFunc" when :double; ".d" when :castToDouble; ".castToDouble" - when :castToVoidPtr; ".castToVoidPtr" + when :castToInt64; ".castToInt64" when :opcode; ".opcode" else; raise "Unsupported type" @@ -141,9 +143,11 @@ class Immediate case type when :int8; "int8_t(#{valueStr})" when :int32; "int32_t(#{valueStr})" + when :int64; "int64_t(#{valueStr})" when :int; "intptr_t(#{valueStr})" when :uint8; "uint8_t(#{valueStr})" when :uint32; "uint32_t(#{valueStr})" + when :uint64; "uint64_t(#{valueStr})" when :uint; "uintptr_t(#{valueStr})" else raise "Not implemented immediate of type: #{type}" @@ -159,9 +163,11 @@ class Address case type when :int8; int8MemRef when :int32; int32MemRef + when :int64; int64MemRef when :int; intMemRef when :uint8; uint8MemRef when :uint32; uint32MemRef + when :uint64; uint64MemRef when :uint; uintMemRef when :opcode; opcodeMemRef when :nativeFunc; nativeFuncMemRef @@ -190,6 +196,9 @@ class Address def int32MemRef "*CAST<int32_t*>(#{pointerExpr})" end + def int64MemRef + "*CAST<int64_t*>(#{pointerExpr})" + end def intMemRef "*CAST<intptr_t*>(#{pointerExpr})" end @@ -202,6 +211,9 @@ class Address def uint32MemRef "*CAST<uint32_t*>(#{pointerExpr})" end + def uint64MemRef + "*CAST<uint64_t*>(#{pointerExpr})" + end def uintMemRef "*CAST<uintptr_t*>(#{pointerExpr})" end @@ -224,9 +236,11 @@ class BaseIndex case type when :int8; int8MemRef when :int32; int32MemRef + when :int64; int64MemRef when :int; intMemRef when :uint8; uint8MemRef when :uint32; uint32MemRef + when :uint64; uint64MemRef when :uint; uintMemRef when :opcode; opcodeMemRef else @@ -235,10 +249,10 @@ class BaseIndex end def pointerExpr if base.is_a? RegisterID and base.name == "sp" - offsetValue = "(#{index.clValue(:int32)} << #{scaleShift}) + #{offset.clValue})" + offsetValue = "(#{index.clValue} << #{scaleShift}) + #{offset.clValue})" "(ASSERT(#{offsetValue} == offsetof(JITStackFrame, globalData)), &sp->globalData)" else - "#{base.clValue(:int8Ptr)} + (#{index.clValue(:int32)} << #{scaleShift}) + #{offset.clValue}" + "#{base.clValue(:int8Ptr)} + (#{index.clValue} << #{scaleShift}) + #{offset.clValue}" end end def int8MemRef @@ -250,6 +264,9 @@ class BaseIndex def int32MemRef "*CAST<int32_t*>(#{pointerExpr})" end + def int64MemRef + "*CAST<int64_t*>(#{pointerExpr})" + end def intMemRef "*CAST<intptr_t*>(#{pointerExpr})" end @@ -262,6 +279,9 @@ class BaseIndex def uint32MemRef "*CAST<uint32_t*>(#{pointerExpr})" end + def uint64MemRef + "*CAST<uint64_t*>(#{pointerExpr})" + end def uintMemRef "*CAST<uintptr_t*>(#{pointerExpr})" end @@ -333,22 +353,47 @@ end def cloopEmitOperation(operands, type, operator) + raise unless type == :int || type == :uint || type == :int32 || type == :uint32 || \ + type == :int64 || type == :uint64 || type == :double if operands.size == 3 $asm.putc "#{operands[2].clValue(type)} = #{operands[1].clValue(type)} #{operator} #{operands[0].clValue(type)};" + if operands[2].is_a? 
RegisterID and (type == :int32 or type == :uint32) + $asm.putc "#{operands[2].dump}.clearHighWord();" # Just clear it. It does nothing on the 32-bit port. + end else raise unless operands.size == 2 raise unless not operands[1].is_a? Immediate $asm.putc "#{operands[1].clValue(type)} = #{operands[1].clValue(type)} #{operator} #{operands[0].clValue(type)};" + if operands[1].is_a? RegisterID and (type == :int32 or type == :uint32) + $asm.putc "#{operands[1].dump}.clearHighWord();" # Just clear it. It does nothing on the 32-bit port. + end end end def cloopEmitShiftOperation(operands, type, operator) + raise unless type == :int || type == :uint || type == :int32 || type == :uint32 || type == :int64 || type == :uint64 if operands.size == 3 $asm.putc "#{operands[2].clValue(type)} = #{operands[1].clValue(type)} #{operator} (#{operands[0].clValue(:int)} & 0x1f);" + if operands[2].is_a? RegisterID and (type == :int32 or type == :uint32) + $asm.putc "#{operands[2].dump}.clearHighWord();" # Just clear it. It does nothing on the 32-bit port. + end else raise unless operands.size == 2 raise unless not operands[1].is_a? Immediate $asm.putc "#{operands[1].clValue(type)} = #{operands[1].clValue(type)} #{operator} (#{operands[0].clValue(:int)} & 0x1f);" + if operands[1].is_a? RegisterID and (type == :int32 or type == :uint32) + $asm.putc "#{operands[1].dump}.clearHighWord();" # Just clear it. It does nothing on the 32-bit port. + end + end +end + +def cloopEmitUnaryOperation(operands, type, operator) + raise unless type == :int || type == :uint || type == :int32 || type == :uint32 || type == :int64 || type == :uint64 + raise unless operands.size == 1 + raise unless not operands[0].is_a? Immediate + $asm.putc "#{operands[0].clValue(type)} = #{operator}#{operands[0].clValue(type)};" + if operands[0].is_a? RegisterID and (type == :int32 or type == :uint32) + $asm.putc "#{operands[0].dump}.clearHighWord();" # Just clear it. It does nothing on the 32-bit port. 
end end @@ -410,6 +455,7 @@ def cloopEmitOpAndBranch(operands, operator, type, conditionTest) case type when :int; tempType = "intptr_t" when :int32; tempType = "int32_t" + when :int64; tempType = "int64_t" else raise "Unimplemented type" end @@ -512,65 +558,91 @@ class Instruction case opcode when "addi" cloopEmitOperation(operands, :int32, "+") + when "addq" + cloopEmitOperation(operands, :int64, "+") when "addp" cloopEmitOperation(operands, :int, "+") when "andi" cloopEmitOperation(operands, :int32, "&") + when "andq" + cloopEmitOperation(operands, :int64, "&") when "andp" cloopEmitOperation(operands, :int, "&") when "ori" cloopEmitOperation(operands, :int32, "|") + when "orq" + cloopEmitOperation(operands, :int64, "|") when "orp" cloopEmitOperation(operands, :int, "|") when "xori" cloopEmitOperation(operands, :int32, "^") + when "xorq" + cloopEmitOperation(operands, :int64, "^") when "xorp" cloopEmitOperation(operands, :int, "^") when "lshifti" cloopEmitShiftOperation(operands, :int32, "<<") + when "lshiftq" + cloopEmitShiftOperation(operands, :int64, "<<") when "lshiftp" cloopEmitShiftOperation(operands, :int, "<<") when "rshifti" cloopEmitShiftOperation(operands, :int32, ">>") + when "rshiftq" + cloopEmitShiftOperation(operands, :int64, ">>") when "rshiftp" cloopEmitShiftOperation(operands, :int, ">>") when "urshifti" cloopEmitShiftOperation(operands, :uint32, ">>") + when "urshiftq" + cloopEmitShiftOperation(operands, :uint64, ">>") when "urshiftp" cloopEmitShiftOperation(operands, :uint, ">>") when "muli" cloopEmitOperation(operands, :int32, "*") + when "mulq" + cloopEmitOperation(operands, :int64, "*") when "mulp" cloopEmitOperation(operands, :int, "*") when "subi" cloopEmitOperation(operands, :int32, "-") + when "subq" + cloopEmitOperation(operands, :int64, "-") when "subp" cloopEmitOperation(operands, :int, "-") when "negi" - $asm.putc "#{operands[0].clValue(:int32)} = -#{operands[0].clValue(:int32)};" + cloopEmitUnaryOperation(operands, :int32, "-") + when "negq" + cloopEmitUnaryOperation(operands, :int64, "-") when "negp" - $asm.putc "#{operands[0].clValue(:int)} = -#{operands[0].clValue(:int)};" + cloopEmitUnaryOperation(operands, :int, "-") when "noti" - $asm.putc "#{operands[0].clValue(:int32)} = !#{operands[0].clValue(:int32)};" + cloopEmitUnaryOperation(operands, :int32, "!") when "loadi" - $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].uint32MemRef};" + $asm.putc "#{operands[1].clValue(:uint)} = #{operands[0].uint32MemRef};" + # There's no need to call clearHighWord() here because the above will + # automatically take care of 0 extension. 
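# For comparison with the q-suffixed forms added below: the loadi arm above goes
# through clValue(:uint) / uint32MemRef, so the emitted C is roughly
#   reg.u = *CAST<uint32_t*>(addr);   // assigning a uint32_t zero-extends, hence no clearHighWord()
# while loadis sign-extends through clValue(:int) / int32MemRef and loadq writes the
# whole 64-bit slot via clValue(:int64) / int64MemRef. The ".u" field name is taken
# from cloopMapType above; this is a sketch, not verified against the C++ register type.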
when "loadis" $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].int32MemRef};" + when "loadq" + $asm.putc "#{operands[1].clValue(:int64)} = #{operands[0].int64MemRef};" when "loadp" $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].intMemRef};" when "storei" $asm.putc "#{operands[1].int32MemRef} = #{operands[0].clValue(:int32)};" + when "storeq" + $asm.putc "#{operands[1].int64MemRef} = #{operands[0].clValue(:int64)};" when "storep" $asm.putc "#{operands[1].intMemRef} = #{operands[0].clValue(:int)};" when "loadb" @@ -631,6 +703,7 @@ class Instruction when "td2i" $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].clValue(:double)};" + $asm.putc "#{operands[1].dump}.clearHighWord();" when "bcd2i" # operands: srcDbl dstInt slowPath $asm.putc "{" @@ -639,20 +712,23 @@ class Instruction $asm.putc " if (asInt32 != d || (!asInt32 && signbit(d))) // true for -0.0" $asm.putc " goto #{operands[2].cLabel};" $asm.putc " #{operands[1].clValue} = asInt32;" + $asm.putc " #{operands[1].dump}.clearHighWord();" $asm.putc "}" when "move" $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].clValue(:int)};" - when "sxi2p" - $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].clValue(:int32)};" - when "zxi2p" - $asm.putc "#{operands[1].clValue(:uint)} = #{operands[0].clValue(:uint32)};" + when "sxi2q" + $asm.putc "#{operands[1].clValue(:int64)} = #{operands[0].clValue(:int32)};" + when "zxi2q" + $asm.putc "#{operands[1].clValue(:uint64)} = #{operands[0].clValue(:uint32)};" when "nop" $asm.putc "// nop" when "bbeq" cloopEmitCompareAndBranch(operands, :int8, "==") when "bieq" cloopEmitCompareAndBranch(operands, :int32, "==") + when "bqeq" + cloopEmitCompareAndBranch(operands, :int64, "==") when "bpeq" cloopEmitCompareAndBranch(operands, :int, "==") @@ -660,6 +736,8 @@ class Instruction cloopEmitCompareAndBranch(operands, :int8, "!=") when "bineq" cloopEmitCompareAndBranch(operands, :int32, "!=") + when "bqneq" + cloopEmitCompareAndBranch(operands, :int64, "!=") when "bpneq" cloopEmitCompareAndBranch(operands, :int, "!=") @@ -667,6 +745,8 @@ class Instruction cloopEmitCompareAndBranch(operands, :uint8, ">") when "bia" cloopEmitCompareAndBranch(operands, :uint32, ">") + when "bqa" + cloopEmitCompareAndBranch(operands, :uint64, ">") when "bpa" cloopEmitCompareAndBranch(operands, :uint, ">") @@ -674,6 +754,8 @@ class Instruction cloopEmitCompareAndBranch(operands, :uint8, ">=") when "biaeq" cloopEmitCompareAndBranch(operands, :uint32, ">=") + when "bqaeq" + cloopEmitCompareAndBranch(operands, :uint64, ">=") when "bpaeq" cloopEmitCompareAndBranch(operands, :uint, ">=") @@ -681,6 +763,8 @@ class Instruction cloopEmitCompareAndBranch(operands, :uint8, "<") when "bib" cloopEmitCompareAndBranch(operands, :uint32, "<") + when "bqb" + cloopEmitCompareAndBranch(operands, :uint64, "<") when "bpb" cloopEmitCompareAndBranch(operands, :uint, "<") @@ -688,6 +772,8 @@ class Instruction cloopEmitCompareAndBranch(operands, :uint8, "<=") when "bibeq" cloopEmitCompareAndBranch(operands, :uint32, "<=") + when "bqbeq" + cloopEmitCompareAndBranch(operands, :uint64, "<=") when "bpbeq" cloopEmitCompareAndBranch(operands, :uint, "<=") @@ -695,6 +781,8 @@ class Instruction cloopEmitCompareAndBranch(operands, :int8, ">") when "bigt" cloopEmitCompareAndBranch(operands, :int32, ">") + when "bqgt" + cloopEmitCompareAndBranch(operands, :int64, ">") when "bpgt" cloopEmitCompareAndBranch(operands, :int, ">") @@ -702,6 +790,8 @@ class Instruction cloopEmitCompareAndBranch(operands, :int8, ">=") when "bigteq" 
cloopEmitCompareAndBranch(operands, :int32, ">=") + when "bqgteq" + cloopEmitCompareAndBranch(operands, :int64, ">=") when "bpgteq" cloopEmitCompareAndBranch(operands, :int, ">=") @@ -709,6 +799,8 @@ class Instruction cloopEmitCompareAndBranch(operands, :int8, "<") when "bilt" cloopEmitCompareAndBranch(operands, :int32, "<") + when "bqlt" + cloopEmitCompareAndBranch(operands, :int64, "<") when "bplt" cloopEmitCompareAndBranch(operands, :int, "<") @@ -716,6 +808,8 @@ class Instruction cloopEmitCompareAndBranch(operands, :int8, "<=") when "bilteq" cloopEmitCompareAndBranch(operands, :int32, "<=") + when "bqlteq" + cloopEmitCompareAndBranch(operands, :int64, "<=") when "bplteq" cloopEmitCompareAndBranch(operands, :int, "<=") @@ -723,6 +817,8 @@ class Instruction cloopEmitTestAndBranchIf(operands, :int8, "== 0", operands[-1].cLabel) when "btiz" cloopEmitTestAndBranchIf(operands, :int32, "== 0", operands[-1].cLabel) + when "btqz" + cloopEmitTestAndBranchIf(operands, :int64, "== 0", operands[-1].cLabel) when "btpz" cloopEmitTestAndBranchIf(operands, :int, "== 0", operands[-1].cLabel) @@ -730,6 +826,8 @@ class Instruction cloopEmitTestAndBranchIf(operands, :int8, "!= 0", operands[-1].cLabel) when "btinz" cloopEmitTestAndBranchIf(operands, :int32, "!= 0", operands[-1].cLabel) + when "btqnz" + cloopEmitTestAndBranchIf(operands, :int64, "!= 0", operands[-1].cLabel) when "btpnz" cloopEmitTestAndBranchIf(operands, :int, "!= 0", operands[-1].cLabel) @@ -737,6 +835,8 @@ class Instruction cloopEmitTestAndBranchIf(operands, :int8, "< 0", operands[-1].cLabel) when "btis" cloopEmitTestAndBranchIf(operands, :int32, "< 0", operands[-1].cLabel) + when "btqs" + cloopEmitTestAndBranchIf(operands, :int64, "< 0", operands[-1].cLabel) when "btps" cloopEmitTestAndBranchIf(operands, :int, "< 0", operands[-1].cLabel) @@ -770,6 +870,8 @@ class Instruction cloopEmitCompareAndSet(operands, :uint8, "==") when "cieq" cloopEmitCompareAndSet(operands, :uint32, "==") + when "cqeq" + cloopEmitCompareAndSet(operands, :uint64, "==") when "cpeq" cloopEmitCompareAndSet(operands, :uint, "==") @@ -777,6 +879,8 @@ class Instruction cloopEmitCompareAndSet(operands, :uint8, "!=") when "cineq" cloopEmitCompareAndSet(operands, :uint32, "!=") + when "cqneq" + cloopEmitCompareAndSet(operands, :uint64, "!=") when "cpneq" cloopEmitCompareAndSet(operands, :uint, "!=") @@ -784,6 +888,8 @@ class Instruction cloopEmitCompareAndSet(operands, :uint8, ">") when "cia" cloopEmitCompareAndSet(operands, :uint32, ">") + when "cqa" + cloopEmitCompareAndSet(operands, :uint64, ">") when "cpa" cloopEmitCompareAndSet(operands, :uint, ">") @@ -791,6 +897,8 @@ class Instruction cloopEmitCompareAndSet(operands, :uint8, ">=") when "ciaeq" cloopEmitCompareAndSet(operands, :uint32, ">=") + when "cqaeq" + cloopEmitCompareAndSet(operands, :uint64, ">=") when "cpaeq" cloopEmitCompareAndSet(operands, :uint, ">=") @@ -798,6 +906,8 @@ class Instruction cloopEmitCompareAndSet(operands, :uint8, "<") when "cib" cloopEmitCompareAndSet(operands, :uint32, "<") + when "cqb" + cloopEmitCompareAndSet(operands, :uint64, "<") when "cpb" cloopEmitCompareAndSet(operands, :uint, "<") @@ -805,6 +915,8 @@ class Instruction cloopEmitCompareAndSet(operands, :uint8, "<=") when "cibeq" cloopEmitCompareAndSet(operands, :uint32, "<=") + when "cqbeq" + cloopEmitCompareAndSet(operands, :uint64, "<=") when "cpbeq" cloopEmitCompareAndSet(operands, :uint, "<=") @@ -812,6 +924,8 @@ class Instruction cloopEmitCompareAndSet(operands, :int8, ">") when "cigt" cloopEmitCompareAndSet(operands, :int32, 
">") + when "cqgt" + cloopEmitCompareAndSet(operands, :int64, ">") when "cpgt" cloopEmitCompareAndSet(operands, :int, ">") @@ -819,6 +933,8 @@ class Instruction cloopEmitCompareAndSet(operands, :int8, ">=") when "cigteq" cloopEmitCompareAndSet(operands, :int32, ">=") + when "cqgteq" + cloopEmitCompareAndSet(operands, :int64, ">=") when "cpgteq" cloopEmitCompareAndSet(operands, :int, ">=") @@ -826,6 +942,8 @@ class Instruction cloopEmitCompareAndSet(operands, :int8, "<") when "cilt" cloopEmitCompareAndSet(operands, :int32, "<") + when "cqlt" + cloopEmitCompareAndSet(operands, :int64, "<") when "cplt" cloopEmitCompareAndSet(operands, :int, "<") @@ -833,6 +951,8 @@ class Instruction cloopEmitCompareAndSet(operands, :int8, "<=") when "cilteq" cloopEmitCompareAndSet(operands, :int32, "<=") + when "cqlteq" + cloopEmitCompareAndSet(operands, :int64, "<=") when "cplteq" cloopEmitCompareAndSet(operands, :int, "<=") @@ -840,6 +960,8 @@ class Instruction cloopEmitTestSet(operands, :int8, "< 0") when "tis" cloopEmitTestSet(operands, :int32, "< 0") + when "tqs" + cloopEmitTestSet(operands, :int64, "< 0") when "tps" cloopEmitTestSet(operands, :int, "< 0") @@ -847,6 +969,8 @@ class Instruction cloopEmitTestSet(operands, :int8, "== 0") when "tiz" cloopEmitTestSet(operands, :int32, "== 0") + when "tqz" + cloopEmitTestSet(operands, :int64, "== 0") when "tpz" cloopEmitTestSet(operands, :int, "== 0") @@ -854,6 +978,8 @@ class Instruction cloopEmitTestSet(operands, :int8, "!= 0") when "tinz" cloopEmitTestSet(operands, :int32, "!= 0") + when "tqnz" + cloopEmitTestSet(operands, :int64, "!= 0") when "tpnz" cloopEmitTestSet(operands, :int, "!= 0") @@ -864,7 +990,9 @@ class Instruction $asm.putc "{" $asm.putc " int64_t temp = t0.i32; // sign extend the low 32bit" $asm.putc " t0.i32 = temp; // low word" + $asm.putc " t0.clearHighWord();" $asm.putc " t1.i32 = uint64_t(temp) >> 32; // high word" + $asm.putc " t1.clearHighWord();" $asm.putc "}" # 64-bit instruction: idivi op1 (based on X64) @@ -884,7 +1012,9 @@ class Instruction $asm.putc " int64_t dividend = (int64_t(t1.u32) << 32) | t0.u32;" $asm.putc " int64_t divisor = #{operands[0].clValue(:int)};" $asm.putc " t1.i32 = dividend % divisor; // remainder" + $asm.putc " t1.clearHighWord();" $asm.putc " t0.i32 = dividend / divisor; // quotient" + $asm.putc " t0.clearHighWord();" $asm.putc "}" # 32-bit instruction: fii2d int32LoOp int32HiOp dblOp (based on ARMv7) @@ -897,15 +1027,15 @@ class Instruction when "fd2ii" $asm.putc "Double2Ints(#{operands[0].clValue(:double)}, #{operands[1].clValue}, #{operands[2].clValue});" - # 64-bit instruction: fp2d int64Op dblOp (based on X64) + # 64-bit instruction: fq2d int64Op dblOp (based on X64) # Copy a bit-encoded double in a 64-bit int register to a double register. - when "fp2d" + when "fq2d" $asm.putc "#{operands[1].clValue(:double)} = #{operands[0].clValue(:castToDouble)};" - # 64-bit instruction: fd2p dblOp int64Op (based on X64 instruction set) + # 64-bit instruction: fd2q dblOp int64Op (based on X64 instruction set) # Copy a double as a bit-encoded double into a 64-bit int register. 
- when "fd2p" - $asm.putc "#{operands[1].clValue(:voidPtr)} = #{operands[0].clValue(:castToVoidPtr)};" + when "fd2q" + $asm.putc "#{operands[1].clValue(:int64)} = #{operands[0].clValue(:castToInt64)};" when "leai" operands[0].cloopEmitLea(operands[1], :int32) @@ -926,6 +1056,13 @@ class Instruction when "baddinz" cloopEmitOpAndBranch(operands, "+", :int32, "!= 0") + when "baddqs" + cloopEmitOpAndBranch(operands, "+", :int64, "< 0") + when "baddqz" + cloopEmitOpAndBranch(operands, "+", :int64, "== 0") + when "baddqnz" + cloopEmitOpAndBranch(operands, "+", :int64, "!= 0") + when "baddps" cloopEmitOpAndBranch(operands, "+", :int, "< 0") when "baddpz" diff --git a/Source/JavaScriptCore/offlineasm/instructions.rb b/Source/JavaScriptCore/offlineasm/instructions.rb index ddb1bb90f..e047b2a16 100644 --- a/Source/JavaScriptCore/offlineasm/instructions.rb +++ b/Source/JavaScriptCore/offlineasm/instructions.rb @@ -33,15 +33,19 @@ MACRO_INSTRUCTIONS = "andi", "lshifti", "lshiftp", + "lshiftq", "muli", "negi", "negp", + "negq", "noti", "ori", "rshifti", "urshifti", "rshiftp", "urshiftp", + "rshiftq", + "urshiftq", "subi", "xori", "loadi", @@ -63,8 +67,8 @@ MACRO_INSTRUCTIONS = "ci2d", "fii2d", # usage: fii2d <gpr with least significant bits>, <gpr with most significant bits>, <fpr> "fd2ii", # usage: fd2ii <fpr>, <gpr with least significant bits>, <gpr with most significant bits> - "fp2d", - "fd2p", + "fq2d", + "fd2q", "bdeq", "bdneq", "bdgt", @@ -84,8 +88,8 @@ MACRO_INSTRUCTIONS = "pop", "push", "move", - "sxi2p", - "zxi2p", + "sxi2q", + "zxi2q", "nop", "bieq", "bineq", @@ -199,6 +203,46 @@ MACRO_INSTRUCTIONS = "baddps", "baddpz", "baddpnz", + "tqs", + "tqz", + "tqnz", + "peekq", + "pokeq", + "bqeq", + "bqneq", + "bqa", + "bqaeq", + "bqb", + "bqbeq", + "bqgt", + "bqgteq", + "bqlt", + "bqlteq", + "addq", + "mulq", + "andq", + "orq", + "subq", + "xorq", + "loadq", + "cqeq", + "cqneq", + "cqa", + "cqaeq", + "cqb", + "cqbeq", + "cqgt", + "cqgteq", + "cqlt", + "cqlteq", + "storeq", + "btqs", + "btqz", + "btqnz", + "baddqo", + "baddqs", + "baddqz", + "baddqnz", "bo", "bs", "bz", diff --git a/Source/JavaScriptCore/offlineasm/x86.rb b/Source/JavaScriptCore/offlineasm/x86.rb index 033c200d7..67cbd14b0 100644 --- a/Source/JavaScriptCore/offlineasm/x86.rb +++ b/Source/JavaScriptCore/offlineasm/x86.rb @@ -45,12 +45,15 @@ class SpecialRegister < NoChildren "%" + @name + "d" when :ptr "%" + @name + when :quad + "%" + @name else raise end end def x86CallOperand(kind) - "*#{x86Operand(kind)}" + # Call operands are not allowed to be partial registers. + "*#{x86Operand(:quad)}" end end @@ -82,6 +85,8 @@ class RegisterID "%eax" when :ptr isX64 ? "%rax" : "%eax" + when :quad + isX64 ? "%rax" : raise else raise end @@ -95,6 +100,8 @@ class RegisterID "%edx" when :ptr isX64 ? "%rdx" : "%edx" + when :quad + isX64 ? "%rdx" : raise else raise end @@ -108,6 +115,8 @@ class RegisterID "%ecx" when :ptr isX64 ? "%rcx" : "%ecx" + when :quad + isX64 ? "%rcx" : raise else raise end @@ -121,6 +130,8 @@ class RegisterID "%ebx" when :ptr isX64 ? "%rbx" : "%ebx" + when :quad + isX64 ? "%rbx" : raise else raise end @@ -134,6 +145,8 @@ class RegisterID "%esi" when :ptr isX64 ? "%rsi" : "%esi" + when :quad + isX64 ? "%rsi" : raise else raise end @@ -146,6 +159,8 @@ class RegisterID "%r13d" when :ptr "%r13" + when :quad + "%r13" else raise end @@ -173,6 +188,8 @@ class RegisterID "%esp" when :ptr isX64 ? "%rsp" : "%esp" + when :quad + isX64 ? 
"%rsp" : raise else raise end @@ -187,6 +204,8 @@ class RegisterID "%edi" when :ptr "%rdi" + when :quad + "%rdi" end when "t6" raise "Cannot use #{name} in 32-bit X86 at #{codeOriginString}" unless isX64 @@ -197,6 +216,8 @@ class RegisterID "%r10d" when :ptr "%r10" + when :quad + "%r10" end when "csr1" raise "Cannot use #{name} in 32-bit X86 at #{codeOriginString}" unless isX64 @@ -207,6 +228,8 @@ class RegisterID "%r14d" when :ptr "%r14" + when :quad + "%r14" end when "csr2" raise "Cannot use #{name} in 32-bit X86 at #{codeOriginString}" unless isX64 @@ -217,13 +240,15 @@ class RegisterID "%r15d" when :ptr "%r15" + when :quad + "%r15" end else raise "Bad register #{name} for X86 at #{codeOriginString}" end end def x86CallOperand(kind) - "*#{x86Operand(kind)}" + isX64 ? "*#{x86Operand(:quad)}" : "*#{x86Operand(:ptr)}" end end @@ -394,6 +419,8 @@ class Instruction "l" when :ptr isX64 ? "q" : "l" + when :quad + isX64 ? "q" : raise when :double "sd" else @@ -411,6 +438,8 @@ class Instruction 4 when :ptr isX64 ? 8 : 4 + when :quad + isX64 ? 8 : raise when :double 8 else @@ -607,9 +636,17 @@ class Instruction def handleMove if Immediate.new(nil, 0) == operands[0] and operands[1].is_a? RegisterID - $asm.puts "xor#{x86Suffix(:ptr)} #{operands[1].x86Operand(:ptr)}, #{operands[1].x86Operand(:ptr)}" + if isX64 + $asm.puts "xor#{x86Suffix(:quad)} #{operands[1].x86Operand(:quad)}, #{operands[1].x86Operand(:quad)}" + else + $asm.puts "xor#{x86Suffix(:ptr)} #{operands[1].x86Operand(:ptr)}, #{operands[1].x86Operand(:ptr)}" + end elsif operands[0] != operands[1] - $asm.puts "mov#{x86Suffix(:ptr)} #{x86Operands(:ptr, :ptr)}" + if isX64 + $asm.puts "mov#{x86Suffix(:quad)} #{x86Operands(:quad, :quad)}" + else + $asm.puts "mov#{x86Suffix(:ptr)} #{x86Operands(:ptr, :ptr)}" + end end end @@ -632,54 +669,76 @@ class Instruction handleX86Add(:int) when "addp" handleX86Add(:ptr) + when "addq" + handleX86Add(:quad) when "andi" handleX86Op("andl", :int) when "andp" handleX86Op("and#{x86Suffix(:ptr)}", :ptr) + when "andq" + handleX86Op("and#{x86Suffix(:quad)}", :quad) when "lshifti" handleX86Shift("sall", :int) when "lshiftp" handleX86Shift("sal#{x86Suffix(:ptr)}", :ptr) + when "lshiftq" + handleX86Shift("sal#{x86Suffix(:quad)}", :quad) when "muli" handleX86Mul(:int) when "mulp" handleX86Mul(:ptr) + when "mulq" + handleX86Mul(:quad) when "negi" $asm.puts "negl #{x86Operands(:int)}" when "negp" $asm.puts "neg#{x86Suffix(:ptr)} #{x86Operands(:ptr)}" + when "negq" + $asm.puts "neg#{x86Suffix(:quad)} #{x86Operands(:quad)}" when "noti" $asm.puts "notl #{x86Operands(:int)}" when "ori" handleX86Op("orl", :int) when "orp" handleX86Op("or#{x86Suffix(:ptr)}", :ptr) + when "orq" + handleX86Op("or#{x86Suffix(:quad)}", :quad) when "rshifti" handleX86Shift("sarl", :int) when "rshiftp" handleX86Shift("sar#{x86Suffix(:ptr)}", :ptr) + when "rshiftq" + handleX86Shift("sar#{x86Suffix(:quad)}", :quad) when "urshifti" handleX86Shift("shrl", :int) when "urshiftp" handleX86Shift("shr#{x86Suffix(:ptr)}", :ptr) + when "urshiftq" + handleX86Shift("shr#{x86Suffix(:quad)}", :quad) when "subi" handleX86Sub(:int) when "subp" handleX86Sub(:ptr) + when "subq" + handleX86Sub(:quad) when "xori" handleX86Op("xorl", :int) when "xorp" handleX86Op("xor#{x86Suffix(:ptr)}", :ptr) + when "xorq" + handleX86Op("xor#{x86Suffix(:quad)}", :quad) when "loadi", "storei" $asm.puts "movl #{x86Operands(:int, :int)}" when "loadis" if isX64 - $asm.puts "movslq #{x86Operands(:int, :ptr)}" + $asm.puts "movslq #{x86Operands(:int, :quad)}" else $asm.puts "movl 
#{x86Operands(:int, :int)}" end when "loadp", "storep" $asm.puts "mov#{x86Suffix(:ptr)} #{x86Operands(:ptr, :ptr)}" + when "loadq", "storeq" + $asm.puts "mov#{x86Suffix(:quad)} #{x86Operands(:quad, :quad)}" when "loadb" $asm.puts "movzbl #{operands[0].x86Operand(:byte)}, #{operands[1].x86Operand(:int)}" when "loadbs" @@ -761,60 +820,72 @@ class Instruction $asm.puts "push #{operands[0].x86Operand(:ptr)}" when "move" handleMove - when "sxi2p" - if isX64 - $asm.puts "movslq #{operands[0].x86Operand(:int)}, #{operands[1].x86Operand(:ptr)}" - else - handleMove - end - when "zxi2p" - if isX64 - $asm.puts "movl #{operands[0].x86Operand(:int)}, #{operands[1].x86Operand(:int)}" - else - handleMove - end + when "sxi2q" + $asm.puts "movslq #{operands[0].x86Operand(:int)}, #{operands[1].x86Operand(:quad)}" + when "zxi2q" + $asm.puts "movl #{operands[0].x86Operand(:int)}, #{operands[1].x86Operand(:int)}" when "nop" $asm.puts "nop" when "bieq" handleX86IntBranch("je", :int) when "bpeq" handleX86IntBranch("je", :ptr) + when "bqeq" + handleX86IntBranch("je", :quad) when "bineq" handleX86IntBranch("jne", :int) when "bpneq" handleX86IntBranch("jne", :ptr) + when "bqneq" + handleX86IntBranch("jne", :quad) when "bia" handleX86IntBranch("ja", :int) when "bpa" handleX86IntBranch("ja", :ptr) + when "bqa" + handleX86IntBranch("ja", :quad) when "biaeq" handleX86IntBranch("jae", :int) when "bpaeq" handleX86IntBranch("jae", :ptr) + when "bqaeq" + handleX86IntBranch("jae", :quad) when "bib" handleX86IntBranch("jb", :int) when "bpb" handleX86IntBranch("jb", :ptr) + when "bqb" + handleX86IntBranch("jb", :quad) when "bibeq" handleX86IntBranch("jbe", :int) when "bpbeq" handleX86IntBranch("jbe", :ptr) + when "bqbeq" + handleX86IntBranch("jbe", :quad) when "bigt" handleX86IntBranch("jg", :int) when "bpgt" handleX86IntBranch("jg", :ptr) + when "bqgt" + handleX86IntBranch("jg", :quad) when "bigteq" handleX86IntBranch("jge", :int) when "bpgteq" handleX86IntBranch("jge", :ptr) + when "bqgteq" + handleX86IntBranch("jge", :quad) when "bilt" handleX86IntBranch("jl", :int) when "bplt" handleX86IntBranch("jl", :ptr) + when "bqlt" + handleX86IntBranch("jl", :quad) when "bilteq" handleX86IntBranch("jle", :int) when "bplteq" handleX86IntBranch("jle", :ptr) + when "bqlteq" + handleX86IntBranch("jle", :quad) when "bbeq" handleX86IntBranch("je", :byte) when "bbneq" @@ -839,14 +910,20 @@ class Instruction handleX86BranchTest("js", :int) when "btps" handleX86BranchTest("js", :ptr) + when "btqs" + handleX86BranchTest("js", :quad) when "btiz" handleX86BranchTest("jz", :int) when "btpz" handleX86BranchTest("jz", :ptr) + when "btqz" + handleX86BranchTest("jz", :quad) when "btinz" handleX86BranchTest("jnz", :int) when "btpnz" handleX86BranchTest("jnz", :ptr) + when "btqnz" + handleX86BranchTest("jnz", :quad) when "btbs" handleX86BranchTest("js", :byte) when "btbz" @@ -859,18 +936,26 @@ class Instruction handleX86OpBranch("addl", "jo", :int) when "baddpo" handleX86OpBranch("add#{x86Suffix(:ptr)}", "jo", :ptr) + when "baddqo" + handleX86OpBranch("add#{x86Suffix(:quad)}", "jo", :quad) when "baddis" handleX86OpBranch("addl", "js", :int) when "baddps" handleX86OpBranch("add#{x86Suffix(:ptr)}", "js", :ptr) + when "baddqs" + handleX86OpBranch("add#{x86Suffix(:quad)}", "js", :quad) when "baddiz" handleX86OpBranch("addl", "jz", :int) when "baddpz" handleX86OpBranch("add#{x86Suffix(:ptr)}", "jz", :ptr) + when "baddqz" + handleX86OpBranch("add#{x86Suffix(:quad)}", "jz", :quad) when "baddinz" handleX86OpBranch("addl", "jnz", :int) when "baddpnz" 
handleX86OpBranch("add#{x86Suffix(:ptr)}", "jnz", :ptr) + when "baddqnz" + handleX86OpBranch("add#{x86Suffix(:quad)}", "jnz", :quad) when "bsubio" handleX86SubBranch("jo", :int) when "bsubis" @@ -907,60 +992,80 @@ class Instruction handleX86IntCompareSet("sete", :byte) when "cpeq" handleX86IntCompareSet("sete", :ptr) + when "cqeq" + handleX86IntCompareSet("sete", :quad) when "cineq" handleX86IntCompareSet("setne", :int) when "cbneq" handleX86IntCompareSet("setne", :byte) when "cpneq" handleX86IntCompareSet("setne", :ptr) + when "cqneq" + handleX86IntCompareSet("setne", :quad) when "cia" handleX86IntCompareSet("seta", :int) when "cba" handleX86IntCompareSet("seta", :byte) when "cpa" handleX86IntCompareSet("seta", :ptr) + when "cqa" + handleX86IntCompareSet("seta", :quad) when "ciaeq" handleX86IntCompareSet("setae", :int) when "cbaeq" handleX86IntCompareSet("setae", :byte) when "cpaeq" handleX86IntCompareSet("setae", :ptr) + when "cqaeq" + handleX86IntCompareSet("setae", :quad) when "cib" handleX86IntCompareSet("setb", :int) when "cbb" handleX86IntCompareSet("setb", :byte) when "cpb" handleX86IntCompareSet("setb", :ptr) + when "cqb" + handleX86IntCompareSet("setb", :quad) when "cibeq" handleX86IntCompareSet("setbe", :int) when "cbbeq" handleX86IntCompareSet("setbe", :byte) when "cpbeq" handleX86IntCompareSet("setbe", :ptr) + when "cqbeq" + handleX86IntCompareSet("setbe", :quad) when "cigt" handleX86IntCompareSet("setg", :int) when "cbgt" handleX86IntCompareSet("setg", :byte) when "cpgt" handleX86IntCompareSet("setg", :ptr) + when "cqgt" + handleX86IntCompareSet("setg", :quad) when "cigteq" handleX86IntCompareSet("setge", :int) when "cbgteq" handleX86IntCompareSet("setge", :byte) when "cpgteq" handleX86IntCompareSet("setge", :ptr) + when "cqgteq" + handleX86IntCompareSet("setge", :quad) when "cilt" handleX86IntCompareSet("setl", :int) when "cblt" handleX86IntCompareSet("setl", :byte) when "cplt" handleX86IntCompareSet("setl", :ptr) + when "cqlt" + handleX86IntCompareSet("setl", :quad) when "cilteq" handleX86IntCompareSet("setle", :int) when "cblteq" handleX86IntCompareSet("setle", :byte) when "cplteq" handleX86IntCompareSet("setle", :ptr) + when "cqlteq" + handleX86IntCompareSet("setle", :quad) when "tis" handleX86SetTest("sets", :int) when "tiz" @@ -973,6 +1078,12 @@ class Instruction handleX86SetTest("setz", :ptr) when "tpnz" handleX86SetTest("setnz", :ptr) + when "tqs" + handleX86SetTest("sets", :quad) + when "tqz" + handleX86SetTest("setz", :quad) + when "tqnz" + handleX86SetTest("setnz", :quad) when "tbs" handleX86SetTest("sets", :byte) when "tbz" @@ -982,9 +1093,15 @@ class Instruction when "peek" sp = RegisterID.new(nil, "sp") $asm.puts "mov#{x86Suffix(:ptr)} #{operands[0].value * x86Bytes(:ptr)}(#{sp.x86Operand(:ptr)}), #{operands[1].x86Operand(:ptr)}" + when "peekq" + sp = RegisterID.new(nil, "sp") + $asm.puts "mov#{x86Suffix(:quad)} #{operands[0].value * x86Bytes(:quad)}(#{sp.x86Operand(:ptr)}), #{operands[1].x86Operand(:quad)}" when "poke" sp = RegisterID.new(nil, "sp") $asm.puts "mov#{x86Suffix(:ptr)} #{operands[0].x86Operand(:ptr)}, #{operands[1].value * x86Bytes(:ptr)}(#{sp.x86Operand(:ptr)})" + when "pokeq" + sp = RegisterID.new(nil, "sp") + $asm.puts "mov#{x86Suffix(:quad)} #{operands[0].x86Operand(:quad)}, #{operands[1].value * x86Bytes(:quad)}(#{sp.x86Operand(:ptr)})" when "cdqi" $asm.puts "cdq" when "idivi" @@ -999,10 +1116,10 @@ class Instruction $asm.puts "movsd #{operands[0].x86Operand(:double)}, %xmm7" $asm.puts "psrlq $32, %xmm7" $asm.puts "movsd %xmm7, 
#{operands[2].x86Operand(:int)}" - when "fp2d" - $asm.puts "movd #{operands[0].x86Operand(:ptr)}, #{operands[1].x86Operand(:double)}" - when "fd2p" - $asm.puts "movd #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:ptr)}" + when "fq2d" + $asm.puts "movd #{operands[0].x86Operand(:quad)}, #{operands[1].x86Operand(:double)}" + when "fd2q" + $asm.puts "movd #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:quad)}" when "bo" $asm.puts "jo #{operands[0].asmLabel}" when "bs" diff --git a/Source/JavaScriptCore/parser/Lexer.cpp b/Source/JavaScriptCore/parser/Lexer.cpp index 8b2020987..477d403c1 100644 --- a/Source/JavaScriptCore/parser/Lexer.cpp +++ b/Source/JavaScriptCore/parser/Lexer.cpp @@ -355,6 +355,139 @@ static const unsigned short typesOfLatin1Characters[256] = { /* 255 - Ll category */ CharacterIdentifierStart }; +// This table provides the character that results from \X where X is the index in the table beginning +// with SPACE. A table value of 0 means that more processing needs to be done. +static const LChar singleCharacterEscapeValuesForASCII[128] = { +/* 0 - Null */ 0, +/* 1 - Start of Heading */ 0, +/* 2 - Start of Text */ 0, +/* 3 - End of Text */ 0, +/* 4 - End of Transm. */ 0, +/* 5 - Enquiry */ 0, +/* 6 - Acknowledgment */ 0, +/* 7 - Bell */ 0, +/* 8 - Back Space */ 0, +/* 9 - Horizontal Tab */ 0, +/* 10 - Line Feed */ 0, +/* 11 - Vertical Tab */ 0, +/* 12 - Form Feed */ 0, +/* 13 - Carriage Return */ 0, +/* 14 - Shift Out */ 0, +/* 15 - Shift In */ 0, +/* 16 - Data Line Escape */ 0, +/* 17 - Device Control 1 */ 0, +/* 18 - Device Control 2 */ 0, +/* 19 - Device Control 3 */ 0, +/* 20 - Device Control 4 */ 0, +/* 21 - Negative Ack. */ 0, +/* 22 - Synchronous Idle */ 0, +/* 23 - End of Transmit */ 0, +/* 24 - Cancel */ 0, +/* 25 - End of Medium */ 0, +/* 26 - Substitute */ 0, +/* 27 - Escape */ 0, +/* 28 - File Separator */ 0, +/* 29 - Group Separator */ 0, +/* 30 - Record Separator */ 0, +/* 31 - Unit Separator */ 0, +/* 32 - Space */ ' ', +/* 33 - ! */ '!', +/* 34 - " */ '"', +/* 35 - # */ '#', +/* 36 - $ */ '$', +/* 37 - % */ '%', +/* 38 - & */ '&', +/* 39 - ' */ '\'', +/* 40 - ( */ '(', +/* 41 - ) */ ')', +/* 42 - * */ '*', +/* 43 - + */ '+', +/* 44 - , */ ',', +/* 45 - - */ '-', +/* 46 - . */ '.', +/* 47 - / */ '/', +/* 48 - 0 */ 0, +/* 49 - 1 */ 0, +/* 50 - 2 */ 0, +/* 51 - 3 */ 0, +/* 52 - 4 */ 0, +/* 53 - 5 */ 0, +/* 54 - 6 */ 0, +/* 55 - 7 */ 0, +/* 56 - 8 */ 0, +/* 57 - 9 */ 0, +/* 58 - : */ ':', +/* 59 - ; */ ';', +/* 60 - < */ '<', +/* 61 - = */ '=', +/* 62 - > */ '>', +/* 63 - ? 
*/ '?', +/* 64 - @ */ '@', +/* 65 - A */ 'A', +/* 66 - B */ 'B', +/* 67 - C */ 'C', +/* 68 - D */ 'D', +/* 69 - E */ 'E', +/* 70 - F */ 'F', +/* 71 - G */ 'G', +/* 72 - H */ 'H', +/* 73 - I */ 'I', +/* 74 - J */ 'J', +/* 75 - K */ 'K', +/* 76 - L */ 'L', +/* 77 - M */ 'M', +/* 78 - N */ 'N', +/* 79 - O */ 'O', +/* 80 - P */ 'P', +/* 81 - Q */ 'Q', +/* 82 - R */ 'R', +/* 83 - S */ 'S', +/* 84 - T */ 'T', +/* 85 - U */ 'U', +/* 86 - V */ 'V', +/* 87 - W */ 'W', +/* 88 - X */ 'X', +/* 89 - Y */ 'Y', +/* 90 - Z */ 'Z', +/* 91 - [ */ '[', +/* 92 - \ */ '\\', +/* 93 - ] */ ']', +/* 94 - ^ */ '^', +/* 95 - _ */ '_', +/* 96 - ` */ '`', +/* 97 - a */ 'a', +/* 98 - b */ 0x08, +/* 99 - c */ 'c', +/* 100 - d */ 'd', +/* 101 - e */ 'e', +/* 102 - f */ 0x0C, +/* 103 - g */ 'g', +/* 104 - h */ 'h', +/* 105 - i */ 'i', +/* 106 - j */ 'j', +/* 107 - k */ 'k', +/* 108 - l */ 'l', +/* 109 - m */ 'm', +/* 110 - n */ 0x0A, +/* 111 - o */ 'o', +/* 112 - p */ 'p', +/* 113 - q */ 'q', +/* 114 - r */ 0x0D, +/* 115 - s */ 's', +/* 116 - t */ 0x09, +/* 117 - u */ 0, +/* 118 - v */ 0x0B, +/* 119 - w */ 'w', +/* 120 - x */ 0, +/* 121 - y */ 'y', +/* 122 - z */ 'z', +/* 123 - { */ '{', +/* 124 - | */ '|', +/* 125 - } */ '}', +/* 126 - ~ */ '~', +/* 127 - Delete */ 0 +}; + template <typename T> Lexer<T>::Lexer(JSGlobalData* globalData) : m_isReparsing(false) @@ -547,30 +680,13 @@ static ALWAYS_INLINE bool isIdentPart(UChar c) return isLatin1(c) ? isIdentPart(static_cast<LChar>(c)) : isNonLatin1IdentPart(c); } -static inline int singleEscape(int c) +static inline LChar singleEscape(int c) { - switch (c) { - case 'b': - return 0x08; - case 't': - return 0x09; - case 'n': - return 0x0A; - case 'v': - return 0x0B; - case 'f': - return 0x0C; - case 'r': - return 0x0D; - case '\\': - return '\\'; - case '\'': - return '\''; - case '"': - return '"'; - default: - return 0; + if (c < 128) { + ASSERT(static_cast<size_t>(c) < ARRAY_SIZE(singleCharacterEscapeValuesForASCII)); + return singleCharacterEscapeValuesForASCII[c]; } + return 0; } template <typename T> @@ -842,7 +958,7 @@ template <bool shouldBuildStrings> ALWAYS_INLINE bool Lexer<T>::parseString(JSTo append8(stringStart, currentCharacter() - stringStart); shift(); - int escape = singleEscape(m_current); + LChar escape = singleEscape(m_current); // Most common escape sequences first if (escape) { @@ -907,7 +1023,7 @@ template <bool shouldBuildStrings> bool Lexer<T>::parseStringSlowCase(JSTokenDat append16(stringStart, currentCharacter() - stringStart); shift(); - int escape = singleEscape(m_current); + LChar escape = singleEscape(m_current); // Most common escape sequences first if (escape) { @@ -1570,12 +1686,25 @@ returnError: } template <typename T> +static inline void orCharacter(UChar&, UChar); + +template <> +inline void orCharacter<LChar>(UChar&, UChar) { } + +template <> +inline void orCharacter<UChar>(UChar& orAccumulator, UChar character) +{ + orAccumulator |= character; +} + +template <typename T> bool Lexer<T>::scanRegExp(const Identifier*& pattern, const Identifier*& flags, UChar patternPrefix) { ASSERT(m_buffer16.isEmpty()); bool lastWasEscape = false; bool inBrackets = false; + UChar charactersOredTogether = 0; if (patternPrefix) { ASSERT(!isLineTerminator(patternPrefix)); @@ -1598,6 +1727,7 @@ bool Lexer<T>::scanRegExp(const Identifier*& pattern, const Identifier*& flags, break; record16(prev); + orCharacter<T>(charactersOredTogether, prev); if (lastWasEscape) { lastWasEscape = false; @@ -1617,15 +1747,18 @@ bool Lexer<T>::scanRegExp(const Identifier*& 
pattern, const Identifier*& flags, } } - pattern = makeIdentifierSameType(m_buffer16.data(), m_buffer16.size()); + pattern = makeRightSizedIdentifier(m_buffer16.data(), m_buffer16.size(), charactersOredTogether); + m_buffer16.resize(0); + charactersOredTogether = 0; while (isIdentPart(m_current)) { record16(m_current); + orCharacter<T>(charactersOredTogether, m_current); shift(); } - flags = makeIdentifierSameType(m_buffer16.data(), m_buffer16.size()); + flags = makeRightSizedIdentifier(m_buffer16.data(), m_buffer16.size(), charactersOredTogether); m_buffer16.resize(0); return true; diff --git a/Source/JavaScriptCore/parser/Lexer.h b/Source/JavaScriptCore/parser/Lexer.h index aa1599b96..78c8c8cbd 100644 --- a/Source/JavaScriptCore/parser/Lexer.h +++ b/Source/JavaScriptCore/parser/Lexer.h @@ -148,7 +148,7 @@ private: ALWAYS_INLINE const Identifier* makeIdentifier(const UChar* characters, size_t length); ALWAYS_INLINE const Identifier* makeLCharIdentifier(const LChar* characters, size_t length); ALWAYS_INLINE const Identifier* makeLCharIdentifier(const UChar* characters, size_t length); - ALWAYS_INLINE const Identifier* makeIdentifierSameType(const UChar* characters, size_t length); + ALWAYS_INLINE const Identifier* makeRightSizedIdentifier(const UChar* characters, size_t length, UChar orAllChars); ALWAYS_INLINE const Identifier* makeIdentifierLCharFromUChar(const UChar* characters, size_t length); ALWAYS_INLINE bool lastTokenWasRestrKeyword() const; @@ -242,14 +242,17 @@ ALWAYS_INLINE const Identifier* Lexer<T>::makeIdentifier(const UChar* characters } template <> -ALWAYS_INLINE const Identifier* Lexer<LChar>::makeIdentifierSameType(const UChar* characters, size_t length) +ALWAYS_INLINE const Identifier* Lexer<LChar>::makeRightSizedIdentifier(const UChar* characters, size_t length, UChar) { return &m_arena->makeIdentifierLCharFromUChar(m_globalData, characters, length); } template <> -ALWAYS_INLINE const Identifier* Lexer<UChar>::makeIdentifierSameType(const UChar* characters, size_t length) +ALWAYS_INLINE const Identifier* Lexer<UChar>::makeRightSizedIdentifier(const UChar* characters, size_t length, UChar orAllChars) { + if (!(orAllChars & ~0xff)) + return &m_arena->makeIdentifierLCharFromUChar(m_globalData, characters, length); + return &m_arena->makeIdentifier(m_globalData, characters, length); } diff --git a/Source/JavaScriptCore/parser/Parser.h b/Source/JavaScriptCore/parser/Parser.h index 3b0316f81..615d09eb7 100644 --- a/Source/JavaScriptCore/parser/Parser.h +++ b/Source/JavaScriptCore/parser/Parser.h @@ -76,6 +76,49 @@ COMPILE_ASSERT(LastUntaggedToken < 64, LessThan64UntaggedTokens); enum SourceElementsMode { CheckForStrictMode, DontCheckForStrictMode }; enum FunctionRequirements { FunctionNoRequirements, FunctionNeedsName }; +struct ParserError { + enum ErrorType { ErrorNone, StackOverflow, SyntaxError, EvalError, OutOfMemory } m_type; + String m_message; + int m_line; + ParserError() + : m_type(ErrorNone) + , m_line(-1) + { + } + + ParserError(ErrorType type) + : m_type(type) + , m_line(-1) + { + } + + ParserError(ErrorType type, String msg, int line) + : m_type(type) + , m_message(msg) + , m_line(line) + { + } + + JSObject* toErrorObject(JSGlobalObject* globalObject, const SourceCode& source) + { + switch (m_type) { + case ErrorNone: + return 0; + case SyntaxError: + return addErrorInfo(globalObject->globalExec(), createSyntaxError(globalObject, m_message), m_line, source); + case EvalError: + return createSyntaxError(globalObject, m_message); + case StackOverflow: + return 
createStackOverflowError(globalObject); + case OutOfMemory: + return createOutOfMemoryError(globalObject); + } + CRASH(); + return createOutOfMemoryError(globalObject); // Appease Qt bot + } + +}; + template <typename T> inline bool isEvalNode() { return false; } template <> inline bool isEvalNode<EvalNode>() { return true; } @@ -370,7 +413,7 @@ public: ~Parser(); template <class ParsedNode> - PassRefPtr<ParsedNode> parse(JSGlobalObject* lexicalGlobalObject, Debugger*, ExecState*, JSObject**); + PassRefPtr<ParsedNode> parse(ParserError&); private: struct AllowInOverride { @@ -890,7 +933,7 @@ private: return m_lastTokenEnd; } - mutable const JSGlobalData* m_globalData; + JSGlobalData* m_globalData; const SourceCode* m_source; ParserArena* m_arena; OwnPtr<LexerType> m_lexer; @@ -935,12 +978,11 @@ private: }; }; + template <typename LexerType> template <class ParsedNode> -PassRefPtr<ParsedNode> Parser<LexerType>::parse(JSGlobalObject* lexicalGlobalObject, Debugger* debugger, ExecState* debuggerExecState, JSObject** exception) +PassRefPtr<ParsedNode> Parser<LexerType>::parse(ParserError& error) { - ASSERT(lexicalGlobalObject); - ASSERT(exception && !*exception); int errLine; String errMsg; @@ -971,7 +1013,7 @@ PassRefPtr<ParsedNode> Parser<LexerType>::parse(JSGlobalObject* lexicalGlobalObj JSTokenLocation location; location.line = m_lexer->lastLineNumber(); location.column = m_lexer->currentColumnNumber(); - result = ParsedNode::create(&lexicalGlobalObject->globalData(), + result = ParsedNode::create(m_globalData, location, m_sourceElements, m_varDeclarations ? &m_varDeclarations->data : 0, @@ -981,7 +1023,7 @@ PassRefPtr<ParsedNode> Parser<LexerType>::parse(JSGlobalObject* lexicalGlobalObj m_features, m_numConstants); result->setLoc(m_source->firstLine(), m_lastLine, m_lexer->currentColumnNumber()); - } else if (lexicalGlobalObject) { + } else { // We can never see a syntax error when reparsing a function, since we should have // reported the error when parsing the containing program or eval code. So if we're // parsing a function body node, we assume that what actually happened here is that @@ -989,35 +1031,31 @@ PassRefPtr<ParsedNode> Parser<LexerType>::parse(JSGlobalObject* lexicalGlobalObj // code we assume that it was a syntax error since running out of stack is much less // likely, and we are currently unable to distinguish between the two cases. 
if (isFunctionBodyNode(static_cast<ParsedNode*>(0)) || m_hasStackOverflow) - *exception = createStackOverflowError(lexicalGlobalObject); + error = ParserError::StackOverflow; else if (isEvalNode<ParsedNode>()) - *exception = createSyntaxError(lexicalGlobalObject, errMsg); + error = ParserError(ParserError::EvalError, errMsg, errLine); else - *exception = addErrorInfo(lexicalGlobalObject->globalExec(), createSyntaxError(lexicalGlobalObject, errMsg), errLine, *m_source); + error = ParserError(ParserError::SyntaxError, errMsg, errLine); } - if (debugger && !ParsedNode::scopeIsFunction) - debugger->sourceParsed(debuggerExecState, m_source->provider(), errLine, errMsg); - m_arena->reset(); return result.release(); } template <class ParsedNode> -PassRefPtr<ParsedNode> parse(JSGlobalData* globalData, JSGlobalObject* lexicalGlobalObject, const SourceCode& source, FunctionParameters* parameters, const Identifier& name, JSParserStrictness strictness, JSParserMode parserMode, Debugger* debugger, ExecState* execState, JSObject** exception) +PassRefPtr<ParsedNode> parse(JSGlobalData* globalData, const SourceCode& source, FunctionParameters* parameters, const Identifier& name, JSParserStrictness strictness, JSParserMode parserMode, ParserError& error) { SamplingRegion samplingRegion("Parsing"); ASSERT(!source.provider()->source().isNull()); - if (source.provider()->source().is8Bit()) { Parser< Lexer<LChar> > parser(globalData, source, parameters, name, strictness, parserMode); - return parser.parse<ParsedNode>(lexicalGlobalObject, debugger, execState, exception); + return parser.parse<ParsedNode>(error); } Parser< Lexer<UChar> > parser(globalData, source, parameters, name, strictness, parserMode); - return parser.parse<ParsedNode>(lexicalGlobalObject, debugger, execState, exception); + return parser.parse<ParsedNode>(error); } -} // namespace +} // namespace #endif diff --git a/Source/JavaScriptCore/parser/ParserModes.h b/Source/JavaScriptCore/parser/ParserModes.h new file mode 100644 index 000000000..41fb7fdf9 --- /dev/null +++ b/Source/JavaScriptCore/parser/ParserModes.h @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + + +#ifndef ParserModes_h +#define ParserModes_h + +namespace JSC { + +enum JSParserStrictness { JSParseNormal, JSParseStrict }; +enum JSParserMode { JSParseProgramCode, JSParseFunctionCode }; + +enum ProfilerMode { ProfilerOff, ProfilerOn }; +enum DebuggerMode { DebuggerOff, DebuggerOn }; + +} + +#endif diff --git a/Source/JavaScriptCore/parser/ParserTokens.h b/Source/JavaScriptCore/parser/ParserTokens.h index 6e6cec114..14191b95d 100644 --- a/Source/JavaScriptCore/parser/ParserTokens.h +++ b/Source/JavaScriptCore/parser/ParserTokens.h @@ -26,6 +26,8 @@ #ifndef ParserTokens_h #define ParserTokens_h +#include "ParserModes.h" + namespace JSC { class Identifier; @@ -161,9 +163,6 @@ struct JSToken { JSTokenLocation m_location; }; -enum JSParserStrictness { JSParseNormal, JSParseStrict }; -enum JSParserMode { JSParseProgramCode, JSParseFunctionCode }; - } diff --git a/Source/JavaScriptCore/runtime/Arguments.h b/Source/JavaScriptCore/runtime/Arguments.h index 7c8b69bd1..7961d4bc8 100644 --- a/Source/JavaScriptCore/runtime/Arguments.h +++ b/Source/JavaScriptCore/runtime/Arguments.h @@ -267,8 +267,7 @@ namespace JSC { m_overrodeCallee = false; m_overrodeCaller = false; m_isStrictMode = jsCast<FunctionExecutable*>(inlineCallFrame->executable.get())->isStrictMode(); - - ASSERT(!jsCast<FunctionExecutable*>(inlineCallFrame->executable.get())->symbolTable()->slowArguments()); + ASSERT(!jsCast<FunctionExecutable*>(inlineCallFrame->executable.get())->symbolTable(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct)->slowArguments()); // The bytecode generator omits op_tear_off_activation in cases of no // declared parameters, so we need to tear off immediately. diff --git a/Source/JavaScriptCore/runtime/ArrayConventions.h b/Source/JavaScriptCore/runtime/ArrayConventions.h index a557b1ef9..3177c6c97 100644 --- a/Source/JavaScriptCore/runtime/ArrayConventions.h +++ b/Source/JavaScriptCore/runtime/ArrayConventions.h @@ -58,7 +58,7 @@ namespace JSC { // These values have to be macros to be used in max() and min() without introducing // a PIC branch in Mach-O binaries, see <rdar://problem/5971391>. -#define MIN_SPARSE_ARRAY_INDEX 10000U +#define MIN_SPARSE_ARRAY_INDEX 100000U #define MAX_STORAGE_VECTOR_INDEX (MAX_STORAGE_VECTOR_LENGTH - 1) // 0xFFFFFFFF is a bit weird -- is not an array index even though it's an integer. #define MAX_ARRAY_INDEX 0xFFFFFFFEU diff --git a/Source/JavaScriptCore/runtime/CachedTranscendentalFunction.h b/Source/JavaScriptCore/runtime/CachedTranscendentalFunction.h index f31b4a07f..62a01dbcb 100644 --- a/Source/JavaScriptCore/runtime/CachedTranscendentalFunction.h +++ b/Source/JavaScriptCore/runtime/CachedTranscendentalFunction.h @@ -74,8 +74,8 @@ private: // Lazily allocate the table, populate with NaN->NaN mapping. m_cache = static_cast<CacheEntry*>(fastMalloc(s_cacheSize * sizeof(CacheEntry))); for (unsigned x = 0; x < s_cacheSize; ++x) { - m_cache[x].operand = std::numeric_limits<double>::quiet_NaN(); - m_cache[x].result = std::numeric_limits<double>::quiet_NaN(); + m_cache[x].operand = QNaN; + m_cache[x].result = QNaN; } } diff --git a/Source/JavaScriptCore/runtime/CodeCache.cpp b/Source/JavaScriptCore/runtime/CodeCache.cpp new file mode 100644 index 000000000..4de760e49 --- /dev/null +++ b/Source/JavaScriptCore/runtime/CodeCache.cpp @@ -0,0 +1,186 @@ +/* + * Copyright (C) 2012 Apple Inc. All Rights Reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" + +#include "CodeCache.h" + +#include "BytecodeGenerator.h" +#include "CodeSpecializationKind.h" +#include "Parser.h" +#include "StrongInlines.h" +#include "UnlinkedCodeBlock.h" + +namespace JSC { + +CodeCache::CodeCache() + : m_randomGenerator(static_cast<uint32_t>(randomNumber() * UINT32_MAX)) +{ +} + +CodeCache::~CodeCache() +{ +} + +CodeCache::CodeBlockKey CodeCache::makeCodeBlockKey(const SourceCode& source, CodeCache::CodeType type, JSParserStrictness strictness) +{ + return std::make_pair(source.toString(), (type << 1) | strictness); +} + +template <typename T> struct CacheTypes { }; + +template <> struct CacheTypes<UnlinkedProgramCodeBlock> { + typedef JSC::ProgramNode RootNode; + static const CodeCache::CodeType codeType = CodeCache::ProgramType; +}; + +template <> struct CacheTypes<UnlinkedEvalCodeBlock> { + typedef JSC::EvalNode RootNode; + static const CodeCache::CodeType codeType = CodeCache::EvalType; +}; + +template <class UnlinkedCodeBlockType, class ExecutableType> +UnlinkedCodeBlockType* CodeCache::getCodeBlock(JSGlobalData& globalData, ExecutableType* executable, const SourceCode& source, JSParserStrictness strictness, DebuggerMode debuggerMode, ProfilerMode profilerMode, ParserError& error) +{ + CodeBlockKey key = makeCodeBlockKey(source, CacheTypes<UnlinkedCodeBlockType>::codeType, strictness); + bool storeInCache = false; + if (debuggerMode == DebuggerOff && profilerMode == ProfilerOff) { + CodeBlockIndicesMap::iterator result = m_cachedCodeBlockIndices.find(key); + if (result != m_cachedCodeBlockIndices.end()) { + UnlinkedCodeBlockType* unlinkedCode = jsCast<UnlinkedCodeBlockType*>(m_cachedCodeBlocks[result->value].second.get()); + unsigned firstLine = source.firstLine() + unlinkedCode->firstLine(); + executable->recordParse(unlinkedCode->codeFeatures(), unlinkedCode->hasCapturedVariables(), firstLine, firstLine + unlinkedCode->lineCount()); + return unlinkedCode; + } + storeInCache = true; + } + + typedef typename CacheTypes<UnlinkedCodeBlockType>::RootNode RootNode; + RefPtr<RootNode> rootNode = parse<RootNode>(&globalData, source, 0, Identifier(), strictness, JSParseProgramCode, error); + if (!rootNode) + return 0; + executable->recordParse(rootNode->features(), 
rootNode->hasCapturedVariables(), rootNode->lineNo(), rootNode->lastLine()); + + UnlinkedCodeBlockType* unlinkedCode = UnlinkedCodeBlockType::create(&globalData, executable->executableInfo()); + unlinkedCode->recordParse(rootNode->features(), rootNode->hasCapturedVariables(), rootNode->lineNo() - source.firstLine(), rootNode->lastLine() - rootNode->lineNo()); + OwnPtr<BytecodeGenerator> generator(adoptPtr(new BytecodeGenerator(globalData, rootNode.get(), unlinkedCode, debuggerMode, profilerMode))); + error = generator->generate(); + rootNode->destroyData(); + if (error.m_type != ParserError::ErrorNone) + return 0; + + if (storeInCache) { + size_t index = m_randomGenerator.getUint32() % kMaxCodeBlockEntries; + if (m_cachedCodeBlocks[index].second) + m_cachedCodeBlockIndices.remove(m_cachedCodeBlocks[index].first); + m_cachedCodeBlockIndices.set(key, index); + m_cachedCodeBlocks[index].second.set(globalData, unlinkedCode); + m_cachedCodeBlocks[index].first = key; + } + + return unlinkedCode; +} + +UnlinkedProgramCodeBlock* CodeCache::getProgramCodeBlock(JSGlobalData& globalData, ProgramExecutable* executable, const SourceCode& source, JSParserStrictness strictness, DebuggerMode debuggerMode, ProfilerMode profilerMode, ParserError& error) +{ + return getCodeBlock<UnlinkedProgramCodeBlock>(globalData, executable, source, strictness, debuggerMode, profilerMode, error); +} + +UnlinkedEvalCodeBlock* CodeCache::getEvalCodeBlock(JSGlobalData& globalData, EvalExecutable* executable, const SourceCode& source, JSParserStrictness strictness, DebuggerMode debuggerMode, ProfilerMode profilerMode, ParserError& error) +{ + return getCodeBlock<UnlinkedEvalCodeBlock>(globalData, executable, source, strictness, debuggerMode, profilerMode, error); +} + +UnlinkedFunctionCodeBlock* CodeCache::generateFunctionCodeBlock(JSGlobalData& globalData, UnlinkedFunctionExecutable* executable, const SourceCode& source, CodeSpecializationKind kind, DebuggerMode debuggerMode, ProfilerMode profilerMode, ParserError& error) +{ + RefPtr<FunctionBodyNode> body = parse<FunctionBodyNode>(&globalData, source, executable->parameters(), executable->name(), executable->isInStrictContext() ? 
JSParseStrict : JSParseNormal, JSParseFunctionCode, error); + + if (!body) { + ASSERT(error.m_type != ParserError::ErrorNone); + return 0; + } + + if (executable->forceUsesArguments()) + body->setUsesArguments(); + body->finishParsing(executable->parameters(), executable->name(), executable->functionNameIsInScopeToggle()); + executable->recordParse(body->features(), body->hasCapturedVariables(), body->lineNo(), body->lastLine()); + + UnlinkedFunctionCodeBlock* result = UnlinkedFunctionCodeBlock::create(&globalData, FunctionCode, ExecutableInfo(body->needsActivation(), body->usesEval(), body->isStrictMode(), kind == CodeForConstruct)); + OwnPtr<BytecodeGenerator> generator(adoptPtr(new BytecodeGenerator(globalData, body.get(), result, debuggerMode, profilerMode))); + error = generator->generate(); + body->destroyData(); + if (error.m_type != ParserError::ErrorNone) + return 0; + return result; +} + +UnlinkedFunctionCodeBlock* CodeCache::getFunctionCodeBlock(JSGlobalData& globalData, UnlinkedFunctionExecutable* executable, const SourceCode& source, CodeSpecializationKind kind, DebuggerMode debuggerMode, ProfilerMode profilerMode, ParserError& error) +{ + return generateFunctionCodeBlock(globalData, executable, source, kind, debuggerMode, profilerMode, error); +} + +CodeCache::GlobalFunctionKey CodeCache::makeGlobalFunctionKey(const SourceCode& source, const String& name) +{ + return GlobalFunctionKey(source.toString(), name); +} + +UnlinkedFunctionExecutable* CodeCache::getFunctionExecutableFromGlobalCode(JSGlobalData& globalData, const Identifier& name, const SourceCode& source, ParserError& error) +{ + GlobalFunctionKey key = makeGlobalFunctionKey(source, name.string()); + GlobalFunctionIndicesMap::iterator result = m_cachedGlobalFunctionIndices.find(key); + if (result != m_cachedGlobalFunctionIndices.end()) + return m_cachedGlobalFunctions[result->value].second.get(); + + RefPtr<ProgramNode> program = parse<ProgramNode>(&globalData, source, 0, Identifier(), JSParseNormal, JSParseProgramCode, error); + if (!program) { + ASSERT(error.m_type != ParserError::ErrorNone); + return 0; + } + + // This function assumes an input string that would result in a single anonymous function expression. + StatementNode* exprStatement = program->singleStatement(); + ASSERT(exprStatement); + ASSERT(exprStatement->isExprStatement()); + ExpressionNode* funcExpr = static_cast<ExprStatementNode*>(exprStatement)->expr(); + ASSERT(funcExpr); + ASSERT(funcExpr->isFuncExprNode()); + FunctionBodyNode* body = static_cast<FuncExprNode*>(funcExpr)->body(); + ASSERT(body); + ASSERT(body->ident().isNull()); + + UnlinkedFunctionExecutable* functionExecutable = UnlinkedFunctionExecutable::create(&globalData, source, body); + functionExecutable->m_nameValue.set(globalData, functionExecutable, jsString(&globalData, name.string())); + + size_t index = m_randomGenerator.getUint32() % kMaxGlobalFunctionEntries; + if (m_cachedGlobalFunctions[index].second) + m_cachedGlobalFunctionIndices.remove(m_cachedGlobalFunctions[index].first); + m_cachedGlobalFunctionIndices.set(key, index); + m_cachedGlobalFunctions[index].second.set(globalData, functionExecutable); + m_cachedGlobalFunctions[index].first = key; + + return functionExecutable; +} + +} diff --git a/Source/JavaScriptCore/runtime/CodeCache.h b/Source/JavaScriptCore/runtime/CodeCache.h new file mode 100644 index 000000000..4d4617189 --- /dev/null +++ b/Source/JavaScriptCore/runtime/CodeCache.h @@ -0,0 +1,93 @@ +/* + * Copyright (C) 2012 Apple Inc. All Rights Reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef CodeCache_h +#define CodeCache_h + +#include "CodeSpecializationKind.h" +#include "ParserModes.h" +#include "Strong.h" +#include "WeakRandom.h" + +#include <wtf/FixedArray.h> +#include <wtf/Forward.h> +#include <wtf/PassOwnPtr.h> +#include <wtf/text/WTFString.h> + +namespace JSC { + +class EvalExecutable; +class Identifier; +class ProgramExecutable; +class UnlinkedCodeBlock; +class UnlinkedEvalCodeBlock; +class UnlinkedFunctionCodeBlock; +class UnlinkedFunctionExecutable; +class UnlinkedProgramCodeBlock; +class JSGlobalData; +struct ParserError; +class SourceCode; +class SourceProvider; + +class CodeCache { +public: + static PassOwnPtr<CodeCache> create() { return adoptPtr(new CodeCache); } + + UnlinkedProgramCodeBlock* getProgramCodeBlock(JSGlobalData&, ProgramExecutable*, const SourceCode&, JSParserStrictness, DebuggerMode, ProfilerMode, ParserError&); + UnlinkedEvalCodeBlock* getEvalCodeBlock(JSGlobalData&, EvalExecutable*, const SourceCode&, JSParserStrictness, DebuggerMode, ProfilerMode, ParserError&); + UnlinkedFunctionCodeBlock* getFunctionCodeBlock(JSGlobalData&, UnlinkedFunctionExecutable*, const SourceCode&, CodeSpecializationKind, DebuggerMode, ProfilerMode, ParserError&); + UnlinkedFunctionExecutable* getFunctionExecutableFromGlobalCode(JSGlobalData&, const Identifier&, const SourceCode&, ParserError&); + ~CodeCache(); + + enum CodeType { EvalType, ProgramType, FunctionType }; + typedef std::pair<String, unsigned> CodeBlockKey; + typedef HashMap<CodeBlockKey, unsigned> CodeBlockIndicesMap; + typedef std::pair<String, String> GlobalFunctionKey; + typedef HashMap<GlobalFunctionKey, unsigned> GlobalFunctionIndicesMap; + +private: + CodeCache(); + + UnlinkedFunctionCodeBlock* generateFunctionCodeBlock(JSGlobalData&, UnlinkedFunctionExecutable*, const SourceCode&, CodeSpecializationKind, DebuggerMode, ProfilerMode, ParserError&); + + template <class UnlinkedCodeBlockType, class ExecutableType> inline UnlinkedCodeBlockType* getCodeBlock(JSGlobalData&, ExecutableType*, const SourceCode&, JSParserStrictness, DebuggerMode, ProfilerMode, ParserError&); + CodeBlockKey makeCodeBlockKey(const SourceCode&, CodeType, JSParserStrictness); + CodeBlockIndicesMap m_cachedCodeBlockIndices; + GlobalFunctionKey 
makeGlobalFunctionKey(const SourceCode&, const String&); + GlobalFunctionIndicesMap m_cachedGlobalFunctionIndices; + + enum { + kMaxCodeBlockEntries = 1024, + kMaxGlobalFunctionEntries = 1024 + }; + + FixedArray<std::pair<CodeBlockKey, Strong<UnlinkedCodeBlock> >, kMaxCodeBlockEntries> m_cachedCodeBlocks; + FixedArray<std::pair<GlobalFunctionKey, Strong<UnlinkedFunctionExecutable> >, kMaxGlobalFunctionEntries> m_cachedGlobalFunctions; + WeakRandom m_randomGenerator; +}; + +} + +#endif diff --git a/Source/JavaScriptCore/runtime/DateConstructor.cpp b/Source/JavaScriptCore/runtime/DateConstructor.cpp index f78e8bf55..9a162e9e7 100644 --- a/Source/JavaScriptCore/runtime/DateConstructor.cpp +++ b/Source/JavaScriptCore/runtime/DateConstructor.cpp @@ -131,7 +131,7 @@ JSObject* constructDate(ExecState* exec, JSGlobalObject* globalObject, const Arg || (numArgs >= 5 && !isfinite(doubleArguments[4])) || (numArgs >= 6 && !isfinite(doubleArguments[5])) || (numArgs >= 7 && !isfinite(doubleArguments[6]))) - value = std::numeric_limits<double>::quiet_NaN(); + value = QNaN; else { GregorianDateTime t; int year = JSC::toInt32(doubleArguments[0]); diff --git a/Source/JavaScriptCore/runtime/DateInstanceCache.h b/Source/JavaScriptCore/runtime/DateInstanceCache.h index 153582f67..e186516e8 100644 --- a/Source/JavaScriptCore/runtime/DateInstanceCache.h +++ b/Source/JavaScriptCore/runtime/DateInstanceCache.h @@ -45,8 +45,8 @@ namespace JSC { private: DateInstanceData() - : m_gregorianDateTimeCachedForMS(std::numeric_limits<double>::quiet_NaN()) - , m_gregorianDateTimeUTCCachedForMS(std::numeric_limits<double>::quiet_NaN()) + : m_gregorianDateTimeCachedForMS(QNaN) + , m_gregorianDateTimeUTCCachedForMS(QNaN) { } }; @@ -61,7 +61,7 @@ namespace JSC { void reset() { for (size_t i = 0; i < cacheSize; ++i) - m_cache[i].key = std::numeric_limits<double>::quiet_NaN(); + m_cache[i].key = QNaN; } DateInstanceData* add(double d) diff --git a/Source/JavaScriptCore/runtime/ExceptionHelpers.cpp b/Source/JavaScriptCore/runtime/ExceptionHelpers.cpp index a3281b6d6..a4368a2bb 100644 --- a/Source/JavaScriptCore/runtime/ExceptionHelpers.cpp +++ b/Source/JavaScriptCore/runtime/ExceptionHelpers.cpp @@ -48,7 +48,7 @@ JSValue InterruptedExecutionError::defaultValue(const JSObject*, ExecState* exec { if (hint == PreferString) return jsNontrivialString(exec, String(ASCIILiteral("JavaScript execution exceeded timeout."))); - return JSValue(std::numeric_limits<double>::quiet_NaN()); + return JSValue(QNaN); } JSObject* createInterruptedExecutionException(JSGlobalData* globalData) @@ -75,7 +75,7 @@ JSValue TerminatedExecutionError::defaultValue(const JSObject*, ExecState* exec, { if (hint == PreferString) return jsNontrivialString(exec, String(ASCIILiteral("JavaScript execution terminated."))); - return JSValue(std::numeric_limits<double>::quiet_NaN()); + return JSValue(QNaN); } JSObject* createTerminatedExecutionException(JSGlobalData* globalData) diff --git a/Source/JavaScriptCore/runtime/Executable.cpp b/Source/JavaScriptCore/runtime/Executable.cpp index 0a453eea0..20a2e2acb 100644 --- a/Source/JavaScriptCore/runtime/Executable.cpp +++ b/Source/JavaScriptCore/runtime/Executable.cpp @@ -26,6 +26,7 @@ #include "config.h" #include "Executable.h" +#include "BatchedTransitionOptimizer.h" #include "BytecodeGenerator.h" #include "CodeBlock.h" #include "DFGDriver.h" @@ -133,16 +134,14 @@ void ProgramExecutable::destroy(JSCell* cell) const ClassInfo FunctionExecutable::s_info = { "FunctionExecutable", &ScriptExecutable::s_info, 0, 0, 
CREATE_METHOD_TABLE(FunctionExecutable) }; -FunctionExecutable::FunctionExecutable(JSGlobalData& globalData, FunctionBodyNode* node) - : ScriptExecutable(globalData.functionExecutableStructure.get(), globalData, node->source(), node->isStrictMode()) - , m_forceUsesArguments(node->usesArguments()) - , m_parameters(node->parameters()) - , m_name(node->ident()) - , m_inferredName(node->inferredName().isNull() ? globalData.propertyNames->emptyIdentifier : node->inferredName()) - , m_functionNameIsInScopeToggle(node->functionNameIsInScopeToggle()) +FunctionExecutable::FunctionExecutable(JSGlobalData& globalData, const SourceCode& source, UnlinkedFunctionExecutable* unlinkedExecutable, unsigned firstLine, unsigned lastLine) + : ScriptExecutable(globalData.functionExecutableStructure.get(), globalData, source, unlinkedExecutable->isInStrictContext()) + , m_unlinkedExecutable(globalData, this, unlinkedExecutable) { - m_firstLine = node->lineNo(); - m_lastLine = node->lastLine(); + ASSERT(!source.isNull()); + ASSERT(source.length()); + m_firstLine = firstLine; + m_lastLine = lastLine; } void FunctionExecutable::destroy(JSCell* cell) @@ -191,7 +190,6 @@ JSObject* EvalExecutable::compileInternal(ExecState* exec, JSScope* scope, JITCo UNUSED_PARAM(jitType); UNUSED_PARAM(bytecodeIndex); #endif - JSObject* exception = 0; JSGlobalData* globalData = &exec->globalData(); JSGlobalObject* lexicalGlobalObject = exec->lexicalGlobalObject(); @@ -200,28 +198,21 @@ JSObject* EvalExecutable::compileInternal(ExecState* exec, JSScope* scope, JITCo newCodeBlock->setAlternative(static_pointer_cast<CodeBlock>(m_evalCodeBlock.release())); m_evalCodeBlock = newCodeBlock.release(); } else { + UNUSED_PARAM(scope); + UNUSED_PARAM(globalData); + UNUSED_PARAM(lexicalGlobalObject); if (!lexicalGlobalObject->evalEnabled()) return throwError(exec, createEvalError(exec, lexicalGlobalObject->evalDisabledErrorMessage())); - RefPtr<EvalNode> evalNode = parse<EvalNode>(globalData, lexicalGlobalObject, m_source, 0, Identifier(), isStrictMode() ? JSParseStrict : JSParseNormal, EvalNode::isFunctionNode ? JSParseFunctionCode : JSParseProgramCode, lexicalGlobalObject->debugger(), exec, &exception); - if (!evalNode) { - ASSERT(exception); + + JSObject* exception = 0; + UnlinkedEvalCodeBlock* unlinkedEvalCode = lexicalGlobalObject->createEvalCodeBlock(exec, this, &exception); + if (!unlinkedEvalCode) return exception; - } - recordParse(evalNode->features(), evalNode->hasCapturedVariables(), evalNode->lineNo(), evalNode->lastLine()); - - JSGlobalObject* globalObject = scope->globalObject(); - + OwnPtr<CodeBlock> previousCodeBlock = m_evalCodeBlock.release(); ASSERT((jitType == JITCode::bottomTierJIT()) == !previousCodeBlock); - m_evalCodeBlock = adoptPtr(new EvalCodeBlock(this, globalObject, source().provider(), scope->localDepth(), previousCodeBlock.release())); - OwnPtr<BytecodeGenerator> generator(adoptPtr(new BytecodeGenerator(evalNode.get(), scope, m_evalCodeBlock->symbolTable(), m_evalCodeBlock.get(), !!m_evalCodeBlock->alternative() ? 
OptimizingCompilation : FirstCompilation))); - if ((exception = generator->generate())) { - m_evalCodeBlock = static_pointer_cast<EvalCodeBlock>(m_evalCodeBlock->releaseAlternative()); - evalNode->destroyData(); - return exception; - } - - evalNode->destroyData(); + m_unlinkedEvalCodeBlock.set(*globalData, this, unlinkedEvalCode); + m_evalCodeBlock = adoptPtr(new EvalCodeBlock(this, unlinkedEvalCode, lexicalGlobalObject, source().provider(), scope->localDepth(), previousCodeBlock.release())); m_evalCodeBlock->copyPostParseDataFromAlternative(); } @@ -257,6 +248,7 @@ void EvalExecutable::visitChildren(JSCell* cell, SlotVisitor& visitor) ScriptExecutable::visitChildren(thisObject, visitor); if (thisObject->m_evalCodeBlock) thisObject->m_evalCodeBlock->visitAggregate(visitor); + visitor.append(&thisObject->m_unlinkedEvalCodeBlock); } void EvalExecutable::unlinkCalls() @@ -272,19 +264,20 @@ void EvalExecutable::unlinkCalls() void EvalExecutable::clearCode() { m_evalCodeBlock.clear(); + m_unlinkedEvalCodeBlock.clear(); Base::clearCode(); } JSObject* ProgramExecutable::checkSyntax(ExecState* exec) { - JSObject* exception = 0; + ParserError error; JSGlobalData* globalData = &exec->globalData(); JSGlobalObject* lexicalGlobalObject = exec->lexicalGlobalObject(); - RefPtr<ProgramNode> programNode = parse<ProgramNode>(globalData, lexicalGlobalObject, m_source, 0, Identifier(), JSParseNormal, ProgramNode::isFunctionNode ? JSParseFunctionCode : JSParseProgramCode, lexicalGlobalObject->debugger(), exec, &exception); + RefPtr<ProgramNode> programNode = parse<ProgramNode>(globalData, m_source, 0, Identifier(), JSParseNormal, ProgramNode::isFunctionNode ? JSParseFunctionCode : JSParseProgramCode, error); if (programNode) return 0; - ASSERT(exception); - return exception; + ASSERT(error.m_type != ParserError::ErrorNone); + return error.toErrorObject(lexicalGlobalObject, m_source); } JSObject* ProgramExecutable::compileOptimized(ExecState* exec, JSScope* scope, unsigned bytecodeIndex) @@ -310,38 +303,17 @@ JSObject* ProgramExecutable::compileInternal(ExecState* exec, JSScope* scope, JI SamplingRegion samplingRegion(samplingDescription(jitType)); #if !ENABLE(JIT) + UNUSED_PARAM(exec); UNUSED_PARAM(jitType); UNUSED_PARAM(bytecodeIndex); #endif - JSObject* exception = 0; - JSGlobalData* globalData = &exec->globalData(); - JSGlobalObject* lexicalGlobalObject = exec->lexicalGlobalObject(); - if (!!m_programCodeBlock) { OwnPtr<ProgramCodeBlock> newCodeBlock = adoptPtr(new ProgramCodeBlock(CodeBlock::CopyParsedBlock, *m_programCodeBlock)); newCodeBlock->setAlternative(static_pointer_cast<CodeBlock>(m_programCodeBlock.release())); m_programCodeBlock = newCodeBlock.release(); } else { - RefPtr<ProgramNode> programNode = parse<ProgramNode>(globalData, lexicalGlobalObject, m_source, 0, Identifier(), isStrictMode() ? JSParseStrict : JSParseNormal, ProgramNode::isFunctionNode ? 
JSParseFunctionCode : JSParseProgramCode, lexicalGlobalObject->debugger(), exec, &exception); - if (!programNode) { - ASSERT(exception); - return exception; - } - recordParse(programNode->features(), programNode->hasCapturedVariables(), programNode->lineNo(), programNode->lastLine()); - JSGlobalObject* globalObject = scope->globalObject(); - - OwnPtr<CodeBlock> previousCodeBlock = m_programCodeBlock.release(); - ASSERT((jitType == JITCode::bottomTierJIT()) == !previousCodeBlock); - m_programCodeBlock = adoptPtr(new ProgramCodeBlock(this, GlobalCode, globalObject, source().provider(), previousCodeBlock.release())); - OwnPtr<BytecodeGenerator> generator(adoptPtr(new BytecodeGenerator(programNode.get(), scope, globalObject->symbolTable(), m_programCodeBlock.get(), !!m_programCodeBlock->alternative() ? OptimizingCompilation : FirstCompilation))); - if ((exception = generator->generate())) { - m_programCodeBlock = static_pointer_cast<ProgramCodeBlock>(m_programCodeBlock->releaseAlternative()); - programNode->destroyData(); - return exception; - } - - programNode->destroyData(); + m_programCodeBlock = adoptPtr(new ProgramCodeBlock(this, m_unlinkedProgramCodeBlock.get(), globalObject, source().provider(), m_programCodeBlock.release())); m_programCodeBlock->copyPostParseDataFromAlternative(); } @@ -378,6 +350,67 @@ void ProgramExecutable::unlinkCalls() #endif } +int ProgramExecutable::addGlobalVar(JSGlobalObject* globalObject, const Identifier& ident, ConstantMode constantMode, FunctionMode functionMode) +{ + // Try to share the symbolTable if possible + SharedSymbolTable* symbolTable = globalObject->symbolTable(); + UNUSED_PARAM(functionMode); + int index = symbolTable->size(); + SymbolTableEntry newEntry(index, (constantMode == IsConstant) ? ReadOnly : 0); + if (functionMode == IsFunctionToSpecialize) + newEntry.attemptToWatch(); + SymbolTable::AddResult result = symbolTable->add(ident.impl(), newEntry); + if (!result.isNewEntry) { + result.iterator->value.notifyWrite(); + index = result.iterator->value.getIndex(); + } + return index; +} + +JSObject* ProgramExecutable::initalizeGlobalProperties(JSGlobalData& globalData, CallFrame* callFrame, JSScope* scope) +{ + ASSERT(scope); + JSGlobalObject* globalObject = scope->globalObject(); + ASSERT(globalObject); + ASSERT(&globalObject->globalData() == &globalData); + + JSObject* exception = 0; + UnlinkedProgramCodeBlock* unlinkedCode = globalObject->createProgramCodeBlock(callFrame, this, &exception); + if (exception) + return exception; + + m_unlinkedProgramCodeBlock.set(globalData, this, unlinkedCode); + + BatchedTransitionOptimizer optimizer(globalData, globalObject); + + const UnlinkedProgramCodeBlock::VariableDeclations& variableDeclarations = unlinkedCode->variableDeclarations(); + const UnlinkedProgramCodeBlock::FunctionDeclations& functionDeclarations = unlinkedCode->functionDeclarations(); + + size_t newGlobals = variableDeclarations.size() + functionDeclarations.size(); + if (!newGlobals) + return 0; + globalObject->addRegisters(newGlobals); + CallFrame* globalExec = globalObject->globalExec(); + + for (size_t i = 0; i < functionDeclarations.size(); ++i) { + bool propertyDidExist = globalObject->removeDirect(globalData, functionDeclarations[i].first); // Newly declared functions overwrite existing properties. 
+ UnlinkedFunctionExecutable* unlinkedFunctionExecutable = functionDeclarations[i].second.get(); + JSValue value = JSFunction::create(globalExec, unlinkedFunctionExecutable->link(globalData, m_source, lineNo(), 0), scope); + int index = addGlobalVar(globalObject, functionDeclarations[i].first, IsVariable, + !propertyDidExist ? IsFunctionToSpecialize : NotFunctionOrNotSpecializable); + globalObject->registerAt(index).set(globalData, globalObject, value); + } + + for (size_t i = 0; i < variableDeclarations.size(); ++i) { + if (globalObject->hasProperty(globalExec, variableDeclarations[i].first)) + continue; + addGlobalVar(globalObject, variableDeclarations[i].first, + (variableDeclarations[i].second & DeclarationStacks::IsConstant) ? IsConstant : IsVariable, + NotFunctionOrNotSpecializable); + } + return 0; +} + void ProgramExecutable::visitChildren(JSCell* cell, SlotVisitor& visitor) { ProgramExecutable* thisObject = jsCast<ProgramExecutable*>(cell); @@ -385,6 +418,7 @@ void ProgramExecutable::visitChildren(JSCell* cell, SlotVisitor& visitor) COMPILE_ASSERT(StructureFlags & OverridesVisitChildren, OverridesVisitChildrenWithoutSettingFlag); ASSERT(thisObject->structure()->typeInfo().overridesVisitChildren()); ScriptExecutable::visitChildren(thisObject, visitor); + visitor.append(&thisObject->m_unlinkedProgramCodeBlock); if (thisObject->m_programCodeBlock) thisObject->m_programCodeBlock->visitAggregate(visitor); } @@ -392,6 +426,7 @@ void ProgramExecutable::visitChildren(JSCell* cell, SlotVisitor& visitor) void ProgramExecutable::clearCode() { m_programCodeBlock.clear(); + m_unlinkedProgramCodeBlock.clear(); Base::clearCode(); } @@ -438,12 +473,12 @@ JSObject* FunctionExecutable::compileOptimizedForConstruct(ExecState* exec, JSSc #if ENABLE(JIT) bool FunctionExecutable::jitCompileForCall(ExecState* exec) { - return jitCompileFunctionIfAppropriate(exec, m_codeBlockForCall, m_jitCodeForCall, m_jitCodeForCallWithArityCheck, m_symbolTable, JITCode::bottomTierJIT(), UINT_MAX, JITCompilationCanFail); + return jitCompileFunctionIfAppropriate(exec, m_codeBlockForCall, m_jitCodeForCall, m_jitCodeForCallWithArityCheck, JITCode::bottomTierJIT(), UINT_MAX, JITCompilationCanFail); } bool FunctionExecutable::jitCompileForConstruct(ExecState* exec) { - return jitCompileFunctionIfAppropriate(exec, m_codeBlockForConstruct, m_jitCodeForConstruct, m_jitCodeForConstructWithArityCheck, m_symbolTable, JITCode::bottomTierJIT(), UINT_MAX, JITCompilationCanFail); + return jitCompileFunctionIfAppropriate(exec, m_codeBlockForConstruct, m_jitCodeForConstruct, m_jitCodeForConstructWithArityCheck, JITCode::bottomTierJIT(), UINT_MAX, JITCompilationCanFail); } #endif @@ -452,49 +487,30 @@ FunctionCodeBlock* FunctionExecutable::codeBlockWithBytecodeFor(CodeSpecializati return baselineCodeBlockFor(kind); } -PassOwnPtr<FunctionCodeBlock> FunctionExecutable::produceCodeBlockFor(JSScope* scope, CompilationKind compilationKind, CodeSpecializationKind specializationKind, JSObject*& exception) +PassOwnPtr<FunctionCodeBlock> FunctionExecutable::produceCodeBlockFor(JSScope* scope, CodeSpecializationKind specializationKind, JSObject*& exception) { if (!!codeBlockFor(specializationKind)) return adoptPtr(new FunctionCodeBlock(CodeBlock::CopyParsedBlock, *codeBlockFor(specializationKind))); - - exception = 0; + JSGlobalData* globalData = scope->globalData(); JSGlobalObject* globalObject = scope->globalObject(); - RefPtr<FunctionBodyNode> body = parse<FunctionBodyNode>( - globalData, - globalObject, - m_source, - m_parameters.get(), - 
name(), - isStrictMode() ? JSParseStrict : JSParseNormal, - FunctionBodyNode::isFunctionNode ? JSParseFunctionCode : JSParseProgramCode, - 0, - 0, - &exception - ); - - if (!body) { - ASSERT(exception); + ParserError error; + DebuggerMode debuggerMode = globalObject->hasDebugger() ? DebuggerOn : DebuggerOff; + ProfilerMode profilerMode = globalObject->hasProfiler() ? ProfilerOn : ProfilerOff; + UnlinkedFunctionCodeBlock* unlinkedCodeBlock = m_unlinkedExecutable->codeBlockFor(*globalData, m_source, specializationKind, debuggerMode, profilerMode, error); + recordParse(m_unlinkedExecutable->features(), m_unlinkedExecutable->hasCapturedVariables(), lineNo(), lastLine()); + + if (!unlinkedCodeBlock) { + exception = error.toErrorObject(globalObject, m_source); return nullptr; } - if (m_forceUsesArguments) - body->setUsesArguments(); - body->finishParsing(m_parameters, m_name, m_functionNameIsInScopeToggle); - recordParse(body->features(), body->hasCapturedVariables(), body->lineNo(), body->lastLine()); - - OwnPtr<FunctionCodeBlock> result; - ASSERT((compilationKind == FirstCompilation) == !codeBlockFor(specializationKind)); - result = adoptPtr(new FunctionCodeBlock(this, FunctionCode, globalObject, source().provider(), source().startOffset(), specializationKind == CodeForConstruct)); - OwnPtr<BytecodeGenerator> generator(adoptPtr(new BytecodeGenerator(body.get(), scope, result->symbolTable(), result.get(), compilationKind))); - exception = generator->generate(); - body->destroyData(); - if (exception) - return nullptr; + OwnPtr<FunctionCodeBlock> result = adoptPtr(new FunctionCodeBlock(this, unlinkedCodeBlock, globalObject, source().provider(), source().startOffset())); result->copyPostParseDataFrom(codeBlockFor(specializationKind).get()); return result.release(); } + JSObject* FunctionExecutable::compileForCallInternal(ExecState* exec, JSScope* scope, JITCode::JITType jitType, unsigned bytecodeIndex) { SamplingRegion samplingRegion(samplingDescription(jitType)); @@ -507,7 +523,7 @@ JSObject* FunctionExecutable::compileForCallInternal(ExecState* exec, JSScope* s #endif ASSERT((jitType == JITCode::bottomTierJIT()) == !m_codeBlockForCall); JSObject* exception; - OwnPtr<FunctionCodeBlock> newCodeBlock = produceCodeBlockFor(scope, !!m_codeBlockForCall ? OptimizingCompilation : FirstCompilation, CodeForCall, exception); + OwnPtr<FunctionCodeBlock> newCodeBlock = produceCodeBlockFor(scope, CodeForCall, exception); if (!newCodeBlock) return exception; @@ -516,10 +532,9 @@ JSObject* FunctionExecutable::compileForCallInternal(ExecState* exec, JSScope* s m_numParametersForCall = m_codeBlockForCall->numParameters(); ASSERT(m_numParametersForCall); - m_symbolTable.set(exec->globalData(), this, m_codeBlockForCall->symbolTable()); #if ENABLE(JIT) - if (!prepareFunctionForExecution(exec, m_codeBlockForCall, m_jitCodeForCall, m_jitCodeForCallWithArityCheck, m_symbolTable, jitType, bytecodeIndex, CodeForCall)) + if (!prepareFunctionForExecution(exec, m_codeBlockForCall, m_jitCodeForCall, m_jitCodeForCallWithArityCheck, jitType, bytecodeIndex, CodeForCall)) return 0; #endif @@ -544,7 +559,7 @@ JSObject* FunctionExecutable::compileForConstructInternal(ExecState* exec, JSSco ASSERT((jitType == JITCode::bottomTierJIT()) == !m_codeBlockForConstruct); JSObject* exception; - OwnPtr<FunctionCodeBlock> newCodeBlock = produceCodeBlockFor(scope, !!m_codeBlockForConstruct ? 
OptimizingCompilation : FirstCompilation, CodeForConstruct, exception); + OwnPtr<FunctionCodeBlock> newCodeBlock = produceCodeBlockFor(scope, CodeForConstruct, exception); if (!newCodeBlock) return exception; @@ -553,10 +568,9 @@ JSObject* FunctionExecutable::compileForConstructInternal(ExecState* exec, JSSco m_numParametersForConstruct = m_codeBlockForConstruct->numParameters(); ASSERT(m_numParametersForConstruct); - m_symbolTable.set(exec->globalData(), this, m_codeBlockForConstruct->symbolTable()); #if ENABLE(JIT) - if (!prepareFunctionForExecution(exec, m_codeBlockForConstruct, m_jitCodeForConstruct, m_jitCodeForConstructWithArityCheck, m_symbolTable, jitType, bytecodeIndex, CodeForConstruct)) + if (!prepareFunctionForExecution(exec, m_codeBlockForConstruct, m_jitCodeForConstruct, m_jitCodeForConstructWithArityCheck, jitType, bytecodeIndex, CodeForConstruct)) return 0; #endif @@ -592,12 +606,11 @@ void FunctionExecutable::visitChildren(JSCell* cell, SlotVisitor& visitor) COMPILE_ASSERT(StructureFlags & OverridesVisitChildren, OverridesVisitChildrenWithoutSettingFlag); ASSERT(thisObject->structure()->typeInfo().overridesVisitChildren()); ScriptExecutable::visitChildren(thisObject, visitor); - visitor.append(&thisObject->m_nameValue); - visitor.append(&thisObject->m_symbolTable); if (thisObject->m_codeBlockForCall) thisObject->m_codeBlockForCall->visitAggregate(visitor); if (thisObject->m_codeBlockForConstruct) thisObject->m_codeBlockForConstruct->visitAggregate(visitor); + visitor.append(&thisObject->m_unlinkedExecutable); } void FunctionExecutable::clearCodeIfNotCompiling() @@ -607,10 +620,18 @@ void FunctionExecutable::clearCodeIfNotCompiling() clearCode(); } +void FunctionExecutable::clearUnlinkedCodeIfNotCompiling() +{ + if (isCompiling()) + return; + m_unlinkedExecutable->clearCode(); +} + void FunctionExecutable::clearCode() { m_codeBlockForCall.clear(); m_codeBlockForConstruct.clear(); + m_unlinkedExecutable->clearCode(); Base::clearCode(); } @@ -630,39 +651,19 @@ void FunctionExecutable::unlinkCalls() FunctionExecutable* FunctionExecutable::fromGlobalCode(const Identifier& name, ExecState* exec, Debugger* debugger, const SourceCode& source, JSObject** exception) { - JSGlobalObject* lexicalGlobalObject = exec->lexicalGlobalObject(); - RefPtr<ProgramNode> program = parse<ProgramNode>(&exec->globalData(), lexicalGlobalObject, source, 0, Identifier(), JSParseNormal, ProgramNode::isFunctionNode ? JSParseFunctionCode : JSParseProgramCode, debugger, exec, exception); - if (!program) { - ASSERT(*exception); + UnlinkedFunctionExecutable* unlinkedFunction = UnlinkedFunctionExecutable::fromGlobalCode(name, exec, debugger, source, exception); + if (!unlinkedFunction) return 0; - } - - // This function assumes an input string that would result in a single anonymous function expression. 
- StatementNode* exprStatement = program->singleStatement(); - ASSERT(exprStatement); - ASSERT(exprStatement->isExprStatement()); - ExpressionNode* funcExpr = static_cast<ExprStatementNode*>(exprStatement)->expr(); - ASSERT(funcExpr); - ASSERT(funcExpr->isFuncExprNode()); - FunctionBodyNode* body = static_cast<FuncExprNode*>(funcExpr)->body(); - ASSERT(body); - ASSERT(body->ident().isNull()); - - FunctionExecutable* functionExecutable = FunctionExecutable::create(exec->globalData(), body); - functionExecutable->m_nameValue.set(exec->globalData(), functionExecutable, jsString(&exec->globalData(), name.string())); - return functionExecutable; + unsigned firstLine = source.firstLine() + unlinkedFunction->firstLineOffset(); + unsigned startOffset = source.startOffset() + unlinkedFunction->startOffset(); + unsigned sourceLength = unlinkedFunction->sourceLength(); + SourceCode functionSource(source.provider(), startOffset, startOffset + sourceLength, firstLine); + return FunctionExecutable::create(exec->globalData(), functionSource, unlinkedFunction, firstLine, unlinkedFunction->lineCount()); } String FunctionExecutable::paramString() const { - FunctionParameters& parameters = *m_parameters; - StringBuilder builder; - for (size_t pos = 0; pos < parameters.size(); ++pos) { - if (!builder.isEmpty()) - builder.appendLiteral(", "); - builder.append(parameters[pos].string()); - } - return builder.toString(); + return m_unlinkedExecutable->paramString(); } } diff --git a/Source/JavaScriptCore/runtime/Executable.h b/Source/JavaScriptCore/runtime/Executable.h index 76a537da3..74b4add75 100644 --- a/Source/JavaScriptCore/runtime/Executable.h +++ b/Source/JavaScriptCore/runtime/Executable.h @@ -35,6 +35,7 @@ #include "LLIntCLoop.h" #include "Nodes.h" #include "SamplingTool.h" +#include "UnlinkedCodeBlock.h" #include <wtf/PassOwnPtr.h> namespace JSC { @@ -364,9 +365,19 @@ namespace JSC { bool isStrictMode() const { return m_features & StrictModeFeature; } void unlinkCalls(); + + CodeFeatures features() const { return m_features; } static const ClassInfo s_info; + void recordParse(CodeFeatures features, bool hasCapturedVariables, int firstLine, int lastLine) + { + m_features = features; + m_hasCapturedVariables = hasCapturedVariables; + m_firstLine = firstLine; + m_lastLine = lastLine; + } + protected: void finishCreation(JSGlobalData& globalData) { @@ -379,14 +390,6 @@ namespace JSC { #endif } - void recordParse(CodeFeatures features, bool hasCapturedVariables, int firstLine, int lastLine) - { - m_features = features; - m_hasCapturedVariables = hasCapturedVariables; - m_firstLine = firstLine; - m_lastLine = lastLine; - } - SourceCode m_source; CodeFeatures m_features; bool m_hasCapturedVariables; @@ -448,6 +451,8 @@ namespace JSC { void clearCode(); + ExecutableInfo executableInfo() const { return ExecutableInfo(needsActivation(), usesEval(), isStrictMode(), false); } + private: static const unsigned StructureFlags = OverridesVisitChildren | ScriptExecutable::StructureFlags; EvalExecutable(ExecState*, const SourceCode&, bool); @@ -456,6 +461,7 @@ namespace JSC { static void visitChildren(JSCell*, SlotVisitor&); OwnPtr<EvalCodeBlock> m_evalCodeBlock; + WriteBarrier<UnlinkedEvalCodeBlock> m_unlinkedEvalCodeBlock; }; class ProgramExecutable : public ScriptExecutable { @@ -470,6 +476,9 @@ namespace JSC { return executable; } + + JSObject* initalizeGlobalProperties(JSGlobalData&, CallFrame*, JSScope*); + static void destroy(JSCell*); JSObject* compile(ExecState* exec, JSScope* scope) @@ -515,13 +524,21 @@ 
namespace JSC { void clearCode(); + ExecutableInfo executableInfo() const { return ExecutableInfo(needsActivation(), usesEval(), isStrictMode(), false); } + private: static const unsigned StructureFlags = OverridesVisitChildren | ScriptExecutable::StructureFlags; + ProgramExecutable(ExecState*, const SourceCode&); + enum ConstantMode { IsConstant, IsVariable }; + enum FunctionMode { IsFunctionToSpecialize, NotFunctionOrNotSpecializable }; + int addGlobalVar(JSGlobalObject*, const Identifier&, ConstantMode, FunctionMode); + JSObject* compileInternal(ExecState*, JSScope*, JITCode::JITType, unsigned bytecodeIndex = UINT_MAX); static void visitChildren(JSCell*, SlotVisitor&); + WriteBarrier<UnlinkedProgramCodeBlock> m_unlinkedProgramCodeBlock; OwnPtr<ProgramCodeBlock> m_programCodeBlock; }; @@ -531,9 +548,9 @@ namespace JSC { public: typedef ScriptExecutable Base; - static FunctionExecutable* create(JSGlobalData& globalData, FunctionBodyNode* node) + static FunctionExecutable* create(JSGlobalData& globalData, const SourceCode& source, UnlinkedFunctionExecutable* unlinkedExecutable, unsigned firstLine, unsigned lastLine) { - FunctionExecutable* executable = new (NotNull, allocateCell<FunctionExecutable>(globalData.heap)) FunctionExecutable(globalData, node); + FunctionExecutable* executable = new (NotNull, allocateCell<FunctionExecutable>(globalData.heap)) FunctionExecutable(globalData, source, unlinkedExecutable, firstLine, lastLine); executable->finishCreation(globalData); return executable; } @@ -554,7 +571,7 @@ namespace JSC { FunctionCodeBlock* codeBlockWithBytecodeFor(CodeSpecializationKind); - PassOwnPtr<FunctionCodeBlock> produceCodeBlockFor(JSScope*, CompilationKind, CodeSpecializationKind, JSObject*& exception); + PassOwnPtr<FunctionCodeBlock> produceCodeBlockFor(JSScope*, CodeSpecializationKind, JSObject*& exception); JSObject* compileForCall(ExecState* exec, JSScope* scope) { @@ -679,14 +696,15 @@ namespace JSC { return baselineCodeBlockFor(kind); } - const Identifier& name() { return m_name; } - const Identifier& inferredName() { return m_inferredName; } - JSString* nameValue() const { return m_nameValue.get(); } - size_t parameterCount() const { return m_parameters->size(); } // Excluding 'this'! + const Identifier& name() { return m_unlinkedExecutable->name(); } + const Identifier& inferredName() { return m_unlinkedExecutable->inferredName(); } + JSString* nameValue() const { return m_unlinkedExecutable->nameValue(); } + size_t parameterCount() const { return m_unlinkedExecutable->parameterCount(); } // Excluding 'this'! 
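The declarations just above forward name(), inferredName(), nameValue(), parameterCount() and the per-specialization symbolTable() to m_unlinkedExecutable, so the linked FunctionExecutable keeps no metadata of its own; further down the same hunk removes m_name, m_inferredName, m_parameters and m_symbolTable. A minimal standalone sketch of that ownership split, using simplified stand-in types (UnlinkedFunction, LinkedFunction and the shared_ptr sharing are illustrative, not JSC API):

// sketch.cpp -- illustrative only; these names do not exist in JavaScriptCore.
#include <cstdio>
#include <memory>
#include <string>
#include <vector>

// Context-free data produced once per source function.
struct UnlinkedFunction {
    std::string name;
    std::string inferredName;
    std::vector<std::string> parameters;          // excluding 'this'
    std::vector<std::string> symbolTableForCall;  // stands in for a per-kind SharedSymbolTable
    std::vector<std::string> symbolTableForConstruct;

    size_t parameterCount() const { return parameters.size(); }
};

// Per-context wrapper: owns no metadata of its own, only a reference
// to the shared unlinked data plus whatever linking produces later.
struct LinkedFunction {
    std::shared_ptr<const UnlinkedFunction> unlinked;

    const std::string& name() const { return unlinked->name; }
    const std::string& inferredName() const { return unlinked->inferredName; }
    size_t parameterCount() const { return unlinked->parameterCount(); }

    enum Kind { ForCall, ForConstruct };
    const std::vector<std::string>& symbolTable(Kind kind) const
    {
        return kind == ForCall ? unlinked->symbolTableForCall
                               : unlinked->symbolTableForConstruct;
    }
};

int main()
{
    auto shared = std::make_shared<UnlinkedFunction>(
        UnlinkedFunction{"f", "f", {"a", "b"}, {"a", "b"}, {"a", "b"}});
    LinkedFunction inContextA{shared}, inContextB{shared};  // two linked views, one parse
    std::printf("%s/%zu and %s/%zu share one unlinked body\n",
                inContextA.name().c_str(), inContextA.parameterCount(),
                inContextB.name().c_str(), inContextB.parameterCount());
}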
String paramString() const; - SharedSymbolTable* symbolTable() const { return m_symbolTable.get(); } + SharedSymbolTable* symbolTable(CodeSpecializationKind kind) const { return m_unlinkedExecutable->symbolTable(kind); } void clearCodeIfNotCompiling(); + void clearUnlinkedCodeIfNotCompiling(); static void visitChildren(JSCell*, SlotVisitor&); static Structure* createStructure(JSGlobalData& globalData, JSGlobalObject* globalObject, JSValue proto) { @@ -699,15 +717,8 @@ namespace JSC { void clearCode(); - protected: - void finishCreation(JSGlobalData& globalData) - { - Base::finishCreation(globalData); - m_nameValue.set(globalData, this, jsString(&globalData, name().string())); - } - private: - FunctionExecutable(JSGlobalData&, FunctionBodyNode*); + FunctionExecutable(JSGlobalData&, const SourceCode&, UnlinkedFunctionExecutable*, unsigned firstLine, unsigned lastLine); JSObject* compileForCallInternal(ExecState*, JSScope*, JITCode::JITType, unsigned bytecodeIndex = UINT_MAX); JSObject* compileForConstructInternal(ExecState*, JSScope*, JITCode::JITType, unsigned bytecodeIndex = UINT_MAX); @@ -732,16 +743,9 @@ namespace JSC { } static const unsigned StructureFlags = OverridesVisitChildren | ScriptExecutable::StructureFlags; - bool m_forceUsesArguments; - - RefPtr<FunctionParameters> m_parameters; + WriteBarrier<UnlinkedFunctionExecutable> m_unlinkedExecutable; OwnPtr<FunctionCodeBlock> m_codeBlockForCall; OwnPtr<FunctionCodeBlock> m_codeBlockForConstruct; - Identifier m_name; - Identifier m_inferredName; - FunctionNameIsInScopeToggle m_functionNameIsInScopeToggle; - WriteBarrier<JSString> m_nameValue; - WriteBarrier<SharedSymbolTable> m_symbolTable; }; inline JSFunction::JSFunction(JSGlobalData& globalData, FunctionExecutable* executable, JSScope* scope) diff --git a/Source/JavaScriptCore/runtime/ExecutionHarness.h b/Source/JavaScriptCore/runtime/ExecutionHarness.h index 065788aee..b71b60217 100644 --- a/Source/JavaScriptCore/runtime/ExecutionHarness.h +++ b/Source/JavaScriptCore/runtime/ExecutionHarness.h @@ -49,7 +49,7 @@ inline bool prepareForExecution(ExecState* exec, OwnPtr<CodeBlockType>& codeBloc return jitCompileIfAppropriate(exec, codeBlock, jitCode, jitType, bytecodeIndex, JITCode::isBaselineCode(jitType) ? JITCompilationMustSucceed : JITCompilationCanFail); } -inline bool prepareFunctionForExecution(ExecState* exec, OwnPtr<FunctionCodeBlock>& codeBlock, JITCode& jitCode, MacroAssemblerCodePtr& jitCodeWithArityCheck, WriteBarrier<SharedSymbolTable>& symbolTable, JITCode::JITType jitType, unsigned bytecodeIndex, CodeSpecializationKind kind) +inline bool prepareFunctionForExecution(ExecState* exec, OwnPtr<FunctionCodeBlock>& codeBlock, JITCode& jitCode, MacroAssemblerCodePtr& jitCodeWithArityCheck, JITCode::JITType jitType, unsigned bytecodeIndex, CodeSpecializationKind kind) { #if ENABLE(LLINT) if (JITCode::isBaselineCode(jitType)) { @@ -61,7 +61,7 @@ inline bool prepareFunctionForExecution(ExecState* exec, OwnPtr<FunctionCodeBloc #else UNUSED_PARAM(kind); #endif // ENABLE(LLINT) - return jitCompileFunctionIfAppropriate(exec, codeBlock, jitCode, jitCodeWithArityCheck, symbolTable, jitType, bytecodeIndex, JITCode::isBaselineCode(jitType) ? JITCompilationMustSucceed : JITCompilationCanFail); + return jitCompileFunctionIfAppropriate(exec, codeBlock, jitCode, jitCodeWithArityCheck, jitType, bytecodeIndex, JITCode::isBaselineCode(jitType) ? 
JITCompilationMustSucceed : JITCompilationCanFail); } } // namespace JSC diff --git a/Source/JavaScriptCore/runtime/Identifier.cpp b/Source/JavaScriptCore/runtime/Identifier.cpp index 45dd0bbde..583c12bb1 100644 --- a/Source/JavaScriptCore/runtime/Identifier.cpp +++ b/Source/JavaScriptCore/runtime/Identifier.cpp @@ -128,7 +128,7 @@ PassRefPtr<StringImpl> Identifier::add8(JSGlobalData* globalData, const UChar* s if (!length) return StringImpl::empty(); - CharBuffer<UChar> buf = {s, length}; + CharBuffer<UChar> buf = { s, static_cast<unsigned>(length) }; HashSet<StringImpl*>::AddResult addResult = globalData->identifierTable->add<CharBuffer<UChar>, IdentifierLCharFromUCharTranslator >(buf); // If the string is newly-translated, then we need to adopt it. diff --git a/Source/JavaScriptCore/runtime/Identifier.h b/Source/JavaScriptCore/runtime/Identifier.h index bdcfbd187..dcb739ec3 100644 --- a/Source/JavaScriptCore/runtime/Identifier.h +++ b/Source/JavaScriptCore/runtime/Identifier.h @@ -180,7 +180,7 @@ namespace JSC { if (!length) return StringImpl::empty(); - CharBuffer<T> buf = {s, length}; + CharBuffer<T> buf = { s, static_cast<unsigned>(length) }; HashSet<StringImpl*>::AddResult addResult = globalData->identifierTable->add<CharBuffer<T>, IdentifierCharBufferTranslator<T> >(buf); // If the string is newly-translated, then we need to adopt it. diff --git a/Source/JavaScriptCore/runtime/JSActivation.h b/Source/JavaScriptCore/runtime/JSActivation.h index fa2291813..fc6336463 100644 --- a/Source/JavaScriptCore/runtime/JSActivation.h +++ b/Source/JavaScriptCore/runtime/JSActivation.h @@ -46,15 +46,16 @@ namespace JSC { public: typedef JSVariableObject Base; - static JSActivation* create(JSGlobalData& globalData, CallFrame* callFrame, FunctionExecutable* functionExecutable) + static JSActivation* create(JSGlobalData& globalData, CallFrame* callFrame, CodeBlock* codeBlock) { + SharedSymbolTable* symbolTable = codeBlock->symbolTable(); JSActivation* activation = new ( NotNull, allocateCell<JSActivation>( globalData.heap, - allocationSize(functionExecutable->symbolTable()) + allocationSize(symbolTable) ) - ) JSActivation(globalData, callFrame, functionExecutable->symbolTable()); + ) JSActivation(globalData, callFrame, symbolTable); activation->finishCreation(globalData); return activation; } diff --git a/Source/JavaScriptCore/runtime/JSArray.cpp b/Source/JavaScriptCore/runtime/JSArray.cpp index 7028c3b95..d1ece1a36 100644 --- a/Source/JavaScriptCore/runtime/JSArray.cpp +++ b/Source/JavaScriptCore/runtime/JSArray.cpp @@ -1092,7 +1092,7 @@ void JSArray::sortVector(ExecState* exec, JSValue compareFunction, CallType call for (; numDefined < usedVectorLength; ++numDefined) { if (numDefined > m_butterfly->vectorLength()) break; - JSValue v = indexingData<indexingType>()[numDefined].get(); + JSValue v = currentIndexingData()[numDefined].get(); if (!v || v.isUndefined()) break; tree.abstractor().m_nodes[numDefined].value = v; @@ -1101,7 +1101,7 @@ void JSArray::sortVector(ExecState* exec, JSValue compareFunction, CallType call for (unsigned i = numDefined; i < usedVectorLength; ++i) { if (i > m_butterfly->vectorLength()) break; - JSValue v = indexingData<indexingType>()[i].get(); + JSValue v = currentIndexingData()[i].get(); if (v) { if (v.isUndefined()) ++numUndefined; @@ -1116,7 +1116,7 @@ void JSArray::sortVector(ExecState* exec, JSValue compareFunction, CallType call unsigned newUsedVectorLength = numDefined + numUndefined; // The array size may have changed. Figure out the new bounds. 
- unsigned newestUsedVectorLength = relevantLength<indexingType>(); + unsigned newestUsedVectorLength = currentRelevantLength(); unsigned elementsToExtractThreshold = min(min(newestUsedVectorLength, numDefined), static_cast<unsigned>(tree.abstractor().m_nodes.size())); unsigned undefinedElementsThreshold = min(newestUsedVectorLength, newUsedVectorLength); @@ -1127,18 +1127,18 @@ void JSArray::sortVector(ExecState* exec, JSValue compareFunction, CallType call iter.start_iter_least(tree); JSGlobalData& globalData = exec->globalData(); for (unsigned i = 0; i < elementsToExtractThreshold; ++i) { - indexingData<indexingType>()[i].set(globalData, this, tree.abstractor().m_nodes[*iter].value); + currentIndexingData()[i].set(globalData, this, tree.abstractor().m_nodes[*iter].value); ++iter; } // Put undefined values back in. for (unsigned i = elementsToExtractThreshold; i < undefinedElementsThreshold; ++i) - indexingData<indexingType>()[i].setUndefined(); + currentIndexingData()[i].setUndefined(); // Ensure that unused values in the vector are zeroed out. for (unsigned i = undefinedElementsThreshold; i < clearElementsThreshold; ++i) - indexingData<indexingType>()[i].clear(); + currentIndexingData()[i].clear(); - if (hasArrayStorage(indexingType)) + if (hasArrayStorage(structure()->indexingType())) arrayStorage()->m_numValuesInVector = newUsedVectorLength; } diff --git a/Source/JavaScriptCore/runtime/JSArray.h b/Source/JavaScriptCore/runtime/JSArray.h index d4622aacc..1d1e64173 100644 --- a/Source/JavaScriptCore/runtime/JSArray.h +++ b/Source/JavaScriptCore/runtime/JSArray.h @@ -27,273 +27,273 @@ namespace JSC { - class JSArray; - class LLIntOffsetsExtractor; +class JSArray; +class LLIntOffsetsExtractor; - class JSArray : public JSNonFinalObject { - friend class LLIntOffsetsExtractor; - friend class Walker; - friend class JIT; +class JSArray : public JSNonFinalObject { + friend class LLIntOffsetsExtractor; + friend class Walker; + friend class JIT; - public: - typedef JSNonFinalObject Base; +public: + typedef JSNonFinalObject Base; - protected: - explicit JSArray(JSGlobalData& globalData, Structure* structure, Butterfly* butterfly) - : JSNonFinalObject(globalData, structure, butterfly) - { - } +protected: + explicit JSArray(JSGlobalData& globalData, Structure* structure, Butterfly* butterfly) + : JSNonFinalObject(globalData, structure, butterfly) + { + } - public: - static JSArray* create(JSGlobalData&, Structure*, unsigned initialLength = 0); +public: + static JSArray* create(JSGlobalData&, Structure*, unsigned initialLength = 0); - // tryCreateUninitialized is used for fast construction of arrays whose size and - // contents are known at time of creation. Clients of this interface must: - // - null-check the result (indicating out of memory, or otherwise unable to allocate vector). - // - call 'initializeIndex' for all properties in sequence, for 0 <= i < initialLength. - static JSArray* tryCreateUninitialized(JSGlobalData&, Structure*, unsigned initialLength); + // tryCreateUninitialized is used for fast construction of arrays whose size and + // contents are known at time of creation. Clients of this interface must: + // - null-check the result (indicating out of memory, or otherwise unable to allocate vector). + // - call 'initializeIndex' for all properties in sequence, for 0 <= i < initialLength. 
+ static JSArray* tryCreateUninitialized(JSGlobalData&, Structure*, unsigned initialLength); - JS_EXPORT_PRIVATE static bool defineOwnProperty(JSObject*, ExecState*, PropertyName, PropertyDescriptor&, bool throwException); + JS_EXPORT_PRIVATE static bool defineOwnProperty(JSObject*, ExecState*, PropertyName, PropertyDescriptor&, bool throwException); - static bool getOwnPropertySlot(JSCell*, ExecState*, PropertyName, PropertySlot&); - static bool getOwnPropertyDescriptor(JSObject*, ExecState*, PropertyName, PropertyDescriptor&); + static bool getOwnPropertySlot(JSCell*, ExecState*, PropertyName, PropertySlot&); + static bool getOwnPropertyDescriptor(JSObject*, ExecState*, PropertyName, PropertyDescriptor&); - static JS_EXPORTDATA const ClassInfo s_info; + static JS_EXPORTDATA const ClassInfo s_info; - unsigned length() const { return getArrayLength(); } - // OK to use on new arrays, but not if it might be a RegExpMatchArray. - bool setLength(ExecState*, unsigned, bool throwException = false); + unsigned length() const { return getArrayLength(); } + // OK to use on new arrays, but not if it might be a RegExpMatchArray. + bool setLength(ExecState*, unsigned, bool throwException = false); - void sort(ExecState*); - void sort(ExecState*, JSValue compareFunction, CallType, const CallData&); - void sortNumeric(ExecState*, JSValue compareFunction, CallType, const CallData&); + void sort(ExecState*); + void sort(ExecState*, JSValue compareFunction, CallType, const CallData&); + void sortNumeric(ExecState*, JSValue compareFunction, CallType, const CallData&); - void push(ExecState*, JSValue); - JSValue pop(ExecState*); + void push(ExecState*, JSValue); + JSValue pop(ExecState*); - enum ShiftCountMode { - // This form of shift hints that we're doing queueing. With this assumption in hand, - // we convert to ArrayStorage, which has queue optimizations. - ShiftCountForShift, + enum ShiftCountMode { + // This form of shift hints that we're doing queueing. With this assumption in hand, + // we convert to ArrayStorage, which has queue optimizations. + ShiftCountForShift, - // This form of shift hints that we're just doing care and feeding on an array that - // is probably typically used for ordinary accesses. With this assumption in hand, - // we try to preserve whatever indexing type it has already. 
- ShiftCountForSplice - }; - - bool shiftCountForShift(ExecState* exec, unsigned startIndex, unsigned count) - { - return shiftCountWithArrayStorage(startIndex, count, ensureArrayStorage(exec->globalData())); - } - bool shiftCountForSplice(ExecState* exec, unsigned startIndex, unsigned count) - { - return shiftCountWithAnyIndexingType(exec, startIndex, count); - } - template<ShiftCountMode shiftCountMode> - bool shiftCount(ExecState* exec, unsigned startIndex, unsigned count) - { - switch (shiftCountMode) { - case ShiftCountForShift: - return shiftCountForShift(exec, startIndex, count); - case ShiftCountForSplice: - return shiftCountForSplice(exec, startIndex, count); - default: - CRASH(); - return false; - } - } - - bool unshiftCountForShift(ExecState* exec, unsigned startIndex, unsigned count) - { - return unshiftCountWithArrayStorage(exec, startIndex, count, ensureArrayStorage(exec->globalData())); - } - bool unshiftCountForSplice(ExecState* exec, unsigned startIndex, unsigned count) - { - return unshiftCountWithAnyIndexingType(exec, startIndex, count); - } - template<ShiftCountMode shiftCountMode> - bool unshiftCount(ExecState* exec, unsigned startIndex, unsigned count) - { - switch (shiftCountMode) { - case ShiftCountForShift: - return unshiftCountForShift(exec, startIndex, count); - case ShiftCountForSplice: - return unshiftCountForSplice(exec, startIndex, count); - default: - CRASH(); - return false; - } - } - - void fillArgList(ExecState*, MarkedArgumentBuffer&); - void copyToArguments(ExecState*, CallFrame*, uint32_t length); - - static Structure* createStructure(JSGlobalData& globalData, JSGlobalObject* globalObject, JSValue prototype, IndexingType indexingType) - { - return Structure::create(globalData, globalObject, prototype, TypeInfo(ObjectType, StructureFlags), &s_info, indexingType); - } - - protected: - static const unsigned StructureFlags = OverridesGetOwnPropertySlot | OverridesGetPropertyNames | JSObject::StructureFlags; - static void put(JSCell*, ExecState*, PropertyName, JSValue, PutPropertySlot&); - - static bool deleteProperty(JSCell*, ExecState*, PropertyName); - JS_EXPORT_PRIVATE static void getOwnNonIndexPropertyNames(JSObject*, ExecState*, PropertyNameArray&, EnumerationMode); - - private: - bool isLengthWritable() - { - ArrayStorage* storage = arrayStorageOrNull(); - if (!storage) - return true; - SparseArrayValueMap* map = storage->m_sparseMap.get(); - return !map || !map->lengthIsReadOnly(); - } - - bool shiftCountWithAnyIndexingType(ExecState*, unsigned startIndex, unsigned count); - bool shiftCountWithArrayStorage(unsigned startIndex, unsigned count, ArrayStorage*); - - bool unshiftCountWithAnyIndexingType(ExecState*, unsigned startIndex, unsigned count); - bool unshiftCountWithArrayStorage(ExecState*, unsigned startIndex, unsigned count, ArrayStorage*); - bool unshiftCountSlowCase(JSGlobalData&, bool, unsigned); - - template<IndexingType indexingType> - void sortNumericVector(ExecState*, JSValue compareFunction, CallType, const CallData&); - - template<IndexingType indexingType> - void sortCompactedVector(ExecState*, WriteBarrier<Unknown>* begin, unsigned relevantLength); - - template<IndexingType indexingType> - void sortVector(ExecState*, JSValue compareFunction, CallType, const CallData&); - - bool setLengthWithArrayStorage(ExecState*, unsigned newLength, bool throwException, ArrayStorage*); - void setLengthWritable(ExecState*, bool writable); - - template<IndexingType indexingType> - void compactForSorting(unsigned& numDefined, unsigned& 
newRelevantLength); + // This form of shift hints that we're just doing care and feeding on an array that + // is probably typically used for ordinary accesses. With this assumption in hand, + // we try to preserve whatever indexing type it has already. + ShiftCountForSplice }; - inline Butterfly* createContiguousArrayButterfly(JSGlobalData& globalData, unsigned length) + bool shiftCountForShift(ExecState* exec, unsigned startIndex, unsigned count) { - IndexingHeader header; - header.setVectorLength(std::max(length, BASE_VECTOR_LEN)); - header.setPublicLength(length); - Butterfly* result = Butterfly::create( - globalData, 0, 0, true, header, header.vectorLength() * sizeof(EncodedJSValue)); - return result; + return shiftCountWithArrayStorage(startIndex, count, ensureArrayStorage(exec->globalData())); } - - inline Butterfly* createArrayButterfly(JSGlobalData& globalData, unsigned initialLength) + bool shiftCountForSplice(ExecState* exec, unsigned startIndex, unsigned count) { - Butterfly* butterfly = Butterfly::create( - globalData, 0, 0, true, baseIndexingHeaderForArray(initialLength), ArrayStorage::sizeFor(BASE_VECTOR_LEN)); - ArrayStorage* storage = butterfly->arrayStorage(); - storage->m_indexBias = 0; - storage->m_sparseMap.clear(); - storage->m_numValuesInVector = 0; - return butterfly; + return shiftCountWithAnyIndexingType(exec, startIndex, count); } - - Butterfly* createArrayButterflyInDictionaryIndexingMode(JSGlobalData&, unsigned initialLength); - - inline JSArray* JSArray::create(JSGlobalData& globalData, Structure* structure, unsigned initialLength) + template<ShiftCountMode shiftCountMode> + bool shiftCount(ExecState* exec, unsigned startIndex, unsigned count) { - Butterfly* butterfly; - if (LIKELY(structure->indexingType() == ArrayWithContiguous)) { - butterfly = createContiguousArrayButterfly(globalData, initialLength); - ASSERT(initialLength < MIN_SPARSE_ARRAY_INDEX); - } else { - ASSERT( - structure->indexingType() == ArrayWithSlowPutArrayStorage - || (initialLength && structure->indexingType() == ArrayWithArrayStorage)); - butterfly = createArrayButterfly(globalData, initialLength); + switch (shiftCountMode) { + case ShiftCountForShift: + return shiftCountForShift(exec, startIndex, count); + case ShiftCountForSplice: + return shiftCountForSplice(exec, startIndex, count); + default: + CRASH(); + return false; } - JSArray* array = new (NotNull, allocateCell<JSArray>(globalData.heap)) JSArray(globalData, structure, butterfly); - array->finishCreation(globalData); - return array; } - - inline JSArray* JSArray::tryCreateUninitialized(JSGlobalData& globalData, Structure* structure, unsigned initialLength) - { - unsigned vectorLength = std::max(BASE_VECTOR_LEN, initialLength); - if (vectorLength > MAX_STORAGE_VECTOR_LENGTH) - return 0; - Butterfly* butterfly; - if (LIKELY(structure->indexingType() == ArrayWithContiguous)) { - - void* temp; - if (!globalData.heap.tryAllocateStorage(Butterfly::totalSize(0, 0, true, vectorLength * sizeof(EncodedJSValue)), &temp)) - return 0; - butterfly = Butterfly::fromBase(temp, 0, 0); - butterfly->setVectorLength(vectorLength); - butterfly->setPublicLength(initialLength); - } else { - void* temp; - if (!globalData.heap.tryAllocateStorage(Butterfly::totalSize(0, 0, true, ArrayStorage::sizeFor(vectorLength)), &temp)) - return 0; - butterfly = Butterfly::fromBase(temp, 0, 0); - *butterfly->indexingHeader() = indexingHeaderForArray(initialLength, vectorLength); - ArrayStorage* storage = butterfly->arrayStorage(); - storage->m_indexBias = 0; - 
storage->m_sparseMap.clear(); - storage->m_numValuesInVector = initialLength; + bool unshiftCountForShift(ExecState* exec, unsigned startIndex, unsigned count) + { + return unshiftCountWithArrayStorage(exec, startIndex, count, ensureArrayStorage(exec->globalData())); + } + bool unshiftCountForSplice(ExecState* exec, unsigned startIndex, unsigned count) + { + return unshiftCountWithAnyIndexingType(exec, startIndex, count); + } + template<ShiftCountMode shiftCountMode> + bool unshiftCount(ExecState* exec, unsigned startIndex, unsigned count) + { + switch (shiftCountMode) { + case ShiftCountForShift: + return unshiftCountForShift(exec, startIndex, count); + case ShiftCountForSplice: + return unshiftCountForSplice(exec, startIndex, count); + default: + CRASH(); + return false; } - - JSArray* array = new (NotNull, allocateCell<JSArray>(globalData.heap)) JSArray(globalData, structure, butterfly); - array->finishCreation(globalData); - return array; } - JSArray* asArray(JSValue); + void fillArgList(ExecState*, MarkedArgumentBuffer&); + void copyToArguments(ExecState*, CallFrame*, uint32_t length); - inline JSArray* asArray(JSCell* cell) + static Structure* createStructure(JSGlobalData& globalData, JSGlobalObject* globalObject, JSValue prototype, IndexingType indexingType) { - ASSERT(cell->inherits(&JSArray::s_info)); - return jsCast<JSArray*>(cell); + return Structure::create(globalData, globalObject, prototype, TypeInfo(ObjectType, StructureFlags), &s_info, indexingType); } + +protected: + static const unsigned StructureFlags = OverridesGetOwnPropertySlot | OverridesGetPropertyNames | JSObject::StructureFlags; + static void put(JSCell*, ExecState*, PropertyName, JSValue, PutPropertySlot&); - inline JSArray* asArray(JSValue value) + static bool deleteProperty(JSCell*, ExecState*, PropertyName); + JS_EXPORT_PRIVATE static void getOwnNonIndexPropertyNames(JSObject*, ExecState*, PropertyNameArray&, EnumerationMode); + +private: + bool isLengthWritable() { - return asArray(value.asCell()); + ArrayStorage* storage = arrayStorageOrNull(); + if (!storage) + return true; + SparseArrayValueMap* map = storage->m_sparseMap.get(); + return !map || !map->lengthIsReadOnly(); } + + bool shiftCountWithAnyIndexingType(ExecState*, unsigned startIndex, unsigned count); + bool shiftCountWithArrayStorage(unsigned startIndex, unsigned count, ArrayStorage*); - inline bool isJSArray(JSCell* cell) { return cell->classInfo() == &JSArray::s_info; } - inline bool isJSArray(JSValue v) { return v.isCell() && isJSArray(v.asCell()); } + bool unshiftCountWithAnyIndexingType(ExecState*, unsigned startIndex, unsigned count); + bool unshiftCountWithArrayStorage(ExecState*, unsigned startIndex, unsigned count, ArrayStorage*); + bool unshiftCountSlowCase(JSGlobalData&, bool, unsigned); - inline JSArray* constructArray(ExecState* exec, Structure* arrayStructure, const ArgList& values) - { - JSGlobalData& globalData = exec->globalData(); - unsigned length = values.size(); - JSArray* array = JSArray::tryCreateUninitialized(globalData, arrayStructure, length); - - // FIXME: we should probably throw an out of memory error here, but - // when making this change we should check that all clients of this - // function will correctly handle an exception being thrown from here. 
- if (!array) - CRASH(); + template<IndexingType indexingType> + void sortNumericVector(ExecState*, JSValue compareFunction, CallType, const CallData&); + + template<IndexingType indexingType> + void sortCompactedVector(ExecState*, WriteBarrier<Unknown>* begin, unsigned relevantLength); + + template<IndexingType indexingType> + void sortVector(ExecState*, JSValue compareFunction, CallType, const CallData&); - for (unsigned i = 0; i < length; ++i) - array->initializeIndex(globalData, i, values.at(i)); - return array; + bool setLengthWithArrayStorage(ExecState*, unsigned newLength, bool throwException, ArrayStorage*); + void setLengthWritable(ExecState*, bool writable); + + template<IndexingType indexingType> + void compactForSorting(unsigned& numDefined, unsigned& newRelevantLength); +}; + +inline Butterfly* createContiguousArrayButterfly(JSGlobalData& globalData, unsigned length) +{ + IndexingHeader header; + header.setVectorLength(std::max(length, BASE_VECTOR_LEN)); + header.setPublicLength(length); + Butterfly* result = Butterfly::create( + globalData, 0, 0, true, header, header.vectorLength() * sizeof(EncodedJSValue)); + return result; +} + +inline Butterfly* createArrayButterfly(JSGlobalData& globalData, unsigned initialLength) +{ + Butterfly* butterfly = Butterfly::create( + globalData, 0, 0, true, baseIndexingHeaderForArray(initialLength), ArrayStorage::sizeFor(BASE_VECTOR_LEN)); + ArrayStorage* storage = butterfly->arrayStorage(); + storage->m_indexBias = 0; + storage->m_sparseMap.clear(); + storage->m_numValuesInVector = 0; + return butterfly; +} + +Butterfly* createArrayButterflyInDictionaryIndexingMode(JSGlobalData&, unsigned initialLength); + +inline JSArray* JSArray::create(JSGlobalData& globalData, Structure* structure, unsigned initialLength) +{ + Butterfly* butterfly; + if (LIKELY(structure->indexingType() == ArrayWithContiguous)) { + butterfly = createContiguousArrayButterfly(globalData, initialLength); + ASSERT(initialLength < MIN_SPARSE_ARRAY_INDEX); + } else { + ASSERT( + structure->indexingType() == ArrayWithSlowPutArrayStorage + || (initialLength && structure->indexingType() == ArrayWithArrayStorage)); + butterfly = createArrayButterfly(globalData, initialLength); } - - inline JSArray* constructArray(ExecState* exec, Structure* arrayStructure, const JSValue* values, unsigned length) - { - JSGlobalData& globalData = exec->globalData(); - JSArray* array = JSArray::tryCreateUninitialized(globalData, arrayStructure, length); - - // FIXME: we should probably throw an out of memory error here, but - // when making this change we should check that all clients of this - // function will correctly handle an exception being thrown from here. 
- if (!array) - CRASH(); - - for (unsigned i = 0; i < length; ++i) - array->initializeIndex(globalData, i, values[i]); - return array; + JSArray* array = new (NotNull, allocateCell<JSArray>(globalData.heap)) JSArray(globalData, structure, butterfly); + array->finishCreation(globalData); + return array; +} + +inline JSArray* JSArray::tryCreateUninitialized(JSGlobalData& globalData, Structure* structure, unsigned initialLength) +{ + unsigned vectorLength = std::max(BASE_VECTOR_LEN, initialLength); + if (vectorLength > MAX_STORAGE_VECTOR_LENGTH) + return 0; + + Butterfly* butterfly; + if (LIKELY(structure->indexingType() == ArrayWithContiguous)) { + + void* temp; + if (!globalData.heap.tryAllocateStorage(Butterfly::totalSize(0, 0, true, vectorLength * sizeof(EncodedJSValue)), &temp)) + return 0; + butterfly = Butterfly::fromBase(temp, 0, 0); + butterfly->setVectorLength(vectorLength); + butterfly->setPublicLength(initialLength); + } else { + void* temp; + if (!globalData.heap.tryAllocateStorage(Butterfly::totalSize(0, 0, true, ArrayStorage::sizeFor(vectorLength)), &temp)) + return 0; + butterfly = Butterfly::fromBase(temp, 0, 0); + *butterfly->indexingHeader() = indexingHeaderForArray(initialLength, vectorLength); + ArrayStorage* storage = butterfly->arrayStorage(); + storage->m_indexBias = 0; + storage->m_sparseMap.clear(); + storage->m_numValuesInVector = initialLength; } + + JSArray* array = new (NotNull, allocateCell<JSArray>(globalData.heap)) JSArray(globalData, structure, butterfly); + array->finishCreation(globalData); + return array; +} + +JSArray* asArray(JSValue); + +inline JSArray* asArray(JSCell* cell) +{ + ASSERT(cell->inherits(&JSArray::s_info)); + return jsCast<JSArray*>(cell); +} + +inline JSArray* asArray(JSValue value) +{ + return asArray(value.asCell()); +} + +inline bool isJSArray(JSCell* cell) { return cell->classInfo() == &JSArray::s_info; } +inline bool isJSArray(JSValue v) { return v.isCell() && isJSArray(v.asCell()); } + +inline JSArray* constructArray(ExecState* exec, Structure* arrayStructure, const ArgList& values) +{ + JSGlobalData& globalData = exec->globalData(); + unsigned length = values.size(); + JSArray* array = JSArray::tryCreateUninitialized(globalData, arrayStructure, length); + + // FIXME: we should probably throw an out of memory error here, but + // when making this change we should check that all clients of this + // function will correctly handle an exception being thrown from here. + if (!array) + CRASH(); + + for (unsigned i = 0; i < length; ++i) + array->initializeIndex(globalData, i, values.at(i)); + return array; +} + +inline JSArray* constructArray(ExecState* exec, Structure* arrayStructure, const JSValue* values, unsigned length) +{ + JSGlobalData& globalData = exec->globalData(); + JSArray* array = JSArray::tryCreateUninitialized(globalData, arrayStructure, length); + + // FIXME: we should probably throw an out of memory error here, but + // when making this change we should check that all clients of this + // function will correctly handle an exception being thrown from here. 
+ if (!array) + CRASH(); + + for (unsigned i = 0; i < length; ++i) + array->initializeIndex(globalData, i, values[i]); + return array; +} } // namespace JSC diff --git a/Source/JavaScriptCore/runtime/JSCell.h b/Source/JavaScriptCore/runtime/JSCell.h index a39af1283..3b37613d1 100644 --- a/Source/JavaScriptCore/runtime/JSCell.h +++ b/Source/JavaScriptCore/runtime/JSCell.h @@ -38,326 +38,327 @@ namespace JSC { - class CopyVisitor; - class JSDestructibleObject; - class JSGlobalObject; - class LLIntOffsetsExtractor; - class PropertyDescriptor; - class PropertyNameArray; - class Structure; - - enum EnumerationMode { - ExcludeDontEnumProperties, - IncludeDontEnumProperties - }; - - class JSCell { - friend class JSValue; - friend class MarkedBlock; - template<typename T> friend void* allocateCell(Heap&); - template<typename T> friend void* allocateCell(Heap&, size_t); - - public: - static const unsigned StructureFlags = 0; - - static const bool needsDestruction = false; - static const bool hasImmortalStructure = false; - - enum CreatingEarlyCellTag { CreatingEarlyCell }; - JSCell(CreatingEarlyCellTag); - - protected: - JSCell(JSGlobalData&, Structure*); - JS_EXPORT_PRIVATE static void destroy(JSCell*); - - public: - // Querying the type. - bool isString() const; - bool isObject() const; - bool isGetterSetter() const; - bool inherits(const ClassInfo*) const; - bool isAPIValueWrapper() const; - - Structure* structure() const; - void setStructure(JSGlobalData&, Structure*); - void clearStructure() { m_structure.clear(); } - - const char* className(); - - // Extracting the value. - JS_EXPORT_PRIVATE bool getString(ExecState*, String&) const; - JS_EXPORT_PRIVATE String getString(ExecState*) const; // null string if not a string - JS_EXPORT_PRIVATE JSObject* getObject(); // NULL if not an object - const JSObject* getObject() const; // NULL if not an object +class CopyVisitor; +class JSDestructibleObject; +class JSGlobalObject; +class LLIntOffsetsExtractor; +class PropertyDescriptor; +class PropertyNameArray; +class Structure; + +enum EnumerationMode { + ExcludeDontEnumProperties, + IncludeDontEnumProperties +}; + +class JSCell { + friend class JSValue; + friend class MarkedBlock; + template<typename T> friend void* allocateCell(Heap&); + template<typename T> friend void* allocateCell(Heap&, size_t); + +public: + static const unsigned StructureFlags = 0; + + static const bool needsDestruction = false; + static const bool hasImmortalStructure = false; + + enum CreatingEarlyCellTag { CreatingEarlyCell }; + JSCell(CreatingEarlyCellTag); + +protected: + JSCell(JSGlobalData&, Structure*); + JS_EXPORT_PRIVATE static void destroy(JSCell*); + +public: + // Querying the type. + bool isString() const; + bool isObject() const; + bool isGetterSetter() const; + bool isProxy() const; + bool inherits(const ClassInfo*) const; + bool isAPIValueWrapper() const; + + Structure* structure() const; + void setStructure(JSGlobalData&, Structure*); + void clearStructure() { m_structure.clear(); } + + const char* className(); + + // Extracting the value. + JS_EXPORT_PRIVATE bool getString(ExecState*, String&) const; + JS_EXPORT_PRIVATE String getString(ExecState*) const; // null string if not a string + JS_EXPORT_PRIVATE JSObject* getObject(); // NULL if not an object + const JSObject* getObject() const; // NULL if not an object - JS_EXPORT_PRIVATE static CallType getCallData(JSCell*, CallData&); - JS_EXPORT_PRIVATE static ConstructType getConstructData(JSCell*, ConstructData&); - - // Basic conversions. 
- JS_EXPORT_PRIVATE JSValue toPrimitive(ExecState*, PreferredPrimitiveType) const; - bool getPrimitiveNumber(ExecState*, double& number, JSValue&) const; - bool toBoolean(ExecState*) const; - JS_EXPORT_PRIVATE double toNumber(ExecState*) const; - JS_EXPORT_PRIVATE JSObject* toObject(ExecState*, JSGlobalObject*) const; - - static void visitChildren(JSCell*, SlotVisitor&); - JS_EXPORT_PRIVATE static void copyBackingStore(JSCell*, CopyVisitor&); - - // Object operations, with the toObject operation included. - const ClassInfo* classInfo() const; - const MethodTable* methodTable() const; - static void put(JSCell*, ExecState*, PropertyName, JSValue, PutPropertySlot&); - static void putByIndex(JSCell*, ExecState*, unsigned propertyName, JSValue, bool shouldThrow); + JS_EXPORT_PRIVATE static CallType getCallData(JSCell*, CallData&); + JS_EXPORT_PRIVATE static ConstructType getConstructData(JSCell*, ConstructData&); + + // Basic conversions. + JS_EXPORT_PRIVATE JSValue toPrimitive(ExecState*, PreferredPrimitiveType) const; + bool getPrimitiveNumber(ExecState*, double& number, JSValue&) const; + bool toBoolean(ExecState*) const; + JS_EXPORT_PRIVATE double toNumber(ExecState*) const; + JS_EXPORT_PRIVATE JSObject* toObject(ExecState*, JSGlobalObject*) const; + + static void visitChildren(JSCell*, SlotVisitor&); + JS_EXPORT_PRIVATE static void copyBackingStore(JSCell*, CopyVisitor&); + + // Object operations, with the toObject operation included. + const ClassInfo* classInfo() const; + const MethodTable* methodTable() const; + static void put(JSCell*, ExecState*, PropertyName, JSValue, PutPropertySlot&); + static void putByIndex(JSCell*, ExecState*, unsigned propertyName, JSValue, bool shouldThrow); - static bool deleteProperty(JSCell*, ExecState*, PropertyName); - static bool deletePropertyByIndex(JSCell*, ExecState*, unsigned propertyName); - - static JSObject* toThisObject(JSCell*, ExecState*); - - void zap() { *reinterpret_cast<uintptr_t**>(this) = 0; } - bool isZapped() const { return !*reinterpret_cast<uintptr_t* const*>(this); } - - // FIXME: Rename getOwnPropertySlot to virtualGetOwnPropertySlot, and - // fastGetOwnPropertySlot to getOwnPropertySlot. Callers should always - // call this function, not its slower virtual counterpart. (For integer - // property names, we want a similar interface with appropriate optimizations.) - bool fastGetOwnPropertySlot(ExecState*, PropertyName, PropertySlot&); - JSValue fastGetOwnProperty(ExecState*, const String&); - - static ptrdiff_t structureOffset() - { - return OBJECT_OFFSETOF(JSCell, m_structure); - } - - void* structureAddress() - { - return &m_structure; - } - -#if ENABLE(GC_VALIDATION) - Structure* unvalidatedStructure() { return m_structure.unvalidatedGet(); } -#endif - - static const TypedArrayType TypedArrayStorageType = TypedArrayNone; - protected: - - void finishCreation(JSGlobalData&); - void finishCreation(JSGlobalData&, Structure*, CreatingEarlyCellTag); - - // Base implementation; for non-object classes implements getPropertySlot. 
- static bool getOwnPropertySlot(JSCell*, ExecState*, PropertyName, PropertySlot&); - static bool getOwnPropertySlotByIndex(JSCell*, ExecState*, unsigned propertyName, PropertySlot&); - - // Dummy implementations of override-able static functions for classes to put in their MethodTable - static JSValue defaultValue(const JSObject*, ExecState*, PreferredPrimitiveType); - static NO_RETURN_DUE_TO_ASSERT void getOwnPropertyNames(JSObject*, ExecState*, PropertyNameArray&, EnumerationMode); - static NO_RETURN_DUE_TO_ASSERT void getOwnNonIndexPropertyNames(JSObject*, ExecState*, PropertyNameArray&, EnumerationMode); - static NO_RETURN_DUE_TO_ASSERT void getPropertyNames(JSObject*, ExecState*, PropertyNameArray&, EnumerationMode); - static String className(const JSObject*); - JS_EXPORT_PRIVATE static bool customHasInstance(JSObject*, ExecState*, JSValue); - static NO_RETURN_DUE_TO_ASSERT void putDirectVirtual(JSObject*, ExecState*, PropertyName, JSValue, unsigned attributes); - static bool defineOwnProperty(JSObject*, ExecState*, PropertyName, PropertyDescriptor&, bool shouldThrow); - static bool getOwnPropertyDescriptor(JSObject*, ExecState*, PropertyName, PropertyDescriptor&); - - private: - friend class LLIntOffsetsExtractor; - - WriteBarrier<Structure> m_structure; - }; - - inline JSCell::JSCell(CreatingEarlyCellTag) - { - } - - inline void JSCell::finishCreation(JSGlobalData& globalData) - { -#if ENABLE(GC_VALIDATION) - ASSERT(globalData.isInitializingObject()); - globalData.setInitializingObjectClass(0); -#else - UNUSED_PARAM(globalData); -#endif - ASSERT(m_structure); - } - - inline Structure* JSCell::structure() const - { - return m_structure.get(); - } - - inline void JSCell::visitChildren(JSCell* cell, SlotVisitor& visitor) - { - MARK_LOG_PARENT(visitor, cell); - - visitor.append(&cell->m_structure); - } - - // --- JSValue inlines ---------------------------- - - inline bool JSValue::isString() const - { - return isCell() && asCell()->isString(); - } + static bool deleteProperty(JSCell*, ExecState*, PropertyName); + static bool deletePropertyByIndex(JSCell*, ExecState*, unsigned propertyName); - inline bool JSValue::isPrimitive() const - { - return !isCell() || asCell()->isString(); - } + static JSObject* toThisObject(JSCell*, ExecState*); - inline bool JSValue::isGetterSetter() const - { - return isCell() && asCell()->isGetterSetter(); - } + void zap() { *reinterpret_cast<uintptr_t**>(this) = 0; } + bool isZapped() const { return !*reinterpret_cast<uintptr_t* const*>(this); } - inline bool JSValue::isObject() const - { - return isCell() && asCell()->isObject(); - } + // FIXME: Rename getOwnPropertySlot to virtualGetOwnPropertySlot, and + // fastGetOwnPropertySlot to getOwnPropertySlot. Callers should always + // call this function, not its slower virtual counterpart. (For integer + // property names, we want a similar interface with appropriate optimizations.) + bool fastGetOwnPropertySlot(ExecState*, PropertyName, PropertySlot&); + JSValue fastGetOwnProperty(ExecState*, const String&); - inline bool JSValue::getString(ExecState* exec, String& s) const + static ptrdiff_t structureOffset() { - return isCell() && asCell()->getString(exec, s); + return OBJECT_OFFSETOF(JSCell, m_structure); } - inline String JSValue::getString(ExecState* exec) const + void* structureAddress() { - return isCell() ? 
asCell()->getString(exec) : String(); + return &m_structure; } + +#if ENABLE(GC_VALIDATION) + Structure* unvalidatedStructure() { return m_structure.unvalidatedGet(); } +#endif + + static const TypedArrayType TypedArrayStorageType = TypedArrayNone; +protected: + + void finishCreation(JSGlobalData&); + void finishCreation(JSGlobalData&, Structure*, CreatingEarlyCellTag); + + // Base implementation; for non-object classes implements getPropertySlot. + static bool getOwnPropertySlot(JSCell*, ExecState*, PropertyName, PropertySlot&); + static bool getOwnPropertySlotByIndex(JSCell*, ExecState*, unsigned propertyName, PropertySlot&); + + // Dummy implementations of override-able static functions for classes to put in their MethodTable + static JSValue defaultValue(const JSObject*, ExecState*, PreferredPrimitiveType); + static NO_RETURN_DUE_TO_ASSERT void getOwnPropertyNames(JSObject*, ExecState*, PropertyNameArray&, EnumerationMode); + static NO_RETURN_DUE_TO_ASSERT void getOwnNonIndexPropertyNames(JSObject*, ExecState*, PropertyNameArray&, EnumerationMode); + static NO_RETURN_DUE_TO_ASSERT void getPropertyNames(JSObject*, ExecState*, PropertyNameArray&, EnumerationMode); + static String className(const JSObject*); + JS_EXPORT_PRIVATE static bool customHasInstance(JSObject*, ExecState*, JSValue); + static NO_RETURN_DUE_TO_ASSERT void putDirectVirtual(JSObject*, ExecState*, PropertyName, JSValue, unsigned attributes); + static bool defineOwnProperty(JSObject*, ExecState*, PropertyName, PropertyDescriptor&, bool shouldThrow); + static bool getOwnPropertyDescriptor(JSObject*, ExecState*, PropertyName, PropertyDescriptor&); + +private: + friend class LLIntOffsetsExtractor; + + WriteBarrier<Structure> m_structure; +}; - template <typename Base> String HandleConverter<Base, Unknown>::getString(ExecState* exec) const - { - return jsValue().getString(exec); - } +inline JSCell::JSCell(CreatingEarlyCellTag) +{ +} - inline JSObject* JSValue::getObject() const - { - return isCell() ? asCell()->getObject() : 0; - } - - ALWAYS_INLINE bool JSValue::getUInt32(uint32_t& v) const - { - if (isInt32()) { - int32_t i = asInt32(); - v = static_cast<uint32_t>(i); - return i >= 0; - } - if (isDouble()) { - double d = asDouble(); - v = static_cast<uint32_t>(d); - return v == d; - } - return false; +inline void JSCell::finishCreation(JSGlobalData& globalData) +{ +#if ENABLE(GC_VALIDATION) + ASSERT(globalData.isInitializingObject()); + globalData.setInitializingObjectClass(0); +#else + UNUSED_PARAM(globalData); +#endif + ASSERT(m_structure); +} + +inline Structure* JSCell::structure() const +{ + return m_structure.get(); +} + +inline void JSCell::visitChildren(JSCell* cell, SlotVisitor& visitor) +{ + MARK_LOG_PARENT(visitor, cell); + + visitor.append(&cell->m_structure); +} + +// --- JSValue inlines ---------------------------- + +inline bool JSValue::isString() const +{ + return isCell() && asCell()->isString(); +} + +inline bool JSValue::isPrimitive() const +{ + return !isCell() || asCell()->isString(); +} + +inline bool JSValue::isGetterSetter() const +{ + return isCell() && asCell()->isGetterSetter(); +} + +inline bool JSValue::isObject() const +{ + return isCell() && asCell()->isObject(); +} + +inline bool JSValue::getString(ExecState* exec, String& s) const +{ + return isCell() && asCell()->getString(exec, s); +} + +inline String JSValue::getString(ExecState* exec) const +{ + return isCell() ? 
asCell()->getString(exec) : String(); +} + +template <typename Base> String HandleConverter<Base, Unknown>::getString(ExecState* exec) const +{ + return jsValue().getString(exec); +} + +inline JSObject* JSValue::getObject() const +{ + return isCell() ? asCell()->getObject() : 0; +} + +ALWAYS_INLINE bool JSValue::getUInt32(uint32_t& v) const +{ + if (isInt32()) { + int32_t i = asInt32(); + v = static_cast<uint32_t>(i); + return i >= 0; } - - inline JSValue JSValue::toPrimitive(ExecState* exec, PreferredPrimitiveType preferredType) const - { - return isCell() ? asCell()->toPrimitive(exec, preferredType) : asValue(); + if (isDouble()) { + double d = asDouble(); + v = static_cast<uint32_t>(d); + return v == d; } - - inline bool JSValue::getPrimitiveNumber(ExecState* exec, double& number, JSValue& value) - { - if (isInt32()) { - number = asInt32(); - value = *this; - return true; - } - if (isDouble()) { - number = asDouble(); - value = *this; - return true; - } - if (isCell()) - return asCell()->getPrimitiveNumber(exec, number, value); - if (isTrue()) { - number = 1.0; - value = *this; - return true; - } - if (isFalse() || isNull()) { - number = 0.0; - value = *this; - return true; - } - ASSERT(isUndefined()); - number = std::numeric_limits<double>::quiet_NaN(); + return false; +} + +inline JSValue JSValue::toPrimitive(ExecState* exec, PreferredPrimitiveType preferredType) const +{ + return isCell() ? asCell()->toPrimitive(exec, preferredType) : asValue(); +} + +inline bool JSValue::getPrimitiveNumber(ExecState* exec, double& number, JSValue& value) +{ + if (isInt32()) { + number = asInt32(); value = *this; return true; } - - ALWAYS_INLINE double JSValue::toNumber(ExecState* exec) const - { - if (isInt32()) - return asInt32(); - if (isDouble()) - return asDouble(); - return toNumberSlowCase(exec); + if (isDouble()) { + number = asDouble(); + value = *this; + return true; } - - inline JSObject* JSValue::toObject(ExecState* exec) const - { - return isCell() ? asCell()->toObject(exec, exec->lexicalGlobalObject()) : toObjectSlowCase(exec, exec->lexicalGlobalObject()); + if (isCell()) + return asCell()->getPrimitiveNumber(exec, number, value); + if (isTrue()) { + number = 1.0; + value = *this; + return true; } - - inline JSObject* JSValue::toObject(ExecState* exec, JSGlobalObject* globalObject) const - { - return isCell() ? asCell()->toObject(exec, globalObject) : toObjectSlowCase(exec, globalObject); + if (isFalse() || isNull()) { + number = 0.0; + value = *this; + return true; } - - template<typename T> - void* allocateCell(Heap& heap, size_t size) - { - ASSERT(size >= sizeof(T)); + ASSERT(isUndefined()); + number = QNaN; + value = *this; + return true; +} + +ALWAYS_INLINE double JSValue::toNumber(ExecState* exec) const +{ + if (isInt32()) + return asInt32(); + if (isDouble()) + return asDouble(); + return toNumberSlowCase(exec); +} + +inline JSObject* JSValue::toObject(ExecState* exec) const +{ + return isCell() ? asCell()->toObject(exec, exec->lexicalGlobalObject()) : toObjectSlowCase(exec, exec->lexicalGlobalObject()); +} + +inline JSObject* JSValue::toObject(ExecState* exec, JSGlobalObject* globalObject) const +{ + return isCell() ? 
asCell()->toObject(exec, globalObject) : toObjectSlowCase(exec, globalObject); +} + +template<typename T> +void* allocateCell(Heap& heap, size_t size) +{ + ASSERT(size >= sizeof(T)); #if ENABLE(GC_VALIDATION) - ASSERT(!heap.globalData()->isInitializingObject()); - heap.globalData()->setInitializingObjectClass(&T::s_info); + ASSERT(!heap.globalData()->isInitializingObject()); + heap.globalData()->setInitializingObjectClass(&T::s_info); #endif - JSCell* result = 0; - if (T::needsDestruction && T::hasImmortalStructure) - result = static_cast<JSCell*>(heap.allocateWithImmortalStructureDestructor(size)); - else if (T::needsDestruction && !T::hasImmortalStructure) - result = static_cast<JSCell*>(heap.allocateWithNormalDestructor(size)); - else - result = static_cast<JSCell*>(heap.allocateWithoutDestructor(size)); - result->clearStructure(); - return result; - } + JSCell* result = 0; + if (T::needsDestruction && T::hasImmortalStructure) + result = static_cast<JSCell*>(heap.allocateWithImmortalStructureDestructor(size)); + else if (T::needsDestruction && !T::hasImmortalStructure) + result = static_cast<JSCell*>(heap.allocateWithNormalDestructor(size)); + else + result = static_cast<JSCell*>(heap.allocateWithoutDestructor(size)); + result->clearStructure(); + return result; +} - template<typename T> - void* allocateCell(Heap& heap) - { - return allocateCell<T>(heap, sizeof(T)); - } +template<typename T> +void* allocateCell(Heap& heap) +{ + return allocateCell<T>(heap, sizeof(T)); +} - inline bool isZapped(const JSCell* cell) - { - return cell->isZapped(); - } - - template<typename To, typename From> - inline To jsCast(From* from) - { - ASSERT(!from || from->JSCell::inherits(&WTF::RemovePointer<To>::Type::s_info)); - return static_cast<To>(from); - } +inline bool isZapped(const JSCell* cell) +{ + return cell->isZapped(); +} + +template<typename To, typename From> +inline To jsCast(From* from) +{ + ASSERT(!from || from->JSCell::inherits(&WTF::RemovePointer<To>::Type::s_info)); + return static_cast<To>(from); +} - template<typename To> - inline To jsCast(JSValue from) - { - ASSERT(from.isCell() && from.asCell()->JSCell::inherits(&WTF::RemovePointer<To>::Type::s_info)); - return static_cast<To>(from.asCell()); - } - - template<typename To, typename From> - inline To jsDynamicCast(From* from) - { - return from->inherits(&WTF::RemovePointer<To>::Type::s_info) ? static_cast<To>(from) : 0; - } - - template<typename To> - inline To jsDynamicCast(JSValue from) - { - return from.isCell() && from.asCell()->inherits(&WTF::RemovePointer<To>::Type::s_info) ? static_cast<To>(from.asCell()) : 0; - } +template<typename To> +inline To jsCast(JSValue from) +{ + ASSERT(from.isCell() && from.asCell()->JSCell::inherits(&WTF::RemovePointer<To>::Type::s_info)); + return static_cast<To>(from.asCell()); +} + +template<typename To, typename From> +inline To jsDynamicCast(From* from) +{ + return from->inherits(&WTF::RemovePointer<To>::Type::s_info) ? static_cast<To>(from) : 0; +} + +template<typename To> +inline To jsDynamicCast(JSValue from) +{ + return from.isCell() && from.asCell()->inherits(&WTF::RemovePointer<To>::Type::s_info) ? 
static_cast<To>(from.asCell()) : 0; +} } // namespace JSC diff --git a/Source/JavaScriptCore/runtime/JSDateMath.cpp b/Source/JavaScriptCore/runtime/JSDateMath.cpp index c54147ef2..cd3948fcf 100644 --- a/Source/JavaScriptCore/runtime/JSDateMath.cpp +++ b/Source/JavaScriptCore/runtime/JSDateMath.cpp @@ -247,7 +247,7 @@ double parseDateFromNullTerminatedCharacters(ExecState* exec, const char* dateSt int offset; double ms = WTF::parseDateFromNullTerminatedCharacters(dateString, haveTZ, offset); if (isnan(ms)) - return std::numeric_limits<double>::quiet_NaN(); + return QNaN; // fall back to local timezone if (!haveTZ) { diff --git a/Source/JavaScriptCore/runtime/JSGlobalData.cpp b/Source/JavaScriptCore/runtime/JSGlobalData.cpp index 5fb682bdb..4dca5b0f6 100644 --- a/Source/JavaScriptCore/runtime/JSGlobalData.cpp +++ b/Source/JavaScriptCore/runtime/JSGlobalData.cpp @@ -30,6 +30,7 @@ #include "JSGlobalData.h" #include "ArgList.h" +#include "CodeCache.h" #include "CommonIdentifiers.h" #include "DebuggerActivation.h" #include "FunctionConstructor.h" @@ -57,6 +58,7 @@ #include "RegExpObject.h" #include "StrictEvalActivation.h" #include "StrongInlines.h" +#include "UnlinkedCodeBlock.h" #include <wtf/RetainPtr.h> #include <wtf/Threading.h> #include <wtf/WTFThreadData.h> @@ -170,7 +172,7 @@ JSGlobalData::JSGlobalData(GlobalDataType globalDataType, HeapType heapType) , sizeOfLastScratchBuffer(0) #endif , dynamicGlobalObject(0) - , cachedUTCOffset(std::numeric_limits<double>::quiet_NaN()) + , cachedUTCOffset(QNaN) , m_enabledProfiler(0) , m_regExpCache(new RegExpCache(this)) #if ENABLE(REGEXP_TRACING) @@ -196,6 +198,7 @@ JSGlobalData::JSGlobalData(GlobalDataType globalDataType, HeapType heapType) , m_initializingObjectClass(0) #endif , m_inDefineOwnProperty(false) + , m_codeCache(CodeCache::create()) { interpreter = new Interpreter(*this); @@ -221,6 +224,11 @@ JSGlobalData::JSGlobalData(GlobalDataType globalDataType, HeapType heapType) sharedSymbolTableStructure.set(*this, SharedSymbolTable::createStructure(*this, 0, jsNull())); structureChainStructure.set(*this, StructureChain::createStructure(*this, 0, jsNull())); sparseArrayValueMapStructure.set(*this, SparseArrayValueMap::createStructure(*this, 0, jsNull())); + withScopeStructure.set(*this, JSWithScope::createStructure(*this, 0, jsNull())); + unlinkedFunctionExecutableStructure.set(*this, UnlinkedFunctionExecutable::createStructure(*this, 0, jsNull())); + unlinkedProgramCodeBlockStructure.set(*this, UnlinkedProgramCodeBlock::createStructure(*this, 0, jsNull())); + unlinkedEvalCodeBlockStructure.set(*this, UnlinkedEvalCodeBlock::createStructure(*this, 0, jsNull())); + unlinkedFunctionCodeBlockStructure.set(*this, UnlinkedFunctionCodeBlock::createStructure(*this, 0, jsNull())); wtfThreadData().setCurrentIdentifierTable(existingEntryIdentifierTable); @@ -400,10 +408,10 @@ JSGlobalData::ClientData::~ClientData() void JSGlobalData::resetDateCache() { - cachedUTCOffset = std::numeric_limits<double>::quiet_NaN(); + cachedUTCOffset = QNaN; dstOffsetCache.reset(); cachedDateString = String(); - cachedDateStringValue = std::numeric_limits<double>::quiet_NaN(); + cachedDateStringValue = QNaN; dateInstanceCache.reset(); } diff --git a/Source/JavaScriptCore/runtime/JSGlobalData.h b/Source/JavaScriptCore/runtime/JSGlobalData.h index e97c0a015..0ffaccb6a 100644 --- a/Source/JavaScriptCore/runtime/JSGlobalData.h +++ b/Source/JavaScriptCore/runtime/JSGlobalData.h @@ -63,6 +63,7 @@ struct OpaqueJSClassContextData; namespace JSC { class CodeBlock; + class CodeCache; 
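// Illustrative sketch (editor addition, not part of the patch): the m_codeCache
// member introduced above is consulted via calls like
// codeCache()->getProgramCodeBlock(globalData, executable, source, strictness,
// debuggerMode, profilerMode, error) in JSGlobalObject::createProgramCodeBlock
// further down. A minimal standalone analogue of that lookup shape -- the names
// and types here are hypothetical and are not JSC's CodeCache API:
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>

struct CompiledUnit { std::string bytecode; };   // stand-in for an unlinked code block

class MiniCodeCache {
public:
    // Returns a shared compiled unit, running the compiler callback only on a miss.
    std::shared_ptr<CompiledUnit> get(const std::string& source, bool strict,
                                      const std::function<std::shared_ptr<CompiledUnit>()>& compile)
    {
        std::string key = (strict ? "s:" : "n:") + source;  // key covers source plus flags
        auto it = m_map.find(key);
        if (it != m_map.end())
            return it->second;                   // hit: skip parsing and codegen entirely
        auto unit = compile();                   // miss: do the expensive work once
        m_map.emplace(std::move(key), unit);
        return unit;
    }

private:
    std::unordered_map<std::string, std::shared_ptr<CompiledUnit>> m_map;
};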
class CommonIdentifiers; class HandleStack; class IdentifierTable; @@ -80,6 +81,10 @@ namespace JSC { #if ENABLE(REGEXP_TRACING) class RegExp; #endif + class UnlinkedCodeBlock; + class UnlinkedEvalCodeBlock; + class UnlinkedFunctionExecutable; + class UnlinkedProgramCodeBlock; struct HashTable; struct Instruction; @@ -223,6 +228,11 @@ namespace JSC { Strong<Structure> sharedSymbolTableStructure; Strong<Structure> structureChainStructure; Strong<Structure> sparseArrayValueMapStructure; + Strong<Structure> withScopeStructure; + Strong<Structure> unlinkedFunctionExecutableStructure; + Strong<Structure> unlinkedProgramCodeBlockStructure; + Strong<Structure> unlinkedEvalCodeBlockStructure; + Strong<Structure> unlinkedFunctionCodeBlockStructure; IdentifierTable* identifierTable; CommonIdentifiers* propertyNames; @@ -436,6 +446,7 @@ namespace JSC { } JSLock& apiLock() { return m_apiLock; } + CodeCache* codeCache() { return m_codeCache.get(); } private: friend class LLIntOffsetsExtractor; @@ -456,6 +467,7 @@ namespace JSC { const ClassInfo* m_initializingObjectClass; #endif bool m_inDefineOwnProperty; + OwnPtr<CodeCache> m_codeCache; TypedArrayDescriptor m_int8ArrayDescriptor; TypedArrayDescriptor m_int16ArrayDescriptor; diff --git a/Source/JavaScriptCore/runtime/JSGlobalObject.cpp b/Source/JavaScriptCore/runtime/JSGlobalObject.cpp index 03252fad1..c466a2b04 100644 --- a/Source/JavaScriptCore/runtime/JSGlobalObject.cpp +++ b/Source/JavaScriptCore/runtime/JSGlobalObject.cpp @@ -36,6 +36,7 @@ #include "BooleanConstructor.h" #include "BooleanPrototype.h" #include "CodeBlock.h" +#include "CodeCache.h" #include "DateConstructor.h" #include "DatePrototype.h" #include "Debugger.h" @@ -580,4 +581,58 @@ void slowValidateCell(JSGlobalObject* globalObject) ASSERT_GC_OBJECT_INHERITS(globalObject, &JSGlobalObject::s_info); } +UnlinkedProgramCodeBlock* JSGlobalObject::createProgramCodeBlock(CallFrame* callFrame, ProgramExecutable* executable, JSObject** exception) +{ + ParserError error; + JSParserStrictness strictness = executable->isStrictMode() ? JSParseStrict : JSParseNormal; + DebuggerMode debuggerMode = hasDebugger() ? DebuggerOn : DebuggerOff; + ProfilerMode profilerMode = hasProfiler() ? ProfilerOn : ProfilerOff; + UnlinkedProgramCodeBlock* unlinkedCode = globalData().codeCache()->getProgramCodeBlock(globalData(), executable, executable->source(), strictness, debuggerMode, profilerMode, error); + + if (hasDebugger()) + debugger()->sourceParsed(callFrame, executable->source().provider(), error.m_line, error.m_message); + + if (error.m_type != ParserError::ErrorNone) { + *exception = error.toErrorObject(this, executable->source()); + return 0; + } + + return unlinkedCode; +} + +UnlinkedEvalCodeBlock* JSGlobalObject::createEvalCodeBlock(CallFrame* callFrame, EvalExecutable* executable, JSObject** exception) +{ + ParserError error; + JSParserStrictness strictness = executable->isStrictMode() ? JSParseStrict : JSParseNormal; + DebuggerMode debuggerMode = hasDebugger() ? DebuggerOn : DebuggerOff; + ProfilerMode profilerMode = hasProfiler() ? 
ProfilerOn : ProfilerOff; + UnlinkedEvalCodeBlock* unlinkedCode = globalData().codeCache()->getEvalCodeBlock(globalData(), executable, executable->source(), strictness, debuggerMode, profilerMode, error); + + if (hasDebugger()) + debugger()->sourceParsed(callFrame, executable->source().provider(), error.m_line, error.m_message); + + if (error.m_type != ParserError::ErrorNone) { + *exception = error.toErrorObject(this, executable->source()); + return 0; + } + + return unlinkedCode; +} + +UnlinkedFunctionExecutable* JSGlobalObject::createFunctionExecutableFromGlobalCode(CallFrame* callFrame, const Identifier& name, const SourceCode& code, JSObject** exception) +{ + ParserError error; + UnlinkedFunctionExecutable* executable = globalData().codeCache()->getFunctionExecutableFromGlobalCode(globalData(), name, code, error); + if (hasDebugger()) + debugger()->sourceParsed(callFrame, code.provider(), error.m_line, error.m_message); + + if (error.m_type != ParserError::ErrorNone) { + *exception = error.toErrorObject(this, code); + return 0; + } + + return executable; +} + + } // namespace JSC diff --git a/Source/JavaScriptCore/runtime/JSGlobalObject.h b/Source/JavaScriptCore/runtime/JSGlobalObject.h index 2994aa64b..3212363ab 100644 --- a/Source/JavaScriptCore/runtime/JSGlobalObject.h +++ b/Source/JavaScriptCore/runtime/JSGlobalObject.h @@ -43,6 +43,10 @@ namespace JSC { class Debugger; class ErrorConstructor; class ErrorPrototype; + class EvalCodeBlock; + class EvalExecutable; + class FunctionCodeBlock; + class FunctionExecutable; class FunctionPrototype; class GetterSetter; class GlobalCodeBlock; @@ -50,9 +54,10 @@ namespace JSC { class LLIntOffsetsExtractor; class NativeErrorConstructor; class ProgramCodeBlock; + class ProgramExecutable; class RegExpConstructor; class RegExpPrototype; - + class SourceCode; struct ActivationStackNode; struct HashTable; @@ -185,6 +190,9 @@ namespace JSC { static JS_EXPORTDATA const ClassInfo s_info; + bool hasDebugger() const { return m_debugger; } + bool hasProfiler() const { return globalObjectMethodTable()->supportsProfiling(this); } + protected: JS_EXPORT_PRIVATE explicit JSGlobalObject(JSGlobalData&, Structure*, const GlobalObjectMethodTable* = 0); @@ -271,6 +279,10 @@ namespace JSC { Structure* arrayStructureWithArrayStorage() const { return m_arrayStructureWithArrayStorage.get(); } void* addressOfArrayStructure() { return &m_arrayStructure; } void* addressOfArrayStructureWithArrayStorage() { return &m_arrayStructureWithArrayStorage; } + bool isOriginalArrayStructure(Structure* structure) + { + return structure == m_arrayStructure.get() || structure == m_arrayStructureWithArrayStorage.get(); + } Structure* booleanObjectStructure() const { return m_booleanObjectStructure.get(); } Structure* callbackConstructorStructure() const { return m_callbackConstructorStructure.get(); } Structure* callbackFunctionStructure() const { return m_callbackFunctionStructure.get(); } @@ -362,6 +374,11 @@ namespace JSC { double weakRandomNumber() { return m_weakRandom.get(); } unsigned weakRandomInteger() { return m_weakRandom.getUint32(); } + + UnlinkedProgramCodeBlock* createProgramCodeBlock(CallFrame*, ProgramExecutable*, JSObject** exception); + UnlinkedEvalCodeBlock* createEvalCodeBlock(CallFrame*, EvalExecutable*, JSObject** exception); + UnlinkedFunctionExecutable* createFunctionExecutableFromGlobalCode(CallFrame*, const Identifier&, const SourceCode&, JSObject** exception); + protected: static const unsigned StructureFlags = OverridesGetOwnPropertySlot | 
OverridesVisitChildren | OverridesGetPropertyNames | Base::StructureFlags; diff --git a/Source/JavaScriptCore/runtime/JSGlobalObjectFunctions.cpp b/Source/JavaScriptCore/runtime/JSGlobalObjectFunctions.cpp index 8b1acb25a..7ac76d350 100644 --- a/Source/JavaScriptCore/runtime/JSGlobalObjectFunctions.cpp +++ b/Source/JavaScriptCore/runtime/JSGlobalObjectFunctions.cpp @@ -276,7 +276,7 @@ static double parseInt(const String& s, const CharType* data, int radix) // 8.a If R < 2 or R > 36, then return NaN. if (radix < 2 || radix > 36) - return std::numeric_limits<double>::quiet_NaN(); + return QNaN; // 13. Let mathInt be the mathematical integer value that is represented by Z in radix-R notation, using the letters // A-Z and a-z for digits with values 10 through 35. (However, if R is 10 and Z contains more than 20 significant @@ -299,7 +299,7 @@ static double parseInt(const String& s, const CharType* data, int radix) // 12. If Z is empty, return NaN. if (!sawDigit) - return std::numeric_limits<double>::quiet_NaN(); + return QNaN; // Alternate code path for certain large numbers. if (number >= mantissaOverflowLowerBound) { @@ -397,7 +397,7 @@ static double jsStrDecimalLiteral(const CharType*& data, const CharType* end) } // Not a number. - return std::numeric_limits<double>::quiet_NaN(); + return QNaN; } template <typename CharType> @@ -427,7 +427,7 @@ static double toDouble(const CharType* characters, unsigned size) break; } if (characters != endCharacters) - return std::numeric_limits<double>::quiet_NaN(); + return QNaN; return number; } @@ -443,7 +443,7 @@ double jsToNumber(const String& s) return c - '0'; if (isStrWhiteSpace(c)) return 0; - return std::numeric_limits<double>::quiet_NaN(); + return QNaN; } if (s.is8Bit()) @@ -459,7 +459,7 @@ static double parseFloat(const String& s) UChar c = s[0]; if (isASCIIDigit(c)) return c - '0'; - return std::numeric_limits<double>::quiet_NaN(); + return QNaN; } if (s.is8Bit()) { @@ -474,7 +474,7 @@ static double parseFloat(const String& s) // Empty string. if (data == end) - return std::numeric_limits<double>::quiet_NaN(); + return QNaN; return jsStrDecimalLiteral(data, end); } @@ -490,7 +490,7 @@ static double parseFloat(const String& s) // Empty string. 
if (data == end) - return std::numeric_limits<double>::quiet_NaN(); + return QNaN; return jsStrDecimalLiteral(data, end); } diff --git a/Source/JavaScriptCore/runtime/JSONObject.cpp b/Source/JavaScriptCore/runtime/JSONObject.cpp index fd939934e..e051ec7d9 100644 --- a/Source/JavaScriptCore/runtime/JSONObject.cpp +++ b/Source/JavaScriptCore/runtime/JSONObject.cpp @@ -294,7 +294,7 @@ static void appendStringToStringBuilder(StringBuilder& builder, const CharType* default: static const char hexDigits[] = "0123456789abcdef"; UChar ch = data[i]; - LChar hex[] = { '\\', 'u', hexDigits[(ch >> 12) & 0xF], hexDigits[(ch >> 8) & 0xF], hexDigits[(ch >> 4) & 0xF], hexDigits[ch & 0xF] }; + LChar hex[] = { '\\', 'u', static_cast<LChar>(hexDigits[(ch >> 12) & 0xF]), static_cast<LChar>(hexDigits[(ch >> 8) & 0xF]), static_cast<LChar>(hexDigits[(ch >> 4) & 0xF]), static_cast<LChar>(hexDigits[ch & 0xF]) }; builder.append(hex, WTF_ARRAY_LENGTH(hex)); break; } diff --git a/Source/JavaScriptCore/runtime/JSObject.h b/Source/JavaScriptCore/runtime/JSObject.h index 9204099cb..82455390f 100644 --- a/Source/JavaScriptCore/runtime/JSObject.h +++ b/Source/JavaScriptCore/runtime/JSObject.h @@ -44,810 +44,841 @@ namespace JSC { - inline JSCell* getJSFunction(JSValue value) - { - if (value.isCell() && (value.asCell()->structure()->typeInfo().type() == JSFunctionType)) - return value.asCell(); - return 0; - } +inline JSCell* getJSFunction(JSValue value) +{ + if (value.isCell() && (value.asCell()->structure()->typeInfo().type() == JSFunctionType)) + return value.asCell(); + return 0; +} - JS_EXPORT_PRIVATE JSCell* getCallableObjectSlow(JSCell*); +JS_EXPORT_PRIVATE JSCell* getCallableObjectSlow(JSCell*); - inline JSCell* getCallableObject(JSValue value) - { - if (!value.isCell()) - return 0; - return getCallableObjectSlow(value.asCell()); - } - - class GetterSetter; - class HashEntry; - class InternalFunction; - class LLIntOffsetsExtractor; - class MarkedBlock; - class PropertyDescriptor; - class PropertyNameArray; - class Structure; - struct HashTable; - - JS_EXPORT_PRIVATE JSObject* throwTypeError(ExecState*, const String&); - extern JS_EXPORTDATA const char* StrictModeReadonlyPropertyWriteError; - - // ECMA 262-3 8.6.1 - // Property attributes - enum Attribute { - None = 0, - ReadOnly = 1 << 1, // property can be only read, not written - DontEnum = 1 << 2, // property doesn't appear in (for .. in ..) - DontDelete = 1 << 3, // property can't be deleted - Function = 1 << 4, // property is a function - only used by static hashtables - Accessor = 1 << 5, // property is a getter/setter +inline JSCell* getCallableObject(JSValue value) +{ + if (!value.isCell()) + return 0; + return getCallableObjectSlow(value.asCell()); +} + +class GetterSetter; +class HashEntry; +class InternalFunction; +class LLIntOffsetsExtractor; +class MarkedBlock; +class PropertyDescriptor; +class PropertyNameArray; +class Structure; +struct HashTable; + +JS_EXPORT_PRIVATE JSObject* throwTypeError(ExecState*, const String&); +extern JS_EXPORTDATA const char* StrictModeReadonlyPropertyWriteError; + +// ECMA 262-3 8.6.1 +// Property attributes +enum Attribute { + None = 0, + ReadOnly = 1 << 1, // property can be only read, not written + DontEnum = 1 << 2, // property doesn't appear in (for .. in ..) 
+ DontDelete = 1 << 3, // property can't be deleted + Function = 1 << 4, // property is a function - only used by static hashtables + Accessor = 1 << 5, // property is a getter/setter +}; + +COMPILE_ASSERT(None < FirstInternalAttribute, None_is_below_FirstInternalAttribute); +COMPILE_ASSERT(ReadOnly < FirstInternalAttribute, ReadOnly_is_below_FirstInternalAttribute); +COMPILE_ASSERT(DontEnum < FirstInternalAttribute, DontEnum_is_below_FirstInternalAttribute); +COMPILE_ASSERT(DontDelete < FirstInternalAttribute, DontDelete_is_below_FirstInternalAttribute); +COMPILE_ASSERT(Function < FirstInternalAttribute, Function_is_below_FirstInternalAttribute); +COMPILE_ASSERT(Accessor < FirstInternalAttribute, Accessor_is_below_FirstInternalAttribute); + +class JSFinalObject; + +class JSObject : public JSCell { + friend class BatchedTransitionOptimizer; + friend class JIT; + friend class JSCell; + friend class JSFinalObject; + friend class MarkedBlock; + JS_EXPORT_PRIVATE friend bool setUpStaticFunctionSlot(ExecState*, const HashEntry*, JSObject*, PropertyName, PropertySlot&); + + enum PutMode { + PutModePut, + PutModeDefineOwnProperty, }; - COMPILE_ASSERT(None < FirstInternalAttribute, None_is_below_FirstInternalAttribute); - COMPILE_ASSERT(ReadOnly < FirstInternalAttribute, ReadOnly_is_below_FirstInternalAttribute); - COMPILE_ASSERT(DontEnum < FirstInternalAttribute, DontEnum_is_below_FirstInternalAttribute); - COMPILE_ASSERT(DontDelete < FirstInternalAttribute, DontDelete_is_below_FirstInternalAttribute); - COMPILE_ASSERT(Function < FirstInternalAttribute, Function_is_below_FirstInternalAttribute); - COMPILE_ASSERT(Accessor < FirstInternalAttribute, Accessor_is_below_FirstInternalAttribute); - - class JSFinalObject; - - class JSObject : public JSCell { - friend class BatchedTransitionOptimizer; - friend class JIT; - friend class JSCell; - friend class JSFinalObject; - friend class MarkedBlock; - JS_EXPORT_PRIVATE friend bool setUpStaticFunctionSlot(ExecState*, const HashEntry*, JSObject*, PropertyName, PropertySlot&); - - enum PutMode { - PutModePut, - PutModeDefineOwnProperty, - }; - - public: - typedef JSCell Base; +public: + typedef JSCell Base; - static size_t allocationSize(size_t inlineCapacity) - { - return sizeof(JSObject) + inlineCapacity * sizeof(WriteBarrierBase<Unknown>); - } + static size_t allocationSize(size_t inlineCapacity) + { + return sizeof(JSObject) + inlineCapacity * sizeof(WriteBarrierBase<Unknown>); + } - JS_EXPORT_PRIVATE static void visitChildren(JSCell*, SlotVisitor&); - JS_EXPORT_PRIVATE static void copyBackingStore(JSCell*, CopyVisitor&); + JS_EXPORT_PRIVATE static void visitChildren(JSCell*, SlotVisitor&); + JS_EXPORT_PRIVATE static void copyBackingStore(JSCell*, CopyVisitor&); - JS_EXPORT_PRIVATE static String className(const JSObject*); + JS_EXPORT_PRIVATE static String className(const JSObject*); - JSValue prototype() const; - void setPrototype(JSGlobalData&, JSValue prototype); - bool setPrototypeWithCycleCheck(JSGlobalData&, JSValue prototype); + JSValue prototype() const; + void setPrototype(JSGlobalData&, JSValue prototype); + bool setPrototypeWithCycleCheck(JSGlobalData&, JSValue prototype); - Structure* inheritorID(JSGlobalData&); - void notifyUsedAsPrototype(JSGlobalData&); + Structure* inheritorID(JSGlobalData&); + void notifyUsedAsPrototype(JSGlobalData&); - bool mayBeUsedAsPrototype(JSGlobalData& globalData) - { - return isValidOffset(structure()->get(globalData, globalData.m_inheritorIDKey)); - } + bool mayBeUsedAsPrototype(JSGlobalData& globalData) + { + 
return isValidOffset(structure()->get(globalData, globalData.m_inheritorIDKey)); + } - bool mayInterceptIndexedAccesses() - { - return structure()->mayInterceptIndexedAccesses(); - } + bool mayInterceptIndexedAccesses() + { + return structure()->mayInterceptIndexedAccesses(); + } - JSValue get(ExecState*, PropertyName) const; - JSValue get(ExecState*, unsigned propertyName) const; - - bool getPropertySlot(ExecState*, PropertyName, PropertySlot&); - bool getPropertySlot(ExecState*, unsigned propertyName, PropertySlot&); - JS_EXPORT_PRIVATE bool getPropertyDescriptor(ExecState*, PropertyName, PropertyDescriptor&); - - static bool getOwnPropertySlot(JSCell*, ExecState*, PropertyName, PropertySlot&); - JS_EXPORT_PRIVATE static bool getOwnPropertySlotByIndex(JSCell*, ExecState*, unsigned propertyName, PropertySlot&); - JS_EXPORT_PRIVATE static bool getOwnPropertyDescriptor(JSObject*, ExecState*, PropertyName, PropertyDescriptor&); - - bool allowsAccessFrom(ExecState*); - - unsigned getArrayLength() const - { - switch (structure()->indexingType()) { - case ALL_BLANK_INDEXING_TYPES: - return 0; - case ALL_CONTIGUOUS_INDEXING_TYPES: - case ALL_ARRAY_STORAGE_INDEXING_TYPES: - return m_butterfly->publicLength(); - default: - ASSERT_NOT_REACHED(); - return 0; - } + JSValue get(ExecState*, PropertyName) const; + JSValue get(ExecState*, unsigned propertyName) const; + + bool getPropertySlot(ExecState*, PropertyName, PropertySlot&); + bool getPropertySlot(ExecState*, unsigned propertyName, PropertySlot&); + JS_EXPORT_PRIVATE bool getPropertyDescriptor(ExecState*, PropertyName, PropertyDescriptor&); + + static bool getOwnPropertySlot(JSCell*, ExecState*, PropertyName, PropertySlot&); + JS_EXPORT_PRIVATE static bool getOwnPropertySlotByIndex(JSCell*, ExecState*, unsigned propertyName, PropertySlot&); + JS_EXPORT_PRIVATE static bool getOwnPropertyDescriptor(JSObject*, ExecState*, PropertyName, PropertyDescriptor&); + + bool allowsAccessFrom(ExecState*); + + unsigned getArrayLength() const + { + switch (structure()->indexingType()) { + case ALL_BLANK_INDEXING_TYPES: + return 0; + case ALL_CONTIGUOUS_INDEXING_TYPES: + case ALL_ARRAY_STORAGE_INDEXING_TYPES: + return m_butterfly->publicLength(); + default: + ASSERT_NOT_REACHED(); + return 0; } + } - unsigned getVectorLength() - { - switch (structure()->indexingType()) { - case ALL_BLANK_INDEXING_TYPES: - return 0; - case ALL_CONTIGUOUS_INDEXING_TYPES: - case ALL_ARRAY_STORAGE_INDEXING_TYPES: - return m_butterfly->vectorLength(); - default: - ASSERT_NOT_REACHED(); - return 0; - } + unsigned getVectorLength() + { + switch (structure()->indexingType()) { + case ALL_BLANK_INDEXING_TYPES: + return 0; + case ALL_CONTIGUOUS_INDEXING_TYPES: + case ALL_ARRAY_STORAGE_INDEXING_TYPES: + return m_butterfly->vectorLength(); + default: + ASSERT_NOT_REACHED(); + return 0; } + } - JS_EXPORT_PRIVATE static void put(JSCell*, ExecState*, PropertyName, JSValue, PutPropertySlot&); - JS_EXPORT_PRIVATE static void putByIndex(JSCell*, ExecState*, unsigned propertyName, JSValue, bool shouldThrow); + JS_EXPORT_PRIVATE static void put(JSCell*, ExecState*, PropertyName, JSValue, PutPropertySlot&); + JS_EXPORT_PRIVATE static void putByIndex(JSCell*, ExecState*, unsigned propertyName, JSValue, bool shouldThrow); - void putByIndexInline(ExecState* exec, unsigned propertyName, JSValue value, bool shouldThrow) - { - if (canSetIndexQuickly(propertyName)) { - setIndexQuickly(exec->globalData(), propertyName, value); - return; - } - methodTable()->putByIndex(this, exec, propertyName, value, 
shouldThrow); + void putByIndexInline(ExecState* exec, unsigned propertyName, JSValue value, bool shouldThrow) + { + if (canSetIndexQuickly(propertyName)) { + setIndexQuickly(exec->globalData(), propertyName, value); + return; } + methodTable()->putByIndex(this, exec, propertyName, value, shouldThrow); + } - // This is similar to the putDirect* methods: - // - the prototype chain is not consulted - // - accessors are not called. - // - it will ignore extensibility and read-only properties if PutDirectIndexLikePutDirect is passed as the mode (the default). - // This method creates a property with attributes writable, enumerable and configurable all set to true. - bool putDirectIndex(ExecState* exec, unsigned propertyName, JSValue value, unsigned attributes, PutDirectIndexMode mode) - { - if (!attributes && canSetIndexQuicklyForPutDirect(propertyName)) { - setIndexQuickly(exec->globalData(), propertyName, value); - return true; - } - return putDirectIndexBeyondVectorLength(exec, propertyName, value, attributes, mode); - } - bool putDirectIndex(ExecState* exec, unsigned propertyName, JSValue value) - { - return putDirectIndex(exec, propertyName, value, 0, PutDirectIndexLikePutDirect); + // This is similar to the putDirect* methods: + // - the prototype chain is not consulted + // - accessors are not called. + // - it will ignore extensibility and read-only properties if PutDirectIndexLikePutDirect is passed as the mode (the default). + // This method creates a property with attributes writable, enumerable and configurable all set to true. + bool putDirectIndex(ExecState* exec, unsigned propertyName, JSValue value, unsigned attributes, PutDirectIndexMode mode) + { + if (!attributes && canSetIndexQuicklyForPutDirect(propertyName)) { + setIndexQuickly(exec->globalData(), propertyName, value); + return true; } + return putDirectIndexBeyondVectorLength(exec, propertyName, value, attributes, mode); + } + bool putDirectIndex(ExecState* exec, unsigned propertyName, JSValue value) + { + return putDirectIndex(exec, propertyName, value, 0, PutDirectIndexLikePutDirect); + } - // A non-throwing version of putDirect and putDirectIndex. - JS_EXPORT_PRIVATE void putDirectMayBeIndex(ExecState*, PropertyName, JSValue); + // A non-throwing version of putDirect and putDirectIndex. 
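// Illustrative sketch (editor addition, not part of the patch): putByIndexInline and
// putDirectIndex above share one shape -- attempt a cheap in-bounds store first, and
// only fall back to the general, attribute- and extensibility-aware path when the
// fast check fails. A standalone analogue with hypothetical names:
#include <cstddef>
#include <vector>

class IndexedBox {
public:
    void putByIndex(size_t i, int value)
    {
        if (canSetIndexQuickly(i)) {     // fast path: the slot already exists
            m_vector[i] = value;
            return;
        }
        putByIndexSlow(i, value);        // slow path: grow, convert, or reject
    }

private:
    bool canSetIndexQuickly(size_t i) const { return i < m_vector.size(); }

    void putByIndexSlow(size_t i, int value)
    {
        m_vector.resize(i + 1, 0);       // the real slow path also checks attributes
        m_vector[i] = value;
    }

    std::vector<int> m_vector;
};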
+ JS_EXPORT_PRIVATE void putDirectMayBeIndex(ExecState*, PropertyName, JSValue); - bool canGetIndexQuickly(unsigned i) - { - switch (structure()->indexingType()) { - case ALL_BLANK_INDEXING_TYPES: - return false; - case ALL_CONTIGUOUS_INDEXING_TYPES: - return i < m_butterfly->vectorLength() && m_butterfly->contiguous()[i]; - case ALL_ARRAY_STORAGE_INDEXING_TYPES: - return i < m_butterfly->arrayStorage()->vectorLength() && m_butterfly->arrayStorage()->m_vector[i]; - default: - ASSERT_NOT_REACHED(); - return false; - } + bool canGetIndexQuickly(unsigned i) + { + switch (structure()->indexingType()) { + case ALL_BLANK_INDEXING_TYPES: + return false; + case ALL_CONTIGUOUS_INDEXING_TYPES: + return i < m_butterfly->vectorLength() && m_butterfly->contiguous()[i]; + case ALL_ARRAY_STORAGE_INDEXING_TYPES: + return i < m_butterfly->arrayStorage()->vectorLength() && m_butterfly->arrayStorage()->m_vector[i]; + default: + ASSERT_NOT_REACHED(); + return false; } + } - JSValue getIndexQuickly(unsigned i) - { - switch (structure()->indexingType()) { - case ALL_CONTIGUOUS_INDEXING_TYPES: + JSValue getIndexQuickly(unsigned i) + { + switch (structure()->indexingType()) { + case ALL_CONTIGUOUS_INDEXING_TYPES: + return m_butterfly->contiguous()[i].get(); + case ALL_ARRAY_STORAGE_INDEXING_TYPES: + return m_butterfly->arrayStorage()->m_vector[i].get(); + default: + ASSERT_NOT_REACHED(); + return JSValue(); + } + } + + JSValue tryGetIndexQuickly(unsigned i) + { + switch (structure()->indexingType()) { + case ALL_BLANK_INDEXING_TYPES: + break; + case ALL_CONTIGUOUS_INDEXING_TYPES: + if (i < m_butterfly->publicLength()) return m_butterfly->contiguous()[i].get(); - case ALL_ARRAY_STORAGE_INDEXING_TYPES: + break; + case ALL_ARRAY_STORAGE_INDEXING_TYPES: + if (i < m_butterfly->arrayStorage()->vectorLength()) return m_butterfly->arrayStorage()->m_vector[i].get(); - default: - ASSERT_NOT_REACHED(); - return JSValue(); - } + break; + default: + ASSERT_NOT_REACHED(); + break; } + return JSValue(); + } - JSValue tryGetIndexQuickly(unsigned i) - { - switch (structure()->indexingType()) { - case ALL_BLANK_INDEXING_TYPES: - break; - case ALL_CONTIGUOUS_INDEXING_TYPES: - if (i < m_butterfly->publicLength()) - return m_butterfly->contiguous()[i].get(); - break; - case ALL_ARRAY_STORAGE_INDEXING_TYPES: - if (i < m_butterfly->arrayStorage()->vectorLength()) - return m_butterfly->arrayStorage()->m_vector[i].get(); - break; - default: - ASSERT_NOT_REACHED(); - break; - } - return JSValue(); - } + JSValue getDirectIndex(ExecState* exec, unsigned i) + { + if (JSValue result = tryGetIndexQuickly(i)) + return result; + PropertySlot slot(this); + if (methodTable()->getOwnPropertySlotByIndex(this, exec, i, slot)) + return slot.getValue(exec, i); + return JSValue(); + } - JSValue getDirectIndex(ExecState* exec, unsigned i) - { - if (JSValue result = tryGetIndexQuickly(i)) - return result; - PropertySlot slot(this); - if (methodTable()->getOwnPropertySlotByIndex(this, exec, i, slot)) - return slot.getValue(exec, i); - return JSValue(); - } + JSValue getIndex(ExecState* exec, unsigned i) + { + if (JSValue result = tryGetIndexQuickly(i)) + return result; + return get(exec, i); + } - JSValue getIndex(ExecState* exec, unsigned i) - { - if (JSValue result = tryGetIndexQuickly(i)) - return result; - return get(exec, i); + bool canSetIndexQuickly(unsigned i) + { + switch (structure()->indexingType()) { + case ALL_BLANK_INDEXING_TYPES: + return false; + case ALL_CONTIGUOUS_INDEXING_TYPES: + case NonArrayWithArrayStorage: + case 
ArrayWithArrayStorage: + return i < m_butterfly->vectorLength(); + case NonArrayWithSlowPutArrayStorage: + case ArrayWithSlowPutArrayStorage: + return i < m_butterfly->arrayStorage()->vectorLength() + && !!m_butterfly->arrayStorage()->m_vector[i]; + default: + ASSERT_NOT_REACHED(); + return false; } + } - bool canSetIndexQuickly(unsigned i) - { - switch (structure()->indexingType()) { - case ALL_BLANK_INDEXING_TYPES: - return false; - case ALL_CONTIGUOUS_INDEXING_TYPES: - case NonArrayWithArrayStorage: - case ArrayWithArrayStorage: - return i < m_butterfly->vectorLength(); - case NonArrayWithSlowPutArrayStorage: - case ArrayWithSlowPutArrayStorage: - return i < m_butterfly->arrayStorage()->vectorLength() - && !!m_butterfly->arrayStorage()->m_vector[i]; - default: - ASSERT_NOT_REACHED(); - return false; - } + bool canSetIndexQuicklyForPutDirect(unsigned i) + { + switch (structure()->indexingType()) { + case ALL_BLANK_INDEXING_TYPES: + return false; + case ALL_CONTIGUOUS_INDEXING_TYPES: + case ALL_ARRAY_STORAGE_INDEXING_TYPES: + return i < m_butterfly->vectorLength(); + default: + ASSERT_NOT_REACHED(); + return false; } + } - bool canSetIndexQuicklyForPutDirect(unsigned i) - { - switch (structure()->indexingType()) { - case ALL_BLANK_INDEXING_TYPES: - return false; - case ALL_CONTIGUOUS_INDEXING_TYPES: - case ALL_ARRAY_STORAGE_INDEXING_TYPES: - return i < m_butterfly->vectorLength(); - default: - ASSERT_NOT_REACHED(); - return false; - } + void setIndexQuickly(JSGlobalData& globalData, unsigned i, JSValue v) + { + switch (structure()->indexingType()) { + case ALL_CONTIGUOUS_INDEXING_TYPES: { + ASSERT(i < m_butterfly->vectorLength()); + m_butterfly->contiguous()[i].set(globalData, this, v); + if (i >= m_butterfly->publicLength()) + m_butterfly->setPublicLength(i + 1); + break; } - - void setIndexQuickly(JSGlobalData& globalData, unsigned i, JSValue v) - { - switch (structure()->indexingType()) { - case ALL_CONTIGUOUS_INDEXING_TYPES: { - ASSERT(i < m_butterfly->vectorLength()); - m_butterfly->contiguous()[i].set(globalData, this, v); - if (i >= m_butterfly->publicLength()) - m_butterfly->setPublicLength(i + 1); - break; - } - case ALL_ARRAY_STORAGE_INDEXING_TYPES: { - ArrayStorage* storage = m_butterfly->arrayStorage(); - WriteBarrier<Unknown>& x = storage->m_vector[i]; - JSValue old = x.get(); - x.set(globalData, this, v); - if (!old) { - ++storage->m_numValuesInVector; - if (i >= storage->length()) - storage->setLength(i + 1); - } - break; - } - default: - ASSERT_NOT_REACHED(); + case ALL_ARRAY_STORAGE_INDEXING_TYPES: { + ArrayStorage* storage = m_butterfly->arrayStorage(); + WriteBarrier<Unknown>& x = storage->m_vector[i]; + JSValue old = x.get(); + x.set(globalData, this, v); + if (!old) { + ++storage->m_numValuesInVector; + if (i >= storage->length()) + storage->setLength(i + 1); } + break; } - - void initializeIndex(JSGlobalData& globalData, unsigned i, JSValue v) - { - switch (structure()->indexingType()) { - case ALL_CONTIGUOUS_INDEXING_TYPES: { - ASSERT(i < m_butterfly->publicLength()); - ASSERT(i < m_butterfly->vectorLength()); - m_butterfly->contiguous()[i].set(globalData, this, v); - break; - } - case ALL_ARRAY_STORAGE_INDEXING_TYPES: { - ArrayStorage* storage = m_butterfly->arrayStorage(); - ASSERT(i < storage->length()); - ASSERT(i < storage->m_numValuesInVector); - storage->m_vector[i].set(globalData, this, v); - break; - } - default: - ASSERT_NOT_REACHED(); - } + default: + ASSERT_NOT_REACHED(); } + } - bool hasSparseMap() - { - switch (structure()->indexingType()) { - case 
ALL_BLANK_INDEXING_TYPES: - case ALL_CONTIGUOUS_INDEXING_TYPES: - return false; - case ALL_ARRAY_STORAGE_INDEXING_TYPES: - return m_butterfly->arrayStorage()->m_sparseMap; - default: - ASSERT_NOT_REACHED(); - return false; - } + void initializeIndex(JSGlobalData& globalData, unsigned i, JSValue v) + { + switch (structure()->indexingType()) { + case ALL_CONTIGUOUS_INDEXING_TYPES: { + ASSERT(i < m_butterfly->publicLength()); + ASSERT(i < m_butterfly->vectorLength()); + m_butterfly->contiguous()[i].set(globalData, this, v); + break; + } + case ALL_ARRAY_STORAGE_INDEXING_TYPES: { + ArrayStorage* storage = m_butterfly->arrayStorage(); + ASSERT(i < storage->length()); + ASSERT(i < storage->m_numValuesInVector); + storage->m_vector[i].set(globalData, this, v); + break; } + default: + ASSERT_NOT_REACHED(); + } + } - bool inSparseIndexingMode() - { - switch (structure()->indexingType()) { - case ALL_BLANK_INDEXING_TYPES: - case ALL_CONTIGUOUS_INDEXING_TYPES: - return false; - case ALL_ARRAY_STORAGE_INDEXING_TYPES: - return m_butterfly->arrayStorage()->inSparseMode(); - default: - ASSERT_NOT_REACHED(); - return false; - } + bool hasSparseMap() + { + switch (structure()->indexingType()) { + case ALL_BLANK_INDEXING_TYPES: + case ALL_CONTIGUOUS_INDEXING_TYPES: + return false; + case ALL_ARRAY_STORAGE_INDEXING_TYPES: + return m_butterfly->arrayStorage()->m_sparseMap; + default: + ASSERT_NOT_REACHED(); + return false; } + } - void enterDictionaryIndexingMode(JSGlobalData&); - - // putDirect is effectively an unchecked vesion of 'defineOwnProperty': - // - the prototype chain is not consulted - // - accessors are not called. - // - attributes will be respected (after the call the property will exist with the given attributes) - // - the property name is assumed to not be an index. 
- JS_EXPORT_PRIVATE static void putDirectVirtual(JSObject*, ExecState*, PropertyName, JSValue, unsigned attributes); - void putDirect(JSGlobalData&, PropertyName, JSValue, unsigned attributes = 0); - void putDirect(JSGlobalData&, PropertyName, JSValue, PutPropertySlot&); - void putDirectWithoutTransition(JSGlobalData&, PropertyName, JSValue, unsigned attributes = 0); - void putDirectAccessor(ExecState*, PropertyName, JSValue, unsigned attributes); - - bool propertyIsEnumerable(ExecState*, const Identifier& propertyName) const; - - JS_EXPORT_PRIVATE bool hasProperty(ExecState*, PropertyName) const; - JS_EXPORT_PRIVATE bool hasProperty(ExecState*, unsigned propertyName) const; - bool hasOwnProperty(ExecState*, PropertyName) const; - - JS_EXPORT_PRIVATE static bool deleteProperty(JSCell*, ExecState*, PropertyName); - JS_EXPORT_PRIVATE static bool deletePropertyByIndex(JSCell*, ExecState*, unsigned propertyName); - - JS_EXPORT_PRIVATE static JSValue defaultValue(const JSObject*, ExecState*, PreferredPrimitiveType); - - bool hasInstance(ExecState*, JSValue); - static bool defaultHasInstance(ExecState*, JSValue, JSValue prototypeProperty); - - JS_EXPORT_PRIVATE static void getOwnPropertyNames(JSObject*, ExecState*, PropertyNameArray&, EnumerationMode); - JS_EXPORT_PRIVATE static void getOwnNonIndexPropertyNames(JSObject*, ExecState*, PropertyNameArray&, EnumerationMode); - JS_EXPORT_PRIVATE static void getPropertyNames(JSObject*, ExecState*, PropertyNameArray&, EnumerationMode); - - JSValue toPrimitive(ExecState*, PreferredPrimitiveType = NoPreference) const; - bool getPrimitiveNumber(ExecState*, double& number, JSValue&) const; - JS_EXPORT_PRIVATE double toNumber(ExecState*) const; - JS_EXPORT_PRIVATE JSString* toString(ExecState*) const; - - // NOTE: JSObject and its subclasses must be able to gracefully handle ExecState* = 0, - // because this call may come from inside the compiler. - JS_EXPORT_PRIVATE static JSObject* toThisObject(JSCell*, ExecState*); - - bool getPropertySpecificValue(ExecState*, PropertyName, JSCell*& specificFunction) const; - - // This get function only looks at the property map. - JSValue getDirect(JSGlobalData& globalData, PropertyName propertyName) const - { - PropertyOffset offset = structure()->get(globalData, propertyName); - checkOffset(offset, structure()->typeInfo().type()); - return offset != invalidOffset ? getDirectOffset(offset) : JSValue(); + bool inSparseIndexingMode() + { + switch (structure()->indexingType()) { + case ALL_BLANK_INDEXING_TYPES: + case ALL_CONTIGUOUS_INDEXING_TYPES: + return false; + case ALL_ARRAY_STORAGE_INDEXING_TYPES: + return m_butterfly->arrayStorage()->inSparseMode(); + default: + ASSERT_NOT_REACHED(); + return false; } + } + + void enterDictionaryIndexingMode(JSGlobalData&); - WriteBarrierBase<Unknown>* getDirectLocation(JSGlobalData& globalData, PropertyName propertyName) - { - PropertyOffset offset = structure()->get(globalData, propertyName); - checkOffset(offset, structure()->typeInfo().type()); - return isValidOffset(offset) ? locationForOffset(offset) : 0; - } + // putDirect is effectively an unchecked vesion of 'defineOwnProperty': + // - the prototype chain is not consulted + // - accessors are not called. + // - attributes will be respected (after the call the property will exist with the given attributes) + // - the property name is assumed to not be an index. 
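// Illustrative sketch (editor addition, not part of the patch): the comment above
// distinguishes a checked 'put' from the unchecked putDirect. A deliberately
// simplified standalone model of that split, with hypothetical names and attribute
// values mirroring the Attribute enum earlier in this header:
#include <string>
#include <unordered_map>

enum MiniAttr { NoAttr = 0, ReadOnlyAttr = 1 << 1, DontEnumAttr = 1 << 2 };

struct MiniProp { int value; unsigned attributes; };

class MiniObject {
public:
    // Checked write: honours ReadOnly on an existing own property.
    bool put(const std::string& name, int value)
    {
        auto it = m_props.find(name);
        if (it != m_props.end() && (it->second.attributes & ReadOnlyAttr))
            return false;                        // rejected, like a failed write
        m_props[name].value = value;             // keeps existing attributes, if any
        return true;
    }

    // Unchecked write: installs the property on this object with exactly the given
    // attributes and consults nothing else -- the putDirect shape described above.
    void putDirect(const std::string& name, int value, unsigned attributes = NoAttr)
    {
        m_props[name] = MiniProp { value, attributes };
    }

private:
    std::unordered_map<std::string, MiniProp> m_props;
};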
+ JS_EXPORT_PRIVATE static void putDirectVirtual(JSObject*, ExecState*, PropertyName, JSValue, unsigned attributes); + void putDirect(JSGlobalData&, PropertyName, JSValue, unsigned attributes = 0); + void putDirect(JSGlobalData&, PropertyName, JSValue, PutPropertySlot&); + void putDirectWithoutTransition(JSGlobalData&, PropertyName, JSValue, unsigned attributes = 0); + void putDirectAccessor(ExecState*, PropertyName, JSValue, unsigned attributes); - WriteBarrierBase<Unknown>* getDirectLocation(JSGlobalData& globalData, PropertyName propertyName, unsigned& attributes) - { - JSCell* specificFunction; - PropertyOffset offset = structure()->get(globalData, propertyName, attributes, specificFunction); - return isValidOffset(offset) ? locationForOffset(offset) : 0; - } + bool propertyIsEnumerable(ExecState*, const Identifier& propertyName) const; - bool hasInlineStorage() const { return structure()->hasInlineStorage(); } - ConstPropertyStorage inlineStorageUnsafe() const - { - return bitwise_cast<ConstPropertyStorage>(this + 1); - } - PropertyStorage inlineStorageUnsafe() - { - return bitwise_cast<PropertyStorage>(this + 1); - } - ConstPropertyStorage inlineStorage() const - { - ASSERT(hasInlineStorage()); - return inlineStorageUnsafe(); - } - PropertyStorage inlineStorage() - { - ASSERT(hasInlineStorage()); - return inlineStorageUnsafe(); - } + JS_EXPORT_PRIVATE bool hasProperty(ExecState*, PropertyName) const; + JS_EXPORT_PRIVATE bool hasProperty(ExecState*, unsigned propertyName) const; + bool hasOwnProperty(ExecState*, PropertyName) const; + + JS_EXPORT_PRIVATE static bool deleteProperty(JSCell*, ExecState*, PropertyName); + JS_EXPORT_PRIVATE static bool deletePropertyByIndex(JSCell*, ExecState*, unsigned propertyName); + + JS_EXPORT_PRIVATE static JSValue defaultValue(const JSObject*, ExecState*, PreferredPrimitiveType); + + bool hasInstance(ExecState*, JSValue); + static bool defaultHasInstance(ExecState*, JSValue, JSValue prototypeProperty); + + JS_EXPORT_PRIVATE static void getOwnPropertyNames(JSObject*, ExecState*, PropertyNameArray&, EnumerationMode); + JS_EXPORT_PRIVATE static void getOwnNonIndexPropertyNames(JSObject*, ExecState*, PropertyNameArray&, EnumerationMode); + JS_EXPORT_PRIVATE static void getPropertyNames(JSObject*, ExecState*, PropertyNameArray&, EnumerationMode); + + JSValue toPrimitive(ExecState*, PreferredPrimitiveType = NoPreference) const; + bool getPrimitiveNumber(ExecState*, double& number, JSValue&) const; + JS_EXPORT_PRIVATE double toNumber(ExecState*) const; + JS_EXPORT_PRIVATE JSString* toString(ExecState*) const; + + // NOTE: JSObject and its subclasses must be able to gracefully handle ExecState* = 0, + // because this call may come from inside the compiler. + JS_EXPORT_PRIVATE static JSObject* toThisObject(JSCell*, ExecState*); + + bool getPropertySpecificValue(ExecState*, PropertyName, JSCell*& specificFunction) const; + + // This get function only looks at the property map. + JSValue getDirect(JSGlobalData& globalData, PropertyName propertyName) const + { + PropertyOffset offset = structure()->get(globalData, propertyName); + checkOffset(offset, structure()->typeInfo().type()); + return offset != invalidOffset ? getDirectOffset(offset) : JSValue(); + } + + WriteBarrierBase<Unknown>* getDirectLocation(JSGlobalData& globalData, PropertyName propertyName) + { + PropertyOffset offset = structure()->get(globalData, propertyName); + checkOffset(offset, structure()->typeInfo().type()); + return isValidOffset(offset) ? 
locationForOffset(offset) : 0; + } + + WriteBarrierBase<Unknown>* getDirectLocation(JSGlobalData& globalData, PropertyName propertyName, unsigned& attributes) + { + JSCell* specificFunction; + PropertyOffset offset = structure()->get(globalData, propertyName, attributes, specificFunction); + return isValidOffset(offset) ? locationForOffset(offset) : 0; + } + + bool hasInlineStorage() const { return structure()->hasInlineStorage(); } + ConstPropertyStorage inlineStorageUnsafe() const + { + return bitwise_cast<ConstPropertyStorage>(this + 1); + } + PropertyStorage inlineStorageUnsafe() + { + return bitwise_cast<PropertyStorage>(this + 1); + } + ConstPropertyStorage inlineStorage() const + { + ASSERT(hasInlineStorage()); + return inlineStorageUnsafe(); + } + PropertyStorage inlineStorage() + { + ASSERT(hasInlineStorage()); + return inlineStorageUnsafe(); + } - const Butterfly* butterfly() const { return m_butterfly; } - Butterfly* butterfly() { return m_butterfly; } + const Butterfly* butterfly() const { return m_butterfly; } + Butterfly* butterfly() { return m_butterfly; } - ConstPropertyStorage outOfLineStorage() const { return m_butterfly->propertyStorage(); } - PropertyStorage outOfLineStorage() { return m_butterfly->propertyStorage(); } - - const WriteBarrierBase<Unknown>* locationForOffset(PropertyOffset offset) const - { - if (isInlineOffset(offset)) - return &inlineStorage()[offsetInInlineStorage(offset)]; - return &outOfLineStorage()[offsetInOutOfLineStorage(offset)]; - } + ConstPropertyStorage outOfLineStorage() const { return m_butterfly->propertyStorage(); } + PropertyStorage outOfLineStorage() { return m_butterfly->propertyStorage(); } - WriteBarrierBase<Unknown>* locationForOffset(PropertyOffset offset) - { - if (isInlineOffset(offset)) - return &inlineStorage()[offsetInInlineStorage(offset)]; - return &outOfLineStorage()[offsetInOutOfLineStorage(offset)]; - } + const WriteBarrierBase<Unknown>* locationForOffset(PropertyOffset offset) const + { + if (isInlineOffset(offset)) + return &inlineStorage()[offsetInInlineStorage(offset)]; + return &outOfLineStorage()[offsetInOutOfLineStorage(offset)]; + } - PropertyOffset offsetForLocation(WriteBarrierBase<Unknown>* location) const - { - PropertyOffset result; - size_t offsetInInlineStorage = location - inlineStorageUnsafe(); - if (offsetInInlineStorage < static_cast<size_t>(firstOutOfLineOffset)) - result = offsetInInlineStorage; - else - result = outOfLineStorage() - location + (firstOutOfLineOffset - 1); - validateOffset(result, structure()->typeInfo().type()); - return result; - } + WriteBarrierBase<Unknown>* locationForOffset(PropertyOffset offset) + { + if (isInlineOffset(offset)) + return &inlineStorage()[offsetInInlineStorage(offset)]; + return &outOfLineStorage()[offsetInOutOfLineStorage(offset)]; + } - void transitionTo(JSGlobalData&, Structure*); - - bool removeDirect(JSGlobalData&, PropertyName); // Return true if anything is removed. - bool hasCustomProperties() { return structure()->didTransition(); } - bool hasGetterSetterProperties() { return structure()->hasGetterSetterProperties(); } - - // putOwnDataProperty has 'put' like semantics, however this method: - // - assumes the object contains no own getter/setter properties. - // - provides no special handling for __proto__ - // - does not walk the prototype chain (to check for accessors or non-writable properties). - // This is used by JSActivation. 
- bool putOwnDataProperty(JSGlobalData&, PropertyName, JSValue, PutPropertySlot&); - - // Fast access to known property offsets. - JSValue getDirectOffset(PropertyOffset offset) const { return locationForOffset(offset)->get(); } - void putDirectOffset(JSGlobalData& globalData, PropertyOffset offset, JSValue value) { locationForOffset(offset)->set(globalData, this, value); } - void putUndefinedAtDirectOffset(PropertyOffset offset) { locationForOffset(offset)->setUndefined(); } - - JS_EXPORT_PRIVATE static bool defineOwnProperty(JSObject*, ExecState*, PropertyName, PropertyDescriptor&, bool shouldThrow); - - bool isGlobalObject() const; - bool isVariableObject() const; - bool isNameScopeObject() const; - bool isActivationObject() const; - bool isErrorInstance() const; - bool isProxy() const; - - void seal(JSGlobalData&); - void freeze(JSGlobalData&); - JS_EXPORT_PRIVATE void preventExtensions(JSGlobalData&); - bool isSealed(JSGlobalData& globalData) { return structure()->isSealed(globalData); } - bool isFrozen(JSGlobalData& globalData) { return structure()->isFrozen(globalData); } - bool isExtensible() { return structure()->isExtensible(); } - bool indexingShouldBeSparse() - { - return !isExtensible() - || structure()->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero(); - } + PropertyOffset offsetForLocation(WriteBarrierBase<Unknown>* location) const + { + PropertyOffset result; + size_t offsetInInlineStorage = location - inlineStorageUnsafe(); + if (offsetInInlineStorage < static_cast<size_t>(firstOutOfLineOffset)) + result = offsetInInlineStorage; + else + result = outOfLineStorage() - location + (firstOutOfLineOffset - 1); + validateOffset(result, structure()->typeInfo().type()); + return result; + } + + void transitionTo(JSGlobalData&, Structure*); + + bool removeDirect(JSGlobalData&, PropertyName); // Return true if anything is removed. + bool hasCustomProperties() { return structure()->didTransition(); } + bool hasGetterSetterProperties() { return structure()->hasGetterSetterProperties(); } + + // putOwnDataProperty has 'put' like semantics, however this method: + // - assumes the object contains no own getter/setter properties. + // - provides no special handling for __proto__ + // - does not walk the prototype chain (to check for accessors or non-writable properties). + // This is used by JSActivation. + bool putOwnDataProperty(JSGlobalData&, PropertyName, JSValue, PutPropertySlot&); + + // Fast access to known property offsets. 
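// Illustrative sketch (editor addition, not part of the patch): the "known property
// offsets" comment above refers to the split between a shape-like Structure mapping
// names to offsets and the object's flat slot storage, as used by getDirect() earlier
// in this class and getDirectOffset() just below. A standalone analogue with
// hypothetical names:
#include <cstddef>
#include <string>
#include <unordered_map>
#include <vector>

using PropertyOffsetLike = std::ptrdiff_t;
constexpr PropertyOffsetLike invalidOffsetLike = -1;

struct MiniStructure {
    std::unordered_map<std::string, PropertyOffsetLike> offsets;

    PropertyOffsetLike get(const std::string& name) const
    {
        auto it = offsets.find(name);
        return it == offsets.end() ? invalidOffsetLike : it->second;
    }
};

struct MiniObjectStorage {
    const MiniStructure* structure;
    std::vector<int> slots;                      // flat storage indexed by offset

    // Slower form: resolve the name through the structure, then read the slot.
    int getDirect(const std::string& name, int fallback) const
    {
        PropertyOffsetLike offset = structure->get(name);
        return offset == invalidOffsetLike ? fallback : getDirectOffset(offset);
    }

    // Fast form: the caller already knows the offset (e.g. from a cached lookup).
    int getDirectOffset(PropertyOffsetLike offset) const
    {
        return slots[static_cast<size_t>(offset)];
    }
};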
+ JSValue getDirectOffset(PropertyOffset offset) const { return locationForOffset(offset)->get(); } + void putDirectOffset(JSGlobalData& globalData, PropertyOffset offset, JSValue value) { locationForOffset(offset)->set(globalData, this, value); } + void putUndefinedAtDirectOffset(PropertyOffset offset) { locationForOffset(offset)->setUndefined(); } + + JS_EXPORT_PRIVATE static bool defineOwnProperty(JSObject*, ExecState*, PropertyName, PropertyDescriptor&, bool shouldThrow); + + bool isGlobalObject() const; + bool isVariableObject() const; + bool isNameScopeObject() const; + bool isActivationObject() const; + bool isErrorInstance() const; + + void seal(JSGlobalData&); + void freeze(JSGlobalData&); + JS_EXPORT_PRIVATE void preventExtensions(JSGlobalData&); + bool isSealed(JSGlobalData& globalData) { return structure()->isSealed(globalData); } + bool isFrozen(JSGlobalData& globalData) { return structure()->isFrozen(globalData); } + bool isExtensible() { return structure()->isExtensible(); } + bool indexingShouldBeSparse() + { + return !isExtensible() + || structure()->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero(); + } - bool staticFunctionsReified() { return structure()->staticFunctionsReified(); } - void reifyStaticFunctionsForDelete(ExecState* exec); + bool staticFunctionsReified() { return structure()->staticFunctionsReified(); } + void reifyStaticFunctionsForDelete(ExecState* exec); - JS_EXPORT_PRIVATE Butterfly* growOutOfLineStorage(JSGlobalData&, size_t oldSize, size_t newSize); - void setButterfly(JSGlobalData&, Butterfly*, Structure*); - void setButterflyWithoutChangingStructure(Butterfly*); // You probably don't want to call this. + JS_EXPORT_PRIVATE Butterfly* growOutOfLineStorage(JSGlobalData&, size_t oldSize, size_t newSize); + void setButterfly(JSGlobalData&, Butterfly*, Structure*); + void setButterflyWithoutChangingStructure(Butterfly*); // You probably don't want to call this. - void setStructureAndReallocateStorageIfNecessary(JSGlobalData&, unsigned oldCapacity, Structure*); - void setStructureAndReallocateStorageIfNecessary(JSGlobalData&, Structure*); + void setStructureAndReallocateStorageIfNecessary(JSGlobalData&, unsigned oldCapacity, Structure*); + void setStructureAndReallocateStorageIfNecessary(JSGlobalData&, Structure*); - void flattenDictionaryObject(JSGlobalData& globalData) - { - structure()->flattenDictionaryStructure(globalData, this); - } + void flattenDictionaryObject(JSGlobalData& globalData) + { + structure()->flattenDictionaryStructure(globalData, this); + } - JSGlobalObject* globalObject() const - { - ASSERT(structure()->globalObject()); - ASSERT(!isGlobalObject() || ((JSObject*)structure()->globalObject()) == this); - return structure()->globalObject(); - } + JSGlobalObject* globalObject() const + { + ASSERT(structure()->globalObject()); + ASSERT(!isGlobalObject() || ((JSObject*)structure()->globalObject()) == this); + return structure()->globalObject(); + } - void switchToSlowPutArrayStorage(JSGlobalData&); + void switchToSlowPutArrayStorage(JSGlobalData&); - // The receiver is the prototype in this case. The following: - // - // asObject(foo->structure()->storedPrototype())->attemptToInterceptPutByIndexOnHoleForPrototype(...) - // - // is equivalent to: - // - // foo->attemptToInterceptPutByIndexOnHole(...); - bool attemptToInterceptPutByIndexOnHoleForPrototype(ExecState*, JSValue thisValue, unsigned propertyName, JSValue, bool shouldThrow); + // The receiver is the prototype in this case. 
The following: + // + // asObject(foo->structure()->storedPrototype())->attemptToInterceptPutByIndexOnHoleForPrototype(...) + // + // is equivalent to: + // + // foo->attemptToInterceptPutByIndexOnHole(...); + bool attemptToInterceptPutByIndexOnHoleForPrototype(ExecState*, JSValue thisValue, unsigned propertyName, JSValue, bool shouldThrow); - // Returns 0 if contiguous storage cannot be created - either because - // indexing should be sparse or because we're having a bad time. - WriteBarrier<Unknown>* ensureContiguous(JSGlobalData& globalData) - { - if (LIKELY(hasContiguous(structure()->indexingType()))) - return m_butterfly->contiguous(); + // Returns 0 if contiguous storage cannot be created - either because + // indexing should be sparse or because we're having a bad time. + WriteBarrier<Unknown>* ensureContiguous(JSGlobalData& globalData) + { + if (LIKELY(hasContiguous(structure()->indexingType()))) + return m_butterfly->contiguous(); - return ensureContiguousSlow(globalData); - } + return ensureContiguousSlow(globalData); + } - // Ensure that the object is in a mode where it has array storage. Use - // this if you're about to perform actions that would have required the - // object to be converted to have array storage, if it didn't have it - // already. - ArrayStorage* ensureArrayStorage(JSGlobalData& globalData) - { - if (LIKELY(hasArrayStorage(structure()->indexingType()))) - return m_butterfly->arrayStorage(); + // Ensure that the object is in a mode where it has array storage. Use + // this if you're about to perform actions that would have required the + // object to be converted to have array storage, if it didn't have it + // already. + ArrayStorage* ensureArrayStorage(JSGlobalData& globalData) + { + if (LIKELY(hasArrayStorage(structure()->indexingType()))) + return m_butterfly->arrayStorage(); - return ensureArrayStorageSlow(globalData); - } + return ensureArrayStorageSlow(globalData); + } - Butterfly* ensureIndexedStorage(JSGlobalData& globalData) - { - if (LIKELY(hasIndexedProperties(structure()->indexingType()))) - return m_butterfly; + Butterfly* ensureIndexedStorage(JSGlobalData& globalData) + { + if (LIKELY(hasIndexedProperties(structure()->indexingType()))) + return m_butterfly; - return ensureIndexedStorageSlow(globalData); - } + return ensureIndexedStorageSlow(globalData); + } - static size_t offsetOfInlineStorage(); + static size_t offsetOfInlineStorage(); - static ptrdiff_t butterflyOffset() - { - return OBJECT_OFFSETOF(JSObject, m_butterfly); - } + static ptrdiff_t butterflyOffset() + { + return OBJECT_OFFSETOF(JSObject, m_butterfly); + } - void* butterflyAddress() - { - return &m_butterfly; - } + void* butterflyAddress() + { + return &m_butterfly; + } - static JS_EXPORTDATA const ClassInfo s_info; - - protected: - void finishCreation(JSGlobalData& globalData) - { - Base::finishCreation(globalData); - ASSERT(inherits(&s_info)); - ASSERT(!structure()->outOfLineCapacity()); - ASSERT(structure()->isEmpty()); - ASSERT(prototype().isNull() || Heap::heap(this) == Heap::heap(prototype())); - ASSERT(structure()->isObject()); - ASSERT(classInfo()); - } + static JS_EXPORTDATA const ClassInfo s_info; - static Structure* createStructure(JSGlobalData& globalData, JSGlobalObject* globalObject, JSValue prototype) - { - return Structure::create(globalData, globalObject, prototype, TypeInfo(ObjectType, StructureFlags), &s_info); - } +protected: + void finishCreation(JSGlobalData& globalData) + { + Base::finishCreation(globalData); + ASSERT(inherits(&s_info)); + 
ASSERT(!structure()->outOfLineCapacity()); + ASSERT(structure()->isEmpty()); + ASSERT(prototype().isNull() || Heap::heap(this) == Heap::heap(prototype())); + ASSERT(structure()->isObject()); + ASSERT(classInfo()); + } - // To instantiate objects you likely want JSFinalObject, below. - // To create derived types you likely want JSNonFinalObject, below. - JSObject(JSGlobalData&, Structure*, Butterfly* = 0); + static Structure* createStructure(JSGlobalData& globalData, JSGlobalObject* globalObject, JSValue prototype) + { + return Structure::create(globalData, globalObject, prototype, TypeInfo(ObjectType, StructureFlags), &s_info); + } + + // To instantiate objects you likely want JSFinalObject, below. + // To create derived types you likely want JSNonFinalObject, below. + JSObject(JSGlobalData&, Structure*, Butterfly* = 0); - void resetInheritorID(JSGlobalData&); + void resetInheritorID(JSGlobalData&); - void visitButterfly(SlotVisitor&, Butterfly*, size_t storageSize); - void copyButterfly(CopyVisitor&, Butterfly*, size_t storageSize); - - // Call this if you know that the object is in a mode where it has array - // storage. This will assert otherwise. - ArrayStorage* arrayStorage() - { - ASSERT(hasArrayStorage(structure()->indexingType())); - return m_butterfly->arrayStorage(); - } + void visitButterfly(SlotVisitor&, Butterfly*, size_t storageSize); + void copyButterfly(CopyVisitor&, Butterfly*, size_t storageSize); + + // Call this if you know that the object is in a mode where it has array + // storage. This will assert otherwise. + ArrayStorage* arrayStorage() + { + ASSERT(hasArrayStorage(structure()->indexingType())); + return m_butterfly->arrayStorage(); + } - // Call this if you want to predicate some actions on whether or not the - // object is in a mode where it has array storage. - ArrayStorage* arrayStorageOrNull() - { - switch (structure()->indexingType()) { - case ALL_ARRAY_STORAGE_INDEXING_TYPES: - return m_butterfly->arrayStorage(); + // Call this if you want to predicate some actions on whether or not the + // object is in a mode where it has array storage. 
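// Illustrative sketch (editor addition, not part of the patch): ensureArrayStorage()
// and ensureContiguous() earlier in this class share one pattern -- an inlineable
// check for "already in the right mode" with the conversion kept out of line --
// while arrayStorage() and arrayStorageOrNull() around this point split the
// "I know the mode" accessor from the "tell me the mode" query. A standalone
// analogue with hypothetical names:
#include <cassert>
#include <memory>
#include <vector>

class MiniIndexedObject {
public:
    using Storage = std::vector<int>;

    Storage* ensureStorage()                 // fast check, slow creation
    {
        if (m_storage)                       // the LIKELY case in the real code
            return m_storage.get();
        return ensureStorageSlow();
    }

    Storage* storageOrNull() { return m_storage.get(); }   // query form

    Storage& storage()                       // assertion form: caller knows the mode
    {
        assert(m_storage);
        return *m_storage;
    }

private:
    Storage* ensureStorageSlow()
    {
        m_storage = std::make_unique<Storage>();
        return m_storage.get();
    }

    std::unique_ptr<Storage> m_storage;
};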
+ ArrayStorage* arrayStorageOrNull() + { + switch (structure()->indexingType()) { + case ALL_ARRAY_STORAGE_INDEXING_TYPES: + return m_butterfly->arrayStorage(); - default: - return 0; - } + default: + return 0; } + } - ArrayStorage* createArrayStorage(JSGlobalData&, unsigned length, unsigned vectorLength); - ArrayStorage* createInitialArrayStorage(JSGlobalData&); - WriteBarrier<Unknown>* createInitialContiguous(JSGlobalData&, unsigned length); - ArrayStorage* convertContiguousToArrayStorage(JSGlobalData&, NonPropertyTransition, unsigned neededLength); - ArrayStorage* convertContiguousToArrayStorage(JSGlobalData&, NonPropertyTransition); - ArrayStorage* convertContiguousToArrayStorage(JSGlobalData&); + ArrayStorage* createArrayStorage(JSGlobalData&, unsigned length, unsigned vectorLength); + ArrayStorage* createInitialArrayStorage(JSGlobalData&); + WriteBarrier<Unknown>* createInitialContiguous(JSGlobalData&, unsigned length); + ArrayStorage* convertContiguousToArrayStorage(JSGlobalData&, NonPropertyTransition, unsigned neededLength); + ArrayStorage* convertContiguousToArrayStorage(JSGlobalData&, NonPropertyTransition); + ArrayStorage* convertContiguousToArrayStorage(JSGlobalData&); - ArrayStorage* ensureArrayStorageExistsAndEnterDictionaryIndexingMode(JSGlobalData&); + ArrayStorage* ensureArrayStorageExistsAndEnterDictionaryIndexingMode(JSGlobalData&); - bool defineOwnNonIndexProperty(ExecState*, PropertyName, PropertyDescriptor&, bool throwException); + bool defineOwnNonIndexProperty(ExecState*, PropertyName, PropertyDescriptor&, bool throwException); - void putByIndexBeyondVectorLengthContiguousWithoutAttributes(ExecState*, unsigned propertyName, JSValue); - void putByIndexBeyondVectorLengthWithArrayStorage(ExecState*, unsigned propertyName, JSValue, bool shouldThrow, ArrayStorage*); + void putByIndexBeyondVectorLengthContiguousWithoutAttributes(ExecState*, unsigned propertyName, JSValue); + void putByIndexBeyondVectorLengthWithArrayStorage(ExecState*, unsigned propertyName, JSValue, bool shouldThrow, ArrayStorage*); - bool increaseVectorLength(JSGlobalData&, unsigned newLength); - void deallocateSparseIndexMap(); - bool defineOwnIndexedProperty(ExecState*, unsigned, PropertyDescriptor&, bool throwException); - SparseArrayValueMap* allocateSparseIndexMap(JSGlobalData&); + bool increaseVectorLength(JSGlobalData&, unsigned newLength); + void deallocateSparseIndexMap(); + bool defineOwnIndexedProperty(ExecState*, unsigned, PropertyDescriptor&, bool throwException); + SparseArrayValueMap* allocateSparseIndexMap(JSGlobalData&); - void notifyPresenceOfIndexedAccessors(JSGlobalData&); + void notifyPresenceOfIndexedAccessors(JSGlobalData&); - bool attemptToInterceptPutByIndexOnHole(ExecState*, unsigned index, JSValue, bool shouldThrow); + bool attemptToInterceptPutByIndexOnHole(ExecState*, unsigned index, JSValue, bool shouldThrow); - // Call this if you want setIndexQuickly to succeed and you're sure that - // the array is contiguous. - void ensureContiguousLength(JSGlobalData& globalData, unsigned length) - { - ASSERT(length < MAX_ARRAY_INDEX); - ASSERT(hasContiguous(structure()->indexingType())); + // Call this if you want setIndexQuickly to succeed and you're sure that + // the array is contiguous. 
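// Illustrative sketch (editor addition, not part of the patch): ensureContiguousLength
// just below keeps two lengths in sync -- a vector (capacity) length that may only
// grow through a slow path, and a public (logical) length that is bumped cheaply once
// capacity is known to suffice. A standalone analogue with hypothetical names:
#include <cstddef>
#include <vector>

class TwoLengthBuffer {
public:
    void ensureLength(size_t length)
    {
        if (m_slots.size() < length)          // vectorLength() < length
            growSlow(length);                 // out-of-line reallocation
        if (m_publicLength < length)          // publicLength() < length
            m_publicLength = length;          // cheap in-line bump
    }

    size_t publicLength() const { return m_publicLength; }
    size_t vectorLength() const { return m_slots.size(); }

private:
    void growSlow(size_t length) { m_slots.resize(length, 0); }

    std::vector<int> m_slots;
    size_t m_publicLength = 0;
};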
+ void ensureContiguousLength(JSGlobalData& globalData, unsigned length) + { + ASSERT(length < MAX_ARRAY_INDEX); + ASSERT(hasContiguous(structure()->indexingType())); - if (m_butterfly->vectorLength() < length) - ensureContiguousLengthSlow(globalData, length); + if (m_butterfly->vectorLength() < length) + ensureContiguousLengthSlow(globalData, length); - if (m_butterfly->publicLength() < length) - m_butterfly->setPublicLength(length); - } + if (m_butterfly->publicLength() < length) + m_butterfly->setPublicLength(length); + } - unsigned countElementsInContiguous(Butterfly*); + unsigned countElementsInContiguous(Butterfly*); - template<IndexingType indexingType> - WriteBarrier<Unknown>* indexingData() - { - switch (indexingType) { - case ALL_CONTIGUOUS_INDEXING_TYPES: - return m_butterfly->contiguous(); + template<IndexingType indexingType> + WriteBarrier<Unknown>* indexingData() + { + switch (indexingType) { + case ALL_CONTIGUOUS_INDEXING_TYPES: + return m_butterfly->contiguous(); - case ALL_ARRAY_STORAGE_INDEXING_TYPES: - return m_butterfly->arrayStorage()->m_vector; + case ALL_ARRAY_STORAGE_INDEXING_TYPES: + return m_butterfly->arrayStorage()->m_vector; - default: - CRASH(); - return 0; - } + default: + CRASH(); + return 0; } + } + + WriteBarrier<Unknown>* currentIndexingData() + { + switch (structure()->indexingType()) { + case ALL_CONTIGUOUS_INDEXING_TYPES: + return m_butterfly->contiguous(); + + case ALL_ARRAY_STORAGE_INDEXING_TYPES: + return m_butterfly->arrayStorage()->m_vector; + + default: + CRASH(); + return 0; + } + } - template<IndexingType indexingType> - unsigned relevantLength() - { - switch (indexingType) { - case ALL_CONTIGUOUS_INDEXING_TYPES: - return m_butterfly->publicLength(); + template<IndexingType indexingType> + unsigned relevantLength() + { + switch (indexingType) { + case ALL_CONTIGUOUS_INDEXING_TYPES: + return m_butterfly->publicLength(); - case ALL_ARRAY_STORAGE_INDEXING_TYPES: - return std::min( - m_butterfly->arrayStorage()->length(), - m_butterfly->arrayStorage()->vectorLength()); + case ALL_ARRAY_STORAGE_INDEXING_TYPES: + return std::min( + m_butterfly->arrayStorage()->length(), + m_butterfly->arrayStorage()->vectorLength()); - default: - CRASH(); - return 0; - } + default: + CRASH(); + return 0; } + } - private: - friend class LLIntOffsetsExtractor; + unsigned currentRelevantLength() + { + switch (structure()->indexingType()) { + case ALL_CONTIGUOUS_INDEXING_TYPES: + return m_butterfly->publicLength(); + + case ALL_ARRAY_STORAGE_INDEXING_TYPES: + return std::min( + m_butterfly->arrayStorage()->length(), + m_butterfly->arrayStorage()->vectorLength()); + + default: + CRASH(); + return 0; + } + } + +private: + friend class LLIntOffsetsExtractor; - // Nobody should ever ask any of these questions on something already known to be a JSObject. - using JSCell::isAPIValueWrapper; - using JSCell::isGetterSetter; - void getObject(); - void getString(ExecState* exec); - void isObject(); - void isString(); + // Nobody should ever ask any of these questions on something already known to be a JSObject. 
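currentIndexingData() and currentRelevantLength() are the new runtime-dispatch counterparts of the templated indexingData&lt;indexingType&gt;() and relevantLength&lt;indexingType&gt;() directly above: the template versions are for code that has already been specialized on a concrete IndexingType, so the switch constant-folds, while the current* versions re-read structure()->indexingType() on every call. A hedged sketch of the two call shapes; the subclass and method names are hypothetical and class boilerplate is elided.

    class JSIndexedExample : public JSC::JSNonFinalObject {
    public:
        // Unspecialized caller: pays for a switch on structure()->indexingType()
        // each time it runs.
        JSC::JSValue firstOrEmpty()
        {
            if (!currentRelevantLength())
                return JSC::JSValue();
            return currentIndexingData()[0].get();
        }

        // Specialized caller: instantiated once per concrete indexing type
        // (one contiguous variant, one array-storage variant, and so on), so
        // the switch inside relevantLength<>/indexingData<> disappears at
        // compile time.
        template<JSC::IndexingType indexingType>
        JSC::JSValue firstOrEmptySpecialized()
        {
            if (!relevantLength<indexingType>())
                return JSC::JSValue();
            return indexingData<indexingType>()[0].get();
        }
    };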
+ using JSCell::isAPIValueWrapper; + using JSCell::isGetterSetter; + void getObject(); + void getString(ExecState* exec); + void isObject(); + void isString(); - ArrayStorage* enterDictionaryIndexingModeWhenArrayStorageAlreadyExists(JSGlobalData&, ArrayStorage*); + ArrayStorage* enterDictionaryIndexingModeWhenArrayStorageAlreadyExists(JSGlobalData&, ArrayStorage*); - template<PutMode> - bool putDirectInternal(JSGlobalData&, PropertyName, JSValue, unsigned attr, PutPropertySlot&, JSCell*); + template<PutMode> + bool putDirectInternal(JSGlobalData&, PropertyName, JSValue, unsigned attr, PutPropertySlot&, JSCell*); - bool inlineGetOwnPropertySlot(ExecState*, PropertyName, PropertySlot&); - JS_EXPORT_PRIVATE void fillGetterPropertySlot(PropertySlot&, PropertyOffset); + bool inlineGetOwnPropertySlot(ExecState*, PropertyName, PropertySlot&); + JS_EXPORT_PRIVATE void fillGetterPropertySlot(PropertySlot&, PropertyOffset); - const HashEntry* findPropertyHashEntry(ExecState*, PropertyName) const; - Structure* createInheritorID(JSGlobalData&); + const HashEntry* findPropertyHashEntry(ExecState*, PropertyName) const; + Structure* createInheritorID(JSGlobalData&); - void putIndexedDescriptor(ExecState*, SparseArrayEntry*, PropertyDescriptor&, PropertyDescriptor& old); + void putIndexedDescriptor(ExecState*, SparseArrayEntry*, PropertyDescriptor&, PropertyDescriptor& old); - void putByIndexBeyondVectorLength(ExecState*, unsigned propertyName, JSValue, bool shouldThrow); - bool putDirectIndexBeyondVectorLengthWithArrayStorage(ExecState*, unsigned propertyName, JSValue, unsigned attributes, PutDirectIndexMode, ArrayStorage*); - JS_EXPORT_PRIVATE bool putDirectIndexBeyondVectorLength(ExecState*, unsigned propertyName, JSValue, unsigned attributes, PutDirectIndexMode); + void putByIndexBeyondVectorLength(ExecState*, unsigned propertyName, JSValue, bool shouldThrow); + bool putDirectIndexBeyondVectorLengthWithArrayStorage(ExecState*, unsigned propertyName, JSValue, unsigned attributes, PutDirectIndexMode, ArrayStorage*); + JS_EXPORT_PRIVATE bool putDirectIndexBeyondVectorLength(ExecState*, unsigned propertyName, JSValue, unsigned attributes, PutDirectIndexMode); - unsigned getNewVectorLength(unsigned currentVectorLength, unsigned currentLength, unsigned desiredLength); - unsigned getNewVectorLength(unsigned desiredLength); + unsigned getNewVectorLength(unsigned currentVectorLength, unsigned currentLength, unsigned desiredLength); + unsigned getNewVectorLength(unsigned desiredLength); - JS_EXPORT_PRIVATE bool getOwnPropertySlotSlow(ExecState*, PropertyName, PropertySlot&); + JS_EXPORT_PRIVATE bool getOwnPropertySlotSlow(ExecState*, PropertyName, PropertySlot&); - void ensureContiguousLengthSlow(JSGlobalData&, unsigned length); + void ensureContiguousLengthSlow(JSGlobalData&, unsigned length); - WriteBarrier<Unknown>* ensureContiguousSlow(JSGlobalData&); - ArrayStorage* ensureArrayStorageSlow(JSGlobalData&); - Butterfly* ensureIndexedStorageSlow(JSGlobalData&); + WriteBarrier<Unknown>* ensureContiguousSlow(JSGlobalData&); + ArrayStorage* ensureArrayStorageSlow(JSGlobalData&); + Butterfly* ensureIndexedStorageSlow(JSGlobalData&); - protected: - Butterfly* m_butterfly; - }; +protected: + Butterfly* m_butterfly; +}; - // JSNonFinalObject is a type of JSObject that has some internal storage, - // but also preserves some space in the collector cell for additional - // data members in derived types. 
- class JSNonFinalObject : public JSObject { - friend class JSObject; +// JSNonFinalObject is a type of JSObject that has some internal storage, +// but also preserves some space in the collector cell for additional +// data members in derived types. +class JSNonFinalObject : public JSObject { + friend class JSObject; - public: - typedef JSObject Base; +public: + typedef JSObject Base; - static Structure* createStructure(JSGlobalData& globalData, JSGlobalObject* globalObject, JSValue prototype) - { - return Structure::create(globalData, globalObject, prototype, TypeInfo(ObjectType, StructureFlags), &s_info); - } + static Structure* createStructure(JSGlobalData& globalData, JSGlobalObject* globalObject, JSValue prototype) + { + return Structure::create(globalData, globalObject, prototype, TypeInfo(ObjectType, StructureFlags), &s_info); + } - protected: - explicit JSNonFinalObject(JSGlobalData& globalData, Structure* structure, Butterfly* butterfly = 0) - : JSObject(globalData, structure, butterfly) - { - } +protected: + explicit JSNonFinalObject(JSGlobalData& globalData, Structure* structure, Butterfly* butterfly = 0) - : JSObject(globalData, structure, butterfly) + : JSObject(globalData, structure, butterfly) + { + } - void finishCreation(JSGlobalData& globalData) - { - Base::finishCreation(globalData); - ASSERT(!this->structure()->totalStorageCapacity()); - ASSERT(classInfo()); - } - }; + void finishCreation(JSGlobalData& globalData) + { + Base::finishCreation(globalData); + ASSERT(!this->structure()->totalStorageCapacity()); + ASSERT(classInfo()); + } +}; - class JSFinalObject; +class JSFinalObject; - // JSFinalObject is a type of JSObject that contains sufficient internal - // storage to fully make use of the collector cell containing it. - class JSFinalObject : public JSObject { - friend class JSObject; +// JSFinalObject is a type of JSObject that contains sufficient internal +// storage to fully make use of the collector cell containing it.
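The comments above spell out the intended split, and a hedged sketch of how each side is typically used follows (names are hypothetical, boilerplate elided): plain objects use JSFinalObject, whose structure is created with INLINE_STORAGE_CAPACITY so inline property slots fill the rest of the cell, while native types that need their own C++ fields derive from JSNonFinalObject, which leaves that space to the subclass.

    // Plain object: no extra C++ members, so let JSFinalObject use the whole
    // cell for inline property storage. 'structure' is expected to come from
    // JSFinalObject::createStructure.
    JSC::JSObject* makePlainObject(JSC::ExecState* exec, JSC::Structure* structure)
    {
        return JSC::JSFinalObject::create(exec, structure);
    }

    // Native wrapper: extra members live where JSFinalObject would have kept
    // inline slots, so derive from JSNonFinalObject instead.
    class JSExampleWrapper : public JSC::JSNonFinalObject {
    public:
        typedef JSC::JSNonFinalObject Base;
        // createStructure/s_info/finishCreation boilerplate elided
    private:
        void* m_nativeHandle; // hypothetical extra member
    };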
+class JSFinalObject : public JSObject { + friend class JSObject; - public: - typedef JSObject Base; +public: + typedef JSObject Base; - static JSFinalObject* create(ExecState*, Structure*); - static Structure* createStructure(JSGlobalData& globalData, JSGlobalObject* globalObject, JSValue prototype) - { - return Structure::create(globalData, globalObject, prototype, TypeInfo(FinalObjectType, StructureFlags), &s_info, NonArray, INLINE_STORAGE_CAPACITY); - } + static JSFinalObject* create(ExecState*, Structure*); + static Structure* createStructure(JSGlobalData& globalData, JSGlobalObject* globalObject, JSValue prototype) + { + return Structure::create(globalData, globalObject, prototype, TypeInfo(FinalObjectType, StructureFlags), &s_info, NonArray, INLINE_STORAGE_CAPACITY); + } - JS_EXPORT_PRIVATE static void visitChildren(JSCell*, SlotVisitor&); + JS_EXPORT_PRIVATE static void visitChildren(JSCell*, SlotVisitor&); - static JS_EXPORTDATA const ClassInfo s_info; + static JS_EXPORTDATA const ClassInfo s_info; - protected: - void visitChildrenCommon(SlotVisitor&); +protected: + void visitChildrenCommon(SlotVisitor&); - void finishCreation(JSGlobalData& globalData) - { - Base::finishCreation(globalData); - ASSERT(structure()->totalStorageCapacity() == structure()->inlineCapacity()); - ASSERT(classInfo()); - } + void finishCreation(JSGlobalData& globalData) + { + Base::finishCreation(globalData); + ASSERT(structure()->totalStorageCapacity() == structure()->inlineCapacity()); + ASSERT(classInfo()); + } - private: - friend class LLIntOffsetsExtractor; +private: + friend class LLIntOffsetsExtractor; - explicit JSFinalObject(JSGlobalData& globalData, Structure* structure) - : JSObject(globalData, structure) - { - } + explicit JSFinalObject(JSGlobalData& globalData, Structure* structure) + : JSObject(globalData, structure) + { + } - static const unsigned StructureFlags = JSObject::StructureFlags; - }; + static const unsigned StructureFlags = JSObject::StructureFlags; +}; inline JSFinalObject* JSFinalObject::create(ExecState* exec, Structure* structure) { @@ -902,11 +933,6 @@ inline bool JSObject::isErrorInstance() const return structure()->typeInfo().type() == ErrorInstanceType; } -inline bool JSObject::isProxy() const -{ - return structure()->typeInfo().type() == ProxyType; -} - inline void JSObject::setButterfly(JSGlobalData& globalData, Butterfly* butterfly, Structure* structure) { ASSERT(structure); diff --git a/Source/JavaScriptCore/runtime/JSScope.cpp b/Source/JavaScriptCore/runtime/JSScope.cpp index 508a90540..8651a76ba 100644 --- a/Source/JavaScriptCore/runtime/JSScope.cpp +++ b/Source/JavaScriptCore/runtime/JSScope.cpp @@ -334,11 +334,14 @@ template <JSScope::LookupMode mode, JSScope::ReturnValues returnValues> JSObject ASSERT(variableObject); ASSERT(variableObject->symbolTable()); SymbolTableEntry entry = variableObject->symbolTable()->get(identifier.impl()); - // Variable was actually inserted by eval + // Defend against the variable being actually inserted by eval. if (entry.isNull()) { ASSERT(!jsDynamicCast<JSNameScope*>(variableObject)); goto fail; } + // If we're getting the 'arguments' then give up on life. + if (identifier == callFrame->propertyNames().arguments) + goto fail; if (putToBaseOperation) { putToBaseOperation->m_kind = entry.isReadOnly() ? 
PutToBaseOperation::Readonly : PutToBaseOperation::VariablePut; diff --git a/Source/JavaScriptCore/runtime/JSType.h b/Source/JavaScriptCore/runtime/JSType.h index 03f4a7790..10d98d2bd 100644 --- a/Source/JavaScriptCore/runtime/JSType.h +++ b/Source/JavaScriptCore/runtime/JSType.h @@ -41,6 +41,11 @@ enum JSType { ProgramExecutableType, FunctionExecutableType, + UnlinkedFunctionExecutableType, + UnlinkedProgramCodeBlockType, + UnlinkedEvalCodeBlockType, + UnlinkedFunctionCodeBlockType, + // The ObjectType value must come before any JSType that is a subclass of JSObject. ObjectType, FinalObjectType, diff --git a/Source/JavaScriptCore/runtime/JSTypeInfo.h b/Source/JavaScriptCore/runtime/JSTypeInfo.h index d9b3585a0..6f63260fe 100644 --- a/Source/JavaScriptCore/runtime/JSTypeInfo.h +++ b/Source/JavaScriptCore/runtime/JSTypeInfo.h @@ -55,7 +55,7 @@ namespace JSC { , m_flags2(flags >> 8) { ASSERT(flags <= 0x3ff); - ASSERT(type <= 0xff); + ASSERT(static_cast<int>(type) <= 0xff); ASSERT(type >= CompoundType || !(flags & OverridesVisitChildren)); // No object that doesn't ImplementsHasInstance should override it! ASSERT((m_flags & (ImplementsHasInstance | OverridesHasInstance)) != OverridesHasInstance); diff --git a/Source/JavaScriptCore/runtime/JSValue.cpp b/Source/JavaScriptCore/runtime/JSValue.cpp index a5cdf700b..e7f8cad17 100644 --- a/Source/JavaScriptCore/runtime/JSValue.cpp +++ b/Source/JavaScriptCore/runtime/JSValue.cpp @@ -62,7 +62,7 @@ double JSValue::toNumberSlowCase(ExecState* exec) const return asCell()->toNumber(exec); if (isTrue()) return 1.0; - return isUndefined() ? std::numeric_limits<double>::quiet_NaN() : 0; // null and false both convert to 0. + return isUndefined() ? QNaN : 0; // null and false both convert to 0. } JSObject* JSValue::toObjectSlowCase(ExecState* exec, JSGlobalObject* globalObject) const diff --git a/Source/JavaScriptCore/runtime/JSValue.h b/Source/JavaScriptCore/runtime/JSValue.h index 7b5c81aa9..bd9b90466 100644 --- a/Source/JavaScriptCore/runtime/JSValue.h +++ b/Source/JavaScriptCore/runtime/JSValue.h @@ -35,6 +35,10 @@ namespace JSC { +// This is used a lot throughout JavaScriptCore for everything from value boxing to marking +// values as being missing, so it is useful to have it abbreviated. 
+#define QNaN (std::numeric_limits<double>::quiet_NaN()) + class ExecState; class JSCell; class JSGlobalData; diff --git a/Source/JavaScriptCore/runtime/JSValueInlineMethods.h b/Source/JavaScriptCore/runtime/JSValueInlineMethods.h index 52b747890..224982e9e 100644 --- a/Source/JavaScriptCore/runtime/JSValueInlineMethods.h +++ b/Source/JavaScriptCore/runtime/JSValueInlineMethods.h @@ -62,7 +62,7 @@ namespace JSC { inline JSValue jsNaN() { - return JSValue(std::numeric_limits<double>::quiet_NaN()); + return JSValue(QNaN); } inline JSValue::JSValue(char i) diff --git a/Source/JavaScriptCore/runtime/MathObject.cpp b/Source/JavaScriptCore/runtime/MathObject.cpp index 2f4df375a..7634487ad 100644 --- a/Source/JavaScriptCore/runtime/MathObject.cpp +++ b/Source/JavaScriptCore/runtime/MathObject.cpp @@ -175,7 +175,7 @@ EncodedJSValue JSC_HOST_CALL mathProtoFuncMax(ExecState* exec) for (unsigned k = 0; k < argsCount; ++k) { double val = exec->argument(k).toNumber(exec); if (isnan(val)) { - result = std::numeric_limits<double>::quiet_NaN(); + result = QNaN; break; } if (val > result || (val == 0 && result == 0 && !signbit(val))) @@ -191,7 +191,7 @@ EncodedJSValue JSC_HOST_CALL mathProtoFuncMin(ExecState* exec) for (unsigned k = 0; k < argsCount; ++k) { double val = exec->argument(k).toNumber(exec); if (isnan(val)) { - result = std::numeric_limits<double>::quiet_NaN(); + result = QNaN; break; } if (val < result || (val == 0 && result == 0 && signbit(val))) diff --git a/Source/JavaScriptCore/runtime/Operations.h b/Source/JavaScriptCore/runtime/Operations.h index 30ba0b27d..01df7e98c 100644 --- a/Source/JavaScriptCore/runtime/Operations.h +++ b/Source/JavaScriptCore/runtime/Operations.h @@ -24,6 +24,7 @@ #include "ExceptionHelpers.h" #include "Interpreter.h" +#include "JSProxy.h" #include "JSString.h" #include "JSValueInlineMethods.h" @@ -297,19 +298,24 @@ namespace JSC { return jsAddSlowCase(callFrame, v1, v2); } +#define InvalidPrototypeChain (std::numeric_limits<size_t>::max()) + inline size_t normalizePrototypeChain(CallFrame* callFrame, JSValue base, JSValue slotBase, const Identifier& propertyName, PropertyOffset& slotOffset) { JSCell* cell = base.asCell(); size_t count = 0; while (slotBase != cell) { + if (cell->isProxy()) + return InvalidPrototypeChain; + JSValue v = cell->structure()->prototypeForLookup(callFrame); // If we didn't find slotBase in base's prototype chain, then base // must be a proxy for another object. 
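Two small conveniences appear in these hunks: QNaN (JSValue.h) abbreviates the canonical quiet NaN that jsNaN(), Math.max/min and the NaN-normalizing paths previously spelled as std::numeric_limits&lt;double&gt;::quiet_NaN(), and InvalidPrototypeChain (Operations.h) is a size_t sentinel that normalizePrototypeChain and the related chain-walking helper now return when caching across the chain would be unsafe (a proxy in the chain, or a null prototype before the target is found), leaving 0 free to mean a legitimate count of zero hops. A standalone sketch of both idioms; everything except the two macro definitions themselves is hypothetical.

    #include <cstddef>
    #include <cstdio>
    #include <limits>

    #define QNaN (std::numeric_limits<double>::quiet_NaN())
    #define InvalidPrototypeChain (std::numeric_limits<size_t>::max())

    // Mirror of the NaN purification done before a double is boxed: any NaN
    // payload collapses to the single canonical quiet NaN.
    double purifyNaN(double value)
    {
        return value != value ? QNaN : value; // NaN is the only value unequal to itself
    }

    // Mirror of the "count hops or refuse to cache" shape: 0 is a valid
    // answer, so failure needs an out-of-band sentinel rather than 0.
    size_t hopsToTarget(const int* const* chain, size_t chainLength, const int* target)
    {
        for (size_t hops = 0; hops < chainLength; ++hops) {
            if (!chain[hops])
                return InvalidPrototypeChain; // opaque link: caller must not cache
            if (chain[hops] == target)
                return hops;
        }
        return InvalidPrototypeChain;
    }

    int main()
    {
        double zero = 0.0;
        std::printf("%f\n", purifyNaN(zero / zero));
        const int a = 0, b = 0;
        const int* chain[] = { &a, &b };
        std::printf("found at hop %zu\n", hopsToTarget(chain, 2, &b));
        return 0;
    }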
if (v.isNull()) - return 0; + return InvalidPrototypeChain; cell = v.asCell(); @@ -332,6 +338,9 @@ namespace JSC { { size_t count = 0; while (1) { + if (base->isProxy()) + return InvalidPrototypeChain; + JSValue v = base->structure()->prototypeForLookup(callFrame); if (v.isNull()) return count; diff --git a/Source/JavaScriptCore/runtime/RegExpObject.cpp b/Source/JavaScriptCore/runtime/RegExpObject.cpp index dfbf533f7..35de40912 100644 --- a/Source/JavaScriptCore/runtime/RegExpObject.cpp +++ b/Source/JavaScriptCore/runtime/RegExpObject.cpp @@ -178,11 +178,34 @@ JSValue regExpObjectMultiline(ExecState*, JSValue slotBase, PropertyName) return jsBoolean(asRegExpObject(slotBase)->regExp()->multiline()); } -JSValue regExpObjectSource(ExecState* exec, JSValue slotBase, PropertyName) +template <typename CharacterType> +static inline void appendLineTerminatorEscape(StringBuilder&, CharacterType); + +template <> +inline void appendLineTerminatorEscape<LChar>(StringBuilder& builder, LChar lineTerminator) +{ + if (lineTerminator == '\n') + builder.append('n'); + else + builder.append('r'); +} + +template <> +inline void appendLineTerminatorEscape<UChar>(StringBuilder& builder, UChar lineTerminator) +{ + if (lineTerminator == '\n') + builder.append('n'); + else if (lineTerminator == '\r') + builder.append('r'); + else if (lineTerminator == 0x2028) + builder.appendLiteral("u2028"); + else + builder.appendLiteral("u2029"); +} + +template <typename CharacterType> +static inline JSValue regExpObjectSourceInternal(ExecState* exec, String pattern, const CharacterType* characters, unsigned length) { - String pattern = asRegExpObject(slotBase)->regExp()->pattern(); - unsigned length = pattern.length(); - const UChar* characters = pattern.characters(); bool previousCharacterWasBackslash = false; bool inBrackets = false; bool shouldEscape = false; @@ -197,7 +220,7 @@ JSValue regExpObjectSource(ExecState* exec, JSValue slotBase, PropertyName) // early return for strings that don't contain a forwards slash and LineTerminator for (unsigned i = 0; i < length; ++i) { - UChar ch = characters[i]; + CharacterType ch = characters[i]; if (!previousCharacterWasBackslash) { if (inBrackets) { if (ch == ']') @@ -212,7 +235,7 @@ JSValue regExpObjectSource(ExecState* exec, JSValue slotBase, PropertyName) } } - if (Lexer<UChar>::isLineTerminator(ch)) { + if (Lexer<CharacterType>::isLineTerminator(ch)) { shouldEscape = true; break; } @@ -230,7 +253,7 @@ JSValue regExpObjectSource(ExecState* exec, JSValue slotBase, PropertyName) inBrackets = false; StringBuilder result; for (unsigned i = 0; i < length; ++i) { - UChar ch = characters[i]; + CharacterType ch = characters[i]; if (!previousCharacterWasBackslash) { if (inBrackets) { if (ch == ']') @@ -244,18 +267,11 @@ JSValue regExpObjectSource(ExecState* exec, JSValue slotBase, PropertyName) } // escape LineTerminator - if (Lexer<UChar>::isLineTerminator(ch)) { + if (Lexer<CharacterType>::isLineTerminator(ch)) { if (!previousCharacterWasBackslash) result.append('\\'); - if (ch == '\n') - result.append('n'); - else if (ch == '\r') - result.append('r'); - else if (ch == 0x2028) - result.appendLiteral("u2028"); - else - result.appendLiteral("u2029"); + appendLineTerminatorEscape<CharacterType>(result, ch); } else result.append(ch); @@ -268,6 +284,14 @@ JSValue regExpObjectSource(ExecState* exec, JSValue slotBase, PropertyName) return jsString(exec, result.toString()); } +JSValue regExpObjectSource(ExecState* exec, JSValue slotBase, PropertyName) +{ + String pattern = 
asRegExpObject(slotBase)->regExp()->pattern(); + if (pattern.is8Bit()) + return regExpObjectSourceInternal(exec, pattern, pattern.characters8(), pattern.length()); + return regExpObjectSourceInternal(exec, pattern, pattern.characters16(), pattern.length()); +} + void RegExpObject::put(JSCell* cell, ExecState* exec, PropertyName propertyName, JSValue value, PutPropertySlot& slot) { if (propertyName == exec->propertyNames().lastIndex) { diff --git a/Source/JavaScriptCore/runtime/StringPrototype.cpp b/Source/JavaScriptCore/runtime/StringPrototype.cpp index 4d3ccfda2..5aafe8bb3 100644 --- a/Source/JavaScriptCore/runtime/StringPrototype.cpp +++ b/Source/JavaScriptCore/runtime/StringPrototype.cpp @@ -666,7 +666,7 @@ static inline EncodedJSValue replaceUsingStringSearch(ExecState* exec, JSString* String leftPart(StringImpl::create(stringImpl, 0, matchStart)); size_t matchEnd = matchStart + searchString.impl()->length(); - int ovector[2] = { matchStart, matchEnd}; + int ovector[2] = { static_cast<int>(matchStart), static_cast<int>(matchEnd)}; String middlePart = substituteBackreferences(replaceString, string, ovector, 0); size_t leftLength = stringImpl->length() - matchEnd; diff --git a/Source/JavaScriptCore/runtime/Structure.cpp b/Source/JavaScriptCore/runtime/Structure.cpp index a931def27..e733c7e23 100644 --- a/Source/JavaScriptCore/runtime/Structure.cpp +++ b/Source/JavaScriptCore/runtime/Structure.cpp @@ -543,6 +543,15 @@ Structure* Structure::nonPropertyTransition(JSGlobalData& globalData, Structure* unsigned attributes = toAttributes(transitionKind); IndexingType indexingType = newIndexingType(structure->indexingTypeIncludingHistory(), transitionKind); + JSGlobalObject* globalObject = structure->globalObject(); + if (structure == globalObject->arrayStructure()) { + Structure* transition = globalObject->arrayStructureWithArrayStorage(); + if (transition->indexingTypeIncludingHistory() == indexingType) { + structure->notifyTransitionFromThisStructure(); + return transition; + } + } + if (Structure* existingTransition = structure->m_transitionTable.get(0, attributes)) { ASSERT(existingTransition->m_attributesInPrevious == attributes); ASSERT(existingTransition->indexingTypeIncludingHistory() == indexingType); diff --git a/Source/JavaScriptCore/runtime/Structure.h b/Source/JavaScriptCore/runtime/Structure.h index 5f1299766..2b25803a6 100644 --- a/Source/JavaScriptCore/runtime/Structure.h +++ b/Source/JavaScriptCore/runtime/Structure.h @@ -521,6 +521,11 @@ namespace JSC { return m_structure->typeInfo().type() == GetterSetterType; } + inline bool JSCell::isProxy() const + { + return structure()->typeInfo().type() == ProxyType; + } + inline bool JSCell::isAPIValueWrapper() const { return m_structure->typeInfo().type() == APIValueWrapperType; diff --git a/Source/JavaScriptCore/runtime/SymbolTable.h b/Source/JavaScriptCore/runtime/SymbolTable.h index debb76499..87d1c8be5 100644 --- a/Source/JavaScriptCore/runtime/SymbolTable.h +++ b/Source/JavaScriptCore/runtime/SymbolTable.h @@ -337,7 +337,7 @@ namespace JSC { struct SymbolTableIndexHashTraits : HashTraits<SymbolTableEntry> { static const bool emptyValueIsZero = true; - static const bool needsDestruction = false; + static const bool needsDestruction = true; }; typedef HashMap<RefPtr<StringImpl>, SymbolTableEntry, IdentifierRepHash, HashTraits<RefPtr<StringImpl> >, SymbolTableIndexHashTraits> SymbolTable; |
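The RegExpObject.cpp rewrite above follows the usual shape once WTF strings gained separate 8-bit (LChar) and 16-bit (UChar) storage: regExpObjectSource() checks pattern.is8Bit() and hands the raw characters to a single function template, and the per-character helper appendLineTerminatorEscape is specialized so the 8-bit path never has to consider U+2028/U+2029, which cannot occur in a Latin-1 string. A standalone analogue of that dispatch shape, using simplified stand-in types rather than WTF's String and StringBuilder.

    #include <cstddef>
    #include <cstdio>
    #include <string>

    typedef unsigned char LChar;   // 8-bit code unit, as in WTF
    typedef unsigned short UChar;  // 16-bit code unit

    // Per-width helpers: the narrow overloads never see U+2028/U+2029.
    static bool isLineTerminator(LChar c) { return c == '\n' || c == '\r'; }
    static bool isLineTerminator(UChar c) { return c == '\n' || c == '\r' || c == 0x2028 || c == 0x2029; }

    static void appendLineTerminatorEscape(std::string& out, LChar c)
    {
        out += (c == '\n') ? 'n' : 'r';
    }

    static void appendLineTerminatorEscape(std::string& out, UChar c)
    {
        if (c == '\n') out += 'n';
        else if (c == '\r') out += 'r';
        else if (c == 0x2028) out += "u2028";
        else out += "u2029";
    }

    // One shared body, instantiated once per character width. (The real code
    // also tracks backslashes and character classes; that part is omitted.)
    template<typename CharacterType>
    static std::string escapeLineTerminators(const CharacterType* characters, std::size_t length)
    {
        std::string result;
        for (std::size_t i = 0; i < length; ++i) {
            CharacterType c = characters[i];
            if (isLineTerminator(c)) {
                result += '\\';
                appendLineTerminatorEscape(result, c);
            } else
                result += static_cast<char>(c); // fine for this ASCII-only demo
        }
        return result;
    }

    // Entry point mirrors the new regExpObjectSource(): pick the width once,
    // then share the template.
    static std::string escapePattern(bool is8Bit, const LChar* chars8, const UChar* chars16, std::size_t length)
    {
        return is8Bit ? escapeLineTerminators(chars8, length) : escapeLineTerminators(chars16, length);
    }

    int main()
    {
        const LChar pattern[] = { 'a', '\n', 'b' };
        std::printf("%s\n", escapePattern(true, pattern, 0, 3).c_str());
        return 0;
    }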
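The one-character StringPrototype.cpp change above is most likely a narrowing fix: matchEnd is declared size_t in that hunk (and the matching cast suggests matchStart is too), and brace-initializing an int array from size_t values is a narrowing conversion that C++11 list-initialization rejects and that several compilers warn about even in older modes, hence the explicit static_cast&lt;int&gt;. A minimal standalone reproduction with made-up values:

    #include <cstddef>

    int main()
    {
        std::size_t matchStart = 3, matchEnd = 8;
        // int ovector[2] = { matchStart, matchEnd };  // narrowing from size_t to int
        int ovector[2] = { static_cast<int>(matchStart), static_cast<int>(matchEnd) };
        return ovector[1] - ovector[0] - 5; // 0
    }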