author     Lorry Tar Creator <lorry-tar-importer@lorry>   2015-05-20 09:56:07 +0000
committer  Lorry Tar Creator <lorry-tar-importer@lorry>   2015-05-20 09:56:07 +0000
commit     41386e9cb918eed93b3f13648cbef387e371e451 (patch)
tree       a97f9d7bd1d9d091833286085f72da9d83fd0606 /Source/JavaScriptCore/jit/JITOpcodes.cpp
parent     e15dd966d523731101f70ccf768bba12435a0208 (diff)
download   WebKitGtk-tarball-41386e9cb918eed93b3f13648cbef387e371e451.tar.gz
webkitgtk-2.4.9
Diffstat (limited to 'Source/JavaScriptCore/jit/JITOpcodes.cpp')
-rw-r--r--  Source/JavaScriptCore/jit/JITOpcodes.cpp  851
1 file changed, 367 insertions, 484 deletions
diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp
index 84cbed519..2bdae1914 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009, 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2012, 2013 Apple Inc. All rights reserved.
  * Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
  *
  * Redistribution and use in source and binary forms, with or without
@@ -28,24 +28,18 @@
 #if ENABLE(JIT)

 #include "JIT.h"

-#include "BasicBlockLocation.h"
+#include "Arguments.h"
 #include "CopiedSpaceInlines.h"
 #include "Debugger.h"
-#include "Exception.h"
 #include "Heap.h"
 #include "JITInlines.h"
 #include "JSArray.h"
 #include "JSCell.h"
 #include "JSFunction.h"
-#include "JSPropertyNameEnumerator.h"
+#include "JSPropertyNameIterator.h"
 #include "LinkBuffer.h"
-#include "MaxFrameExtentForSlowPathCall.h"
-#include "RepatchBuffer.h"
 #include "SlowPathCall.h"
-#include "TypeLocation.h"
-#include "TypeProfilerLog.h"
 #include "VirtualRegister.h"
-#include "Watchdog.h"

 namespace JSC {
@@ -65,12 +59,21 @@ void JIT::emit_op_mov(Instruction* currentInstruction)
     emitPutVirtualRegister(dst);
 }

+void JIT::emit_op_captured_mov(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    int src = currentInstruction[2].u.operand;
+
+    emitGetVirtualRegister(src, regT0);
+    emitNotifyWrite(regT0, regT1, currentInstruction[3].u.watchpointSet);
+    emitPutVirtualRegister(dst);
+}
+
 void JIT::emit_op_end(Instruction* currentInstruction)
 {
     RELEASE_ASSERT(returnValueGPR != callFrameRegister);
     emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
-    emitFunctionEpilogue();
+    restoreReturnAddressBeforeReturn(Address(callFrameRegister, CallFrame::returnPCOffset()));
     ret();
 }
@@ -114,7 +117,8 @@ void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
     emitJumpSlowCaseIfNotJSCell(regT0, baseVal);

     // Check that baseVal 'ImplementsHasInstance'.
-    addSlowCase(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance)));
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
+    addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance)));
 }

 void JIT::emit_op_instanceof(Instruction* currentInstruction)
@@ -133,7 +137,8 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
     emitJumpSlowCaseIfNotJSCell(regT1, proto);

     // Check that prototype is an object
-    addSlowCase(emitJumpIfCellNotObject(regT1));
+    loadPtr(Address(regT1, JSCell::structureOffset()), regT3);
+    addSlowCase(emitJumpIfNotObject(regT3));

     // Optimistically load the result true, and start looping.
     // Initially, regT1 still contains proto and regT2 still contains value.
@@ -143,7 +148,7 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
     // Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
     // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
-    emitLoadStructure(regT2, regT2, regT3);
+    loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
     load64(Address(regT2, Structure::prototypeOffset()), regT2);
     Jump isInstance = branchPtr(Equal, regT2, regT1);
     emitJumpIfJSCell(regT2).linkTo(loop, this);
@@ -168,12 +173,12 @@ void JIT::emit_op_is_undefined(Instruction* currentInstruction)
     Jump done = jump();

     isCell.link(this);
-    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
     move(TrustedImm32(0), regT0);
     Jump notMasqueradesAsUndefined = jump();

     isMasqueradesAsUndefined.link(this);
-    emitLoadStructure(regT0, regT1, regT2);
     move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
     loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1);
     comparePtr(Equal, regT0, regT1, regT0);
@@ -215,7 +220,8 @@ void JIT::emit_op_is_string(Instruction* currentInstruction)
     emitGetVirtualRegister(value, regT0);
     Jump isNotCell = emitJumpIfNotJSCell(regT0);

-    compare8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType), regT0);
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+    compare8(Equal, Address(regT1, Structure::typeInfoTypeOffset()), TrustedImm32(StringType), regT0);
     emitTagAsBoolImmediate(regT0);
     Jump done = jump();

@@ -226,26 +232,48 @@ void JIT::emit_op_is_string(Instruction* currentInstruction)
     emitPutVirtualRegister(dst);
 }

-void JIT::emit_op_is_object(Instruction* currentInstruction)
+void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int value = currentInstruction[2].u.operand;
+    int activation = currentInstruction[1].u.operand;
+    Jump activationNotCreated = branchTest64(Zero, addressFor(activation));
+    emitGetVirtualRegister(activation, regT0);
+    callOperation(operationTearOffActivation, regT0);
+    activationNotCreated.link(this);
+}

-    emitGetVirtualRegister(value, regT0);
-    Jump isNotCell = emitJumpIfNotJSCell(regT0);
+void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
+{
+    int arguments = currentInstruction[1].u.operand;
+    int activation = currentInstruction[2].u.operand;

-    compare8(AboveOrEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType), regT0);
-    emitTagAsBoolImmediate(regT0);
-    Jump done = jump();
+    Jump argsNotCreated = branchTest64(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(VirtualRegister(arguments)).offset())));
+    emitGetVirtualRegister(unmodifiedArgumentsRegister(VirtualRegister(arguments)).offset(), regT0);
+    emitGetVirtualRegister(activation, regT1);
+    callOperation(operationTearOffArguments, regT0, regT1);
+    argsNotCreated.link(this);
+}

-    isNotCell.link(this);
-    move(TrustedImm32(ValueFalse), regT0);
+void JIT::emit_op_ret(Instruction* currentInstruction)
+{
+    ASSERT(callFrameRegister != regT1);
+    ASSERT(regT1 != returnValueGPR);
+    ASSERT(returnValueGPR != callFrameRegister);

-    done.link(this);
-    emitPutVirtualRegister(dst);
+    // Return the result in %eax.
+    emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
+
+    // Grab the return address.
+    emitGetReturnPCFromCallFrameHeaderPtr(regT1);
+
+    // Restore our caller's "r".
+    emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister);
+
+    // Return.
+    restoreReturnAddressBeforeReturn(regT1);
+    ret();
 }

-void JIT::emit_op_ret(Instruction* currentInstruction)
+void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
 {
     ASSERT(callFrameRegister != regT1);
     ASSERT(regT1 != returnValueGPR);
@@ -253,9 +281,33 @@

     // Return the result in %eax.
     emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
+    Jump notJSCell = emitJumpIfNotJSCell(returnValueGPR);
+    loadPtr(Address(returnValueGPR, JSCell::structureOffset()), regT2);
+    Jump notObject = emitJumpIfNotObject(regT2);
+
+    // Grab the return address.
+    emitGetReturnPCFromCallFrameHeaderPtr(regT1);

-    checkStackPointerAlignment();
-    emitFunctionEpilogue();
+    // Restore our caller's "r".
+    emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister);
+
+    // Return.
+    restoreReturnAddressBeforeReturn(regT1);
+    ret();
+
+    // Return 'this' in %eax.
+    notJSCell.link(this);
+    notObject.link(this);
+    emitGetVirtualRegister(currentInstruction[2].u.operand, returnValueGPR);
+
+    // Grab the return address.
+    emitGetReturnPCFromCallFrameHeaderPtr(regT1);
+
+    // Restore our caller's "r".
+    emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister);
+
+    // Return.
+    restoreReturnAddressBeforeReturn(regT1);
     ret();
 }
@@ -267,7 +319,7 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction)
     emitGetVirtualRegister(src, regT0);

     Jump isImm = emitJumpIfNotJSCell(regT0);
-    addSlowCase(emitJumpIfCellObject(regT0));
+    addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
     isImm.link(this);

     if (dst != src)
@@ -318,8 +370,8 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction)
     Jump isImmediate = emitJumpIfNotJSCell(regT0);

     // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
-    Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
-    emitLoadStructure(regT0, regT2, regT1);
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+    Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
     move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
     addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target);
     Jump masqueradesGlobalObjectIsForeign = jump();
@@ -341,8 +393,8 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
     Jump isImmediate = emitJumpIfNotJSCell(regT0);

     // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
-    addJump(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
-    emitLoadStructure(regT0, regT2, regT1);
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+    addJump(branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
     move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
     addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target);
     Jump wasNotImmediate = jump();
@@ -424,10 +476,117 @@ void JIT::emit_op_throw(Instruction* currentInstruction)
     jumpToExceptionHandler();
 }

+void JIT::emit_op_get_pnames(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    int base = currentInstruction[2].u.operand;
+    int i = currentInstruction[3].u.operand;
+    int size = currentInstruction[4].u.operand;
+    int breakTarget = currentInstruction[5].u.operand;
+
+    JumpList isNotObject;
+
+    emitGetVirtualRegister(base, regT0);
+    if (!m_codeBlock->isKnownNotImmediate(base))
+        isNotObject.append(emitJumpIfNotJSCell(regT0));
+    if (base != m_codeBlock->thisRegister().offset() || m_codeBlock->isStrictMode()) {
+        loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+        isNotObject.append(emitJumpIfNotObject(regT2));
+    }
+
+    // We could inline the case where you have a valid cache, but
+    // this call doesn't seem to be hot.
+    Label isObject(this);
+    callOperation(operationGetPNames, regT0);
+    emitStoreCell(dst, returnValueGPR);
+    load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
+    store64(tagTypeNumberRegister, addressFor(i));
+    store32(TrustedImm32(Int32Tag), intTagFor(size));
+    store32(regT3, intPayloadFor(size));
+    Jump end = jump();
+
+    isNotObject.link(this);
+    move(regT0, regT1);
+    and32(TrustedImm32(~TagBitUndefined), regT1);
+    addJump(branch32(Equal, regT1, TrustedImm32(ValueNull)), breakTarget);
+    callOperation(operationToObject, base, regT0);
+    jump().linkTo(isObject, this);
+
+    end.link(this);
+}
+
+void JIT::emit_op_next_pname(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    int base = currentInstruction[2].u.operand;
+    int i = currentInstruction[3].u.operand;
+    int size = currentInstruction[4].u.operand;
+    int it = currentInstruction[5].u.operand;
+    int target = currentInstruction[6].u.operand;
+
+    JumpList callHasProperty;
+
+    Label begin(this);
+    load32(intPayloadFor(i), regT0);
+    Jump end = branch32(Equal, regT0, intPayloadFor(size));
+
+    // Grab key @ i
+    loadPtr(addressFor(it), regT1);
+    loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
+
+    load64(BaseIndex(regT2, regT0, TimesEight), regT2);
+
+    emitPutVirtualRegister(dst, regT2);
+
+    // Increment i
+    add32(TrustedImm32(1), regT0);
+    store32(regT0, intPayloadFor(i));
+
+    // Verify that i is valid:
+    emitGetVirtualRegister(base, regT0);
+
+    // Test base's structure
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+    callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))));
+
+    // Test base's prototype chain
+    loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3);
+    loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
+    addJump(branchTestPtr(Zero, Address(regT3)), target);
+
+    Label checkPrototype(this);
+    load64(Address(regT2, Structure::prototypeOffset()), regT2);
+    callHasProperty.append(emitJumpIfNotJSCell(regT2));
+    loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
+    callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
+    addPtr(TrustedImm32(sizeof(Structure*)), regT3);
+    branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
+
+    // Continue loop.
+    addJump(jump(), target);
+
+    // Slow case: Ask the object if i is valid.
+    callHasProperty.link(this);
+    emitGetVirtualRegister(dst, regT1);
+    callOperation(operationHasProperty, regT0, regT1);
+
+    // Test for valid key.
+    addJump(branchTest32(NonZero, regT0), target);
+    jump().linkTo(begin, this);
+
+    // End of loop.
+    end.link(this);
+}
+
 void JIT::emit_op_push_with_scope(Instruction* currentInstruction)
 {
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_push_with_scope);
-    slowPathCall.call();
+    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+    callOperation(operationPushWithScope, regT0);
+}
+
+void JIT::emit_op_pop_scope(Instruction*)
+{
+    callOperation(operationPopScope);
 }

 void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
@@ -481,51 +640,19 @@ void JIT::emit_op_to_number(Instruction* currentInstruction)
     emitPutVirtualRegister(currentInstruction[1].u.operand);
 }

-void JIT::emit_op_to_string(Instruction* currentInstruction)
+void JIT::emit_op_push_name_scope(Instruction* currentInstruction)
 {
-    int srcVReg = currentInstruction[2].u.operand;
-    emitGetVirtualRegister(srcVReg, regT0);
-
-    addSlowCase(emitJumpIfNotJSCell(regT0));
-    addSlowCase(branch8(NotEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType)));
-
-    emitPutVirtualRegister(currentInstruction[1].u.operand);
+    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+    callOperation(operationPushNameScope, &m_codeBlock->identifier(currentInstruction[1].u.operand), regT0, currentInstruction[3].u.operand);
 }

 void JIT::emit_op_catch(Instruction* currentInstruction)
 {
-    // Gotta restore the tag registers. We could be throwing from FTL, which may
-    // clobber them.
-    move(TrustedImm64(TagTypeNumber), tagTypeNumberRegister);
-    move(TrustedImm64(TagMask), tagMaskRegister);
-
     move(TrustedImmPtr(m_vm), regT3);
     load64(Address(regT3, VM::callFrameForThrowOffset()), callFrameRegister);
-    load64(Address(regT3, VM::vmEntryFrameForThrowOffset()), regT0);
-    store64(regT0, Address(regT3, VM::topVMEntryFrameOffset()));
-
-    addPtr(TrustedImm32(stackPointerOffsetFor(codeBlock()) * sizeof(Register)), callFrameRegister, stackPointerRegister);
-
     load64(Address(regT3, VM::exceptionOffset()), regT0);
     store64(TrustedImm64(JSValue::encode(JSValue())), Address(regT3, VM::exceptionOffset()));
     emitPutVirtualRegister(currentInstruction[1].u.operand);
-
-    load64(Address(regT0, Exception::valueOffset()), regT0);
-    emitPutVirtualRegister(currentInstruction[2].u.operand);
-}
-
-void JIT::emit_op_create_lexical_environment(Instruction* currentInstruction)
-{
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_lexical_environment);
-    slowPathCall.call();
-}
-
-void JIT::emit_op_get_parent_scope(Instruction* currentInstruction)
-{
-    int currentScope = currentInstruction[2].u.operand;
-    emitGetVirtualRegister(currentScope, regT0);
-    loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
-    emitStoreCell(currentInstruction[1].u.operand, regT0);
 }

 void JIT::emit_op_switch_imm(Instruction* currentInstruction)
@@ -537,7 +664,7 @@
     // create jump table for switch destinations, track this switch statement.
     SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
     m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
-    jumpTable->ensureCTITable();
+    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());

     emitGetVirtualRegister(scrutinee, regT0);
     callOperation(operationSwitchImmWithUnknownKeyType, regT0, tableIndex);
@@ -553,7 +680,7 @@
     // create jump table for switch destinations, track this switch statement.
     SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
     m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
-    jumpTable->ensureCTITable();
+    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());

     emitGetVirtualRegister(scrutinee, regT0);
     callOperation(operationSwitchCharWithUnknownKeyType, regT0, tableIndex);
@@ -597,12 +724,12 @@
     emitGetVirtualRegister(src1, regT0);
     Jump isImmediate = emitJumpIfNotJSCell(regT0);

-    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
     move(TrustedImm32(0), regT0);
     Jump wasNotMasqueradesAsUndefined = jump();

     isMasqueradesAsUndefined.link(this);
-    emitLoadStructure(regT0, regT2, regT1);
     move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
     loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
     comparePtr(Equal, regT0, regT2, regT0);
@@ -629,12 +756,12 @@
     emitGetVirtualRegister(src1, regT0);
     Jump isImmediate = emitJumpIfNotJSCell(regT0);

-    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
     move(TrustedImm32(1), regT0);
     Jump wasNotMasqueradesAsUndefined = jump();

     isMasqueradesAsUndefined.link(this);
-    emitLoadStructure(regT0, regT2, regT1);
     move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
     loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
     comparePtr(NotEqual, regT0, regT2, regT0);
@@ -652,8 +779,10 @@
     emitPutVirtualRegister(dst);
 }

-void JIT::emit_op_enter(Instruction*)
+void JIT::emit_op_enter(Instruction* currentInstruction)
 {
+    emitEnterOptimizationCheck();
+
     // Even though CTI doesn't use them, we initialize our constant
     // registers to zap stale pointers, to avoid unnecessarily prolonging
     // object lifetime and increasing GC pressure.
@@ -661,17 +790,38 @@ void JIT::emit_op_enter(Instruction*)
     for (size_t j = 0; j < count; ++j)
         emitInitRegister(virtualRegisterForLocal(j).offset());

-    emitWriteBarrier(m_codeBlock->ownerExecutable());
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_enter);
+    slowPathCall.call();
+}

-    emitEnterOptimizationCheck();
+void JIT::emit_op_create_activation(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+
+    Jump activationCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
+    callOperation(operationCreateActivation, 0);
+    emitStoreCell(dst, returnValueGPR);
+    activationCreated.link(this);
 }

-void JIT::emit_op_get_scope(Instruction* currentInstruction)
+void JIT::emit_op_create_arguments(Instruction* currentInstruction)
 {
     int dst = currentInstruction[1].u.operand;
-    emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
-    loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT0);
-    emitStoreCell(dst, regT0);
+
+    Jump argsCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
+
+    callOperation(operationCreateArguments);
+    emitStoreCell(dst, returnValueGPR);
+    emitStoreCell(unmodifiedArgumentsRegister(VirtualRegister(dst)), returnValueGPR);
+
+    argsCreated.link(this);
+}
+
+void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+
+    store64(TrustedImm64((int64_t)0), Address(callFrameRegister, sizeof(Register) * dst));
 }

 void JIT::emit_op_to_this(Instruction* currentInstruction)
@@ -680,66 +830,60 @@
     emitGetVirtualRegister(currentInstruction[1].u.operand, regT1);

     emitJumpSlowCaseIfNotJSCell(regT1);
+    loadPtr(Address(regT1, JSCell::structureOffset()), regT0);

-    addSlowCase(branch8(NotEqual, Address(regT1, JSCell::typeInfoTypeOffset()), TrustedImm32(FinalObjectType)));
+    addSlowCase(branch8(NotEqual, Address(regT0, Structure::typeInfoTypeOffset()), TrustedImm32(FinalObjectType)));
     loadPtr(cachedStructure, regT2);
-    addSlowCase(branchTestPtr(Zero, regT2));
-    load32(Address(regT2, Structure::structureIDOffset()), regT2);
-    addSlowCase(branch32(NotEqual, Address(regT1, JSCell::structureIDOffset()), regT2));
+    addSlowCase(branchPtr(NotEqual, regT0, regT2));
+}
+
+void JIT::emit_op_get_callee(Instruction* currentInstruction)
+{
+    int result = currentInstruction[1].u.operand;
+    WriteBarrierBase<JSCell>* cachedFunction = &currentInstruction[2].u.jsCell;
+    emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
+
+    loadPtr(cachedFunction, regT2);
+    addSlowCase(branchPtr(NotEqual, regT0, regT2));
+
+    emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_get_callee(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCase(iter);
+
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_callee);
+    slowPathCall.call();
 }

 void JIT::emit_op_create_this(Instruction* currentInstruction)
 {
     int callee = currentInstruction[2].u.operand;
-    WriteBarrierBase<JSCell>* cachedFunction = &currentInstruction[4].u.jsCell;
     RegisterID calleeReg = regT0;
-    RegisterID rareDataReg = regT4;
     RegisterID resultReg = regT0;
     RegisterID allocatorReg = regT1;
     RegisterID structureReg = regT2;
-    RegisterID cachedFunctionReg = regT4;
     RegisterID scratchReg = regT3;

     emitGetVirtualRegister(callee, calleeReg);
-    loadPtr(Address(calleeReg, JSFunction::offsetOfRareData()), rareDataReg);
-    addSlowCase(branchTestPtr(Zero, rareDataReg));
-    loadPtr(Address(rareDataReg, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
-    loadPtr(Address(rareDataReg, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
+    loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
+    loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
     addSlowCase(branchTestPtr(Zero, allocatorReg));

-    loadPtr(cachedFunction, cachedFunctionReg);
-    Jump hasSeenMultipleCallees = branchPtr(Equal, cachedFunctionReg, TrustedImmPtr(JSCell::seenMultipleCalleeObjects()));
-    addSlowCase(branchPtr(NotEqual, calleeReg, cachedFunctionReg));
-    hasSeenMultipleCallees.link(this);
-
     emitAllocateJSObject(allocatorReg, structureReg, resultReg, scratchReg);
     emitPutVirtualRegister(currentInstruction[1].u.operand);
 }

 void JIT::emitSlow_op_create_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    linkSlowCase(iter); // doesn't have rare data
     linkSlowCase(iter); // doesn't have an allocation profile
     linkSlowCase(iter); // allocation failed
-    linkSlowCase(iter); // cached function didn't match

     JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_this);
     slowPathCall.call();
 }

-void JIT::emit_op_check_tdz(Instruction* currentInstruction)
-{
-    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-    addSlowCase(branchTest64(Zero, regT0));
-}
-
-void JIT::emitSlow_op_check_tdz(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    linkSlowCase(iter);
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_tdz_error);
-    slowPathCall.call();
-}
-
 void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
 {
     Jump profilerDone = branchTestPtr(Zero, AbsoluteAddress(m_vm->enabledProfilerAddress()));
@@ -764,7 +908,6 @@ void JIT::emitSlow_op_to_this(Instruction* currentInstruction, Vector<SlowCaseEn
     linkSlowCase(iter);
     linkSlowCase(iter);
     linkSlowCase(iter);
-    linkSlowCase(iter);

     JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_this);
     slowPathCall.call();
@@ -886,44 +1029,110 @@ void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCase
     slowPathCall.call();
 }

-void JIT::emitSlow_op_to_string(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
 {
-    linkSlowCase(iter); // Not JSCell.
-    linkSlowCase(iter); // Not JSString.
+    int dst = currentInstruction[1].u.operand;
+    int argumentsRegister = currentInstruction[2].u.operand;
+    addSlowCase(branchTest64(NonZero, addressFor(argumentsRegister)));
+    emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
+    sub32(TrustedImm32(1), regT0);
+    emitFastArithReTagImmediate(regT0, regT0);
+    emitPutVirtualRegister(dst, regT0);
+}

-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_string);
-    slowPathCall.call();
+void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCase(iter);
+    int dst = currentInstruction[1].u.operand;
+    int base = currentInstruction[2].u.operand;
+    callOperation(operationGetArgumentsLength, dst, base);
+}
+
+void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    int argumentsRegister = currentInstruction[2].u.operand;
+    int property = currentInstruction[3].u.operand;
+    addSlowCase(branchTest64(NonZero, addressFor(argumentsRegister)));
+    emitGetVirtualRegister(property, regT1);
+    addSlowCase(emitJumpIfNotImmediateInteger(regT1));
+    add32(TrustedImm32(1), regT1);
+    // regT1 now contains the integer index of the argument we want, including this
+    emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT2);
+    addSlowCase(branch32(AboveOrEqual, regT1, regT2));
+
+    signExtend32ToPtr(regT1, regT1);
+    load64(BaseIndex(callFrameRegister, regT1, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0);
+    emitValueProfilingSite();
+    emitPutVirtualRegister(dst, regT0);
+}
+
+void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    int dst = currentInstruction[1].u.operand;
+    int arguments = currentInstruction[2].u.operand;
+    int property = currentInstruction[3].u.operand;
+
+    linkSlowCase(iter);
+    Jump skipArgumentsCreation = jump();
+
+    linkSlowCase(iter);
+    linkSlowCase(iter);
+    callOperation(operationCreateArguments);
+    emitStoreCell(arguments, returnValueGPR);
+    emitStoreCell(unmodifiedArgumentsRegister(VirtualRegister(arguments)), returnValueGPR);
+
+    skipArgumentsCreation.link(this);
+    emitGetVirtualRegister(arguments, regT0);
+    emitGetVirtualRegister(property, regT1);
+    callOperation(WithProfile, operationGetByValGeneric, dst, regT0, regT1);
 }

 #endif // USE(JSVALUE64)

+void JIT::emit_op_touch_entry(Instruction* currentInstruction)
+{
+    if (m_codeBlock->symbolTable()->m_functionEnteredOnce.hasBeenInvalidated())
+        return;
+
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_touch_entry);
+    slowPathCall.call();
+}
+
 void JIT::emit_op_loop_hint(Instruction*)
 {
     // Emit the JIT optimization check:
     if (canBeOptimized()) {
-        addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()),
-            AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
+        if (Options::enableOSREntryInLoops()) {
+            addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()),
+                AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
+        } else {
+            // Add with saturation.
+            move(TrustedImmPtr(m_codeBlock->addressOfJITExecuteCounter()), regT3);
+            load32(regT3, regT2);
+            Jump dontAdd = branch32(
+                GreaterThan, regT2,
+                TrustedImm32(std::numeric_limits<int32_t>::max() - Options::executionCounterIncrementForLoop()));
+            add32(TrustedImm32(Options::executionCounterIncrementForLoop()), regT2);
+            store32(regT2, regT3);
+            dontAdd.link(this);
+        }
     }

     // Emit the watchdog timer check:
-    if (m_vm->watchdog)
-        addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->watchdog->timerDidFireAddress())));
+    if (m_vm->watchdog.isEnabled())
+        addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->watchdog.timerDidFireAddress())));
 }

 void JIT::emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator& iter)
 {
 #if ENABLE(DFG_JIT)
     // Emit the slow path for the JIT optimization check:
-    if (canBeOptimized()) {
+    if (canBeOptimized() && Options::enableOSREntryInLoops()) {
         linkSlowCase(iter);

         callOperation(operationOptimize, m_bytecodeOffset);

         Jump noOptimizedEntry = branchTestPtr(Zero, returnValueGPR);
-        if (!ASSERT_DISABLED) {
-            Jump ok = branchPtr(MacroAssembler::Above, regT0, TrustedImmPtr(bitwise_cast<void*>(static_cast<intptr_t>(1000))));
-            abortWithReason(JITUnreasonableLoopHintJumpTarget);
-            ok.link(this);
-        }
         jump(returnValueGPR);

         noOptimizedEntry.link(this);
@@ -932,7 +1141,7 @@ void JIT::emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator& i
 #endif

     // Emit the slow path of the watchdog timer check:
-    if (m_vm->watchdog) {
+    if (m_vm->watchdog.isEnabled()) {
         linkSlowCase(iter);

         callOperation(operationHandleWatchdogTimer);
@@ -950,36 +1159,32 @@ void JIT::emit_op_new_func(Instruction* currentInstruction)
 {
     Jump lazyJump;
     int dst = currentInstruction[1].u.operand;
-
-#if USE(JSVALUE64)
-    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+    if (currentInstruction[3].u.operand) {
+#if USE(JSVALUE32_64)
+        lazyJump = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag));
 #else
-    emitLoadPayload(currentInstruction[2].u.operand, regT0);
+        lazyJump = branchTest64(NonZero, addressFor(dst));
 #endif
-    FunctionExecutable* funcExec = m_codeBlock->functionDecl(currentInstruction[3].u.operand);
-    callOperation(operationNewFunction, dst, regT0, funcExec);
+    }
+
+    FunctionExecutable* funcExec = m_codeBlock->functionDecl(currentInstruction[2].u.operand);
+    callOperation(operationNewFunction, dst, funcExec);
+
+    if (currentInstruction[3].u.operand)
+        lazyJump.link(this);
+}
+
+void JIT::emit_op_new_captured_func(Instruction* currentInstruction)
+{
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_new_captured_func);
+    slowPathCall.call();
 }

 void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
 {
-    Jump notUndefinedScope;
     int dst = currentInstruction[1].u.operand;
-#if USE(JSVALUE64)
-    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
-    notUndefinedScope = branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsUndefined())));
-    store64(TrustedImm64(JSValue::encode(jsUndefined())), Address(callFrameRegister, sizeof(Register) * dst));
-#else
-    emitLoadPayload(currentInstruction[2].u.operand, regT0);
-    notUndefinedScope = branch32(NotEqual, tagFor(currentInstruction[2].u.operand), TrustedImm32(JSValue::UndefinedTag));
-    emitStore(dst, jsUndefined());
-#endif
-
-    Jump done = jump();
-    notUndefinedScope.link(this);
-
-    FunctionExecutable* funcExpr = m_codeBlock->functionExpr(currentInstruction[3].u.operand);
-    callOperation(operationNewFunction, dst, regT0, funcExpr);
-    done.link(this);
+
+    FunctionExecutable* funcExpr = m_codeBlock->functionExpr(currentInstruction[2].u.operand);
+    callOperation(operationNewFunction, dst, funcExpr);
 }

 void JIT::emit_op_new_array(Instruction* currentInstruction)
@@ -1016,335 +1221,13 @@ void JIT::emit_op_new_array_buffer(Instruction* currentInstruction)
     callOperation(operationNewArrayBufferWithProfile, dst, currentInstruction[4].u.arrayAllocationProfile, values, size);
 }

-#if USE(JSVALUE64)
-void JIT::emit_op_has_structure_property(Instruction* currentInstruction)
-{
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    int enumerator = currentInstruction[4].u.operand;
-
-    emitGetVirtualRegister(base, regT0);
-    emitGetVirtualRegister(enumerator, regT1);
-    emitJumpSlowCaseIfNotJSCell(regT0, base);
-
-    load32(Address(regT0, JSCell::structureIDOffset()), regT0);
-    addSlowCase(branch32(NotEqual, regT0, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset())));
-
-    move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
-    emitPutVirtualRegister(dst);
-}
-
-void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
-{
-    Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
-
-    PatchableJump badType;
-
-    // FIXME: Add support for other types like TypedArrays and Arguments.
-    // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
-    JumpList slowCases = emitLoadForArrayMode(currentInstruction, arrayMode, badType);
-    move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
-    Jump done = jump();
-
-    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
-
-    patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
-    patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
-
-    patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
-
-    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
-        m_codeBlock, patchBuffer,
-        ("Baseline has_indexed_property stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
-
-    RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
-    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(operationHasIndexedPropertyGeneric));
-}
-
-void JIT::emit_op_has_indexed_property(Instruction* currentInstruction)
-{
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    int property = currentInstruction[3].u.operand;
-    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
-    ByValInfo* byValInfo = m_codeBlock->addByValInfo();
-
-    emitGetVirtualRegisters(base, regT0, property, regT1);
-
-    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
-    // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
-    // number was signed since m_vectorLength is always less than intmax (since the total allocation
-    // size is always less than 4Gb). As such zero extending will have been correct (and extending the value
-    // to 64-bits is necessary since it's used in the address calculation. We zero extend rather than sign
-    // extending since it makes it easier to re-tag the value in the slow case.
-    zeroExtend32ToPtr(regT1, regT1);
-
-    emitJumpSlowCaseIfNotJSCell(regT0, base);
-    emitArrayProfilingSiteWithCell(regT0, regT2, profile);
-    and32(TrustedImm32(IndexingShapeMask), regT2);
-
-    JITArrayMode mode = chooseArrayMode(profile);
-    PatchableJump badType;
-
-    // FIXME: Add support for other types like TypedArrays and Arguments.
-    // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
-    JumpList slowCases = emitLoadForArrayMode(currentInstruction, mode, badType);
-
-    move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
-
-    addSlowCase(badType);
-    addSlowCase(slowCases);
-
-    Label done = label();
-
-    emitPutVirtualRegister(dst);
-
-    Label nextHotPath = label();
-
-    m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, PatchableJump(), badType, mode, profile, done, nextHotPath));
-}
-
-void JIT::emitSlow_op_has_indexed_property(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    int property = currentInstruction[3].u.operand;
-    ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
-
-    linkSlowCaseIfNotJSCell(iter, base); // base cell check
-    linkSlowCase(iter); // base array check
-    linkSlowCase(iter); // vector length check
-    linkSlowCase(iter); // empty value
-
-    Label slowPath = label();
-
-    emitGetVirtualRegister(base, regT0);
-    emitGetVirtualRegister(property, regT1);
-    Call call = callOperation(operationHasIndexedPropertyDefault, dst, regT0, regT1, byValInfo);
-
-    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
-    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
-    m_byValInstructionIndex++;
-}
-
-void JIT::emit_op_get_direct_pname(Instruction* currentInstruction)
-{
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    int index = currentInstruction[4].u.operand;
-    int enumerator = currentInstruction[5].u.operand;
-
-    // Check that base is a cell
-    emitGetVirtualRegister(base, regT0);
-    emitJumpSlowCaseIfNotJSCell(regT0, base);
-
-    // Check the structure
-    emitGetVirtualRegister(enumerator, regT2);
-    load32(Address(regT0, JSCell::structureIDOffset()), regT1);
-    addSlowCase(branch32(NotEqual, regT1, Address(regT2, JSPropertyNameEnumerator::cachedStructureIDOffset())));
-
-    // Compute the offset
-    emitGetVirtualRegister(index, regT1);
-    // If index is less than the enumerator's cached inline storage, then it's an inline access
-    Jump outOfLineAccess = branch32(AboveOrEqual, regT1, Address(regT2, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
-    addPtr(TrustedImm32(JSObject::offsetOfInlineStorage()), regT0);
-    signExtend32ToPtr(regT1, regT1);
-    load64(BaseIndex(regT0, regT1, TimesEight), regT0);
-
-    Jump done = jump();
-
-    // Otherwise it's out of line
-    outOfLineAccess.link(this);
-    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
-    sub32(Address(regT2, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), regT1);
-    neg32(regT1);
-    signExtend32ToPtr(regT1, regT1);
-    int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
-    load64(BaseIndex(regT0, regT1, TimesEight, offsetOfFirstProperty), regT0);
-
-    done.link(this);
-    emitValueProfilingSite();
-    emitPutVirtualRegister(dst, regT0);
-}
-
-void JIT::emitSlow_op_get_direct_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_captured_mov(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    int base = currentInstruction[2].u.operand;
-    linkSlowCaseIfNotJSCell(iter, base);
+    VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet;
+    if (!set || set->state() == IsInvalidated)
+        return;
     linkSlowCase(iter);
-
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_direct_pname);
-    slowPathCall.call();
-}
-
-void JIT::emit_op_enumerator_structure_pname(Instruction* currentInstruction)
-{
-    int dst = currentInstruction[1].u.operand;
-    int enumerator = currentInstruction[2].u.operand;
-    int index = currentInstruction[3].u.operand;
-
-    emitGetVirtualRegister(index, regT0);
-    emitGetVirtualRegister(enumerator, regT1);
-    Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endStructurePropertyIndexOffset()));
-
-    move(TrustedImm64(JSValue::encode(jsNull())), regT0);
-
-    Jump done = jump();
-    inBounds.link(this);
-
-    loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
-    signExtend32ToPtr(regT0, regT0);
-    load64(BaseIndex(regT1, regT0, TimesEight), regT0);
-
-    done.link(this);
-    emitPutVirtualRegister(dst);
-}
-
-void JIT::emit_op_enumerator_generic_pname(Instruction* currentInstruction)
-{
-    int dst = currentInstruction[1].u.operand;
-    int enumerator = currentInstruction[2].u.operand;
-    int index = currentInstruction[3].u.operand;
-
-    emitGetVirtualRegister(index, regT0);
-    emitGetVirtualRegister(enumerator, regT1);
-    Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endGenericPropertyIndexOffset()));
-
-    move(TrustedImm64(JSValue::encode(jsNull())), regT0);
-
-    Jump done = jump();
-    inBounds.link(this);
-
-    loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
-    signExtend32ToPtr(regT0, regT0);
-    load64(BaseIndex(regT1, regT0, TimesEight), regT0);
-
-    done.link(this);
-    emitPutVirtualRegister(dst);
-}
-
-void JIT::emit_op_profile_type(Instruction* currentInstruction)
-{
-    TypeLocation* cachedTypeLocation = currentInstruction[2].u.location;
-    int valueToProfile = currentInstruction[1].u.operand;
-
-    emitGetVirtualRegister(valueToProfile, regT0);
-
-    JumpList jumpToEnd;
-
-    jumpToEnd.append(branchTest64(Zero, regT0));
-
-    // Compile in a predictive type check, if possible, to see if we can skip writing to the log.
-    // These typechecks are inlined to match those of the 64-bit JSValue type checks.
-    if (cachedTypeLocation->m_lastSeenType == TypeUndefined)
-        jumpToEnd.append(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsUndefined()))));
-    else if (cachedTypeLocation->m_lastSeenType == TypeNull)
-        jumpToEnd.append(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNull()))));
-    else if (cachedTypeLocation->m_lastSeenType == TypeBoolean) {
-        move(regT0, regT1);
-        and64(TrustedImm32(~1), regT1);
-        jumpToEnd.append(branch64(Equal, regT1, TrustedImm64(ValueFalse)));
-    } else if (cachedTypeLocation->m_lastSeenType == TypeMachineInt)
-        jumpToEnd.append(emitJumpIfImmediateInteger(regT0));
-    else if (cachedTypeLocation->m_lastSeenType == TypeNumber)
-        jumpToEnd.append(emitJumpIfImmediateNumber(regT0));
-    else if (cachedTypeLocation->m_lastSeenType == TypeString) {
-        Jump isNotCell = emitJumpIfNotJSCell(regT0);
-        jumpToEnd.append(branch8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType)));
-        isNotCell.link(this);
-    }
-
-    // Load the type profiling log into T2.
-    TypeProfilerLog* cachedTypeProfilerLog = m_vm->typeProfilerLog();
-    move(TrustedImmPtr(cachedTypeProfilerLog), regT2);
-    // Load the next log entry into T1.
-    loadPtr(Address(regT2, TypeProfilerLog::currentLogEntryOffset()), regT1);
-
-    // Store the JSValue onto the log entry.
-    store64(regT0, Address(regT1, TypeProfilerLog::LogEntry::valueOffset()));
-
-    // Store the structureID of the cell if T0 is a cell, otherwise, store 0 on the log entry.
-    Jump notCell = emitJumpIfNotJSCell(regT0);
-    load32(Address(regT0, JSCell::structureIDOffset()), regT0);
-    store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
-    Jump skipIsCell = jump();
-    notCell.link(this);
-    store32(TrustedImm32(0), Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
-    skipIsCell.link(this);
-
-    // Store the typeLocation on the log entry.
-    move(TrustedImmPtr(cachedTypeLocation), regT0);
-    store64(regT0, Address(regT1, TypeProfilerLog::LogEntry::locationOffset()));
-
-    // Increment the current log entry.
-    addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), regT1);
-    store64(regT1, Address(regT2, TypeProfilerLog::currentLogEntryOffset()));
-    Jump skipClearLog = branchPtr(NotEqual, regT1, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr()));
-    // Clear the log if we're at the end of the log.
-    callOperation(operationProcessTypeProfilerLog);
-    skipClearLog.link(this);
-
-    jumpToEnd.link(this);
-}
-
-#endif // USE(JSVALUE64)
-
-void JIT::emit_op_get_enumerable_length(Instruction* currentInstruction)
-{
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_enumerable_length);
-    slowPathCall.call();
-}
-
-void JIT::emitSlow_op_has_structure_property(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    linkSlowCase(iter);
-    linkSlowCase(iter);
-
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_has_structure_property);
-    slowPathCall.call();
-}
-
-void JIT::emit_op_has_generic_property(Instruction* currentInstruction)
-{
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_has_generic_property);
-    slowPathCall.call();
-}
-
-void JIT::emit_op_get_property_enumerator(Instruction* currentInstruction)
-{
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_property_enumerator);
-    slowPathCall.call();
-}
-
-void JIT::emit_op_to_index_string(Instruction* currentInstruction)
-{
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_index_string);
-    slowPathCall.call();
-}
-
-void JIT::emit_op_profile_control_flow(Instruction* currentInstruction)
-{
-    BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation;
-    if (!basicBlockLocation->hasExecuted())
-        basicBlockLocation->emitExecuteCode(*this, regT1);
-}
-
-void JIT::emit_op_create_direct_arguments(Instruction* currentInstruction)
-{
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_direct_arguments);
-    slowPathCall.call();
-}
-
-void JIT::emit_op_create_scoped_arguments(Instruction* currentInstruction)
-{
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_scoped_arguments);
-    slowPathCall.call();
-}
-
-void JIT::emit_op_create_out_of_band_arguments(Instruction* currentInstruction)
-{
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_out_of_band_arguments);
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_captured_mov);
     slowPathCall.call();
 }