Diffstat (limited to 'Source/JavaScriptCore/jit/JITCall.cpp')
-rw-r--r--  Source/JavaScriptCore/jit/JITCall.cpp | 277
1 file changed, 129 insertions(+), 148 deletions(-)
diff --git a/Source/JavaScriptCore/jit/JITCall.cpp b/Source/JavaScriptCore/jit/JITCall.cpp
index d406d5b6f..90c2e4fb9 100644
--- a/Source/JavaScriptCore/jit/JITCall.cpp
+++ b/Source/JavaScriptCore/jit/JITCall.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,18 +29,16 @@
#if USE(JSVALUE64)
#include "JIT.h"
-#include "CallFrameShuffler.h"
+#include "Arguments.h"
#include "CodeBlock.h"
#include "JITInlines.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
-#include "JSCInlines.h"
-#include "LinkBuffer.h"
+#include "Operations.h"
+#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
-#include "SetupVarargsFrame.h"
-#include "StackAlignment.h"
#include "ThunkGenerators.h"
#include <wtf/StringPrintStream.h>
@@ -54,50 +52,74 @@ void JIT::emitPutCallResult(Instruction* instruction)
emitPutVirtualRegister(dst);
}
-void JIT::compileSetupVarargsFrame(Instruction* instruction, CallLinkInfo* info)
+void JIT::compileLoadVarargs(Instruction* instruction)
{
int thisValue = instruction[3].u.operand;
int arguments = instruction[4].u.operand;
int firstFreeRegister = instruction[5].u.operand;
- int firstVarArgOffset = instruction[6].u.operand;
+
+ JumpList slowCase;
+ JumpList end;
+ bool canOptimize = m_codeBlock->usesArguments()
+ && arguments == m_codeBlock->argumentsRegister().offset()
+ && !m_codeBlock->symbolTable()->slowArguments();
+
+ if (canOptimize) {
+ emitGetVirtualRegister(arguments, regT0);
+ slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));
+
+ emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
+ slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
+ // regT0: argumentCountIncludingThis
+
+ move(regT0, regT1);
+ neg64(regT1);
+ add64(TrustedImm32(firstFreeRegister - JSStack::CallFrameHeaderSize), regT1);
+ lshift64(TrustedImm32(3), regT1);
+ addPtr(callFrameRegister, regT1);
+ // regT1: newCallFrame
+
+ slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT1));
+
+ // Initialize ArgumentCount.
+ store32(regT0, Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+
+ // Initialize 'this'.
+ emitGetVirtualRegister(thisValue, regT2);
+ store64(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
+
+ // Copy arguments.
+ signExtend32ToPtr(regT0, regT0);
+ end.append(branchSub64(Zero, TrustedImm32(1), regT0));
+ // regT0: argumentCount
+
+ Label copyLoop = label();
+ load64(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2);
+ store64(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
+ branchSub64(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this);
+
+ end.append(jump());
+ }
+
+ if (canOptimize)
+ slowCase.link(this);
emitGetVirtualRegister(arguments, regT1);
- callOperation(operationSizeFrameForVarargs, regT1, -firstFreeRegister, firstVarArgOffset);
- move(TrustedImm32(-firstFreeRegister), regT1);
- emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1);
- addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), regT1, stackPointerRegister);
+ callOperation(operationSizeAndAllocFrameForVarargs, regT1, firstFreeRegister);
+ emitGetVirtualRegister(thisValue, regT1);
emitGetVirtualRegister(arguments, regT2);
- callOperation(operationSetupVarargsFrame, regT1, regT2, firstVarArgOffset, regT0);
+ callOperation(operationLoadVarargs, returnValueGPR, regT1, regT2);
move(returnValueGPR, regT1);
- // Profile the argument count.
- load32(Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), regT2);
- load8(info->addressOfMaxNumArguments(), regT0);
- Jump notBiggest = branch32(Above, regT0, regT2);
- Jump notSaturated = branch32(BelowOrEqual, regT2, TrustedImm32(255));
- move(TrustedImm32(255), regT2);
- notSaturated.link(this);
- store8(regT2, info->addressOfMaxNumArguments());
- notBiggest.link(this);
-
- // Initialize 'this'.
- emitGetVirtualRegister(thisValue, regT0);
- store64(regT0, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
-
- addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
+ if (canOptimize)
+ end.link(this);
}
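Both paths above build the varargs frame the same way conceptually: compute where the new frame starts, then copy the arguments into it. A standalone sketch of the fast path's arithmetic, with Slot and the offset constants as illustrative stand-ins for JSC's Register, JSStack::CallFrameHeaderSize, and CallFrame::thisArgumentOffset():

    #include <cstdint>

    using Slot = uint64_t; // stand-in for JSC's 64-bit Register

    // Illustrative values only; the real offsets come from JSStack and CallFrame.
    constexpr int64_t kCallFrameHeaderSize = 6;
    constexpr int64_t kThisArgumentOffset = 7;

    Slot* setUpVarargsFrame(Slot* callFrame, int64_t argCountIncludingThis, int64_t firstFreeRegister)
    {
        // Matches the neg64/add64/lshift64/addPtr sequence: the new frame sits
        // (firstFreeRegister - header - argCount) slots away from the current one
        // (a negative delta here, which is why the stack-limit check follows).
        Slot* newFrame = callFrame + (firstFreeRegister - kCallFrameHeaderSize - argCountIncludingThis);

        // The copy loop walks arguments last-to-first; slot 0 ('this') and the
        // ArgumentCount header slot are initialized separately, as above.
        for (int64_t i = argCountIncludingThis - 1; i >= 1; --i)
            newFrame[kThisArgumentOffset + i] = callFrame[kThisArgumentOffset + i];
        return newFrame;
    }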
void JIT::compileCallEval(Instruction* instruction)
{
- addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), stackPointerRegister, regT1);
- storePtr(callFrameRegister, Address(regT1, CallFrame::callerFrameOffset()));
-
- addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
- checkStackPointerAlignment();
-
- callOperation(operationCallEval, regT1);
-
+ callOperationWithCallFrameRollbackOnException(operationCallEval);
addSlowCase(branch64(Equal, regT0, TrustedImm64(JSValue::encode(JSValue()))));
+ emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister);
sampleCodeBlock(m_codeBlock);
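The argument-count profiling deleted in the hunk above is a saturating-maximum update: remember the largest count seen, clamped so the byte-wide store8 cannot wrap. The same logic in plain C++, modeling info->addressOfMaxNumArguments() as a uint8_t reference:

    #include <algorithm>
    #include <cstdint>

    inline void profileArgumentCount(uint8_t& maxNumArguments, uint32_t argumentCountIncludingThis)
    {
        if (maxNumArguments > argumentCountIncludingThis)
            return; // notBiggest: keep the existing maximum
        // notSaturated: store the count as-is if it fits, otherwise clamp to 255.
        maxNumArguments = static_cast<uint8_t>(std::min<uint32_t>(argumentCountIncludingThis, 255));
    }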
@@ -106,21 +128,10 @@ void JIT::compileCallEval(Instruction* instruction)
void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
- CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
- info->setUpCall(CallLinkInfo::Call, CodeOrigin(m_bytecodeOffset), regT0);
-
linkSlowCase(iter);
- int registerOffset = -instruction[4].u.operand;
- addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
-
- load64(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee - sizeof(CallerFrameAndPC)), regT0);
- move(TrustedImmPtr(info), regT2);
- MacroAssemblerCodeRef virtualThunk = virtualThunkFor(m_vm, *info);
- info->setSlowStub(createJITStubRoutine(virtualThunk, *m_vm, nullptr, true));
- emitNakedCall(virtualThunk.code());
- addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
- checkStackPointerAlignment();
+ emitGetFromCallFrameHeader64(JSStack::Callee, regT0);
+ emitNakedCall(m_vm->getCTIStub(virtualCallThunkGenerator).code());
sampleCodeBlock(m_codeBlock);
@@ -136,23 +147,17 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
- Initializes ArgumentCount; CallerFrame; Callee.
For a JS call:
+ - Caller initializes ScopeChain.
- Callee initializes ReturnPC; CodeBlock.
- Callee restores callFrameRegister before return.
For a non-JS call:
- - Caller initializes ReturnPC; CodeBlock.
+ - Caller initializes ScopeChain; ReturnPC; CodeBlock.
- Caller restores callFrameRegister after return.
*/
- COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct), call_and_construct_opcodes_must_be_same_length);
- COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_varargs), call_and_call_varargs_opcodes_must_be_same_length);
- COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct_varargs), call_and_construct_varargs_opcodes_must_be_same_length);
- COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call), call_and_tail_call_opcodes_must_be_same_length);
- COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call_varargs), call_and_tail_call_varargs_opcodes_must_be_same_length);
- CallLinkInfo* info;
- if (opcodeID != op_call_eval)
- info = m_codeBlock->addCallLinkInfo();
- if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs || opcodeID == op_tail_call_varargs)
- compileSetupVarargsFrame(instruction, info);
+
+ if (opcodeID == op_call_varargs)
+ compileLoadVarargs(instruction);
else {
int argCount = instruction[3].u.operand;
int registerOffset = -instruction[4].u.operand;
@@ -160,21 +165,23 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
if (opcodeID == op_call && shouldEmitProfiling()) {
emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0);
Jump done = emitJumpIfNotJSCell(regT0);
- load32(Address(regT0, JSCell::structureIDOffset()), regT0);
- store32(regT0, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID());
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
+ storePtr(regT0, instruction[6].u.arrayProfile->addressOfLastSeenStructure());
done.link(this);
}
- addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
- store32(TrustedImm32(argCount), Address(stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
- } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.
+ addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT1);
+ store32(TrustedImm32(argCount), Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ } // regT1 holds newCallFrame with ArgumentCount initialized.
uint32_t bytecodeOffset = instruction - m_codeBlock->instructions().begin();
- uint32_t locationBits = CallSiteIndex(bytecodeOffset).bits();
- store32(TrustedImm32(locationBits), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + TagOffset));
+ uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(bytecodeOffset);
+ store32(TrustedImm32(locationBits), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
emitGetVirtualRegister(callee, regT0); // regT0 holds callee.
- store64(regT0, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) - sizeof(CallerFrameAndPC)));
+ store64(callFrameRegister, Address(regT1, CallFrame::callerFrameOffset()));
+ store64(regT0, Address(regT1, JSStack::Callee * static_cast<int>(sizeof(Register))));
+ move(regT1, callFrameRegister);
if (opcodeID == op_call_eval) {
compileCallEval(instruction);
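Both versions of the hunk above pack two values into the 64-bit ArgumentCount header slot: the payload half of the callee frame's slot carries the argument count, while the tag half of the caller's own slot records the call-site location (a CallSiteIndex in the newer code, a CallFrame::Location-encoded bytecode offset in the older). A sketch of that split, assuming the little-endian payload/tag layout implied by the OBJECT_OFFSETOF addressing:

    #include <cstdint>

    // Modeled on JSC's EncodedValueDescriptor; layout shown is little-endian.
    union ArgumentCountSlot {
        uint64_t encoded;
        struct {
            uint32_t payload; // argument count including 'this'
            uint32_t tag;     // call-site location bits
        } asBits;
    };

    inline void setArgumentCount(ArgumentCountSlot& calleeSlot, uint32_t argCount)
    {
        calleeSlot.asBits.payload = argCount; // written into the frame being built
    }

    inline void setCallSiteLocation(ArgumentCountSlot& callerSlot, uint32_t locationBits)
    {
        callerSlot.asBits.tag = locationBits; // written into the caller's own slot
    }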
@@ -185,44 +192,15 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
addSlowCase(slowCase);
- ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
- info->setUpCall(CallLinkInfo::callTypeFor(opcodeID), CodeOrigin(m_bytecodeOffset), regT0);
- m_callCompilationInfo.append(CallCompilationInfo());
- m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
- m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;
-
- if (opcodeID == op_tail_call) {
- CallFrameShuffleData shuffleData;
- shuffleData.tagTypeNumber = GPRInfo::tagTypeNumberRegister;
- shuffleData.numLocals =
- instruction[4].u.operand - sizeof(CallerFrameAndPC) / sizeof(Register);
- shuffleData.args.resize(instruction[3].u.operand);
- for (int i = 0; i < instruction[3].u.operand; ++i) {
- shuffleData.args[i] =
- ValueRecovery::displacedInJSStack(
- virtualRegisterForArgument(i) - instruction[4].u.operand,
- DataFormatJS);
- }
- shuffleData.callee =
- ValueRecovery::inGPR(regT0, DataFormatJS);
- shuffleData.setupCalleeSaveRegisters(m_codeBlock);
- info->setFrameShuffleData(shuffleData);
- CallFrameShuffler(*this, shuffleData).prepareForTailCall();
- m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
- return;
- }
+ ASSERT(m_callStructureStubCompilationInfo.size() == callLinkInfoIndex);
+ m_callStructureStubCompilationInfo.append(StructureStubCompilationInfo());
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callType = CallLinkInfo::callTypeFor(opcodeID);
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset;
- if (opcodeID == op_tail_call_varargs) {
- emitRestoreCalleeSaves();
- prepareForTailCallSlow();
- m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
- return;
- }
-
- m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
-
- addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
- checkStackPointerAlignment();
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
+ emitPutToCallFrameHeader(regT1, JSStack::ScopeChain);
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
sampleCodeBlock(m_codeBlock);
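The hot path in compileOpCall is a monomorphic inline cache: branchPtrWithPatch plants a patchable expected-callee constant and emitNakedCall a patchable target, so once linked, a repeat call to the same function runs with no lookup at all. The control flow, reduced to plain C++ with illustrative types (in the JIT both fields are literals patched into the instruction stream, not memory loads):

    struct CallLinkSite {
        const void* expectedCallee = nullptr;        // patched at addressOfLinkedFunctionCheck
        void (*linkedTarget)(const void*) = nullptr; // patched at hotPathOther
    };

    inline void inlineCachedCall(CallLinkSite& site, const void* callee, void (*linkSlowPath)(const void*))
    {
        if (callee == site.expectedCallee && site.linkedTarget)
            site.linkedTarget(callee); // linked fast path: direct call
        else
            linkSlowPath(callee);      // slow case: the link thunk resolves the callee and patches the site
    }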
@@ -238,34 +216,62 @@ void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vec
linkSlowCase(iter);
- if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs)
- emitRestoreCalleeSaves();
-
- move(TrustedImmPtr(m_callCompilationInfo[callLinkInfoIndex].callLinkInfo), regT2);
-
- m_callCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_vm->getCTIStub(linkCallThunkGenerator).code());
-
- if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs) {
- abortWithReason(JITDidReturnFromTailCall);
- return;
- }
-
- addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
- checkStackPointerAlignment();
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_vm->getCTIStub(linkConstructThunkGenerator).code() : m_vm->getCTIStub(linkCallThunkGenerator).code());
sampleCodeBlock(m_codeBlock);
emitPutCallResult(instruction);
}
-void JIT::emit_op_call(Instruction* currentInstruction)
+void JIT::privateCompileClosureCall(CallLinkInfo* callLinkInfo, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr)
{
- compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
+ JumpList slowCases;
+
+ slowCases.append(branchTestPtr(NonZero, regT0, tagMaskRegister));
+ slowCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(expectedStructure)));
+ slowCases.append(branchPtr(NotEqual, Address(regT0, JSFunction::offsetOfExecutable()), TrustedImmPtr(expectedExecutable)));
+
+ loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT1);
+ emitPutToCallFrameHeader(regT1, JSStack::ScopeChain);
+
+ Call call = nearCall();
+ Jump done = jump();
+
+ slowCases.link(this);
+ move(TrustedImmPtr(callLinkInfo->callReturnLocation.executableAddress()), regT2);
+ restoreReturnAddressBeforeReturn(regT2);
+ Jump slow = jump();
+
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
+
+ patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
+ patchBuffer.link(done, callLinkInfo->hotPathOther.labelAtOffset(0));
+ patchBuffer.link(slow, CodeLocationLabel(m_vm->getCTIStub(virtualCallThunkGenerator).code()));
+
+ RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline closure call stub for %s, return point %p, target %p (%s)",
+ toCString(*m_codeBlock).data(),
+ callLinkInfo->hotPathOther.labelAtOffset(0).executableAddress(),
+ codePtr.executableAddress(),
+ toCString(pointerDump(calleeCodeBlock)).data())),
+ *m_vm, m_codeBlock->ownerExecutable(), expectedStructure, expectedExecutable,
+ callLinkInfo->codeOrigin));
+
+ RepatchBuffer repatchBuffer(m_codeBlock);
+
+ repatchBuffer.replaceWithJump(
+ RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo->hotPathBegin),
+ CodeLocationLabel(stubRoutine->code().code()));
+ repatchBuffer.relink(callLinkInfo->callReturnLocation, m_vm->getCTIStub(virtualCallThunkGenerator).code());
+
+ callLinkInfo->stub = stubRoutine.release();
}
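privateCompileClosureCall's stub takes the fast path only after three guards: the callee is a cell (no tag bits set), it has the expected Structure, and it wraps the expected executable; any failure falls through to the virtual call thunk. The guard sequence in straight-line C++ (field layouts are hypothetical stand-ins for the JSCell/JSFunction offsets used above):

    #include <cstdint>

    struct StructureStub { /* opaque */ };
    struct ExecutableStub { /* opaque */ };
    struct FunctionStub {
        const StructureStub* structure;   // stands in for JSCell::structureOffset()
        const ExecutableStub* executable; // stands in for JSFunction::offsetOfExecutable()
        const void* scopeChain;           // stands in for JSFunction::offsetOfScopeChain()
    };

    // Returns the scope chain to install when all guards pass, or nullptr to
    // take the virtual-call slow path, mirroring slowCases above.
    inline const void* closureCallGuards(uint64_t encodedCallee, uint64_t tagMask,
                                         const StructureStub* expectedStructure,
                                         const ExecutableStub* expectedExecutable)
    {
        if (encodedCallee & tagMask)
            return nullptr; // not a cell
        auto* function = reinterpret_cast<const FunctionStub*>(encodedCallee);
        if (function->structure != expectedStructure || function->executable != expectedExecutable)
            return nullptr;
        return function->scopeChain; // fast path: store ScopeChain, then nearCall()
    }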
-void JIT::emit_op_tail_call(Instruction* currentInstruction)
+void JIT::emit_op_call(Instruction* currentInstruction)
{
- compileOpCall(op_tail_call, currentInstruction, m_callLinkInfoIndex++);
+ compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
}
void JIT::emit_op_call_eval(Instruction* currentInstruction)
@@ -278,16 +284,6 @@ void JIT::emit_op_call_varargs(Instruction* currentInstruction)
compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++);
}
-void JIT::emit_op_tail_call_varargs(Instruction* currentInstruction)
-{
- compileOpCall(op_tail_call_varargs, currentInstruction, m_callLinkInfoIndex++);
-}
-
-void JIT::emit_op_construct_varargs(Instruction* currentInstruction)
-{
- compileOpCall(op_construct_varargs, currentInstruction, m_callLinkInfoIndex++);
-}
-
void JIT::emit_op_construct(Instruction* currentInstruction)
{
compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
@@ -298,11 +294,6 @@ void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry
compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++);
}
-void JIT::emitSlow_op_tail_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(op_tail_call, currentInstruction, iter, m_callLinkInfoIndex++);
-}
-
void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
compileOpCallSlowCase(op_call_eval, currentInstruction, iter, m_callLinkInfoIndex);
@@ -313,16 +304,6 @@ void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowC
compileOpCallSlowCase(op_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
}
-void JIT::emitSlow_op_tail_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(op_tail_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
-}
-
-void JIT::emitSlow_op_construct_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(op_construct_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
-}
-
void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
compileOpCallSlowCase(op_construct, currentInstruction, iter, m_callLinkInfoIndex++);