Diffstat (limited to 'Source/JavaScriptCore/llint/LowLevelInterpreter64.asm')
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter64.asm | 231
1 file changed, 138 insertions(+), 93 deletions(-)
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
index ed6799ef3..741963573 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
@@ -126,6 +126,16 @@ macro callCallSlowPath(advance, slowPath, action)
action(t0)
end
+macro callWatchdogTimerHandler(throwHandler)
+ storei PC, ArgumentCount + TagOffset[cfr]
+ prepareStateForCCall()
+ cCall2(_llint_slow_path_handle_watchdog_timer, cfr, PC)
+ move t1, cfr
+ btpnz t0, throwHandler
+ move t3, PB
+ loadi ArgumentCount + TagOffset[cfr], PC
+end
+
macro checkSwitchToJITForLoop()
checkSwitchToJIT(
1,
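For readers unfamiliar with how the new macro gets used: below is a minimal sketch of a call site, built only from constructs that appear elsewhere in this file. The host opcode, the VM::watchdogTimerFired byte, and the local labels are illustrative assumptions, not part of this diff.

    # Sketch: at a loop-edge opcode, poll an assumed watchdog-fired flag on the VM; if it is
    # set, invoke the handler. callWatchdogTimerHandler() restores cfr, PB and PC itself, so
    # on return we simply fall through and dispatch as usual.
    loadp JITStackFrame::vm[sp], t1
    btbz VM::watchdogTimerFired[t1], .afterWatchdogCheck    # field name assumed
    callWatchdogTimerHandler(.throwHandler)
.afterWatchdogCheck:
    dispatch(1)                                             # width depends on the host opcode
.throwHandler:
    jmp _llint_throw_from_slow_path_trampoline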
@@ -186,9 +196,9 @@ macro functionArityCheck(doneLabel, slow_path)
cCall2(slow_path, cfr, PC) # This slow_path has a simple protocol: t0 = 0 => no error, t0 != 0 => error
move t1, cfr
btiz t0, .continue
- loadp JITStackFrame::globalData[sp], t1
- loadp JSGlobalData::callFrameForThrow[t1], t0
- jmp JSGlobalData::targetMachinePCForThrow[t1]
+ loadp JITStackFrame::vm[sp], t1
+ loadp VM::callFrameForThrow[t1], t0
+ jmp VM::targetMachinePCForThrow[t1]
.continue:
# Reload CodeBlock and reset PC, since the slow_path clobbered them.
loadp CodeBlock[cfr], t1
@@ -243,16 +253,17 @@ _llint_op_create_this:
traceExecution()
loadisFromInstruction(2, t0)
loadp [cfr, t0, 8], t0
- loadp JSFunction::m_cachedInheritorID[t0], t2
- btpz t2, .opCreateThisSlow
- allocateBasicJSObject(JSFinalObjectSizeClassIndex, t2, t0, t1, t3, .opCreateThisSlow)
+ loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_allocator[t0], t1
+ loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_structure[t0], t2
+ btpz t1, .opCreateThisSlow
+ allocateJSObject(t1, t2, t0, t3, .opCreateThisSlow)
loadisFromInstruction(1, t1)
storeq t0, [cfr, t1, 8]
- dispatch(3)
+ dispatch(4)
.opCreateThisSlow:
callSlowPath(_llint_slow_path_create_this)
- dispatch(3)
+ dispatch(4)
_llint_op_get_callee:
@@ -283,17 +294,17 @@ _llint_op_convert_this:
_llint_op_new_object:
traceExecution()
- loadp CodeBlock[cfr], t0
- loadp CodeBlock::m_globalObject[t0], t0
- loadp JSGlobalObject::m_emptyObjectStructure[t0], t1
- allocateBasicJSObject(JSFinalObjectSizeClassIndex, t1, t0, t2, t3, .opNewObjectSlow)
+ loadpFromInstruction(3, t0)
+ loadp ObjectAllocationProfile::m_allocator[t0], t1
+ loadp ObjectAllocationProfile::m_structure[t0], t2
+ allocateJSObject(t1, t2, t0, t3, .opNewObjectSlow)
loadisFromInstruction(1, t1)
storeq t0, [cfr, t1, 8]
- dispatch(2)
+ dispatch(4)
.opNewObjectSlow:
callSlowPath(_llint_slow_path_new_object)
- dispatch(2)
+ dispatch(4)
_llint_op_mov:
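Both allocation fast paths above (op_create_this and op_new_object) now funnel into the same allocateJSObject helper, which lives in the shared interpreter source and is not shown in this diff. The argument roles below are inferred from the two call sites only, offered as a reading aid rather than a definitive signature:

    # allocateJSObject(allocator, structure, result, scratch, slowCase)   -- inferred shape
    #   allocator : pointer loaded from ObjectAllocationProfile::m_allocator; a null value
    #               (unprimed profile) is caught beforehand in op_create_this via btpz
    #   structure : pointer loaded from ObjectAllocationProfile::m_structure
    #   result    : register that receives the newly allocated object
    #   scratch   : clobbered temporary
    #   slowCase  : label taken when the inline allocation cannot be completed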
@@ -440,63 +451,31 @@ macro preOp(arithmeticOperation, slowPath)
dispatch(2)
end
-_llint_op_pre_inc:
+_llint_op_inc:
preOp(
macro (value, slow) baddio 1, value, slow end,
_llint_slow_path_pre_inc)
-_llint_op_pre_dec:
+_llint_op_dec:
preOp(
macro (value, slow) bsubio 1, value, slow end,
_llint_slow_path_pre_dec)
-macro postOp(arithmeticOperation, slowPath)
- traceExecution()
- loadisFromInstruction(2, t0)
- loadisFromInstruction(1, t1)
- loadq [cfr, t0, 8], t2
- bieq t0, t1, .done
- bqb t2, tagTypeNumber, .slow
- move t2, t3
- arithmeticOperation(t3, .slow)
- orq tagTypeNumber, t3
- storeq t2, [cfr, t1, 8]
- storeq t3, [cfr, t0, 8]
-.done:
- dispatch(3)
-
-.slow:
- callSlowPath(slowPath)
- dispatch(3)
-end
-
-_llint_op_post_inc:
- postOp(
- macro (value, slow) baddio 1, value, slow end,
- _llint_slow_path_post_inc)
-
-
-_llint_op_post_dec:
- postOp(
- macro (value, slow) bsubio 1, value, slow end,
- _llint_slow_path_post_dec)
-
-
-_llint_op_to_jsnumber:
+_llint_op_to_number:
traceExecution()
loadisFromInstruction(2, t0)
loadisFromInstruction(1, t1)
loadConstantOrVariable(t0, t2)
- bqaeq t2, tagTypeNumber, .opToJsnumberIsImmediate
- btqz t2, tagTypeNumber, .opToJsnumberSlow
-.opToJsnumberIsImmediate:
+ bqaeq t2, tagTypeNumber, .opToNumberIsImmediate
+ btqz t2, tagTypeNumber, .opToNumberSlow
+.opToNumberIsImmediate:
storeq t2, [cfr, t1, 8]
dispatch(3)
-.opToJsnumberSlow:
- callSlowPath(_llint_slow_path_to_jsnumber)
+.opToNumberSlow:
+ callSlowPath(_llint_slow_path_to_number)
dispatch(3)
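The deletion of postOp and the post_inc/post_dec opcodes above, together with the to_jsnumber -> to_number rename, is consistent with post-increment being lowered at bytecode-generation time instead of having dedicated opcodes. The sketch below shows one plausible lowering; it is an assumption about the bytecode generator, not something this diff itself establishes.

    # Assumed lowering for "dst = x++" once op_post_inc is gone:
    #     to_number  dst, x      # capture the old value of x as a number
    #     inc        x           # then increment x in place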
@@ -907,7 +886,7 @@ _llint_op_get_array_length:
loadisFromInstruction(1, t1)
loadpFromInstruction(8, t2)
loadp JSObject::m_butterfly[t3], t0
- loadi -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0], t0
+ loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0
bilt t0, 0, .opGetArrayLengthSlow
orq tagTypeNumber, t0
valueProfile(t0, t2)
@@ -1040,18 +1019,18 @@ _llint_op_get_by_val:
bineq t2, ContiguousShape, .opGetByValNotContiguous
.opGetByValIsContiguous:
- biaeq t1, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t3], .opGetByValSlow
+ biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t3], .opGetByValOutOfBounds
loadisFromInstruction(1, t0)
loadq [t3, t1, 8], t2
- btqz t2, .opGetByValSlow
+ btqz t2, .opGetByValOutOfBounds
jmp .opGetByValDone
.opGetByValNotContiguous:
bineq t2, DoubleShape, .opGetByValNotDouble
- biaeq t1, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t3], .opGetByValSlow
+ biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t3], .opGetByValOutOfBounds
loadis 8[PB, PC, 8], t0
loadd [t3, t1, 8], ft0
- bdnequn ft0, ft0, .opGetByValSlow
+ bdnequn ft0, ft0, .opGetByValOutOfBounds
fd2q ft0, t2
subq tagTypeNumber, t2
jmp .opGetByValDone
@@ -1059,10 +1038,10 @@ _llint_op_get_by_val:
.opGetByValNotDouble:
subi ArrayStorageShape, t2
bia t2, SlowPutArrayStorageShape - ArrayStorageShape, .opGetByValSlow
- biaeq t1, -sizeof IndexingHeader + IndexingHeader::m_vectorLength[t3], .opGetByValSlow
+ biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t3], .opGetByValOutOfBounds
loadisFromInstruction(1, t0)
loadq ArrayStorage::m_vector[t3, t1, 8], t2
- btqz t2, .opGetByValSlow
+ btqz t2, .opGetByValOutOfBounds
.opGetByValDone:
storeq t2, [cfr, t0, 8]
@@ -1070,6 +1049,11 @@ _llint_op_get_by_val:
valueProfile(t2, t0)
dispatch(6)
+.opGetByValOutOfBounds:
+ if VALUE_PROFILER
+ loadpFromInstruction(4, t0)
+ storeb 1, ArrayProfile::m_outOfBounds[t0]
+ end
.opGetByValSlow:
callSlowPath(_llint_slow_path_get_by_val)
dispatch(6)
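Note that the new .opGetByValOutOfBounds label stores into ArrayProfile::m_outOfBounds and then deliberately falls through into .opGetByValSlow, so the access itself still completes through the slow path; the matching .opPutByValOutOfBounds label later in this diff follows the same pattern. Presumably the recorded bit lets the optimizing JITs plan for out-of-bounds or hole accesses rather than always speculating in-bounds.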
@@ -1089,7 +1073,7 @@ _llint_op_get_argument_by_val:
negi t2
sxi2q t2, t2
loadisFromInstruction(1, t3)
- loadpFromInstruction(4, t1)
+ loadpFromInstruction(5, t1)
loadq ThisArgumentOffset[cfr, t2, 8], t0
storeq t0, [cfr, t3, 8]
valueProfile(t0, t1)
@@ -1133,20 +1117,20 @@ _llint_op_get_by_pname:
macro contiguousPutByVal(storeCallback)
- biaeq t3, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0], .outOfBounds
+ biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .outOfBounds
.storeResult:
loadisFromInstruction(3, t2)
storeCallback(t2, t1, [t0, t3, 8])
dispatch(5)
.outOfBounds:
- biaeq t3, -sizeof IndexingHeader + IndexingHeader::m_vectorLength[t0], .opPutByValSlow
+ biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
if VALUE_PROFILER
loadp 32[PB, PC, 8], t2
storeb 1, ArrayProfile::m_mayStoreToHole[t2]
end
addi 1, t3, t2
- storei t2, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0]
+ storei t2, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]
jmp .storeResult
end
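A sketch of how a shape-specific caller instantiates storeCallback, patterned on the int32 fast path of op_put_by_val and on idioms already present in this file; treat the exact guard as illustrative rather than a quote of the surrounding code.

    # Hypothetical int32 instantiation: the callback receives a register holding the value
    # operand index, a scratch register, and the destination address; it bails to the existing
    # .opPutByValSlow label if the incoming value is not an immediate int32.
    contiguousPutByVal(
        macro (operand, scratch, address)
            loadConstantOrVariable(operand, scratch)
            bqb scratch, tagTypeNumber, .opPutByValSlow    # not int32-tagged
            storeq scratch, address
        end)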
@@ -1197,7 +1181,7 @@ _llint_op_put_by_val:
.opPutByValNotContiguous:
bineq t2, ArrayStorageShape, .opPutByValSlow
- biaeq t3, -sizeof IndexingHeader + IndexingHeader::m_vectorLength[t0], .opPutByValSlow
+ biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
btqz ArrayStorage::m_vector[t0, t3, 8], .opPutByValArrayStorageEmpty
.opPutByValArrayStorageStoreResult:
loadisFromInstruction(3, t2)
@@ -1212,21 +1196,21 @@ _llint_op_put_by_val:
storeb 1, ArrayProfile::m_mayStoreToHole[t1]
end
addi 1, ArrayStorage::m_numValuesInVector[t0]
- bib t3, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0], .opPutByValArrayStorageStoreResult
+ bib t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .opPutByValArrayStorageStoreResult
addi 1, t3, t1
- storei t1, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0]
+ storei t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]
jmp .opPutByValArrayStorageStoreResult
+.opPutByValOutOfBounds:
+ if VALUE_PROFILER
+ loadpFromInstruction(4, t0)
+ storeb 1, ArrayProfile::m_outOfBounds[t0]
+ end
.opPutByValSlow:
callSlowPath(_llint_slow_path_put_by_val)
dispatch(5)
-_llint_op_loop:
- traceExecution()
- dispatchIntIndirect(1)
-
-
_llint_op_jmp:
traceExecution()
dispatchIntIndirect(1)
@@ -1581,16 +1565,16 @@ _llint_op_catch:
# the interpreter's throw trampoline (see _llint_throw_trampoline).
# The JIT throwing protocol calls for the cfr to be in t0. The throwing
# code must have known that we were throwing to the interpreter, and have
- # set JSGlobalData::targetInterpreterPCForThrow.
+ # set VM::targetInterpreterPCForThrow.
move t0, cfr
loadp CodeBlock[cfr], PB
loadp CodeBlock::m_instructions[PB], PB
- loadp JITStackFrame::globalData[sp], t3
- loadp JSGlobalData::targetInterpreterPCForThrow[t3], PC
+ loadp JITStackFrame::vm[sp], t3
+ loadp VM::targetInterpreterPCForThrow[t3], PC
subp PB, PC
rshiftp 3, PC
- loadq JSGlobalData::exception[t3], t0
- storeq 0, JSGlobalData::exception[t3]
+ loadq VM::exception[t3], t0
+ storeq 0, VM::exception[t3]
loadisFromInstruction(1, t2)
storeq t0, [cfr, t2, 8]
traceExecution()
@@ -1610,23 +1594,84 @@ _llint_throw_from_slow_path_trampoline:
# When throwing from the interpreter (i.e. throwing from LLIntSlowPaths), so
# the throw target is not necessarily interpreted code, we come to here.
# This essentially emulates the JIT's throwing protocol.
- loadp JITStackFrame::globalData[sp], t1
- loadp JSGlobalData::callFrameForThrow[t1], t0
- jmp JSGlobalData::targetMachinePCForThrow[t1]
+ loadp JITStackFrame::vm[sp], t1
+ loadp VM::callFrameForThrow[t1], t0
+ jmp VM::targetMachinePCForThrow[t1]
_llint_throw_during_call_trampoline:
preserveReturnAddressAfterCall(t2)
- loadp JITStackFrame::globalData[sp], t1
- loadp JSGlobalData::callFrameForThrow[t1], t0
- jmp JSGlobalData::targetMachinePCForThrow[t1]
+ loadp JITStackFrame::vm[sp], t1
+ loadp VM::callFrameForThrow[t1], t0
+ jmp VM::targetMachinePCForThrow[t1]
+
+# Gives you the scope in t0, while allowing you to optionally perform additional checks on the
+# scopes as they are traversed. scopeCheck() is called with two arguments: the register
+# holding the scope, and a register that can be used for scratch. Note that this does not
+# use t3, so you can hold stuff in t3 if need be.
+macro getDeBruijnScope(deBruijinIndexOperand, scopeCheck)
+ loadp ScopeChain[cfr], t0
+ loadis deBruijinIndexOperand, t2
+
+ btiz t2, .done
+ loadp CodeBlock[cfr], t1
+ bineq CodeBlock::m_codeType[t1], FunctionCode, .loop
+ btbz CodeBlock::m_needsActivation[t1], .loop
+
+ loadis CodeBlock::m_activationRegister[t1], t1
+
+ # Need to conditionally skip over one scope.
+ btpz [cfr, t1, 8], .noActivation
+ scopeCheck(t0, t1)
+ loadp JSScope::m_next[t0], t0
+.noActivation:
+ subi 1, t2
+
+ btiz t2, .done
+.loop:
+ scopeCheck(t0, t1)
+ loadp JSScope::m_next[t0], t0
+ subi 1, t2
+ btinz t2, .loop
+
+.done:
+end
+
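Both users of getDeBruijnScope in this diff pass an empty scopeCheck, but the hook exists so a caller can bail out while the chain is walked. A minimal sketch of a guarded caller follows; the .slowCase label and the specific guard are illustrative assumptions only.

    # Hypothetical guarded walk: give up if the chain would be exhausted before the
    # requested depth is reached.
    getDeBruijnScope(
        24[PB, PC, 8],
        macro (scope, scratch)
            btpz JSScope::m_next[scope], .slowCase    # illustrative check only
        end)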
+_llint_op_get_scoped_var:
+ traceExecution()
+ # Operands are as follows:
+ # pc[1]: Destination for the load
+ # pc[2]: Index of register in the scope
+ # 24[PB, PC, 8] De Bruijin index.
+ getDeBruijnScope(24[PB, PC, 8], macro (scope, scratch) end)
+ loadisFromInstruction(1, t1)
+ loadisFromInstruction(2, t2)
+
+ loadp JSVariableObject::m_registers[t0], t0
+ loadp [t0, t2, 8], t3
+ storep t3, [cfr, t1, 8]
+ loadp 32[PB, PC, 8], t1
+ valueProfile(t3, t1)
+ dispatch(5)
+
+
+_llint_op_put_scoped_var:
+ traceExecution()
+ getDeBruijnScope(16[PB, PC, 8], macro (scope, scratch) end)
+ loadis 24[PB, PC, 8], t1
+ loadConstantOrVariable(t1, t3)
+ loadis 8[PB, PC, 8], t1
+ writeBarrier(t3)
+ loadp JSVariableObject::m_registers[t0], t0
+ storep t3, [t0, t1, 8]
+ dispatch(4)
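The raw operand addressing used in the two new opcodes above can be read as follows; this is a restatement of how PB and PC are used elsewhere in this file (see op_catch, which derives PC by subtracting PB and shifting right by 3), not new behaviour.

    # N[PB, PC, 8] addresses PB + 8 * PC + N, i.e. the (N / 8)th 64-bit slot of the current
    # instruction, where PB is the instruction base and PC indexes the opcode's own slot:
    #      8[PB, PC, 8]  -> operand 1 (what loadisFromInstruction(1, ...) reads)
    #     16[PB, PC, 8]  -> operand 2
    #     24[PB, PC, 8]  -> operand 3 (the De Bruijn index operand of op_get_scoped_var)
    #     32[PB, PC, 8]  -> operand 4 (the value profile slot of op_get_scoped_var)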
macro nativeCallTrampoline(executableOffsetToFunction)
storep 0, CodeBlock[cfr]
if X86_64
- loadp JITStackFrame::globalData + 8[sp], t0
- storep cfr, JSGlobalData::topCallFrame[t0]
+ loadp JITStackFrame::vm + 8[sp], t0
+ storep cfr, VM::topCallFrame[t0]
loadp CallerFrame[cfr], t0
loadq ScopeChain[t0], t1
storeq t1, ScopeChain[cfr]
@@ -1639,15 +1684,15 @@ macro nativeCallTrampoline(executableOffsetToFunction)
move t0, cfr # Restore cfr to avoid loading from stack
call executableOffsetToFunction[t1]
addp 16 - 8, sp
- loadp JITStackFrame::globalData + 8[sp], t3
+ loadp JITStackFrame::vm + 8[sp], t3
elsif C_LOOP
loadp CallerFrame[cfr], t0
loadp ScopeChain[t0], t1
storep t1, ScopeChain[cfr]
- loadp JITStackFrame::globalData[sp], t3
- storep cfr, JSGlobalData::topCallFrame[t3]
+ loadp JITStackFrame::vm[sp], t3
+ storep cfr, VM::topCallFrame[t3]
move t0, t2
preserveReturnAddressAfterCall(t3)
@@ -1659,20 +1704,20 @@ macro nativeCallTrampoline(executableOffsetToFunction)
cloopCallNative executableOffsetToFunction[t1]
restoreReturnAddressBeforeReturn(t3)
- loadp JITStackFrame::globalData[sp], t3
+ loadp JITStackFrame::vm[sp], t3
else
error
end
- btqnz JSGlobalData::exception[t3], .exception
+ btqnz VM::exception[t3], .exception
ret
.exception:
preserveReturnAddressAfterCall(t1)
loadi ArgumentCount + TagOffset[cfr], PC
loadp CodeBlock[cfr], PB
loadp CodeBlock::m_instructions[PB], PB
- loadp JITStackFrame::globalData[sp], t0
- storep cfr, JSGlobalData::topCallFrame[t0]
+ loadp JITStackFrame::vm[sp], t0
+ storep cfr, VM::topCallFrame[t0]
callSlowPath(_llint_throw_from_native_call)
jmp _llint_throw_from_slow_path_trampoline
end