Diffstat (limited to 'Source/JavaScriptCore/llint/LowLevelInterpreter.cpp')
-rw-r--r-- | Source/JavaScriptCore/llint/LowLevelInterpreter.cpp | 224
1 file changed, 134 insertions, 90 deletions
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp
index 72bcddf57..48148c6f4 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -25,15 +25,19 @@
 #include "config.h"
 #include "LowLevelInterpreter.h"
+
+#if ENABLE(LLINT)
+
 #include "LLIntOfflineAsmConfig.h"
 #include <wtf/InlineASM.h>
-#if !ENABLE(JIT)
+#if ENABLE(LLINT_C_LOOP)
 #include "CodeBlock.h"
 #include "CommonSlowPaths.h"
 #include "LLIntCLoop.h"
 #include "LLIntSlowPaths.h"
-#include "JSCInlines.h"
+#include "Operations.h"
+#include "VMInspector.h"
 #include <wtf/Assertions.h>
 #include <wtf/MathExtras.h>
@@ -86,12 +90,6 @@ using namespace JSC::LLInt;
 #define OFFLINE_ASM_BEGIN
 #define OFFLINE_ASM_END
-#if ENABLE(OPCODE_TRACING)
-#define TRACE_OPCODE(opcode) dataLogF(" op %s\n", #opcode)
-#else
-#define TRACE_OPCODE(opcode)
-#endif
-
 // To keep compilers happy in case of unused labels, force usage of the label:
 #define USE_LABEL(label) \
     do { \
@@ -99,9 +97,7 @@ using namespace JSC::LLInt;
             goto label; \
     } while (false)
-#define OFFLINE_ASM_OPCODE_LABEL(opcode) DEFINE_OPCODE(opcode) USE_LABEL(opcode); TRACE_OPCODE(opcode);
-
-#define OFFLINE_ASM_GLOBAL_LABEL(label) OFFLINE_ASM_GLUE_LABEL(label)
+#define OFFLINE_ASM_OPCODE_LABEL(opcode) DEFINE_OPCODE(opcode) USE_LABEL(opcode);
 #if ENABLE(COMPUTED_GOTO_OPCODES)
 #define OFFLINE_ASM_GLUE_LABEL(label) label: USE_LABEL(label);
@@ -151,7 +147,6 @@ static void Double2Ints(double val, uint32_t& lo, uint32_t& hi)
 // pseudo register, as well as hides endianness differences.
 struct CLoopRegister {
-    CLoopRegister() { i = static_cast<intptr_t>(0xbadbeef0baddbeef); }
     union {
         intptr_t i;
         uintptr_t u;
@@ -217,15 +212,10 @@ struct CLoopRegister {
 #endif // !CPU(BIG_ENDIAN)
 #endif // !USE(JSVALUE64)
-        intptr_t* ip;
         int8_t* i8p;
         void* vp;
-        CallFrame* callFrame;
         ExecState* execState;
         void* instruction;
-        VM* vm;
-        JSCell* cell;
-        ProtoCallFrame* protoCallFrame;
         NativeFunction nativeFunc;
 #if USE(JSVALUE64)
         int64_t i64;
@@ -236,13 +226,6 @@ struct CLoopRegister {
         Opcode opcode;
     };
-    operator ExecState*() { return execState; }
-    operator Instruction*() { return reinterpret_cast<Instruction*>(instruction); }
-    operator VM*() { return vm; }
-    operator ProtoCallFrame*() { return protoCallFrame; }
-    operator Register*() { return reinterpret_cast<Register*>(vp); }
-    operator JSCell*() { return cell; }
-
 #if USE(JSVALUE64)
     inline void clearHighWord() { i32padding = 0; }
 #else
@@ -254,7 +237,7 @@ struct CLoopRegister {
 // The llint C++ interpreter loop:
 //
-JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, ProtoCallFrame* protoCallFrame, bool isInitializationPass)
+JSValue CLoop::execute(CallFrame* callFrame, Opcode entryOpcode, bool isInitializationPass)
 {
 #define CAST reinterpret_cast
 #define SIGN_BIT32(x) ((x) & 0x80000000)
@@ -289,6 +272,8 @@ JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm,
         return JSValue();
     }
+    ASSERT(callFrame->vm().topCallFrame == callFrame);
+
     // Define the pseudo registers used by the LLINT C Loop backend:
     ASSERT(sizeof(CLoopRegister) == sizeof(intptr_t));
@@ -323,66 +308,69 @@ JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm,
     //  2. 32 bit result values will be in the low 32-bit of t0.
     //  3. 64 bit result values will be in t0.
-    CLoopRegister t0, t1, t2, t3, t5, t7, sp, cfr, lr, pc;
+    CLoopRegister t0, t1, t2, t3;
 #if USE(JSVALUE64)
-    CLoopRegister pcBase, tagTypeNumber, tagMask;
+    CLoopRegister rBasePC, tagTypeNumber, tagMask;
 #endif
+    CLoopRegister rRetVPC;
     CLoopDoubleRegister d0, d1;
-    lr.opcode = getOpcode(llint_return_to_host);
-    sp.vp = vm->interpreter->stack().topOfStack() + 1;
-    cfr.callFrame = vm->topCallFrame;
-#ifndef NDEBUG
-    void* startSP = sp.vp;
-    CallFrame* startCFR = cfr.callFrame;
-#endif
+    // Keep the compiler happy. We don't really need this, but the compiler
+    // will complain. This makes the warning go away.
+    t0.i = 0;
+    t1.i = 0;
-    // Initialize the incoming args for doVMEntryToJavaScript:
-    t0.vp = executableAddress;
-    t1.vm = vm;
-    t2.protoCallFrame = protoCallFrame;
+    VM* vm = &callFrame->vm();
+
+    CodeBlock* codeBlock = callFrame->codeBlock();
+    Instruction* vPC;
+
+    // rPC is an alias for vPC. Set up the alias:
+    CLoopRegister& rPC = *CAST<CLoopRegister*>(&vPC);
+
+#if USE(JSVALUE32_64)
+    vPC = codeBlock->instructions().begin();
+#else // USE(JSVALUE64)
+    vPC = 0;
+    rBasePC.vp = codeBlock->instructions().begin();
-#if USE(JSVALUE64)
     // For the ASM llint, JITStubs takes care of this initialization. We do
     // it explicitly here for the C loop:
     tagTypeNumber.i = 0xFFFF000000000000;
    tagMask.i = 0xFFFF000000000002;
 #endif // USE(JSVALUE64)
+    // cfr is an alias for callFrame. Set up this alias:
+    CLoopRegister& cfr = *CAST<CLoopRegister*>(&callFrame);
+
+    // Simulate a native return PC which should never be used:
+    rRetVPC.i = 0xbbadbeef;
+
     // Interpreter variables for value passing between opcodes and/or helpers:
     NativeFunction nativeFunc = 0;
     JSValue functionReturnValue;
-    Opcode opcode = getOpcode(entryOpcodeID);
+    Opcode opcode;
-#define PUSH(cloopReg) \
-    do { \
-        sp.ip--; \
-        *sp.ip = cloopReg.i; \
-    } while (false)
+    opcode = entryOpcode;
-#define POP(cloopReg) \
-    do { \
-        cloopReg.i = *sp.ip; \
-        sp.ip++; \
-    } while (false)
+    #if ENABLE(OPCODE_STATS)
+    #define RECORD_OPCODE_STATS(__opcode) \
+        OpcodeStats::recordInstruction(__opcode)
+    #else
+    #define RECORD_OPCODE_STATS(__opcode)
+    #endif
-#if ENABLE(OPCODE_STATS)
-#define RECORD_OPCODE_STATS(__opcode) OpcodeStats::recordInstruction(__opcode)
-#else
-#define RECORD_OPCODE_STATS(__opcode)
-#endif
+    #if USE(JSVALUE32_64)
+    #define FETCH_OPCODE() vPC->u.opcode
+    #else // USE(JSVALUE64)
+    #define FETCH_OPCODE() *bitwise_cast<Opcode*>(rBasePC.i8p + rPC.i * 8)
+    #endif // USE(JSVALUE64)
-#if USE(JSVALUE32_64)
-#define FETCH_OPCODE() pc.opcode
-#else // USE(JSVALUE64)
-#define FETCH_OPCODE() *bitwise_cast<Opcode*>(pcBase.i8p + pc.i * 8)
-#endif // USE(JSVALUE64)
-
-#define NEXT_INSTRUCTION() \
-    do { \
-        opcode = FETCH_OPCODE(); \
-        DISPATCH_OPCODE(); \
-    } while (false)
+    #define NEXT_INSTRUCTION() \
+        do { \
+            opcode = FETCH_OPCODE(); \
+            DISPATCH_OPCODE(); \
+        } while (false)
 #if ENABLE(COMPUTED_GOTO_OPCODES)
@@ -424,22 +412,14 @@ JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm,
 #include "LLIntAssembly.h"
-    OFFLINE_ASM_GLUE_LABEL(llint_return_to_host)
-    {
-        ASSERT(startSP == sp.vp);
-        ASSERT(startCFR == cfr.callFrame);
-#if USE(JSVALUE32_64)
-        return JSValue(t1.i, t0.i); // returning JSValue(tag, payload);
-#else
-        return JSValue::decode(t0.encodedJSValue);
-#endif
-    }
-
     // In the ASM llint, getHostCallReturnValue() is a piece of glue
-    // function provided by the JIT (see jit/JITOperations.cpp).
+    // function provided by the JIT (see dfg/DFGOperations.cpp).
     // We simulate it here with a pseduo-opcode handler.
     OFFLINE_ASM_GLUE_LABEL(getHostCallReturnValue)
     {
+        // The ASM part pops the frame:
+        callFrame = callFrame->callerFrame();
+
         // The part in getHostCallReturnValueWithExecState():
         JSValue result = vm->hostCallReturnValue;
 #if USE(JSVALUE32_64)
@@ -448,8 +428,12 @@ JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm,
 #else
         t0.encodedJSValue = JSValue::encode(result);
 #endif
-        opcode = lr.opcode;
-        DISPATCH_OPCODE();
+        goto doReturnHelper;
+    }
+
+    OFFLINE_ASM_GLUE_LABEL(returnFromJavaScript)
+    {
+        return vm->exception();
     }
 #if !ENABLE(COMPUTED_GOTO_OPCODES)
@@ -459,6 +443,55 @@ JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm,
     } // END bytecode handler cases.
+    //========================================================================
+    // Bytecode helpers:
+
+    doReturnHelper: {
+        ASSERT(!!callFrame);
+        if (callFrame->isVMEntrySentinel()) {
+#if USE(JSVALUE32_64)
+            return JSValue(t1.i, t0.i); // returning JSValue(tag, payload);
+#else
+            return JSValue::decode(t0.encodedJSValue);
+#endif
+        }
+
+        // The normal ASM llint call implementation returns to the caller as
+        // recorded in rRetVPC, and the caller would fetch the return address
+        // from ArgumentCount.tag() (see the dispatchAfterCall() macro used in
+        // the callTargetFunction() macro in the llint asm files).
+        //
+        // For the C loop, we don't have the JIT stub to do this work for us. So,
+        // we jump to llint_generic_return_point.
+
+        vPC = callFrame->currentVPC();
+
+#if USE(JSVALUE64)
+        // Based on LowLevelInterpreter64.asm's dispatchAfterCall():
+
+        // When returning from a native trampoline call, unlike the assembly
+        // LLInt, we can't simply return to the caller. In our case, we grab
+        // the caller's VPC and resume execution there. However, the caller's
+        // VPC returned by callFrame->currentVPC() is in the form of the real
+        // address of the target bytecode, but the 64-bit llint expects the
+        // VPC to be a bytecode offset. Hence, we need to map it back to a
+        // bytecode offset before we dispatch via the usual dispatch mechanism
+        // i.e. NEXT_INSTRUCTION():
+
+        codeBlock = callFrame->codeBlock();
+        ASSERT(codeBlock);
+        rPC.vp = callFrame->currentVPC();
+        rPC.i = rPC.i8p - reinterpret_cast<int8_t*>(codeBlock->instructions().begin());
+        rPC.i >>= 3;
+
+        rBasePC.vp = codeBlock->instructions().begin();
+#endif // USE(JSVALUE64)
+
+        goto llint_generic_return_point;
+
+    } // END doReturnHelper.
+
+
 #if ENABLE(COMPUTED_GOTO_OPCODES)
     // Keep the compiler happy so that it doesn't complain about unused
     // labels for the LLInt trampoline glue. The labels are automatically
@@ -478,40 +511,49 @@ JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm,
 #undef CAST
 #undef SIGN_BIT32
-    return JSValue(); // to suppress a compiler warning.
 } // Interpreter::llintCLoopExecute()
 } // namespace JSC
-#elif !OS(WINDOWS)
+#else // !ENABLE(LLINT_C_LOOP)
 //============================================================================
 // Define the opcode dispatch mechanism when using an ASM loop:
 //
 // These are for building an interpreter from generated assembly code:
+#if CPU(X86_64) && COMPILER(CLANG)
+#define OFFLINE_ASM_BEGIN asm ( \
+    ".cfi_startproc\n"
+
+#define OFFLINE_ASM_END \
+    ".cfi_endproc\n" \
+);
+#else
 #define OFFLINE_ASM_BEGIN asm (
 #define OFFLINE_ASM_END );
+#endif
-#define OFFLINE_ASM_OPCODE_LABEL(__opcode) OFFLINE_ASM_LOCAL_LABEL(llint_##__opcode)
-#define OFFLINE_ASM_GLUE_LABEL(__opcode) OFFLINE_ASM_LOCAL_LABEL(__opcode)
+#define OFFLINE_ASM_OPCODE_LABEL(__opcode) OFFLINE_ASM_GLOBAL_LABEL(llint_##__opcode)
+#define OFFLINE_ASM_GLUE_LABEL(__opcode) OFFLINE_ASM_GLOBAL_LABEL(__opcode)
 #if CPU(ARM_THUMB2)
 #define OFFLINE_ASM_GLOBAL_LABEL(label) \
     ".text\n" \
-    ".align 4\n" \
     ".globl " SYMBOL_STRING(label) "\n" \
     HIDE_SYMBOL(label) "\n" \
     ".thumb\n" \
     ".thumb_func " THUMB_FUNC_PARAM(label) "\n" \
     SYMBOL_STRING(label) ":\n"
-#elif CPU(ARM64)
+#elif CPU(X86_64) && COMPILER(CLANG)
 #define OFFLINE_ASM_GLOBAL_LABEL(label) \
     ".text\n" \
-    ".align 4\n" \
     ".globl " SYMBOL_STRING(label) "\n" \
     HIDE_SYMBOL(label) "\n" \
-    SYMBOL_STRING(label) ":\n"
+    SYMBOL_STRING(label) ":\n" \
+    ".cfi_def_cfa rbp, 0\n" \
+    ".cfi_offset 16, 8\n" \
+    ".cfi_offset 6, 0\n"
 #else
 #define OFFLINE_ASM_GLOBAL_LABEL(label) \
     ".text\n" \
@@ -526,4 +568,6 @@ JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm,
 // for the interpreter, as compiled from LowLevelInterpreter.asm.
 #include "LLIntAssembly.h"
-#endif // ENABLE(JIT)
+#endif // !ENABLE(LLINT_C_LOOP)
+
+#endif // ENABLE(LLINT)
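A note on the CLoopRegister struct touched in this diff: the C loop models each of the assembly interpreter's registers (t0-t3, cfr, pc, and so on) as a single union-typed variable, so one pseudo register can be viewed as an integer, a pointer, or an opcode without a cast at every use site. The following is a minimal sketch of that idea only, not JSC code; the type and field names are illustrative.

    #include <cstdint>

    // Simplified stand-in for CLoopRegister: every view of the register shares
    // the same storage. Reading a member other than the one last written is
    // type punning; like the real C loop, this relies on the compilers the
    // project supports tolerating it.
    struct PseudoRegister {
        union {
            intptr_t i;
            uintptr_t u;
            int8_t* i8p;
            void* vp;
        };
    };

    int main()
    {
        int slots[2] = { 10, 20 };
        PseudoRegister r;
        r.vp = slots;                        // write the register as a pointer
        r.i += sizeof(int);                  // advance it with integer arithmetic
        return *static_cast<int*>(r.vp) == 20 ? 0 : 1; // now points at slots[1]
    }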
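The FETCH_OPCODE()/NEXT_INSTRUCTION() macros and the isInitializationPass parameter in the diff implement threaded dispatch: when ENABLE(COMPUTED_GOTO_OPCODES) is set, each opcode is dispatched through a handler-label address using the GCC/Clang "labels as values" extension, and an initialization pass first harvests those label addresses. Below is a self-contained sketch of that pattern under those assumptions; cloopExecute, opcodeTable, and the toy opcodes are invented for illustration and are not JSC's API.

    #include <cstdio>

    enum OpcodeID { op_add_one, op_halt, numOpcodeIDs };

    static void* opcodeTable[numOpcodeIDs]; // handler label addresses, filled on init

    static int cloopExecute(const int* bytecode, bool isInitializationPass)
    {
        if (isInitializationPass) {
            // Record the address of each handler label (GNU "labels as values"),
            // then return without executing anything.
            opcodeTable[op_add_one] = &&label_add_one;
            opcodeTable[op_halt] = &&label_halt;
            return 0;
        }

        int acc = 0;
        const int* pc = bytecode; // the virtual PC

    #define FETCH_OPCODE() opcodeTable[*pc]
    #define NEXT_INSTRUCTION() do { goto *FETCH_OPCODE(); } while (false)

        NEXT_INSTRUCTION();

    label_add_one:
        ++acc;
        ++pc;
        NEXT_INSTRUCTION();

    label_halt:
        return acc;

    #undef NEXT_INSTRUCTION
    #undef FETCH_OPCODE
    }

    int main()
    {
        cloopExecute(nullptr, true); // initialization pass: build the opcode table
        const int program[] = { op_add_one, op_add_one, op_halt };
        std::printf("%d\n", cloopExecute(program, false)); // prints 2
    }

Without computed gotos, the same macros fall back to a switch-based dispatch loop, which is the other branch the C loop's DISPATCH_OPCODE() supports.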
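The two 64-bit tag registers initialized above (tagTypeNumber = 0xFFFF000000000000, tagMask = 0xFFFF000000000002) mirror the constants the asm LLInt keeps in real registers for JSValue type checks. The sketch below shows the checks those masks are used for, as described in JSC's JSCJSValue.h; the helper names here are illustrative, and the encoding details should be taken from that header rather than from this note.

    #include <cassert>
    #include <cstdint>

    static constexpr uint64_t tagTypeNumber = 0xFFFF000000000000ull;
    static constexpr uint64_t tagMask       = 0xFFFF000000000002ull;

    // An encoded value is an int32 when all tagTypeNumber bits are set,
    // a number (int32 or double) when any of them is set, and a cell
    // pointer when none of the tagMask bits is set.
    static bool isInt32(uint64_t bits)  { return (bits & tagTypeNumber) == tagTypeNumber; }
    static bool isNumber(uint64_t bits) { return bits & tagTypeNumber; }
    static bool isCell(uint64_t bits)   { return !(bits & tagMask); }

    int main()
    {
        uint64_t encodedInt = tagTypeNumber | 42u;  // how a boxed int32 looks
        assert(isInt32(encodedInt) && isNumber(encodedInt) && !isCell(encodedInt));

        uint64_t fakeCell = 0x7f0000001000ull;      // a hypothetical cell pointer
        assert(isCell(fakeCell) && !isNumber(fakeCell));
    }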
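The doReturnHelper block added in this diff maps the caller's VPC, a real Instruction*, back to the instruction index the 64-bit dispatch expects: subtract the CodeBlock's instruction base and shift right by 3, because each Instruction slot is 8 bytes on a 64-bit target. A small sketch of that arithmetic follows; the Instruction struct and helper name here are stand-ins, not JSC's types.

    #include <cassert>
    #include <cstdint>

    struct Instruction { void* u; }; // 8 bytes on a 64-bit target, matching the ">>= 3"

    static intptr_t bytecodeOffsetFromVPC(const Instruction* base, const Instruction* vpc)
    {
        // Byte distance from the instruction stream base, then divide by the
        // 8-byte slot size to get the index used with rBasePC.
        intptr_t byteOffset = reinterpret_cast<const int8_t*>(vpc)
            - reinterpret_cast<const int8_t*>(base);
        return byteOffset >> 3;
    }

    int main()
    {
        Instruction instructions[16] = {};
        assert(bytecodeOffsetFromVPC(instructions, &instructions[5]) == 5);
    }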