Diffstat (limited to 'Source/JavaScriptCore/bytecode')
-rw-r--r--  Source/JavaScriptCore/bytecode/CodeBlock.cpp          2337
-rw-r--r--  Source/JavaScriptCore/bytecode/CodeBlock.h            1519
-rw-r--r--  Source/JavaScriptCore/bytecode/CodeOrigin.h            145
-rw-r--r--  Source/JavaScriptCore/bytecode/DFGExitProfile.cpp       69
-rw-r--r--  Source/JavaScriptCore/bytecode/DFGExitProfile.h        190
-rw-r--r--  Source/JavaScriptCore/bytecode/DataFormat.h            181
-rw-r--r--  Source/JavaScriptCore/bytecode/EvalCodeCache.h          97
-rw-r--r--  Source/JavaScriptCore/bytecode/Instruction.h           202
-rw-r--r--  Source/JavaScriptCore/bytecode/JumpTable.cpp            47
-rw-r--r--  Source/JavaScriptCore/bytecode/JumpTable.h             103
-rw-r--r--  Source/JavaScriptCore/bytecode/Opcode.cpp              191
-rw-r--r--  Source/JavaScriptCore/bytecode/Opcode.h                278
-rw-r--r--  Source/JavaScriptCore/bytecode/PredictedType.cpp       235
-rw-r--r--  Source/JavaScriptCore/bytecode/PredictedType.h         242
-rw-r--r--  Source/JavaScriptCore/bytecode/PredictionTracker.h      76
-rw-r--r--  Source/JavaScriptCore/bytecode/SamplingTool.cpp        480
-rw-r--r--  Source/JavaScriptCore/bytecode/SamplingTool.h          363
-rw-r--r--  Source/JavaScriptCore/bytecode/StructureStubInfo.cpp   119
-rw-r--r--  Source/JavaScriptCore/bytecode/StructureStubInfo.h     238
-rw-r--r--  Source/JavaScriptCore/bytecode/ValueProfile.cpp         52
-rw-r--r--  Source/JavaScriptCore/bytecode/ValueProfile.h          166
-rw-r--r--  Source/JavaScriptCore/bytecode/ValueRecovery.h         337
-rw-r--r--  Source/JavaScriptCore/bytecode/VirtualRegister.h        40
23 files changed, 7707 insertions, 0 deletions
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
new file mode 100644
index 000000000..a14ce64a1
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
@@ -0,0 +1,2337 @@
+/*
+ * Copyright (C) 2008, 2009, 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CodeBlock.h"
+
+#include "BytecodeGenerator.h"
+#include "DFGCapabilities.h"
+#include "DFGNode.h"
+#include "DFGRepatch.h"
+#include "Debugger.h"
+#include "Interpreter.h"
+#include "JIT.h"
+#include "JITStubs.h"
+#include "JSActivation.h"
+#include "JSFunction.h"
+#include "JSStaticScopeObject.h"
+#include "JSValue.h"
+#include "RepatchBuffer.h"
+#include "UStringConcatenate.h"
+#include <stdio.h>
+#include <wtf/StringExtras.h>
+
+#if ENABLE(DFG_JIT)
+#include "DFGOperations.h"
+#endif
+
+#define DUMP_CODE_BLOCK_STATISTICS 0
+
+namespace JSC {
+
+#if ENABLE(DFG_JIT)
+using namespace DFG;
+#endif
+
+#if !defined(NDEBUG) || ENABLE(OPCODE_SAMPLING)
+
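+// Escapes embedded double quotes so a dumped string constant stays well-formed
+// once valueToSourceString() wraps it in quotes; e.g. say "hi" becomes
+// say "\""hi"\"".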
+static UString escapeQuotes(const UString& str)
+{
+ UString result = str;
+ size_t pos = 0;
+ while ((pos = result.find('\"', pos)) != notFound) {
+ result = makeUString(result.substringSharingImpl(0, pos), "\"\\\"\"", result.substringSharingImpl(pos + 1));
+ pos += 4;
+ }
+ return result;
+}
+
+static UString valueToSourceString(ExecState* exec, JSValue val)
+{
+ if (!val)
+ return "0";
+
+ if (val.isString())
+ return makeUString("\"", escapeQuotes(val.toString(exec)), "\"");
+
+ return val.description();
+}
+
+static CString constantName(ExecState* exec, int k, JSValue value)
+{
+ return makeUString(valueToSourceString(exec, value), "(@k", UString::number(k - FirstConstantRegisterIndex), ")").utf8();
+}
+
+static CString idName(int id0, const Identifier& ident)
+{
+ return makeUString(ident.ustring(), "(@id", UString::number(id0), ")").utf8();
+}
+
+CString CodeBlock::registerName(ExecState* exec, int r) const
+{
+ if (r == missingThisObjectMarker())
+ return "<null>";
+
+ if (isConstantRegisterIndex(r))
+ return constantName(exec, r, getConstant(r));
+
+ return makeUString("r", UString::number(r)).utf8();
+}
+
+static UString regexpToSourceString(RegExp* regExp)
+{
+ char postfix[5] = { '/', 0, 0, 0, 0 };
+ int index = 1;
+ if (regExp->global())
+ postfix[index++] = 'g';
+ if (regExp->ignoreCase())
+ postfix[index++] = 'i';
+ if (regExp->multiline())
+ postfix[index] = 'm';
+
+ return makeUString("/", regExp->pattern(), postfix);
+}
+
+static CString regexpName(int re, RegExp* regexp)
+{
+ return makeUString(regexpToSourceString(regexp), "(@re", UString::number(re), ")").utf8();
+}
+
+static UString pointerToSourceString(void* p)
+{
+ char buffer[2 + 2 * sizeof(void*) + 1]; // 0x [two characters per byte] \0
+ snprintf(buffer, sizeof(buffer), "%p", p);
+ return buffer;
+}
+
+NEVER_INLINE static const char* debugHookName(int debugHookID)
+{
+ switch (static_cast<DebugHookID>(debugHookID)) {
+ case DidEnterCallFrame:
+ return "didEnterCallFrame";
+ case WillLeaveCallFrame:
+ return "willLeaveCallFrame";
+ case WillExecuteStatement:
+ return "willExecuteStatement";
+ case WillExecuteProgram:
+ return "willExecuteProgram";
+ case DidExecuteProgram:
+ return "didExecuteProgram";
+ case DidReachBreakpoint:
+ return "didReachBreakpoint";
+ }
+
+ ASSERT_NOT_REACHED();
+ return "";
+}
+
+void CodeBlock::printUnaryOp(ExecState* exec, int location, Vector<Instruction>::const_iterator& it, const char* op) const
+{
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+
+ printf("[%4d] %s\t\t %s, %s\n", location, op, registerName(exec, r0).data(), registerName(exec, r1).data());
+}
+
+void CodeBlock::printBinaryOp(ExecState* exec, int location, Vector<Instruction>::const_iterator& it, const char* op) const
+{
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ printf("[%4d] %s\t\t %s, %s, %s\n", location, op, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
+}
+
+void CodeBlock::printConditionalJump(ExecState* exec, const Vector<Instruction>::const_iterator&, Vector<Instruction>::const_iterator& it, int location, const char* op) const
+{
+ int r0 = (++it)->u.operand;
+ int offset = (++it)->u.operand;
+ printf("[%4d] %s\t\t %s, %d(->%d)\n", location, op, registerName(exec, r0).data(), offset, location + offset);
+}
+
+void CodeBlock::printGetByIdOp(ExecState* exec, int location, Vector<Instruction>::const_iterator& it, const char* op) const
+{
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ printf("[%4d] %s\t %s, %s, %s\n", location, op, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data());
+ it += 4;
+}
+
+void CodeBlock::printPutByIdOp(ExecState* exec, int location, Vector<Instruction>::const_iterator& it, const char* op) const
+{
+ int r0 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ printf("[%4d] %s\t %s, %s, %s\n", location, op, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data());
+ it += 5;
+}
+
+#if ENABLE(JIT)
+static bool isGlobalResolve(OpcodeID opcodeID)
+{
+ return opcodeID == op_resolve_global || opcodeID == op_resolve_global_dynamic;
+}
+
+static bool isPropertyAccess(OpcodeID opcodeID)
+{
+ switch (opcodeID) {
+ case op_get_by_id_self:
+ case op_get_by_id_proto:
+ case op_get_by_id_chain:
+ case op_put_by_id_transition:
+ case op_put_by_id_replace:
+ case op_get_by_id:
+ case op_put_by_id:
+ case op_get_by_id_generic:
+ case op_put_by_id_generic:
+ case op_get_array_length:
+ case op_get_string_length:
+ return true;
+ default:
+ return false;
+ }
+}
+
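+// Returns the bytecode offset of the nth (1-based) instruction matching
+// |predicate|. The JIT info tables are stored in instruction order, so this
+// maps each table entry back to the opcode that owns it.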
+static unsigned instructionOffsetForNth(ExecState* exec, const Vector<Instruction>& instructions, int nth, bool (*predicate)(OpcodeID))
+{
+ size_t i = 0;
+ while (i < instructions.size()) {
+ OpcodeID currentOpcode = exec->interpreter()->getOpcodeID(instructions[i].u.opcode);
+ if (predicate(currentOpcode)) {
+ if (!--nth)
+ return i;
+ }
+ i += opcodeLengths[currentOpcode];
+ }
+
+ ASSERT_NOT_REACHED();
+ return 0;
+}
+
+static void printGlobalResolveInfo(const GlobalResolveInfo& resolveInfo, unsigned instructionOffset)
+{
+ printf(" [%4d] %s: %s\n", instructionOffset, "resolve_global", pointerToSourceString(resolveInfo.structure).utf8().data());
+}
+
+static void printStructureStubInfo(const StructureStubInfo& stubInfo, unsigned instructionOffset)
+{
+ switch (stubInfo.accessType) {
+ case access_get_by_id_self:
+ printf(" [%4d] %s: %s\n", instructionOffset, "get_by_id_self", pointerToSourceString(stubInfo.u.getByIdSelf.baseObjectStructure).utf8().data());
+ return;
+ case access_get_by_id_proto:
+ printf(" [%4d] %s: %s, %s\n", instructionOffset, "get_by_id_proto", pointerToSourceString(stubInfo.u.getByIdProto.baseObjectStructure).utf8().data(), pointerToSourceString(stubInfo.u.getByIdProto.prototypeStructure).utf8().data());
+ return;
+ case access_get_by_id_chain:
+ printf(" [%4d] %s: %s, %s\n", instructionOffset, "get_by_id_chain", pointerToSourceString(stubInfo.u.getByIdChain.baseObjectStructure).utf8().data(), pointerToSourceString(stubInfo.u.getByIdChain.chain).utf8().data());
+ return;
+ case access_get_by_id_self_list:
+ printf(" [%4d] %s: %s (%d)\n", instructionOffset, "op_get_by_id_self_list", pointerToSourceString(stubInfo.u.getByIdSelfList.structureList).utf8().data(), stubInfo.u.getByIdSelfList.listSize);
+ return;
+ case access_get_by_id_proto_list:
+ printf(" [%4d] %s: %s (%d)\n", instructionOffset, "op_get_by_id_proto_list", pointerToSourceString(stubInfo.u.getByIdProtoList.structureList).utf8().data(), stubInfo.u.getByIdProtoList.listSize);
+ return;
+ case access_put_by_id_transition_normal:
+ case access_put_by_id_transition_direct:
+ printf(" [%4d] %s: %s, %s, %s\n", instructionOffset, "put_by_id_transition", pointerToSourceString(stubInfo.u.putByIdTransition.previousStructure).utf8().data(), pointerToSourceString(stubInfo.u.putByIdTransition.structure).utf8().data(), pointerToSourceString(stubInfo.u.putByIdTransition.chain).utf8().data());
+ return;
+ case access_put_by_id_replace:
+ printf(" [%4d] %s: %s\n", instructionOffset, "put_by_id_replace", pointerToSourceString(stubInfo.u.putByIdReplace.baseObjectStructure).utf8().data());
+ return;
+ case access_unset:
+ printf(" [%4d] %s\n", instructionOffset, "unset");
+ return;
+ case access_get_by_id_generic:
+ printf(" [%4d] %s\n", instructionOffset, "get_by_id_generic");
+ return;
+ case access_put_by_id_generic:
+ printf(" [%4d] %s\n", instructionOffset, "put_by_id_generic");
+ return;
+ case access_get_array_length:
+ printf(" [%4d] %s\n", instructionOffset, "get_array_length");
+ return;
+ case access_get_string_length:
+ printf(" [%4d] %s\n", instructionOffset, "get_string_length");
+ return;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+}
+#endif
+
+void CodeBlock::printStructure(const char* name, const Instruction* vPC, int operand) const
+{
+ unsigned instructionOffset = vPC - instructions().begin();
+ printf(" [%4d] %s: %s\n", instructionOffset, name, pointerToSourceString(vPC[operand].u.structure).utf8().data());
+}
+
+void CodeBlock::printStructures(const Instruction* vPC) const
+{
+ Interpreter* interpreter = m_globalData->interpreter;
+ unsigned instructionOffset = vPC - instructions().begin();
+
+ if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id)) {
+ printStructure("get_by_id", vPC, 4);
+ return;
+ }
+ if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_self)) {
+ printStructure("get_by_id_self", vPC, 4);
+ return;
+ }
+ if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_proto)) {
+ printf(" [%4d] %s: %s, %s\n", instructionOffset, "get_by_id_proto", pointerToSourceString(vPC[4].u.structure).utf8().data(), pointerToSourceString(vPC[5].u.structure).utf8().data());
+ return;
+ }
+ if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_transition)) {
+ printf(" [%4d] %s: %s, %s, %s\n", instructionOffset, "put_by_id_transition", pointerToSourceString(vPC[4].u.structure).utf8().data(), pointerToSourceString(vPC[5].u.structure).utf8().data(), pointerToSourceString(vPC[6].u.structureChain).utf8().data());
+ return;
+ }
+ if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_chain)) {
+ printf(" [%4d] %s: %s, %s\n", instructionOffset, "get_by_id_chain", pointerToSourceString(vPC[4].u.structure).utf8().data(), pointerToSourceString(vPC[5].u.structureChain).utf8().data());
+ return;
+ }
+ if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id)) {
+ printStructure("put_by_id", vPC, 4);
+ return;
+ }
+ if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_replace)) {
+ printStructure("put_by_id_replace", vPC, 4);
+ return;
+ }
+ if (vPC[0].u.opcode == interpreter->getOpcode(op_resolve_global)) {
+ printStructure("resolve_global", vPC, 4);
+ return;
+ }
+ if (vPC[0].u.opcode == interpreter->getOpcode(op_resolve_global_dynamic)) {
+ printStructure("resolve_global_dynamic", vPC, 4);
+ return;
+ }
+
+ // These instructions don't ref Structures.
+ ASSERT(vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_generic) || vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_generic) || vPC[0].u.opcode == interpreter->getOpcode(op_call) || vPC[0].u.opcode == interpreter->getOpcode(op_call_eval) || vPC[0].u.opcode == interpreter->getOpcode(op_construct));
+}
+
+void CodeBlock::dump(ExecState* exec) const
+{
+ if (!m_instructions) {
+ printf("No instructions available.\n");
+ return;
+ }
+
+ size_t instructionCount = 0;
+
+ for (size_t i = 0; i < instructions().size(); i += opcodeLengths[exec->interpreter()->getOpcodeID(instructions()[i].u.opcode)])
+ ++instructionCount;
+
+ printf("%lu m_instructions; %lu bytes at %p; %d parameter(s); %d callee register(s)\n\n",
+ static_cast<unsigned long>(instructionCount),
+ static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
+ this, m_numParameters, m_numCalleeRegisters);
+
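+ // dump() advances |it| past the current instruction's operands; the loop's
+ // ++it then steps to the next opcode.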
+ Vector<Instruction>::const_iterator begin = instructions().begin();
+ Vector<Instruction>::const_iterator end = instructions().end();
+ for (Vector<Instruction>::const_iterator it = begin; it != end; ++it)
+ dump(exec, begin, it);
+
+ if (!m_identifiers.isEmpty()) {
+ printf("\nIdentifiers:\n");
+ size_t i = 0;
+ do {
+ printf(" id%u = %s\n", static_cast<unsigned>(i), m_identifiers[i].ustring().utf8().data());
+ ++i;
+ } while (i != m_identifiers.size());
+ }
+
+ if (!m_constantRegisters.isEmpty()) {
+ printf("\nConstants:\n");
+ size_t i = 0;
+ do {
+ printf(" k%u = %s\n", static_cast<unsigned>(i), valueToSourceString(exec, m_constantRegisters[i].get()).utf8().data());
+ ++i;
+ } while (i < m_constantRegisters.size());
+ }
+
+ if (m_rareData && !m_rareData->m_regexps.isEmpty()) {
+ printf("\nm_regexps:\n");
+ size_t i = 0;
+ do {
+ printf(" re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_rareData->m_regexps[i].get()).utf8().data());
+ ++i;
+ } while (i < m_rareData->m_regexps.size());
+ }
+
+#if ENABLE(JIT)
+ if (!m_globalResolveInfos.isEmpty() || !m_structureStubInfos.isEmpty())
+ printf("\nStructures:\n");
+
+ if (!m_globalResolveInfos.isEmpty()) {
+ size_t i = 0;
+ do {
+ printGlobalResolveInfo(m_globalResolveInfos[i], instructionOffsetForNth(exec, instructions(), i + 1, isGlobalResolve));
+ ++i;
+ } while (i < m_globalResolveInfos.size());
+ }
+ if (!m_structureStubInfos.isEmpty()) {
+ size_t i = 0;
+ do {
+ printStructureStubInfo(m_structureStubInfos[i], instructionOffsetForNth(exec, instructions(), i + 1, isPropertyAccess));
+ ++i;
+ } while (i < m_structureStubInfos.size());
+ }
+#endif
+#if ENABLE(INTERPRETER)
+ if (!m_globalResolveInstructions.isEmpty() || !m_propertyAccessInstructions.isEmpty())
+ printf("\nStructures:\n");
+
+ if (!m_globalResolveInstructions.isEmpty()) {
+ size_t i = 0;
+ do {
+ printStructures(&instructions()[m_globalResolveInstructions[i]]);
+ ++i;
+ } while (i < m_globalResolveInstructions.size());
+ }
+ if (!m_propertyAccessInstructions.isEmpty()) {
+ size_t i = 0;
+ do {
+ printStructures(&instructions()[m_propertyAccessInstructions[i]]);
+ ++i;
+ } while (i < m_propertyAccessInstructions.size());
+ }
+#endif
+
+ if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
+ printf("\nException Handlers:\n");
+ unsigned i = 0;
+ do {
+ printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] }\n", i + 1, m_rareData->m_exceptionHandlers[i].start, m_rareData->m_exceptionHandlers[i].end, m_rareData->m_exceptionHandlers[i].target);
+ ++i;
+ } while (i < m_rareData->m_exceptionHandlers.size());
+ }
+
+ if (m_rareData && !m_rareData->m_immediateSwitchJumpTables.isEmpty()) {
+ printf("Immediate Switch Jump Tables:\n");
+ unsigned i = 0;
+ do {
+ printf(" %1d = {\n", i);
+ int entry = 0;
+ Vector<int32_t>::const_iterator end = m_rareData->m_immediateSwitchJumpTables[i].branchOffsets.end();
+ for (Vector<int32_t>::const_iterator iter = m_rareData->m_immediateSwitchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
+ if (!*iter)
+ continue;
+ printf("\t\t%4d => %04d\n", entry + m_rareData->m_immediateSwitchJumpTables[i].min, *iter);
+ }
+ printf(" }\n");
+ ++i;
+ } while (i < m_rareData->m_immediateSwitchJumpTables.size());
+ }
+
+ if (m_rareData && !m_rareData->m_characterSwitchJumpTables.isEmpty()) {
+ printf("\nCharacter Switch Jump Tables:\n");
+ unsigned i = 0;
+ do {
+ printf(" %1d = {\n", i);
+ int entry = 0;
+ Vector<int32_t>::const_iterator end = m_rareData->m_characterSwitchJumpTables[i].branchOffsets.end();
+ for (Vector<int32_t>::const_iterator iter = m_rareData->m_characterSwitchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
+ if (!*iter)
+ continue;
+ ASSERT(!((entry + m_rareData->m_characterSwitchJumpTables[i].min) & ~0xFFFF));
+ UChar ch = static_cast<UChar>(entry + m_rareData->m_characterSwitchJumpTables[i].min);
+ printf("\t\t\"%s\" => %04d\n", UString(&ch, 1).utf8().data(), *iter);
+ }
+ printf(" }\n");
+ ++i;
+ } while (i < m_rareData->m_characterSwitchJumpTables.size());
+ }
+
+ if (m_rareData && !m_rareData->m_stringSwitchJumpTables.isEmpty()) {
+ printf("\nString Switch Jump Tables:\n");
+ unsigned i = 0;
+ do {
+ printf(" %1d = {\n", i);
+ StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end();
+ for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter)
+ printf("\t\t\"%s\" => %04d\n", UString(iter->first).utf8().data(), iter->second.branchOffset);
+ printf(" }\n");
+ ++i;
+ } while (i < m_rareData->m_stringSwitchJumpTables.size());
+ }
+
+ printf("\n");
+}
+
+void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& begin, Vector<Instruction>::const_iterator& it) const
+{
+ int location = it - begin;
+ switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
+ case op_enter: {
+ printf("[%4d] enter\n", location);
+ break;
+ }
+ case op_create_activation: {
+ int r0 = (++it)->u.operand;
+ printf("[%4d] create_activation %s\n", location, registerName(exec, r0).data());
+ break;
+ }
+ case op_create_arguments: {
+ int r0 = (++it)->u.operand;
+ printf("[%4d] create_arguments\t %s\n", location, registerName(exec, r0).data());
+ break;
+ }
+ case op_init_lazy_reg: {
+ int r0 = (++it)->u.operand;
+ printf("[%4d] init_lazy_reg\t %s\n", location, registerName(exec, r0).data());
+ break;
+ }
+ case op_get_callee: {
+ int r0 = (++it)->u.operand;
+ printf("[%4d] op_get_callee %s\n", location, registerName(exec, r0).data());
+ break;
+ }
+ case op_create_this: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ printf("[%4d] create_this %s %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data());
+ break;
+ }
+ case op_convert_this: {
+ int r0 = (++it)->u.operand;
+ printf("[%4d] convert_this %s\n", location, registerName(exec, r0).data());
+ break;
+ }
+ case op_new_object: {
+ int r0 = (++it)->u.operand;
+ printf("[%4d] new_object\t %s\n", location, registerName(exec, r0).data());
+ break;
+ }
+ case op_new_array: {
+ int dst = (++it)->u.operand;
+ int argv = (++it)->u.operand;
+ int argc = (++it)->u.operand;
+ printf("[%4d] new_array\t %s, %s, %d\n", location, registerName(exec, dst).data(), registerName(exec, argv).data(), argc);
+ break;
+ }
+ case op_new_array_buffer: {
+ int dst = (++it)->u.operand;
+ int argv = (++it)->u.operand;
+ int argc = (++it)->u.operand;
+ printf("[%4d] new_array_buffer %s, %d, %d\n", location, registerName(exec, dst).data(), argv, argc);
+ break;
+ }
+ case op_new_regexp: {
+ int r0 = (++it)->u.operand;
+ int re0 = (++it)->u.operand;
+ printf("[%4d] new_regexp\t %s, ", location, registerName(exec, r0).data());
+ if (re0 >= 0 && re0 < (int)numberOfRegExps())
+ printf("%s\n", regexpName(re0, regexp(re0)).data());
+ else
+ printf("bad_regexp(%d)\n", re0);
+ break;
+ }
+ case op_mov: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ printf("[%4d] mov\t\t %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data());
+ break;
+ }
+ case op_not: {
+ printUnaryOp(exec, location, it, "not");
+ break;
+ }
+ case op_eq: {
+ printBinaryOp(exec, location, it, "eq");
+ break;
+ }
+ case op_eq_null: {
+ printUnaryOp(exec, location, it, "eq_null");
+ break;
+ }
+ case op_neq: {
+ printBinaryOp(exec, location, it, "neq");
+ break;
+ }
+ case op_neq_null: {
+ printUnaryOp(exec, location, it, "neq_null");
+ break;
+ }
+ case op_stricteq: {
+ printBinaryOp(exec, location, it, "stricteq");
+ break;
+ }
+ case op_nstricteq: {
+ printBinaryOp(exec, location, it, "nstricteq");
+ break;
+ }
+ case op_less: {
+ printBinaryOp(exec, location, it, "less");
+ break;
+ }
+ case op_lesseq: {
+ printBinaryOp(exec, location, it, "lesseq");
+ break;
+ }
+ case op_greater: {
+ printBinaryOp(exec, location, it, "greater");
+ break;
+ }
+ case op_greatereq: {
+ printBinaryOp(exec, location, it, "greatereq");
+ break;
+ }
+ case op_pre_inc: {
+ int r0 = (++it)->u.operand;
+ printf("[%4d] pre_inc\t\t %s\n", location, registerName(exec, r0).data());
+ break;
+ }
+ case op_pre_dec: {
+ int r0 = (++it)->u.operand;
+ printf("[%4d] pre_dec\t\t %s\n", location, registerName(exec, r0).data());
+ break;
+ }
+ case op_post_inc: {
+ printUnaryOp(exec, location, it, "post_inc");
+ break;
+ }
+ case op_post_dec: {
+ printUnaryOp(exec, location, it, "post_dec");
+ break;
+ }
+ case op_to_jsnumber: {
+ printUnaryOp(exec, location, it, "to_jsnumber");
+ break;
+ }
+ case op_negate: {
+ printUnaryOp(exec, location, it, "negate");
+ break;
+ }
+ case op_add: {
+ printBinaryOp(exec, location, it, "add");
+ ++it;
+ break;
+ }
+ case op_mul: {
+ printBinaryOp(exec, location, it, "mul");
+ ++it;
+ break;
+ }
+ case op_div: {
+ printBinaryOp(exec, location, it, "div");
+ ++it;
+ break;
+ }
+ case op_mod: {
+ printBinaryOp(exec, location, it, "mod");
+ break;
+ }
+ case op_sub: {
+ printBinaryOp(exec, location, it, "sub");
+ ++it;
+ break;
+ }
+ case op_lshift: {
+ printBinaryOp(exec, location, it, "lshift");
+ break;
+ }
+ case op_rshift: {
+ printBinaryOp(exec, location, it, "rshift");
+ break;
+ }
+ case op_urshift: {
+ printBinaryOp(exec, location, it, "urshift");
+ break;
+ }
+ case op_bitand: {
+ printBinaryOp(exec, location, it, "bitand");
+ ++it;
+ break;
+ }
+ case op_bitxor: {
+ printBinaryOp(exec, location, it, "bitxor");
+ ++it;
+ break;
+ }
+ case op_bitor: {
+ printBinaryOp(exec, location, it, "bitor");
+ ++it;
+ break;
+ }
+ case op_bitnot: {
+ printUnaryOp(exec, location, it, "bitnot");
+ break;
+ }
+ case op_check_has_instance: {
+ int base = (++it)->u.operand;
+ printf("[%4d] check_has_instance\t\t %s\n", location, registerName(exec, base).data());
+ break;
+ }
+ case op_instanceof: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ int r3 = (++it)->u.operand;
+ printf("[%4d] instanceof\t\t %s, %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), registerName(exec, r3).data());
+ break;
+ }
+ case op_typeof: {
+ printUnaryOp(exec, location, it, "typeof");
+ break;
+ }
+ case op_is_undefined: {
+ printUnaryOp(exec, location, it, "is_undefined");
+ break;
+ }
+ case op_is_boolean: {
+ printUnaryOp(exec, location, it, "is_boolean");
+ break;
+ }
+ case op_is_number: {
+ printUnaryOp(exec, location, it, "is_number");
+ break;
+ }
+ case op_is_string: {
+ printUnaryOp(exec, location, it, "is_string");
+ break;
+ }
+ case op_is_object: {
+ printUnaryOp(exec, location, it, "is_object");
+ break;
+ }
+ case op_is_function: {
+ printUnaryOp(exec, location, it, "is_function");
+ break;
+ }
+ case op_in: {
+ printBinaryOp(exec, location, it, "in");
+ break;
+ }
+ case op_resolve: {
+ int r0 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ printf("[%4d] resolve\t\t %s, %s\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data());
+ break;
+ }
+ case op_resolve_skip: {
+ int r0 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ int skipLevels = (++it)->u.operand;
+ printf("[%4d] resolve_skip\t %s, %s, %d\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), skipLevels);
+ break;
+ }
+ case op_resolve_global: {
+ int r0 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ printf("[%4d] resolve_global\t %s, %s\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data());
+ it += 2;
+ break;
+ }
+ case op_resolve_global_dynamic: {
+ int r0 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ JSValue scope = JSValue((++it)->u.jsCell.get());
+ ++it;
+ int depth = (++it)->u.operand;
+ printf("[%4d] resolve_global_dynamic\t %s, %s, %s, %d\n", location, registerName(exec, r0).data(), valueToSourceString(exec, scope).utf8().data(), idName(id0, m_identifiers[id0]).data(), depth);
+ break;
+ }
+ case op_get_scoped_var: {
+ int r0 = (++it)->u.operand;
+ int index = (++it)->u.operand;
+ int skipLevels = (++it)->u.operand;
+ printf("[%4d] get_scoped_var\t %s, %d, %d\n", location, registerName(exec, r0).data(), index, skipLevels);
+ break;
+ }
+ case op_put_scoped_var: {
+ int index = (++it)->u.operand;
+ int skipLevels = (++it)->u.operand;
+ int r0 = (++it)->u.operand;
+ printf("[%4d] put_scoped_var\t %d, %d, %s\n", location, index, skipLevels, registerName(exec, r0).data());
+ break;
+ }
+ case op_get_global_var: {
+ int r0 = (++it)->u.operand;
+ int index = (++it)->u.operand;
+ printf("[%4d] get_global_var\t %s, %d\n", location, registerName(exec, r0).data(), index);
+ break;
+ }
+ case op_put_global_var: {
+ int index = (++it)->u.operand;
+ int r0 = (++it)->u.operand;
+ printf("[%4d] put_global_var\t %d, %s\n", location, index, registerName(exec, r0).data());
+ break;
+ }
+ case op_resolve_base: {
+ int r0 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ int isStrict = (++it)->u.operand;
+ printf("[%4d] resolve_base%s\t %s, %s\n", location, isStrict ? "_strict" : "", registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data());
+ break;
+ }
+ case op_ensure_property_exists: {
+ int r0 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ printf("[%4d] ensure_property_exists\t %s, %s\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data());
+ break;
+ }
+ case op_resolve_with_base: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ printf("[%4d] resolve_with_base %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data());
+ break;
+ }
+ case op_resolve_with_this: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ printf("[%4d] resolve_with_this %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data());
+ break;
+ }
+ case op_get_by_id: {
+ printGetByIdOp(exec, location, it, "get_by_id");
+ break;
+ }
+ case op_get_by_id_self: {
+ printGetByIdOp(exec, location, it, "get_by_id_self");
+ break;
+ }
+ case op_get_by_id_proto: {
+ printGetByIdOp(exec, location, it, "get_by_id_proto");
+ break;
+ }
+ case op_get_by_id_chain: {
+ printGetByIdOp(exec, location, it, "get_by_id_chain");
+ break;
+ }
+ case op_get_by_id_getter_self: {
+ printGetByIdOp(exec, location, it, "get_by_id_getter_self");
+ break;
+ }
+ case op_get_by_id_getter_proto: {
+ printGetByIdOp(exec, location, it, "get_by_id_getter_proto");
+ break;
+ }
+ case op_get_by_id_getter_chain: {
+ printGetByIdOp(exec, location, it, "get_by_id_getter_chain");
+ break;
+ }
+ case op_get_by_id_custom_self: {
+ printGetByIdOp(exec, location, it, "get_by_id_custom_self");
+ break;
+ }
+ case op_get_by_id_custom_proto: {
+ printGetByIdOp(exec, location, it, "get_by_id_custom_proto");
+ break;
+ }
+ case op_get_by_id_custom_chain: {
+ printGetByIdOp(exec, location, it, "get_by_id_custom_chain");
+ break;
+ }
+ case op_get_by_id_generic: {
+ printGetByIdOp(exec, location, it, "get_by_id_generic");
+ break;
+ }
+ case op_get_array_length: {
+ printGetByIdOp(exec, location, it, "get_array_length");
+ break;
+ }
+ case op_get_string_length: {
+ printGetByIdOp(exec, location, it, "get_string_length");
+ break;
+ }
+ case op_get_arguments_length: {
+ printUnaryOp(exec, location, it, "get_arguments_length");
+ ++it;
+ break;
+ }
+ case op_put_by_id: {
+ printPutByIdOp(exec, location, it, "put_by_id");
+ break;
+ }
+ case op_put_by_id_replace: {
+ printPutByIdOp(exec, location, it, "put_by_id_replace");
+ break;
+ }
+ case op_put_by_id_transition: {
+ printPutByIdOp(exec, location, it, "put_by_id_transition");
+ break;
+ }
+ case op_put_by_id_generic: {
+ printPutByIdOp(exec, location, it, "put_by_id_generic");
+ break;
+ }
+ case op_put_getter: {
+ int r0 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ printf("[%4d] put_getter\t %s, %s, %s\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data());
+ break;
+ }
+ case op_put_setter: {
+ int r0 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ printf("[%4d] put_setter\t %s, %s, %s\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data());
+ break;
+ }
+ case op_method_check: {
+ printf("[%4d] method_check\n", location);
+ break;
+ }
+ case op_del_by_id: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ printf("[%4d] del_by_id\t %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data());
+ break;
+ }
+ case op_get_by_val: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ printf("[%4d] get_by_val\t %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
+ break;
+ }
+ case op_get_argument_by_val: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ printf("[%4d] get_argument_by_val\t %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
+ break;
+ }
+ case op_get_by_pname: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ int r3 = (++it)->u.operand;
+ int r4 = (++it)->u.operand;
+ int r5 = (++it)->u.operand;
+ printf("[%4d] get_by_pname\t %s, %s, %s, %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), registerName(exec, r3).data(), registerName(exec, r4).data(), registerName(exec, r5).data());
+ break;
+ }
+ case op_put_by_val: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ printf("[%4d] put_by_val\t %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
+ break;
+ }
+ case op_del_by_val: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int r2 = (++it)->u.operand;
+ printf("[%4d] del_by_val\t %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
+ break;
+ }
+ case op_put_by_index: {
+ int r0 = (++it)->u.operand;
+ unsigned n0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ printf("[%4d] put_by_index\t %s, %u, %s\n", location, registerName(exec, r0).data(), n0, registerName(exec, r1).data());
+ break;
+ }
+ case op_jmp: {
+ int offset = (++it)->u.operand;
+ printf("[%4d] jmp\t\t %d(->%d)\n", location, offset, location + offset);
+ break;
+ }
+ case op_loop: {
+ int offset = (++it)->u.operand;
+ printf("[%4d] loop\t\t %d(->%d)\n", location, offset, location + offset);
+ break;
+ }
+ case op_jtrue: {
+ printConditionalJump(exec, begin, it, location, "jtrue");
+ break;
+ }
+ case op_loop_if_true: {
+ printConditionalJump(exec, begin, it, location, "loop_if_true");
+ break;
+ }
+ case op_loop_if_false: {
+ printConditionalJump(exec, begin, it, location, "loop_if_false");
+ break;
+ }
+ case op_jfalse: {
+ printConditionalJump(exec, begin, it, location, "jfalse");
+ break;
+ }
+ case op_jeq_null: {
+ printConditionalJump(exec, begin, it, location, "jeq_null");
+ break;
+ }
+ case op_jneq_null: {
+ printConditionalJump(exec, begin, it, location, "jneq_null");
+ break;
+ }
+ case op_jneq_ptr: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int offset = (++it)->u.operand;
+ printf("[%4d] jneq_ptr\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ break;
+ }
+ case op_jless: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int offset = (++it)->u.operand;
+ printf("[%4d] jless\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ break;
+ }
+ case op_jlesseq: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int offset = (++it)->u.operand;
+ printf("[%4d] jlesseq\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ break;
+ }
+ case op_jgreater: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int offset = (++it)->u.operand;
+ printf("[%4d] jgreater\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ break;
+ }
+ case op_jgreatereq: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int offset = (++it)->u.operand;
+ printf("[%4d] jgreatereq\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ break;
+ }
+ case op_jnless: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int offset = (++it)->u.operand;
+ printf("[%4d] jnless\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ break;
+ }
+ case op_jnlesseq: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int offset = (++it)->u.operand;
+ printf("[%4d] jnlesseq\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ break;
+ }
+ case op_jngreater: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int offset = (++it)->u.operand;
+ printf("[%4d] jngreater\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ break;
+ }
+ case op_jngreatereq: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int offset = (++it)->u.operand;
+ printf("[%4d] jngreatereq\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ break;
+ }
+ case op_loop_if_less: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int offset = (++it)->u.operand;
+ printf("[%4d] loop_if_less\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ break;
+ }
+ case op_loop_if_lesseq: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int offset = (++it)->u.operand;
+ printf("[%4d] loop_if_lesseq\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ break;
+ }
+ case op_loop_if_greater: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int offset = (++it)->u.operand;
+ printf("[%4d] loop_if_greater\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ break;
+ }
+ case op_loop_if_greatereq: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int offset = (++it)->u.operand;
+ printf("[%4d] loop_if_greatereq\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ break;
+ }
+ case op_loop_hint: {
+ printf("[%4d] loop_hint\n", location);
+ break;
+ }
+ case op_switch_imm: {
+ int tableIndex = (++it)->u.operand;
+ int defaultTarget = (++it)->u.operand;
+ int scrutineeRegister = (++it)->u.operand;
+ printf("[%4d] switch_imm\t %d, %d(->%d), %s\n", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data());
+ break;
+ }
+ case op_switch_char: {
+ int tableIndex = (++it)->u.operand;
+ int defaultTarget = (++it)->u.operand;
+ int scrutineeRegister = (++it)->u.operand;
+ printf("[%4d] switch_char\t %d, %d(->%d), %s\n", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data());
+ break;
+ }
+ case op_switch_string: {
+ int tableIndex = (++it)->u.operand;
+ int defaultTarget = (++it)->u.operand;
+ int scrutineeRegister = (++it)->u.operand;
+ printf("[%4d] switch_string\t %d, %d(->%d), %s\n", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data());
+ break;
+ }
+ case op_new_func: {
+ int r0 = (++it)->u.operand;
+ int f0 = (++it)->u.operand;
+ int shouldCheck = (++it)->u.operand;
+ printf("[%4d] new_func\t\t %s, f%d, %s\n", location, registerName(exec, r0).data(), f0, shouldCheck ? "<Checked>" : "<Unchecked>");
+ break;
+ }
+ case op_new_func_exp: {
+ int r0 = (++it)->u.operand;
+ int f0 = (++it)->u.operand;
+ printf("[%4d] new_func_exp\t %s, f%d\n", location, registerName(exec, r0).data(), f0);
+ break;
+ }
+ case op_call: {
+ int func = (++it)->u.operand;
+ int argCount = (++it)->u.operand;
+ int registerOffset = (++it)->u.operand;
+ printf("[%4d] call\t\t %s, %d, %d\n", location, registerName(exec, func).data(), argCount, registerOffset);
+ break;
+ }
+ case op_call_eval: {
+ int func = (++it)->u.operand;
+ int argCount = (++it)->u.operand;
+ int registerOffset = (++it)->u.operand;
+ printf("[%4d] call_eval\t %s, %d, %d\n", location, registerName(exec, func).data(), argCount, registerOffset);
+ break;
+ }
+ case op_call_varargs: {
+ int callee = (++it)->u.operand;
+ int thisValue = (++it)->u.operand;
+ int arguments = (++it)->u.operand;
+ int firstFreeRegister = (++it)->u.operand;
+ printf("[%4d] call_varargs\t %s, %s, %s, %d\n", location, registerName(exec, callee).data(), registerName(exec, thisValue).data(), registerName(exec, arguments).data(), firstFreeRegister);
+ break;
+ }
+ case op_tear_off_activation: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ printf("[%4d] tear_off_activation\t %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data());
+ break;
+ }
+ case op_tear_off_arguments: {
+ int r0 = (++it)->u.operand;
+ printf("[%4d] tear_off_arguments\t %s\n", location, registerName(exec, r0).data());
+ break;
+ }
+ case op_ret: {
+ int r0 = (++it)->u.operand;
+ printf("[%4d] ret\t\t %s\n", location, registerName(exec, r0).data());
+ break;
+ }
+ case op_call_put_result: {
+ int r0 = (++it)->u.operand;
+ printf("[%4d] op_call_put_result\t\t %s\n", location, registerName(exec, r0).data());
+ break;
+ }
+ case op_ret_object_or_this: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ printf("[%4d] constructor_ret\t\t %s %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data());
+ break;
+ }
+ case op_construct: {
+ int func = (++it)->u.operand;
+ int argCount = (++it)->u.operand;
+ int registerOffset = (++it)->u.operand;
+ printf("[%4d] construct\t %s, %d, %d\n", location, registerName(exec, func).data(), argCount, registerOffset);
+ break;
+ }
+ case op_strcat: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ int count = (++it)->u.operand;
+ printf("[%4d] strcat\t\t %s, %s, %d\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), count);
+ break;
+ }
+ case op_to_primitive: {
+ int r0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ printf("[%4d] to_primitive\t %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data());
+ break;
+ }
+ case op_get_pnames: {
+ int r0 = it[1].u.operand;
+ int r1 = it[2].u.operand;
+ int r2 = it[3].u.operand;
+ int r3 = it[4].u.operand;
+ int offset = it[5].u.operand;
+ printf("[%4d] get_pnames\t %s, %s, %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), registerName(exec, r3).data(), offset, location + offset);
+ it += OPCODE_LENGTH(op_get_pnames) - 1;
+ break;
+ }
+ case op_next_pname: {
+ int dest = it[1].u.operand;
+ int base = it[2].u.operand;
+ int i = it[3].u.operand;
+ int size = it[4].u.operand;
+ int iter = it[5].u.operand;
+ int offset = it[6].u.operand;
+ printf("[%4d] next_pname\t %s, %s, %s, %s, %s, %d(->%d)\n", location, registerName(exec, dest).data(), registerName(exec, base).data(), registerName(exec, i).data(), registerName(exec, size).data(), registerName(exec, iter).data(), offset, location + offset);
+ it += OPCODE_LENGTH(op_next_pname) - 1;
+ break;
+ }
+ case op_push_scope: {
+ int r0 = (++it)->u.operand;
+ printf("[%4d] push_scope\t %s\n", location, registerName(exec, r0).data());
+ break;
+ }
+ case op_pop_scope: {
+ printf("[%4d] pop_scope\n", location);
+ break;
+ }
+ case op_push_new_scope: {
+ int r0 = (++it)->u.operand;
+ int id0 = (++it)->u.operand;
+ int r1 = (++it)->u.operand;
+ printf("[%4d] push_new_scope \t%s, %s, %s\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data());
+ break;
+ }
+ case op_jmp_scopes: {
+ int scopeDelta = (++it)->u.operand;
+ int offset = (++it)->u.operand;
+ printf("[%4d] jmp_scopes\t^%d, %d(->%d)\n", location, scopeDelta, offset, location + offset);
+ break;
+ }
+ case op_catch: {
+ int r0 = (++it)->u.operand;
+ printf("[%4d] catch\t\t %s\n", location, registerName(exec, r0).data());
+ break;
+ }
+ case op_throw: {
+ int r0 = (++it)->u.operand;
+ printf("[%4d] throw\t\t %s\n", location, registerName(exec, r0).data());
+ break;
+ }
+ case op_throw_reference_error: {
+ int k0 = (++it)->u.operand;
+ printf("[%4d] throw_reference_error\t %s\n", location, constantName(exec, k0, getConstant(k0)).data());
+ break;
+ }
+ case op_jsr: {
+ int retAddrDst = (++it)->u.operand;
+ int offset = (++it)->u.operand;
+ printf("[%4d] jsr\t\t %s, %d(->%d)\n", location, registerName(exec, retAddrDst).data(), offset, location + offset);
+ break;
+ }
+ case op_sret: {
+ int retAddrSrc = (++it)->u.operand;
+ printf("[%4d] sret\t\t %s\n", location, registerName(exec, retAddrSrc).data());
+ break;
+ }
+ case op_debug: {
+ int debugHookID = (++it)->u.operand;
+ int firstLine = (++it)->u.operand;
+ int lastLine = (++it)->u.operand;
+ printf("[%4d] debug\t\t %s, %d, %d\n", location, debugHookName(debugHookID), firstLine, lastLine);
+ break;
+ }
+ case op_profile_will_call: {
+ int function = (++it)->u.operand;
+ printf("[%4d] profile_will_call %s\n", location, registerName(exec, function).data());
+ break;
+ }
+ case op_profile_did_call: {
+ int function = (++it)->u.operand;
+ printf("[%4d] profile_did_call\t %s\n", location, registerName(exec, function).data());
+ break;
+ }
+ case op_end: {
+ int r0 = (++it)->u.operand;
+ printf("[%4d] end\t\t %s\n", location, registerName(exec, r0).data());
+ break;
+ }
+ }
+}
+
+#endif // !defined(NDEBUG) || ENABLE(OPCODE_SAMPLING)
+
+#if DUMP_CODE_BLOCK_STATISTICS
+static HashSet<CodeBlock*> liveCodeBlockSet;
+#endif
+
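+// X-macro lists of CodeBlock's vector members; dumpStatistics() expands them
+// to generate per-member counters and size accounting.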
+#define FOR_EACH_MEMBER_VECTOR(macro) \
+ macro(instructions) \
+ macro(globalResolveInfos) \
+ macro(structureStubInfos) \
+ macro(callLinkInfos) \
+ macro(linkedCallerList) \
+ macro(identifiers) \
+ macro(functionExpressions) \
+ macro(constantRegisters)
+
+#define FOR_EACH_MEMBER_VECTOR_RARE_DATA(macro) \
+ macro(regexps) \
+ macro(functions) \
+ macro(exceptionHandlers) \
+ macro(immediateSwitchJumpTables) \
+ macro(characterSwitchJumpTables) \
+ macro(stringSwitchJumpTables) \
+ macro(evalCodeCache) \
+ macro(expressionInfo) \
+ macro(lineInfo) \
+ macro(callReturnIndexVector)
+
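+// Measures a vector's heap footprint by capacity (allocated slots), not size,
+// so slack left over from growth is counted too.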
+template<typename T>
+static size_t sizeInBytes(const Vector<T>& vector)
+{
+ return vector.capacity() * sizeof(T);
+}
+
+void CodeBlock::dumpStatistics()
+{
+#if DUMP_CODE_BLOCK_STATISTICS
+ #define DEFINE_VARS(name) size_t name##IsNotEmpty = 0; size_t name##TotalSize = 0;
+ FOR_EACH_MEMBER_VECTOR(DEFINE_VARS)
+ FOR_EACH_MEMBER_VECTOR_RARE_DATA(DEFINE_VARS)
+ #undef DEFINE_VARS
+
+ // Non-vector data members
+ size_t evalCodeCacheIsNotEmpty = 0;
+
+ size_t symbolTableIsNotEmpty = 0;
+ size_t symbolTableTotalSize = 0;
+
+ size_t hasRareData = 0;
+
+ size_t isFunctionCode = 0;
+ size_t isGlobalCode = 0;
+ size_t isEvalCode = 0;
+
+ HashSet<CodeBlock*>::const_iterator end = liveCodeBlockSet.end();
+ for (HashSet<CodeBlock*>::const_iterator it = liveCodeBlockSet.begin(); it != end; ++it) {
+ CodeBlock* codeBlock = *it;
+
+ #define GET_STATS(name) if (!codeBlock->m_##name.isEmpty()) { name##IsNotEmpty++; name##TotalSize += sizeInBytes(codeBlock->m_##name); }
+ FOR_EACH_MEMBER_VECTOR(GET_STATS)
+ #undef GET_STATS
+
+ if (!codeBlock->m_symbolTable.isEmpty()) {
+ symbolTableIsNotEmpty++;
+ symbolTableTotalSize += (codeBlock->m_symbolTable.capacity() * (sizeof(SymbolTable::KeyType) + sizeof(SymbolTable::MappedType)));
+ }
+
+ if (codeBlock->m_rareData) {
+ hasRareData++;
+ #define GET_STATS(name) if (!codeBlock->m_rareData->m_##name.isEmpty()) { name##IsNotEmpty++; name##TotalSize += sizeInBytes(codeBlock->m_rareData->m_##name); }
+ FOR_EACH_MEMBER_VECTOR_RARE_DATA(GET_STATS)
+ #undef GET_STATS
+
+ if (!codeBlock->m_rareData->m_evalCodeCache.isEmpty())
+ evalCodeCacheIsNotEmpty++;
+ }
+
+ switch (codeBlock->codeType()) {
+ case FunctionCode:
+ ++isFunctionCode;
+ break;
+ case GlobalCode:
+ ++isGlobalCode;
+ break;
+ case EvalCode:
+ ++isEvalCode;
+ break;
+ }
+ }
+
+ size_t totalSize = 0;
+
+ #define GET_TOTAL_SIZE(name) totalSize += name##TotalSize;
+ FOR_EACH_MEMBER_VECTOR(GET_TOTAL_SIZE)
+ FOR_EACH_MEMBER_VECTOR_RARE_DATA(GET_TOTAL_SIZE)
+ #undef GET_TOTAL_SIZE
+
+ totalSize += symbolTableTotalSize;
+ totalSize += (liveCodeBlockSet.size() * sizeof(CodeBlock));
+
+ printf("Number of live CodeBlocks: %d\n", liveCodeBlockSet.size());
+ printf("Size of a single CodeBlock [sizeof(CodeBlock)]: %zu\n", sizeof(CodeBlock));
+ printf("Size of all CodeBlocks: %zu\n", totalSize);
+ printf("Average size of a CodeBlock: %zu\n", totalSize / liveCodeBlockSet.size());
+
+ printf("Number of FunctionCode CodeBlocks: %zu (%.3f%%)\n", isFunctionCode, static_cast<double>(isFunctionCode) * 100.0 / liveCodeBlockSet.size());
+ printf("Number of GlobalCode CodeBlocks: %zu (%.3f%%)\n", isGlobalCode, static_cast<double>(isGlobalCode) * 100.0 / liveCodeBlockSet.size());
+ printf("Number of EvalCode CodeBlocks: %zu (%.3f%%)\n", isEvalCode, static_cast<double>(isEvalCode) * 100.0 / liveCodeBlockSet.size());
+
+ printf("Number of CodeBlocks with rare data: %zu (%.3f%%)\n", hasRareData, static_cast<double>(hasRareData) * 100.0 / liveCodeBlockSet.size());
+
+ #define PRINT_STATS(name) printf("Number of CodeBlocks with " #name ": %zu\n", name##IsNotEmpty); printf("Size of all " #name ": %zu\n", name##TotalSize);
+ FOR_EACH_MEMBER_VECTOR(PRINT_STATS)
+ FOR_EACH_MEMBER_VECTOR_RARE_DATA(PRINT_STATS)
+ #undef PRINT_STATS
+
+ printf("Number of CodeBlocks with evalCodeCache: %zu\n", evalCodeCacheIsNotEmpty);
+ printf("Number of CodeBlocks with symbolTable: %zu\n", symbolTableIsNotEmpty);
+
+ printf("Size of all symbolTables: %zu\n", symbolTableTotalSize);
+
+#else
+ printf("Dumping CodeBlock statistics is not enabled.\n");
+#endif
+}
+
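+// Clones a freshly parsed CodeBlock: the ref-counted instruction stream is
+// shared with |other|, parser-produced state is copied, and the profiling and
+// optimization counters start fresh.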
+CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other, SymbolTable* symTab)
+ : m_globalObject(other.m_globalObject)
+ , m_heap(other.m_heap)
+ , m_numCalleeRegisters(other.m_numCalleeRegisters)
+ , m_numVars(other.m_numVars)
+ , m_numCapturedVars(other.m_numCapturedVars)
+ , m_numParameters(other.m_numParameters)
+ , m_isConstructor(other.m_isConstructor)
+ , m_shouldDiscardBytecode(false)
+ , m_ownerExecutable(*other.m_globalData, other.m_ownerExecutable.get(), other.m_ownerExecutable.get())
+ , m_globalData(other.m_globalData)
+ , m_instructions(other.m_instructions)
+ , m_instructionCount(other.m_instructionCount)
+ , m_thisRegister(other.m_thisRegister)
+ , m_argumentsRegister(other.m_argumentsRegister)
+ , m_activationRegister(other.m_activationRegister)
+ , m_needsFullScopeChain(other.m_needsFullScopeChain)
+ , m_usesEval(other.m_usesEval)
+ , m_isNumericCompareFunction(other.m_isNumericCompareFunction)
+ , m_isStrictMode(other.m_isStrictMode)
+ , m_codeType(other.m_codeType)
+ , m_source(other.m_source)
+ , m_sourceOffset(other.m_sourceOffset)
+#if ENABLE(JIT)
+ , m_globalResolveInfos(other.m_globalResolveInfos)
+#endif
+#if ENABLE(VALUE_PROFILER)
+ , m_executionEntryCount(0)
+#endif
+ , m_jumpTargets(other.m_jumpTargets)
+ , m_loopTargets(other.m_loopTargets)
+ , m_identifiers(other.m_identifiers)
+ , m_constantRegisters(other.m_constantRegisters)
+ , m_functionDecls(other.m_functionDecls)
+ , m_functionExprs(other.m_functionExprs)
+ , m_symbolTable(symTab)
+ , m_speculativeSuccessCounter(0)
+ , m_speculativeFailCounter(0)
+ , m_optimizationDelayCounter(0)
+ , m_reoptimizationRetryCounter(0)
+{
+ optimizeAfterWarmUp();
+
+ if (other.m_rareData) {
+ createRareDataIfNecessary();
+
+ m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
+ m_rareData->m_regexps = other.m_rareData->m_regexps;
+ m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers;
+ m_rareData->m_immediateSwitchJumpTables = other.m_rareData->m_immediateSwitchJumpTables;
+ m_rareData->m_characterSwitchJumpTables = other.m_rareData->m_characterSwitchJumpTables;
+ m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
+ m_rareData->m_expressionInfo = other.m_rareData->m_expressionInfo;
+ m_rareData->m_lineInfo = other.m_rareData->m_lineInfo;
+ }
+}
+
+CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, SymbolTable* symTab, bool isConstructor, PassOwnPtr<CodeBlock> alternative)
+ : m_globalObject(globalObject->globalData(), ownerExecutable, globalObject)
+ , m_heap(&m_globalObject->globalData().heap)
+ , m_numCalleeRegisters(0)
+ , m_numVars(0)
+ , m_numParameters(0)
+ , m_isConstructor(isConstructor)
+ , m_shouldDiscardBytecode(false)
+ , m_ownerExecutable(globalObject->globalData(), ownerExecutable, ownerExecutable)
+ , m_globalData(0)
+ , m_instructions(adoptRef(new Instructions))
+ , m_instructionCount(0)
+ , m_argumentsRegister(-1)
+ , m_needsFullScopeChain(ownerExecutable->needsActivation())
+ , m_usesEval(ownerExecutable->usesEval())
+ , m_isNumericCompareFunction(false)
+ , m_isStrictMode(ownerExecutable->isStrictMode())
+ , m_codeType(codeType)
+ , m_source(sourceProvider)
+ , m_sourceOffset(sourceOffset)
+#if ENABLE(VALUE_PROFILER)
+ , m_executionEntryCount(0)
+#endif
+ , m_symbolTable(symTab)
+ , m_alternative(alternative)
+ , m_speculativeSuccessCounter(0)
+ , m_speculativeFailCounter(0)
+ , m_optimizationDelayCounter(0)
+ , m_reoptimizationRetryCounter(0)
+{
+ ASSERT(m_source);
+
+ optimizeAfterWarmUp();
+
+#if DUMP_CODE_BLOCK_STATISTICS
+ liveCodeBlockSet.add(this);
+#endif
+}
+
+CodeBlock::~CodeBlock()
+{
+#if ENABLE(DFG_JIT)
+ // Remove myself from the set of DFG code blocks. Note that I may not be in this set
+ // (because I'm not a DFG code block), in which case this is a no-op anyway.
+ m_globalData->heap.m_dfgCodeBlocks.m_set.remove(this);
+#endif
+
+#if ENABLE(VERBOSE_VALUE_PROFILE)
+ dumpValueProfiles();
+#endif
+
+#if ENABLE(JIT)
+ // We may be destroyed before any CodeBlocks that refer to us are destroyed.
+ // Consider that two CodeBlocks become unreachable at the same time. There
+ // is no guarantee about the order in which the CodeBlocks are destroyed.
+ // So, if we don't remove incoming calls, and get destroyed before the
+ // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
+ // destructor will try to remove nodes from our (no longer valid) linked list.
+ while (m_incomingCalls.begin() != m_incomingCalls.end())
+ m_incomingCalls.begin()->remove();
+
+ // Note that our outgoing calls will be removed from other CodeBlocks'
+ // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
+ // destructors.
+
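+ // deref() releases any polymorphic access structure lists owned by the
+ // inline cache stubs.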
+ for (size_t size = m_structureStubInfos.size(), i = 0; i < size; ++i)
+ m_structureStubInfos[i].deref();
+#endif // ENABLE(JIT)
+
+#if DUMP_CODE_BLOCK_STATISTICS
+ liveCodeBlockSet.remove(this);
+#endif
+}
+
+void CodeBlock::visitStructures(SlotVisitor& visitor, Instruction* vPC) const
+{
+ Interpreter* interpreter = m_globalData->interpreter;
+
+ if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id) && vPC[4].u.structure) {
+ visitor.append(&vPC[4].u.structure);
+ return;
+ }
+
+ if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_self) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_getter_self) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_custom_self)) {
+ visitor.append(&vPC[4].u.structure);
+ return;
+ }
+ if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_proto) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_getter_proto) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_custom_proto)) {
+ visitor.append(&vPC[4].u.structure);
+ visitor.append(&vPC[5].u.structure);
+ return;
+ }
+ if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_chain) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_getter_chain) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_custom_chain)) {
+ visitor.append(&vPC[4].u.structure);
+ visitor.append(&vPC[5].u.structureChain);
+ return;
+ }
+ if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_transition)) {
+ visitor.append(&vPC[4].u.structure);
+ visitor.append(&vPC[5].u.structure);
+ visitor.append(&vPC[6].u.structureChain);
+ return;
+ }
+ if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id) && vPC[4].u.structure) {
+ visitor.append(&vPC[4].u.structure);
+ return;
+ }
+ if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_replace)) {
+ visitor.append(&vPC[4].u.structure);
+ return;
+ }
+ if (vPC[0].u.opcode == interpreter->getOpcode(op_resolve_global) || vPC[0].u.opcode == interpreter->getOpcode(op_resolve_global_dynamic)) {
+ if (vPC[3].u.structure)
+ visitor.append(&vPC[3].u.structure);
+ return;
+ }
+
+ // These instructions don't ref their Structures.
+ ASSERT(vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id) || vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_generic) || vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_generic) || vPC[0].u.opcode == interpreter->getOpcode(op_get_array_length) || vPC[0].u.opcode == interpreter->getOpcode(op_get_string_length));
+}
+
+void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
+{
+ EvalCacheMap::iterator end = m_cacheMap.end();
+ for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr)
+ visitor.append(&ptr->second);
+}
+
+void CodeBlock::visitAggregate(SlotVisitor& visitor)
+{
+ if (!!m_alternative)
+ m_alternative->visitAggregate(visitor);
+
+    // There are three things that may use unconditional finalizers: lazy bytecode freeing,
+    // inline cache clearing, and jettisoning. The probability of our wanting to do at
+    // least one of those things is quite close to 1. So we add a finalizer no matter
+    // what; when it runs, it figures out whether it has any work to do.
+ visitor.addUnconditionalFinalizer(this);
+
+ if (shouldImmediatelyAssumeLivenessDuringScan()) {
+ // This code block is live, so scan all references strongly and return.
+ stronglyVisitStrongReferences(visitor);
+ stronglyVisitWeakReferences(visitor);
+ return;
+ }
+
+#if ENABLE(DFG_JIT)
+ // We get here if we're live in the sense that our owner executable is live,
+ // but we're not yet live for sure in another sense: we may yet decide that this
+ // code block should be jettisoned based on its outgoing weak references being
+ // stale. Set a flag to indicate that we're still assuming that we're dead, and
+ // perform one round of determining if we're live. The GC may determine, based on
+ // either us marking additional objects, or by other objects being marked for
+ // other reasons, that this iteration should run again; it will notify us of this
+ // decision by calling harvestWeakReferences().
+
+ m_dfgData->livenessHasBeenProved = false;
+ m_dfgData->allTransitionsHaveBeenMarked = false;
+
+ performTracingFixpointIteration(visitor);
+
+ // GC doesn't have enough information yet for us to decide whether to keep our DFG
+ // data, so we need to register a handler to run again at the end of GC, when more
+ // information is available.
+ if (!(m_dfgData->livenessHasBeenProved && m_dfgData->allTransitionsHaveBeenMarked))
+ visitor.addWeakReferenceHarvester(this);
+
+#else // ENABLE(DFG_JIT)
+ ASSERT_NOT_REACHED();
+#endif // ENABLE(DFG_JIT)
+}
+
+void CodeBlock::performTracingFixpointIteration(SlotVisitor& visitor)
+{
+ UNUSED_PARAM(visitor);
+
+#if ENABLE(DFG_JIT)
+ // Evaluate our weak reference transitions, if there are still some to evaluate.
+ if (!m_dfgData->allTransitionsHaveBeenMarked) {
+ bool allAreMarkedSoFar = true;
+ for (unsigned i = 0; i < m_dfgData->transitions.size(); ++i) {
+ if ((!m_dfgData->transitions[i].m_codeOrigin
+ || Heap::isMarked(m_dfgData->transitions[i].m_codeOrigin.get()))
+ && Heap::isMarked(m_dfgData->transitions[i].m_from.get())) {
+ // If the following three things are live, then the target of the
+ // transition is also live:
+ // - This code block. We know it's live already because otherwise
+ // we wouldn't be scanning ourselves.
+ // - The code origin of the transition. Transitions may arise from
+ // code that was inlined. They are not relevant if the user's
+ // object that is required for the inlinee to run is no longer
+ // live.
+ // - The source of the transition. The transition checks if some
+ // heap location holds the source, and if so, stores the target.
+ // Hence the source must be live for the transition to be live.
+ visitor.append(&m_dfgData->transitions[i].m_to);
+ } else
+ allAreMarkedSoFar = false;
+ }
+
+ if (allAreMarkedSoFar)
+ m_dfgData->allTransitionsHaveBeenMarked = true;
+ }
+
+ // Check if we have any remaining work to do.
+ if (m_dfgData->livenessHasBeenProved)
+ return;
+
+    // Now check all of our weak references. If all of them are live, then we
+    // have proved liveness and so we scan our strong references. If, at the end
+    // of GC, we still have not proved liveness, then this code block is toast.
+ bool allAreLiveSoFar = true;
+ for (unsigned i = 0; i < m_dfgData->weakReferences.size(); ++i) {
+ if (!Heap::isMarked(m_dfgData->weakReferences[i].get())) {
+ allAreLiveSoFar = false;
+ break;
+ }
+ }
+
+ // If some weak references are dead, then this fixpoint iteration was
+ // unsuccessful.
+ if (!allAreLiveSoFar)
+ return;
+
+ // All weak references are live. Record this information so we don't
+ // come back here again, and scan the strong references.
+ m_dfgData->livenessHasBeenProved = true;
+ stronglyVisitStrongReferences(visitor);
+#endif // ENABLE(DFG_JIT)
+}
+
+void CodeBlock::visitWeakReferences(SlotVisitor& visitor)
+{
+ performTracingFixpointIteration(visitor);
+}
+
+void CodeBlock::finalizeUnconditionally()
+{
+#if ENABLE(JIT)
+#if ENABLE(JIT_VERBOSE_OSR)
+ static const bool verboseUnlinking = true;
+#else
+ static const bool verboseUnlinking = false;
+#endif
+#endif
+
+#if ENABLE(DFG_JIT)
+    // Check if we have been proved live. If not, then jettison.
+ if (!(shouldImmediatelyAssumeLivenessDuringScan() || m_dfgData->livenessHasBeenProved)) {
+ if (verboseUnlinking)
+ printf("Code block %p has dead weak references, jettisoning during GC.\n", this);
+
+ // Make sure that the baseline JIT knows that it should re-warm-up before
+ // optimizing.
+ alternative()->optimizeAfterWarmUp();
+
+ jettison();
+ return;
+ }
+#endif // ENABLE(DFG_JIT)
+
+#if ENABLE(JIT)
+ // Handle inline caches.
+ if (!!getJITCode()) {
+ RepatchBuffer repatchBuffer(this);
+ for (unsigned i = 0; i < numberOfCallLinkInfos(); ++i) {
+ if (callLinkInfo(i).isLinked() && !Heap::isMarked(callLinkInfo(i).callee.get())) {
+ if (verboseUnlinking)
+ printf("Clearing call from %p.\n", this);
+ callLinkInfo(i).unlink(*m_globalData, repatchBuffer);
+ }
+ if (!!callLinkInfo(i).lastSeenCallee
+ && !Heap::isMarked(callLinkInfo(i).lastSeenCallee.get()))
+ callLinkInfo(i).lastSeenCallee.clear();
+ }
+ for (size_t size = m_globalResolveInfos.size(), i = 0; i < size; ++i) {
+ if (m_globalResolveInfos[i].structure && !Heap::isMarked(m_globalResolveInfos[i].structure.get())) {
+ if (verboseUnlinking)
+ printf("Clearing resolve info in %p.\n", this);
+ m_globalResolveInfos[i].structure.clear();
+ }
+ }
+
+ for (size_t size = m_structureStubInfos.size(), i = 0; i < size; ++i) {
+ StructureStubInfo& stubInfo = m_structureStubInfos[i];
+
+ AccessType accessType = static_cast<AccessType>(stubInfo.accessType);
+
+ if (stubInfo.visitWeakReferences())
+ continue;
+
+ if (verboseUnlinking)
+ printf("Clearing structure cache (kind %d) in %p.\n", stubInfo.accessType, this);
+
+ if (isGetByIdAccess(accessType)) {
+ if (getJITCode().jitType() == JITCode::DFGJIT)
+ DFG::dfgResetGetByID(repatchBuffer, stubInfo);
+ else
+ JIT::resetPatchGetById(repatchBuffer, &stubInfo);
+ } else {
+ ASSERT(isPutByIdAccess(accessType));
+ if (getJITCode().jitType() == JITCode::DFGJIT)
+ DFG::dfgResetPutByID(repatchBuffer, stubInfo);
+ else
+ JIT::resetPatchPutById(repatchBuffer, &stubInfo);
+ }
+
+ stubInfo.reset();
+ }
+
+ for (size_t size = m_methodCallLinkInfos.size(), i = 0; i < size; ++i) {
+ if (!m_methodCallLinkInfos[i].cachedStructure)
+ continue;
+
+ ASSERT(m_methodCallLinkInfos[i].seenOnce());
+ ASSERT(!!m_methodCallLinkInfos[i].cachedPrototypeStructure);
+
+ if (!Heap::isMarked(m_methodCallLinkInfos[i].cachedStructure.get())
+ || !Heap::isMarked(m_methodCallLinkInfos[i].cachedPrototypeStructure.get())
+ || !Heap::isMarked(m_methodCallLinkInfos[i].cachedFunction.get())
+ || !Heap::isMarked(m_methodCallLinkInfos[i].cachedPrototype.get())) {
+ if (verboseUnlinking)
+ printf("Clearing method call in %p.\n", this);
+ m_methodCallLinkInfos[i].reset(repatchBuffer, getJITType());
+
+ StructureStubInfo& stubInfo = getStubInfo(m_methodCallLinkInfos[i].bytecodeIndex);
+
+ AccessType accessType = static_cast<AccessType>(stubInfo.accessType);
+
+ if (accessType != access_unset) {
+ ASSERT(isGetByIdAccess(accessType));
+ if (getJITCode().jitType() == JITCode::DFGJIT)
+ DFG::dfgResetGetByID(repatchBuffer, stubInfo);
+ else
+ JIT::resetPatchGetById(repatchBuffer, &stubInfo);
+ stubInfo.reset();
+ }
+ }
+ }
+ }
+#endif
+
+ // Handle the bytecode discarding chore.
+ if (m_shouldDiscardBytecode) {
+ discardBytecode();
+ m_shouldDiscardBytecode = false;
+ }
+}
+
+void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
+{
+ visitor.append(&m_globalObject);
+ visitor.append(&m_ownerExecutable);
+ if (m_rareData) {
+ m_rareData->m_evalCodeCache.visitAggregate(visitor);
+ size_t regExpCount = m_rareData->m_regexps.size();
+ WriteBarrier<RegExp>* regexps = m_rareData->m_regexps.data();
+ for (size_t i = 0; i < regExpCount; i++)
+ visitor.append(regexps + i);
+ }
+ visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
+ for (size_t i = 0; i < m_functionExprs.size(); ++i)
+ visitor.append(&m_functionExprs[i]);
+ for (size_t i = 0; i < m_functionDecls.size(); ++i)
+ visitor.append(&m_functionDecls[i]);
+#if ENABLE(INTERPRETER)
+ for (size_t size = m_propertyAccessInstructions.size(), i = 0; i < size; ++i)
+ visitStructures(visitor, &instructions()[m_propertyAccessInstructions[i]]);
+ for (size_t size = m_globalResolveInstructions.size(), i = 0; i < size; ++i)
+ visitStructures(visitor, &instructions()[m_globalResolveInstructions[i]]);
+#endif
+
+#if ENABLE(DFG_JIT)
+ if (hasCodeOrigins()) {
+ // Make sure that executables that we have inlined don't die.
+ // FIXME: If they would have otherwise died, we should probably trigger recompilation.
+ for (size_t i = 0; i < inlineCallFrames().size(); ++i) {
+ visitor.append(&inlineCallFrames()[i].executable);
+ visitor.append(&inlineCallFrames()[i].callee);
+ }
+ }
+#endif
+
+#if ENABLE(VALUE_PROFILER)
+ for (unsigned profileIndex = 0; profileIndex < numberOfArgumentValueProfiles(); ++profileIndex)
+ valueProfileForArgument(profileIndex)->computeUpdatedPrediction();
+ for (unsigned profileIndex = 0; profileIndex < numberOfValueProfiles(); ++profileIndex)
+ valueProfile(profileIndex)->computeUpdatedPrediction();
+#endif
+}
+
+void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
+{
+ UNUSED_PARAM(visitor);
+
+#if ENABLE(DFG_JIT)
+ if (!m_dfgData)
+ return;
+
+ for (unsigned i = 0; i < m_dfgData->transitions.size(); ++i) {
+ if (!!m_dfgData->transitions[i].m_codeOrigin)
+ visitor.append(&m_dfgData->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
+ visitor.append(&m_dfgData->transitions[i].m_from);
+ visitor.append(&m_dfgData->transitions[i].m_to);
+ }
+
+ for (unsigned i = 0; i < m_dfgData->weakReferences.size(); ++i)
+ visitor.append(&m_dfgData->weakReferences[i]);
+#endif
+}
+
+HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset)
+{
+ ASSERT(bytecodeOffset < m_instructionCount);
+
+ if (!m_rareData)
+ return 0;
+
+ Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
+ for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
+ // Handlers are ordered innermost first, so the first handler we encounter
+ // that contains the source address is the correct handler to use.
+ if (exceptionHandlers[i].start <= bytecodeOffset && exceptionHandlers[i].end >= bytecodeOffset)
+ return &exceptionHandlers[i];
+ }
+
+ return 0;
+}
+
+int CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
+{
+ ASSERT(bytecodeOffset < m_instructionCount);
+
+ if (!m_rareData)
+ return m_ownerExecutable->source().firstLine();
+
+ Vector<LineInfo>& lineInfo = m_rareData->m_lineInfo;
+
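+    // Binary-search for the first LineInfo whose instructionOffset is greater
+    // than bytecodeOffset; the entry just before it covers the offset. For
+    // example, with instruction offsets {0, 5, 12} and bytecodeOffset 7, low
+    // settles on 2 and we report lineInfo[1].lineNumber.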
+ int low = 0;
+ int high = lineInfo.size();
+ while (low < high) {
+ int mid = low + (high - low) / 2;
+ if (lineInfo[mid].instructionOffset <= bytecodeOffset)
+ low = mid + 1;
+ else
+ high = mid;
+ }
+
+ if (!low)
+ return m_ownerExecutable->source().firstLine();
+ return lineInfo[low - 1].lineNumber;
+}
+
+void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset)
+{
+ ASSERT(bytecodeOffset < m_instructionCount);
+
+ if (!m_rareData) {
+ startOffset = 0;
+ endOffset = 0;
+ divot = 0;
+ return;
+ }
+
+ Vector<ExpressionRangeInfo>& expressionInfo = m_rareData->m_expressionInfo;
+
+ int low = 0;
+ int high = expressionInfo.size();
+ while (low < high) {
+ int mid = low + (high - low) / 2;
+ if (expressionInfo[mid].instructionOffset <= bytecodeOffset)
+ low = mid + 1;
+ else
+ high = mid;
+ }
+
+ ASSERT(low);
+ if (!low) {
+ startOffset = 0;
+ endOffset = 0;
+ divot = 0;
+ return;
+ }
+
+ startOffset = expressionInfo[low - 1].startOffset;
+ endOffset = expressionInfo[low - 1].endOffset;
+ divot = expressionInfo[low - 1].divotPoint + m_sourceOffset;
+ return;
+}
+
+#if ENABLE(INTERPRETER)
+bool CodeBlock::hasGlobalResolveInstructionAtBytecodeOffset(unsigned bytecodeOffset)
+{
+ if (m_globalResolveInstructions.isEmpty())
+ return false;
+
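+    // Binary-search for the first recorded instruction offset greater than
+    // bytecodeOffset; a global resolve instruction exists at this offset only
+    // if the entry just before the split point matches it exactly.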
+ int low = 0;
+ int high = m_globalResolveInstructions.size();
+ while (low < high) {
+ int mid = low + (high - low) / 2;
+ if (m_globalResolveInstructions[mid] <= bytecodeOffset)
+ low = mid + 1;
+ else
+ high = mid;
+ }
+
+ if (!low || m_globalResolveInstructions[low - 1] != bytecodeOffset)
+ return false;
+ return true;
+}
+#endif
+#if ENABLE(JIT)
+bool CodeBlock::hasGlobalResolveInfoAtBytecodeOffset(unsigned bytecodeOffset)
+{
+ if (m_globalResolveInfos.isEmpty())
+ return false;
+
+ int low = 0;
+ int high = m_globalResolveInfos.size();
+ while (low < high) {
+ int mid = low + (high - low) / 2;
+ if (m_globalResolveInfos[mid].bytecodeOffset <= bytecodeOffset)
+ low = mid + 1;
+ else
+ high = mid;
+ }
+
+ if (!low || m_globalResolveInfos[low - 1].bytecodeOffset != bytecodeOffset)
+ return false;
+ return true;
+}
+#endif
+
+void CodeBlock::shrinkToFit()
+{
+ instructions().shrinkToFit();
+
+#if ENABLE(INTERPRETER)
+ m_propertyAccessInstructions.shrinkToFit();
+ m_globalResolveInstructions.shrinkToFit();
+#endif
+#if ENABLE(JIT)
+ m_structureStubInfos.shrinkToFit();
+ m_globalResolveInfos.shrinkToFit();
+ m_callLinkInfos.shrinkToFit();
+#endif
+
+ m_identifiers.shrinkToFit();
+ m_functionDecls.shrinkToFit();
+ m_functionExprs.shrinkToFit();
+ m_constantRegisters.shrinkToFit();
+
+ if (m_rareData) {
+ m_rareData->m_exceptionHandlers.shrinkToFit();
+ m_rareData->m_regexps.shrinkToFit();
+ m_rareData->m_immediateSwitchJumpTables.shrinkToFit();
+ m_rareData->m_characterSwitchJumpTables.shrinkToFit();
+ m_rareData->m_stringSwitchJumpTables.shrinkToFit();
+ m_rareData->m_expressionInfo.shrinkToFit();
+ m_rareData->m_lineInfo.shrinkToFit();
+ }
+}
+
+void CodeBlock::createActivation(CallFrame* callFrame)
+{
+ ASSERT(codeType() == FunctionCode);
+ ASSERT(needsFullScopeChain());
+ ASSERT(!callFrame->uncheckedR(activationRegister()).jsValue());
+ JSActivation* activation = JSActivation::create(callFrame->globalData(), callFrame, static_cast<FunctionExecutable*>(ownerExecutable()));
+ callFrame->uncheckedR(activationRegister()) = JSValue(activation);
+ callFrame->setScopeChain(callFrame->scopeChain()->push(activation));
+}
+
+#if ENABLE(JIT)
+void CallLinkInfo::unlink(JSGlobalData& globalData, RepatchBuffer& repatchBuffer)
+{
+ ASSERT(isLinked());
+
+ if (isDFG) {
+#if ENABLE(DFG_JIT)
+ repatchBuffer.relink(CodeLocationCall(callReturnLocation), callType == Construct ? operationLinkConstruct : operationLinkCall);
+#else
+ ASSERT_NOT_REACHED();
+#endif
+ } else
+ repatchBuffer.relink(CodeLocationNearCall(callReturnLocation), callType == Construct ? globalData.jitStubs->ctiVirtualConstructLink() : globalData.jitStubs->ctiVirtualCallLink());
+ hasSeenShouldRepatch = false;
+ callee.clear();
+
+ // It will be on a list if the callee has a code block.
+ if (isOnList())
+ remove();
+}
+
+void MethodCallLinkInfo::reset(RepatchBuffer& repatchBuffer, JITCode::JITType jitType)
+{
+ cachedStructure.clearToMaxUnsigned();
+ cachedPrototype.clear();
+ cachedPrototypeStructure.clearToMaxUnsigned();
+ cachedFunction.clear();
+
+ ASSERT_UNUSED(jitType, jitType == JITCode::BaselineJIT);
+
+ repatchBuffer.relink(callReturnLocation, cti_op_get_by_id_method_check);
+}
+
+void CodeBlock::unlinkCalls()
+{
+ if (!!m_alternative)
+ m_alternative->unlinkCalls();
+ if (!(m_callLinkInfos.size() || m_methodCallLinkInfos.size()))
+ return;
+ if (!m_globalData->canUseJIT())
+ return;
+ RepatchBuffer repatchBuffer(this);
+ for (size_t i = 0; i < m_callLinkInfos.size(); i++) {
+ if (!m_callLinkInfos[i].isLinked())
+ continue;
+ m_callLinkInfos[i].unlink(*m_globalData, repatchBuffer);
+ }
+}
+
+void CodeBlock::unlinkIncomingCalls()
+{
+ RepatchBuffer repatchBuffer(this);
+ while (m_incomingCalls.begin() != m_incomingCalls.end())
+ m_incomingCalls.begin()->unlink(*m_globalData, repatchBuffer);
+}
+#endif
+
+void CodeBlock::clearEvalCache()
+{
+ if (!!m_alternative)
+ m_alternative->clearEvalCache();
+ if (!m_rareData)
+ return;
+ m_rareData->m_evalCodeCache.clear();
+}
+
+template<typename T>
+inline void replaceExistingEntries(Vector<T>& target, Vector<T>& source)
+{
+ ASSERT(target.size() <= source.size());
+ for (size_t i = 0; i < target.size(); ++i)
+ target[i] = source[i];
+}
+
+void CodeBlock::copyPostParseDataFrom(CodeBlock* alternative)
+{
+ if (!alternative)
+ return;
+
+ replaceExistingEntries(m_constantRegisters, alternative->m_constantRegisters);
+ replaceExistingEntries(m_functionDecls, alternative->m_functionDecls);
+ replaceExistingEntries(m_functionExprs, alternative->m_functionExprs);
+ if (!!m_rareData && !!alternative->m_rareData)
+ replaceExistingEntries(m_rareData->m_constantBuffers, alternative->m_rareData->m_constantBuffers);
+}
+
+void CodeBlock::copyPostParseDataFromAlternative()
+{
+ copyPostParseDataFrom(m_alternative.get());
+}
+
+#if ENABLE(JIT)
+CodeBlock* ProgramCodeBlock::replacement()
+{
+ return &static_cast<ProgramExecutable*>(ownerExecutable())->generatedBytecode();
+}
+
+CodeBlock* EvalCodeBlock::replacement()
+{
+ return &static_cast<EvalExecutable*>(ownerExecutable())->generatedBytecode();
+}
+
+CodeBlock* FunctionCodeBlock::replacement()
+{
+ return &static_cast<FunctionExecutable*>(ownerExecutable())->generatedBytecodeFor(m_isConstructor ? CodeForConstruct : CodeForCall);
+}
+
+JSObject* ProgramCodeBlock::compileOptimized(ExecState* exec, ScopeChainNode* scopeChainNode)
+{
+ if (replacement()->getJITType() == JITCode::nextTierJIT(getJITType()))
+ return 0;
+ JSObject* error = static_cast<ProgramExecutable*>(ownerExecutable())->compileOptimized(exec, scopeChainNode);
+ return error;
+}
+
+JSObject* EvalCodeBlock::compileOptimized(ExecState* exec, ScopeChainNode* scopeChainNode)
+{
+ if (replacement()->getJITType() == JITCode::nextTierJIT(getJITType()))
+ return 0;
+ JSObject* error = static_cast<EvalExecutable*>(ownerExecutable())->compileOptimized(exec, scopeChainNode);
+ return error;
+}
+
+JSObject* FunctionCodeBlock::compileOptimized(ExecState* exec, ScopeChainNode* scopeChainNode)
+{
+ if (replacement()->getJITType() == JITCode::nextTierJIT(getJITType()))
+ return 0;
+ JSObject* error = static_cast<FunctionExecutable*>(ownerExecutable())->compileOptimizedFor(exec, scopeChainNode, m_isConstructor ? CodeForConstruct : CodeForCall);
+ return error;
+}
+
+bool ProgramCodeBlock::canCompileWithDFG()
+{
+ return DFG::canCompileProgram(this);
+}
+
+bool EvalCodeBlock::canCompileWithDFG()
+{
+ return DFG::canCompileEval(this);
+}
+
+bool FunctionCodeBlock::canCompileWithDFG()
+{
+ if (m_isConstructor)
+ return DFG::canCompileFunctionForConstruct(this);
+ return DFG::canCompileFunctionForCall(this);
+}
+
+void ProgramCodeBlock::jettison()
+{
+ ASSERT(getJITType() != JITCode::BaselineJIT);
+ ASSERT(this == replacement());
+ static_cast<ProgramExecutable*>(ownerExecutable())->jettisonOptimizedCode(*globalData());
+}
+
+void EvalCodeBlock::jettison()
+{
+ ASSERT(getJITType() != JITCode::BaselineJIT);
+ ASSERT(this == replacement());
+ static_cast<EvalExecutable*>(ownerExecutable())->jettisonOptimizedCode(*globalData());
+}
+
+void FunctionCodeBlock::jettison()
+{
+ ASSERT(getJITType() != JITCode::BaselineJIT);
+ ASSERT(this == replacement());
+ static_cast<FunctionExecutable*>(ownerExecutable())->jettisonOptimizedCodeFor(*globalData(), m_isConstructor ? CodeForConstruct : CodeForCall);
+}
+#endif
+
+#if ENABLE(VALUE_PROFILER)
+bool CodeBlock::shouldOptimizeNow()
+{
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf("Considering optimizing %p...\n", this);
+#endif
+
+#if ENABLE(VERBOSE_VALUE_PROFILE)
+ dumpValueProfiles();
+#endif
+
+ if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay)
+ return true;
+
+ unsigned numberOfLiveNonArgumentValueProfiles = 0;
+ unsigned numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
+ for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
+ ValueProfile* profile = getFromAllValueProfiles(i);
+ unsigned numSamples = profile->totalNumberOfSamples();
+ if (numSamples > ValueProfile::numberOfBuckets)
+ numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
+ numberOfSamplesInProfiles += numSamples;
+ if (profile->m_bytecodeOffset < 0) {
+ profile->computeUpdatedPrediction();
+ continue;
+ }
+ if (profile->numberOfSamples() || profile->m_prediction != PredictNone)
+ numberOfLiveNonArgumentValueProfiles++;
+ profile->computeUpdatedPrediction();
+ }
+
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf("Profile hotness: %lf, %lf\n", (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(), (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles());
+#endif
+
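+    // Optimize only if enough of the value profiles are live, the profiles have
+    // accumulated enough samples, and the minimum optimization delay has passed;
+    // otherwise bump the delay counter and re-arm the warm-up trigger below.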
+ if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate)
+ && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate)
+ && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay)
+ return true;
+
+ ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
+ m_optimizationDelayCounter++;
+ optimizeAfterWarmUp();
+ return false;
+}
+#endif
+
+#if ENABLE(DFG_JIT)
+void CodeBlock::tallyFrequentExitSites()
+{
+ ASSERT(getJITType() == JITCode::DFGJIT);
+ ASSERT(alternative()->getJITType() == JITCode::BaselineJIT);
+ ASSERT(!!m_dfgData);
+
+ CodeBlock* profiledBlock = alternative();
+
+ for (unsigned i = 0; i < m_dfgData->osrExit.size(); ++i) {
+ DFG::OSRExit& exit = m_dfgData->osrExit[i];
+
+ if (!exit.considerAddingAsFrequentExitSite(this, profiledBlock))
+ continue;
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ fprintf(stderr, "OSR exit #%u (bc#%u, @%u, %s) for code block %p occurred frequently; counting as frequent exit site.\n", i, exit.m_codeOrigin.bytecodeIndex, exit.m_nodeIndex, DFG::exitKindToString(exit.m_kind), this);
+#endif
+ }
+}
+#endif // ENABLE(DFG_JIT)
+
+#if ENABLE(VERBOSE_VALUE_PROFILE)
+void CodeBlock::dumpValueProfiles()
+{
+ fprintf(stderr, "ValueProfile for %p:\n", this);
+ for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
+ ValueProfile* profile = getFromAllValueProfiles(i);
+ if (profile->m_bytecodeOffset < 0) {
+ ASSERT(profile->m_bytecodeOffset == -1);
+ fprintf(stderr, " arg = %u: ", i);
+ } else
+ fprintf(stderr, " bc = %d: ", profile->m_bytecodeOffset);
+ if (!profile->numberOfSamples() && profile->m_prediction == PredictNone) {
+ fprintf(stderr, "<empty>\n");
+ continue;
+ }
+ profile->dump(stderr);
+ fprintf(stderr, "\n");
+ }
+ fprintf(stderr, "RareCaseProfile for %p:\n", this);
+ for (unsigned i = 0; i < numberOfRareCaseProfiles(); ++i) {
+ RareCaseProfile* profile = rareCaseProfile(i);
+ fprintf(stderr, " bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
+ }
+ fprintf(stderr, "SpecialFastCaseProfile for %p:\n", this);
+ for (unsigned i = 0; i < numberOfSpecialFastCaseProfiles(); ++i) {
+ RareCaseProfile* profile = specialFastCaseProfile(i);
+ fprintf(stderr, " bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
+ }
+}
+#endif
+
+#ifndef NDEBUG
+bool CodeBlock::usesOpcode(OpcodeID opcodeID)
+{
+ Interpreter* interpreter = globalData()->interpreter;
+ Instruction* instructionsBegin = instructions().begin();
+ unsigned instructionCount = instructions().size();
+
+ for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount; ) {
+ switch (interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode)) {
+#define DEFINE_OP(curOpcode, length) \
+ case curOpcode: \
+ if (curOpcode == opcodeID) \
+ return true; \
+ bytecodeOffset += length; \
+ break;
+ FOR_EACH_OPCODE_ID(DEFINE_OP)
+#undef DEFINE_OP
+ default:
+ ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+
+ return false;
+}
+#endif
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.h b/Source/JavaScriptCore/bytecode/CodeBlock.h
new file mode 100644
index 000000000..159cb65de
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.h
@@ -0,0 +1,1519 @@
+/*
+ * Copyright (C) 2008, 2009, 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CodeBlock_h
+#define CodeBlock_h
+
+#include "CodeOrigin.h"
+#include "CompactJITCodeMap.h"
+#include "DFGCodeBlocks.h"
+#include "DFGExitProfile.h"
+#include "DFGOSREntry.h"
+#include "DFGOSRExit.h"
+#include "EvalCodeCache.h"
+#include "Options.h"
+#include "Instruction.h"
+#include "JITCode.h"
+#include "JITWriteBarrier.h"
+#include "JSGlobalObject.h"
+#include "JumpTable.h"
+#include "Nodes.h"
+#include "PredictionTracker.h"
+#include "RegExpObject.h"
+#include "UString.h"
+#include "UnconditionalFinalizer.h"
+#include "ValueProfile.h"
+#include <wtf/FastAllocBase.h>
+#include <wtf/PassOwnPtr.h>
+#include <wtf/RefPtr.h>
+#include <wtf/SegmentedVector.h>
+#include <wtf/SentinelLinkedList.h>
+#include <wtf/Vector.h>
+
+#if ENABLE(JIT)
+#include "StructureStubInfo.h"
+#endif
+
+// Register numbers used in bytecode operations have different meaning according to their ranges:
+// 0x80000000-0xFFFFFFFF Negative indices from the CallFrame pointer are entries in the call frame, see RegisterFile.h.
+// 0x00000000-0x3FFFFFFF  Forward indices from the CallFrame pointer are local vars and temporaries within the function's callframe.
+// 0x40000000-0x7FFFFFFF Positive indices from 0x40000000 specify entries in the constant pool on the CodeBlock.
+static const int FirstConstantRegisterIndex = 0x40000000;
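+// For example, operand 0x40000001 names the CodeBlock's constant pool entry at
+// index 1, i.e. m_constantRegisters[0x40000001 - FirstConstantRegisterIndex].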
+
+namespace JSC {
+
+ enum HasSeenShouldRepatch {
+ hasSeenShouldRepatch
+ };
+
+ class ExecState;
+ class DFGCodeBlocks;
+
+ enum CodeType { GlobalCode, EvalCode, FunctionCode };
+
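+    // Functions that use 'arguments' reserve two adjacent registers: one that user
+    // code may overwrite and, just below it, the original arguments object; hence
+    // the -1 below.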
+ inline int unmodifiedArgumentsRegister(int argumentsRegister) { return argumentsRegister - 1; }
+
+ static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }
+
+ struct HandlerInfo {
+ uint32_t start;
+ uint32_t end;
+ uint32_t target;
+ uint32_t scopeDepth;
+#if ENABLE(JIT)
+ CodeLocationLabel nativeCode;
+#endif
+ };
+
+ struct ExpressionRangeInfo {
+ enum {
+ MaxOffset = (1 << 7) - 1,
+ MaxDivot = (1 << 25) - 1
+ };
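+        // startOffset and endOffset are capped at MaxOffset, and divotPoint at
+        // MaxDivot; expression ranges that do not fit cannot be represented in
+        // this packed form.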
+ uint32_t instructionOffset : 25;
+ uint32_t divotPoint : 25;
+ uint32_t startOffset : 7;
+ uint32_t endOffset : 7;
+ };
+
+ struct LineInfo {
+ uint32_t instructionOffset;
+ int32_t lineNumber;
+ };
+
+#if ENABLE(JIT)
+ struct CallLinkInfo : public BasicRawSentinelNode<CallLinkInfo> {
+ enum CallType { None, Call, CallVarargs, Construct };
+ static CallType callTypeFor(OpcodeID opcodeID)
+ {
+ if (opcodeID == op_call || opcodeID == op_call_eval)
+ return Call;
+ if (opcodeID == op_construct)
+ return Construct;
+ ASSERT(opcodeID == op_call_varargs);
+ return CallVarargs;
+ }
+
+ CallLinkInfo()
+ : hasSeenShouldRepatch(false)
+ , isDFG(false)
+ , callType(None)
+ {
+ }
+
+ ~CallLinkInfo()
+ {
+ if (isOnList())
+ remove();
+ }
+
+        CodeLocationLabel callReturnLocation; // This is a near call in the baseline JIT, or a normal call in the DFG.
+ CodeLocationDataLabelPtr hotPathBegin;
+ CodeLocationNearCall hotPathOther;
+ JITWriteBarrier<JSFunction> callee;
+ WriteBarrier<JSFunction> lastSeenCallee;
+ bool hasSeenShouldRepatch : 1;
+ bool isDFG : 1;
+ CallType callType : 2;
+ unsigned bytecodeIndex;
+
+ bool isLinked() { return callee; }
+ void unlink(JSGlobalData&, RepatchBuffer&);
+
+ bool seenOnce()
+ {
+ return hasSeenShouldRepatch;
+ }
+
+ void setSeen()
+ {
+ hasSeenShouldRepatch = true;
+ }
+ };
+
+ struct MethodCallLinkInfo {
+ MethodCallLinkInfo()
+ : seen(false)
+ {
+ }
+
+ bool seenOnce()
+ {
+ return seen;
+ }
+
+ void setSeen()
+ {
+ seen = true;
+ }
+
+ void reset(RepatchBuffer&, JITCode::JITType);
+
+ unsigned bytecodeIndex;
+ CodeLocationCall callReturnLocation;
+ JITWriteBarrier<Structure> cachedStructure;
+ JITWriteBarrier<Structure> cachedPrototypeStructure;
+        // We'd like this to actually be JSFunction, but InternalFunction and JSFunction
+        // don't have a common parent class, and we allow specialisation on both.
+ JITWriteBarrier<JSObject> cachedFunction;
+ JITWriteBarrier<JSObject> cachedPrototype;
+ bool seen;
+ };
+
+ struct GlobalResolveInfo {
+ GlobalResolveInfo(unsigned bytecodeOffset)
+ : offset(0)
+ , bytecodeOffset(bytecodeOffset)
+ {
+ }
+
+ WriteBarrier<Structure> structure;
+ unsigned offset;
+ unsigned bytecodeOffset;
+ };
+
+ // This structure is used to map from a call return location
+ // (given as an offset in bytes into the JIT code) back to
+ // the bytecode index of the corresponding bytecode operation.
+ // This is then used to look up the corresponding handler.
+ // FIXME: This should be made inlining aware! Currently it isn't
+ // because we never inline code that has exception handlers.
+ struct CallReturnOffsetToBytecodeOffset {
+ CallReturnOffsetToBytecodeOffset(unsigned callReturnOffset, unsigned bytecodeOffset)
+ : callReturnOffset(callReturnOffset)
+ , bytecodeOffset(bytecodeOffset)
+ {
+ }
+
+ unsigned callReturnOffset;
+ unsigned bytecodeOffset;
+ };
+
+ // valueAtPosition helpers for the binarySearch algorithm.
+
+ inline void* getStructureStubInfoReturnLocation(StructureStubInfo* structureStubInfo)
+ {
+ return structureStubInfo->callReturnLocation.executableAddress();
+ }
+
+ inline unsigned getStructureStubInfoBytecodeIndex(StructureStubInfo* structureStubInfo)
+ {
+ return structureStubInfo->bytecodeIndex;
+ }
+
+ inline void* getCallLinkInfoReturnLocation(CallLinkInfo* callLinkInfo)
+ {
+ return callLinkInfo->callReturnLocation.executableAddress();
+ }
+
+ inline unsigned getCallLinkInfoBytecodeIndex(CallLinkInfo* callLinkInfo)
+ {
+ return callLinkInfo->bytecodeIndex;
+ }
+
+ inline void* getMethodCallLinkInfoReturnLocation(MethodCallLinkInfo* methodCallLinkInfo)
+ {
+ return methodCallLinkInfo->callReturnLocation.executableAddress();
+ }
+
+ inline unsigned getMethodCallLinkInfoBytecodeIndex(MethodCallLinkInfo* methodCallLinkInfo)
+ {
+ return methodCallLinkInfo->bytecodeIndex;
+ }
+
+ inline unsigned getCallReturnOffset(CallReturnOffsetToBytecodeOffset* pc)
+ {
+ return pc->callReturnOffset;
+ }
+#endif
+
+ class CodeBlock : public UnconditionalFinalizer, public WeakReferenceHarvester {
+ WTF_MAKE_FAST_ALLOCATED;
+ friend class JIT;
+ public:
+ enum CopyParsedBlockTag { CopyParsedBlock };
+ protected:
+ CodeBlock(CopyParsedBlockTag, CodeBlock& other, SymbolTable*);
+
+ CodeBlock(ScriptExecutable* ownerExecutable, CodeType, JSGlobalObject*, PassRefPtr<SourceProvider>, unsigned sourceOffset, SymbolTable*, bool isConstructor, PassOwnPtr<CodeBlock> alternative);
+
+ WriteBarrier<JSGlobalObject> m_globalObject;
+ Heap* m_heap;
+
+ public:
+ virtual ~CodeBlock();
+
+ CodeBlock* alternative() { return m_alternative.get(); }
+ PassOwnPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
+ void setAlternative(PassOwnPtr<CodeBlock> alternative) { m_alternative = alternative; }
+
+ CodeSpecializationKind specializationKind()
+ {
+ if (m_isConstructor)
+ return CodeForConstruct;
+ return CodeForCall;
+ }
+
+#if ENABLE(JIT)
+ CodeBlock* baselineVersion()
+ {
+ CodeBlock* result = replacement();
+ if (!result)
+ return 0; // This can happen if we're in the process of creating the baseline version.
+ while (result->alternative())
+ result = result->alternative();
+ ASSERT(result);
+ ASSERT(result->getJITType() == JITCode::BaselineJIT);
+ return result;
+ }
+#endif
+
+ bool canProduceCopyWithBytecode() { return hasInstructions(); }
+
+ void visitAggregate(SlotVisitor&);
+
+ static void dumpStatistics();
+
+#if !defined(NDEBUG) || ENABLE_OPCODE_SAMPLING
+ void dump(ExecState*) const;
+ void printStructures(const Instruction*) const;
+ void printStructure(const char* name, const Instruction*, int operand) const;
+#endif
+
+ bool isStrictMode() const { return m_isStrictMode; }
+
+ inline bool isKnownNotImmediate(int index)
+ {
+ if (index == m_thisRegister && !m_isStrictMode)
+ return true;
+
+ if (isConstantRegisterIndex(index))
+ return getConstant(index).isCell();
+
+ return false;
+ }
+
+ ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
+ {
+ return index >= m_numVars;
+ }
+
+ HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
+ int lineNumberForBytecodeOffset(unsigned bytecodeOffset);
+ void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset);
+
+#if ENABLE(JIT)
+
+ StructureStubInfo& getStubInfo(ReturnAddressPtr returnAddress)
+ {
+ return *(binarySearch<StructureStubInfo, void*, getStructureStubInfoReturnLocation>(m_structureStubInfos.begin(), m_structureStubInfos.size(), returnAddress.value()));
+ }
+
+ StructureStubInfo& getStubInfo(unsigned bytecodeIndex)
+ {
+ return *(binarySearch<StructureStubInfo, unsigned, getStructureStubInfoBytecodeIndex>(m_structureStubInfos.begin(), m_structureStubInfos.size(), bytecodeIndex));
+ }
+
+ CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
+ {
+ return *(binarySearch<CallLinkInfo, void*, getCallLinkInfoReturnLocation>(m_callLinkInfos.begin(), m_callLinkInfos.size(), returnAddress.value()));
+ }
+
+ CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex)
+ {
+ return *(binarySearch<CallLinkInfo, unsigned, getCallLinkInfoBytecodeIndex>(m_callLinkInfos.begin(), m_callLinkInfos.size(), bytecodeIndex));
+ }
+
+ MethodCallLinkInfo& getMethodCallLinkInfo(ReturnAddressPtr returnAddress)
+ {
+ return *(binarySearch<MethodCallLinkInfo, void*, getMethodCallLinkInfoReturnLocation>(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), returnAddress.value()));
+ }
+
+ MethodCallLinkInfo& getMethodCallLinkInfo(unsigned bytecodeIndex)
+ {
+ return *(binarySearch<MethodCallLinkInfo, unsigned, getMethodCallLinkInfoBytecodeIndex>(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), bytecodeIndex));
+ }
+
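+        // The lookups below fall back to a dummy bytecode offset of 1 when no
+        // call return indices are available, rather than failing outright.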
+ unsigned bytecodeOffset(ReturnAddressPtr returnAddress)
+ {
+ if (!m_rareData)
+ return 1;
+ Vector<CallReturnOffsetToBytecodeOffset>& callIndices = m_rareData->m_callReturnIndexVector;
+ if (!callIndices.size())
+ return 1;
+ return binarySearch<CallReturnOffsetToBytecodeOffset, unsigned, getCallReturnOffset>(callIndices.begin(), callIndices.size(), getJITCode().offsetOf(returnAddress.value()))->bytecodeOffset;
+ }
+
+ unsigned bytecodeOffsetForCallAtIndex(unsigned index)
+ {
+ if (!m_rareData)
+ return 1;
+ Vector<CallReturnOffsetToBytecodeOffset>& callIndices = m_rareData->m_callReturnIndexVector;
+ if (!callIndices.size())
+ return 1;
+ ASSERT(index < m_rareData->m_callReturnIndexVector.size());
+ return m_rareData->m_callReturnIndexVector[index].bytecodeOffset;
+ }
+
+ void unlinkCalls();
+
+ bool hasIncomingCalls() { return m_incomingCalls.begin() != m_incomingCalls.end(); }
+
+ void linkIncomingCall(CallLinkInfo* incoming)
+ {
+ m_incomingCalls.push(incoming);
+ }
+
+ void unlinkIncomingCalls();
+#endif
+
+#if ENABLE(DFG_JIT)
+ void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
+ {
+ m_jitCodeMap = jitCodeMap;
+ }
+ CompactJITCodeMap* jitCodeMap()
+ {
+ return m_jitCodeMap.get();
+ }
+
+ void createDFGDataIfNecessary()
+ {
+ if (!!m_dfgData)
+ return;
+
+ m_dfgData = adoptPtr(new DFGData);
+ }
+
+ DFG::OSREntryData* appendDFGOSREntryData(unsigned bytecodeIndex, unsigned machineCodeOffset)
+ {
+ createDFGDataIfNecessary();
+ DFG::OSREntryData entry;
+ entry.m_bytecodeIndex = bytecodeIndex;
+ entry.m_machineCodeOffset = machineCodeOffset;
+ m_dfgData->osrEntry.append(entry);
+ return &m_dfgData->osrEntry.last();
+ }
+ unsigned numberOfDFGOSREntries() const
+ {
+ if (!m_dfgData)
+ return 0;
+ return m_dfgData->osrEntry.size();
+ }
+ DFG::OSREntryData* dfgOSREntryData(unsigned i) { return &m_dfgData->osrEntry[i]; }
+ DFG::OSREntryData* dfgOSREntryDataForBytecodeIndex(unsigned bytecodeIndex)
+ {
+ return binarySearch<DFG::OSREntryData, unsigned, DFG::getOSREntryDataBytecodeIndex>(m_dfgData->osrEntry.begin(), m_dfgData->osrEntry.size(), bytecodeIndex);
+ }
+
+ void appendOSRExit(const DFG::OSRExit& osrExit)
+ {
+ createDFGDataIfNecessary();
+ m_dfgData->osrExit.append(osrExit);
+ }
+
+ DFG::OSRExit& lastOSRExit()
+ {
+ return m_dfgData->osrExit.last();
+ }
+
+ void appendSpeculationRecovery(const DFG::SpeculationRecovery& recovery)
+ {
+ createDFGDataIfNecessary();
+ m_dfgData->speculationRecovery.append(recovery);
+ }
+
+ unsigned numberOfOSRExits()
+ {
+ if (!m_dfgData)
+ return 0;
+ return m_dfgData->osrExit.size();
+ }
+
+ unsigned numberOfSpeculationRecoveries()
+ {
+ if (!m_dfgData)
+ return 0;
+ return m_dfgData->speculationRecovery.size();
+ }
+
+ DFG::OSRExit& osrExit(unsigned index)
+ {
+ return m_dfgData->osrExit[index];
+ }
+
+ DFG::SpeculationRecovery& speculationRecovery(unsigned index)
+ {
+ return m_dfgData->speculationRecovery[index];
+ }
+
+ void appendWeakReference(JSCell* target)
+ {
+ createDFGDataIfNecessary();
+ m_dfgData->weakReferences.append(WriteBarrier<JSCell>(*globalData(), ownerExecutable(), target));
+ }
+
+ void shrinkWeakReferencesToFit()
+ {
+ if (!m_dfgData)
+ return;
+ m_dfgData->weakReferences.shrinkToFit();
+ }
+
+ void appendWeakReferenceTransition(JSCell* codeOrigin, JSCell* from, JSCell* to)
+ {
+ createDFGDataIfNecessary();
+ m_dfgData->transitions.append(
+ WeakReferenceTransition(*globalData(), ownerExecutable(), codeOrigin, from, to));
+ }
+
+ void shrinkWeakReferenceTransitionsToFit()
+ {
+ if (!m_dfgData)
+ return;
+ m_dfgData->transitions.shrinkToFit();
+ }
+#endif
+
+#if ENABLE(INTERPRETER)
+ unsigned bytecodeOffset(Instruction* returnAddress)
+ {
+ return static_cast<Instruction*>(returnAddress) - instructions().begin();
+ }
+#endif
+
+ void setIsNumericCompareFunction(bool isNumericCompareFunction) { m_isNumericCompareFunction = isNumericCompareFunction; }
+ bool isNumericCompareFunction() { return m_isNumericCompareFunction; }
+
+ bool hasInstructions() const { return !!m_instructions; }
+ unsigned numberOfInstructions() const { return !m_instructions ? 0 : m_instructions->m_instructions.size(); }
+ Vector<Instruction>& instructions() { return m_instructions->m_instructions; }
+ const Vector<Instruction>& instructions() const { return m_instructions->m_instructions; }
+ void discardBytecode() { m_instructions.clear(); }
+ void discardBytecodeLater()
+ {
+ m_shouldDiscardBytecode = true;
+ }
+ void handleBytecodeDiscardingOpportunity()
+ {
+ if (!!alternative())
+ discardBytecode();
+ else
+ discardBytecodeLater();
+ }
+
+#ifndef NDEBUG
+ bool usesOpcode(OpcodeID);
+#endif
+
+ unsigned instructionCount() { return m_instructionCount; }
+ void setInstructionCount(unsigned instructionCount) { m_instructionCount = instructionCount; }
+
+#if ENABLE(JIT)
+ void setJITCode(const JITCode& code, MacroAssemblerCodePtr codeWithArityCheck)
+ {
+ m_jitCode = code;
+ m_jitCodeWithArityCheck = codeWithArityCheck;
+#if ENABLE(DFG_JIT)
+ if (m_jitCode.jitType() == JITCode::DFGJIT) {
+ createDFGDataIfNecessary();
+ m_globalData->heap.m_dfgCodeBlocks.m_set.add(this);
+ }
+#endif
+ }
+ JITCode& getJITCode() { return m_jitCode; }
+ MacroAssemblerCodePtr getJITCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
+ JITCode::JITType getJITType() { return m_jitCode.jitType(); }
+ ExecutableMemoryHandle* executableMemory() { return getJITCode().getExecutableMemory(); }
+ virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*) = 0;
+ virtual void jettison() = 0;
+ virtual CodeBlock* replacement() = 0;
+ virtual bool canCompileWithDFG() = 0;
+ bool hasOptimizedReplacement()
+ {
+ ASSERT(getJITType() == JITCode::BaselineJIT);
+ bool result = replacement()->getJITType() > getJITType();
+#if !ASSERT_DISABLED
+ if (result)
+ ASSERT(replacement()->getJITType() == JITCode::DFGJIT);
+ else {
+ ASSERT(replacement()->getJITType() == JITCode::BaselineJIT);
+ ASSERT(replacement() == this);
+ }
+#endif
+ return result;
+ }
+#else
+ JITCode::JITType getJITType() { return JITCode::BaselineJIT; }
+#endif
+
+ ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }
+
+ void setGlobalData(JSGlobalData* globalData) { m_globalData = globalData; }
+ JSGlobalData* globalData() { return m_globalData; }
+
+ void setThisRegister(int thisRegister) { m_thisRegister = thisRegister; }
+ int thisRegister() const { return m_thisRegister; }
+
+ void setNeedsFullScopeChain(bool needsFullScopeChain) { m_needsFullScopeChain = needsFullScopeChain; }
+ bool needsFullScopeChain() const { return m_needsFullScopeChain; }
+ void setUsesEval(bool usesEval) { m_usesEval = usesEval; }
+ bool usesEval() const { return m_usesEval; }
+
+ void setArgumentsRegister(int argumentsRegister)
+ {
+ ASSERT(argumentsRegister != -1);
+ m_argumentsRegister = argumentsRegister;
+ ASSERT(usesArguments());
+ }
+ int argumentsRegister()
+ {
+ ASSERT(usesArguments());
+ return m_argumentsRegister;
+ }
+ void setActivationRegister(int activationRegister)
+ {
+ m_activationRegister = activationRegister;
+ }
+ int activationRegister()
+ {
+ ASSERT(needsFullScopeChain());
+ return m_activationRegister;
+ }
+ bool usesArguments() const { return m_argumentsRegister != -1; }
+
+ CodeType codeType() const { return m_codeType; }
+
+ SourceProvider* source() const { return m_source.get(); }
+ unsigned sourceOffset() const { return m_sourceOffset; }
+
+ size_t numberOfJumpTargets() const { return m_jumpTargets.size(); }
+ void addJumpTarget(unsigned jumpTarget) { m_jumpTargets.append(jumpTarget); }
+ unsigned jumpTarget(int index) const { return m_jumpTargets[index]; }
+ unsigned lastJumpTarget() const { return m_jumpTargets.last(); }
+
+ void createActivation(CallFrame*);
+
+ void clearEvalCache();
+
+#if ENABLE(INTERPRETER)
+ void addPropertyAccessInstruction(unsigned propertyAccessInstruction)
+ {
+ if (!m_globalData->canUseJIT())
+ m_propertyAccessInstructions.append(propertyAccessInstruction);
+ }
+ void addGlobalResolveInstruction(unsigned globalResolveInstruction)
+ {
+ if (!m_globalData->canUseJIT())
+ m_globalResolveInstructions.append(globalResolveInstruction);
+ }
+ bool hasGlobalResolveInstructionAtBytecodeOffset(unsigned bytecodeOffset);
+#endif
+#if ENABLE(JIT)
+ void setNumberOfStructureStubInfos(size_t size) { m_structureStubInfos.grow(size); }
+ size_t numberOfStructureStubInfos() const { return m_structureStubInfos.size(); }
+ StructureStubInfo& structureStubInfo(int index) { return m_structureStubInfos[index]; }
+
+ void addGlobalResolveInfo(unsigned globalResolveInstruction)
+ {
+ if (m_globalData->canUseJIT())
+ m_globalResolveInfos.append(GlobalResolveInfo(globalResolveInstruction));
+ }
+ GlobalResolveInfo& globalResolveInfo(int index) { return m_globalResolveInfos[index]; }
+ bool hasGlobalResolveInfoAtBytecodeOffset(unsigned bytecodeOffset);
+
+ void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.grow(size); }
+ size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
+ CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
+
+ void addMethodCallLinkInfos(unsigned n) { ASSERT(m_globalData->canUseJIT()); m_methodCallLinkInfos.grow(n); }
+ MethodCallLinkInfo& methodCallLinkInfo(int index) { return m_methodCallLinkInfos[index]; }
+#endif
+
+#if ENABLE(VALUE_PROFILER)
+ void setArgumentValueProfileSize(unsigned size)
+ {
+ m_argumentValueProfiles.resize(size);
+ }
+ unsigned numberOfArgumentValueProfiles()
+ {
+ return m_argumentValueProfiles.size();
+ }
+ ValueProfile* valueProfileForArgument(unsigned argumentIndex)
+ {
+ ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
+ ASSERT(result->m_bytecodeOffset == -1);
+ return result;
+ }
+
+ ValueProfile* addValueProfile(int bytecodeOffset)
+ {
+ ASSERT(bytecodeOffset != -1);
+ m_valueProfiles.append(ValueProfile(bytecodeOffset));
+ return &m_valueProfiles.last();
+ }
+ unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
+ ValueProfile* valueProfile(int index)
+ {
+ ValueProfile* result = &m_valueProfiles[index];
+ ASSERT(result->m_bytecodeOffset != -1);
+ return result;
+ }
+ ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
+ {
+ ValueProfile* result = WTF::genericBinarySearch<ValueProfile, int, getValueProfileBytecodeOffset>(m_valueProfiles, m_valueProfiles.size(), bytecodeOffset);
+ ASSERT(result->m_bytecodeOffset != -1);
+ return result;
+ }
+
+ unsigned totalNumberOfValueProfiles()
+ {
+ return numberOfArgumentValueProfiles() + numberOfValueProfiles();
+ }
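+        // The combined profile index space places argument profiles first:
+        // indices in [0, numberOfArgumentValueProfiles()) map to arguments, and
+        // the remainder map into m_valueProfiles.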
+ ValueProfile* getFromAllValueProfiles(unsigned index)
+ {
+ if (index < numberOfArgumentValueProfiles())
+ return valueProfileForArgument(index);
+ return valueProfile(index - numberOfArgumentValueProfiles());
+ }
+
+ RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
+ {
+ m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
+ return &m_rareCaseProfiles.last();
+ }
+ unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
+ RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
+ RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset)
+ {
+ return WTF::genericBinarySearch<RareCaseProfile, int, getRareCaseProfileBytecodeOffset>(m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset);
+ }
+
+ bool likelyToTakeSlowCase(int bytecodeOffset)
+ {
+ unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
+ }
+
+ bool couldTakeSlowCase(int bytecodeOffset)
+ {
+ unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ return value >= Options::couldTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::couldTakeSlowCaseThreshold;
+ }
+
+ RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
+ {
+ m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
+ return &m_specialFastCaseProfiles.last();
+ }
+ unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
+ RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
+ RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
+ {
+ return WTF::genericBinarySearch<RareCaseProfile, int, getRareCaseProfileBytecodeOffset>(m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset);
+ }
+
+ bool likelyToTakeSpecialFastCase(int bytecodeOffset)
+ {
+ unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(specialFastCaseCount) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
+ }
+
+ bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
+ {
+ unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ unsigned value = slowCaseCount - specialFastCaseCount;
+ return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
+ }
+
+ bool likelyToTakeAnySlowCase(int bytecodeOffset)
+ {
+ unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
+ unsigned value = slowCaseCount + specialFastCaseCount;
+ return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
+ }
+
+ unsigned executionEntryCount() const { return m_executionEntryCount; }
+#endif
+
+ unsigned globalResolveInfoCount() const
+ {
+#if ENABLE(JIT)
+ if (m_globalData->canUseJIT())
+ return m_globalResolveInfos.size();
+#endif
+ return 0;
+ }
+
+ // Exception handling support
+
+ size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
+        void addExceptionHandler(const HandlerInfo& handler) { createRareDataIfNecessary(); m_rareData->m_exceptionHandlers.append(handler); }
+ HandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }
+
+ void addExpressionInfo(const ExpressionRangeInfo& expressionInfo)
+ {
+ createRareDataIfNecessary();
+ m_rareData->m_expressionInfo.append(expressionInfo);
+ }
+
+ void addLineInfo(unsigned bytecodeOffset, int lineNo)
+ {
+ createRareDataIfNecessary();
+ Vector<LineInfo>& lineInfo = m_rareData->m_lineInfo;
+ if (!lineInfo.size() || lineInfo.last().lineNumber != lineNo) {
+ LineInfo info = { bytecodeOffset, lineNo };
+ lineInfo.append(info);
+ }
+ }
+
+ bool hasExpressionInfo() { return m_rareData && m_rareData->m_expressionInfo.size(); }
+ bool hasLineInfo() { return m_rareData && m_rareData->m_lineInfo.size(); }
+        // We only generate exception handling info if the user is debugging
+        // (and may want line number info), or if the function contains an exception handler.
+ bool needsCallReturnIndices()
+ {
+ return m_rareData &&
+ (m_rareData->m_expressionInfo.size() || m_rareData->m_lineInfo.size() || m_rareData->m_exceptionHandlers.size());
+ }
+
+#if ENABLE(JIT)
+ Vector<CallReturnOffsetToBytecodeOffset>& callReturnIndexVector()
+ {
+ createRareDataIfNecessary();
+ return m_rareData->m_callReturnIndexVector;
+ }
+#endif
+
+#if ENABLE(DFG_JIT)
+ SegmentedVector<InlineCallFrame, 4>& inlineCallFrames()
+ {
+ createRareDataIfNecessary();
+ return m_rareData->m_inlineCallFrames;
+ }
+
+ Vector<CodeOriginAtCallReturnOffset>& codeOrigins()
+ {
+ createRareDataIfNecessary();
+ return m_rareData->m_codeOrigins;
+ }
+
+ // Having code origins implies that there has been some inlining.
+ bool hasCodeOrigins()
+ {
+ return m_rareData && !!m_rareData->m_codeOrigins.size();
+ }
+
+ CodeOrigin codeOriginForReturn(ReturnAddressPtr returnAddress)
+ {
+ ASSERT(hasCodeOrigins());
+ return binarySearch<CodeOriginAtCallReturnOffset, unsigned, getCallReturnOffsetForCodeOrigin>(codeOrigins().begin(), codeOrigins().size(), getJITCode().offsetOf(returnAddress.value()))->codeOrigin;
+ }
+
+ bool addFrequentExitSite(const DFG::FrequentExitSite& site)
+ {
+ ASSERT(getJITType() == JITCode::BaselineJIT);
+ return m_exitProfile.add(site);
+ }
+
+ DFG::ExitProfile& exitProfile() { return m_exitProfile; }
+#endif
+
+ // Constant Pool
+
+ size_t numberOfIdentifiers() const { return m_identifiers.size(); }
+ void addIdentifier(const Identifier& i) { return m_identifiers.append(i); }
+ Identifier& identifier(int index) { return m_identifiers[index]; }
+
+ size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
+ void addConstant(JSValue v)
+ {
+ m_constantRegisters.append(WriteBarrier<Unknown>());
+ m_constantRegisters.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), v);
+ }
+ WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
+ ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
+ ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
+
+ unsigned addFunctionDecl(FunctionExecutable* n)
+ {
+ unsigned size = m_functionDecls.size();
+ m_functionDecls.append(WriteBarrier<FunctionExecutable>());
+ m_functionDecls.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), n);
+ return size;
+ }
+ FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
+ int numberOfFunctionDecls() { return m_functionDecls.size(); }
+ unsigned addFunctionExpr(FunctionExecutable* n)
+ {
+ unsigned size = m_functionExprs.size();
+ m_functionExprs.append(WriteBarrier<FunctionExecutable>());
+ m_functionExprs.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), n);
+ return size;
+ }
+ FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
+
+ unsigned addRegExp(RegExp* r)
+ {
+ createRareDataIfNecessary();
+ unsigned size = m_rareData->m_regexps.size();
+ m_rareData->m_regexps.append(WriteBarrier<RegExp>(*m_globalData, ownerExecutable(), r));
+ return size;
+ }
+ unsigned numberOfRegExps() const
+ {
+ if (!m_rareData)
+ return 0;
+ return m_rareData->m_regexps.size();
+ }
+ RegExp* regexp(int index) const { ASSERT(m_rareData); return m_rareData->m_regexps[index].get(); }
+
+ unsigned addConstantBuffer(unsigned length)
+ {
+ createRareDataIfNecessary();
+ unsigned size = m_rareData->m_constantBuffers.size();
+ m_rareData->m_constantBuffers.append(Vector<JSValue>(length));
+ return size;
+ }
+
+ JSValue* constantBuffer(unsigned index)
+ {
+ ASSERT(m_rareData);
+ return m_rareData->m_constantBuffers[index].data();
+ }
+
+ JSGlobalObject* globalObject() { return m_globalObject.get(); }
+
+ JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
+ {
+ if (!codeOrigin.inlineCallFrame)
+ return globalObject();
+ // FIXME: if we ever inline based on executable not function, this code will need to change.
+ return codeOrigin.inlineCallFrame->callee->scope()->globalObject.get();
+ }
+
+ // Jump Tables
+
+ size_t numberOfImmediateSwitchJumpTables() const { return m_rareData ? m_rareData->m_immediateSwitchJumpTables.size() : 0; }
+ SimpleJumpTable& addImmediateSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_immediateSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_immediateSwitchJumpTables.last(); }
+ SimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; }
+
+ size_t numberOfCharacterSwitchJumpTables() const { return m_rareData ? m_rareData->m_characterSwitchJumpTables.size() : 0; }
+ SimpleJumpTable& addCharacterSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_characterSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_characterSwitchJumpTables.last(); }
+ SimpleJumpTable& characterSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; }
+
+ size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
+ StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
+ StringJumpTable& stringSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }
+
+
+ SymbolTable* symbolTable() { return m_symbolTable; }
+ SharedSymbolTable* sharedSymbolTable() { ASSERT(m_codeType == FunctionCode); return static_cast<SharedSymbolTable*>(m_symbolTable); }
+
+ EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }
+
+ void shrinkToFit();
+
+ void copyPostParseDataFrom(CodeBlock* alternative);
+ void copyPostParseDataFromAlternative();
+
+ // Functions for controlling when tiered compilation kicks in. This
+ // controls both when the optimizing compiler is invoked and when OSR
+ // entry happens. Two triggers exist: the loop trigger and the return
+ // trigger. In either case, when an addition to m_executeCounter
+ // causes it to become non-negative, the optimizing compiler is
+ // invoked. This includes a fast check to see if this CodeBlock has
+ // already been optimized (i.e. replacement() returns a CodeBlock
+ // that was optimized with a higher tier JIT than this one). In the
+ // case of the loop trigger, if the optimized compilation succeeds
+ // (or has already succeeded in the past) then OSR is attempted to
+ // redirect program flow into the optimized code.
+
+ // These functions are called from within the optimization triggers,
+ // and are used as a single point at which we define the heuristics
+ // for how much warm-up is mandated before the next optimization
+ // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
+ // as this is called from the CodeBlock constructor.
+
+ // When we observe a lot of speculation failures, we trigger a
+ // reoptimization. But each time, we increase the optimization trigger
+ // to avoid thrashing.
+ unsigned reoptimizationRetryCounter() const
+ {
+ ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax);
+ return m_reoptimizationRetryCounter;
+ }
+
+ void countReoptimization()
+ {
+ m_reoptimizationRetryCounter++;
+ if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax)
+ m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax;
+ }
+
+ int32_t counterValueForOptimizeAfterWarmUp()
+ {
+ return Options::executionCounterValueForOptimizeAfterWarmUp << reoptimizationRetryCounter();
+ }
+
+ int32_t counterValueForOptimizeAfterLongWarmUp()
+ {
+ return Options::executionCounterValueForOptimizeAfterLongWarmUp << reoptimizationRetryCounter();
+ }
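+
+ // As a worked example of the backoff encoded above (the numbers are
+ // illustrative assumptions, not the actual Options defaults): if the
+ // base counter value were -1000 and reoptimizationRetryCounter() were 2,
+ // the counter would be reset to -1000 << 2 = -4000, so every recorded
+ // reoptimization retry doubles the warm-up required before the trigger
+ // fires again:
+ //
+ //     int32_t base = -1000;               // hypothetical Options value
+ //     unsigned retries = 2;               // two prior reoptimizations
+ //     int32_t counter = base << retries;  // -4000; trigger fires at >= 0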
+
+ int32_t* addressOfExecuteCounter()
+ {
+ return &m_executeCounter;
+ }
+
+ static ptrdiff_t offsetOfExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_executeCounter); }
+
+ int32_t executeCounter() const { return m_executeCounter; }
+
+ unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }
+
+ // Call this to force the next optimization trigger to fire. This is
+ // rarely wise, since optimization triggers are typically more
+ // expensive than executing baseline code.
+ void optimizeNextInvocation()
+ {
+ m_executeCounter = Options::executionCounterValueForOptimizeNextInvocation;
+ }
+
+ // Call this to prevent optimization from happening again. Note that
+ // optimization will still happen after roughly 2^29 invocations,
+ // so this is really meant to delay that as much as possible. This
+ // is called if optimization failed, and we expect it to fail in
+ // the future as well.
+ void dontOptimizeAnytimeSoon()
+ {
+ m_executeCounter = Options::executionCounterValueForDontOptimizeAnytimeSoon;
+ }
+
+ // Call this to reinitialize the counter to its starting state,
+ // forcing a warm-up to happen before the next optimization trigger
+ // fires. This is called in the CodeBlock constructor. It also
+ // makes sense to call this if an OSR exit occurred. Note that
+ // OSR exit code is generated code, so the execute counter value
+ // that this corresponds to is also available directly.
+ void optimizeAfterWarmUp()
+ {
+ m_executeCounter = counterValueForOptimizeAfterWarmUp();
+ }
+
+ // Call this to force an optimization trigger to fire only after
+ // a lot of warm-up.
+ void optimizeAfterLongWarmUp()
+ {
+ m_executeCounter = counterValueForOptimizeAfterLongWarmUp();
+ }
+
+ // Call this to cause an optimization trigger to fire soon, but
+ // not necessarily the next one. This makes sense if optimization
+ // succeeds. Successful optimization means that all calls are
+ // relinked to the optimized code, so this only affects call
+ // frames that are still executing this CodeBlock. The value here
+ // is tuned to strike a balance between the cost of OSR entry
+ // (which is too high to warrant making every loop back edge to
+ // trigger OSR immediately) and the cost of executing baseline
+ // code (which is high enough that we don't necessarily want to
+ // have a full warm-up). The intuition for calling this instead of
+ // optimizeNextInvocation() is for the case of recursive functions
+ // with loops. Consider that there may be N call frames of some
+ // recursive function, for a reasonably large value of N. The top
+ // one triggers optimization, and then returns, and then all of
+ // the others return. We don't want optimization to be triggered on
+ // each return, as that would be superfluous. It only makes sense
+ // to trigger optimization if one of those functions becomes hot
+ // in the baseline code.
+ void optimizeSoon()
+ {
+ m_executeCounter = Options::executionCounterValueForOptimizeSoon << reoptimizationRetryCounter();
+ }
+
+ // The speculative JIT tracks its success rate, so that we can
+ // decide when to reoptimize. It's interesting to note that these
+ // counters may overflow without any protection. The success
+ // counter will overflow before the fail one does, because the
+ // fail one is used as a trigger to reoptimize. So the worst case
+ // is that the success counter overflows and we reoptimize without
+ // needing to. But this is harmless. If a method really did
+ // execute 2^32 times then compiling it again probably won't hurt
+ // anyone.
+
+ void countSpeculationSuccess()
+ {
+ m_speculativeSuccessCounter++;
+ }
+
+ void countSpeculationFailure()
+ {
+ m_speculativeFailCounter++;
+ }
+
+ uint32_t speculativeSuccessCounter() const { return m_speculativeSuccessCounter; }
+ uint32_t speculativeFailCounter() const { return m_speculativeFailCounter; }
+
+ uint32_t* addressOfSpeculativeSuccessCounter() { return &m_speculativeSuccessCounter; }
+ uint32_t* addressOfSpeculativeFailCounter() { return &m_speculativeFailCounter; }
+
+ static ptrdiff_t offsetOfSpeculativeSuccessCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeSuccessCounter); }
+ static ptrdiff_t offsetOfSpeculativeFailCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeFailCounter); }
+
+#if ENABLE(JIT)
+ // The number of failures that triggers the use of the ratio.
+ unsigned largeFailCountThreshold() { return Options::largeFailCountThresholdBase << baselineVersion()->reoptimizationRetryCounter(); }
+ unsigned largeFailCountThresholdForLoop() { return Options::largeFailCountThresholdBaseForLoop << baselineVersion()->reoptimizationRetryCounter(); }
+
+ bool shouldReoptimizeNow()
+ {
+ return Options::desiredSpeculativeSuccessFailRatio * speculativeFailCounter() >= speculativeSuccessCounter() && speculativeFailCounter() >= largeFailCountThreshold();
+ }
+
+ bool shouldReoptimizeFromLoopNow()
+ {
+ return Options::desiredSpeculativeSuccessFailRatio * speculativeFailCounter() >= speculativeSuccessCounter() && speculativeFailCounter() >= largeFailCountThresholdForLoop();
+ }
+#endif
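+
+ // To make the ratio test concrete (the numbers are assumptions, not the
+ // real Options values): with desiredSpeculativeSuccessFailRatio = 6 and
+ // largeFailCountThreshold() = 100, a CodeBlock with 120 failures and 500
+ // successes reoptimizes, since 6 * 120 = 720 >= 500 and 120 >= 100; the
+ // same block with 5000 successes keeps running, since 720 < 5000.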
+
+#if ENABLE(VALUE_PROFILER)
+ bool shouldOptimizeNow();
+#else
+ bool shouldOptimizeNow() { return false; }
+#endif
+
+#if ENABLE(JIT)
+ void reoptimize()
+ {
+ ASSERT(replacement() != this);
+ ASSERT(replacement()->alternative() == this);
+ replacement()->tallyFrequentExitSites();
+ replacement()->jettison();
+ countReoptimization();
+ optimizeAfterWarmUp();
+ }
+#endif
+
+#if ENABLE(VERBOSE_VALUE_PROFILE)
+ void dumpValueProfiles();
+#endif
+
+ // FIXME: Make these remaining members private.
+
+ int m_numCalleeRegisters;
+ int m_numVars;
+ int m_numCapturedVars;
+ int m_numParameters;
+ bool m_isConstructor;
+
+ // This is public because otherwise we would have many friends.
+ bool m_shouldDiscardBytecode;
+
+ protected:
+ virtual void visitWeakReferences(SlotVisitor&);
+ virtual void finalizeUnconditionally();
+
+ private:
+ friend class DFGCodeBlocks;
+
+#if ENABLE(DFG_JIT)
+ void tallyFrequentExitSites();
+#else
+ void tallyFrequentExitSites() { }
+#endif
+
+#if !defined(NDEBUG) || ENABLE(OPCODE_SAMPLING)
+ void dump(ExecState*, const Vector<Instruction>::const_iterator& begin, Vector<Instruction>::const_iterator&) const;
+
+ CString registerName(ExecState*, int r) const;
+ void printUnaryOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
+ void printBinaryOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
+ void printConditionalJump(ExecState*, const Vector<Instruction>::const_iterator&, Vector<Instruction>::const_iterator&, int location, const char* op) const;
+ void printGetByIdOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
+ void printPutByIdOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
+#endif
+ void visitStructures(SlotVisitor&, Instruction* vPC) const;
+
+#if ENABLE(DFG_JIT)
+ bool shouldImmediatelyAssumeLivenessDuringScan()
+ {
+ // Null m_dfgData means that this is a baseline JIT CodeBlock. Baseline JIT
+ // CodeBlocks don't need to be jettisoned when their weak references go
+ // stale. So if a baseline JIT CodeBlock gets scanned, we can assume that
+ // this means that it's live.
+ if (!m_dfgData)
+ return true;
+
+ // For simplicity, we don't attempt to jettison code blocks during GC if
+ // they are executing. Instead we strongly mark their weak references to
+ // allow them to continue to execute soundly.
+ if (m_dfgData->mayBeExecuting)
+ return true;
+
+ return false;
+ }
+#else
+ bool shouldImmediatelyAssumeLivenessDuringScan() { return true; }
+#endif
+
+ void performTracingFixpointIteration(SlotVisitor&);
+
+ void stronglyVisitStrongReferences(SlotVisitor&);
+ void stronglyVisitWeakReferences(SlotVisitor&);
+
+ void createRareDataIfNecessary()
+ {
+ if (!m_rareData)
+ m_rareData = adoptPtr(new RareData);
+ }
+
+ WriteBarrier<ScriptExecutable> m_ownerExecutable;
+ JSGlobalData* m_globalData;
+
+ struct Instructions : public RefCounted<Instructions> {
+ Vector<Instruction> m_instructions;
+ };
+ RefPtr<Instructions> m_instructions;
+ unsigned m_instructionCount;
+
+ int m_thisRegister;
+ int m_argumentsRegister;
+ int m_activationRegister;
+
+ bool m_needsFullScopeChain;
+ bool m_usesEval;
+ bool m_isNumericCompareFunction;
+ bool m_isStrictMode;
+
+ CodeType m_codeType;
+
+ RefPtr<SourceProvider> m_source;
+ unsigned m_sourceOffset;
+
+#if ENABLE(INTERPRETER)
+ Vector<unsigned> m_propertyAccessInstructions;
+ Vector<unsigned> m_globalResolveInstructions;
+#endif
+#if ENABLE(JIT)
+ Vector<StructureStubInfo> m_structureStubInfos;
+ Vector<GlobalResolveInfo> m_globalResolveInfos;
+ Vector<CallLinkInfo> m_callLinkInfos;
+ Vector<MethodCallLinkInfo> m_methodCallLinkInfos;
+ JITCode m_jitCode;
+ MacroAssemblerCodePtr m_jitCodeWithArityCheck;
+ SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo> > m_incomingCalls;
+#endif
+#if ENABLE(DFG_JIT)
+ OwnPtr<CompactJITCodeMap> m_jitCodeMap;
+
+ struct WeakReferenceTransition {
+ WeakReferenceTransition() { }
+
+ WeakReferenceTransition(JSGlobalData& globalData, JSCell* owner, JSCell* codeOrigin, JSCell* from, JSCell* to)
+ : m_from(globalData, owner, from)
+ , m_to(globalData, owner, to)
+ {
+ if (!!codeOrigin)
+ m_codeOrigin.set(globalData, owner, codeOrigin);
+ }
+
+ WriteBarrier<JSCell> m_codeOrigin;
+ WriteBarrier<JSCell> m_from;
+ WriteBarrier<JSCell> m_to;
+ };
+
+ struct DFGData {
+ DFGData()
+ : mayBeExecuting(false)
+ , isJettisoned(false)
+ {
+ }
+
+ Vector<DFG::OSREntryData> osrEntry;
+ SegmentedVector<DFG::OSRExit, 8> osrExit;
+ Vector<DFG::SpeculationRecovery> speculationRecovery;
+ Vector<WeakReferenceTransition> transitions;
+ Vector<WriteBarrier<JSCell> > weakReferences;
+ bool mayBeExecuting;
+ bool isJettisoned;
+ bool livenessHasBeenProved; // Initialized and used on every GC.
+ bool allTransitionsHaveBeenMarked; // Initialized and used on every GC.
+ };
+
+ OwnPtr<DFGData> m_dfgData;
+
+ // This is relevant to non-DFG code blocks that serve as the profiled code block
+ // for DFG code blocks.
+ DFG::ExitProfile m_exitProfile;
+#endif
+#if ENABLE(VALUE_PROFILER)
+ Vector<ValueProfile> m_argumentValueProfiles;
+ SegmentedVector<ValueProfile, 8> m_valueProfiles;
+ SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
+ SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
+ unsigned m_executionEntryCount;
+#endif
+
+ Vector<unsigned> m_jumpTargets;
+ Vector<unsigned> m_loopTargets;
+
+ // Constant Pool
+ Vector<Identifier> m_identifiers;
+ COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
+ Vector<WriteBarrier<Unknown> > m_constantRegisters;
+ Vector<WriteBarrier<FunctionExecutable> > m_functionDecls;
+ Vector<WriteBarrier<FunctionExecutable> > m_functionExprs;
+
+ SymbolTable* m_symbolTable;
+
+ OwnPtr<CodeBlock> m_alternative;
+
+ int32_t m_executeCounter;
+ uint32_t m_speculativeSuccessCounter;
+ uint32_t m_speculativeFailCounter;
+ uint8_t m_optimizationDelayCounter;
+ uint8_t m_reoptimizationRetryCounter;
+
+ struct RareData {
+ WTF_MAKE_FAST_ALLOCATED;
+ public:
+ Vector<HandlerInfo> m_exceptionHandlers;
+
+ // Rare Constants
+ Vector<WriteBarrier<RegExp> > m_regexps;
+
+ // Buffers used for large array literals
+ Vector<Vector<JSValue> > m_constantBuffers;
+
+ // Jump Tables
+ Vector<SimpleJumpTable> m_immediateSwitchJumpTables;
+ Vector<SimpleJumpTable> m_characterSwitchJumpTables;
+ Vector<StringJumpTable> m_stringSwitchJumpTables;
+
+ EvalCodeCache m_evalCodeCache;
+
+ // Expression info - present if debugging.
+ Vector<ExpressionRangeInfo> m_expressionInfo;
+ // Line info - present if profiling or debugging.
+ Vector<LineInfo> m_lineInfo;
+#if ENABLE(JIT)
+ Vector<CallReturnOffsetToBytecodeOffset> m_callReturnIndexVector;
+#endif
+#if ENABLE(DFG_JIT)
+ SegmentedVector<InlineCallFrame, 4> m_inlineCallFrames;
+ Vector<CodeOriginAtCallReturnOffset> m_codeOrigins;
+#endif
+ };
+#if COMPILER(MSVC)
+ friend void WTF::deleteOwnedPtr<RareData>(RareData*);
+#endif
+ OwnPtr<RareData> m_rareData;
+ };
+
+ // Program code is not marked by any function, so we make the global object
+ // responsible for marking it.
+
+ class GlobalCodeBlock : public CodeBlock {
+ protected:
+ GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
+ : CodeBlock(CopyParsedBlock, other, &m_unsharedSymbolTable)
+ , m_unsharedSymbolTable(other.m_unsharedSymbolTable)
+ {
+ }
+
+ GlobalCodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, PassOwnPtr<CodeBlock> alternative)
+ : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, &m_unsharedSymbolTable, false, alternative)
+ {
+ }
+
+ private:
+ SymbolTable m_unsharedSymbolTable;
+ };
+
+ class ProgramCodeBlock : public GlobalCodeBlock {
+ public:
+ ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
+ : GlobalCodeBlock(CopyParsedBlock, other)
+ {
+ }
+
+ ProgramCodeBlock(ProgramExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, PassOwnPtr<CodeBlock> alternative)
+ : GlobalCodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, 0, alternative)
+ {
+ }
+
+#if ENABLE(JIT)
+ protected:
+ virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
+ virtual void jettison();
+ virtual CodeBlock* replacement();
+ virtual bool canCompileWithDFG();
+#endif
+ };
+
+ class EvalCodeBlock : public GlobalCodeBlock {
+ public:
+ EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
+ : GlobalCodeBlock(CopyParsedBlock, other)
+ , m_baseScopeDepth(other.m_baseScopeDepth)
+ , m_variables(other.m_variables)
+ {
+ }
+
+ EvalCodeBlock(EvalExecutable* ownerExecutable, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, int baseScopeDepth, PassOwnPtr<CodeBlock> alternative)
+ : GlobalCodeBlock(ownerExecutable, EvalCode, globalObject, sourceProvider, 0, alternative)
+ , m_baseScopeDepth(baseScopeDepth)
+ {
+ }
+
+ int baseScopeDepth() const { return m_baseScopeDepth; }
+
+ const Identifier& variable(unsigned index) { return m_variables[index]; }
+ unsigned numVariables() { return m_variables.size(); }
+ void adoptVariables(Vector<Identifier>& variables)
+ {
+ ASSERT(m_variables.isEmpty());
+ m_variables.swap(variables);
+ }
+
+#if ENABLE(JIT)
+ protected:
+ virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
+ virtual void jettison();
+ virtual CodeBlock* replacement();
+ virtual bool canCompileWithDFG();
+#endif
+
+ private:
+ int m_baseScopeDepth;
+ Vector<Identifier> m_variables;
+ };
+
+ class FunctionCodeBlock : public CodeBlock {
+ public:
+ FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
+ : CodeBlock(CopyParsedBlock, other, other.sharedSymbolTable())
+ {
+ // The fact that we have to do this is yucky, but is necessary because of the
+ // class hierarchy issues described in the comment block for the main
+ // constructor, below.
+ sharedSymbolTable()->ref();
+ }
+
+ // Rather than using the usual RefCounted::create idiom for SharedSymbolTable, we just use new,
+ // as we need to initialise the CodeBlock before we can initialise any RefPtr to hold the shared
+ // symbol table. So we pass a raw pointer with a ref count of 1 and manually deref in the
+ // destructor.
+ FunctionCodeBlock(FunctionExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, bool isConstructor, PassOwnPtr<CodeBlock> alternative = nullptr)
+ : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, SharedSymbolTable::create().leakRef(), isConstructor, alternative)
+ {
+ }
+ ~FunctionCodeBlock()
+ {
+ sharedSymbolTable()->deref();
+ }
+
+#if ENABLE(JIT)
+ protected:
+ virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
+ virtual void jettison();
+ virtual CodeBlock* replacement();
+ virtual bool canCompileWithDFG();
+#endif
+ };
+
+ // Use this if you want to copy a code block and you're paranoid about a GC
+ // happening.
+ class BytecodeDestructionBlocker {
+ public:
+ BytecodeDestructionBlocker(CodeBlock* codeBlock)
+ : m_codeBlock(codeBlock)
+ , m_oldValueOfShouldDiscardBytecode(codeBlock->m_shouldDiscardBytecode)
+ {
+ codeBlock->m_shouldDiscardBytecode = false;
+ }
+
+ ~BytecodeDestructionBlocker()
+ {
+ m_codeBlock->m_shouldDiscardBytecode = m_oldValueOfShouldDiscardBytecode;
+ }
+
+ private:
+ CodeBlock* m_codeBlock;
+ bool m_oldValueOfShouldDiscardBytecode;
+ };
+
+ inline Register& ExecState::r(int index)
+ {
+ CodeBlock* codeBlock = this->codeBlock();
+ if (codeBlock->isConstantRegisterIndex(index))
+ return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
+ return this[index];
+ }
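+
+ // Constant registers live in the CodeBlock's constant pool rather than in
+ // the register file, so r() redirects any index at or above
+ // FirstConstantRegisterIndex into m_constantRegisters. Sketching the two
+ // cases (B here stands for whatever FirstConstantRegisterIndex is):
+ //
+ //     Register& local = exec->r(2);      // plain frame-relative slot
+ //     Register& konst = exec->r(B + 3);  // aliased into the constant pool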
+
+ inline Register& ExecState::uncheckedR(int index)
+ {
+ ASSERT(index < FirstConstantRegisterIndex);
+ return this[index];
+ }
+
+#if ENABLE(DFG_JIT)
+ inline bool ExecState::isInlineCallFrame()
+ {
+ if (LIKELY(!codeBlock() || codeBlock()->getJITType() != JITCode::DFGJIT))
+ return false;
+ return isInlineCallFrameSlow();
+ }
+#endif
+
+#if ENABLE(DFG_JIT)
+ inline void DFGCodeBlocks::mark(void* candidateCodeBlock)
+ {
+ // We have to check for 0 and -1 because those are used by the HashMap as markers.
+ uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);
+
+ // This checks for both of those nasty cases in one go.
+ // 0 + 1 = 1
+ // -1 + 1 = 0
+ if (value + 1 <= 1)
+ return;
+
+ HashSet<CodeBlock*>::iterator iter = m_set.find(static_cast<CodeBlock*>(candidateCodeBlock));
+ if (iter == m_set.end())
+ return;
+
+ (*iter)->m_dfgData->mayBeExecuting = true;
+ }
+#endif
+
+} // namespace JSC
+
+#endif // CodeBlock_h
diff --git a/Source/JavaScriptCore/bytecode/CodeOrigin.h b/Source/JavaScriptCore/bytecode/CodeOrigin.h
new file mode 100644
index 000000000..7b6ce7d48
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/CodeOrigin.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CodeOrigin_h
+#define CodeOrigin_h
+
+#include "ValueRecovery.h"
+#include "WriteBarrier.h"
+#include <wtf/StdLibExtras.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+struct InlineCallFrame;
+class ExecutableBase;
+class JSFunction;
+
+struct CodeOrigin {
+ uint32_t bytecodeIndex;
+ InlineCallFrame* inlineCallFrame;
+
+ CodeOrigin()
+ : bytecodeIndex(std::numeric_limits<uint32_t>::max())
+ , inlineCallFrame(0)
+ {
+ }
+
+ explicit CodeOrigin(uint32_t bytecodeIndex)
+ : bytecodeIndex(bytecodeIndex)
+ , inlineCallFrame(0)
+ {
+ }
+
+ explicit CodeOrigin(uint32_t bytecodeIndex, InlineCallFrame* inlineCallFrame)
+ : bytecodeIndex(bytecodeIndex)
+ , inlineCallFrame(inlineCallFrame)
+ {
+ }
+
+ bool isSet() const { return bytecodeIndex != std::numeric_limits<uint32_t>::max(); }
+
+ // The inline depth is the depth of the inline stack, so 1 = not inlined,
+ // 2 = inlined one deep, etc.
+ unsigned inlineDepth() const;
+
+ // If the code origin corresponds to inlined code, gives you the heap object that
+ // would have owned the code if it had not been inlined. Otherwise returns 0.
+ ExecutableBase* codeOriginOwner() const;
+
+ static unsigned inlineDepthForCallFrame(InlineCallFrame*);
+
+ bool operator==(const CodeOrigin& other) const;
+
+ bool operator!=(const CodeOrigin& other) const { return !(*this == other); }
+
+#ifndef NDEBUG
+ // Get the inline stack. This is slow, and is intended for debugging only.
+ Vector<CodeOrigin> inlineStack() const;
+#endif
+};
+
+struct InlineCallFrame {
+ Vector<ValueRecovery> arguments;
+ WriteBarrier<ExecutableBase> executable;
+ WriteBarrier<JSFunction> callee;
+ CodeOrigin caller;
+ unsigned stackOffset : 31;
+ bool isCall : 1;
+};
+
+struct CodeOriginAtCallReturnOffset {
+ CodeOrigin codeOrigin;
+ unsigned callReturnOffset;
+};
+
+inline unsigned CodeOrigin::inlineDepthForCallFrame(InlineCallFrame* inlineCallFrame)
+{
+ unsigned result = 1;
+ for (InlineCallFrame* current = inlineCallFrame; current; current = current->caller.inlineCallFrame)
+ result++;
+ return result;
+}
+
+inline unsigned CodeOrigin::inlineDepth() const
+{
+ return inlineDepthForCallFrame(inlineCallFrame);
+}
+
+inline bool CodeOrigin::operator==(const CodeOrigin& other) const
+{
+ return bytecodeIndex == other.bytecodeIndex
+ && inlineCallFrame == other.inlineCallFrame;
+}
+
+#ifndef NDEBUG
+// Get the inline stack. This is slow, and is intended for debugging only.
+inline Vector<CodeOrigin> CodeOrigin::inlineStack() const
+{
+ Vector<CodeOrigin> result(inlineDepth());
+ result.last() = *this;
+ unsigned index = result.size() - 2;
+ for (InlineCallFrame* current = inlineCallFrame; current; current = current->caller.inlineCallFrame)
+ result[index--] = current->caller;
+ return result;
+}
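+
+// The resulting vector is ordered outermost-first: result[0] is the machine
+// (non-inlined) code origin and result.last() is *this, so a call inlined one
+// deep yields inlineDepth() == 2 and a two-entry stack.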
+#endif
+
+inline unsigned getCallReturnOffsetForCodeOrigin(CodeOriginAtCallReturnOffset* data)
+{
+ return data->callReturnOffset;
+}
+
+inline ExecutableBase* CodeOrigin::codeOriginOwner() const
+{
+ if (!inlineCallFrame)
+ return 0;
+ return inlineCallFrame->executable.get();
+}
+
+} // namespace JSC
+
+#endif // CodeOrigin_h
+
diff --git a/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp b/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp
new file mode 100644
index 000000000..69fdc3737
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGExitProfile.h"
+
+#include <wtf/PassOwnPtr.h>
+
+namespace JSC { namespace DFG {
+
+ExitProfile::ExitProfile() { }
+ExitProfile::~ExitProfile() { }
+
+bool ExitProfile::add(const FrequentExitSite& site)
+{
+ // If we've never seen any frequent exits then create the list and put this site
+ // into it.
+ if (!m_frequentExitSites) {
+ m_frequentExitSites = adoptPtr(new Vector<FrequentExitSite>());
+ m_frequentExitSites->append(site);
+ return true;
+ }
+
+ // Don't add it if it's already there. This is O(n), but that's OK, because we
+ // know that the total number of places where code exits tends not to be large,
+ // and this code is only used when recompilation is triggered.
+ for (unsigned i = 0; i < m_frequentExitSites->size(); ++i) {
+ if (m_frequentExitSites->at(i) == site)
+ return false;
+ }
+
+ m_frequentExitSites->append(site);
+ return true;
+}
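+
+// A sketch of how a recording site might use this (the accessor name is an
+// assumption; only add() is defined here): when recompilation is triggered,
+// each frequently-taken OSR exit is folded into the baseline CodeBlock's
+// profile so the next compile can avoid the failing speculation:
+//
+//     if (exitIsFrequent)
+//         profiledBlock->exitProfile().add(FrequentExitSite(bytecodeOffset, BadCache));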
+
+QueryableExitProfile::QueryableExitProfile(const ExitProfile& profile)
+{
+ if (!profile.m_frequentExitSites)
+ return;
+
+ for (unsigned i = 0; i < profile.m_frequentExitSites->size(); ++i)
+ m_frequentExitSites.add(profile.m_frequentExitSites->at(i));
+}
+
+QueryableExitProfile::~QueryableExitProfile() { }
+
+} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/bytecode/DFGExitProfile.h b/Source/JavaScriptCore/bytecode/DFGExitProfile.h
new file mode 100644
index 000000000..f18b69a54
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/DFGExitProfile.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGExitProfile_h
+#define DFGExitProfile_h
+
+#include <wtf/HashSet.h>
+#include <wtf/OwnPtr.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace DFG {
+
+enum ExitKind {
+ ExitKindUnset,
+ BadType, // We exited because a type prediction was wrong.
+ BadCache, // We exited because an inline cache was wrong.
+ Overflow, // We exited because of overflow.
+ NegativeZero, // We exited because we encountered negative zero.
+ Uncountable, // We exited for none of the above reasons, and we should not count it. Most uses of this should be viewed as a FIXME.
+};
+
+#ifndef NDEBUG
+inline const char* exitKindToString(ExitKind kind)
+{
+ switch (kind) {
+ case ExitKindUnset:
+ return "Unset";
+ case BadType:
+ return "BadType";
+ case BadCache:
+ return "BadCache";
+ case Overflow:
+ return "Overflow";
+ case NegativeZero:
+ return "NegativeZero";
+ case Uncountable:
+ return "Uncountable";
+ default:
+ return "Unknown";
+ }
+}
+#endif
+
+inline bool exitKindIsCountable(ExitKind kind)
+{
+ switch (kind) {
+ case ExitKindUnset:
+ ASSERT_NOT_REACHED();
+ case BadType:
+ case Uncountable:
+ return false;
+ default:
+ return true;
+ }
+}
+
+class FrequentExitSite {
+public:
+ FrequentExitSite()
+ : m_bytecodeOffset(0) // 0 = empty value
+ , m_kind(ExitKindUnset)
+ {
+ }
+
+ FrequentExitSite(WTF::HashTableDeletedValueType)
+ : m_bytecodeOffset(1) // 1 = deleted value
+ , m_kind(ExitKindUnset)
+ {
+ }
+
+ explicit FrequentExitSite(unsigned bytecodeOffset, ExitKind kind)
+ : m_bytecodeOffset(bytecodeOffset)
+ , m_kind(kind)
+ {
+ ASSERT(exitKindIsCountable(kind));
+ }
+
+ bool operator!() const
+ {
+ return m_kind == ExitKindUnset;
+ }
+
+ bool operator==(const FrequentExitSite& other) const
+ {
+ return m_bytecodeOffset == other.m_bytecodeOffset
+ && m_kind == other.m_kind;
+ }
+
+ unsigned hash() const
+ {
+ return WTF::intHash(m_bytecodeOffset) + m_kind;
+ }
+
+ unsigned bytecodeOffset() const { return m_bytecodeOffset; }
+ ExitKind kind() const { return m_kind; }
+
+ bool isHashTableDeletedValue() const
+ {
+ return m_kind == ExitKindUnset && m_bytecodeOffset;
+ }
+
+private:
+ unsigned m_bytecodeOffset;
+ ExitKind m_kind;
+};
+
+struct FrequentExitSiteHash {
+ static unsigned hash(const FrequentExitSite& key) { return key.hash(); }
+ static bool equal(const FrequentExitSite& a, const FrequentExitSite& b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} } // namespace JSC::DFG
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::DFG::FrequentExitSite> {
+ typedef JSC::DFG::FrequentExitSiteHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::DFG::FrequentExitSite> : SimpleClassHashTraits<JSC::DFG::FrequentExitSite> { };
+
+} // namespace WTF
+
+namespace JSC { namespace DFG {
+
+class QueryableExitProfile;
+
+class ExitProfile {
+public:
+ ExitProfile();
+ ~ExitProfile();
+
+ // Add a new frequent exit site. Return true if this is a new one, or false
+ // if we already knew about it. This is an O(n) operation, because it errs
+ // on the side of keeping the data structure compact. Also, this will only
+ // be called a fixed number of times per recompilation. Recompilation is
+ // rare to begin with, and implies doing O(n) operations on the CodeBlock
+ // anyway.
+ bool add(const FrequentExitSite&);
+
+private:
+ friend class QueryableExitProfile;
+
+ OwnPtr<Vector<FrequentExitSite> > m_frequentExitSites;
+};
+
+class QueryableExitProfile {
+public:
+ explicit QueryableExitProfile(const ExitProfile&);
+ ~QueryableExitProfile();
+
+ bool hasExitSite(const FrequentExitSite& site) const
+ {
+ return m_frequentExitSites.find(site) != m_frequentExitSites.end();
+ }
+
+ bool hasExitSite(unsigned bytecodeIndex, ExitKind kind) const
+ {
+ return hasExitSite(FrequentExitSite(bytecodeIndex, kind));
+ }
+private:
+ HashSet<FrequentExitSite> m_frequentExitSites;
+};
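+
+// A minimal sketch of the intended two-phase use (variable and accessor names
+// are illustrative): snapshot the profile once at the start of a compile, then
+// make O(1) queries while deciding which speculations to emit:
+//
+//     QueryableExitProfile exitSites(profiledBlock->exitProfile());
+//     if (exitSites.hasExitSite(bytecodeIndex, Overflow))
+//         ; // emit the overflow-safe variant instead of speculating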
+
+} } // namespace JSC::DFG
+
+#endif // DFGExitProfile_h
diff --git a/Source/JavaScriptCore/bytecode/DataFormat.h b/Source/JavaScriptCore/bytecode/DataFormat.h
new file mode 100644
index 000000000..b78a6e8e6
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/DataFormat.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DataFormat_h
+#define DataFormat_h
+
+#include <wtf/Assertions.h>
+
+namespace JSC {
+
+// === DataFormat ===
+//
+// This enum tracks the current representation in which a value is being held.
+// Values may be unboxed primitives (int32, double, or cell), or boxed as a JSValue.
+// For boxed values, we may know the type of boxing that has taken place.
+// (May also need bool, array, object, string types!)
+enum DataFormat {
+ DataFormatNone = 0,
+ DataFormatInteger = 1,
+ DataFormatDouble = 2,
+ DataFormatBoolean = 3,
+ DataFormatCell = 4,
+ DataFormatStorage = 5,
+ DataFormatJS = 8,
+ DataFormatJSInteger = DataFormatJS | DataFormatInteger,
+ DataFormatJSDouble = DataFormatJS | DataFormatDouble,
+ DataFormatJSCell = DataFormatJS | DataFormatCell,
+ DataFormatJSBoolean = DataFormatJS | DataFormatBoolean
+};
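+
+// The encoding is a small bitfield: the low bits identify the unboxed kind
+// and bit 3 (DataFormatJS) marks a boxed JSValue. For example,
+// DataFormatJSInteger == 8 | 1 == 9 and DataFormatJSCell == 8 | 4 == 12,
+// which is what lets isJSFormat() below test (format | DataFormatJS).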
+
+#ifndef NDEBUG
+inline const char* dataFormatToString(DataFormat dataFormat)
+{
+ switch (dataFormat) {
+ case DataFormatNone:
+ return "None";
+ case DataFormatInteger:
+ return "Integer";
+ case DataFormatDouble:
+ return "Double";
+ case DataFormatCell:
+ return "Cell";
+ case DataFormatBoolean:
+ return "Boolean";
+ case DataFormatStorage:
+ return "Storage";
+ case DataFormatJS:
+ return "JS";
+ case DataFormatJSInteger:
+ return "JSInteger";
+ case DataFormatJSDouble:
+ return "JSDouble";
+ case DataFormatJSCell:
+ return "JSCell";
+ case DataFormatJSBoolean:
+ return "JSBoolean";
+ default:
+ return "Unknown";
+ }
+}
+#endif
+
+#if USE(JSVALUE64)
+inline bool needDataFormatConversion(DataFormat from, DataFormat to)
+{
+ ASSERT(from != DataFormatNone);
+ ASSERT(to != DataFormatNone);
+ switch (from) {
+ case DataFormatInteger:
+ case DataFormatDouble:
+ return to != from;
+ case DataFormatCell:
+ case DataFormatJS:
+ case DataFormatJSInteger:
+ case DataFormatJSDouble:
+ case DataFormatJSCell:
+ case DataFormatJSBoolean:
+ switch (to) {
+ case DataFormatInteger:
+ case DataFormatDouble:
+ return true;
+ case DataFormatCell:
+ case DataFormatJS:
+ case DataFormatJSInteger:
+ case DataFormatJSDouble:
+ case DataFormatJSCell:
+ case DataFormatJSBoolean:
+ return false;
+ default:
+ // This captures DataFormatBoolean, which is currently unused.
+ ASSERT_NOT_REACHED();
+ }
+ case DataFormatStorage:
+ ASSERT(to == DataFormatStorage);
+ return false;
+ default:
+ // This captures DataFormatBoolean, which is currently unused.
+ ASSERT_NOT_REACHED();
+ }
+ return true;
+}
+
+#elif USE(JSVALUE32_64)
+inline bool needDataFormatConversion(DataFormat from, DataFormat to)
+{
+ ASSERT(from != DataFormatNone);
+ ASSERT(to != DataFormatNone);
+ switch (from) {
+ case DataFormatInteger:
+ case DataFormatCell:
+ case DataFormatBoolean:
+ return ((to & DataFormatJS) || to == DataFormatDouble);
+ case DataFormatDouble:
+ case DataFormatJSDouble:
+ return (to != DataFormatDouble && to != DataFormatJSDouble);
+ case DataFormatJS:
+ case DataFormatJSInteger:
+ case DataFormatJSCell:
+ case DataFormatJSBoolean:
+ return (!(to & DataFormatJS) || to == DataFormatJSDouble);
+ case DataFormatStorage:
+ ASSERT(to == DataFormatStorage);
+ return false;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ return true;
+}
+#endif
+
+inline bool isJSFormat(DataFormat format, DataFormat expectedFormat)
+{
+ ASSERT(expectedFormat & DataFormatJS);
+ return (format | DataFormatJS) == expectedFormat;
+}
+
+inline bool isJSInteger(DataFormat format)
+{
+ return isJSFormat(format, DataFormatJSInteger);
+}
+
+inline bool isJSDouble(DataFormat format)
+{
+ return isJSFormat(format, DataFormatJSDouble);
+}
+
+inline bool isJSCell(DataFormat format)
+{
+ return isJSFormat(format, DataFormatJSCell);
+}
+
+inline bool isJSBoolean(DataFormat format)
+{
+ return isJSFormat(format, DataFormatJSBoolean);
+}
+
+}
+
+#endif // DataFormat_h
diff --git a/Source/JavaScriptCore/bytecode/EvalCodeCache.h b/Source/JavaScriptCore/bytecode/EvalCodeCache.h
new file mode 100644
index 000000000..fba1d32f5
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/EvalCodeCache.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef EvalCodeCache_h
+#define EvalCodeCache_h
+
+#include "Executable.h"
+#include "JSGlobalObject.h"
+#include "Nodes.h"
+#include "Parser.h"
+#include "SourceCode.h"
+#include "UString.h"
+#include <wtf/HashMap.h>
+#include <wtf/RefPtr.h>
+#include <wtf/text/StringHash.h>
+
+namespace JSC {
+
+ class SlotVisitor;
+
+ class EvalCodeCache {
+ public:
+ EvalExecutable* tryGet(bool inStrictContext, const UString& evalSource, ScopeChainNode* scopeChain)
+ {
+ if (!inStrictContext && evalSource.length() < maxCacheableSourceLength && (*scopeChain->begin())->isVariableObject())
+ return m_cacheMap.get(evalSource.impl()).get();
+ return 0;
+ }
+
+ EvalExecutable* getSlow(ExecState* exec, ScriptExecutable* owner, bool inStrictContext, const UString& evalSource, ScopeChainNode* scopeChain, JSValue& exceptionValue)
+ {
+ EvalExecutable* evalExecutable = EvalExecutable::create(exec, makeSource(evalSource), inStrictContext);
+ exceptionValue = evalExecutable->compile(exec, scopeChain);
+ if (exceptionValue)
+ return 0;
+
+ if (!inStrictContext && evalSource.length() < maxCacheableSourceLength && (*scopeChain->begin())->isVariableObject() && m_cacheMap.size() < maxCacheEntries)
+ m_cacheMap.set(evalSource.impl(), WriteBarrier<EvalExecutable>(exec->globalData(), owner, evalExecutable));
+
+ return evalExecutable;
+ }
+
+ EvalExecutable* get(ExecState* exec, ScriptExecutable* owner, bool inStrictContext, const UString& evalSource, ScopeChainNode* scopeChain, JSValue& exceptionValue)
+ {
+ EvalExecutable* evalExecutable = tryGet(inStrictContext, evalSource, scopeChain);
+
+ if (!evalExecutable)
+ evalExecutable = getSlow(exec, owner, inStrictContext, evalSource, scopeChain, exceptionValue);
+
+ return evalExecutable;
+ }
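+
+ // A sketch of a typical call site (the surrounding names are assumptions,
+ // not part of this class): an eval site consults the cache first and only
+ // parses and compiles on a miss, with exceptionValue reporting failure:
+ //
+ //     JSValue exception;
+ //     EvalExecutable* eval = codeBlock->evalCodeCache().get(
+ //         exec, codeBlock->ownerExecutable(), inStrictContext, source, scopeChain, exception);
+ //     if (!eval)
+ //         return throwError(exec, exception);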
+
+ bool isEmpty() const { return m_cacheMap.isEmpty(); }
+
+ void visitAggregate(SlotVisitor&);
+
+ void clear()
+ {
+ m_cacheMap.clear();
+ }
+
+ private:
+ static const unsigned maxCacheableSourceLength = 256;
+ static const int maxCacheEntries = 64;
+
+ typedef HashMap<RefPtr<StringImpl>, WriteBarrier<EvalExecutable> > EvalCacheMap;
+ EvalCacheMap m_cacheMap;
+ };
+
+} // namespace JSC
+
+#endif // EvalCodeCache_h
diff --git a/Source/JavaScriptCore/bytecode/Instruction.h b/Source/JavaScriptCore/bytecode/Instruction.h
new file mode 100644
index 000000000..7e4413065
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/Instruction.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Instruction_h
+#define Instruction_h
+
+#include "MacroAssembler.h"
+#include "Opcode.h"
+#include "PropertySlot.h"
+#include "Structure.h"
+#include "StructureChain.h"
+#include <wtf/VectorTraits.h>
+
+#define POLYMORPHIC_LIST_CACHE_SIZE 8
+
+namespace JSC {
+
+ // *Sigh*, if the JIT is enabled we need to track the stubRoutine (of type CodeLocationLabel).
+ // If the JIT is not in use we don't actually need the variable (that said, if the JIT is not in use we don't
+ // currently actually use PolymorphicAccessStructureLists, which we should). Anyway, this seems like the best
+ // solution for now - we will need to do something smarter if/when we actually want mixed-mode operation.
+
+ class JSCell;
+ class Structure;
+ class StructureChain;
+
+#if ENABLE(JIT)
+ typedef MacroAssemblerCodeRef PolymorphicAccessStructureListStubRoutineType;
+
+ // Structure used by op_get_by_id_self_list and op_get_by_id_proto_list instruction to hold data off the main opcode stream.
+ struct PolymorphicAccessStructureList {
+ WTF_MAKE_FAST_ALLOCATED;
+ public:
+ struct PolymorphicStubInfo {
+ bool isChain;
+ bool isDirect;
+ PolymorphicAccessStructureListStubRoutineType stubRoutine;
+ WriteBarrier<Structure> base;
+ union {
+ WriteBarrierBase<Structure> proto;
+ WriteBarrierBase<StructureChain> chain;
+ } u;
+
+ PolymorphicStubInfo()
+ {
+ u.proto.clear();
+ }
+
+ void set(JSGlobalData& globalData, JSCell* owner, PolymorphicAccessStructureListStubRoutineType _stubRoutine, Structure* _base, bool isDirect)
+ {
+ stubRoutine = _stubRoutine;
+ base.set(globalData, owner, _base);
+ u.proto.clear();
+ isChain = false;
+ this->isDirect = isDirect;
+ }
+
+ void set(JSGlobalData& globalData, JSCell* owner, PolymorphicAccessStructureListStubRoutineType _stubRoutine, Structure* _base, Structure* _proto, bool isDirect)
+ {
+ stubRoutine = _stubRoutine;
+ base.set(globalData, owner, _base);
+ u.proto.set(globalData, owner, _proto);
+ isChain = false;
+ this->isDirect = isDirect;
+ }
+
+ void set(JSGlobalData& globalData, JSCell* owner, PolymorphicAccessStructureListStubRoutineType _stubRoutine, Structure* _base, StructureChain* _chain, bool isDirect)
+ {
+ stubRoutine = _stubRoutine;
+ base.set(globalData, owner, _base);
+ u.chain.set(globalData, owner, _chain);
+ isChain = true;
+ this->isDirect = isDirect;
+ }
+ } list[POLYMORPHIC_LIST_CACHE_SIZE];
+
+ PolymorphicAccessStructureList(JSGlobalData& globalData, JSCell* owner, PolymorphicAccessStructureListStubRoutineType stubRoutine, Structure* firstBase, bool isDirect)
+ {
+ list[0].set(globalData, owner, stubRoutine, firstBase, isDirect);
+ }
+
+ PolymorphicAccessStructureList(JSGlobalData& globalData, JSCell* owner, PolymorphicAccessStructureListStubRoutineType stubRoutine, Structure* firstBase, Structure* firstProto, bool isDirect)
+ {
+ list[0].set(globalData, owner, stubRoutine, firstBase, firstProto, isDirect);
+ }
+
+ PolymorphicAccessStructureList(JSGlobalData& globalData, JSCell* owner, PolymorphicAccessStructureListStubRoutineType stubRoutine, Structure* firstBase, StructureChain* firstChain, bool isDirect)
+ {
+ list[0].set(globalData, owner, stubRoutine, firstBase, firstChain, isDirect);
+ }
+
+ bool visitWeak(int count)
+ {
+ for (int i = 0; i < count; ++i) {
+ PolymorphicStubInfo& info = list[i];
+ if (!info.base) {
+ // We're being marked during initialisation of an entry
+ ASSERT(!info.u.proto);
+ continue;
+ }
+
+ if (!Heap::isMarked(info.base.get()))
+ return false;
+ if (info.u.proto && !info.isChain
+ && !Heap::isMarked(info.u.proto.get()))
+ return false;
+ if (info.u.chain && info.isChain
+ && !Heap::isMarked(info.u.chain.get()))
+ return false;
+ }
+
+ return true;
+ }
+ };
+
+#endif
+
+ struct Instruction {
+ Instruction(Opcode opcode)
+ {
+#if !ENABLE(COMPUTED_GOTO_INTERPRETER)
+ // We have to initialize one of the pointer members to ensure that
+ // the entire struct is initialized, when opcode is not a pointer.
+ u.jsCell.clear();
+#endif
+ u.opcode = opcode;
+ }
+
+ Instruction(int operand)
+ {
+ // We have to initialize one of the pointer members to ensure that
+ // the entire struct is initialized in 64-bit.
+ u.jsCell.clear();
+ u.operand = operand;
+ }
+
+ Instruction(JSGlobalData& globalData, JSCell* owner, Structure* structure)
+ {
+ u.structure.clear();
+ u.structure.set(globalData, owner, structure);
+ }
+ Instruction(JSGlobalData& globalData, JSCell* owner, StructureChain* structureChain)
+ {
+ u.structureChain.clear();
+ u.structureChain.set(globalData, owner, structureChain);
+ }
+ Instruction(JSGlobalData& globalData, JSCell* owner, JSCell* jsCell)
+ {
+ u.jsCell.clear();
+ u.jsCell.set(globalData, owner, jsCell);
+ }
+
+ Instruction(PropertySlot::GetValueFunc getterFunc) { u.getterFunc = getterFunc; }
+
+ union {
+ Opcode opcode;
+ int operand;
+ WriteBarrierBase<Structure> structure;
+ WriteBarrierBase<StructureChain> structureChain;
+ WriteBarrierBase<JSCell> jsCell;
+ PropertySlot::GetValueFunc getterFunc;
+ } u;
+
+ private:
+ Instruction(StructureChain*);
+ Instruction(Structure*);
+ };
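+
+ // The bytecode stream itself is a flat Vector<Instruction> in which an
+ // opcode word is followed by its operand words. As an illustrative sketch
+ // (op_mov takes a destination and a source register):
+ //
+ //     instructions.append(Instruction(op_mov)); // opcode word
+ //     instructions.append(Instruction(dst));    // operand: destination
+ //     instructions.append(Instruction(src));    // operand: source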
+
+} // namespace JSC
+
+namespace WTF {
+
+ template<> struct VectorTraits<JSC::Instruction> : VectorTraitsBase<true, JSC::Instruction> { };
+
+} // namespace WTF
+
+#endif // Instruction_h
diff --git a/Source/JavaScriptCore/bytecode/JumpTable.cpp b/Source/JavaScriptCore/bytecode/JumpTable.cpp
new file mode 100644
index 000000000..ef7098b65
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/JumpTable.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JumpTable.h"
+
+#include <wtf/text/StringHash.h>
+
+namespace JSC {
+
+int32_t SimpleJumpTable::offsetForValue(int32_t value, int32_t defaultOffset)
+{
+ if (value >= min && static_cast<uint32_t>(value - min) < branchOffsets.size()) {
+ int32_t offset = branchOffsets[value - min];
+ if (offset)
+ return offset;
+ }
+ return defaultOffset;
+}
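+
+// A worked example: with min = 3 and branchOffsets = { 10, 0, 24 }, a switch
+// on 5 returns 24 (index 5 - 3 = 2), a switch on 4 falls through to
+// defaultOffset because a stored offset of 0 means "no case here", and any
+// value outside [3, 5] fails the range check.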
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/JumpTable.h b/Source/JavaScriptCore/bytecode/JumpTable.h
new file mode 100644
index 000000000..5bbe04710
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/JumpTable.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JumpTable_h
+#define JumpTable_h
+
+#include "MacroAssembler.h"
+#include "UString.h"
+#include <wtf/HashMap.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+ struct OffsetLocation {
+ int32_t branchOffset;
+#if ENABLE(JIT)
+ CodeLocationLabel ctiOffset;
+#endif
+ };
+
+ struct StringJumpTable {
+ typedef HashMap<RefPtr<StringImpl>, OffsetLocation> StringOffsetTable;
+ StringOffsetTable offsetTable;
+#if ENABLE(JIT)
+ CodeLocationLabel ctiDefault; // FIXME: it should not be necessary to store this.
+#endif
+
+ inline int32_t offsetForValue(StringImpl* value, int32_t defaultOffset)
+ {
+ StringOffsetTable::const_iterator end = offsetTable.end();
+ StringOffsetTable::const_iterator loc = offsetTable.find(value);
+ if (loc == end)
+ return defaultOffset;
+ return loc->second.branchOffset;
+ }
+
+#if ENABLE(JIT)
+ inline CodeLocationLabel ctiForValue(StringImpl* value)
+ {
+ StringOffsetTable::const_iterator end = offsetTable.end();
+ StringOffsetTable::const_iterator loc = offsetTable.find(value);
+ if (loc == end)
+ return ctiDefault;
+ return loc->second.ctiOffset;
+ }
+#endif
+ };
+
+ struct SimpleJumpTable {
+ // FIXME: The two Vectors can be combined into one Vector<OffsetLocation>
+ Vector<int32_t> branchOffsets;
+ int32_t min;
+#if ENABLE(JIT)
+ Vector<CodeLocationLabel> ctiOffsets;
+ CodeLocationLabel ctiDefault;
+#endif
+
+ int32_t offsetForValue(int32_t value, int32_t defaultOffset);
+ void add(int32_t key, int32_t offset)
+ {
+ if (!branchOffsets[key])
+ branchOffsets[key] = offset;
+ }
+
+#if ENABLE(JIT)
+ inline CodeLocationLabel ctiForValue(int32_t value)
+ {
+ if (value >= min && static_cast<uint32_t>(value - min) < ctiOffsets.size())
+ return ctiOffsets[value - min];
+ return ctiDefault;
+ }
+#endif
+ };
+
+} // namespace JSC
+
+#endif // JumpTable_h
diff --git a/Source/JavaScriptCore/bytecode/Opcode.cpp b/Source/JavaScriptCore/bytecode/Opcode.cpp
new file mode 100644
index 000000000..2490804cd
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/Opcode.cpp
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "Opcode.h"
+
+#if ENABLE(OPCODE_STATS)
+#include <stdio.h>
+#include <wtf/FixedArray.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+#if !defined(NDEBUG) || ENABLE(OPCODE_SAMPLING) || ENABLE(CODEBLOCK_SAMPLING) || ENABLE(OPCODE_STATS)
+
+const char* const opcodeNames[] = {
+#define OPCODE_NAME_ENTRY(opcode, size) #opcode,
+ FOR_EACH_OPCODE_ID(OPCODE_NAME_ENTRY)
+#undef OPCODE_NAME_ENTRY
+};
+
+#endif
+
+#if ENABLE(OPCODE_STATS)
+
+long long OpcodeStats::opcodeCounts[numOpcodeIDs];
+long long OpcodeStats::opcodePairCounts[numOpcodeIDs][numOpcodeIDs];
+int OpcodeStats::lastOpcode = -1;
+
+static OpcodeStats logger;
+
+OpcodeStats::OpcodeStats()
+{
+ for (int i = 0; i < numOpcodeIDs; ++i)
+ opcodeCounts[i] = 0;
+
+ for (int i = 0; i < numOpcodeIDs; ++i)
+ for (int j = 0; j < numOpcodeIDs; ++j)
+ opcodePairCounts[i][j] = 0;
+}
+
+static int compareOpcodeIndices(const void* left, const void* right)
+{
+ long long leftValue = OpcodeStats::opcodeCounts[*(int*) left];
+ long long rightValue = OpcodeStats::opcodeCounts[*(int*) right];
+
+ if (leftValue < rightValue)
+ return 1;
+ else if (leftValue > rightValue)
+ return -1;
+ else
+ return 0;
+}
+
+static int compareOpcodePairIndices(const void* left, const void* right)
+{
+ pair<int, int> leftPair = *(pair<int, int>*) left;
+ long long leftValue = OpcodeStats::opcodePairCounts[leftPair.first][leftPair.second];
+ pair<int, int> rightPair = *(pair<int, int>*) right;
+ long long rightValue = OpcodeStats::opcodePairCounts[rightPair.first][rightPair.second];
+
+ if (leftValue < rightValue)
+ return 1;
+ else if (leftValue > rightValue)
+ return -1;
+ else
+ return 0;
+}
+
+OpcodeStats::~OpcodeStats()
+{
+ long long totalInstructions = 0;
+ for (int i = 0; i < numOpcodeIDs; ++i)
+ totalInstructions += opcodeCounts[i];
+
+ long long totalInstructionPairs = 0;
+ for (int i = 0; i < numOpcodeIDs; ++i)
+ for (int j = 0; j < numOpcodeIDs; ++j)
+ totalInstructionPairs += opcodePairCounts[i][j];
+
+ FixedArray<int, numOpcodeIDs> sortedIndices;
+ for (int i = 0; i < numOpcodeIDs; ++i)
+ sortedIndices[i] = i;
+ qsort(sortedIndices.data(), numOpcodeIDs, sizeof(int), compareOpcodeIndices);
+
+ pair<int, int> sortedPairIndices[numOpcodeIDs * numOpcodeIDs];
+ pair<int, int>* currentPairIndex = sortedPairIndices;
+ for (int i = 0; i < numOpcodeIDs; ++i)
+ for (int j = 0; j < numOpcodeIDs; ++j)
+ *(currentPairIndex++) = make_pair(i, j);
+ qsort(sortedPairIndices, numOpcodeIDs * numOpcodeIDs, sizeof(pair<int, int>), compareOpcodePairIndices);
+
+ printf("\nExecuted opcode statistics\n");
+
+ printf("Total instructions executed: %lld\n\n", totalInstructions);
+
+ printf("All opcodes by frequency:\n\n");
+
+ for (int i = 0; i < numOpcodeIDs; ++i) {
+ int index = sortedIndices[i];
+ printf("%s:%s %lld - %.2f%%\n", opcodeNames[index], padOpcodeName((OpcodeID)index, 28), opcodeCounts[index], ((double) opcodeCounts[index]) / ((double) totalInstructions) * 100.0);
+ }
+
+ printf("\n");
+ printf("2-opcode sequences by frequency: %lld\n\n", totalInstructions);
+
+ for (int i = 0; i < numOpcodeIDs * numOpcodeIDs; ++i) {
+ pair<int, int> indexPair = sortedPairIndices[i];
+ long long count = opcodePairCounts[indexPair.first][indexPair.second];
+
+ if (!count)
+ break;
+
+ printf("%s%s %s:%s %lld %.2f%%\n", opcodeNames[indexPair.first], padOpcodeName((OpcodeID)indexPair.first, 28), opcodeNames[indexPair.second], padOpcodeName((OpcodeID)indexPair.second, 28), count, ((double) count) / ((double) totalInstructionPairs) * 100.0);
+ }
+
+ printf("\n");
+ printf("Most common opcodes and sequences:\n");
+
+ for (int i = 0; i < numOpcodeIDs; ++i) {
+ int index = sortedIndices[i];
+ long long opcodeCount = opcodeCounts[index];
+ double opcodeProportion = ((double) opcodeCount) / ((double) totalInstructions);
+ if (opcodeProportion < 0.0001)
+ break;
+ printf("\n%s:%s %lld - %.2f%%\n", opcodeNames[index], padOpcodeName((OpcodeID)index, 28), opcodeCount, opcodeProportion * 100.0);
+
+ for (int j = 0; j < numOpcodeIDs * numOpcodeIDs; ++j) {
+ pair<int, int> indexPair = sortedPairIndices[j];
+ long long pairCount = opcodePairCounts[indexPair.first][indexPair.second];
+ double pairProportion = ((double) pairCount) / ((double) totalInstructionPairs);
+
+ if (!pairCount || pairProportion < 0.0001 || pairProportion < opcodeProportion / 100)
+ break;
+
+ if (indexPair.first != index && indexPair.second != index)
+ continue;
+
+ printf(" %s%s %s:%s %lld - %.2f%%\n", opcodeNames[indexPair.first], padOpcodeName((OpcodeID)indexPair.first, 28), opcodeNames[indexPair.second], padOpcodeName((OpcodeID)indexPair.second, 28), pairCount, pairProportion * 100.0);
+ }
+
+ }
+ printf("\n");
+}
+
+void OpcodeStats::recordInstruction(int opcode)
+{
+ opcodeCounts[opcode]++;
+
+ if (lastOpcode != -1)
+ opcodePairCounts[lastOpcode][opcode]++;
+
+ lastOpcode = opcode;
+}
+
+void OpcodeStats::resetLastInstruction()
+{
+ lastOpcode = -1;
+}
+
+#endif
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/Opcode.h b/Source/JavaScriptCore/bytecode/Opcode.h
new file mode 100644
index 000000000..4801e4c32
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/Opcode.h
@@ -0,0 +1,278 @@
+/*
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Opcode_h
+#define Opcode_h
+
+#include <algorithm>
+#include <string.h>
+
+#include <wtf/Assertions.h>
+
+namespace JSC {
+
+ #define FOR_EACH_OPCODE_ID(macro) \
+ macro(op_enter, 1) \
+ macro(op_create_activation, 2) \
+ macro(op_init_lazy_reg, 2) \
+ macro(op_create_arguments, 2) \
+ macro(op_create_this, 3) \
+ macro(op_get_callee, 2) \
+ macro(op_convert_this, 2) \
+ \
+ macro(op_new_object, 2) \
+ macro(op_new_array, 4) \
+ macro(op_new_array_buffer, 4) \
+ macro(op_new_regexp, 3) \
+ macro(op_mov, 3) \
+ \
+ macro(op_not, 3) \
+ macro(op_eq, 4) \
+ macro(op_eq_null, 3) \
+ macro(op_neq, 4) \
+ macro(op_neq_null, 3) \
+ macro(op_stricteq, 4) \
+ macro(op_nstricteq, 4) \
+ macro(op_less, 4) \
+ macro(op_lesseq, 4) \
+ macro(op_greater, 4) \
+ macro(op_greatereq, 4) \
+ \
+ macro(op_pre_inc, 2) \
+ macro(op_pre_dec, 2) \
+ macro(op_post_inc, 3) \
+ macro(op_post_dec, 3) \
+ macro(op_to_jsnumber, 3) \
+ macro(op_negate, 3) \
+ macro(op_add, 5) \
+ macro(op_mul, 5) \
+ macro(op_div, 5) \
+ macro(op_mod, 4) \
+ macro(op_sub, 5) \
+ \
+ macro(op_lshift, 4) \
+ macro(op_rshift, 4) \
+ macro(op_urshift, 4) \
+ macro(op_bitand, 5) \
+ macro(op_bitxor, 5) \
+ macro(op_bitor, 5) \
+ macro(op_bitnot, 3) \
+ \
+ macro(op_check_has_instance, 2) \
+ macro(op_instanceof, 5) \
+ macro(op_typeof, 3) \
+ macro(op_is_undefined, 3) \
+ macro(op_is_boolean, 3) \
+ macro(op_is_number, 3) \
+ macro(op_is_string, 3) \
+ macro(op_is_object, 3) \
+ macro(op_is_function, 3) \
+ macro(op_in, 4) \
+ \
+ macro(op_resolve, 3) \
+ macro(op_resolve_skip, 4) \
+ macro(op_resolve_global, 5) \
+ macro(op_resolve_global_dynamic, 6) \
+ macro(op_get_scoped_var, 4) \
+ macro(op_put_scoped_var, 4) \
+ macro(op_get_global_var, 3) \
+ macro(op_put_global_var, 3) \
+ macro(op_resolve_base, 4) \
+ macro(op_ensure_property_exists, 3) \
+ macro(op_resolve_with_base, 4) \
+ macro(op_resolve_with_this, 4) \
+ macro(op_get_by_id, 8) \
+ macro(op_get_by_id_self, 8) \
+ macro(op_get_by_id_proto, 8) \
+ macro(op_get_by_id_chain, 8) \
+ macro(op_get_by_id_getter_self, 8) \
+ macro(op_get_by_id_getter_proto, 8) \
+ macro(op_get_by_id_getter_chain, 8) \
+ macro(op_get_by_id_custom_self, 8) \
+ macro(op_get_by_id_custom_proto, 8) \
+ macro(op_get_by_id_custom_chain, 8) \
+ macro(op_get_by_id_generic, 8) \
+ macro(op_get_array_length, 8) \
+ macro(op_get_string_length, 8) \
+ macro(op_get_arguments_length, 4) \
+ macro(op_put_by_id, 9) \
+ macro(op_put_by_id_transition, 9) \
+ macro(op_put_by_id_replace, 9) \
+ macro(op_put_by_id_generic, 9) \
+ macro(op_del_by_id, 4) \
+ macro(op_get_by_val, 4) \
+ macro(op_get_argument_by_val, 4) \
+ macro(op_get_by_pname, 7) \
+ macro(op_put_by_val, 4) \
+ macro(op_del_by_val, 4) \
+ macro(op_put_by_index, 4) \
+ macro(op_put_getter, 4) \
+ macro(op_put_setter, 4) \
+ \
+ macro(op_jmp, 2) \
+ macro(op_jtrue, 3) \
+ macro(op_jfalse, 3) \
+ macro(op_jeq_null, 3) \
+ macro(op_jneq_null, 3) \
+ macro(op_jneq_ptr, 4) \
+ macro(op_jless, 4) \
+ macro(op_jlesseq, 4) \
+ macro(op_jgreater, 4) \
+ macro(op_jgreatereq, 4) \
+ macro(op_jnless, 4) \
+ macro(op_jnlesseq, 4) \
+ macro(op_jngreater, 4) \
+ macro(op_jngreatereq, 4) \
+ macro(op_jmp_scopes, 3) \
+ macro(op_loop, 2) \
+ macro(op_loop_if_true, 3) \
+ macro(op_loop_if_false, 3) \
+ macro(op_loop_if_less, 4) \
+ macro(op_loop_if_lesseq, 4) \
+ macro(op_loop_if_greater, 4) \
+ macro(op_loop_if_greatereq, 4) \
+ macro(op_loop_hint, 1) \
+ macro(op_switch_imm, 4) \
+ macro(op_switch_char, 4) \
+ macro(op_switch_string, 4) \
+ \
+ macro(op_new_func, 4) \
+ macro(op_new_func_exp, 3) \
+ macro(op_call, 6) \
+ macro(op_call_eval, 6) \
+ macro(op_call_varargs, 5) \
+ macro(op_tear_off_activation, 3) \
+ macro(op_tear_off_arguments, 2) \
+ macro(op_ret, 2) \
+ macro(op_call_put_result, 2) \
+ macro(op_ret_object_or_this, 3) \
+ macro(op_method_check, 1) \
+ \
+ macro(op_construct, 6) \
+ macro(op_strcat, 4) \
+ macro(op_to_primitive, 3) \
+ \
+ macro(op_get_pnames, 6) \
+ macro(op_next_pname, 7) \
+ \
+ macro(op_push_scope, 2) \
+ macro(op_pop_scope, 1) \
+ macro(op_push_new_scope, 4) \
+ \
+ macro(op_catch, 2) \
+ macro(op_throw, 2) \
+ macro(op_throw_reference_error, 2) \
+ \
+ macro(op_jsr, 3) \
+ macro(op_sret, 2) \
+ \
+ macro(op_debug, 4) \
+ macro(op_profile_will_call, 2) \
+ macro(op_profile_did_call, 2) \
+ \
+ macro(op_end, 2) // end must be the last opcode in the list
+
+ #define OPCODE_ID_ENUM(opcode, length) opcode,
+ typedef enum { FOR_EACH_OPCODE_ID(OPCODE_ID_ENUM) } OpcodeID;
+ #undef OPCODE_ID_ENUM
+
+ const int numOpcodeIDs = op_end + 1;
+
+ #define OPCODE_ID_LENGTHS(id, length) const int id##_length = length;
+ FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTHS);
+ #undef OPCODE_ID_LENGTHS
+
+ #define OPCODE_LENGTH(opcode) opcode##_length
+
+ #define OPCODE_ID_LENGTH_MAP(opcode, length) length,
+ const int opcodeLengths[numOpcodeIDs] = { FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTH_MAP) };
+ #undef OPCODE_ID_LENGTH_MAP
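+
+ // Usage sketch (illustrative): an interpreter loop advances its instruction
+ // pointer by the static length of each bytecode, e.g.
+ //
+ //     vPC += opcodeLengths[op_add]; // op_add occupies 5 instruction slots
+ //
+ // OPCODE_LENGTH(op_add) yields the same constant at compile time.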
+
+ #define VERIFY_OPCODE_ID(id, size) COMPILE_ASSERT(id <= op_end, ASSERT_THAT_JS_OPCODE_IDS_ARE_VALID);
+ FOR_EACH_OPCODE_ID(VERIFY_OPCODE_ID);
+ #undef VERIFY_OPCODE_ID
+
+#if ENABLE(COMPUTED_GOTO_INTERPRETER)
+#if COMPILER(RVCT) || COMPILER(INTEL)
+ typedef void* Opcode;
+#else
+ typedef const void* Opcode;
+#endif
+#else
+ typedef OpcodeID Opcode;
+#endif
+
+#if !defined(NDEBUG) || ENABLE(OPCODE_SAMPLING) || ENABLE(CODEBLOCK_SAMPLING) || ENABLE(OPCODE_STATS)
+
+#define PADDING_STRING " "
+#define PADDING_STRING_LENGTH static_cast<unsigned>(strlen(PADDING_STRING))
+
+ extern const char* const opcodeNames[];
+
+ inline const char* padOpcodeName(OpcodeID op, unsigned width)
+ {
+ unsigned pad = width - strlen(opcodeNames[op]);
+ pad = std::min(pad, PADDING_STRING_LENGTH);
+ return PADDING_STRING + PADDING_STRING_LENGTH - pad;
+ }
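+
+ // Example: with width = 28 and strlen("op_add") == 6, pad is 22, so the
+ // returned pointer is the last 22 characters of PADDING_STRING; printing
+ // "op_add:" followed by that padding lines opcode names up in a column.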
+
+#undef PADDING_STRING_LENGTH
+#undef PADDING_STRING
+
+#endif
+
+#if ENABLE(OPCODE_STATS)
+
+ struct OpcodeStats {
+ OpcodeStats();
+ ~OpcodeStats();
+ static long long opcodeCounts[numOpcodeIDs];
+ static long long opcodePairCounts[numOpcodeIDs][numOpcodeIDs];
+ static int lastOpcode;
+
+ static void recordInstruction(int opcode);
+ static void resetLastInstruction();
+ };
+
+#endif
+
+ inline size_t opcodeLength(OpcodeID opcode)
+ {
+ switch (opcode) {
+#define OPCODE_ID_LENGTHS(id, length) case id: return OPCODE_LENGTH(id);
+ FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTHS)
+#undef OPCODE_ID_LENGTHS
+ }
+ ASSERT_NOT_REACHED();
+ return 0;
+ }
+
+} // namespace JSC
+
+#endif // Opcode_h
diff --git a/Source/JavaScriptCore/bytecode/PredictedType.cpp b/Source/JavaScriptCore/bytecode/PredictedType.cpp
new file mode 100644
index 000000000..9356390a9
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PredictedType.cpp
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PredictedType.h"
+
+#include "JSByteArray.h"
+#include "JSFunction.h"
+#include "ValueProfile.h"
+#include <wtf/BoundsCheckedPointer.h>
+
+namespace JSC {
+
+#ifndef NDEBUG
+const char* predictionToString(PredictedType value)
+{
+ if (value == PredictNone)
+ return "None";
+
+ static const int size = 256;
+ static char description[size];
+ BoundsCheckedPointer<char> ptr(description, size);
+
+ bool isTop = true;
+
+ if (value & PredictCellOther)
+ ptr.strcat("Othercell");
+ else
+ isTop = false;
+
+ if (value & PredictObjectOther)
+ ptr.strcat("Otherobj");
+ else
+ isTop = false;
+
+ if (value & PredictFinalObject)
+ ptr.strcat("Final");
+ else
+ isTop = false;
+
+ if (value & PredictArray)
+ ptr.strcat("Array");
+ else
+ isTop = false;
+
+ if (value & PredictByteArray)
+ ptr.strcat("Bytearray");
+ else
+ isTop = false;
+
+ if (value & PredictInt8Array)
+ ptr.strcat("Int8array");
+ else
+ isTop = false;
+
+ if (value & PredictInt16Array)
+ ptr.strcat("Int16array");
+ else
+ isTop = false;
+
+ if (value & PredictInt32Array)
+ ptr.strcat("Int32array");
+ else
+ isTop = false;
+
+ if (value & PredictUint8Array)
+ ptr.strcat("Uint8array");
+ else
+ isTop = false;
+
+ if (value & PredictUint16Array)
+ ptr.strcat("Uint16array");
+ else
+ isTop = false;
+
+ if (value & PredictUint32Array)
+ ptr.strcat("Uint32array");
+ else
+ isTop = false;
+
+ if (value & PredictFloat32Array)
+ ptr.strcat("Float32array");
+ else
+ isTop = false;
+
+ if (value & PredictFloat64Array)
+ ptr.strcat("Float64array");
+ else
+ isTop = false;
+
+ if (value & PredictFunction)
+ ptr.strcat("Function");
+ else
+ isTop = false;
+
+ if (value & PredictString)
+ ptr.strcat("String");
+ else
+ isTop = false;
+
+ if (value & PredictInt32)
+ ptr.strcat("Int");
+ else
+ isTop = false;
+
+ if (value & PredictDoubleReal)
+ ptr.strcat("Doublereal");
+ else
+ isTop = false;
+
+ if (value & PredictDoubleNaN)
+ ptr.strcat("Doublenan");
+ else
+ isTop = false;
+
+ if (value & PredictBoolean)
+ ptr.strcat("Bool");
+ else
+ isTop = false;
+
+ if (value & PredictOther)
+ ptr.strcat("Other");
+ else
+ isTop = false;
+
+ if (isTop)
+ return "Top";
+
+ *ptr++ = 0;
+
+ return description;
+}
+#endif
+
+PredictedType predictionFromClassInfo(const ClassInfo* classInfo)
+{
+ if (classInfo == &JSFinalObject::s_info)
+ return PredictFinalObject;
+
+ if (classInfo == &JSArray::s_info)
+ return PredictArray;
+
+ if (classInfo == &JSString::s_info)
+ return PredictString;
+
+ if (classInfo->isSubClassOf(&JSFunction::s_info))
+ return PredictFunction;
+
+ if (classInfo->isSubClassOf(&JSByteArray::s_info))
+ return PredictByteArray;
+
+ if (classInfo->typedArrayStorageType != TypedArrayNone) {
+ switch (classInfo->typedArrayStorageType) {
+ case TypedArrayInt8:
+ return PredictInt8Array;
+ case TypedArrayInt16:
+ return PredictInt16Array;
+ case TypedArrayInt32:
+ return PredictInt32Array;
+ case TypedArrayUint8:
+ return PredictUint8Array;
+ case TypedArrayUint16:
+ return PredictUint16Array;
+ case TypedArrayUint32:
+ return PredictUint32Array;
+ case TypedArrayFloat32:
+ return PredictFloat32Array;
+ case TypedArrayFloat64:
+ return PredictFloat64Array;
+ default:
+ break;
+ }
+ }
+
+ if (classInfo->isSubClassOf(&JSObject::s_info))
+ return PredictObjectOther;
+
+ return PredictCellOther;
+}
+
+PredictedType predictionFromStructure(Structure* structure)
+{
+ return predictionFromClassInfo(structure->classInfo());
+}
+
+PredictedType predictionFromCell(JSCell* cell)
+{
+ return predictionFromStructure(cell->structure());
+}
+
+PredictedType predictionFromValue(JSValue value)
+{
+ if (value.isInt32())
+ return PredictInt32;
+ if (value.isDouble()) {
+ double number = value.asNumber();
+ if (number == number)
+ return PredictDoubleReal;
+ return PredictDoubleNaN;
+ }
+ if (value.isCell())
+ return predictionFromCell(value.asCell());
+ if (value.isBoolean())
+ return PredictBoolean;
+ ASSERT(value.isUndefinedOrNull());
+ return PredictOther;
+}
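+
+// Concrete examples of the mapping above (illustrative):
+//   jsNumber(42)    -> PredictInt32
+//   jsNumber(0.5)   -> PredictDoubleReal
+//   jsNaN()         -> PredictDoubleNaN
+//   jsBoolean(true) -> PredictBoolean
+//   jsNull()        -> PredictOther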
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/bytecode/PredictedType.h b/Source/JavaScriptCore/bytecode/PredictedType.h
new file mode 100644
index 000000000..3ec03924a
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PredictedType.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PredictedType_h
+#define PredictedType_h
+
+#include "JSValue.h"
+
+namespace JSC {
+
+class Structure;
+
+typedef uint32_t PredictedType;
+static const PredictedType PredictNone = 0x00000000; // We don't know anything yet.
+static const PredictedType PredictFinalObject = 0x00000001; // It's definitely a JSFinalObject.
+static const PredictedType PredictArray = 0x00000002; // It's definitely a JSArray.
+static const PredictedType PredictByteArray = 0x00000004; // It's definitely a JSByteArray or one of its subclasses.
+static const PredictedType PredictFunction = 0x00000008; // It's definitely a JSFunction or one of its subclasses.
+static const PredictedType PredictInt8Array = 0x00000010; // It's definitely an Int8Array or one of its subclasses.
+static const PredictedType PredictInt16Array = 0x00000020; // It's definitely an Int16Array or one of its subclasses.
+static const PredictedType PredictInt32Array = 0x00000040; // It's definitely an Int32Array or one of its subclasses.
+static const PredictedType PredictUint8Array = 0x00000080; // It's definitely a Uint8Array or one of its subclasses.
+static const PredictedType PredictUint16Array = 0x00000100; // It's definitely a Uint16Array or one of its subclasses.
+static const PredictedType PredictUint32Array = 0x00000200; // It's definitely a Uint32Array or one of its subclasses.
+static const PredictedType PredictFloat32Array = 0x00000400; // It's definitely a Float32Array or one of its subclasses.
+static const PredictedType PredictFloat64Array = 0x00000800; // It's definitely a Float64Array or one of its subclasses.
+static const PredictedType PredictObjectOther = 0x00001000; // It's definitely an object but not JSFinalObject, JSArray, JSByteArray, or JSFunction.
+static const PredictedType PredictObjectMask = 0x00001fff; // Bitmask used for testing for any kind of object prediction.
+static const PredictedType PredictString = 0x00002000; // It's definitely a JSString.
+static const PredictedType PredictCellOther = 0x00004000; // It's definitely a JSCell but not a subclass of JSObject and definitely not a JSString.
+static const PredictedType PredictCell = 0x00007fff; // It's definitely a JSCell.
+static const PredictedType PredictInt32 = 0x00008000; // It's definitely an Int32.
+static const PredictedType PredictDoubleReal = 0x00010000; // It's definitely a non-NaN double.
+static const PredictedType PredictDoubleNaN = 0x00020000; // It's definitely a NaN.
+static const PredictedType PredictDouble = 0x00030000; // It's either a non-NaN or a NaN double.
+static const PredictedType PredictNumber = 0x00038000; // It's either an Int32 or a Double.
+static const PredictedType PredictBoolean = 0x00040000; // It's definitely a Boolean.
+static const PredictedType PredictOther = 0x40000000; // It's definitely none of the above.
+static const PredictedType PredictTop = 0x7fffffff; // It can be any of the above.
+static const PredictedType FixedIndexedStorageMask = PredictByteArray | PredictInt8Array | PredictInt16Array | PredictInt32Array | PredictUint8Array | PredictUint16Array | PredictUint32Array | PredictFloat32Array | PredictFloat64Array;
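+
+// These constants form a lattice of bitmasks: the compound predictions are
+// unions of the single-bit ones, e.g. PredictNumber == PredictInt32 | PredictDouble
+// and PredictDouble == PredictDoubleReal | PredictDoubleNaN, so "is it some
+// kind of X" tests reduce to a mask-and-compare.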
+
+typedef bool (*PredictionChecker)(PredictedType);
+
+inline bool isCellPrediction(PredictedType value)
+{
+ return !!(value & PredictCell) && !(value & ~PredictCell);
+}
+
+inline bool isObjectPrediction(PredictedType value)
+{
+ return !!(value & PredictObjectMask) && !(value & ~PredictObjectMask);
+}
+
+inline bool isFinalObjectPrediction(PredictedType value)
+{
+ return value == PredictFinalObject;
+}
+
+inline bool isFinalObjectOrOtherPrediction(PredictedType value)
+{
+ return !!(value & (PredictFinalObject | PredictOther)) && !(value & ~(PredictFinalObject | PredictOther));
+}
+
+inline bool isFixedIndexedStorageObjectPrediction(PredictedType value)
+{
+ return (value & FixedIndexedStorageMask) == value;
+}
+
+inline bool isStringPrediction(PredictedType value)
+{
+ return value == PredictString;
+}
+
+inline bool isArrayPrediction(PredictedType value)
+{
+ return value == PredictArray;
+}
+
+inline bool isFunctionPrediction(PredictedType value)
+{
+ return value == PredictFunction;
+}
+
+inline bool isByteArrayPrediction(PredictedType value)
+{
+ return value == PredictByteArray;
+}
+
+inline bool isInt8ArrayPrediction(PredictedType value)
+{
+ return value == PredictInt8Array;
+}
+
+inline bool isInt16ArrayPrediction(PredictedType value)
+{
+ return value == PredictInt16Array;
+}
+
+inline bool isInt32ArrayPrediction(PredictedType value)
+{
+ return value == PredictInt32Array;
+}
+
+inline bool isUint8ArrayPrediction(PredictedType value)
+{
+ return value == PredictUint8Array;
+}
+
+inline bool isUint16ArrayPrediction(PredictedType value)
+{
+ return value == PredictUint16Array;
+}
+
+inline bool isUint32ArrayPrediction(PredictedType value)
+{
+ return value == PredictUint32Array;
+}
+
+inline bool isFloat32ArrayPrediction(PredictedType value)
+{
+ return value == PredictFloat32Array;
+}
+
+inline bool isFloat64ArrayPrediction(PredictedType value)
+{
+ return value == PredictFloat64Array;
+}
+
+inline bool isActionableMutableArrayPrediction(PredictedType value)
+{
+ return isArrayPrediction(value)
+ || isByteArrayPrediction(value)
+#if CPU(X86) || CPU(X86_64)
+ || isInt8ArrayPrediction(value)
+ || isInt16ArrayPrediction(value)
+#endif
+ || isInt32ArrayPrediction(value)
+ || isUint8ArrayPrediction(value)
+ || isUint16ArrayPrediction(value)
+ || isUint32ArrayPrediction(value)
+#if CPU(X86) || CPU(X86_64)
+ || isFloat32ArrayPrediction(value)
+#endif
+ || isFloat64ArrayPrediction(value);
+}
+
+inline bool isActionableArrayPrediction(PredictedType value)
+{
+ return isStringPrediction(value)
+ || isActionableMutableArrayPrediction(value);
+}
+
+inline bool isArrayOrOtherPrediction(PredictedType value)
+{
+ return !!(value & (PredictArray | PredictOther)) && !(value & ~(PredictArray | PredictOther));
+}
+
+inline bool isInt32Prediction(PredictedType value)
+{
+ return value == PredictInt32;
+}
+
+inline bool isDoubleRealPrediction(PredictedType value)
+{
+ return value == PredictDoubleReal;
+}
+
+inline bool isDoublePrediction(PredictedType value)
+{
+ return (value & PredictDouble) == value;
+}
+
+inline bool isNumberPrediction(PredictedType value)
+{
+ return !!(value & PredictNumber) && !(value & ~PredictNumber);
+}
+
+inline bool isBooleanPrediction(PredictedType value)
+{
+ return value == PredictBoolean;
+}
+
+inline bool isOtherPrediction(PredictedType value)
+{
+ return value == PredictOther;
+}
+
+#ifndef NDEBUG
+const char* predictionToString(PredictedType value);
+#endif
+
+// Merge two predictions. Note that currently this just does left | right. It
+// may seem tempting to do that bitwise-or directly at call sites, but you
+// would be doing so at your own peril, since the merging protocol of
+// PredictedType may change at any time (and has already changed several times
+// in its history).
+inline PredictedType mergePredictions(PredictedType left, PredictedType right)
+{
+ return left | right;
+}
+
+template<typename T>
+inline bool mergePrediction(T& left, PredictedType right)
+{
+ PredictedType newPrediction = static_cast<T>(mergePredictions(static_cast<PredictedType>(left), right));
+ bool result = newPrediction != static_cast<PredictedType>(left);
+ left = newPrediction;
+ return result;
+}
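+
+// Usage sketch (illustrative): prediction propagation typically calls
+// mergePrediction repeatedly until a fixpoint is reached, using the returned
+// bool as the "changed" signal:
+//
+//     bool changed = false;
+//     changed |= mergePrediction(slot, incoming);
+//     // ... over all slots; run another pass while anything changed.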
+
+PredictedType predictionFromClassInfo(const ClassInfo*);
+PredictedType predictionFromStructure(Structure*);
+PredictedType predictionFromCell(JSCell*);
+PredictedType predictionFromValue(JSValue);
+
+} // namespace JSC
+
+#endif // PredictedType_h
diff --git a/Source/JavaScriptCore/bytecode/PredictionTracker.h b/Source/JavaScriptCore/bytecode/PredictionTracker.h
new file mode 100644
index 000000000..7551cd3f3
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PredictionTracker.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PredictionTracker_h
+#define PredictionTracker_h
+
+#include "PredictedType.h"
+#include <wtf/HashMap.h>
+
+namespace JSC {
+
+struct PredictionSlot {
+public:
+ PredictionSlot()
+ : m_value(PredictNone)
+ {
+ }
+ PredictedType m_value;
+};
+
+class PredictionTracker {
+public:
+ PredictionTracker()
+ {
+ }
+
+ bool predictGlobalVar(unsigned varNumber, PredictedType prediction)
+ {
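+ // Keys are stored as varNumber + 1 because 0 is the empty value for
+ // unsigned keys in WTF::HashMap, so variable number 0 could not otherwise
+ // be used as a key.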
+ HashMap<unsigned, PredictionSlot>::iterator iter = m_globalVars.find(varNumber + 1);
+ if (iter == m_globalVars.end()) {
+ PredictionSlot predictionSlot;
+ bool result = mergePrediction(predictionSlot.m_value, prediction);
+ m_globalVars.add(varNumber + 1, predictionSlot);
+ return result;
+ }
+ return mergePrediction(iter->second.m_value, prediction);
+ }
+
+ PredictedType getGlobalVarPrediction(unsigned varNumber)
+ {
+ HashMap<unsigned, PredictionSlot>::iterator iter = m_globalVars.find(varNumber + 1);
+ if (iter == m_globalVars.end())
+ return PredictNone;
+ return iter->second.m_value;
+ }
+
+private:
+ HashMap<unsigned, PredictionSlot> m_globalVars;
+};
+
+} // namespace JSC
+
+#endif // PredictionTracker_h
+
diff --git a/Source/JavaScriptCore/bytecode/SamplingTool.cpp b/Source/JavaScriptCore/bytecode/SamplingTool.cpp
new file mode 100644
index 000000000..0dec25fb7
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/SamplingTool.cpp
@@ -0,0 +1,480 @@
+/*
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "SamplingTool.h"
+
+#include "CodeBlock.h"
+#include "Interpreter.h"
+#include "Opcode.h"
+
+#if !OS(WINDOWS)
+#include <unistd.h>
+#endif
+
+namespace JSC {
+
+#if ENABLE(SAMPLING_FLAGS)
+
+void SamplingFlags::sample()
+{
+ uint32_t mask = static_cast<uint32_t>(1 << 31);
+ unsigned index;
+
+ for (index = 0; index < 32; ++index) {
+ if (mask & s_flags)
+ break;
+ mask >>= 1;
+ }
+
+ s_flagCounts[32 - index]++;
+}
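+
+// Worked example: if flag 32 (the top bit) is set, the loop exits with
+// index == 0 and s_flagCounts[32] is incremented; if no flag is set, index
+// reaches 32 and the sample is recorded in s_flagCounts[0].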
+
+void SamplingFlags::start()
+{
+ for (unsigned i = 0; i <= 32; ++i)
+ s_flagCounts[i] = 0;
+}
+void SamplingFlags::stop()
+{
+ uint64_t total = 0;
+ for (unsigned i = 0; i <= 32; ++i)
+ total += s_flagCounts[i];
+
+ if (total) {
+ printf("\nSamplingFlags: sample counts with flags set: (%lld total)\n", total);
+ for (unsigned i = 0; i <= 32; ++i) {
+ if (s_flagCounts[i])
+ printf(" [ %02d ] : %lld\t\t(%03.2f%%)\n", i, s_flagCounts[i], (100.0 * s_flagCounts[i]) / total);
+ }
+ printf("\n");
+ } else
+ printf("\nSamplingFlags: no samples.\n\n");
+}
+uint64_t SamplingFlags::s_flagCounts[33];
+
+#else
+void SamplingFlags::start() {}
+void SamplingFlags::stop() {}
+#endif
+
+#if ENABLE(SAMPLING_REGIONS)
+volatile uintptr_t SamplingRegion::s_currentOrReserved;
+Spectrum<const char*>* SamplingRegion::s_spectrum;
+unsigned long SamplingRegion::s_noneOfTheAbove;
+unsigned SamplingRegion::s_numberOfSamplesSinceDump;
+
+SamplingRegion::Locker::Locker()
+{
+ uintptr_t previous;
+ while (true) {
+ previous = s_currentOrReserved;
+ if (previous & 1) {
+#if OS(UNIX)
+ sched_yield();
+#endif
+ continue;
+ }
+ if (WTF::weakCompareAndSwap(&s_currentOrReserved, previous, previous | 1))
+ break;
+ }
+}
+
+SamplingRegion::Locker::~Locker()
+{
+ // We don't need the CAS, but we do it out of an
+ // abundance of caution (and because it gives us a memory fence, which is
+ // never bad).
+ uintptr_t previous;
+ do {
+ previous = s_currentOrReserved;
+ } while (!WTF::weakCompareAndSwap(&s_currentOrReserved, previous, previous & ~1));
+}
+
+void SamplingRegion::sample()
+{
+ // Make sure we lock s_current.
+ Locker locker;
+
+ // Create a spectrum if we don't have one already.
+ if (!s_spectrum)
+ s_spectrum = new Spectrum<const char*>();
+
+ ASSERT(s_currentOrReserved & 1);
+
+ // Walk the region stack, and record each region we see.
+ SamplingRegion* region = bitwise_cast<SamplingRegion*>(s_currentOrReserved & ~1);
+ if (region) {
+ for (; region; region = region->m_previous)
+ s_spectrum->add(region->m_name);
+ } else
+ s_noneOfTheAbove++;
+
+ if (s_numberOfSamplesSinceDump++ == SamplingThread::s_hertz) {
+ s_numberOfSamplesSinceDump = 0;
+ dumpInternal();
+ }
+}
+
+void SamplingRegion::dump()
+{
+ Locker locker;
+
+ dumpInternal();
+}
+
+void SamplingRegion::dumpInternal()
+{
+ if (!s_spectrum) {
+ printf("\nSamplingRegion: was never sampled.\n\n");
+ return;
+ }
+
+ Vector<Spectrum<const char*>::KeyAndCount> list = s_spectrum->buildList();
+
+ unsigned long total = s_noneOfTheAbove;
+ for (unsigned i = list.size(); i--;)
+ total += list[i].count;
+
+ printf("\nSamplingRegion: sample counts for regions: (%lu samples)\n", total);
+
+ for (unsigned i = list.size(); i--;)
+ printf(" %3.2lf%% %s\n", (100.0 * list[i].count) / total, list[i].key);
+}
+#else // ENABLE(SAMPLING_REGIONS)
+void SamplingRegion::dump() { }
+#endif // ENABLE(SAMPLING_REGIONS)
+
+/*
+ Start with flag 16 set.
+ This masks out the monitoring of lower-valued flags until flag 16 is
+ explicitly cleared.
+*/
+uint32_t SamplingFlags::s_flags = 1 << 15;
+
+
+#if OS(WINDOWS)
+
+static void sleepForMicroseconds(unsigned us)
+{
+ unsigned ms = us / 1000;
+ if (us && !ms)
+ ms = 1;
+ Sleep(ms);
+}
+
+#else
+
+static void sleepForMicroseconds(unsigned us)
+{
+ usleep(us);
+}
+
+#endif
+
+static inline unsigned hertz2us(unsigned hertz)
+{
+ return 1000000 / hertz;
+}
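+
+// For example, the default sampling rate of 10000 hertz yields a sleep of
+// 100 microseconds between samples.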
+
+
+SamplingTool* SamplingTool::s_samplingTool = 0;
+
+
+bool SamplingThread::s_running = false;
+unsigned SamplingThread::s_hertz = 10000;
+ThreadIdentifier SamplingThread::s_samplingThread;
+
+void* SamplingThread::threadStartFunc(void*)
+{
+ while (s_running) {
+ sleepForMicroseconds(hertz2us(s_hertz));
+
+#if ENABLE(SAMPLING_FLAGS)
+ SamplingFlags::sample();
+#endif
+#if ENABLE(SAMPLING_REGIONS)
+ SamplingRegion::sample();
+#endif
+#if ENABLE(OPCODE_SAMPLING)
+ SamplingTool::sample();
+#endif
+ }
+
+ return 0;
+}
+
+
+void SamplingThread::start(unsigned hertz)
+{
+ ASSERT(!s_running);
+ s_running = true;
+ s_hertz = hertz;
+
+ s_samplingThread = createThread(threadStartFunc, 0, "JavaScriptCore::Sampler");
+}
+
+void SamplingThread::stop()
+{
+ ASSERT(s_running);
+ s_running = false;
+ waitForThreadCompletion(s_samplingThread, 0);
+}
+
+
+void ScriptSampleRecord::sample(CodeBlock* codeBlock, Instruction* vPC)
+{
+ if (!m_samples) {
+ m_size = codeBlock->instructions().size();
+ m_samples = static_cast<int*>(calloc(m_size, sizeof(int)));
+ m_codeBlock = codeBlock;
+ }
+
+ ++m_sampleCount;
+
+ unsigned offset = vPC - codeBlock->instructions().begin();
+ // Since we don't read and write codeBlock and vPC atomically, this check
+ // can fail if we sample mid op_call / op_ret.
+ if (offset < m_size) {
+ m_samples[offset]++;
+ m_opcodeSampleCount++;
+ }
+}
+
+void SamplingTool::doRun()
+{
+ Sample sample(m_sample, m_codeBlock);
+ ++m_sampleCount;
+
+ if (sample.isNull())
+ return;
+
+ if (!sample.inHostFunction()) {
+ unsigned opcodeID = m_interpreter->getOpcodeID(sample.vPC()[0].u.opcode);
+
+ ++m_opcodeSampleCount;
+ ++m_opcodeSamples[opcodeID];
+
+ if (sample.inCTIFunction())
+ m_opcodeSamplesInCTIFunctions[opcodeID]++;
+ }
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+ if (CodeBlock* codeBlock = sample.codeBlock()) {
+ MutexLocker locker(m_scriptSampleMapMutex);
+ ScriptSampleRecord* record = m_scopeSampleMap->get(codeBlock->ownerExecutable());
+ ASSERT(record);
+ record->sample(codeBlock, sample.vPC());
+ }
+#endif
+}
+
+void SamplingTool::sample()
+{
+ s_samplingTool->doRun();
+}
+
+void SamplingTool::notifyOfScope(JSGlobalData& globalData, ScriptExecutable* script)
+{
+#if ENABLE(CODEBLOCK_SAMPLING)
+ MutexLocker locker(m_scriptSampleMapMutex);
+ m_scopeSampleMap->set(script, adoptPtr(new ScriptSampleRecord(globalData, script)));
+#else
+ UNUSED_PARAM(globalData);
+ UNUSED_PARAM(script);
+#endif
+}
+
+void SamplingTool::setup()
+{
+ s_samplingTool = this;
+}
+
+#if ENABLE(OPCODE_SAMPLING)
+
+struct OpcodeSampleInfo {
+ OpcodeID opcode;
+ long long count;
+ long long countInCTIFunctions;
+};
+
+struct LineCountInfo {
+ unsigned line;
+ unsigned count;
+};
+
+static int compareOpcodeIndicesSampling(const void* left, const void* right)
+{
+ const OpcodeSampleInfo* leftSampleInfo = reinterpret_cast<const OpcodeSampleInfo*>(left);
+ const OpcodeSampleInfo* rightSampleInfo = reinterpret_cast<const OpcodeSampleInfo*>(right);
+
+ return (leftSampleInfo->count < rightSampleInfo->count) ? 1 : (leftSampleInfo->count > rightSampleInfo->count) ? -1 : 0;
+}
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+static int compareLineCountInfoSampling(const void* left, const void* right)
+{
+ const LineCountInfo* leftLineCount = reinterpret_cast<const LineCountInfo*>(left);
+ const LineCountInfo* rightLineCount = reinterpret_cast<const LineCountInfo*>(right);
+
+ return (leftLineCount->line > rightLineCount->line) ? 1 : (leftLineCount->line < rightLineCount->line) ? -1 : 0;
+}
+
+static int compareScriptSampleRecords(const void* left, const void* right)
+{
+ const ScriptSampleRecord* const leftValue = *static_cast<const ScriptSampleRecord* const *>(left);
+ const ScriptSampleRecord* const rightValue = *static_cast<const ScriptSampleRecord* const *>(right);
+
+ return (leftValue->m_sampleCount < rightValue->m_sampleCount) ? 1 : (leftValue->m_sampleCount > rightValue->m_sampleCount) ? -1 : 0;
+}
+#endif
+
+void SamplingTool::dump(ExecState* exec)
+{
+ // Tidies up SunSpider output by removing short scripts; such a small number of samples would likely not be useful anyhow.
+ if (m_sampleCount < 10)
+ return;
+
+ // (1) Build and sort 'opcodeSampleInfo' array.
+
+ OpcodeSampleInfo opcodeSampleInfo[numOpcodeIDs];
+ for (int i = 0; i < numOpcodeIDs; ++i) {
+ opcodeSampleInfo[i].opcode = static_cast<OpcodeID>(i);
+ opcodeSampleInfo[i].count = m_opcodeSamples[i];
+ opcodeSampleInfo[i].countInCTIFunctions = m_opcodeSamplesInCTIFunctions[i];
+ }
+
+ qsort(opcodeSampleInfo, numOpcodeIDs, sizeof(OpcodeSampleInfo), compareOpcodeIndicesSampling);
+
+ // (2) Print Opcode sampling results.
+
+ printf("\nBytecode samples [*]\n");
+ printf(" sample %% of %% of | cti cti %%\n");
+ printf("opcode count VM total | count of self\n");
+ printf("------------------------------------------------------- | ----------------\n");
+
+ for (int i = 0; i < numOpcodeIDs; ++i) {
+ long long count = opcodeSampleInfo[i].count;
+ if (!count)
+ continue;
+
+ OpcodeID opcodeID = opcodeSampleInfo[i].opcode;
+
+ const char* opcodeName = opcodeNames[opcodeID];
+ const char* opcodePadding = padOpcodeName(opcodeID, 28);
+ double percentOfVM = (static_cast<double>(count) * 100) / m_opcodeSampleCount;
+ double percentOfTotal = (static_cast<double>(count) * 100) / m_sampleCount;
+ long long countInCTIFunctions = opcodeSampleInfo[i].countInCTIFunctions;
+ double percentInCTIFunctions = (static_cast<double>(countInCTIFunctions) * 100) / count;
+ fprintf(stdout, "%s:%s%-6lld %.3f%%\t%.3f%%\t | %-6lld %.3f%%\n", opcodeName, opcodePadding, count, percentOfVM, percentOfTotal, countInCTIFunctions, percentInCTIFunctions);
+ }
+
+ printf("\n[*] Samples inside host code are not charged to any Bytecode.\n\n");
+ printf("\tSamples inside VM:\t\t%lld / %lld (%.3f%%)\n", m_opcodeSampleCount, m_sampleCount, (static_cast<double>(m_opcodeSampleCount) * 100) / m_sampleCount);
+ printf("\tSamples inside host code:\t%lld / %lld (%.3f%%)\n\n", m_sampleCount - m_opcodeSampleCount, m_sampleCount, (static_cast<double>(m_sampleCount - m_opcodeSampleCount) * 100) / m_sampleCount);
+ printf("\tsample count:\tsamples inside this opcode\n");
+ printf("\t%% of VM:\tsample count / all opcode samples\n");
+ printf("\t%% of total:\tsample count / all samples\n");
+ printf("\t--------------\n");
+ printf("\tcti count:\tsamples inside a CTI function called by this opcode\n");
+ printf("\tcti %% of self:\tcti count / sample count\n");
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+
+ // (3) Build and sort 'codeBlockSamples' array.
+
+ int scopeCount = m_scopeSampleMap->size();
+ Vector<ScriptSampleRecord*> codeBlockSamples(scopeCount);
+ ScriptSampleRecordMap::iterator iter = m_scopeSampleMap->begin();
+ for (int i = 0; i < scopeCount; ++i, ++iter)
+ codeBlockSamples[i] = iter->second.get();
+
+ qsort(codeBlockSamples.begin(), scopeCount, sizeof(ScriptSampleRecord*), compareScriptSampleRecords);
+
+ // (4) Print data from 'codeBlockSamples' array.
+
+ printf("\nCodeBlock samples\n\n");
+
+ for (int i = 0; i < scopeCount; ++i) {
+ ScriptSampleRecord* record = codeBlockSamples[i];
+ CodeBlock* codeBlock = record->m_codeBlock;
+
+ double blockPercent = (record->m_sampleCount * 100.0) / m_sampleCount;
+
+ if (blockPercent >= 1) {
+ printf("#%d: %s:%d: %d / %lld (%.3f%%)\n", i + 1, record->m_executable->sourceURL().utf8().data(), codeBlock->lineNumberForBytecodeOffset(0), record->m_sampleCount, m_sampleCount, blockPercent);
+ if (i < 10) {
+ HashMap<unsigned,unsigned> lineCounts;
+ codeBlock->dump(exec);
+
+ printf(" Opcode and line number samples [*]\n\n");
+ for (unsigned op = 0; op < record->m_size; ++op) {
+ int count = record->m_samples[op];
+ if (count) {
+ printf(" [% 4d] has sample count: % 4d\n", op, count);
+ unsigned line = codeBlock->lineNumberForBytecodeOffset(op);
+ lineCounts.set(line, (lineCounts.contains(line) ? lineCounts.get(line) : 0) + count);
+ }
+ }
+ printf("\n");
+
+ int linesCount = lineCounts.size();
+ Vector<LineCountInfo> lineCountInfo(linesCount);
+ int lineno = 0;
+ for (HashMap<unsigned,unsigned>::iterator iter = lineCounts.begin(); iter != lineCounts.end(); ++iter, ++lineno) {
+ lineCountInfo[lineno].line = iter->first;
+ lineCountInfo[lineno].count = iter->second;
+ }
+
+ qsort(lineCountInfo.begin(), linesCount, sizeof(LineCountInfo), compareLineCountInfoSampling);
+
+ for (lineno = 0; lineno < linesCount; ++lineno) {
+ printf(" Line #%d has sample count %d.\n", lineCountInfo[lineno].line, lineCountInfo[lineno].count);
+ }
+ printf("\n");
+ printf(" [*] Samples inside host code are charged to the calling Bytecode.\n");
+ printf(" Samples on a call / return boundary are not charged to a specific opcode or line.\n\n");
+ printf(" Samples on a call / return boundary: %d / %d (%.3f%%)\n\n", record->m_sampleCount - record->m_opcodeSampleCount, record->m_sampleCount, (static_cast<double>(record->m_sampleCount - record->m_opcodeSampleCount) * 100) / record->m_sampleCount);
+ }
+ }
+ }
+#else
+ UNUSED_PARAM(exec);
+#endif
+}
+
+#else
+
+void SamplingTool::dump(ExecState*)
+{
+}
+
+#endif
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/SamplingTool.h b/Source/JavaScriptCore/bytecode/SamplingTool.h
new file mode 100644
index 000000000..b69ef026e
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/SamplingTool.h
@@ -0,0 +1,363 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SamplingTool_h
+#define SamplingTool_h
+
+#include "Strong.h"
+#include "Nodes.h"
+#include "Opcode.h"
+#include "SamplingCounter.h"
+#include <wtf/Assertions.h>
+#include <wtf/Atomics.h>
+#include <wtf/HashMap.h>
+#include <wtf/MainThread.h>
+#include <wtf/Threading.h>
+
+namespace JSC {
+
+ class ScriptExecutable;
+
+ class SamplingFlags {
+ public:
+ static void start();
+ static void stop();
+
+#if ENABLE(SAMPLING_FLAGS)
+ static void setFlag(unsigned flag)
+ {
+ ASSERT(flag >= 1);
+ ASSERT(flag <= 32);
+ s_flags |= 1u << (flag - 1);
+ }
+
+ static void clearFlag(unsigned flag)
+ {
+ ASSERT(flag >= 1);
+ ASSERT(flag <= 32);
+ s_flags &= ~(1u << (flag - 1));
+ }
+
+ static void sample();
+
+ class ScopedFlag {
+ public:
+ ScopedFlag(int flag)
+ : m_flag(flag)
+ {
+ setFlag(flag);
+ }
+
+ ~ScopedFlag()
+ {
+ clearFlag(m_flag);
+ }
+
+ private:
+ int m_flag;
+ };
+
+ static const void* addressOfFlags()
+ {
+ return &s_flags;
+ }
+
+#endif
+ private:
+ static uint32_t s_flags;
+#if ENABLE(SAMPLING_FLAGS)
+ static uint64_t s_flagCounts[33];
+#endif
+ };
+
+#if ENABLE(SAMPLING_REGIONS)
+ class SamplingRegion {
+ public:
+ // Create a scoped sampling region, named by a C string constant that
+ // describes what you are doing. The name must be immutable and persist for
+ // the lifetime of the process.
+ SamplingRegion(const char* name)
+ {
+ if (!isMainThread()) {
+ m_name = 0;
+ return;
+ }
+
+ m_name = name;
+ exchangeCurrent(this, &m_previous);
+ ASSERT(!m_previous || m_previous > this);
+ }
+
+ ~SamplingRegion()
+ {
+ if (!m_name)
+ return;
+
+ ASSERT(bitwise_cast<SamplingRegion*>(s_currentOrReserved & ~1) == this);
+ exchangeCurrent(m_previous);
+ }
+
+ static void sample();
+
+ static void dump();
+
+ private:
+ const char* m_name;
+ SamplingRegion* m_previous;
+
+ static void exchangeCurrent(SamplingRegion* current, SamplingRegion** previousPtr = 0)
+ {
+ uintptr_t previous;
+ while (true) {
+ previous = s_currentOrReserved;
+
+ // If it's reserved (i.e. sampling thread is reading it), loop around.
+ if (previous & 1) {
+#if OS(UNIX)
+ sched_yield();
+#endif
+ continue;
+ }
+
+ // If we're going to CAS, then make sure previous is set.
+ if (previousPtr)
+ *previousPtr = bitwise_cast<SamplingRegion*>(previous);
+
+ if (WTF::weakCompareAndSwap(&s_currentOrReserved, previous, bitwise_cast<uintptr_t>(current)))
+ break;
+ }
+ }
+
+ static void dumpInternal();
+
+ class Locker {
+ public:
+ Locker();
+ ~Locker();
+ };
+
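+ // s_currentOrReserved packs a SamplingRegion* together with a lock bit:
+ // bit 0 set means the sampling thread has reserved the region stack for
+ // reading; the remaining bits point at the innermost active region (or 0).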
+ static volatile uintptr_t s_currentOrReserved;
+
+ // rely on identity hashing of string constants
+ static Spectrum<const char*>* s_spectrum;
+
+ static unsigned long s_noneOfTheAbove;
+
+ static unsigned s_numberOfSamplesSinceDump;
+ };
+#else // ENABLE(SAMPLING_REGIONS)
+ class SamplingRegion {
+ public:
+ SamplingRegion(const char*) { }
+ void dump();
+ };
+#endif // ENABLE(SAMPLING_REGIONS)
+
+ class CodeBlock;
+ class ExecState;
+ class Interpreter;
+ class ScopeNode;
+ struct Instruction;
+
+ struct ScriptSampleRecord {
+ ScriptSampleRecord(JSGlobalData& globalData, ScriptExecutable* executable)
+ : m_executable(globalData, executable)
+ , m_codeBlock(0)
+ , m_sampleCount(0)
+ , m_opcodeSampleCount(0)
+ , m_samples(0)
+ , m_size(0)
+ {
+ }
+
+ ~ScriptSampleRecord()
+ {
+ if (m_samples)
+ free(m_samples);
+ }
+
+ void sample(CodeBlock*, Instruction*);
+
+ Strong<ScriptExecutable> m_executable;
+ CodeBlock* m_codeBlock;
+ int m_sampleCount;
+ int m_opcodeSampleCount;
+ int* m_samples;
+ unsigned m_size;
+ };
+
+ typedef HashMap<ScriptExecutable*, OwnPtr<ScriptSampleRecord> > ScriptSampleRecordMap;
+
+ class SamplingThread {
+ public:
+ // Sampling thread state.
+ static bool s_running;
+ static unsigned s_hertz;
+ static ThreadIdentifier s_samplingThread;
+
+ static void start(unsigned hertz = 10000);
+ static void stop();
+
+ static void* threadStartFunc(void*);
+ };
+
+ class SamplingTool {
+ public:
+ friend struct CallRecord;
+ friend class HostCallRecord;
+
+#if ENABLE(OPCODE_SAMPLING)
+ class CallRecord {
+ WTF_MAKE_NONCOPYABLE(CallRecord);
+ public:
+ CallRecord(SamplingTool* samplingTool)
+ : m_samplingTool(samplingTool)
+ , m_savedSample(samplingTool->m_sample)
+ , m_savedCodeBlock(samplingTool->m_codeBlock)
+ {
+ }
+
+ ~CallRecord()
+ {
+ m_samplingTool->m_sample = m_savedSample;
+ m_samplingTool->m_codeBlock = m_savedCodeBlock;
+ }
+
+ private:
+ SamplingTool* m_samplingTool;
+ intptr_t m_savedSample;
+ CodeBlock* m_savedCodeBlock;
+ };
+
+ class HostCallRecord : public CallRecord {
+ public:
+ HostCallRecord(SamplingTool* samplingTool)
+ : CallRecord(samplingTool)
+ {
+ samplingTool->m_sample |= 0x1;
+ }
+ };
+#else
+ class CallRecord {
+ WTF_MAKE_NONCOPYABLE(CallRecord);
+ public:
+ CallRecord(SamplingTool*)
+ {
+ }
+ };
+
+ class HostCallRecord : public CallRecord {
+ public:
+ HostCallRecord(SamplingTool* samplingTool)
+ : CallRecord(samplingTool)
+ {
+ }
+ };
+#endif
+
+ SamplingTool(Interpreter* interpreter)
+ : m_interpreter(interpreter)
+ , m_codeBlock(0)
+ , m_sample(0)
+ , m_sampleCount(0)
+ , m_opcodeSampleCount(0)
+#if ENABLE(CODEBLOCK_SAMPLING)
+ , m_scopeSampleMap(adoptPtr(new ScriptSampleRecordMap))
+#endif
+ {
+ memset(m_opcodeSamples, 0, sizeof(m_opcodeSamples));
+ memset(m_opcodeSamplesInCTIFunctions, 0, sizeof(m_opcodeSamplesInCTIFunctions));
+ }
+
+ void setup();
+ void dump(ExecState*);
+
+ void notifyOfScope(JSGlobalData&, ScriptExecutable* scope);
+
+ void sample(CodeBlock* codeBlock, Instruction* vPC)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(vPC) & 0x3));
+ m_codeBlock = codeBlock;
+ m_sample = reinterpret_cast<intptr_t>(vPC);
+ }
+
+ CodeBlock** codeBlockSlot() { return &m_codeBlock; }
+ intptr_t* sampleSlot() { return &m_sample; }
+
+ void* encodeSample(Instruction* vPC, bool inCTIFunction = false, bool inHostFunction = false)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(vPC) & 0x3));
+ return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(vPC) | (static_cast<intptr_t>(inCTIFunction) << 1) | static_cast<intptr_t>(inHostFunction));
+ }
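+
+ // The two low bits of the sample word are free because Instruction
+ // pointers are at least 4-byte aligned (see the ASSERT above): bit 0 tags
+ // a host-function sample and bit 1 a CTI-function sample, matching the
+ // decoding in Sample::inHostFunction() and Sample::inCTIFunction().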
+
+ static void sample();
+
+ private:
+ class Sample {
+ public:
+ Sample(volatile intptr_t sample, CodeBlock* volatile codeBlock)
+ : m_sample(sample)
+ , m_codeBlock(codeBlock)
+ {
+ }
+
+ bool isNull() { return !m_sample; }
+ CodeBlock* codeBlock() { return m_codeBlock; }
+ Instruction* vPC() { return reinterpret_cast<Instruction*>(m_sample & ~0x3); }
+ bool inHostFunction() { return m_sample & 0x1; }
+ bool inCTIFunction() { return m_sample & 0x2; }
+
+ private:
+ intptr_t m_sample;
+ CodeBlock* m_codeBlock;
+ };
+
+ void doRun();
+ static SamplingTool* s_samplingTool;
+
+ Interpreter* m_interpreter;
+
+ // State tracked by the main thread, used by the sampling thread.
+ CodeBlock* m_codeBlock;
+ intptr_t m_sample;
+
+ // Gathered sample data.
+ long long m_sampleCount;
+ long long m_opcodeSampleCount;
+ unsigned m_opcodeSamples[numOpcodeIDs];
+ unsigned m_opcodeSamplesInCTIFunctions[numOpcodeIDs];
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+ Mutex m_scriptSampleMapMutex;
+ OwnPtr<ScriptSampleRecordMap> m_scopeSampleMap;
+#endif
+ };
+
+} // namespace JSC
+
+#endif // SamplingTool_h
diff --git a/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp b/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp
new file mode 100644
index 000000000..ec18782d5
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "StructureStubInfo.h"
+
+#include "JSObject.h"
+#include "ScopeChain.h"
+
+namespace JSC {
+
+#if ENABLE(JIT)
+void StructureStubInfo::deref()
+{
+ switch (accessType) {
+ case access_get_by_id_self_list: {
+ PolymorphicAccessStructureList* polymorphicStructures = u.getByIdSelfList.structureList;
+ delete polymorphicStructures;
+ return;
+ }
+ case access_get_by_id_proto_list: {
+ PolymorphicAccessStructureList* polymorphicStructures = u.getByIdProtoList.structureList;
+ delete polymorphicStructures;
+ return;
+ }
+ case access_get_by_id_self:
+ case access_get_by_id_proto:
+ case access_get_by_id_chain:
+ case access_put_by_id_transition_normal:
+ case access_put_by_id_transition_direct:
+ case access_put_by_id_replace:
+ case access_unset:
+ case access_get_by_id_generic:
+ case access_put_by_id_generic:
+ case access_get_array_length:
+ case access_get_string_length:
+ // These access types own no separately allocated memory, so there is nothing to release.
+ return;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+}
+
+bool StructureStubInfo::visitWeakReferences()
+{
+ switch (accessType) {
+ case access_get_by_id_self:
+ if (!Heap::isMarked(u.getByIdSelf.baseObjectStructure.get()))
+ return false;
+ break;
+ case access_get_by_id_proto:
+ if (!Heap::isMarked(u.getByIdProto.baseObjectStructure.get())
+ || !Heap::isMarked(u.getByIdProto.prototypeStructure.get()))
+ return false;
+ break;
+ case access_get_by_id_chain:
+ if (!Heap::isMarked(u.getByIdChain.baseObjectStructure.get())
+ || !Heap::isMarked(u.getByIdChain.chain.get()))
+ return false;
+ break;
+ case access_get_by_id_self_list: {
+ PolymorphicAccessStructureList* polymorphicStructures = u.getByIdSelfList.structureList;
+ if (!polymorphicStructures->visitWeak(u.getByIdSelfList.listSize)) {
+ delete polymorphicStructures;
+ return false;
+ }
+ break;
+ }
+ case access_get_by_id_proto_list: {
+ PolymorphicAccessStructureList* polymorphicStructures = u.getByIdProtoList.structureList;
+ if (!polymorphicStructures->visitWeak(u.getByIdProtoList.listSize)) {
+ delete polymorphicStructures;
+ return false;
+ }
+ break;
+ }
+ case access_put_by_id_transition_normal:
+ case access_put_by_id_transition_direct:
+ if (!Heap::isMarked(u.putByIdTransition.previousStructure.get())
+ || !Heap::isMarked(u.putByIdTransition.structure.get())
+ || !Heap::isMarked(u.putByIdTransition.chain.get()))
+ return false;
+ break;
+ case access_put_by_id_replace:
+ if (!Heap::isMarked(u.putByIdReplace.baseObjectStructure.get()))
+ return false;
+ break;
+ default:
+ // The remaining access types hold no weak references, so there is nothing to visit.
+ break;
+ }
+ return true;
+}
+#endif // ENABLE(JIT)
+
+} // namespace JSC
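
[Note: visitWeakReferences() above returns false exactly when a Structure the stub caches is no longer marked; the caller is then expected to discard the stub. A simplified sketch of that sweep contract, using hypothetical stand-in types rather than the real GC interface:]

#include <vector>

// Sketch of the weak-reference sweep contract implied by visitWeakReferences():
// after marking, each stub is polled; stubs whose cached structures died are reset.
struct FakeStub {
    bool structureStillMarked = true; // stands in for the Heap::isMarked(...) checks
    bool live = true;
    bool visitWeakReferences() { return structureStillMarked; }
    void reset() { live = false; } // corresponds to StructureStubInfo::reset()
};

void finalizeStubs(std::vector<FakeStub>& stubs)
{
    for (FakeStub& stub : stubs) {
        if (!stub.visitWeakReferences())
            stub.reset(); // cached structures died: discard the compiled stub
    }
}
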
diff --git a/Source/JavaScriptCore/bytecode/StructureStubInfo.h b/Source/JavaScriptCore/bytecode/StructureStubInfo.h
new file mode 100644
index 000000000..830b75594
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/StructureStubInfo.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef StructureStubInfo_h
+#define StructureStubInfo_h
+
+#if ENABLE(JIT)
+
+#include "Instruction.h"
+#include "MacroAssembler.h"
+#include "Opcode.h"
+#include "Structure.h"
+
+namespace JSC {
+
+ enum AccessType {
+ access_get_by_id_self,
+ access_get_by_id_proto,
+ access_get_by_id_chain,
+ access_get_by_id_self_list,
+ access_get_by_id_proto_list,
+ access_put_by_id_transition_normal,
+ access_put_by_id_transition_direct,
+ access_put_by_id_replace,
+ access_unset,
+ access_get_by_id_generic,
+ access_put_by_id_generic,
+ access_get_array_length,
+ access_get_string_length,
+ };
+
+ inline bool isGetByIdAccess(AccessType accessType)
+ {
+ switch (accessType) {
+ case access_get_by_id_self:
+ case access_get_by_id_proto:
+ case access_get_by_id_chain:
+ case access_get_by_id_self_list:
+ case access_get_by_id_proto_list:
+ case access_get_by_id_generic:
+ case access_get_array_length:
+ case access_get_string_length:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ inline bool isPutByIdAccess(AccessType accessType)
+ {
+ switch (accessType) {
+ case access_put_by_id_transition_normal:
+ case access_put_by_id_transition_direct:
+ case access_put_by_id_replace:
+ case access_put_by_id_generic:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ struct StructureStubInfo {
+ StructureStubInfo()
+ : accessType(access_unset)
+ , seen(false)
+ {
+ }
+
+ void initGetByIdSelf(JSGlobalData& globalData, JSCell* owner, Structure* baseObjectStructure)
+ {
+ accessType = access_get_by_id_self;
+
+ u.getByIdSelf.baseObjectStructure.set(globalData, owner, baseObjectStructure);
+ }
+
+ void initGetByIdProto(JSGlobalData& globalData, JSCell* owner, Structure* baseObjectStructure, Structure* prototypeStructure)
+ {
+ accessType = access_get_by_id_proto;
+
+ u.getByIdProto.baseObjectStructure.set(globalData, owner, baseObjectStructure);
+ u.getByIdProto.prototypeStructure.set(globalData, owner, prototypeStructure);
+ }
+
+ void initGetByIdChain(JSGlobalData& globalData, JSCell* owner, Structure* baseObjectStructure, StructureChain* chain)
+ {
+ accessType = access_get_by_id_chain;
+
+ u.getByIdChain.baseObjectStructure.set(globalData, owner, baseObjectStructure);
+ u.getByIdChain.chain.set(globalData, owner, chain);
+ }
+
+ void initGetByIdSelfList(PolymorphicAccessStructureList* structureList, int listSize)
+ {
+ accessType = access_get_by_id_self_list;
+
+ u.getByIdSelfList.structureList = structureList;
+ u.getByIdSelfList.listSize = listSize;
+ }
+
+ void initGetByIdProtoList(PolymorphicAccessStructureList* structureList, int listSize)
+ {
+ accessType = access_get_by_id_proto_list;
+
+ u.getByIdProtoList.structureList = structureList;
+ u.getByIdProtoList.listSize = listSize;
+ }
+
+ // PutById*
+
+ void initPutByIdTransition(JSGlobalData& globalData, JSCell* owner, Structure* previousStructure, Structure* structure, StructureChain* chain, bool isDirect)
+ {
+ if (isDirect)
+ accessType = access_put_by_id_transition_direct;
+ else
+ accessType = access_put_by_id_transition_normal;
+
+ u.putByIdTransition.previousStructure.set(globalData, owner, previousStructure);
+ u.putByIdTransition.structure.set(globalData, owner, structure);
+ u.putByIdTransition.chain.set(globalData, owner, chain);
+ }
+
+ void initPutByIdReplace(JSGlobalData& globalData, JSCell* owner, Structure* baseObjectStructure)
+ {
+ accessType = access_put_by_id_replace;
+
+ u.putByIdReplace.baseObjectStructure.set(globalData, owner, baseObjectStructure);
+ }
+
+ void reset()
+ {
+ accessType = access_unset;
+
+ stubRoutine = MacroAssemblerCodeRef();
+ }
+
+ void deref();
+
+ bool visitWeakReferences();
+
+ bool seenOnce()
+ {
+ return seen;
+ }
+
+ void setSeen()
+ {
+ seen = true;
+ }
+
+ unsigned bytecodeIndex;
+
+ int8_t accessType;
+ int8_t seen;
+
+#if ENABLE(DFG_JIT)
+ int8_t baseGPR;
+#if USE(JSVALUE32_64)
+ int8_t valueTagGPR;
+#endif
+ int8_t valueGPR;
+ int8_t scratchGPR;
+ int16_t deltaCallToDone;
+ int16_t deltaCallToStructCheck;
+ int16_t deltaCallToSlowCase;
+ int16_t deltaCheckImmToCall;
+#if USE(JSVALUE64)
+ int16_t deltaCallToLoadOrStore;
+#else
+ int16_t deltaCallToTagLoadOrStore;
+ int16_t deltaCallToPayloadLoadOrStore;
+#endif
+#endif // ENABLE(DFG_JIT)
+
+ union {
+ struct {
+ // It would be unwise to put anything here, as it will surely be overwritten.
+ } unset;
+ struct {
+ WriteBarrierBase<Structure> baseObjectStructure;
+ } getByIdSelf;
+ struct {
+ WriteBarrierBase<Structure> baseObjectStructure;
+ WriteBarrierBase<Structure> prototypeStructure;
+ } getByIdProto;
+ struct {
+ WriteBarrierBase<Structure> baseObjectStructure;
+ WriteBarrierBase<StructureChain> chain;
+ } getByIdChain;
+ struct {
+ PolymorphicAccessStructureList* structureList;
+ int listSize;
+ } getByIdSelfList;
+ struct {
+ PolymorphicAccessStructureList* structureList;
+ int listSize;
+ } getByIdProtoList;
+ struct {
+ WriteBarrierBase<Structure> previousStructure;
+ WriteBarrierBase<Structure> structure;
+ WriteBarrierBase<StructureChain> chain;
+ } putByIdTransition;
+ struct {
+ WriteBarrierBase<Structure> baseObjectStructure;
+ } putByIdReplace;
+ } u;
+
+ MacroAssemblerCodeRef stubRoutine;
+ CodeLocationCall callReturnLocation;
+ CodeLocationLabel hotPathBegin;
+ };
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // StructureStubInfo_h
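
[Note: StructureStubInfo is a hand-rolled tagged union: accessType discriminates which member of u is live, each init* method writes the tag and its payload together, and deref()/visitWeakReferences() switch on the tag before touching the union. A minimal sketch of the pattern with hypothetical simplified types:]

#include <cassert>

// Sketch of the accessType-discriminated union pattern: the tag is written
// together with the payload, and must be read before touching the union.
struct FakeStructure { int id; };

struct MiniStubInfo {
    enum Access { Unset, GetSelf, GetProto } accessType = Unset;
    union {
        struct { FakeStructure* base; } getSelf;
        struct { FakeStructure* base; FakeStructure* proto; } getProto;
    } u;

    void initGetSelf(FakeStructure* base) // only one init shown for brevity
    {
        accessType = GetSelf;
        u.getSelf.base = base;
    }

    FakeStructure* baseStructure() const
    {
        assert(accessType == GetSelf || accessType == GetProto);
        return accessType == GetSelf ? u.getSelf.base : u.getProto.base;
    }
};
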
diff --git a/Source/JavaScriptCore/bytecode/ValueProfile.cpp b/Source/JavaScriptCore/bytecode/ValueProfile.cpp
new file mode 100644
index 000000000..2d7770aed
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ValueProfile.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ValueProfile.h"
+
+namespace JSC {
+
+#if ENABLE(VALUE_PROFILER)
+PredictedType ValueProfile::computeUpdatedPrediction()
+{
+ for (unsigned i = 0; i < totalNumberOfBuckets; ++i) {
+ JSValue value = JSValue::decode(m_buckets[i]);
+ if (!value)
+ continue;
+
+ m_numberOfSamplesInPrediction++;
+ mergePrediction(m_prediction, predictionFromValue(value));
+
+ m_buckets[i] = JSValue::encode(JSValue());
+ }
+
+ return m_prediction;
+}
+#endif // ENABLE(VALUE_PROFILER)
+
+} // namespace JSC
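
[Note: computeUpdatedPrediction() folds every non-empty bucket into the running prediction and then empties the bucket, so each sample is counted once. Since predicted types form a join semilattice, the merge can be modeled as a bitwise OR; a self-contained sketch under that assumption — MiniProfile and the mask values are hypothetical:]

#include <cstdint>
#include <cstdio>

// Sketch of the bucket-merge loop in computeUpdatedPrediction(): predictions
// only ever widen, so merging is a bitwise OR, and buckets are cleared once
// their contribution has been folded in.
using Prediction = uint32_t; // stands in for JSC::PredictedType
const Prediction PredictNone = 0, PredictInt32 = 1 << 0, PredictDouble = 1 << 1;

struct MiniProfile {
    static const unsigned totalBuckets = 2;
    Prediction buckets[totalBuckets] = { PredictNone, PredictNone }; // pre-classified samples
    Prediction prediction = PredictNone;
    unsigned samplesInPrediction = 0;

    Prediction computeUpdatedPrediction()
    {
        for (unsigned i = 0; i < totalBuckets; ++i) {
            if (buckets[i] == PredictNone)
                continue; // empty bucket: nothing sampled since the last update
            ++samplesInPrediction;
            prediction |= buckets[i];  // plays the role of mergePrediction()
            buckets[i] = PredictNone;  // consume the sample
        }
        return prediction;
    }
};

int main()
{
    MiniProfile p;
    p.buckets[0] = PredictInt32;
    p.buckets[1] = PredictDouble;
    std::printf("merged prediction = %u\n", static_cast<unsigned>(p.computeUpdatedPrediction())); // 3
}
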
diff --git a/Source/JavaScriptCore/bytecode/ValueProfile.h b/Source/JavaScriptCore/bytecode/ValueProfile.h
new file mode 100644
index 000000000..02a1d6bf9
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ValueProfile.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ValueProfile_h
+#define ValueProfile_h
+
+#include "JSArray.h"
+#include "PredictedType.h"
+#include "Structure.h"
+#include "WriteBarrier.h"
+
+namespace JSC {
+
+#if ENABLE(VALUE_PROFILER)
+struct ValueProfile {
+ static const unsigned logNumberOfBuckets = 0; // 1 bucket
+ static const unsigned numberOfBuckets = 1 << logNumberOfBuckets;
+ static const unsigned numberOfSpecFailBuckets = 1;
+ static const unsigned bucketIndexMask = numberOfBuckets - 1;
+ static const unsigned totalNumberOfBuckets = numberOfBuckets + numberOfSpecFailBuckets;
+
+ ValueProfile()
+ : m_bytecodeOffset(-1)
+ , m_prediction(PredictNone)
+ , m_numberOfSamplesInPrediction(0)
+ {
+ for (unsigned i = 0; i < totalNumberOfBuckets; ++i)
+ m_buckets[i] = JSValue::encode(JSValue());
+ }
+
+ ValueProfile(int bytecodeOffset)
+ : m_bytecodeOffset(bytecodeOffset)
+ , m_prediction(PredictNone)
+ , m_numberOfSamplesInPrediction(0)
+ {
+ for (unsigned i = 0; i < totalNumberOfBuckets; ++i)
+ m_buckets[i] = JSValue::encode(JSValue());
+ }
+
+ EncodedJSValue* specFailBucket(unsigned i)
+ {
+ ASSERT(numberOfBuckets + i < totalNumberOfBuckets);
+ return m_buckets + numberOfBuckets + i;
+ }
+
+ const ClassInfo* classInfo(unsigned bucket) const
+ {
+ JSValue value = JSValue::decode(m_buckets[bucket]);
+ if (!!value) {
+ if (!value.isCell())
+ return 0;
+ return value.asCell()->structure()->classInfo();
+ }
+ return 0;
+ }
+
+ unsigned numberOfSamples() const
+ {
+ unsigned result = 0;
+ for (unsigned i = 0; i < totalNumberOfBuckets; ++i) {
+ if (!!JSValue::decode(m_buckets[i]))
+ result++;
+ }
+ return result;
+ }
+
+ unsigned totalNumberOfSamples() const
+ {
+ return numberOfSamples() + m_numberOfSamplesInPrediction;
+ }
+
+ bool isLive() const
+ {
+ for (unsigned i = 0; i < totalNumberOfBuckets; ++i) {
+ if (!!JSValue::decode(m_buckets[i]))
+ return true;
+ }
+ return false;
+ }
+
+#ifndef NDEBUG
+ void dump(FILE* out)
+ {
+ fprintf(out,
+ "samples = %u, prediction = %s",
+ totalNumberOfSamples(),
+ predictionToString(m_prediction));
+ bool first = true;
+ for (unsigned i = 0; i < totalNumberOfBuckets; ++i) {
+ JSValue value = JSValue::decode(m_buckets[i]);
+ if (!!value) {
+ if (first) {
+ fprintf(out, ": ");
+ first = false;
+ } else
+ fprintf(out, ", ");
+ fprintf(out, "%s", value.description());
+ }
+ }
+ }
+#endif
+
+ // Updates the prediction and returns the new one.
+ PredictedType computeUpdatedPrediction();
+
+ int m_bytecodeOffset; // -1 for prologue
+
+ PredictedType m_prediction;
+ unsigned m_numberOfSamplesInPrediction;
+
+ EncodedJSValue m_buckets[totalNumberOfBuckets];
+};
+
+inline int getValueProfileBytecodeOffset(ValueProfile* valueProfile)
+{
+ return valueProfile->m_bytecodeOffset;
+}
+
+// This is a mini value profile to catch pathologies. It is a counter that gets
+// incremented when we take the slow path on any instruction.
+struct RareCaseProfile {
+ RareCaseProfile(int bytecodeOffset)
+ : m_bytecodeOffset(bytecodeOffset)
+ , m_counter(0)
+ {
+ }
+
+ int m_bytecodeOffset;
+ uint32_t m_counter;
+};
+
+inline int getRareCaseProfileBytecodeOffset(RareCaseProfile* rareCaseProfile)
+{
+ return rareCaseProfile->m_bytecodeOffset;
+}
+#endif // ENABLE(VALUE_PROFILER)
+
+} // namespace JSC
+
+#endif // ValueProfile_h
+
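
[Note: RareCaseProfile above is deliberately tiny — one counter per bytecode offset, bumped on each slow-path execution. A hedged sketch of how such a counter might feed a specialization heuristic; the 10% threshold and both names are illustrative assumptions, not WebKit's policy:]

#include <cstdint>

// Sketch: compare the slow-path counter against the total execution count to
// decide whether speculative code should still treat the slow case as rare.
struct MiniRareCaseProfile { int bytecodeOffset; uint32_t counter = 0; };

bool looksRare(const MiniRareCaseProfile& profile, uint32_t executionCount)
{
    // Treat the case as rare only if it fires on fewer than 10% of executions.
    return profile.counter * 10 < executionCount;
}
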
diff --git a/Source/JavaScriptCore/bytecode/ValueRecovery.h b/Source/JavaScriptCore/bytecode/ValueRecovery.h
new file mode 100644
index 000000000..356abe84f
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/ValueRecovery.h
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ValueRecovery_h
+#define ValueRecovery_h
+
+#include "DataFormat.h"
+#include "JSValue.h"
+#include "MacroAssembler.h"
+#include "VirtualRegister.h"
+#include <wtf/Platform.h>
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+namespace JSC {
+
+// Describes how to recover a given bytecode virtual register at a given
+// code point.
+enum ValueRecoveryTechnique {
+ // It's already in the register file at the right location.
+ AlreadyInRegisterFile,
+ // It's already in the register file but unboxed.
+ AlreadyInRegisterFileAsUnboxedInt32,
+ AlreadyInRegisterFileAsUnboxedCell,
+ AlreadyInRegisterFileAsUnboxedBoolean,
+ AlreadyInRegisterFileAsUnboxedDouble,
+ // It's in a register.
+ InGPR,
+ UnboxedInt32InGPR,
+ UnboxedBooleanInGPR,
+#if USE(JSVALUE32_64)
+ InPair,
+#endif
+ InFPR,
+ UInt32InGPR,
+ // It's in the register file, but at a different location.
+ DisplacedInRegisterFile,
+ // It's in the register file, at a different location, and it's unboxed.
+ Int32DisplacedInRegisterFile,
+ DoubleDisplacedInRegisterFile,
+ CellDisplacedInRegisterFile,
+ BooleanDisplacedInRegisterFile,
+ // It's a constant.
+ Constant,
+ // Don't know how to recover it.
+ DontKnow
+};
+
+class ValueRecovery {
+public:
+ ValueRecovery()
+ : m_technique(DontKnow)
+ {
+ }
+
+ static ValueRecovery alreadyInRegisterFile()
+ {
+ ValueRecovery result;
+ result.m_technique = AlreadyInRegisterFile;
+ return result;
+ }
+
+ static ValueRecovery alreadyInRegisterFileAsUnboxedInt32()
+ {
+ ValueRecovery result;
+ result.m_technique = AlreadyInRegisterFileAsUnboxedInt32;
+ return result;
+ }
+
+ static ValueRecovery alreadyInRegisterFileAsUnboxedCell()
+ {
+ ValueRecovery result;
+ result.m_technique = AlreadyInRegisterFileAsUnboxedCell;
+ return result;
+ }
+
+ static ValueRecovery alreadyInRegisterFileAsUnboxedBoolean()
+ {
+ ValueRecovery result;
+ result.m_technique = AlreadyInRegisterFileAsUnboxedBoolean;
+ return result;
+ }
+
+ static ValueRecovery alreadyInRegisterFileAsUnboxedDouble()
+ {
+ ValueRecovery result;
+ result.m_technique = AlreadyInRegisterFileAsUnboxedDouble;
+ return result;
+ }
+
+ static ValueRecovery inGPR(MacroAssembler::RegisterID gpr, DataFormat dataFormat)
+ {
+ ASSERT(dataFormat != DataFormatNone);
+#if USE(JSVALUE32_64)
+ ASSERT(dataFormat == DataFormatInteger || dataFormat == DataFormatCell || dataFormat == DataFormatBoolean);
+#endif
+ ValueRecovery result;
+ if (dataFormat == DataFormatInteger)
+ result.m_technique = UnboxedInt32InGPR;
+ else if (dataFormat == DataFormatBoolean)
+ result.m_technique = UnboxedBooleanInGPR;
+ else
+ result.m_technique = InGPR;
+ result.m_source.gpr = gpr;
+ return result;
+ }
+
+ static ValueRecovery uint32InGPR(MacroAssembler::RegisterID gpr)
+ {
+ ValueRecovery result;
+ result.m_technique = UInt32InGPR;
+ result.m_source.gpr = gpr;
+ return result;
+ }
+
+#if USE(JSVALUE32_64)
+ static ValueRecovery inPair(MacroAssembler::RegisterID tagGPR, MacroAssembler::RegisterID payloadGPR)
+ {
+ ValueRecovery result;
+ result.m_technique = InPair;
+ result.m_source.pair.tagGPR = tagGPR;
+ result.m_source.pair.payloadGPR = payloadGPR;
+ return result;
+ }
+#endif
+
+ static ValueRecovery inFPR(MacroAssembler::FPRegisterID fpr)
+ {
+ ValueRecovery result;
+ result.m_technique = InFPR;
+ result.m_source.fpr = fpr;
+ return result;
+ }
+
+ static ValueRecovery displacedInRegisterFile(VirtualRegister virtualReg, DataFormat dataFormat)
+ {
+ ValueRecovery result;
+ switch (dataFormat) {
+ case DataFormatInteger:
+ result.m_technique = Int32DisplacedInRegisterFile;
+ break;
+
+ case DataFormatDouble:
+ result.m_technique = DoubleDisplacedInRegisterFile;
+ break;
+
+ case DataFormatCell:
+ result.m_technique = CellDisplacedInRegisterFile;
+ break;
+
+ case DataFormatBoolean:
+ result.m_technique = BooleanDisplacedInRegisterFile;
+ break;
+
+ default:
+ ASSERT(dataFormat != DataFormatNone && dataFormat != DataFormatStorage);
+ result.m_technique = DisplacedInRegisterFile;
+ break;
+ }
+ result.m_source.virtualReg = virtualReg;
+ return result;
+ }
+
+ static ValueRecovery constant(JSValue value)
+ {
+ ValueRecovery result;
+ result.m_technique = Constant;
+ result.m_source.constant = JSValue::encode(value);
+ return result;
+ }
+
+ ValueRecoveryTechnique technique() const { return m_technique; }
+
+ bool isInRegisters() const
+ {
+ switch (m_technique) {
+ case InGPR:
+ case UnboxedInt32InGPR:
+ case UnboxedBooleanInGPR:
+#if USE(JSVALUE32_64)
+ case InPair:
+#endif
+ case InFPR:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ MacroAssembler::RegisterID gpr() const
+ {
+ ASSERT(m_technique == InGPR || m_technique == UnboxedInt32InGPR || m_technique == UnboxedBooleanInGPR || m_technique == UInt32InGPR);
+ return m_source.gpr;
+ }
+
+#if USE(JSVALUE32_64)
+ MacroAssembler::RegisterID tagGPR() const
+ {
+ ASSERT(m_technique == InPair);
+ return m_source.pair.tagGPR;
+ }
+
+ MacroAssembler::RegisterID payloadGPR() const
+ {
+ ASSERT(m_technique == InPair);
+ return m_source.pair.payloadGPR;
+ }
+#endif
+
+ MacroAssembler::FPRegisterID fpr() const
+ {
+ ASSERT(m_technique == InFPR);
+ return m_source.fpr;
+ }
+
+ VirtualRegister virtualRegister() const
+ {
+ ASSERT(m_technique == DisplacedInRegisterFile || m_technique == Int32DisplacedInRegisterFile || m_technique == DoubleDisplacedInRegisterFile || m_technique == CellDisplacedInRegisterFile || m_technique == BooleanDisplacedInRegisterFile);
+ return m_source.virtualReg;
+ }
+
+ JSValue constant() const
+ {
+ ASSERT(m_technique == Constant);
+ return JSValue::decode(m_source.constant);
+ }
+
+#ifndef NDEBUG
+ void dump(FILE* out) const
+ {
+ switch (technique()) {
+ case AlreadyInRegisterFile:
+ fprintf(out, "-");
+ break;
+ case AlreadyInRegisterFileAsUnboxedInt32:
+ fprintf(out, "(int32)");
+ break;
+ case AlreadyInRegisterFileAsUnboxedCell:
+ fprintf(out, "(cell)");
+ break;
+ case AlreadyInRegisterFileAsUnboxedBoolean:
+ fprintf(out, "(bool)");
+ break;
+ case AlreadyInRegisterFileAsUnboxedDouble:
+ fprintf(out, "(double)");
+ break;
+ case InGPR:
+ fprintf(out, "%%r%d", gpr());
+ break;
+ case UnboxedInt32InGPR:
+ fprintf(out, "int32(%%r%d)", gpr());
+ break;
+ case UnboxedBooleanInGPR:
+ fprintf(out, "bool(%%r%d)", gpr());
+ break;
+ case UInt32InGPR:
+ fprintf(out, "uint32(%%r%d)", gpr());
+ break;
+ case InFPR:
+ fprintf(out, "%%fr%d", fpr());
+ break;
+#if USE(JSVALUE32_64)
+ case InPair:
+ fprintf(out, "pair(%%r%d, %%r%d)", tagGPR(), payloadGPR());
+ break;
+#endif
+ case DisplacedInRegisterFile:
+ fprintf(out, "*%d", virtualRegister());
+ break;
+ case Int32DisplacedInRegisterFile:
+ fprintf(out, "*int32(%d)", virtualRegister());
+ break;
+ case DoubleDisplacedInRegisterFile:
+ fprintf(out, "*double(%d)", virtualRegister());
+ break;
+ case CellDisplacedInRegisterFile:
+ fprintf(out, "*cell(%d)", virtualRegister());
+ break;
+ case BooleanDisplacedInRegisterFile:
+ fprintf(out, "*bool(%d)", virtualRegister());
+ break;
+ case Constant:
+ fprintf(out, "[%s]", constant().description());
+ break;
+ case DontKnow:
+ fprintf(out, "!");
+ break;
+ default:
+ fprintf(out, "?%d", technique());
+ break;
+ }
+ }
+#endif
+
+private:
+ ValueRecoveryTechnique m_technique;
+ union {
+ MacroAssembler::RegisterID gpr;
+ MacroAssembler::FPRegisterID fpr;
+#if USE(JSVALUE32_64)
+ struct {
+ MacroAssembler::RegisterID tagGPR;
+ MacroAssembler::RegisterID payloadGPR;
+ } pair;
+#endif
+ VirtualRegister virtualReg;
+ EncodedJSValue constant;
+ } m_source;
+};
+
+} // namespace JSC
+
+#endif // ValueRecovery_h
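
[Note: ValueRecovery pairs a technique tag with a union and exposes only static factories, so every construction path sets the tag and exactly the union member that tag implies. A condensed sketch of the idiom with hypothetical simplified techniques:]

#include <cassert>

// Sketch of the static-factory style used by ValueRecovery: the default
// constructor is the only other entry point, and each factory keeps the
// tag and the union payload consistent.
enum class Technique { Constant, InRegister, DontKnow };

class MiniRecovery {
public:
    static MiniRecovery constant(int value)
    {
        MiniRecovery r;
        r.m_technique = Technique::Constant;
        r.m_source.constant = value;
        return r;
    }
    static MiniRecovery inRegister(int reg)
    {
        MiniRecovery r;
        r.m_technique = Technique::InRegister;
        r.m_source.reg = reg;
        return r;
    }
    Technique technique() const { return m_technique; }
    int constantValue() const { assert(m_technique == Technique::Constant); return m_source.constant; }

private:
    MiniRecovery() : m_technique(Technique::DontKnow) {}
    Technique m_technique;
    union { int constant; int reg; } m_source;
};
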
diff --git a/Source/JavaScriptCore/bytecode/VirtualRegister.h b/Source/JavaScriptCore/bytecode/VirtualRegister.h
new file mode 100644
index 000000000..b95f8b8fa
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/VirtualRegister.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VirtualRegister_h
+#define VirtualRegister_h
+
+#include <wtf/Platform.h>
+
+namespace JSC {
+
+// Type for a virtual register number (spill location).
+// An enum is used so that register numbers are type-checked at compile time,
+// averting accidental mixing with raw ints.
+enum VirtualRegister { InvalidVirtualRegister = -1 };
+COMPILE_ASSERT(sizeof(VirtualRegister) == sizeof(int), VirtualRegister_is_32bit);
+
+} // namespace JSC
+
+#endif // VirtualRegister_h
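
[Note: the enum trick above costs nothing at runtime but buys compile-time checking — an int does not implicitly convert to an enum, so a raw register number cannot be passed where a VirtualRegister is expected. A standalone sketch; MiniVirtualRegister, spillOffset, and the 8-byte slot size are hypothetical:]

// Sketch of the enum-as-typed-integer trick from VirtualRegister.h: a plain
// enum stays integer-sized but rejects implicit conversion from int.
enum MiniVirtualRegister { InvalidMiniVirtualRegister = -1 };
static_assert(sizeof(MiniVirtualRegister) == sizeof(int), "enum stays int-sized");

int spillOffset(MiniVirtualRegister reg) { return static_cast<int>(reg) * 8; } // assumes 8-byte slots

// spillOffset(5);                                   // would not compile: no int -> enum conversion
// spillOffset(static_cast<MiniVirtualRegister>(5)); // conversions must be explicit
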