Diffstat (limited to 'Source/JavaScriptCore/runtime/SymbolTable.h')
-rw-r--r-- | Source/JavaScriptCore/runtime/SymbolTable.h | 247 |
1 file changed, 226 insertions, 21 deletions
diff --git a/Source/JavaScriptCore/runtime/SymbolTable.h b/Source/JavaScriptCore/runtime/SymbolTable.h
index f540a12c7..9ddc32c8c 100644
--- a/Source/JavaScriptCore/runtime/SymbolTable.h
+++ b/Source/JavaScriptCore/runtime/SymbolTable.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007, 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2007, 2008, 2012 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -31,53 +31,163 @@
 #include "JSObject.h"
 #include "UString.h"
+#include "Watchpoint.h"
 #include <wtf/AlwaysInline.h>
 #include <wtf/HashTraits.h>
 
 namespace JSC {
 
+    class Watchpoint;
+    class WatchpointSet;
+
     static ALWAYS_INLINE int missingSymbolMarker() { return std::numeric_limits<int>::max(); }
 
     // The bit twiddling in this class assumes that every register index is a
     // reasonably small positive or negative number, and therefore has its high
     // four bits all set or all unset.
 
+    // In addition to implementing semantics-mandated variable attributes and
+    // implementation-mandated variable indexing, this class also implements
+    // watchpoints to be used for JIT optimizations. Because watchpoints are
+    // meant to be relatively rare, this class optimizes heavily for the case
+    // that they are not being used. To that end, this class uses the thin-fat
+    // idiom: either it is thin, in which case it contains an in-place encoded
+    // word that consists of attributes, the index, and a bit saying that it is
+    // thin; or it is fat, in which case it contains a pointer to a malloc'd
+    // data structure and a bit saying that it is fat. The malloc'd data
+    // structure will be malloced a second time upon copy, to preserve the
+    // property that in-place edits to SymbolTableEntry do not manifest in any
+    // copies. However, the malloc'd FatEntry data structure contains a ref-
+    // counted pointer to a shared WatchpointSet. Thus, in-place edits of the
+    // WatchpointSet will manifest in all copies. Here's a picture:
+    //
+    // SymbolTableEntry --> FatEntry --> WatchpointSet
+    //
+    // If you make a copy of a SymbolTableEntry, you will have:
+    //
+    // original: SymbolTableEntry --> FatEntry --> WatchpointSet
+    // copy:     SymbolTableEntry --> FatEntry -----^
     struct SymbolTableEntry {
+        // Use the SymbolTableEntry::Fast class, either via implicit cast or by calling
+        // getFast(), when you (1) only care about isNull(), getIndex(), and isReadOnly(),
+        // and (2) you are in a hot path where you need to minimize the number of times
+        // that you branch on isFat() when getting the bits().
+        class Fast {
+        public:
+            Fast()
+                : m_bits(0)
+            {
+            }
+
+            ALWAYS_INLINE Fast(const SymbolTableEntry& entry)
+                : m_bits(entry.bits())
+            {
+            }
+
+            bool isNull() const
+            {
+                return !m_bits;
+            }
+
+            int getIndex() const
+            {
+                return static_cast<int>(m_bits >> FlagBits);
+            }
+
+            bool isReadOnly() const
+            {
+                return m_bits & ReadOnlyFlag;
+            }
+
+            unsigned getAttributes() const
+            {
+                unsigned attributes = 0;
+                if (m_bits & ReadOnlyFlag)
+                    attributes |= ReadOnly;
+                if (m_bits & DontEnumFlag)
+                    attributes |= DontEnum;
+                return attributes;
+            }
+
+            bool isFat() const
+            {
+                return m_bits & FatFlag;
+            }
+
+        private:
+            friend struct SymbolTableEntry;
+            intptr_t m_bits;
+        };
+
         SymbolTableEntry()
             : m_bits(0)
         {
         }
 
         SymbolTableEntry(int index)
+            : m_bits(0)
         {
             ASSERT(isValidIndex(index));
             pack(index, false, false);
         }
 
         SymbolTableEntry(int index, unsigned attributes)
+            : m_bits(0)
         {
             ASSERT(isValidIndex(index));
             pack(index, attributes & ReadOnly, attributes & DontEnum);
         }
+
+        ~SymbolTableEntry()
+        {
+            freeFatEntry();
+        }
+
+        SymbolTableEntry(const SymbolTableEntry& other)
+            : m_bits(0)
+        {
+            *this = other;
+        }
+
+        SymbolTableEntry& operator=(const SymbolTableEntry& other)
+        {
+            if (UNLIKELY(other.isFat()))
+                return copySlow(other);
+            freeFatEntry();
+            m_bits = other.m_bits;
+            return *this;
+        }
+
         bool isNull() const
         {
-            return !m_bits;
+            return !bits();
         }
 
         int getIndex() const
         {
-            return m_bits >> FlagBits;
+            return static_cast<int>(bits() >> FlagBits);
         }
-
+
+        ALWAYS_INLINE Fast getFast() const
+        {
+            return Fast(*this);
+        }
+
+        ALWAYS_INLINE Fast getFast(bool& wasFat) const
+        {
+            Fast result;
+            wasFat = isFat();
+            if (wasFat)
+                result.m_bits = fatEntry()->m_bits;
+            else
+                result.m_bits = m_bits;
+            return result;
+        }
+
         unsigned getAttributes() const
         {
-            unsigned attributes = 0;
-            if (m_bits & ReadOnlyFlag)
-                attributes |= ReadOnly;
-            if (m_bits & DontEnumFlag)
-                attributes |= DontEnum;
-            return attributes;
+            return getFast().getAttributes();
         }
 
         void setAttributes(unsigned attributes)
@@ -87,30 +197,125 @@ namespace JSC {
 
         bool isReadOnly() const
         {
-            return m_bits & ReadOnlyFlag;
+            return bits() & ReadOnlyFlag;
        }
-
+
+        bool couldBeWatched();
+
+        // Notify an opportunity to create a watchpoint for a variable. This is
+        // idempotent and fail-silent. It is idempotent in the sense that if
+        // a watchpoint set had already been created, then another one will not
+        // be created. Hence two calls to this method have the same effect as
+        // one call. It is also fail-silent, in the sense that if a watchpoint
+        // set had been created and had already been invalidated, then this will
+        // just return. This means that couldBeWatched() may return false even
+        // immediately after a call to attemptToWatch().
+        void attemptToWatch();
+
+        bool* addressOfIsWatched();
+
+        void addWatchpoint(Watchpoint*);
+
+        WatchpointSet* watchpointSet()
+        {
+            return fatEntry()->m_watchpoints.get();
+        }
+
+        ALWAYS_INLINE void notifyWrite()
+        {
+            if (LIKELY(!isFat()))
+                return;
+            notifyWriteSlow();
+        }
+
     private:
-        static const unsigned ReadOnlyFlag = 0x1;
-        static const unsigned DontEnumFlag = 0x2;
-        static const unsigned NotNullFlag = 0x4;
-        static const unsigned FlagBits = 3;
+        static const intptr_t FatFlag = 0x1;
+        static const intptr_t ReadOnlyFlag = 0x2;
+        static const intptr_t DontEnumFlag = 0x4;
+        static const intptr_t NotNullFlag = 0x8;
+        static const intptr_t FlagBits = 4;
+
+        class FatEntry {
+            WTF_MAKE_FAST_ALLOCATED;
+        public:
+            FatEntry(intptr_t bits)
+                : m_bits(bits | FatFlag)
+            {
+            }
+
+            intptr_t m_bits; // always has FatFlag set and exactly matches what the bits would have been if this wasn't fat.
+
+            RefPtr<WatchpointSet> m_watchpoints;
+        };
+
+        SymbolTableEntry& copySlow(const SymbolTableEntry&);
+        JS_EXPORT_PRIVATE void notifyWriteSlow();
+
+        bool isFat() const
+        {
+            return m_bits & FatFlag;
+        }
+
+        const FatEntry* fatEntry() const
+        {
+            ASSERT(isFat());
+            return bitwise_cast<const FatEntry*>(m_bits & ~FatFlag);
+        }
+
+        FatEntry* fatEntry()
+        {
+            ASSERT(isFat());
+            return bitwise_cast<FatEntry*>(m_bits & ~FatFlag);
+        }
+
+        FatEntry* inflate()
+        {
+            if (LIKELY(isFat()))
+                return fatEntry();
+            return inflateSlow();
+        }
+
+        FatEntry* inflateSlow();
+
+        ALWAYS_INLINE intptr_t bits() const
+        {
+            if (isFat())
+                return fatEntry()->m_bits;
+            return m_bits;
+        }
+
+        ALWAYS_INLINE intptr_t& bits()
+        {
+            if (isFat())
+                return fatEntry()->m_bits;
+            return m_bits;
+        }
+
+        void freeFatEntry()
+        {
+            if (LIKELY(!isFat()))
+                return;
+            freeFatEntrySlow();
+        }
+
+        void freeFatEntrySlow();
 
         void pack(int index, bool readOnly, bool dontEnum)
         {
-            m_bits = (index << FlagBits) | NotNullFlag;
+            intptr_t& bitsRef = bits();
+            bitsRef = (static_cast<intptr_t>(index) << FlagBits) | NotNullFlag;
             if (readOnly)
-                m_bits |= ReadOnlyFlag;
+                bitsRef |= ReadOnlyFlag;
             if (dontEnum)
-                m_bits |= DontEnumFlag;
+                bitsRef |= DontEnumFlag;
         }
 
         bool isValidIndex(int index)
         {
-            return ((index << FlagBits) >> FlagBits) == index;
+            return ((static_cast<intptr_t>(index) << FlagBits) >> FlagBits) == static_cast<intptr_t>(index);
         }
 
-        int m_bits;
+        intptr_t m_bits;
     };
 
     struct SymbolTableIndexHashTraits : HashTraits<SymbolTableEntry> {
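
The thin-fat layout described in the comment block above can be illustrated by a standalone sketch. This is not the JSC implementation: the names (ThinFatEntry, SharedSet, Fat) are invented for the example, and std::shared_ptr stands in for WTF::RefPtr.

// Sketch of the thin-fat idiom: one word is either packed bits (thin) or a
// tagged pointer to a heap-allocated record (fat). Hypothetical names.
#include <cstdint>
#include <memory>

struct SharedSet {            // stand-in for the shared WatchpointSet
    bool invalidated = false;
};

class ThinFatEntry {
public:
    ThinFatEntry() : m_word(0) { }
    ~ThinFatEntry() { destroyFat(); }

    // Copying clones the fat record, so later in-place edits to one copy's
    // bits do not show up in the other, but the SharedSet stays shared.
    ThinFatEntry(const ThinFatEntry& other) : m_word(0) { *this = other; }
    ThinFatEntry& operator=(const ThinFatEntry& other)
    {
        if (&other == this)
            return *this;
        destroyFat();
        if (other.isFat())
            m_word = reinterpret_cast<intptr_t>(new Fat(*other.fat())) | FatFlag;
        else
            m_word = other.m_word;
        return *this;
    }

    bool isFat() const { return m_word & FatFlag; }

    // The logical payload lives either in the word itself or in the fat record.
    intptr_t bits() const { return isFat() ? fat()->bits : m_word; }
    void setBits(intptr_t bits)
    {
        if (isFat())
            fat()->bits = bits;
        else
            m_word = bits;
    }

    // Inflating converts a thin entry to a fat one so a SharedSet can be attached.
    std::shared_ptr<SharedSet> ensureSet()
    {
        if (!isFat()) {
            Fat* fatEntry = new Fat{m_word, std::make_shared<SharedSet>()};
            m_word = reinterpret_cast<intptr_t>(fatEntry) | FatFlag;
        }
        return fat()->set;
    }

private:
    static const intptr_t FatFlag = 0x1;   // low bit is free because Fat is pointer-aligned

    struct Fat {
        intptr_t bits;                     // same payload a thin entry would hold
        std::shared_ptr<SharedSet> set;    // shared across copies
    };

    Fat* fat() const { return reinterpret_cast<Fat*>(m_word & ~FatFlag); }
    void destroyFat() { if (isFat()) delete fat(); }

    intptr_t m_word;
};

Copies made before ensureSet() stay thin and cost one word; after ensureSet(), each copy owns its own fat record but all of them observe the same SharedSet, which mirrors the original/copy picture in the comment above.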
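The pack()/isValidIndex() arithmetic relies on the sign-extension round trip mentioned in the "bit twiddling" comment: an index packs losslessly only if shifting it left by FlagBits and arithmetic-shifting it back reproduces the original value. The following self-contained check uses free functions with the same flag values as the patch; the helper names pack/unpackIndex are illustrative, not the class's API.

// Sketch of the thin encoding: index in the high bits, flags in the low four.
#include <cassert>
#include <cstdint>

static const intptr_t FatFlag      = 0x1;
static const intptr_t ReadOnlyFlag = 0x2;
static const intptr_t DontEnumFlag = 0x4;
static const intptr_t NotNullFlag  = 0x8;
static const intptr_t FlagBits     = 4;

// Valid only if the top FlagBits bits of the index are a sign extension,
// so the left shift loses no information.
bool isValidIndex(int index)
{
    return ((static_cast<intptr_t>(index) << FlagBits) >> FlagBits) == static_cast<intptr_t>(index);
}

intptr_t pack(int index, bool readOnly, bool dontEnum)
{
    assert(isValidIndex(index));
    intptr_t bits = (static_cast<intptr_t>(index) << FlagBits) | NotNullFlag;
    if (readOnly)
        bits |= ReadOnlyFlag;
    if (dontEnum)
        bits |= DontEnumFlag;
    return bits;                       // FatFlag stays clear: this is a thin encoding
}

int unpackIndex(intptr_t bits)
{
    return static_cast<int>(bits >> FlagBits);   // arithmetic shift restores the sign
}

int main()
{
    intptr_t bits = pack(-7, /* readOnly */ true, /* dontEnum */ false);
    assert(unpackIndex(bits) == -7);
    assert(bits & ReadOnlyFlag);
    assert(!(bits & FatFlag));
    return 0;
}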
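The comment on attemptToWatch() describes an idempotent, fail-silent protocol: watching can only start while the set has never fired, a repeated request changes nothing, and a write invalidates the set for every copy that shares it. The state machine below is a minimal sketch of that contract; MiniWatchpointSet is a hypothetical class, not the WTF::WatchpointSet API.

// Sketch of the watchpoint contract: ClearWatchpoint -> IsWatched -> IsInvalidated.
#include <cassert>
#include <memory>

class MiniWatchpointSet {
public:
    enum State { ClearWatchpoint, IsWatched, IsInvalidated };

    MiniWatchpointSet() : m_state(ClearWatchpoint) { }

    // Idempotent: a second call while already watched changes nothing.
    // Fail-silent: once invalidated, the request is simply ignored.
    void attemptToWatch()
    {
        if (m_state == ClearWatchpoint)
            m_state = IsWatched;
    }

    bool couldBeWatched() const { return m_state != IsInvalidated; }
    bool isWatched() const { return m_state == IsWatched; }

    // Called on every write to the variable; fires at most once.
    void notifyWrite()
    {
        if (m_state == IsWatched)
            m_state = IsInvalidated;
    }

private:
    State m_state;
};

int main()
{
    auto set = std::make_shared<MiniWatchpointSet>();  // shared by all copies of an entry
    set->attemptToWatch();
    set->attemptToWatch();                             // idempotent
    assert(set->isWatched());
    set->notifyWrite();                                // a store to the variable fires the set
    assert(!set->couldBeWatched());
    set->attemptToWatch();                             // fail-silent: stays invalidated
    assert(!set->isWatched());
    return 0;
}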