Diffstat (limited to 'Lib/functools.py')
-rw-r--r--   Lib/functools.py   300
1 file changed, 158 insertions(+), 142 deletions(-)
diff --git a/Lib/functools.py b/Lib/functools.py
index 2c299d7133..214523cbc2 100644
--- a/Lib/functools.py
+++ b/Lib/functools.py
@@ -23,7 +23,7 @@ from types import MappingProxyType
 from weakref import WeakKeyDictionary
 try:
     from _thread import RLock
-except:
+except ImportError:
     class RLock:
         'Dummy reentrant lock for builds without threads'
         def __enter__(self): pass
@@ -94,108 +94,109 @@ def wraps(wrapped,
 # infinite recursion that could occur when the operator dispatch logic
 # detects a NotImplemented result and then calls a reflected method.
 
-def _gt_from_lt(self, other):
+def _gt_from_lt(self, other, NotImplemented=NotImplemented):
     'Return a > b.  Computed by @total_ordering from (not a < b) and (a != b).'
     op_result = self.__lt__(other)
     if op_result is NotImplemented:
-        return NotImplemented
+        return op_result
     return not op_result and self != other
 
-def _le_from_lt(self, other):
+def _le_from_lt(self, other, NotImplemented=NotImplemented):
     'Return a <= b.  Computed by @total_ordering from (a < b) or (a == b).'
     op_result = self.__lt__(other)
     return op_result or self == other
 
-def _ge_from_lt(self, other):
+def _ge_from_lt(self, other, NotImplemented=NotImplemented):
     'Return a >= b.  Computed by @total_ordering from (not a < b).'
     op_result = self.__lt__(other)
     if op_result is NotImplemented:
-        return NotImplemented
+        return op_result
     return not op_result
 
-def _ge_from_le(self, other):
+def _ge_from_le(self, other, NotImplemented=NotImplemented):
     'Return a >= b.  Computed by @total_ordering from (not a <= b) or (a == b).'
     op_result = self.__le__(other)
     if op_result is NotImplemented:
-        return NotImplemented
+        return op_result
     return not op_result or self == other
 
-def _lt_from_le(self, other):
+def _lt_from_le(self, other, NotImplemented=NotImplemented):
     'Return a < b.  Computed by @total_ordering from (a <= b) and (a != b).'
     op_result = self.__le__(other)
    if op_result is NotImplemented:
-        return NotImplemented
+        return op_result
     return op_result and self != other
 
-def _gt_from_le(self, other):
+def _gt_from_le(self, other, NotImplemented=NotImplemented):
     'Return a > b.  Computed by @total_ordering from (not a <= b).'
     op_result = self.__le__(other)
     if op_result is NotImplemented:
-        return NotImplemented
+        return op_result
     return not op_result
 
-def _lt_from_gt(self, other):
+def _lt_from_gt(self, other, NotImplemented=NotImplemented):
     'Return a < b.  Computed by @total_ordering from (not a > b) and (a != b).'
     op_result = self.__gt__(other)
     if op_result is NotImplemented:
-        return NotImplemented
+        return op_result
     return not op_result and self != other
 
-def _ge_from_gt(self, other):
+def _ge_from_gt(self, other, NotImplemented=NotImplemented):
     'Return a >= b.  Computed by @total_ordering from (a > b) or (a == b).'
     op_result = self.__gt__(other)
     return op_result or self == other
 
-def _le_from_gt(self, other):
+def _le_from_gt(self, other, NotImplemented=NotImplemented):
     'Return a <= b.  Computed by @total_ordering from (not a > b).'
     op_result = self.__gt__(other)
     if op_result is NotImplemented:
-        return NotImplemented
+        return op_result
     return not op_result
 
-def _le_from_ge(self, other):
+def _le_from_ge(self, other, NotImplemented=NotImplemented):
     'Return a <= b.  Computed by @total_ordering from (not a >= b) or (a == b).'
     op_result = self.__ge__(other)
     if op_result is NotImplemented:
-        return NotImplemented
+        return op_result
     return not op_result or self == other
 
-def _gt_from_ge(self, other):
+def _gt_from_ge(self, other, NotImplemented=NotImplemented):
     'Return a > b.  Computed by @total_ordering from (a >= b) and (a != b).'
     op_result = self.__ge__(other)
     if op_result is NotImplemented:
-        return NotImplemented
+        return op_result
     return op_result and self != other
 
-def _lt_from_ge(self, other):
+def _lt_from_ge(self, other, NotImplemented=NotImplemented):
     'Return a < b.  Computed by @total_ordering from (not a >= b).'
     op_result = self.__ge__(other)
     if op_result is NotImplemented:
-        return NotImplemented
+        return op_result
     return not op_result
 
+_convert = {
+    '__lt__': [('__gt__', _gt_from_lt),
+               ('__le__', _le_from_lt),
+               ('__ge__', _ge_from_lt)],
+    '__le__': [('__ge__', _ge_from_le),
+               ('__lt__', _lt_from_le),
+               ('__gt__', _gt_from_le)],
+    '__gt__': [('__lt__', _lt_from_gt),
+               ('__ge__', _ge_from_gt),
+               ('__le__', _le_from_gt)],
+    '__ge__': [('__le__', _le_from_ge),
+               ('__gt__', _gt_from_ge),
+               ('__lt__', _lt_from_ge)]
+}
+
 def total_ordering(cls):
     """Class decorator that fills in missing ordering methods"""
-    convert = {
-        '__lt__': [('__gt__', _gt_from_lt),
-                   ('__le__', _le_from_lt),
-                   ('__ge__', _ge_from_lt)],
-        '__le__': [('__ge__', _ge_from_le),
-                   ('__lt__', _lt_from_le),
-                   ('__gt__', _gt_from_le)],
-        '__gt__': [('__lt__', _lt_from_gt),
-                   ('__ge__', _ge_from_gt),
-                   ('__le__', _le_from_gt)],
-        '__ge__': [('__le__', _le_from_ge),
-                   ('__gt__', _gt_from_ge),
-                   ('__lt__', _lt_from_ge)]
-    }
     # Find user-defined comparisons (not those inherited from object).
-    roots = [op for op in convert if getattr(cls, op, None) is not getattr(object, op, None)]
+    roots = [op for op in _convert if getattr(cls, op, None) is not getattr(object, op, None)]
     if not roots:
         raise ValueError('must define at least one ordering operation: < > <= >=')
     root = max(roots)       # prefer __lt__ to __le__ to __gt__ to __ge__
-    for opname, opfunc in convert[root]:
+    for opname, opfunc in _convert[root]:
         if opname not in roots:
             opfunc.__name__ = opname
             setattr(cls, opname, opfunc)
@@ -222,8 +223,6 @@ def cmp_to_key(mycmp):
             return mycmp(self.obj, other.obj) <= 0
         def __ge__(self, other):
             return mycmp(self.obj, other.obj) >= 0
-        def __ne__(self, other):
-            return mycmp(self.obj, other.obj) != 0
         __hash__ = None
     return K
 
@@ -242,6 +241,14 @@ def partial(func, *args, **keywords):
     """New function with partial application of the given arguments
     and keywords.
     """
+    if hasattr(func, 'func'):
+        args = func.args + args
+        tmpkw = func.keywords.copy()
+        tmpkw.update(keywords)
+        keywords = tmpkw
+        del tmpkw
+        func = func.func
+
     def newfunc(*fargs, **fkeywords):
         newkeywords = keywords.copy()
         newkeywords.update(fkeywords)
@@ -291,7 +298,7 @@ class partialmethod(object):
                                  for k, v in self.keywords.items())
         format_string = "{module}.{cls}({func}, {args}, {keywords})"
         return format_string.format(module=self.__class__.__module__,
-                                    cls=self.__class__.__name__,
+                                    cls=self.__class__.__qualname__,
                                     func=self.func,
                                     args=args,
                                     keywords=keywords)
@@ -412,120 +419,129 @@ def lru_cache(maxsize=128, typed=False):
     if maxsize is not None and not isinstance(maxsize, int):
         raise TypeError('Expected maxsize to be an integer or None')
 
+    def decorating_function(user_function):
+        wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo)
+        return update_wrapper(wrapper, user_function)
+
+    return decorating_function
+
+def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo):
     # Constants shared by all lru cache instances:
     sentinel = object()          # unique object used to signal cache misses
     make_key = _make_key         # build a key from the function arguments
     PREV, NEXT, KEY, RESULT = 0, 1, 2, 3   # names for the link fields
 
-    def decorating_function(user_function):
-        cache = {}
-        hits = misses = 0
-        full = False
-        cache_get = cache.get    # bound method to lookup a key or return None
-        lock = RLock()           # because linkedlist updates aren't threadsafe
-        root = []                # root of the circular doubly linked list
-        root[:] = [root, root, None, None]     # initialize by pointing to self
-
-        if maxsize == 0:
-
-            def wrapper(*args, **kwds):
-                # No caching -- just a statistics update after a successful call
-                nonlocal misses
-                result = user_function(*args, **kwds)
-                misses += 1
-                return result
+    cache = {}
+    hits = misses = 0
+    full = False
+    cache_get = cache.get    # bound method to lookup a key or return None
+    lock = RLock()           # because linkedlist updates aren't threadsafe
+    root = []                # root of the circular doubly linked list
+    root[:] = [root, root, None, None]     # initialize by pointing to self
+
+    if maxsize == 0:
+
+        def wrapper(*args, **kwds):
+            # No caching -- just a statistics update after a successful call
+            nonlocal misses
+            result = user_function(*args, **kwds)
+            misses += 1
+            return result
 
-        elif maxsize is None:
+    elif maxsize is None:
 
-            def wrapper(*args, **kwds):
-                # Simple caching without ordering or size limit
-                nonlocal hits, misses
-                key = make_key(args, kwds, typed)
-                result = cache_get(key, sentinel)
-                if result is not sentinel:
-                    hits += 1
-                    return result
-                result = user_function(*args, **kwds)
-                cache[key] = result
-                misses += 1
+        def wrapper(*args, **kwds):
+            # Simple caching without ordering or size limit
+            nonlocal hits, misses
+            key = make_key(args, kwds, typed)
+            result = cache_get(key, sentinel)
+            if result is not sentinel:
+                hits += 1
                 return result
+            result = user_function(*args, **kwds)
+            cache[key] = result
+            misses += 1
+            return result
 
-        else:
-
-            def wrapper(*args, **kwds):
-                # Size limited caching that tracks accesses by recency
-                nonlocal root, hits, misses, full
-                key = make_key(args, kwds, typed)
-                with lock:
-                    link = cache_get(key)
-                    if link is not None:
-                        # Move the link to the front of the circular queue
-                        link_prev, link_next, _key, result = link
-                        link_prev[NEXT] = link_next
-                        link_next[PREV] = link_prev
-                        last = root[PREV]
-                        last[NEXT] = root[PREV] = link
-                        link[PREV] = last
-                        link[NEXT] = root
-                        hits += 1
-                        return result
-                result = user_function(*args, **kwds)
-                with lock:
-                    if key in cache:
-                        # Getting here means that this same key was added to the
-                        # cache while the lock was released.  Since the link
-                        # update is already done, we need only return the
-                        # computed result and update the count of misses.
-                        pass
-                    elif full:
-                        # Use the old root to store the new key and result.
-                        oldroot = root
-                        oldroot[KEY] = key
-                        oldroot[RESULT] = result
-                        # Empty the oldest link and make it the new root.
-                        # Keep a reference to the old key and old result to
-                        # prevent their ref counts from going to zero during the
-                        # update. That will prevent potentially arbitrary object
-                        # clean-up code (i.e. __del__) from running while we're
-                        # still adjusting the links.
-                        root = oldroot[NEXT]
-                        oldkey = root[KEY]
-                        oldresult = root[RESULT]
-                        root[KEY] = root[RESULT] = None
-                        # Now update the cache dictionary.
-                        del cache[oldkey]
-                        # Save the potentially reentrant cache[key] assignment
-                        # for last, after the root and links have been put in
-                        # a consistent state.
-                        cache[key] = oldroot
-                    else:
-                        # Put result in a new link at the front of the queue.
-                        last = root[PREV]
-                        link = [last, root, key, result]
-                        last[NEXT] = root[PREV] = cache[key] = link
-                        full = (len(cache) >= maxsize)
-                    misses += 1
-                return result
+    else:
 
-        def cache_info():
-            """Report cache statistics"""
+        def wrapper(*args, **kwds):
+            # Size limited caching that tracks accesses by recency
+            nonlocal root, hits, misses, full
+            key = make_key(args, kwds, typed)
             with lock:
-                return _CacheInfo(hits, misses, maxsize, len(cache))
-
-        def cache_clear():
-            """Clear the cache and cache statistics"""
-            nonlocal hits, misses, full
+                link = cache_get(key)
+                if link is not None:
+                    # Move the link to the front of the circular queue
+                    link_prev, link_next, _key, result = link
+                    link_prev[NEXT] = link_next
+                    link_next[PREV] = link_prev
+                    last = root[PREV]
+                    last[NEXT] = root[PREV] = link
+                    link[PREV] = last
+                    link[NEXT] = root
+                    hits += 1
+                    return result
+            result = user_function(*args, **kwds)
             with lock:
-                cache.clear()
-                root[:] = [root, root, None, None]
-                hits = misses = 0
-                full = False
+                if key in cache:
+                    # Getting here means that this same key was added to the
+                    # cache while the lock was released.  Since the link
+                    # update is already done, we need only return the
+                    # computed result and update the count of misses.
+                    pass
+                elif full:
+                    # Use the old root to store the new key and result.
+                    oldroot = root
+                    oldroot[KEY] = key
+                    oldroot[RESULT] = result
+                    # Empty the oldest link and make it the new root.
+                    # Keep a reference to the old key and old result to
+                    # prevent their ref counts from going to zero during the
+                    # update. That will prevent potentially arbitrary object
+                    # clean-up code (i.e. __del__) from running while we're
+                    # still adjusting the links.
+                    root = oldroot[NEXT]
+                    oldkey = root[KEY]
+                    oldresult = root[RESULT]
+                    root[KEY] = root[RESULT] = None
+                    # Now update the cache dictionary.
+                    del cache[oldkey]
+                    # Save the potentially reentrant cache[key] assignment
+                    # for last, after the root and links have been put in
+                    # a consistent state.
+                    cache[key] = oldroot
+                else:
+                    # Put result in a new link at the front of the queue.
+                    last = root[PREV]
+                    link = [last, root, key, result]
+                    last[NEXT] = root[PREV] = cache[key] = link
+                    full = (len(cache) >= maxsize)
+                misses += 1
+            return result
 
-        wrapper.cache_info = cache_info
-        wrapper.cache_clear = cache_clear
-        return update_wrapper(wrapper, user_function)
+    def cache_info():
+        """Report cache statistics"""
+        with lock:
+            return _CacheInfo(hits, misses, maxsize, len(cache))
+
+    def cache_clear():
+        """Clear the cache and cache statistics"""
+        nonlocal hits, misses, full
+        with lock:
+            cache.clear()
+            root[:] = [root, root, None, None]
+            hits = misses = 0
+            full = False
+
+    wrapper.cache_info = cache_info
+    wrapper.cache_clear = cache_clear
+    return wrapper
 
-    return decorating_function
+try:
+    from _functools import _lru_cache_wrapper
+except ImportError:
+    pass
 
 ################################################################################
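Note on the partial() hunk above: building a partial from something that already exposes func/args/keywords now folds the two layers into one, so the outer object keeps the original callable and the merged arguments. A minimal sketch of the observable effect, assuming the pure-Python partial defined in this file (CPython normally substitutes the C version from _functools, which flattens this simple case the same way):

from functools import partial

def add(x, y, z):
    return x + y + z

# Nested partials collapse into a single layer after this change.
p = partial(partial(add, 1), 2)
print(p.func is add)   # True -- previously p.func was the inner partial object
print(p.args)          # (1, 2)
print(p(3))            # 6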

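The lru_cache() hunk is a refactor plus an optional accelerator rather than a behaviour change: the cache machinery moves out of decorating_function() into a module-level _lru_cache_wrapper(), which the trailing try/except replaces with the C implementation from _functools when that extension module is available. The decorator's public interface is unchanged; a small usage sketch (the printed counts assume a cold cache):

from functools import lru_cache

@lru_cache(maxsize=2)
def square(n):
    return n * n

square(2); square(3); square(2)   # miss, miss, hit
print(square.cache_info())        # CacheInfo(hits=1, misses=2, maxsize=2, currsize=2)
square.cache_clear()
print(square.cache_info())        # CacheInfo(hits=0, misses=0, maxsize=2, currsize=0)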