Diffstat (limited to 'sphinx/util/__init__.py')
-rw-r--r--  sphinx/util/__init__.py  42
1 file changed, 31 insertions, 11 deletions
diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py
index 7ac5c62f7..f24ffb681 100644
--- a/sphinx/util/__init__.py
+++ b/sphinx/util/__init__.py
@@ -42,19 +42,25 @@ from sphinx.util.nodes import (  # noqa
     caption_ref_re)
 from sphinx.util.matching import patfilter  # noqa

+if False:
+    # For type annotation
+    from typing import Any, Callable, Iterable, Pattern, Sequence, Tuple  # NOQA
+
 # Generally useful regular expressions.
-ws_re = re.compile(r'\s+')
-url_re = re.compile(r'(?P<schema>.+)://.*')
+ws_re = re.compile(r'\s+')  # type: Pattern
+url_re = re.compile(r'(?P<schema>.+)://.*')  # type: Pattern


 # High-level utility functions.

 def docname_join(basedocname, docname):
+    # type: (unicode, unicode) -> unicode
     return posixpath.normpath(
         posixpath.join('/' + basedocname, '..', docname))[1:]


 def path_stabilize(filepath):
+    # type: (unicode) -> unicode
     "normalize path separater and unicode string"
     newpath = filepath.replace(os.path.sep, SEP)
     if isinstance(newpath, text_type):
@@ -63,6 +69,7 @@ def path_stabilize(filepath):


 def get_matching_files(dirname, exclude_matchers=()):
+    # type: (unicode, Tuple[Callable[[unicode], bool], ...]) -> Iterable[unicode]
     """Get all file names in a directory, recursively.

     Exclude files and dirs matching some matcher in *exclude_matchers*.
@@ -75,9 +82,9 @@ def get_matching_files(dirname, exclude_matchers=()):
         relativeroot = root[dirlen:]

         qdirs = enumerate(path_stabilize(path.join(relativeroot, dn))
-                          for dn in dirs)
+                          for dn in dirs)  # type: Iterable[Tuple[int, unicode]]
         qfiles = enumerate(path_stabilize(path.join(relativeroot, fn))
-                           for fn in files)
+                           for fn in files)  # type: Iterable[Tuple[int, unicode]]
         for matcher in exclude_matchers:
             qdirs = [entry for entry in qdirs if not matcher(entry[1])]
             qfiles = [entry for entry in qfiles if not matcher(entry[1])]
@@ -89,6 +96,7 @@ def get_matching_files(dirname, exclude_matchers=()):


 def get_matching_docs(dirname, suffixes, exclude_matchers=()):
+    # type: (unicode, List[unicode], Tuple[Callable[[unicode], bool], ...]) -> Iterable[unicode]  # NOQA
     """Get all file names (without suffixes) matching a suffix in a directory,
     recursively.

@@ -97,7 +105,7 @@ def get_matching_docs(dirname, suffixes, exclude_matchers=()):
     suffixpatterns = ['*' + s for s in suffixes]
     for filename in get_matching_files(dirname, exclude_matchers):
         for suffixpattern in suffixpatterns:
-            if fnmatch.fnmatch(filename, suffixpattern):
+            if fnmatch.fnmatch(filename, suffixpattern):  # type: ignore
                 yield filename[:-len(suffixpattern)+1]
                 break

@@ -109,9 +117,10 @@ class FilenameUniqDict(dict):
     appear in.  Used for images and downloadable files in the environment.
     """
     def __init__(self):
-        self._existing = set()
+        self._existing = set()  # type: Set[unicode]

     def add_file(self, docname, newfile):
+        # type: (unicode, unicode) -> unicode
         if newfile in self:
             self[newfile][0].add(docname)
             return self[newfile][1]
@@ -126,6 +135,7 @@ class FilenameUniqDict(dict):
         return uniquename

     def purge_doc(self, docname):
+        # type: (unicode) -> None
         for filename, (docs, unique) in list(self.items()):
             docs.discard(docname)
             if not docs:
@@ -133,6 +143,7 @@ class FilenameUniqDict(dict):
                 self._existing.discard(unique)

     def merge_other(self, docnames, other):
+        # type: (List[unicode], Dict[unicode, Tuple[Set[unicode], Any]]) -> None
         for filename, (docs, unique) in other.items():
             for doc in docs & docnames:
                 self.add_file(doc, filename)
@@ -146,6 +157,7 @@ class FilenameUniqDict(dict):
 def copy_static_entry(source, targetdir, builder, context={},
                       exclude_matchers=(), level=0):
+    # type: (unicode, unicode, Any, Dict, Tuple[Callable, ...], int) -> None
     """[DEPRECATED] Copy a HTML builder static_path entry from source to
     targetdir.

     Handles all possible cases of files, directories and subdirectories.
@@ -183,6 +195,7 @@ _DEBUG_HEADER = '''\


 def save_traceback(app):
+    # type: (Any) -> unicode
     """Save the current exception's traceback in a temporary file."""
     import sphinx
     import jinja2
@@ -190,7 +203,7 @@ def save_traceback(app):
     import platform
     exc = sys.exc_info()[1]
     if isinstance(exc, SphinxParallelError):
-        exc_format = '(Error in parallel process)\n' + exc.traceback
+        exc_format = '(Error in parallel process)\n' + exc.traceback  # type: ignore
     else:
         exc_format = traceback.format_exc()
     fd, path = tempfile.mkstemp('.log', 'sphinx-err-')
@@ -220,6 +233,7 @@ def save_traceback(app):


 def get_module_source(modname):
+    # type: (str) -> Tuple[unicode, unicode]
     """Try to find the source code for a module.

     Can return ('file', 'filename') in which case the source is in the given
@@ -259,6 +273,7 @@ def get_module_source(modname):


 def get_full_modname(modname, attribute):
+    # type: (str, unicode) -> unicode
     __import__(modname)
     module = sys.modules[modname]

@@ -277,6 +292,7 @@ _coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')


 def detect_encoding(readline):
+    # type: (Callable) -> unicode
     """Like tokenize.detect_encoding() from Py3k, but a bit simplified."""

     def read_or_stop():
@@ -433,10 +449,11 @@ def split_index_msg(type, value):


 def format_exception_cut_frames(x=1):
+    # type: (int) -> unicode
     """Format an exception with traceback, but only the last x frames."""
     typ, val, tb = sys.exc_info()
     # res = ['Traceback (most recent call last):\n']
-    res = []
+    res = []  # type: List[unicode]
     tbres = traceback.format_tb(tb)
     res += tbres[-x:]
     res += traceback.format_exception_only(typ, val)
@@ -449,7 +466,7 @@ class PeekableIterator(object):
     what's the next item.
     """
     def __init__(self, iterable):
-        self.remaining = deque()
+        self.remaining = deque()  # type: deque
         self._iterator = iter(iterable)

     def __iter__(self):
@@ -477,6 +494,7 @@ class PeekableIterator(object):


 def import_object(objname, source=None):
+    # type: (str, unicode) -> Any
     try:
         module, name = objname.rsplit('.', 1)
     except ValueError as err:
@@ -496,7 +514,8 @@ def import_object(objname, source=None):


 def encode_uri(uri):
-    split = list(urlsplit(uri))
+    # type: (unicode) -> unicode
+    split = list(urlsplit(uri))  # type: Any
     split[1] = split[1].encode('idna').decode('ascii')
     split[2] = quote_plus(split[2].encode('utf-8'), '/').decode('ascii')
     query = list((q, quote_plus(v.encode('utf-8')))
@@ -506,8 +525,9 @@ def encode_uri(uri):


 def split_docinfo(text):
+    # type: (unicode) -> Sequence[unicode]
     docinfo_re = re.compile('\A((?:\s*:\w+:.*?\n)+)', re.M)
-    result = docinfo_re.split(text, 1)
+    result = docinfo_re.split(text, 1)  # type: ignore
     if len(result) == 1:
         return '', result[0]
     else: