summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.travis.yml2
-rw-r--r--Makefile7
-rw-r--r--test-reqs.txt4
-rw-r--r--tests/conftest.py231
-rwxr-xr-xtests/coverage.py1158
-rwxr-xr-xtests/path.py2
-rw-r--r--tests/py35/test_autodoc_py35.py (renamed from tests/test_autodoc_py35.py)5
-rw-r--r--tests/roots/test-add_enumerable_node/conf.py2
-rw-r--r--tests/roots/test-add_enumerable_node/enumerable_node.py (renamed from tests/roots/test-add_enumerable_node/test_enumerable_node.py)0
-rw-r--r--tests/roots/test-add_source_parser-conflicts-with-users-setting/conf.py2
-rw-r--r--tests/roots/test-add_source_parser-conflicts-with-users-setting/source_parser.py (renamed from tests/roots/test-add_source_parser-conflicts-with-users-setting/test_source_parser.py)0
-rw-r--r--tests/roots/test-add_source_parser/conf.py2
-rw-r--r--tests/roots/test-add_source_parser/source_parser.py (renamed from tests/roots/test-add_source_parser/test_source_parser.py)0
-rwxr-xr-xtests/run.py16
-rw-r--r--tests/test_apidoc.py192
-rw-r--r--tests/test_autodoc.py25
-rw-r--r--tests/test_build.py8
-rw-r--r--tests/test_build_gettext.py12
-rw-r--r--tests/test_build_html.py12
-rw-r--r--tests/test_build_latex.py15
-rw-r--r--tests/test_catalogs.py11
-rw-r--r--tests/test_config.py26
-rw-r--r--tests/test_environment.py2
-rw-r--r--tests/test_ext_graphviz.py33
-rw-r--r--tests/test_ext_inheritance_diagram.py6
-rw-r--r--tests/test_ext_intersphinx.py2
-rw-r--r--tests/test_intl.py3
-rw-r--r--tests/test_metadata.py8
-rw-r--r--tests/test_setup_command.py99
-rw-r--r--tests/test_theming.py11
-rw-r--r--tests/test_util_fileutil.py38
-rw-r--r--tests/test_util_i18n.py126
-rw-r--r--tests/test_websupport.py182
-rw-r--r--tests/util.py285
34 files changed, 792 insertions, 1735 deletions
diff --git a/.travis.yml b/.travis.yml
index 09b89a7ae..0217e5dad 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -12,7 +12,7 @@ python:
- "pypy"
env:
global:
- - TEST='-v --with-timer --timer-top-n 25'
+ - TEST='-v --durations 25'
- PYTHONFAULTHANDLER=x
- PYTHONWARNINGS=all
matrix:
diff --git a/Makefile b/Makefile
index 652ade4f4..887f3e2a7 100644
--- a/Makefile
+++ b/Makefile
@@ -72,14 +72,13 @@ reindent:
@$(PYTHON) utils/reindent.py -r -n .
test:
- @cd tests; $(PYTHON) run.py -I py35 -d -m '^[tT]est' $(TEST)
+ @cd tests; $(PYTHON) run.py --ignore py35 -v $(TEST)
test-async:
- @cd tests; $(PYTHON) run.py -d -m '^[tT]est' $(TEST)
+ @cd tests; $(PYTHON) run.py -v $(TEST)
covertest:
- @cd tests; $(PYTHON) run.py -d -m '^[tT]est' --with-coverage \
- --cover-package=sphinx $(TEST)
+ @cd tests; $(PYTHON) run.py -v --cov=sphinx --junitxml=.junit.xml $(TEST)
build:
@$(PYTHON) setup.py build
diff --git a/test-reqs.txt b/test-reqs.txt
index b53adbfe5..1877886c1 100644
--- a/test-reqs.txt
+++ b/test-reqs.txt
@@ -1,6 +1,6 @@
flake8
-nose
-nose-timer
+pytest>=3.0
+pytest-cov
mock
six>=1.4
Jinja2>=2.3
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 000000000..9a021169e
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,231 @@
+# -*- coding: utf-8 -*-
+import sys
+import subprocess
+
+import pytest
+from six import StringIO, string_types
+
+from util import SphinxTestApp, path
+
+
+@pytest.fixture
+def app_params(request):
+ """
+ parameters that are specified by 'pytest.mark.sphinx' for
+ sphinx.application.Sphinx initialization
+ """
+ markers = request.node.get_marker("sphinx")
+ pargs = {}
+ kwargs = {}
+
+ if markers is not None:
+ # to avoid stacking positional args
+ for info in reversed(list(markers)):
+ for i, a in enumerate(info.args):
+ pargs[i] = a
+ kwargs.update(info.kwargs)
+
+ args = [pargs[i] for i in sorted(pargs.keys())]
+ return args, kwargs
+
+
+@pytest.fixture
+def test_params(request):
+ """
+ test parameters that are specified by 'pytest.mark.testenv'
+
+ :param bool build:
+ If True, 'app' fixture will be built before the test function is called.
+ Default is False.
+ :param Union[str, bool, None] specific_srcdir:
+ If True, testroot directory will be copied into
+ '<TMPDIR>/<TEST FUNCTION NAME>'.
+ If string is specified, it copied into '<TMPDIR>/<THE STRING>'.
+ You can use this feature to provide a specially crafted source
+ directory. You can also use it to share a source directory between
+ parametrized tests and/or across test functions. Default is None.
+ :param Union[str, bool, None] shared_result:
+ If True, app._status and app._warning objects will be shared in the
+ parametrized test functions. If string is specified, the objects will
+ be shared in the test functions that have the same 'shared_result'
+ value. If you don't specify specific_srcdir, this option overrides the
+ specific_srcdir param with the 'shared_result' value. Default is None.
+ """
+ env = request.node.get_marker('testenv')
+ kwargs = env.kwargs if env else {}
+ result = {
+ 'build': False, # pre build in fixture
+ 'specific_srcdir': None,
+ 'shared_result': None,
+ }
+ result.update(kwargs)
+
+ if (result['shared_result'] and
+ not isinstance(result['shared_result'], string_types)):
+ r = result['shared_result'] = request.node.originalname or request.node.name
+
+ if result['shared_result'] and not result['specific_srcdir']:
+ result['specific_srcdir'] = result['shared_result']
+
+ if (result['specific_srcdir'] and
+ not isinstance(result['specific_srcdir'], string_types)):
+ result['specific_srcdir'] = request.node.originalname or request.node.name
+
+ return result
+
+
+@pytest.fixture(scope='function')
+def app(test_params, app_params, make_app, shared_result):
+ """
+ provides sphinx.application.Sphinx object
+ """
+ args, kwargs = app_params
+ if test_params['specific_srcdir'] and 'srcdir' not in kwargs:
+ kwargs['srcdir'] = test_params['specific_srcdir']
+
+ if test_params['shared_result']:
+ restore = shared_result.restore(test_params['shared_result'])
+ kwargs.update(restore)
+
+ app_ = make_app(*args, **kwargs)
+
+ if test_params['build']:
+ # if listdir is not empty, we can use built cache
+ if not app_.outdir.listdir():
+ app_.build()
+ yield app_
+
+ if test_params['shared_result']:
+ shared_result.store(test_params['shared_result'], app_)
+
+
+@pytest.fixture(scope='function')
+def status(app):
+ """
+ compat for testing with previous @with_app decorator
+ """
+ return app._status
+
+
+@pytest.fixture(scope='function')
+def warning(app):
+ """
+ compat for testing with previous @with_app decorator
+ """
+ return app._warning
+
+
+@pytest.fixture()
+def make_app():
+ """
+ provides make_app function to initialize SphinxTestApp instance.
+ If you want to initialize 'app' in your test function, please use this
+ instead of using the SphinxTestApp class directly.
+ """
+ apps = []
+ syspath = sys.path[:]
+
+ def make(*args, **kwargs):
+ status, warning = StringIO(), StringIO()
+ kwargs.setdefault('status', status)
+ kwargs.setdefault('warning', warning)
+ app_ = SphinxTestApp(*args, **kwargs)
+ apps.append(app_)
+ return app_
+ yield make
+
+ sys.path[:] = syspath
+ for app_ in apps:
+ app_.cleanup()
+
+
+class SharedResult(object):
+ cache = {}
+
+ def store(self, key, app_):
+ if key in self.cache:
+ return
+ data = {
+ 'status': app_._status.getvalue(),
+ 'warning': app_._warning.getvalue(),
+ }
+ self.cache[key] = data
+
+ def restore(self, key):
+ if key not in self.cache:
+ return {}
+ data = self.cache[key]
+ return {
+ 'status': StringIO(data['status']),
+ 'warning': StringIO(data['warning']),
+ }
+
+
+@pytest.fixture
+def shared_result():
+ return SharedResult()
+
+
+@pytest.fixture(scope='module', autouse=True)
+def _shared_result_cache():
+ SharedResult.cache.clear()
+
+
+@pytest.fixture
+def if_graphviz_found(app):
+ """
+ The test will be skipped when the 'if_graphviz_found' fixture is used
+ and the graphviz dot command is not found.
+ """
+ graphviz_dot = getattr(app.config, 'graphviz_dot', '')
+ try:
+ if graphviz_dot:
+ dot = subprocess.Popen([graphviz_dot, '-V'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE) # show version
+ dot.communicate()
+ return
+ except OSError: # No such file or directory
+ pass
+
+ pytest.skip('graphviz "dot" is not available')
+
+
+@pytest.fixture
+def tempdir(tmpdir):
+ """
+ temporary directory wrapped with the `path` class.
+ This fixture is for compatibility with the old test implementation.
+ """
+ return path(tmpdir)
+
+
+def pytest_addoption(parser):
+ """
+ tests that have pytest.mark.env('foobar') will be skipped when
+ '-S foobar' command-line option is provided.
+ """
+ parser.addoption("-S", action="store", metavar="NAME",
+ help="skip tests matching the environment NAME.")
+
+
+def pytest_configure(config):
+ """
+ tests that have pytest.mark.env('foobar') will be skipped when
+ '-S foobar' command-line option is provided.
+ """
+ # register an additional marker
+ config.addinivalue_line("markers",
+ "env(name): mark test to run only on named environment")
+
+
+def pytest_runtest_setup(item):
+ """
+ tests that have pytest.mark.env('foobar') will be skipped when
+ '-S foobar' command-line option is provided.
+ """
+ envmarker = item.get_marker("env")
+ if envmarker is not None:
+ envname = envmarker.args[0]
+ if envname == item.config.getoption("-S"):
+ pytest.skip("skip test %r" % envname)
diff --git a/tests/coverage.py b/tests/coverage.py
deleted file mode 100755
index cd36e218a..000000000
--- a/tests/coverage.py
+++ /dev/null
@@ -1,1158 +0,0 @@
-#!/usr/bin/python
-#
-# Perforce Defect Tracking Integration Project
-# <http://www.ravenbrook.com/project/p4dti/>
-#
-# COVERAGE.PY -- COVERAGE TESTING
-#
-# Gareth Rees, Ravenbrook Limited, 2001-12-04
-# Ned Batchelder, 2004-12-12
-# http://nedbatchelder.com/code/modules/coverage.html
-#
-#
-# 1. INTRODUCTION
-#
-# This module provides coverage testing for Python code.
-#
-# The intended readership is all Python developers.
-#
-# This document is not confidential.
-#
-# See [GDR 2001-12-04a] for the command-line interface, programmatic
-# interface and limitations. See [GDR 2001-12-04b] for requirements and
-# design.
-
-r"""Usage:
-
-coverage.py -x [-p] MODULE.py [ARG1 ARG2 ...]
- Execute module, passing the given command-line arguments, collecting
- coverage data. With the -p option, write to a temporary file containing
- the machine name and process ID.
-
-coverage.py -e
- Erase collected coverage data.
-
-coverage.py -c
- Collect data from multiple coverage files (as created by -p option above)
- and store it into a single file representing the union of the coverage.
-
-coverage.py -r [-m] [-o dir1,dir2,...] FILE1 FILE2 ...
- Report on the statement coverage for the given files. With the -m
- option, show line numbers of the statements that weren't executed.
-
-coverage.py -a [-d dir] [-o dir1,dir2,...] FILE1 FILE2 ...
- Make annotated copies of the given files, marking statements that
- are executed with > and statements that are missed with !. With
- the -d option, make the copies in that directory. Without the -d
- option, make each copy in the same directory as the original.
-
--o dir,dir2,...
- Omit reporting or annotating files when their filename path starts with
- a directory listed in the omit list.
- e.g. python coverage.py -i -r -o c:\python23,lib\enthought\traits
-
-Coverage data is saved in the file .coverage by default. Set the
-COVERAGE_FILE environment variable to save it somewhere else.
-"""
-from __future__ import print_function
-
-__version__ = "2.85.20080914" # see detailed history at the end of this file.
-
-import compiler
-import compiler.visitor
-import glob
-import os
-import re
-import string
-import symbol
-import sys
-import atexit
-import threading
-import token
-import zipimport
-from socket import gethostname
-
-from six import string_types
-
-
-# 2. IMPLEMENTATION
-#
-# This uses the "singleton" pattern.
-#
-# The word "morf" means a module object (from which the source file can
-# be deduced by suitable manipulation of the __file__ attribute) or a
-# filename.
-#
-# When we generate a coverage report we have to canonicalize every
-# filename in the coverage dictionary just in case it refers to the
-# module we are reporting on. It seems a shame to throw away this
-# information so the data in the coverage dictionary is transferred to
-# the 'cexecuted' dictionary under the canonical filenames.
-#
-# The coverage dictionary is called "c" and the trace function "t". The
-# reason for these short names is that Python looks up variables by name
-# at runtime and so execution time depends on the length of variables!
-# In the bottleneck of this application it's appropriate to abbreviate
-# names to increase speed.
-
-class StatementFindingAstVisitor(compiler.visitor.ASTVisitor):
- """ A visitor for a parsed Abstract Syntax Tree which finds executable
- statements.
- """
- def __init__(self, statements, excluded, suite_spots):
- compiler.visitor.ASTVisitor.__init__(self)
- self.statements = statements
- self.excluded = excluded
- self.suite_spots = suite_spots
- self.excluding_suite = 0
-
- def doRecursive(self, node):
- for n in node.getChildNodes():
- self.dispatch(n)
-
- visitStmt = visitModule = doRecursive
-
- def doCode(self, node):
- if hasattr(node, 'decorators') and node.decorators:
- self.dispatch(node.decorators)
- self.recordAndDispatch(node.code)
- else:
- self.doSuite(node, node.code)
-
- visitFunction = visitClass = doCode
-
- def getFirstLine(self, node):
- # Find the first line in the tree node.
- lineno = node.lineno
- for n in node.getChildNodes():
- f = self.getFirstLine(n)
- if lineno and f:
- lineno = min(lineno, f)
- else:
- lineno = lineno or f
- return lineno
-
- def getLastLine(self, node):
- # Find the first line in the tree node.
- lineno = node.lineno
- for n in node.getChildNodes():
- lineno = max(lineno, self.getLastLine(n))
- return lineno
-
- def doStatement(self, node):
- self.recordLine(self.getFirstLine(node))
-
- visitAssert = visitAssign = visitAssTuple = visitPrint = \
- visitPrintnl = visitRaise = visitSubscript = visitDecorators = \
- doStatement
-
- def visitPass(self, node):
- # Pass statements have weird interactions with docstrings. If this
- # pass statement is part of one of those pairs, claim that the statement
- # is on the later of the two lines.
- l = node.lineno
- if l:
- lines = self.suite_spots.get(l, [l,l])
- self.statements[lines[1]] = 1
-
- def visitDiscard(self, node):
- # Discard nodes are statements that execute an expression, but then
- # discard the results. This includes function calls, so we can't
- # ignore them all. But if the expression is a constant, the statement
- # won't be "executed", so don't count it now.
- if node.expr.__class__.__name__ != 'Const':
- self.doStatement(node)
-
- def recordNodeLine(self, node):
- # Stmt nodes often have None, but shouldn't claim the first line of
- # their children (because the first child might be an ignorable line
- # like "global a").
- if node.__class__.__name__ != 'Stmt':
- return self.recordLine(self.getFirstLine(node))
- else:
- return 0
-
- def recordLine(self, lineno):
- # Returns a bool, whether the line is included or excluded.
- if lineno:
- # Multi-line tests introducing suites have to get charged to their
- # keyword.
- if lineno in self.suite_spots:
- lineno = self.suite_spots[lineno][0]
- # If we're inside an excluded suite, record that this line was
- # excluded.
- if self.excluding_suite:
- self.excluded[lineno] = 1
- return 0
- # If this line is excluded, or suite_spots maps this line to
- # another line that is exlcuded, then we're excluded.
- elif lineno in self.excluded or \
- lineno in self.suite_spots and \
- self.suite_spots[lineno][1] in self.excluded:
- return 0
- # Otherwise, this is an executable line.
- else:
- self.statements[lineno] = 1
- return 1
- return 0
-
- default = recordNodeLine
-
- def recordAndDispatch(self, node):
- self.recordNodeLine(node)
- self.dispatch(node)
-
- def doSuite(self, intro, body, exclude=0):
- exsuite = self.excluding_suite
- if exclude or (intro and not self.recordNodeLine(intro)):
- self.excluding_suite = 1
- self.recordAndDispatch(body)
- self.excluding_suite = exsuite
-
- def doPlainWordSuite(self, prevsuite, suite):
- # Finding the exclude lines for else's is tricky, because they aren't
- # present in the compiler parse tree. Look at the previous suite,
- # and find its last line. If any line between there and the else's
- # first line are excluded, then we exclude the else.
- lastprev = self.getLastLine(prevsuite)
- firstelse = self.getFirstLine(suite)
- for l in range(lastprev+1, firstelse):
- if l in self.suite_spots:
- self.doSuite(None, suite, exclude=l in self.excluded)
- break
- else:
- self.doSuite(None, suite)
-
- def doElse(self, prevsuite, node):
- if node.else_:
- self.doPlainWordSuite(prevsuite, node.else_)
-
- def visitFor(self, node):
- self.doSuite(node, node.body)
- self.doElse(node.body, node)
-
- visitWhile = visitFor
-
- def visitIf(self, node):
- # The first test has to be handled separately from the rest.
- # The first test is credited to the line with the "if", but the others
- # are credited to the line with the test for the elif.
- self.doSuite(node, node.tests[0][1])
- for t, n in node.tests[1:]:
- self.doSuite(t, n)
- self.doElse(node.tests[-1][1], node)
-
- def visitTryExcept(self, node):
- self.doSuite(node, node.body)
- for i in range(len(node.handlers)):
- a, b, h = node.handlers[i]
- if not a:
- # It's a plain "except:". Find the previous suite.
- if i > 0:
- prev = node.handlers[i-1][2]
- else:
- prev = node.body
- self.doPlainWordSuite(prev, h)
- else:
- self.doSuite(a, h)
- self.doElse(node.handlers[-1][2], node)
-
- def visitTryFinally(self, node):
- self.doSuite(node, node.body)
- self.doPlainWordSuite(node.body, node.final)
-
- def visitWith(self, node):
- self.doSuite(node, node.body)
-
- def visitGlobal(self, node):
- # "global" statements don't execute like others (they don't call the
- # trace function), so don't record their line numbers.
- pass
-
-the_coverage = None
-
-class CoverageException(Exception):
- pass
-
-class coverage:
- # Name of the cache file (unless environment variable is set).
- cache_default = ".coverage"
-
- # Environment variable naming the cache file.
- cache_env = "COVERAGE_FILE"
-
- # A dictionary with an entry for (Python source file name, line number
- # in that file) if that line has been executed.
- c = {}
-
- # A map from canonical Python source file name to a dictionary in
- # which there's an entry for each line number that has been
- # executed.
- cexecuted = {}
-
- # Cache of results of calling the analysis2() method, so that you can
- # specify both -r and -a without doing double work.
- analysis_cache = {}
-
- # Cache of results of calling the canonical_filename() method, to
- # avoid duplicating work.
- canonical_filename_cache = {}
-
- def __init__(self):
- global the_coverage
- if the_coverage:
- raise CoverageException("Only one coverage object allowed.")
- self.usecache = 1
- self.cache = None
- self.parallel_mode = False
- self.exclude_re = ''
- self.nesting = 0
- self.cstack = []
- self.xstack = []
- self.relative_dir = self.abs_file(os.curdir)+os.sep
- self.exclude('# *pragma[: ]*[nN][oO] *[cC][oO][vV][eE][rR]')
-
- # t(f, x, y). This method is passed to sys.settrace as a trace function.
- # See [van Rossum 2001-07-20b, 9.2] for an explanation of sys.settrace and
- # the arguments and return value of the trace function.
- # See [van Rossum 2001-07-20a, 3.2] for a description of frame and code
- # objects.
-
- def t(self, f, w, unused): #pragma: no cover
- if w == 'line':
- self.c[(f.f_code.co_filename, f.f_lineno)] = 1
- #-for c in self.cstack:
- #- c[(f.f_code.co_filename, f.f_lineno)] = 1
- return self.t
-
- def help(self, error=None): #pragma: no cover
- if error:
- print(error)
- print()
- print(__doc__)
- sys.exit(1)
-
- def command_line(self, argv, help_fn=None):
- import getopt
- help_fn = help_fn or self.help
- settings = {}
- optmap = {
- '-a': 'annotate',
- '-c': 'collect',
- '-d:': 'directory=',
- '-e': 'erase',
- '-h': 'help',
- '-i': 'ignore-errors',
- '-m': 'show-missing',
- '-p': 'parallel-mode',
- '-r': 'report',
- '-x': 'execute',
- '-o:': 'omit=',
- }
- short_opts = string.join(map(lambda o: o[1:], optmap.keys()), '')
- long_opts = optmap.values()
- options, args = getopt.getopt(argv, short_opts, long_opts)
- for o, a in options:
- if o in optmap:
- settings[optmap[o]] = 1
- elif o + ':' in optmap:
- settings[optmap[o + ':']] = a
- elif o[2:] in long_opts:
- settings[o[2:]] = 1
- elif o[2:] + '=' in long_opts:
- settings[o[2:]+'='] = a
- else: #pragma: no cover
- # Can't get here, because getopt won't return anything unknown.
- pass
-
- if settings.get('help'):
- help_fn()
-
- for i in ['erase', 'execute']:
- for j in ['annotate', 'report', 'collect']:
- if settings.get(i) and settings.get(j):
- help_fn("You can't specify the '%s' and '%s' "
- "options at the same time." % (i, j))
-
- args_needed = (settings.get('execute')
- or settings.get('annotate')
- or settings.get('report'))
- action = (settings.get('erase')
- or settings.get('collect')
- or args_needed)
- if not action:
- help_fn("You must specify at least one of -e, -x, -c, -r, or -a.")
- if not args_needed and args:
- help_fn("Unexpected arguments: %s" % " ".join(args))
-
- self.parallel_mode = settings.get('parallel-mode')
- self.get_ready()
-
- if settings.get('erase'):
- self.erase()
- if settings.get('execute'):
- if not args:
- help_fn("Nothing to do.")
- sys.argv = args
- self.start()
- import __main__
- sys.path[0] = os.path.dirname(sys.argv[0])
- exec(compile(open(sys.argv[0]).read(), sys.argv[0], 'exec'), __main__.__dict__)
- if settings.get('collect'):
- self.collect()
- if not args:
- args = list(self.cexecuted.keys())
-
- ignore_errors = settings.get('ignore-errors')
- show_missing = settings.get('show-missing')
- directory = settings.get('directory=')
-
- omit = settings.get('omit=')
- if omit is not None:
- omit = [self.abs_file(p) for p in omit.split(',')]
- else:
- omit = []
-
- if settings.get('report'):
- self.report(args, show_missing, ignore_errors, omit_prefixes=omit)
- if settings.get('annotate'):
- self.annotate(args, directory, ignore_errors, omit_prefixes=omit)
-
- def use_cache(self, usecache, cache_file=None):
- self.usecache = usecache
- if cache_file and not self.cache:
- self.cache_default = cache_file
-
- def get_ready(self, parallel_mode=False):
- if self.usecache and not self.cache:
- self.cache = os.environ.get(self.cache_env, self.cache_default)
- if self.parallel_mode:
- self.cache += "." + gethostname() + "." + str(os.getpid())
- self.restore()
- self.analysis_cache = {}
-
- def start(self, parallel_mode=False):
- self.get_ready()
- if self.nesting == 0: #pragma: no cover
- sys.settrace(self.t)
- if hasattr(threading, 'settrace'):
- threading.settrace(self.t)
- self.nesting += 1
-
- def stop(self):
- self.nesting -= 1
- if self.nesting == 0: #pragma: no cover
- sys.settrace(None)
- if hasattr(threading, 'settrace'):
- threading.settrace(None)
-
- def erase(self):
- self.get_ready()
- self.c = {}
- self.analysis_cache = {}
- self.cexecuted = {}
- if self.cache and os.path.exists(self.cache):
- os.remove(self.cache)
-
- def exclude(self, re):
- if self.exclude_re:
- self.exclude_re += "|"
- self.exclude_re += "(" + re + ")"
-
- def begin_recursive(self):
- self.cstack.append(self.c)
- self.xstack.append(self.exclude_re)
-
- def end_recursive(self):
- self.c = self.cstack.pop()
- self.exclude_re = self.xstack.pop()
-
- # save(). Save coverage data to the coverage cache.
-
- def save(self):
- if self.usecache and self.cache:
- self.canonicalize_filenames()
- import marshal
- with open(self.cache, 'wb') as cache:
- marshal.dump(self.cexecuted, cache)
-
- # restore(). Restore coverage data from the coverage cache (if it exists).
-
- def restore(self):
- self.c = {}
- self.cexecuted = {}
- assert self.usecache
- if os.path.exists(self.cache):
- self.cexecuted = self.restore_file(self.cache)
-
- def restore_file(self, file_name):
- try:
- import marshal
- with open(file_name, 'rb') as cache:
- cexecuted = marshal.load(cache)
- if isinstance(cexecuted, dict):
- return cexecuted
- else:
- return {}
- except:
- return {}
-
- # collect(). Collect data in multiple files produced by parallel mode
-
- def collect(self):
- cache_dir, local = os.path.split(self.cache)
- for f in os.listdir(cache_dir or '.'):
- if not f.startswith(local):
- continue
-
- full_path = os.path.join(cache_dir, f)
- cexecuted = self.restore_file(full_path)
- self.merge_data(cexecuted)
-
- def merge_data(self, new_data):
- for file_name, file_data in new_data.items():
- if file_name in self.cexecuted:
- self.merge_file_data(self.cexecuted[file_name], file_data)
- else:
- self.cexecuted[file_name] = file_data
-
- def merge_file_data(self, cache_data, new_data):
- for line_number in new_data.keys():
- if line_number not in cache_data:
- cache_data[line_number] = new_data[line_number]
-
- def abs_file(self, filename):
- """ Helper function to turn a filename into an absolute normalized
- filename.
- """
- return os.path.normcase(os.path.abspath(os.path.realpath(filename)))
-
- def get_zip_data(self, filename):
- """ Get data from `filename` if it is a zip file path, or return None
- if it is not.
- """
- markers = ['.zip'+os.sep, '.egg'+os.sep]
- for marker in markers:
- if marker in filename:
- parts = filename.split(marker)
- try:
- zi = zipimport.zipimporter(parts[0]+marker[:-1])
- except zipimport.ZipImportError:
- continue
- try:
- data = zi.get_data(parts[1])
- except IOError:
- continue
- return data
- return None
-
- # canonical_filename(filename). Return a canonical filename for the
- # file (that is, an absolute path with no redundant components and
- # normalized case). See [GDR 2001-12-04b, 3.3].
-
- def canonical_filename(self, filename):
- if filename not in self.canonical_filename_cache:
- f = filename
- if os.path.isabs(f) and not os.path.exists(f):
- if not self.get_zip_data(f):
- f = os.path.basename(f)
- if not os.path.isabs(f):
- for path in [os.curdir] + sys.path:
- g = os.path.join(path, f)
- if os.path.exists(g):
- f = g
- break
- cf = self.abs_file(f)
- self.canonical_filename_cache[filename] = cf
- return self.canonical_filename_cache[filename]
-
- # canonicalize_filenames(). Copy results from "c" to "cexecuted",
- # canonicalizing filenames on the way. Clear the "c" map.
-
- def canonicalize_filenames(self):
- for filename, lineno in self.c.keys():
- if filename == '<string>':
- # Can't do anything useful with exec'd strings, so skip them.
- continue
- f = self.canonical_filename(filename)
- if f not in self.cexecuted:
- self.cexecuted[f] = {}
- self.cexecuted[f][lineno] = 1
- self.c = {}
-
- # morf_filename(morf). Return the filename for a module or file.
-
- def morf_filename(self, morf):
- if hasattr(morf, '__file__'):
- f = morf.__file__
- else:
- f = morf
- return self.canonical_filename(f)
-
- # analyze_morf(morf). Analyze the module or filename passed as
- # the argument. If the source code can't be found, raise an error.
- # Otherwise, return a tuple of (1) the canonical filename of the
- # source code for the module, (2) a list of lines of statements
- # in the source code, (3) a list of lines of excluded statements,
- # and (4), a map of line numbers to multi-line line number ranges, for
- # statements that cross lines.
-
- def analyze_morf(self, morf):
- if morf in self.analysis_cache:
- return self.analysis_cache[morf]
- filename = self.morf_filename(morf)
- ext = os.path.splitext(filename)[1]
- source, sourcef = None, None
- if ext == '.pyc':
- if not os.path.exists(filename[:-1]):
- source = self.get_zip_data(filename[:-1])
- if not source:
- raise CoverageException(
- "No source for compiled code '%s'." % filename
- )
- filename = filename[:-1]
- if not source:
- with open(filename, 'rU') as sourcef:
- source = sourcef.read()
- try:
- lines, excluded_lines, line_map = self.find_executable_statements(
- source, exclude=self.exclude_re
- )
- except SyntaxError as synerr:
- raise CoverageException(
- "Couldn't parse '%s' as Python source: '%s' at line %d" %
- (filename, synerr.msg, synerr.lineno)
- )
- result = filename, lines, excluded_lines, line_map
- self.analysis_cache[morf] = result
- return result
-
- def first_line_of_tree(self, tree):
- while True:
- if len(tree) == 3 and type(tree[2]) == type(1):
- return tree[2]
- tree = tree[1]
-
- def last_line_of_tree(self, tree):
- while True:
- if len(tree) == 3 and type(tree[2]) == type(1):
- return tree[2]
- tree = tree[-1]
-
- def find_docstring_pass_pair(self, tree, spots):
- for i in range(1, len(tree)):
- if (self.is_string_constant(tree[i])
- and self.is_pass_stmt(tree[i+1])):
- first_line = self.first_line_of_tree(tree[i])
- last_line = self.last_line_of_tree(tree[i+1])
- self.record_multiline(spots, first_line, last_line)
-
- def is_string_constant(self, tree):
- try:
- return (tree[0] == symbol.stmt
- and tree[1][1][1][0] == symbol.expr_stmt)
- except:
- return False
-
- def is_pass_stmt(self, tree):
- try:
- return (tree[0] == symbol.stmt
- and tree[1][1][1][0] == symbol.pass_stmt)
- except:
- return False
-
- def record_multiline(self, spots, i, j):
- for l in range(i, j+1):
- spots[l] = (i, j)
-
- def get_suite_spots(self, tree, spots):
- """ Analyze a parse tree to find suite introducers which span a number
- of lines.
- """
- for i in range(1, len(tree)):
- if type(tree[i]) == type(()):
- if tree[i][0] == symbol.suite:
- # Found a suite, look back for the colon and keyword.
- lineno_colon = lineno_word = None
- for j in range(i-1, 0, -1):
- if tree[j][0] == token.COLON:
- # Colons are never executed themselves: we want the
- # line number of the last token before the colon.
- lineno_colon = self.last_line_of_tree(tree[j-1])
- elif tree[j][0] == token.NAME:
- if tree[j][1] == 'elif':
- # Find the line number of the first non-terminal
- # after the keyword.
- t = tree[j+1]
- while t and token.ISNONTERMINAL(t[0]):
- t = t[1]
- if t:
- lineno_word = t[2]
- else:
- lineno_word = tree[j][2]
- break
- elif tree[j][0] == symbol.except_clause:
- # "except" clauses look like:
- # ('except_clause', ('NAME', 'except', lineno), ...)
- if tree[j][1][0] == token.NAME:
- lineno_word = tree[j][1][2]
- break
- if lineno_colon and lineno_word:
- # Found colon and keyword, mark all the lines
- # between the two with the two line numbers.
- self.record_multiline(spots, lineno_word, lineno_colon)
-
- # "pass" statements are tricky: different versions of Python
- # treat them differently, especially in the common case of a
- # function with a doc string and a single pass statement.
- self.find_docstring_pass_pair(tree[i], spots)
-
- elif tree[i][0] == symbol.simple_stmt:
- first_line = self.first_line_of_tree(tree[i])
- last_line = self.last_line_of_tree(tree[i])
- if first_line != last_line:
- self.record_multiline(spots, first_line, last_line)
- self.get_suite_spots(tree[i], spots)
-
- def find_executable_statements(self, text, exclude=None):
- # Find lines which match an exclusion pattern.
- excluded = {}
- suite_spots = {}
- if exclude:
- reExclude = re.compile(exclude)
- lines = text.split('\n')
- for i in range(len(lines)):
- if reExclude.search(lines[i]):
- excluded[i+1] = 1
-
- # Parse the code and analyze the parse tree to find out which statements
- # are multiline, and where suites begin and end.
- import parser
- tree = parser.suite(text+'\n\n').totuple(1)
- self.get_suite_spots(tree, suite_spots)
- #print "Suite spots:", suite_spots
-
- # Use the compiler module to parse the text and find the executable
- # statements. We add newlines to be impervious to final partial lines.
- statements = {}
- ast = compiler.parse(text+'\n\n')
- visitor = StatementFindingAstVisitor(statements, excluded, suite_spots)
- compiler.walk(ast, visitor, walker=visitor)
-
- lines = sorted(statements.keys())
- excluded_lines = sorted(excluded.keys())
- return lines, excluded_lines, suite_spots
-
- # format_lines(statements, lines). Format a list of line numbers
- # for printing by coalescing groups of lines as long as the lines
- # represent consecutive statements. This will coalesce even if
- # there are gaps between statements, so if statements =
- # [1,2,3,4,5,10,11,12,13,14] and lines = [1,2,5,10,11,13,14] then
- # format_lines will return "1-2, 5-11, 13-14".
-
- def format_lines(self, statements, lines):
- pairs = []
- i = 0
- j = 0
- start = None
- pairs = []
- while i < len(statements) and j < len(lines):
- if statements[i] == lines[j]:
- if start is None:
- start = lines[j]
- end = lines[j]
- j = j + 1
- elif start:
- pairs.append((start, end))
- start = None
- i = i + 1
- if start:
- pairs.append((start, end))
- def stringify(pair):
- start, end = pair
- if start == end:
- return "%d" % start
- else:
- return "%d-%d" % (start, end)
- ret = string.join(map(stringify, pairs), ", ")
- return ret
-
- # Backward compatibility with version 1.
- def analysis(self, morf):
- f, s, _, m, mf = self.analysis2(morf)
- return f, s, m, mf
-
- def analysis2(self, morf):
- filename, statements, excluded, line_map = self.analyze_morf(morf)
- self.canonicalize_filenames()
- if filename not in self.cexecuted:
- self.cexecuted[filename] = {}
- missing = []
- for line in statements:
- lines = line_map.get(line, [line, line])
- for l in range(lines[0], lines[1]+1):
- if l in self.cexecuted[filename]:
- break
- else:
- missing.append(line)
- return (filename, statements, excluded, missing,
- self.format_lines(statements, missing))
-
- def relative_filename(self, filename):
- """ Convert filename to relative filename from self.relative_dir.
- """
- return filename.replace(self.relative_dir, "")
-
- def morf_name(self, morf):
- """ Return the name of morf as used in report.
- """
- if hasattr(morf, '__name__'):
- return morf.__name__
- else:
- return self.relative_filename(os.path.splitext(morf)[0])
-
- def filter_by_prefix(self, morfs, omit_prefixes):
- """ Return list of morfs where the morf name does not begin
- with any one of the omit_prefixes.
- """
- filtered_morfs = []
- for morf in morfs:
- for prefix in omit_prefixes:
- if self.morf_name(morf).startswith(prefix):
- break
- else:
- filtered_morfs.append(morf)
-
- return filtered_morfs
-
- def morf_name_compare(self, x, y):
- return cmp(self.morf_name(x), self.morf_name(y))
-
- def report(self, morfs, show_missing=1, ignore_errors=0, file=None,
- omit_prefixes=[]):
- if not isinstance(morfs, list):
- morfs = [morfs]
- # On windows, the shell doesn't expand wildcards. Do it here.
- globbed = []
- for morf in morfs:
- if isinstance(morf, string_types):
- globbed.extend(glob.glob(morf))
- else:
- globbed.append(morf)
- morfs = globbed
-
- morfs = self.filter_by_prefix(morfs, omit_prefixes)
- morfs.sort(self.morf_name_compare)
-
- max_name = max(5, *map(len, map(self.morf_name, morfs)))
- fmt_name = "%%- %ds " % max_name
- fmt_err = fmt_name + "%s: %s"
- header = fmt_name % "Name" + " Stmts Exec Cover"
- fmt_coverage = fmt_name + "% 6d % 6d % 5d%%"
- if show_missing:
- header = header + " Missing"
- fmt_coverage = fmt_coverage + " %s"
- if not file:
- file = sys.stdout
- print(header, file=file)
- print("-" * len(header), file=file)
- total_statements = 0
- total_executed = 0
- for morf in morfs:
- name = self.morf_name(morf)
- try:
- _, statements, _, missing, readable = self.analysis2(morf)
- n = len(statements)
- m = n - len(missing)
- if n > 0:
- pc = 100.0 * m / n
- else:
- pc = 100.0
- args = (name, n, m, pc)
- if show_missing:
- args = args + (readable,)
- print(fmt_coverage % args, file=file)
- total_statements = total_statements + n
- total_executed = total_executed + m
- except KeyboardInterrupt: #pragma: no cover
- raise
- except:
- if not ignore_errors:
- typ, msg = sys.exc_info()[:2]
- print(fmt_err % (name, typ, msg), file=file)
- if len(morfs) > 1:
- print("-" * len(header), file=file)
- if total_statements > 0:
- pc = 100.0 * total_executed / total_statements
- else:
- pc = 100.0
- args = ("TOTAL", total_statements, total_executed, pc)
- if show_missing:
- args = args + ("",)
- print(fmt_coverage % args, file=file)
-
- # annotate(morfs, ignore_errors).
-
- blank_re = re.compile(r"\s*(#|$)")
- else_re = re.compile(r"\s*else\s*:\s*(#|$)")
-
- def annotate(self, morfs, directory=None, ignore_errors=0,
- omit_prefixes=[]):
- morfs = self.filter_by_prefix(morfs, omit_prefixes)
- for morf in morfs:
- try:
- (filename, statements, excluded,
- missing, _) = self.analysis2(morf)
- self.annotate_file(filename, statements, excluded, missing,
- directory)
- except KeyboardInterrupt:
- raise
- except:
- if not ignore_errors:
- raise
-
- def annotate_file(self, filename, statements, excluded, missing,
- directory=None):
- source = open(filename, 'r')
- if directory:
- dest_file = os.path.join(directory,
- os.path.basename(filename)
- + ',cover')
- else:
- dest_file = filename + ',cover'
- dest = open(dest_file, 'w')
- lineno = 0
- i = 0
- j = 0
- covered = 1
- while 1:
- line = source.readline()
- if line == '':
- break
- lineno = lineno + 1
- while i < len(statements) and statements[i] < lineno:
- i = i + 1
- while j < len(missing) and missing[j] < lineno:
- j = j + 1
- if i < len(statements) and statements[i] == lineno:
- covered = j >= len(missing) or missing[j] > lineno
- if self.blank_re.match(line):
- dest.write(' ')
- elif self.else_re.match(line):
- # Special logic for lines containing only 'else:'.
- # See [GDR 2001-12-04b, 3.2].
- if i >= len(statements) and j >= len(missing):
- dest.write('! ')
- elif i >= len(statements) or j >= len(missing):
- dest.write('> ')
- elif statements[i] == missing[j]:
- dest.write('! ')
- else:
- dest.write('> ')
- elif lineno in excluded:
- dest.write('- ')
- elif covered:
- dest.write('> ')
- else:
- dest.write('! ')
- dest.write(line)
- source.close()
- dest.close()
-
-# Singleton object.
-the_coverage = coverage()
-
-# Module functions call methods in the singleton object.
-def use_cache(*args, **kw):
- return the_coverage.use_cache(*args, **kw)
-
-def start(*args, **kw):
- return the_coverage.start(*args, **kw)
-
-def stop(*args, **kw):
- return the_coverage.stop(*args, **kw)
-
-def erase(*args, **kw):
- return the_coverage.erase(*args, **kw)
-
-def begin_recursive(*args, **kw):
- return the_coverage.begin_recursive(*args, **kw)
-
-def end_recursive(*args, **kw):
- return the_coverage.end_recursive(*args, **kw)
-
-def exclude(*args, **kw):
- return the_coverage.exclude(*args, **kw)
-
-def analysis(*args, **kw):
- return the_coverage.analysis(*args, **kw)
-
-def analysis2(*args, **kw):
- return the_coverage.analysis2(*args, **kw)
-
-def report(*args, **kw):
- return the_coverage.report(*args, **kw)
-
-def annotate(*args, **kw):
- return the_coverage.annotate(*args, **kw)
-
-def annotate_file(*args, **kw):
- return the_coverage.annotate_file(*args, **kw)
-
-atexit.register(the_coverage.save)
-
-def main():
- the_coverage.command_line(sys.argv[1:])
-
-# Command-line interface.
-if __name__ == '__main__':
- main()
-
-
-# A. REFERENCES
-#
-# [GDR 2001-12-04a] "Statement coverage for Python"; Gareth Rees;
-# Ravenbrook Limited; 2001-12-04;
-# <http://www.nedbatchelder.com/code/modules/rees-coverage.html>.
-#
-# [GDR 2001-12-04b] "Statement coverage for Python: design and
-# analysis"; Gareth Rees; Ravenbrook Limited; 2001-12-04;
-# <http://www.nedbatchelder.com/code/modules/rees-design.html>.
-#
-# [van Rossum 2001-07-20a] "Python Reference Manual (releae 2.1.1)";
-# Guide van Rossum; 2001-07-20;
-# <http://www.python.org/doc/2.1.1/ref/ref.html>.
-#
-# [van Rossum 2001-07-20b] "Python Library Reference"; Guido van Rossum;
-# 2001-07-20; <http://www.python.org/doc/2.1.1/lib/lib.html>.
-#
-#
-# B. DOCUMENT HISTORY
-#
-# 2001-12-04 GDR Created.
-#
-# 2001-12-06 GDR Added command-line interface and source code
-# annotation.
-#
-# 2001-12-09 GDR Moved design and interface to separate documents.
-#
-# 2001-12-10 GDR Open cache file as binary on Windows. Allow
-# simultaneous -e and -x, or -a and -r.
-#
-# 2001-12-12 GDR Added command-line help. Cache analysis so that it
-# only needs to be done once when you specify -a and -r.
-#
-# 2001-12-13 GDR Improved speed while recording. Portable between
-# Python 1.5.2 and 2.1.1.
-#
-# 2002-01-03 GDR Module-level functions work correctly.
-#
-# 2002-01-07 GDR Update sys.path when running a file with the -x option,
-# so that it matches the value the program would get if it were run on
-# its own.
-#
-# 2004-12-12 NMB Significant code changes.
-# - Finding executable statements has been rewritten so that docstrings and
-# other quirks of Python execution aren't mistakenly identified as missing
-# lines.
-# - Lines can be excluded from consideration, even entire suites of lines.
-# - The filesystem cache of covered lines can be disabled programmatically.
-# - Modernized the code.
-#
-# 2004-12-14 NMB Minor tweaks. Return 'analysis' to its original behavior
-# and add 'analysis2'. Add a global for 'annotate', and factor it, adding
-# 'annotate_file'.
-#
-# 2004-12-31 NMB Allow for keyword arguments in the module global functions.
-# Thanks, Allen.
-#
-# 2005-12-02 NMB Call threading.settrace so that all threads are measured.
-# Thanks Martin Fuzzey. Add a file argument to report so that reports can be
-# captured to a different destination.
-#
-# 2005-12-03 NMB coverage.py can now measure itself.
-#
-# 2005-12-04 NMB Adapted Greg Rogers' patch for using relative filenames,
-# and sorting and omitting files to report on.
-#
-# 2006-07-23 NMB Applied Joseph Tate's patch for function decorators.
-#
-# 2006-08-21 NMB Applied Sigve Tjora and Mark van der Wal's fixes for argument
-# handling.
-#
-# 2006-08-22 NMB Applied Geoff Bache's parallel mode patch.
-#
-# 2006-08-23 NMB Refactorings to improve testability. Fixes to command-line
-# logic for parallel mode and collect.
-#
-# 2006-08-25 NMB "#pragma: nocover" is excluded by default.
-#
-# 2006-09-10 NMB Properly ignore docstrings and other constant expressions that
-# appear in the middle of a function, a problem reported by Tim Leslie.
-# Minor changes to avoid lint warnings.
-#
-# 2006-09-17 NMB coverage.erase() shouldn't clobber the exclude regex.
-# Change how parallel mode is invoked, and fix erase() so that it erases the
-# cache when called programmatically.
-#
-# 2007-07-21 NMB In reports, ignore code executed from strings, since we can't
-# do anything useful with it anyway.
-# Better file handling on Linux, thanks Guillaume Chazarain.
-# Better shell support on Windows, thanks Noel O'Boyle.
-# Python 2.2 support maintained, thanks Catherine Proulx.
-#
-# 2007-07-22 NMB Python 2.5 now fully supported. The method of dealing with
-# multi-line statements is now less sensitive to the exact line that Python
-# reports during execution. Pass statements are handled specially so that their
-# disappearance during execution won't throw off the measurement.
-#
-# 2007-07-23 NMB Now Python 2.5 is *really* fully supported: the body of the
-# new with statement is counted as executable.
-#
-# 2007-07-29 NMB Better packaging.
-#
-# 2007-09-30 NMB Don't try to predict whether a file is Python source based on
-# the extension. Extensionless files are often Pythons scripts. Instead, simply
-# parse the file and catch the syntax errors. Hat tip to Ben Finney.
-#
-# 2008-05-25 NMB Open files in rU mode to avoid line ending craziness.
-# Thanks, Edward Loper.
-#
-# 2008-09-14 NMB Add support for finding source files in eggs.
-# Don't check for morf's being instances of ModuleType, instead use duck typing
-# so that pseudo-modules can participate. Thanks, Imri Goldberg.
-# Use os.realpath as part of the fixing of filenames so that symlinks won't
-# confuse things. Thanks, Patrick Mezard.
-#
-#
-# C. COPYRIGHT AND LICENCE
-#
-# Copyright 2001 Gareth Rees. All rights reserved.
-# Copyright 2004-2008 Ned Batchelder. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the
-# distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-# DAMAGE.
-#
-# $Id: coverage.py 100 2008-10-12 12:08:22Z nedbat $
diff --git a/tests/path.py b/tests/path.py
index ef1f35004..0d6892776 100755
--- a/tests/path.py
+++ b/tests/path.py
@@ -145,7 +145,7 @@ class path(text_type):
mode = 'rU' if PY2 else 'r'
with open(self, mode=mode, encoding=encoding, **kwargs) as f:
text = f.read()
- contents = repr_as(text, '<%s contents>' % self.basename())
+ contents = repr_as(text, '<%s contents: %r>' % (self.basename(), text))
return contents
def bytes(self):
diff --git a/tests/test_autodoc_py35.py b/tests/py35/test_autodoc_py35.py
index 43cda2260..6049f7521 100644
--- a/tests/test_autodoc_py35.py
+++ b/tests/py35/test_autodoc_py35.py
@@ -14,7 +14,7 @@
import six
import sys
from util import TestApp, Struct, raises, SkipTest
-from nose.tools import with_setup, eq_
+import pytest
from six import StringIO
from docutils.statemachine import ViewList
@@ -42,6 +42,7 @@ def teardown_module():
directive = options = None
+@pytest.fixture
def setup_test():
global options, directive
global processed_docstrings, processed_signatures, _warnings
@@ -106,7 +107,7 @@ def skip_member(app, what, name, obj, skip, options):
return True
-@with_setup(setup_test)
+@pytest.mark.usefixtures('setup_test')
def test_generate():
def assert_warns(warn_str, objtype, name, **kw):
inst = AutoDirective._registry[objtype](directive, name)
diff --git a/tests/roots/test-add_enumerable_node/conf.py b/tests/roots/test-add_enumerable_node/conf.py
index d433def93..a4801f6bb 100644
--- a/tests/roots/test-add_enumerable_node/conf.py
+++ b/tests/roots/test-add_enumerable_node/conf.py
@@ -4,7 +4,7 @@ import os
import sys
sys.path.insert(0, os.path.abspath('.'))
-extensions = ['test_enumerable_node']
+extensions = ['enumerable_node']
master_doc = 'index'
numfig = True
diff --git a/tests/roots/test-add_enumerable_node/test_enumerable_node.py b/tests/roots/test-add_enumerable_node/enumerable_node.py
index eb6381fad..eb6381fad 100644
--- a/tests/roots/test-add_enumerable_node/test_enumerable_node.py
+++ b/tests/roots/test-add_enumerable_node/enumerable_node.py
diff --git a/tests/roots/test-add_source_parser-conflicts-with-users-setting/conf.py b/tests/roots/test-add_source_parser-conflicts-with-users-setting/conf.py
index db9fe54a9..c50c0d2df 100644
--- a/tests/roots/test-add_source_parser-conflicts-with-users-setting/conf.py
+++ b/tests/roots/test-add_source_parser-conflicts-with-users-setting/conf.py
@@ -11,7 +11,7 @@ class DummyTestParser(Parser):
pass
-extensions = ['test_source_parser']
+extensions = ['source_parser']
source_suffix = ['.rst', '.test']
source_parsers = {
'.test': DummyTestParser
diff --git a/tests/roots/test-add_source_parser-conflicts-with-users-setting/test_source_parser.py b/tests/roots/test-add_source_parser-conflicts-with-users-setting/source_parser.py
index 0dff7e311..0dff7e311 100644
--- a/tests/roots/test-add_source_parser-conflicts-with-users-setting/test_source_parser.py
+++ b/tests/roots/test-add_source_parser-conflicts-with-users-setting/source_parser.py
diff --git a/tests/roots/test-add_source_parser/conf.py b/tests/roots/test-add_source_parser/conf.py
index f9969341a..5fc0c63d2 100644
--- a/tests/roots/test-add_source_parser/conf.py
+++ b/tests/roots/test-add_source_parser/conf.py
@@ -11,7 +11,7 @@ class DummyMarkdownParser(Parser):
pass
-extensions = ['test_source_parser']
+extensions = ['source_parser']
source_suffix = ['.rst', '.md']
source_parsers = {
'.md': DummyMarkdownParser
diff --git a/tests/roots/test-add_source_parser/test_source_parser.py b/tests/roots/test-add_source_parser/source_parser.py
index 0dff7e311..0dff7e311 100644
--- a/tests/roots/test-add_source_parser/test_source_parser.py
+++ b/tests/roots/test-add_source_parser/source_parser.py
diff --git a/tests/run.py b/tests/run.py
index 273b2ee6b..72a668541 100755
--- a/tests/run.py
+++ b/tests/run.py
@@ -17,14 +17,14 @@ import warnings
import traceback
from path import path
-import nose
+import pytest
testroot = os.path.dirname(__file__) or '.'
sys.path.insert(0, os.path.abspath(os.path.join(testroot, os.path.pardir)))
# check dependencies before testing
print('Checking dependencies...')
-for modname in ('nose', 'mock', 'six', 'docutils', 'jinja2', 'pygments',
+for modname in ('pytest', 'mock', 'six', 'docutils', 'jinja2', 'pygments',
'snowballstemmer', 'babel', 'html5lib'):
try:
__import__(modname)
@@ -50,7 +50,15 @@ print('Running Sphinx test suite (with Python %s)...' % sys.version.split()[0])
sys.stdout.flush()
# filter warnings of test dependencies
-warnings.filterwarnings('ignore', category=DeprecationWarning, module='nose.util')
warnings.filterwarnings('ignore', category=DeprecationWarning, module='site') # virtualenv
-nose.main(argv=sys.argv)
+# exclude 'root' and 'roots' dirs for pytest test collector
+ignore_paths = [
+ os.path.relpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), sub))
+ for sub in ('root', 'roots')
+]
+args = sys.argv[1:]
+for path in ignore_paths:
+ args.extend(['--ignore', path])
+
+sys.exit(pytest.main(args))
diff --git a/tests/test_apidoc.py b/tests/test_apidoc.py
index ff6a147ca..d44868aeb 100644
--- a/tests/test_apidoc.py
+++ b/tests/test_apidoc.py
@@ -11,45 +11,60 @@
from __future__ import print_function
-import sys
-from six import PY2
+from collections import namedtuple
-from sphinx import apidoc
+import pytest
-from util import with_tempdir, with_app, rootdir
+from sphinx.apidoc import main as apidoc_main
+from util import rootdir, remove_unicode_literals
-@with_tempdir
-def test_simple(tempdir):
- codedir = rootdir / 'root'
+
+@pytest.fixture()
+def apidoc(tempdir, apidoc_params):
+ _, kwargs = apidoc_params
+ coderoot = kwargs.get('coderoot', (rootdir / 'root'))
outdir = tempdir / 'out'
- args = ['sphinx-apidoc', '-o', outdir, '-F', codedir]
- apidoc.main(args)
+ args = ['sphinx-apidoc', '-o', outdir, '-F', coderoot] + kwargs.get('options', [])
+ apidoc_main(args)
+ return namedtuple('apidoc', 'coderoot,outdir')(coderoot, outdir)
+
+
+@pytest.fixture
+def apidoc_params(request):
+ markers = request.node.get_marker("apidoc")
+ pargs = {}
+ kwargs = {}
+
+ if markers is not None:
+ for info in reversed(list(markers)):
+ for i, a in enumerate(info.args):
+ pargs[i] = a
+ kwargs.update(info.kwargs)
+ args = [pargs[i] for i in sorted(pargs.keys())]
+ return args, kwargs
+
+
+@pytest.mark.apidoc(coderoot=(rootdir / 'root'))
+def test_simple(make_app, apidoc):
+ outdir = apidoc.outdir
assert (outdir / 'conf.py').isfile()
assert (outdir / 'autodoc_fodder.rst').isfile()
assert (outdir / 'index.rst').isfile()
- @with_app('text', srcdir=outdir)
- def assert_build(app, status, warning):
- app.build()
- print(status.getvalue())
- print(warning.getvalue())
-
- sys.path.append(codedir)
- try:
- assert_build()
- finally:
- sys.path.remove(codedir)
-
+ app = make_app('text', srcdir=outdir)
+ app.build()
+ print(app._status.getvalue())
+ print(app._warning.getvalue())
-@with_tempdir
-def test_pep_0420_enabled(tempdir):
- codedir = rootdir / 'root' / 'pep_0420'
- outdir = tempdir / 'out'
- args = ['sphinx-apidoc', '-o', outdir, '-F', codedir, "--implicit-namespaces"]
- apidoc.main(args)
+@pytest.mark.apidoc(
+ coderoot=(rootdir / 'root' / 'pep_0420'),
+ options=["--implicit-namespaces"],
+)
+def test_pep_0420_enabled(make_app, apidoc):
+ outdir = apidoc.outdir
assert (outdir / 'conf.py').isfile()
assert (outdir / 'a.b.c.rst').isfile()
assert (outdir / 'a.b.x.rst').isfile()
@@ -66,49 +81,28 @@ def test_pep_0420_enabled(tempdir):
assert "automodule:: a.b.x.y\n" in rst
assert "automodule:: a.b.x\n" not in rst
- @with_app('text', srcdir=outdir)
- def assert_build(app, status, warning):
- app.build()
- print(status.getvalue())
- print(warning.getvalue())
-
- sys.path.append(codedir)
- try:
- assert_build()
- finally:
- sys.path.remove(codedir)
+ app = make_app('text', srcdir=outdir)
+ app.build()
+ print(app._status.getvalue())
+ print(app._warning.getvalue())
-@with_tempdir
-def test_pep_0420_disabled(tempdir):
- codedir = rootdir / 'root' / 'pep_0420'
- outdir = tempdir / 'out'
- args = ['sphinx-apidoc', '-o', outdir, '-F', codedir]
- apidoc.main(args)
-
+@pytest.mark.apidoc(coderoot=(rootdir / 'root' / 'pep_0420'))
+def test_pep_0420_disabled(make_app, apidoc):
+ outdir = apidoc.outdir
assert (outdir / 'conf.py').isfile()
assert not (outdir / 'a.b.c.rst').exists()
assert not (outdir / 'a.b.x.rst').exists()
- @with_app('text', srcdir=outdir)
- def assert_build(app, status, warning):
- app.build()
- print(status.getvalue())
- print(warning.getvalue())
-
- sys.path.append(codedir)
- try:
- assert_build()
- finally:
- sys.path.remove(codedir)
-
-@with_tempdir
-def test_pep_0420_disabled_top_level_verify(tempdir):
- codedir = rootdir / 'root' / 'pep_0420' / 'a' / 'b'
- outdir = tempdir / 'out'
- args = ['sphinx-apidoc', '-o', outdir, '-F', codedir]
- apidoc.main(args)
+ app = make_app('text', srcdir=outdir)
+ app.build()
+ print(app._status.getvalue())
+ print(app._warning.getvalue())
+
+@pytest.mark.apidoc(coderoot=(rootdir / 'root' / 'pep_0420' / 'a' / 'b'))
+def test_pep_0420_disabled_top_level_verify(make_app, apidoc):
+ outdir = apidoc.outdir
assert (outdir / 'conf.py').isfile()
assert (outdir / 'c.rst').isfile()
assert not (outdir / 'x.rst').exists()
@@ -119,53 +113,35 @@ def test_pep_0420_disabled_top_level_verify(tempdir):
assert "automodule:: c.d\n" in rst
assert "automodule:: c\n" in rst
- @with_app('text', srcdir=outdir)
- def assert_build(app, status, warning):
- app.build()
- print(status.getvalue())
- print(warning.getvalue())
-
- sys.path.append(codedir)
- try:
- assert_build()
- finally:
- sys.path.remove(codedir)
-
-@with_tempdir
-def test_multibyte_parameters(tempdir):
- codedir = rootdir / 'root'
- outdir = tempdir / 'out'
- args = ['sphinx-apidoc', '-o', outdir, '-F', codedir,
- '--doc-project', u'プロジェクト名'.encode('utf-8'),
- '--doc-author', u'著者名'.encode('utf-8'),
- '--doc-version', u'バージョン'.encode('utf-8'),
- '--doc-release', u'リリース'.encode('utf-8')]
- apidoc.main(args)
-
+ app = make_app('text', srcdir=outdir)
+ app.build()
+ print(app._status.getvalue())
+ print(app._warning.getvalue())
+
+
+@pytest.mark.apidoc(
+ coderoot=(rootdir / 'root'),
+ options=[
+ '--doc-project', u'プロジェクト名'.encode('utf-8'),
+ '--doc-author', u'著者名'.encode('utf-8'),
+ '--doc-version', u'バージョン'.encode('utf-8'),
+ '--doc-release', u'リリース'.encode('utf-8'),
+ ],
+)
+def test_multibyte_parameters(make_app, apidoc):
+ outdir = apidoc.outdir
assert (outdir / 'conf.py').isfile()
assert (outdir / 'autodoc_fodder.rst').isfile()
assert (outdir / 'index.rst').isfile()
conf_py = (outdir / 'conf.py').text()
- if PY2:
- assert u"project = u'プロジェクト名'" in conf_py
- assert u"author = u'著者名'" in conf_py
- assert u"version = u'バージョン'" in conf_py
- assert u"release = u'リリース'" in conf_py
- else:
- assert u"project = 'プロジェクト名'" in conf_py
- assert u"author = '著者名'" in conf_py
- assert u"version = 'バージョン'" in conf_py
- assert u"release = 'リリース'" in conf_py
-
- @with_app('text', srcdir=outdir)
- def assert_build(app, status, warning):
- app.build()
- print(status.getvalue())
- print(warning.getvalue())
-
- sys.path.append(codedir)
- try:
- assert_build()
- finally:
- sys.path.remove(codedir)
+ conf_py_ = remove_unicode_literals(conf_py)
+ assert u"project = 'プロジェクト名'" in conf_py_
+ assert u"author = '著者名'" in conf_py_
+ assert u"version = 'バージョン'" in conf_py_
+ assert u"release = 'リリース'" in conf_py_
+
+ app = make_app('text', srcdir=outdir)
+ app.build()
+ print(app._status.getvalue())
+ print(app._warning.getvalue())
diff --git a/tests/test_autodoc.py b/tests/test_autodoc.py
index 8df943ec7..22eb02b34 100644
--- a/tests/test_autodoc.py
+++ b/tests/test_autodoc.py
@@ -12,7 +12,7 @@
# "raises" imported for usage by autodoc
from util import TestApp, Struct, raises, SkipTest # NOQA
-from nose.tools import with_setup, eq_
+import pytest
import enum
from six import StringIO, add_metaclass
@@ -41,6 +41,7 @@ def teardown_module():
directive = options = None
+@pytest.fixture
def setup_test():
global options, directive
global processed_docstrings, processed_signatures, _warnings
@@ -74,6 +75,10 @@ def setup_test():
processed_signatures = []
_warnings = []
+ yield
+
+ AutoDirective._special_attrgetters.clear()
+
_warnings = []
processed_docstrings = []
@@ -105,7 +110,7 @@ def skip_member(app, what, name, obj, skip, options):
return True
-@with_setup(setup_test)
+@pytest.mark.usefixtures('setup_test')
def test_parse_name():
def verify(objtype, name, result):
inst = AutoDirective._registry[objtype](directive, name)
@@ -147,7 +152,7 @@ def test_parse_name():
del directive.env.temp_data['autodoc:class']
-@with_setup(setup_test)
+@pytest.mark.usefixtures('setup_test')
def test_format_signature():
def formatsig(objtype, name, obj, args, retann):
inst = AutoDirective._registry[objtype](directive, name)
@@ -253,7 +258,7 @@ def test_format_signature():
'(b, c=42, *d, **e)'
-@with_setup(setup_test)
+@pytest.mark.usefixtures('setup_test')
def test_get_doc():
def getdocl(objtype, obj, encoding=None):
inst = AutoDirective._registry[objtype](directive, 'tmp')
@@ -423,7 +428,7 @@ def test_get_doc():
assert getdocl('class', I) == ['Class docstring', '', 'New docstring']
-@with_setup(setup_test)
+@pytest.mark.usefixtures('setup_test')
def test_docstring_processing():
def process(objtype, name, obj):
inst = AutoDirective._registry[objtype](directive, name)
@@ -478,7 +483,7 @@ def test_docstring_processing():
app.disconnect(lid)
-@with_setup(setup_test)
+@pytest.mark.usefixtures('setup_test')
def test_docstring_property_processing():
def genarate_docstring(objtype, name, **kw):
del processed_docstrings[:]
@@ -515,7 +520,7 @@ def test_docstring_property_processing():
assert 'Second line of docstring' in docstrings
-@with_setup(setup_test)
+@pytest.mark.usefixtures('setup_test')
def test_new_documenter():
class MyDocumenter(ModuleLevelDocumenter):
objtype = 'integer'
@@ -543,7 +548,7 @@ def test_new_documenter():
assert_result_contains('.. py:data:: integer', 'module', 'test_autodoc')
-@with_setup(setup_test, AutoDirective._special_attrgetters.clear)
+@pytest.mark.usefixtures('setup_test')
def test_attrgetter_using():
def assert_getter_works(objtype, name, obj, attrs=[], **kw):
getattr_spy = []
@@ -575,7 +580,7 @@ def test_attrgetter_using():
assert_getter_works('class', 'test_autodoc.Class', Class, ['meth', 'inheritedmeth'])
-@with_setup(setup_test)
+@pytest.mark.usefixtures('setup_test')
def test_generate():
def assert_warns(warn_str, objtype, name, **kw):
inst = AutoDirective._registry[objtype](directive, name)
@@ -1084,7 +1089,7 @@ def test_type_hints():
raise SkipTest('Cannot import Python code with function annotations')
def verify_arg_spec(f, expected):
- eq_(formatargspec(f, *getargspec(f)), expected)
+ assert formatargspec(f, *getargspec(f)) == expected
# Class annotations
verify_arg_spec(f0, '(x: int, y: numbers.Integral) -> None')
diff --git a/tests/test_build.py b/tests/test_build.py
index d61291a2f..9f8db6b86 100644
--- a/tests/test_build.py
+++ b/tests/test_build.py
@@ -78,12 +78,12 @@ def test_build_all():
@with_tempdir
-def test_master_doc_not_found(tmpdir):
- (tmpdir / 'conf.py').write_text('master_doc = "index"')
- assert tmpdir.listdir() == ['conf.py']
+def test_master_doc_not_found(tempdir):
+ (tempdir / 'conf.py').write_text('master_doc = "index"')
+ assert tempdir.listdir() == ['conf.py']
try:
- app = TestApp(buildername='dummy', srcdir=tmpdir)
+ app = TestApp(buildername='dummy', srcdir=tempdir)
app.builder.build_all()
assert False # SphinxError not raised
except Exception as exc:
diff --git a/tests/test_build_gettext.py b/tests/test_build_gettext.py
index 2dc62b838..0ba6d5534 100644
--- a/tests/test_build_gettext.py
+++ b/tests/test_build_gettext.py
@@ -15,9 +15,9 @@ import re
import gettext
from subprocess import Popen, PIPE
-from nose.tools import assert_true, assert_equal
-
-from util import with_app, gen_with_app, SkipTest, assert_in
+from util import (
+ with_app, gen_with_app, SkipTest, assert_in, assert_true, assert_equal
+)
@gen_with_app('gettext', srcdir='root-gettext')
@@ -76,7 +76,7 @@ def test_all(app, status, warning):
yield assert_equal, _("Testing various markup"), u"Testing various markup"
-@with_app('gettext', testroot='intl',
+@with_app('gettext', testroot='intl', srcdir='gettext',
confoverrides={'gettext_compact': False})
def test_gettext_index_entries(app, status, warning):
# regression test for #976
@@ -123,7 +123,7 @@ def test_gettext_index_entries(app, status, warning):
assert msgids == []
-@with_app('gettext', testroot='intl',
+@with_app('gettext', testroot='intl', srcdir='gettext',
confoverrides={'gettext_compact': False, 'gettext_additional_targets': []})
def test_gettext_disable_index_entries(app, status, warning):
# regression test for #976
@@ -155,7 +155,7 @@ def test_gettext_disable_index_entries(app, status, warning):
assert msgids == []
-@with_app(buildername='gettext', testroot='intl')
+@with_app(buildername='gettext', testroot='intl', srcdir='gettext')
def test_gettext_template(app, status, warning):
app.builder.build_all()
assert (app.outdir / 'sphinx.pot').isfile()
diff --git a/tests/test_build_html.py b/tests/test_build_html.py
index d8aff88ab..e45c70052 100644
--- a/tests/test_build_html.py
+++ b/tests/test_build_html.py
@@ -24,10 +24,7 @@ TREE_BUILDER = getTreeBuilder('etree', implementation=ElementTree)
HTML_PARSER = HTMLParser(TREE_BUILDER, namespaceHTMLElements=False)
ENV_WARNINGS = """\
-(%(root)s/autodoc_fodder.py:docstring of autodoc_fodder.MarkupError:\\d+: \
-WARNING: duplicate object description of autodoc_fodder.MarkupError, other \
-instance in %(root)s/autodoc.rst, use :noindex: for one of them
-)?%(root)s/autodoc_fodder.py:docstring of autodoc_fodder.MarkupError:\\d+: \
+%(root)s/autodoc_fodder.py:docstring of autodoc_fodder.MarkupError:\\d+: \
WARNING: Explicit markup ends without a blank line; unexpected unindent.
%(root)s/index.rst:\\d+: WARNING: Encoding 'utf-8-sig' used for reading included \
file u'%(root)s/wrongenc.inc' seems to be wrong, try giving an :encoding: option
@@ -36,7 +33,7 @@ file u'%(root)s/wrongenc.inc' seems to be wrong, try giving an :encoding: option
%(root)s/index.rst:\\d+: WARNING: download file not readable: %(root)s/nonexisting.png
%(root)s/index.rst:\\d+: WARNING: invalid single index entry u''
%(root)s/undecodable.rst:\\d+: WARNING: undecodable source characters, replacing \
-with "\\?": b?'here: >>>(\\\\|/)xbb<<<'
+with "\\?": b?'here: >>>(\\\\|/)xbb<<<((\\\\|/)r)?'
"""
HTML_WARNINGS = ENV_WARNINGS + """\
@@ -662,7 +659,7 @@ def test_numfig_without_numbered_toctree(app, status, warning):
yield check_xpath, etree, fname, xpath, check, be_found
-@gen_with_app(buildername='html', testroot='numfig',
+@gen_with_app(buildername='html', testroot='numfig', srcdir='test_build_html_numfig_on',
confoverrides={'numfig': True})
def test_numfig_with_numbered_toctree(app, status, warning):
app.builder.build_all()
@@ -763,6 +760,7 @@ def test_numfig_with_numbered_toctree(app, status, warning):
@gen_with_app(buildername='html', testroot='numfig',
+ srcdir='test_build_html_numfig_format_warn',
confoverrides={'numfig': True,
'numfig_format': {'figure': 'Figure:%s',
'table': 'Tab_%s',
@@ -867,6 +865,7 @@ def test_numfig_with_prefix(app, status, warning):
@gen_with_app(buildername='html', testroot='numfig',
+ srcdir='test_build_html_numfig_depth_2',
confoverrides={'numfig': True, 'numfig_secnum_depth': 2})
def test_numfig_with_secnum_depth(app, status, warning):
app.builder.build_all()
@@ -967,6 +966,7 @@ def test_numfig_with_secnum_depth(app, status, warning):
@gen_with_app(buildername='singlehtml', testroot='numfig',
+ srcdir='test_build_html_numfig_on',
confoverrides={'numfig': True})
def test_numfig_with_singlehtml(app, status, warning):
app.builder.build_all()
diff --git a/tests/test_build_latex.py b/tests/test_build_latex.py
index dfcadc6a1..7366763ca 100644
--- a/tests/test_build_latex.py
+++ b/tests/test_build_latex.py
@@ -12,11 +12,11 @@ from __future__ import print_function
import os
import re
-from functools import wraps
from itertools import product
from subprocess import Popen, PIPE
from six import PY3
+import pytest
from sphinx.errors import SphinxError
from sphinx.util.osutil import cd, ensuredir
@@ -90,14 +90,13 @@ def skip_if_stylefiles_notfound(testfunc):
return testfunc
-def test_latex():
- for engine, docclass in product(LATEX_ENGINES, DOCCLASSES):
- yield build_latex_doc, engine, docclass
-
-
@skip_if_stylefiles_notfound
-@with_app(buildername='latex')
-def build_latex_doc(app, status, warning, engine, docclass):
+@pytest.mark.parametrize(
+ "engine,docclass",
+ product(LATEX_ENGINES, DOCCLASSES),
+)
+@pytest.mark.sphinx('latex')
+def test_build_latex_doc(app, status, warning, engine, docclass):
app.config.latex_engine = engine
app.config.latex_documents[0] = app.config.latex_documents[0][:4] + (docclass,)
diff --git a/tests/test_catalogs.py b/tests/test_catalogs.py
index ca6608490..d0606d945 100644
--- a/tests/test_catalogs.py
+++ b/tests/test_catalogs.py
@@ -10,7 +10,7 @@
"""
import shutil
-from nose.tools import with_setup
+import pytest
from util import with_app, find_files, rootdir, tempdir
@@ -19,6 +19,7 @@ build_dir = root / '_build'
locale_dir = build_dir / 'locale'
+@pytest.fixture
def setup_test():
# delete remnants left over after failed build
root.rmtree(True)
@@ -30,12 +31,12 @@ def setup_test():
copy_po.parent.makedirs()
shutil.copy(root / po, copy_po)
+ yield
-def teardown_test():
build_dir.rmtree(True)
-@with_setup(setup_test, teardown_test)
+@pytest.mark.usefixtures('setup_test')
@with_app(buildername='html', testroot='intl',
confoverrides={'language': 'en', 'locale_dirs': [locale_dir]})
def test_compile_all_catalogs(app, status, warning):
@@ -51,7 +52,7 @@ def test_compile_all_catalogs(app, status, warning):
assert actual == expect
-@with_setup(setup_test, teardown_test)
+@pytest.mark.usefixtures('setup_test')
@with_app(buildername='html', testroot='intl',
confoverrides={'language': 'en', 'locale_dirs': [locale_dir]})
def test_compile_specific_catalogs(app, status, warning):
@@ -66,7 +67,7 @@ def test_compile_specific_catalogs(app, status, warning):
assert actual == set(['admonitions.mo'])
-@with_setup(setup_test, teardown_test)
+@pytest.mark.usefixtures('setup_test')
@with_app(buildername='html', testroot='intl',
confoverrides={'language': 'en', 'locale_dirs': [locale_dir]})
def test_compile_update_catalogs(app, status, warning):
diff --git a/tests/test_config.py b/tests/test_config.py
index 1b3c94957..9464dfd0d 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -87,16 +87,16 @@ def test_extension_values(app, status, warning):
@with_tempdir
-def test_errors_warnings(dir):
+def test_errors_warnings(tempdir):
# test the error for syntax errors in the config file
- (dir / 'conf.py').write_text(u'project = \n', encoding='ascii')
- raises_msg(ConfigError, 'conf.py', Config, dir, 'conf.py', {}, None)
+ (tempdir / 'conf.py').write_text(u'project = \n', encoding='ascii')
+ raises_msg(ConfigError, 'conf.py', Config, tempdir, 'conf.py', {}, None)
# test the automatic conversion of 2.x only code in configs
- (dir / 'conf.py').write_text(
+ (tempdir / 'conf.py').write_text(
u'# -*- coding: utf-8\n\nproject = u"Jägermeister"\n',
encoding='utf-8')
- cfg = Config(dir, 'conf.py', {}, None)
+ cfg = Config(tempdir, 'conf.py', {}, None)
cfg.init_values(lambda warning: 1/0)
assert cfg.project == u'Jägermeister'
@@ -105,9 +105,9 @@ def test_errors_warnings(dir):
# skip the test there
if PY3:
return
- (dir / 'conf.py').write_text(
+ (tempdir / 'conf.py').write_text(
u'# -*- coding: latin-1\nproject = "fooä"\n', encoding='latin-1')
- cfg = Config(dir, 'conf.py', {}, None)
+ cfg = Config(tempdir, 'conf.py', {}, None)
warned = [False]
def warn(msg):
@@ -118,10 +118,10 @@ def test_errors_warnings(dir):
@with_tempdir
-def test_errors_if_setup_is_not_callable(dir):
+def test_errors_if_setup_is_not_callable(tempdir):
# test the error to call setup() in the config file
- (dir / 'conf.py').write_text(u'setup = 1')
- raises_msg(ConfigError, 'callable', TestApp, srcdir=dir)
+ (tempdir / 'conf.py').write_text(u'setup = 1')
+ raises_msg(ConfigError, 'callable', TestApp, srcdir=tempdir)
@mock.patch.object(sphinx, '__display_version__', '1.3.4')
@@ -152,12 +152,12 @@ def test_needs_sphinx():
@with_tempdir
-def test_config_eol(tmpdir):
+def test_config_eol(tempdir):
# test config file's eol patterns: LF, CRLF
- configfile = tmpdir / 'conf.py'
+ configfile = tempdir / 'conf.py'
for eol in (b'\n', b'\r\n'):
configfile.write_bytes(b'project = "spam"' + eol)
- cfg = Config(tmpdir, 'conf.py', {}, None)
+ cfg = Config(tempdir, 'conf.py', {}, None)
cfg.init_values(lambda warning: 1/0)
assert cfg.project == u'spam'
diff --git a/tests/test_environment.py b/tests/test_environment.py
index f65a6f3f0..232b6e28b 100644
--- a/tests/test_environment.py
+++ b/tests/test_environment.py
@@ -54,7 +54,6 @@ def test_images():
'http://www.python.org/logo.png')
tree = env.get_doctree('images')
- app._warning.reset()
htmlbuilder = StandaloneHTMLBuilder(app)
htmlbuilder.imgpath = 'dummy'
htmlbuilder.post_process_images(tree)
@@ -64,7 +63,6 @@ def test_images():
assert set(htmlbuilder.images.values()) == \
set(['img.png', 'img1.png', 'simg.png', 'svgimg.svg', 'img.foo.png'])
- app._warning.reset()
latexbuilder = LaTeXBuilder(app)
latexbuilder.post_process_images(tree)
assert set(latexbuilder.images.keys()) == \
diff --git a/tests/test_ext_graphviz.py b/tests/test_ext_graphviz.py
index 7d464343f..e59143d37 100644
--- a/tests/test_ext_graphviz.py
+++ b/tests/test_ext_graphviz.py
@@ -10,37 +10,14 @@
"""
import re
-import subprocess
-from functools import wraps
-from util import with_app, SkipTest
-
-
-def skip_if_graphviz_not_found(fn):
- @wraps(fn)
- def decorator(app, *args, **kwargs):
- found = False
- graphviz_dot = getattr(app.config, 'graphviz_dot', '')
- try:
- if graphviz_dot:
- dot = subprocess.Popen([graphviz_dot, '-V'],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE) # show version
- dot.communicate()
- found = True
- except OSError: # No such file or directory
- pass
+import pytest
- if not found:
- raise SkipTest('graphviz "dot" is not available')
-
- return fn(app, *args, **kwargs)
-
- return decorator
+from util import with_app, SkipTest
@with_app('html', testroot='ext-graphviz')
-@skip_if_graphviz_not_found
+@pytest.mark.usefixtures('if_graphviz_found')
def test_graphviz_html(app, status, warning):
app.builder.build_all()
@@ -61,7 +38,7 @@ def test_graphviz_html(app, status, warning):
@with_app('latex', testroot='ext-graphviz')
-@skip_if_graphviz_not_found
+@pytest.mark.usefixtures('if_graphviz_found')
def test_graphviz_latex(app, status, warning):
app.builder.build_all()
@@ -81,7 +58,7 @@ def test_graphviz_latex(app, status, warning):
@with_app('html', testroot='ext-graphviz', confoverrides={'language': 'xx'})
-@skip_if_graphviz_not_found
+@pytest.mark.usefixtures('if_graphviz_found')
def test_graphviz_i18n(app, status, warning):
app.builder.build_all()
diff --git a/tests/test_ext_inheritance_diagram.py b/tests/test_ext_inheritance_diagram.py
index 0171cafe6..fb1d127d9 100644
--- a/tests/test_ext_inheritance_diagram.py
+++ b/tests/test_ext_inheritance_diagram.py
@@ -12,12 +12,12 @@
import re
import sys
from util import with_app, rootdir, raises
-from test_ext_graphviz import skip_if_graphviz_not_found
from sphinx.ext.inheritance_diagram import InheritanceException, import_classes
+import pytest
@with_app('html', testroot='ext-inheritance_diagram')
-@skip_if_graphviz_not_found
+@pytest.mark.usefixtures('if_graphviz_found')
def test_inheritance_diagram_html(app, status, warning):
app.builder.build_all()
@@ -32,7 +32,7 @@ def test_inheritance_diagram_html(app, status, warning):
@with_app('latex', testroot='ext-inheritance_diagram')
-@skip_if_graphviz_not_found
+@pytest.mark.usefixtures('if_graphviz_found')
def test_inheritance_diagram_latex(app, status, warning):
app.builder.build_all()
diff --git a/tests/test_ext_intersphinx.py b/tests/test_ext_intersphinx.py
index e1995e3d3..8de0e2b1e 100644
--- a/tests/test_ext_intersphinx.py
+++ b/tests/test_ext_intersphinx.py
@@ -85,7 +85,7 @@ def test_read_inventory_v2():
@with_app()
@mock.patch('sphinx.ext.intersphinx.read_inventory')
@mock.patch('sphinx.ext.intersphinx._read_from_url')
-def test_fetch_inventory_redirection(app, status, warning, _read_from_url, read_inventory):
+def test_fetch_inventory_redirection(_read_from_url, read_inventory, app, status, warning):
intersphinx_setup(app)
_read_from_url().readline.return_value = '# Sphinx inventory version 2'.encode('utf-8')
diff --git a/tests/test_intl.py b/tests/test_intl.py
index 21d6f763b..f92f44cf4 100644
--- a/tests/test_intl.py
+++ b/tests/test_intl.py
@@ -18,12 +18,11 @@ from docutils import nodes
from subprocess import Popen, PIPE
from babel.messages import pofile
-from nose.tools import assert_equal
from six import string_types
from util import tempdir, rootdir, path, gen_with_app, with_app, SkipTest, \
assert_re_search, assert_not_re_search, assert_in, assert_not_in, \
- assert_startswith, assert_node, repr_as, etree_parse
+ assert_startswith, assert_node, repr_as, etree_parse, assert_equal
root = tempdir / 'test-intl'
diff --git a/tests/test_metadata.py b/tests/test_metadata.py
index 14ab51ccd..2bb1d746c 100644
--- a/tests/test_metadata.py
+++ b/tests/test_metadata.py
@@ -14,8 +14,6 @@
from util import with_app
-from nose.tools import assert_equal
-
@with_app('pseudoxml')
def test_docinfo(app, status, warning):
@@ -53,8 +51,4 @@ def test_docinfo(app, status, warning):
'orphan': u'',
'nocomments': u'',
}
- # I like this way of comparing dicts - easier to see the error.
- for key in exampledocinfo:
- yield assert_equal, exampledocinfo.get(key), expecteddocinfo.get(key)
- # but then we still have to check for missing keys
- yield assert_equal, set(expecteddocinfo.keys()), set(exampledocinfo.keys())
+ assert exampledocinfo == expecteddocinfo
diff --git a/tests/test_setup_command.py b/tests/test_setup_command.py
index c92f6220f..c7874eb25 100644
--- a/tests/test_setup_command.py
+++ b/tests/test_setup_command.py
@@ -12,12 +12,13 @@
import os
import sys
import subprocess
-from functools import wraps
-import tempfile
+from collections import namedtuple
import sphinx
-from util import rootdir, tempdir, SkipTest
-from path import path
+import pytest
+
+from sphinx.util.osutil import cd
+from util import rootdir, tempdir
from textwrap import dedent
root = tempdir / 'test-setup'
@@ -28,57 +29,52 @@ def setup_module():
(rootdir / 'roots' / 'test-setup').copytree(root)
-def with_setup_command(root, *args, **kwds):
+@pytest.fixture
+def setup_command(request, tempdir):
"""
Run `setup.py build_sphinx` with args and kwargs,
pass it to the test and clean up properly.
"""
- def generator(func):
- @wraps(func)
- def deco(*args2, **kwargs2):
- tempdir = path(tempfile.mkdtemp())
- pkgrootdir = (tempdir / 'root')
- root.copytree(pkgrootdir)
- cwd = os.getcwd()
- os.chdir(pkgrootdir)
- pythonpath = os.path.dirname(os.path.dirname(sphinx.__file__))
- if os.getenv('PYTHONPATH'):
- pythonpath = os.getenv('PYTHONPATH') + os.pathsep + pythonpath
- command = [sys.executable, 'setup.py', 'build_sphinx']
- command.extend(args)
- try:
- proc = subprocess.Popen(
- command,
- env=dict(os.environ, PYTHONPATH=pythonpath),
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- func(pkgrootdir, proc)
- finally:
- tempdir.rmtree(ignore_errors=True)
- os.chdir(cwd)
- return deco
- return generator
-
-
-@with_setup_command(root)
-def test_build_sphinx(pkgroot, proc):
+ marker = request.node.get_marker('setup_command')
+ args = marker.args if marker else []
+
+ pkgrootdir = tempdir / 'root'
+ root.copytree(pkgrootdir)
+
+ with cd(pkgrootdir):
+ pythonpath = os.path.dirname(os.path.dirname(sphinx.__file__))
+ if os.getenv('PYTHONPATH'):
+ pythonpath = os.getenv('PYTHONPATH') + os.pathsep + pythonpath
+ command = [sys.executable, 'setup.py', 'build_sphinx']
+ command.extend(args)
+
+ proc = subprocess.Popen(
+ command,
+ env=dict(os.environ, PYTHONPATH=pythonpath),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ yield namedtuple('setup', 'pkgroot,proc')(pkgrootdir, proc)
+
+
+def test_build_sphinx(setup_command):
+ proc = setup_command.proc
out, err = proc.communicate()
print(out)
print(err)
assert proc.returncode == 0
-@with_setup_command(root)
-def test_build_sphinx_with_nonascii_path(pkgroot, proc):
+@pytest.fixture
+def nonascii_srcdir(request, setup_command):
mb_name = u'\u65e5\u672c\u8a9e'
- srcdir = (pkgroot / 'doc')
+ srcdir = (setup_command.pkgroot / 'doc')
try:
(srcdir / mb_name).makedirs()
except UnicodeEncodeError:
from path import FILESYSTEMENCODING
- raise SkipTest(
+ pytest.skip(
'non-ASCII filename not supported on this filesystem encoding: '
- '%s', FILESYSTEMENCODING)
+ '%s' % FILESYSTEMENCODING)
(srcdir / mb_name / (mb_name + '.txt')).write_text(dedent("""
multi byte file name page
@@ -91,41 +87,46 @@ def test_build_sphinx_with_nonascii_path(pkgroot, proc):
%(mb_name)s/%(mb_name)s
""" % locals())
- ).encode('utf-8'))
+ ).encode('utf-8'))
+
+def test_build_sphinx_with_nonascii_path(setup_command, nonascii_srcdir):
+ proc = setup_command.proc
out, err = proc.communicate()
print(out)
print(err)
assert proc.returncode == 0
-@with_setup_command(root, '-b', 'linkcheck')
-def test_build_sphinx_return_nonzero_status(pkgroot, proc):
- srcdir = (pkgroot / 'doc')
+@pytest.mark.setup_command('-b', 'linkcheck')
+def test_build_sphinx_return_nonzero_status(setup_command):
+ srcdir = (setup_command.pkgroot / 'doc')
(srcdir / 'contents.txt').write_text(
'http://localhost.unexistentdomain/index.html')
+ proc = setup_command.proc
out, err = proc.communicate()
print(out)
print(err)
assert proc.returncode != 0, 'expect non-zero status for setup.py'
-@with_setup_command(root)
-def test_build_sphinx_warning_return_zero_status(pkgroot, proc):
- srcdir = (pkgroot / 'doc')
+def test_build_sphinx_warning_return_zero_status(setup_command):
+ srcdir = (setup_command.pkgroot / 'doc')
(srcdir / 'contents.txt').write_text(
'See :ref:`unexisting-reference-label`')
+ proc = setup_command.proc
out, err = proc.communicate()
print(out)
print(err)
assert proc.returncode == 0
-@with_setup_command(root, '--warning-is-error')
-def test_build_sphinx_warning_is_error_return_nonzero_status(pkgroot, proc):
- srcdir = (pkgroot / 'doc')
+@pytest.mark.setup_command('--warning-is-error')
+def test_build_sphinx_warning_is_error_return_nonzero_status(setup_command):
+ srcdir = (setup_command.pkgroot / 'doc')
(srcdir / 'contents.txt').write_text(
'See :ref:`unexisting-reference-label`')
+ proc = setup_command.proc
out, err = proc.communicate()
print(out)
print(err)
diff --git a/tests/test_theming.py b/tests/test_theming.py
index b62cbcd72..bb27f9677 100644
--- a/tests/test_theming.py
+++ b/tests/test_theming.py
@@ -83,7 +83,8 @@ def test_js_source(app, status, warning):
assert 'Underscore.js {v}'.format(v=v) in underscore_src, msg
-def test_double_inheriting_theme():
+@with_app(testroot='double-inheriting-theme')
+def test_double_inheriting_theme(make_app, app_params):
from sphinx.theming import load_theme_plugins # load original before patching
def load_themes():
@@ -92,8 +93,6 @@ def test_double_inheriting_theme():
for t in load_theme_plugins():
yield t
- @mock.patch('sphinx.theming.load_theme_plugins', side_effect=load_themes)
- @with_app(testroot='double-inheriting-theme')
- def test_double_inheriting_theme_(app, status, warning, m_):
- pass
- yield test_double_inheriting_theme_
+ with mock.patch('sphinx.theming.load_theme_plugins', side_effect=load_themes):
+ args, kwargs = app_params
+ make_app(*args, **kwargs)
diff --git a/tests/test_util_fileutil.py b/tests/test_util_fileutil.py
index 5810dd2a8..ef0af07fc 100644
--- a/tests/test_util_fileutil.py
+++ b/tests/test_util_fileutil.py
@@ -25,32 +25,32 @@ class DummyTemplateLoader(BuiltinTemplateLoader):
@with_tempdir
-def test_copy_asset_file(tmpdir):
+def test_copy_asset_file(tempdir):
renderer = DummyTemplateLoader()
# copy normal file
- src = (tmpdir / 'asset.txt')
+ src = (tempdir / 'asset.txt')
src.write_text('# test data')
- dest = (tmpdir / 'output.txt')
+ dest = (tempdir / 'output.txt')
copy_asset_file(src, dest)
assert dest.exists()
assert src.text() == dest.text()
# copy template file
- src = (tmpdir / 'asset.txt_t')
+ src = (tempdir / 'asset.txt_t')
src.write_text('# {{var1}} data')
- dest = (tmpdir / 'output.txt_t')
+ dest = (tempdir / 'output.txt_t')
copy_asset_file(src, dest, {'var1': 'template'}, renderer)
assert not dest.exists()
- assert (tmpdir / 'output.txt').exists()
- assert (tmpdir / 'output.txt').text() == '# template data'
+ assert (tempdir / 'output.txt').exists()
+ assert (tempdir / 'output.txt').text() == '# template data'
# copy template file to subdir
- src = (tmpdir / 'asset.txt_t')
+ src = (tempdir / 'asset.txt_t')
src.write_text('# {{var1}} data')
- subdir1 = (tmpdir / 'subdir')
+ subdir1 = (tempdir / 'subdir')
subdir1.makedirs()
copy_asset_file(src, subdir1, {'var1': 'template'}, renderer)
@@ -58,8 +58,8 @@ def test_copy_asset_file(tmpdir):
assert (subdir1 / 'asset.txt').text() == '# template data'
# copy template file without context
- src = (tmpdir / 'asset.txt_t')
- subdir2 = (tmpdir / 'subdir2')
+ src = (tempdir / 'asset.txt_t')
+ subdir2 = (tempdir / 'subdir2')
subdir2.makedirs()
copy_asset_file(src, subdir2)
@@ -69,11 +69,11 @@ def test_copy_asset_file(tmpdir):
@with_tempdir
-def test_copy_asset(tmpdir):
+def test_copy_asset(tempdir):
renderer = DummyTemplateLoader()
# prepare source files
- source = (tmpdir / 'source')
+ source = (tempdir / 'source')
source.makedirs()
(source / 'index.rst').write_text('index.rst')
(source / 'foo.rst_t').write_text('{{var1}}.rst')
@@ -84,13 +84,13 @@ def test_copy_asset(tmpdir):
(source / '_templates' / 'sidebar.html_t').write_text('sidebar: {{var2}}')
# copy a single file
- assert not (tmpdir / 'test1').exists()
- copy_asset(source / 'index.rst', tmpdir / 'test1')
- assert (tmpdir / 'test1').exists()
- assert (tmpdir / 'test1/index.rst').exists()
+ assert not (tempdir / 'test1').exists()
+ copy_asset(source / 'index.rst', tempdir / 'test1')
+ assert (tempdir / 'test1').exists()
+ assert (tempdir / 'test1/index.rst').exists()
# copy directories
- destdir = tmpdir / 'test2'
+ destdir = tempdir / 'test2'
copy_asset(source, destdir, context=dict(var1='bar', var2='baz'), renderer=renderer)
assert (destdir / 'index.rst').exists()
assert (destdir / 'foo.rst').exists()
@@ -104,7 +104,7 @@ def test_copy_asset(tmpdir):
def excluded(path):
return ('sidebar.html' in path or 'basic.css' in path)
- destdir = tmpdir / 'test3'
+ destdir = tempdir / 'test3'
copy_asset(source, destdir, excluded,
context=dict(var1='bar', var2='baz'), renderer=renderer)
assert (destdir / 'index.rst').exists()
diff --git a/tests/test_util_i18n.py b/tests/test_util_i18n.py
index 849796a8f..8738862c4 100644
--- a/tests/test_util_i18n.py
+++ b/tests/test_util_i18n.py
@@ -38,12 +38,12 @@ def test_catalog_info_for_sub_domain_file_and_path():
@with_tempdir
-def test_catalog_outdated(dir):
- (dir / 'test.po').write_text('#')
- cat = i18n.CatalogInfo(dir, 'test', 'utf-8')
+def test_catalog_outdated(tempdir):
+ (tempdir / 'test.po').write_text('#')
+ cat = i18n.CatalogInfo(tempdir, 'test', 'utf-8')
assert cat.is_outdated() # if mo is not exist
- mo_file = (dir / 'test.mo')
+ mo_file = (tempdir / 'test.mo')
mo_file.write_text('#')
assert not cat.is_outdated() # if mo is exist and newer than po
@@ -52,9 +52,9 @@ def test_catalog_outdated(dir):
@with_tempdir
-def test_catalog_write_mo(dir):
- (dir / 'test.po').write_text('#')
- cat = i18n.CatalogInfo(dir, 'test', 'utf-8')
+def test_catalog_write_mo(tempdir):
+ (tempdir / 'test.po').write_text('#')
+ cat = i18n.CatalogInfo(tempdir, 'test', 'utf-8')
cat.write_mo('en')
assert path.exists(cat.mo_path)
with open(cat.mo_path, 'rb') as f:
@@ -62,20 +62,20 @@ def test_catalog_write_mo(dir):
@with_tempdir
-def test_get_catalogs_for_xx(dir):
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES').makedirs()
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test1.po').write_text('#')
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test2.po').write_text('#')
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test3.pot').write_text('#')
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub').makedirs()
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub' / 'test4.po').write_text('#')
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub' / 'test5.po').write_text('#')
- (dir / 'loc1' / 'en' / 'LC_MESSAGES').makedirs()
- (dir / 'loc1' / 'en' / 'LC_MESSAGES' / 'test6.po').write_text('#')
- (dir / 'loc1' / 'xx' / 'LC_ALL').makedirs()
- (dir / 'loc1' / 'xx' / 'LC_ALL' / 'test7.po').write_text('#')
-
- catalogs = i18n.find_catalog_source_files([dir / 'loc1'], 'xx', force_all=False)
+def test_get_catalogs_for_xx(tempdir):
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES').makedirs()
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test1.po').write_text('#')
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test2.po').write_text('#')
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test3.pot').write_text('#')
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub').makedirs()
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub' / 'test4.po').write_text('#')
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub' / 'test5.po').write_text('#')
+ (tempdir / 'loc1' / 'en' / 'LC_MESSAGES').makedirs()
+ (tempdir / 'loc1' / 'en' / 'LC_MESSAGES' / 'test6.po').write_text('#')
+ (tempdir / 'loc1' / 'xx' / 'LC_ALL').makedirs()
+ (tempdir / 'loc1' / 'xx' / 'LC_ALL' / 'test7.po').write_text('#')
+
+ catalogs = i18n.find_catalog_source_files([tempdir / 'loc1'], 'xx', force_all=False)
domains = set(c.domain for c in catalogs)
assert domains == set([
'test1',
@@ -86,23 +86,23 @@ def test_get_catalogs_for_xx(dir):
@with_tempdir
-def test_get_catalogs_for_en(dir):
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES').makedirs()
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'xx_dom.po').write_text('#')
- (dir / 'loc1' / 'en' / 'LC_MESSAGES').makedirs()
- (dir / 'loc1' / 'en' / 'LC_MESSAGES' / 'en_dom.po').write_text('#')
+def test_get_catalogs_for_en(tempdir):
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES').makedirs()
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'xx_dom.po').write_text('#')
+ (tempdir / 'loc1' / 'en' / 'LC_MESSAGES').makedirs()
+ (tempdir / 'loc1' / 'en' / 'LC_MESSAGES' / 'en_dom.po').write_text('#')
- catalogs = i18n.find_catalog_source_files([dir / 'loc1'], 'en', force_all=False)
+ catalogs = i18n.find_catalog_source_files([tempdir / 'loc1'], 'en', force_all=False)
domains = set(c.domain for c in catalogs)
assert domains == set(['en_dom'])
@with_tempdir
-def test_get_catalogs_with_non_existent_locale(dir):
- catalogs = i18n.find_catalog_source_files([dir / 'loc1'], 'xx')
+def test_get_catalogs_with_non_existent_locale(tempdir):
+ catalogs = i18n.find_catalog_source_files([tempdir / 'loc1'], 'xx')
assert not catalogs
- catalogs = i18n.find_catalog_source_files([dir / 'loc1'], None)
+ catalogs = i18n.find_catalog_source_files([tempdir / 'loc1'], None)
assert not catalogs
@@ -112,24 +112,24 @@ def test_get_catalogs_with_non_existent_locale_dirs():
@with_tempdir
-def test_get_catalogs_for_xx_without_outdated(dir):
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES').makedirs()
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test1.po').write_text('#')
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test1.mo').write_text('#')
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test2.po').write_text('#')
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test2.mo').write_text('#')
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test3.pot').write_text('#')
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test3.mo').write_text('#')
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub').makedirs()
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub' / 'test4.po').write_text('#')
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub' / 'test4.mo').write_text('#')
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub' / 'test5.po').write_text('#')
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub' / 'test5.mo').write_text('#')
-
- catalogs = i18n.find_catalog_source_files([dir / 'loc1'], 'xx', force_all=False)
+def test_get_catalogs_for_xx_without_outdated(tempdir):
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES').makedirs()
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test1.po').write_text('#')
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test1.mo').write_text('#')
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test2.po').write_text('#')
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test2.mo').write_text('#')
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test3.pot').write_text('#')
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test3.mo').write_text('#')
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub').makedirs()
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub' / 'test4.po').write_text('#')
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub' / 'test4.mo').write_text('#')
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub' / 'test5.po').write_text('#')
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub' / 'test5.mo').write_text('#')
+
+ catalogs = i18n.find_catalog_source_files([tempdir / 'loc1'], 'xx', force_all=False)
assert not catalogs
- catalogs = i18n.find_catalog_source_files([dir / 'loc1'], 'xx', force_all=True)
+ catalogs = i18n.find_catalog_source_files([tempdir / 'loc1'], 'xx', force_all=True)
domains = set(c.domain for c in catalogs)
assert domains == set([
'test1',
@@ -140,28 +140,28 @@ def test_get_catalogs_for_xx_without_outdated(dir):
@with_tempdir
-def test_get_catalogs_from_multiple_locale_dirs(dir):
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES').makedirs()
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test1.po').write_text('#')
- (dir / 'loc2' / 'xx' / 'LC_MESSAGES').makedirs()
- (dir / 'loc2' / 'xx' / 'LC_MESSAGES' / 'test1.po').write_text('#')
- (dir / 'loc2' / 'xx' / 'LC_MESSAGES' / 'test2.po').write_text('#')
-
- catalogs = i18n.find_catalog_source_files([dir / 'loc1', dir / 'loc2'], 'xx')
+def test_get_catalogs_from_multiple_locale_dirs(tempdir):
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES').makedirs()
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test1.po').write_text('#')
+ (tempdir / 'loc2' / 'xx' / 'LC_MESSAGES').makedirs()
+ (tempdir / 'loc2' / 'xx' / 'LC_MESSAGES' / 'test1.po').write_text('#')
+ (tempdir / 'loc2' / 'xx' / 'LC_MESSAGES' / 'test2.po').write_text('#')
+
+ catalogs = i18n.find_catalog_source_files([tempdir / 'loc1', tempdir / 'loc2'], 'xx')
domains = sorted(c.domain for c in catalogs)
assert domains == ['test1', 'test1', 'test2']
@with_tempdir
-def test_get_catalogs_with_compact(dir):
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES').makedirs()
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test1.po').write_text('#')
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test2.po').write_text('#')
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub').makedirs()
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub' / 'test3.po').write_text('#')
- (dir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub' / 'test4.po').write_text('#')
-
- catalogs = i18n.find_catalog_source_files([dir / 'loc1'], 'xx', gettext_compact=True)
+def test_get_catalogs_with_compact(tempdir):
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES').makedirs()
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test1.po').write_text('#')
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'test2.po').write_text('#')
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub').makedirs()
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub' / 'test3.po').write_text('#')
+ (tempdir / 'loc1' / 'xx' / 'LC_MESSAGES' / 'sub' / 'test4.po').write_text('#')
+
+ catalogs = i18n.find_catalog_source_files([tempdir / 'loc1'], 'xx', gettext_compact=True)
domains = set(c.domain for c in catalogs)
assert domains == set(['test1', 'test2', 'sub'])
diff --git a/tests/test_websupport.py b/tests/test_websupport.py
index bb41ae1ab..af9ed91ee 100644
--- a/tests/test_websupport.py
+++ b/tests/test_websupport.py
@@ -9,10 +9,6 @@
:license: BSD, see LICENSE for details.
"""
-from functools import wraps
-
-from six import StringIO
-
from sphinx.websupport import WebSupport
from sphinx.websupport.errors import DocumentNotFoundError, \
CommentNotAllowedError, UserNotAuthorizedError
@@ -26,26 +22,28 @@ try:
except ImportError:
sqlalchemy_missing = True
+import pytest
from util import rootdir, tempdir, raises, skip_if
-default_settings = {'builddir': tempdir / 'websupport',
- 'status': StringIO(),
- 'warning': StringIO()}
+@pytest.fixture
+def support(request):
+ settings = {
+ 'srcdir': rootdir / 'root',
+ # to use same directory for 'builddir' in each 'support' fixture, using
+ # 'tempdir' (static) value instead of 'tempdir' fixture value.
+ # each test expect result of db value at previous test case.
+ 'builddir': tempdir / 'websupport'
+ }
+ marker = request.node.get_marker('support')
+ if marker:
+ settings.update(marker.kwargs)
+ support = WebSupport(**settings)
+ yield support
-def with_support(*args, **kwargs):
- """Make a WebSupport object and pass it the test."""
- settings = default_settings.copy()
- settings.update(kwargs)
- def generator(func):
- @wraps(func)
- def new_func(*args2, **kwargs2):
- support = WebSupport(**settings)
- func(support, *args2, **kwargs2)
- return new_func
- return generator
+with_support = pytest.mark.support
class NullStorage(StorageBackend):
@@ -59,7 +57,7 @@ def test_no_srcdir(support):
@skip_if(sqlalchemy_missing, 'needs sqlalchemy')
-@with_support(srcdir=rootdir / 'root')
+@with_support()
def test_build(support):
support.build()
@@ -125,56 +123,6 @@ def test_comments(support):
@skip_if(sqlalchemy_missing, 'needs sqlalchemy')
@with_support()
-def test_voting(support):
- session = Session()
- nodes = session.query(Node).all()
- node = nodes[0]
-
- comment = support.get_data(node.id)['comments'][0]
-
- def check_rating(val):
- data = support.get_data(node.id)
- comment = data['comments'][0]
- assert comment['rating'] == val, '%s != %s' % (comment['rating'], val)
-
- support.process_vote(comment['id'], 'user_one', '1')
- support.process_vote(comment['id'], 'user_two', '1')
- support.process_vote(comment['id'], 'user_three', '1')
- check_rating(3)
- support.process_vote(comment['id'], 'user_one', '-1')
- check_rating(1)
- support.process_vote(comment['id'], 'user_one', '0')
- check_rating(2)
-
- # Make sure a vote with value > 1 or < -1 can't be cast.
- raises(ValueError, support.process_vote, comment['id'], 'user_one', '2')
- raises(ValueError, support.process_vote, comment['id'], 'user_one', '-2')
-
- # Make sure past voting data is associated with comments when they are
- # fetched.
- data = support.get_data(str(node.id), username='user_two')
- comment = data['comments'][0]
- assert comment['vote'] == 1, '%s != 1' % comment['vote']
-
-
-@skip_if(sqlalchemy_missing, 'needs sqlalchemy')
-@with_support()
-def test_proposals(support):
- session = Session()
- node = session.query(Node).first()
-
- data = support.get_data(node.id)
-
- source = data['source']
- proposal = source[:5] + source[10:15] + 'asdf' + source[15:]
-
- support.add_comment('Proposal comment',
- node_id=node.id,
- proposal=proposal)
-
-
-@skip_if(sqlalchemy_missing, 'needs sqlalchemy')
-@with_support()
def test_user_delete_comments(support):
def get_comment():
session = Session()
@@ -194,6 +142,38 @@ def test_user_delete_comments(support):
assert comment['text'] == '[deleted]'
+called = False
+
+
+def moderation_callback(comment):
+ global called
+ called = True
+
+
+@skip_if(sqlalchemy_missing, 'needs sqlalchemy')
+@with_support(moderation_callback=moderation_callback)
+def test_moderation(support):
+ session = Session()
+ nodes = session.query(Node).all()
+ node = nodes[7]
+ session.close()
+ accepted = support.add_comment('Accepted Comment', node_id=node.id,
+ displayed=False)
+ deleted = support.add_comment('Comment to delete', node_id=node.id,
+ displayed=False)
+ # Make sure the moderation_callback is called.
+ assert called
+ # Make sure the user must be a moderator.
+ raises(UserNotAuthorizedError, support.accept_comment, accepted['id'])
+ raises(UserNotAuthorizedError, support.delete_comment, deleted['id'])
+ support.accept_comment(accepted['id'], moderator=True)
+ support.delete_comment(deleted['id'], moderator=True)
+ comments = support.get_data(node.id)['comments']
+ assert len(comments) == 1
+ comments = support.get_data(node.id, moderator=True)['comments']
+ assert len(comments) == 1
+
+
@skip_if(sqlalchemy_missing, 'needs sqlalchemy')
@with_support()
def test_moderator_delete_comments(support):
@@ -228,36 +208,54 @@ def test_update_username(support):
assert len(votes) == 0
-called = False
+@skip_if(sqlalchemy_missing, 'needs sqlalchemy')
+@with_support()
+def test_proposals(support):
+ session = Session()
+ node = session.query(Node).first()
+ data = support.get_data(node.id)
-def moderation_callback(comment):
- global called
- called = True
+ source = data['source']
+ proposal = source[:5] + source[10:15] + 'asdf' + source[15:]
+
+ support.add_comment('Proposal comment',
+ node_id=node.id,
+ proposal=proposal)
@skip_if(sqlalchemy_missing, 'needs sqlalchemy')
-@with_support(moderation_callback=moderation_callback)
-def test_moderation(support):
+@with_support()
+def test_voting(support):
session = Session()
nodes = session.query(Node).all()
- node = nodes[7]
- session.close()
- accepted = support.add_comment('Accepted Comment', node_id=node.id,
- displayed=False)
- deleted = support.add_comment('Comment to delete', node_id=node.id,
- displayed=False)
- # Make sure the moderation_callback is called.
- assert called
- # Make sure the user must be a moderator.
- raises(UserNotAuthorizedError, support.accept_comment, accepted['id'])
- raises(UserNotAuthorizedError, support.delete_comment, deleted['id'])
- support.accept_comment(accepted['id'], moderator=True)
- support.delete_comment(deleted['id'], moderator=True)
- comments = support.get_data(node.id)['comments']
- assert len(comments) == 1
- comments = support.get_data(node.id, moderator=True)['comments']
- assert len(comments) == 1
+ node = nodes[0]
+
+ comment = support.get_data(node.id)['comments'][0]
+
+ def check_rating(val):
+ data = support.get_data(node.id)
+ comment = data['comments'][0]
+ assert comment['rating'] == val, '%s != %s' % (comment['rating'], val)
+
+ support.process_vote(comment['id'], 'user_one', '1')
+ support.process_vote(comment['id'], 'user_two', '1')
+ support.process_vote(comment['id'], 'user_three', '1')
+ check_rating(3)
+ support.process_vote(comment['id'], 'user_one', '-1')
+ check_rating(1)
+ support.process_vote(comment['id'], 'user_one', '0')
+ check_rating(2)
+
+ # Make sure a vote with value > 1 or < -1 can't be cast.
+ raises(ValueError, support.process_vote, comment['id'], 'user_one', '2')
+ raises(ValueError, support.process_vote, comment['id'], 'user_one', '-2')
+
+ # Make sure past voting data is associated with comments when they are
+ # fetched.
+ data = support.get_data(str(node.id), username='user_two')
+ comment = data['comments'][0]
+ assert comment['vote'] == 1, '%s != 1' % comment['vote']
def test_differ():
diff --git a/tests/util.py b/tests/util.py
index 13366a1da..de158cc7f 100644
--- a/tests/util.py
+++ b/tests/util.py
@@ -10,14 +10,13 @@
import os
import re
import sys
-import tempfile
import warnings
from functools import wraps
from xml.etree import ElementTree
-from six import StringIO, string_types
+from six import string_types
-from nose import tools, SkipTest
+import pytest
from docutils import nodes
from docutils.parsers.rst import directives, roles
@@ -27,16 +26,17 @@ from sphinx.builders.latex import LaTeXBuilder
from sphinx.theming import Theme
from sphinx.ext.autodoc import AutoDirective
from sphinx.pycode import ModuleAnalyzer
+from sphinx.deprecation import RemovedInSphinx17Warning
from path import path, repr_as # NOQA
__all__ = [
- 'rootdir', 'tempdir', 'raises', 'raises_msg',
- 'skip_if', 'skip_unless', 'skip_unless_importable', 'Struct',
- 'ListOutput', 'TestApp', 'with_app', 'gen_with_app',
- 'path', 'with_tempdir',
- 'sprint', 'remove_unicode_literals',
+ 'rootdir', 'tempdir',
+ 'skip_unless_importable', 'Struct',
+ 'SphinxTestApp',
+ 'path',
+ 'remove_unicode_literals',
]
@@ -44,36 +44,6 @@ rootdir = path(os.path.dirname(__file__) or '.').abspath()
tempdir = path(os.environ['SPHINX_TEST_TEMPDIR']).abspath()
-def _excstr(exc):
- if type(exc) is tuple:
- return str(tuple(map(_excstr, exc)))
- return exc.__name__
-
-
-def raises(exc, func, *args, **kwds):
- """Raise AssertionError if ``func(*args, **kwds)`` does not raise *exc*."""
- try:
- func(*args, **kwds)
- except exc:
- pass
- else:
- raise AssertionError('%s did not raise %s' %
- (func.__name__, _excstr(exc)))
-
-
-def raises_msg(exc, msg, func, *args, **kwds):
- """Raise AssertionError if ``func(*args, **kwds)`` does not raise *exc*,
- and check if the message contains *msg*.
- """
- try:
- func(*args, **kwds)
- except exc as err:
- assert msg in str(err), "\"%s\" not in \"%s\"" % (msg, err)
- else:
- raise AssertionError('%s did not raise %s' %
- (func.__name__, _excstr(exc)))
-
-
def assert_re_search(regex, text, flags=0):
if not re.search(regex, text, flags):
assert False, '%r did not match %r' % (regex, text)
@@ -118,43 +88,14 @@ def assert_node(node, cls=None, xpath="", **kwargs):
'The node%s[%s] is not %r: %r' % (xpath, key, value, node[key])
-try:
- from nose.tools import assert_in, assert_not_in
-except ImportError:
- def assert_in(x, thing, msg=''):
- if x not in thing:
- assert False, msg or '%r is not in %r' % (x, thing)
-
- def assert_not_in(x, thing, msg=''):
- if x in thing:
- assert False, msg or '%r is in %r' % (x, thing)
-
-
-def skip_if(condition, msg=None):
- """Decorator to skip test if condition is true."""
- def deco(test):
- @tools.make_decorator(test)
- def skipper(*args, **kwds):
- if condition:
- raise SkipTest(msg or 'conditional skip')
- return test(*args, **kwds)
- return skipper
- return deco
-
-
-def skip_unless(condition, msg=None):
- """Decorator to skip test if condition is false."""
- return skip_if(not condition, msg)
-
-
def skip_unless_importable(module, msg=None):
"""Decorator to skip test if module is not importable."""
try:
__import__(module)
except ImportError:
- return skip_if(True, msg)
+ return pytest.mark.skipif(True, reason=(msg or 'conditional skip'))
else:
- return skip_if(False, msg)
+ return pytest.mark.skipif(False, reason=(msg or 'conditional skip'))
def etree_parse(path):
@@ -168,22 +109,7 @@ class Struct(object):
self.__dict__.update(kwds)
-class ListOutput(object):
- """
- File-like object that collects written text in a list.
- """
- def __init__(self, name):
- self.name = name
- self.content = []
-
- def reset(self):
- del self.content[:]
-
- def write(self, text):
- self.content.append(text)
-
-
-class TestApp(application.Sphinx):
+class SphinxTestApp(application.Sphinx):
"""
A subclass of :class:`Sphinx` that runs on the test root, with some
better default values for the initialization parameters.
@@ -222,10 +148,6 @@ class TestApp(application.Sphinx):
doctreedir.makedirs()
if confoverrides is None:
confoverrides = {}
- if status is None:
- status = StringIO()
- if warning is None:
- warning = ListOutput('stderr')
# if warningiserror is None:
warningiserror = False
@@ -263,30 +185,38 @@ class TestApp(application.Sphinx):
return '<%s buildername=%r>' % (self.__class__.__name__, self.builder.name)
-def with_app(*args, **kwargs):
- """
- Make a TestApp with args and kwargs, pass it to the test and clean up
- properly.
- """
- def generator(func):
- @wraps(func)
- def deco(*args2, **kwargs2):
- status, warning = StringIO(), StringIO()
- kwargs['status'] = status
- kwargs['warning'] = warning
- app = TestApp(*args, **kwargs)
- try:
- func(app, status, warning, *args2, **kwargs2)
- finally:
- app.cleanup()
- return deco
- return generator
+_unicode_literals_re = re.compile(r'u(".*?")|u(\'.*?\')')
+
+
+def remove_unicode_literals(s):
+ return _unicode_literals_re.sub(lambda x: x.group(1) or x.group(2), s)
+
+
+def find_files(root, suffix=None):
+ for dirpath, dirs, files in os.walk(root, followlinks=True):
+ dirpath = path(dirpath)
+ for f in [f for f in files if not suffix or f.endswith(suffix)]:
+ fpath = dirpath / f
+ yield os.path.relpath(fpath, root)
+
+
+def strip_escseq(text):
+ return re.sub('\x1b.*?m', '', text)
+
+
+# #############################################
+# DEPRECATED implementations
+
+import tempfile
+from six import StringIO
def gen_with_app(*args, **kwargs):
"""
- Decorate a test generator to pass a TestApp as the first argument to the
- test generator when it's executed.
+ **DEPRECATED**: use pytest.mark.parametrize instead.
+
+ Decorate a test generator to pass a SphinxTestApp as the first argument to
+ the test generator when it's executed.
"""
def generator(func):
@wraps(func)
@@ -294,7 +224,7 @@ def gen_with_app(*args, **kwargs):
status, warning = StringIO(), StringIO()
kwargs['status'] = status
kwargs['warning'] = warning
- app = TestApp(*args, **kwargs)
+ app = SphinxTestApp(*args, **kwargs)
try:
for item in func(app, status, warning, *args2, **kwargs2):
yield item
@@ -304,32 +234,131 @@ def gen_with_app(*args, **kwargs):
return generator
+def skip_if(condition, msg=None):
+ """
+ **DEPRECATED**: use pytest.mark.skipif instead.
+
+ Decorator to skip test if condition is true.
+ """
+ return pytest.mark.skipif(condition, reason=(msg or 'conditional skip'))
+
+
+def skip_unless(condition, msg=None):
+ """
+ **DEPRECATED**: use pytest.mark.skipif instead.
+
+ Decorator to skip test if condition is false.
+ """
+ return pytest.mark.skipif(not condition, reason=(msg or 'conditional skip'))
+
+
def with_tempdir(func):
- def new_func(*args, **kwds):
- new_tempdir = path(tempfile.mkdtemp(dir=tempdir))
- func(new_tempdir, *args, **kwds)
- new_func.__name__ = func.__name__
- return new_func
+ """
+ **DEPRECATED**: use tempdir fixture instead.
+ """
+ return func
-def sprint(*args):
- sys.stderr.write(' '.join(map(str, args)) + '\n')
+def raises(exc, func, *args, **kwds):
+ """
+ **DEPRECATED**: use pytest.raises instead.
+ Raise AssertionError if ``func(*args, **kwds)`` does not raise *exc*.
+ """
+ with pytest.raises(exc):
+ func(*args, **kwds)
-_unicode_literals_re = re.compile(r'u(".*?")|u(\'.*?\')')
+def raises_msg(exc, msg, func, *args, **kwds):
+ """
+ **DEPRECATED**: use pytest.raises instead.
-def remove_unicode_literals(s):
- return _unicode_literals_re.sub(lambda x: x.group(1) or x.group(2), s)
+ Raise AssertionError if ``func(*args, **kwds)`` does not raise *exc*,
+ and check if the message contains *msg*.
+ """
+ with pytest.raises(exc) as excinfo:
+ func(*args, **kwds)
+ assert msg in str(excinfo.value)
-def find_files(root, suffix=None):
- for dirpath, dirs, files in os.walk(root, followlinks=True):
- dirpath = path(dirpath)
- for f in [f for f in files if not suffix or f.endswith(suffix)]:
- fpath = dirpath / f
- yield os.path.relpath(fpath, root)
+def assert_true(v1, msg=''):
+ """
+ **DEPRECATED**: use assert instead.
+ """
+ assert v1, msg
-def strip_escseq(text):
- return re.sub('\x1b.*?m', '', text)
+def assert_equal(v1, v2, msg=''):
+ """
+ **DEPRECATED**: use assert instead.
+ """
+ assert v1 == v2, msg
+
+
+def assert_in(x, thing, msg=''):
+ """
+ **DEPRECATED**: use assert instead.
+ """
+ if x not in thing:
+ assert False, msg or '%r is not in %r' % (x, thing)
+
+
+def assert_not_in(x, thing, msg=''):
+ """
+ **DEPRECATED**: use assert instead.
+ """
+ if x in thing:
+ assert False, msg or '%r is in %r' % (x, thing)
+
+
+class ListOutput(object):
+ """
+ File-like object that collects written text in a list.
+ """
+ def __init__(self, name):
+ self.name = name
+ self.content = []
+
+ def reset(self):
+ del self.content[:]
+
+ def write(self, text):
+ self.content.append(text)
+
+
+# **DEPRECATED**: use pytest.skip instead.
+SkipTest = pytest.skip.Exception
+
+
+class _DeprecationWrapper(object):
+ def __init__(self, mod, deprecated):
+ self._mod = mod
+ self._deprecated = deprecated
+
+ def __getattr__(self, attr):
+ if attr in self._deprecated:
+ obj, instead = self._deprecated[attr]
+ warnings.warn("tests/util.py::%s is deprecated and will be "
+ "removed in Sphinx 1.7, please use %s instead."
+ % (attr, instead),
+ RemovedInSphinx17Warning, stacklevel=2)
+ return obj
+ return getattr(self._mod, attr)
+
+
+sys.modules[__name__] = _DeprecationWrapper(sys.modules[__name__], dict( # type: ignore
+ with_app=(pytest.mark.sphinx, 'pytest.mark.sphinx'),
+ TestApp=(SphinxTestApp, 'SphinxTestApp'),
+ gen_with_app=(gen_with_app, 'pytest.mark.parametrize'),
+ skip_if=(skip_if, 'pytest.skipif'),
+ skip_unless=(skip_unless, 'pytest.skipif'),
+ with_tempdir=(with_tempdir, 'tmpdir pytest fixture'),
+ raises=(raises, 'pytest.raises'),
+ raises_msg=(raises_msg, 'pytest.raises'),
+ assert_true=(assert_true, 'assert'),
+ assert_equal=(assert_equal, 'assert'),
+ assert_in=(assert_in, 'assert'),
+ assert_not_in=(assert_not_in, 'assert'),
+ ListOutput=(ListOutput, 'StringIO'),
+ SkipTest=(SkipTest, 'pytest.skip'),
+))