author     Anthony Sottile <asottile@umich.edu>  2021-04-18 09:27:41 -0700
committer  GitHub <noreply@github.com>           2021-04-18 09:27:41 -0700
commit     645cd71f571da1cdc42683cf4228b537ddc2685f (patch)
tree       65405b4d3c2c5e1a90fa5eb3d7c8d6b7d4c18c68
parent     a7174759e9a651405ef37db511ac1168e3bbdec5 (diff)
parent     af1668bf04079b1a8db5910a5e3697c7c8db8fc9 (diff)
download   flake8-645cd71f571da1cdc42683cf4228b537ddc2685f.tar.gz
Merge pull request #1319 from PyCQA/format_tests
extend black formatting to tests as well
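
The patch drops the `files: ^src/` restriction on the black pre-commit hook and bumps its line length from 78 to 79, so most of the diff below is black's mechanical rewrite of the test suite: double-quoted strings, exploded argument lists with magic trailing commas, and wrapping at 79 columns. As a minimal illustration, here is the before/after of one hunk from tests/integration/test_aggregator.py further down (not new code, just the two sides of that hunk side by side):

    # before: hand-wrapped, single-quoted
    arguments = ['flake8', '--select',
                 'E11,E34,E402,W,F', '--exclude', 'tests/*']

    # after: black at --line-length=79
    arguments = [
        "flake8",
        "--select",
        "E11,E34,E402,W,F",
        "--exclude",
        "tests/*",
    ]
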
-rw-r--r--  .pre-commit-config.yaml | 9
-rw-r--r--  docs/source/conf.py | 167
-rw-r--r--  example-plugin/setup.py | 40
-rw-r--r--  example-plugin/src/flake8_example_plugin/__init__.py | 4
-rw-r--r--  example-plugin/src/flake8_example_plugin/off_by_default.py | 13
-rw-r--r--  example-plugin/src/flake8_example_plugin/on_by_default.py | 5
-rw-r--r--  setup.py | 2
-rw-r--r--  src/flake8/__init__.py | 4
-rw-r--r--  src/flake8/checker.py | 4
-rw-r--r--  src/flake8/exceptions.py | 4
-rw-r--r--  src/flake8/options/config.py | 4
-rw-r--r--  src/flake8/plugins/manager.py | 3
-rw-r--r--  src/flake8/processor.py | 4
-rw-r--r--  src/flake8/style_guide.py | 10
-rw-r--r--  src/flake8/utils.py | 7
-rw-r--r--  tests/conftest.py | 2
-rw-r--r--  tests/integration/subdir/aplugin.py | 4
-rw-r--r--  tests/integration/test_aggregator.py | 59
-rw-r--r--  tests/integration/test_api_legacy.py | 4
-rw-r--r--  tests/integration/test_checker.py | 210
-rw-r--r--  tests/integration/test_main.py | 200
-rw-r--r--  tests/integration/test_plugins.py | 29
-rw-r--r--  tests/unit/conftest.py | 14
-rw-r--r--  tests/unit/test_application.py | 46
-rw-r--r--  tests/unit/test_base_formatter.py | 107
-rw-r--r--  tests/unit/test_checker_manager.py | 44
-rw-r--r--  tests/unit/test_config_file_finder.py | 126
-rw-r--r--  tests/unit/test_debug.py | 119
-rw-r--r--  tests/unit/test_exceptions.py | 18
-rw-r--r--  tests/unit/test_file_checker.py | 28
-rw-r--r--  tests/unit/test_file_processor.py | 403
-rw-r--r--  tests/unit/test_filenameonly_formatter.py | 15
-rw-r--r--  tests/unit/test_get_local_plugins.py | 14
-rw-r--r--  tests/unit/test_legacy_api.py | 47
-rw-r--r--  tests/unit/test_merged_config_parser.py | 234
-rw-r--r--  tests/unit/test_nothing_formatter.py | 8
-rw-r--r--  tests/unit/test_option.py | 38
-rw-r--r--  tests/unit/test_option_manager.py | 238
-rw-r--r--  tests/unit/test_plugin.py | 81
-rw-r--r--  tests/unit/test_plugin_manager.py | 39
-rw-r--r--  tests/unit/test_plugin_type_manager.py | 107
-rw-r--r--  tests/unit/test_pyflakes_codes.py | 6
-rw-r--r--  tests/unit/test_statistics.py | 79
-rw-r--r--  tests/unit/test_utils.py | 281
-rw-r--r--  tests/unit/test_violation.py | 81
45 files changed, 1649 insertions(+), 1312 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 11243cf..67771a5 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,14 +1,12 @@
-exclude: ^tests/fixtures/example-code/
+exclude: ^tests/fixtures/
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v3.4.0
hooks:
- id: check-yaml
- id: debug-statements
- exclude: ^tests/fixtures/example-code/invalid-syntax.py$
- id: end-of-file-fixer
- id: trailing-whitespace
- exclude: ^tests/fixtures/diffs/
- repo: https://github.com/asottile/reorder_python_imports
rev: v2.4.0
hooks:
@@ -18,8 +16,7 @@ repos:
rev: 20.8b1
hooks:
- id: black
- args: [--line-length=78]
- files: ^src/
+ args: [--line-length=79]
- repo: https://github.com/asottile/pyupgrade
rev: v2.12.0
hooks:
@@ -29,4 +26,4 @@ repos:
rev: v0.812
hooks:
- id: mypy
- exclude: ^(docs/|example-plugin/|tests/fixtures)
+ exclude: ^(docs/|example-plugin/)
diff --git a/docs/source/conf.py b/docs/source/conf.py
index d11dc15..f6a4d4e 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -16,45 +16,45 @@ import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = '1.3'
+needs_sphinx = "1.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
- 'sphinx.ext.autodoc',
- 'sphinx.ext.doctest',
- 'sphinx.ext.extlinks',
- 'sphinx.ext.intersphinx',
- 'sphinx.ext.todo',
- 'sphinx.ext.coverage',
- 'sphinx.ext.viewcode',
- 'sphinx-prompt',
+ "sphinx.ext.autodoc",
+ "sphinx.ext.doctest",
+ "sphinx.ext.extlinks",
+ "sphinx.ext.intersphinx",
+ "sphinx.ext.todo",
+ "sphinx.ext.coverage",
+ "sphinx.ext.viewcode",
+ "sphinx-prompt",
]
# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
-source_suffix = '.rst'
+source_suffix = ".rst"
# The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
# The master toctree document.
-master_doc = 'index'
+master_doc = "index"
# General information about the project.
-project = 'flake8'
-copyright = '2016, Ian Stapleton Cordasco'
-author = 'Ian Stapleton Cordasco'
+project = "flake8"
+copyright = "2016, Ian Stapleton Cordasco"
+author = "Ian Stapleton Cordasco"
import flake8
@@ -80,9 +80,9 @@ language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
-#today = ''
+# today = ''
# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
@@ -90,27 +90,27 @@ exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
-#default_role = None
+# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
-#show_authors = False
+# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
-#keep_warnings = False
+# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
@@ -120,31 +120,31 @@ todo_include_todos = True
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
-html_theme = 'sphinx_rtd_theme'
+html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
-#html_theme_options = {}
+# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
+# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
-#html_title = None
+# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
-#html_logo = None
+# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
-#html_favicon = None
+# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
@@ -154,109 +154,111 @@ html_theme = 'sphinx_rtd_theme'
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
-#html_extra_path = []
+# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
# If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
# If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
# If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
-#html_search_language = 'en'
+# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
-#html_search_options = {'type': 'default'}
+# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
-#html_search_scorer = 'scorer.js'
+# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
-htmlhelp_basename = 'flake8doc'
+htmlhelp_basename = "flake8doc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
-
-# Latex figure (float) alignment
-#'figure_align': 'htbp',
+ # The paper size ('letterpaper' or 'a4paper').
+ #'papersize': 'letterpaper',
+ # The font size ('10pt', '11pt' or '12pt').
+ #'pointsize': '10pt',
+ # Additional stuff for the LaTeX preamble.
+ #'preamble': '',
+ # Latex figure (float) alignment
+ #'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
- (master_doc, 'flake8.tex', 'flake8 Documentation',
- 'Ian Stapleton Cordasco', 'manual'),
+ (
+ master_doc,
+ "flake8.tex",
+ "flake8 Documentation",
+ "Ian Stapleton Cordasco",
+ "manual",
+ ),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
-#latex_logo = None
+# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
# If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False
# If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False
# Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
# If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
@@ -264,12 +266,11 @@ latex_documents = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
- ('manpage', 'flake8', 'Flake8 Command Line Documentation',
- [author], 1)
+ ("manpage", "flake8", "Flake8 Command Line Documentation", [author], 1)
]
# If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
@@ -278,26 +279,32 @@ man_pages = [
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
- ('index', 'Flake8', 'Flake8 Documentation', 'Tarek Ziade',
- 'Flake8', 'Code checking using pycodestyle, pyflakes and mccabe',
- 'Miscellaneous'),
+ (
+ "index",
+ "Flake8",
+ "Flake8 Documentation",
+ "Tarek Ziade",
+ "Flake8",
+ "Code checking using pycodestyle, pyflakes and mccabe",
+ "Miscellaneous",
+ ),
]
# Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
+# texinfo_appendices = []
# If false, no module index is generated.
-#texinfo_domain_indices = True
+# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
+# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
-#texinfo_no_detailmenu = False
+# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
-intersphinx_mapping = {'python': ('https://docs.python.org/3/', None)}
+intersphinx_mapping = {"python": ("https://docs.python.org/3/", None)}
extlinks = {
"issue": ("https://github.com/pycqa/flake8/issues/%s", "#"),
diff --git a/example-plugin/setup.py b/example-plugin/setup.py
index ee125db..70d56fa 100644
--- a/example-plugin/setup.py
+++ b/example-plugin/setup.py
@@ -1,29 +1,29 @@
import setuptools
setuptools.setup(
- name='flake8-example-plugin',
- license='MIT',
- version='1.0.0',
- description='Example plugin to Flake8',
- author='Ian Cordasco',
- author_email='graffatcolmingov@gmail.com',
- url='https://github.com/pycqa/flake8',
- package_dir={'': 'src/'},
- packages=['flake8_example_plugin'],
+ name="flake8-example-plugin",
+ license="MIT",
+ version="1.0.0",
+ description="Example plugin to Flake8",
+ author="Ian Cordasco",
+ author_email="graffatcolmingov@gmail.com",
+ url="https://github.com/pycqa/flake8",
+ package_dir={"": "src/"},
+ packages=["flake8_example_plugin"],
entry_points={
- 'flake8.extension': [
- 'X1 = flake8_example_plugin:ExampleOne',
- 'X2 = flake8_example_plugin:ExampleTwo',
+ "flake8.extension": [
+ "X1 = flake8_example_plugin:ExampleOne",
+ "X2 = flake8_example_plugin:ExampleTwo",
],
},
classifiers=[
- 'Framework :: Flake8',
- 'License :: OSI Approved :: MIT License',
- 'Programming Language :: Python',
- 'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.8',
- 'Programming Language :: Python :: 3.9',
- 'Topic :: Software Development :: Libraries :: Python Modules',
- 'Topic :: Software Development :: Quality Assurance',
+ "Framework :: Flake8",
+ "License :: OSI Approved :: MIT License",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ "Topic :: Software Development :: Quality Assurance",
],
)
diff --git a/example-plugin/src/flake8_example_plugin/__init__.py b/example-plugin/src/flake8_example_plugin/__init__.py
index 420ce73..3f6f163 100644
--- a/example-plugin/src/flake8_example_plugin/__init__.py
+++ b/example-plugin/src/flake8_example_plugin/__init__.py
@@ -3,6 +3,6 @@ from .off_by_default import ExampleTwo
from .on_by_default import ExampleOne
__all__ = (
- 'ExampleOne',
- 'ExampleTwo',
+ "ExampleOne",
+ "ExampleTwo",
)
diff --git a/example-plugin/src/flake8_example_plugin/off_by_default.py b/example-plugin/src/flake8_example_plugin/off_by_default.py
index 50afa81..93dfb38 100644
--- a/example-plugin/src/flake8_example_plugin/off_by_default.py
+++ b/example-plugin/src/flake8_example_plugin/off_by_default.py
@@ -3,8 +3,9 @@
class ExampleTwo:
"""Second Example Plugin."""
- name = 'off-by-default-example-plugin'
- version = '1.0.0'
+
+ name = "off-by-default-example-plugin"
+ version = "1.0.0"
off_by_default = True
@@ -13,5 +14,9 @@ class ExampleTwo:
def run(self):
"""Do nothing."""
- yield (1, 0, 'X200 The off-by-default plugin was enabled',
- 'OffByDefaultPlugin')
+ yield (
+ 1,
+ 0,
+ "X200 The off-by-default plugin was enabled",
+ "OffByDefaultPlugin",
+ )
diff --git a/example-plugin/src/flake8_example_plugin/on_by_default.py b/example-plugin/src/flake8_example_plugin/on_by_default.py
index c748822..d712718 100644
--- a/example-plugin/src/flake8_example_plugin/on_by_default.py
+++ b/example-plugin/src/flake8_example_plugin/on_by_default.py
@@ -3,8 +3,9 @@
class ExampleOne:
"""First Example Plugin."""
- name = 'on-by-default-example-plugin'
- version = '1.0.0'
+
+ name = "on-by-default-example-plugin"
+ version = "1.0.0"
def __init__(self, tree):
self.tree = tree
diff --git a/setup.py b/setup.py
index e718512..3822d9e 100644
--- a/setup.py
+++ b/setup.py
@@ -4,6 +4,6 @@ import sys
import setuptools
-sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src"))
setuptools.setup()
diff --git a/src/flake8/__init__.py b/src/flake8/__init__.py
index fb9cec5..b2df39d 100644
--- a/src/flake8/__init__.py
+++ b/src/flake8/__init__.py
@@ -17,9 +17,7 @@ LOG = logging.getLogger(__name__)
LOG.addHandler(logging.NullHandler())
__version__ = "3.9.1"
-__version_info__ = tuple(
- int(i) for i in __version__.split(".") if i.isdigit()
-)
+__version_info__ = tuple(int(i) for i in __version__.split(".") if i.isdigit())
# There is nothing lower than logging.DEBUG (10) in the logging library,
diff --git a/src/flake8/checker.py b/src/flake8/checker.py
index 6f8c0c1..bfd3f4d 100644
--- a/src/flake8/checker.py
+++ b/src/flake8/checker.py
@@ -241,9 +241,7 @@ class Manager:
"""
results_reported = results_found = 0
for checker in self._all_checkers:
- results = sorted(
- checker.results, key=lambda tup: (tup[1], tup[2])
- )
+ results = sorted(checker.results, key=lambda tup: (tup[1], tup[2]))
filename = checker.display_name
with self.style_guide.processing_file(filename):
results_reported += self._handle_results(filename, results)
diff --git a/src/flake8/exceptions.py b/src/flake8/exceptions.py
index a8d2f6e..4b0ddd1 100644
--- a/src/flake8/exceptions.py
+++ b/src/flake8/exceptions.py
@@ -39,9 +39,7 @@ class InvalidSyntax(Flake8Exception):
def __init__(self, exception: Exception) -> None:
"""Initialize our InvalidSyntax exception."""
self.original_exception = exception
- self.error_message = (
- f"{type(exception).__name__}: {exception.args[0]}"
- )
+ self.error_message = f"{type(exception).__name__}: {exception.args[0]}"
self.error_code = "E902"
self.line_number = 1
self.column_number = 0
diff --git a/src/flake8/options/config.py b/src/flake8/options/config.py
index 0a2ee63..e920e58 100644
--- a/src/flake8/options/config.py
+++ b/src/flake8/options/config.py
@@ -369,6 +369,4 @@ def get_local_plugins(config_finder):
return local_plugins
-LocalPlugins = collections.namedtuple(
- "LocalPlugins", "extension report paths"
-)
+LocalPlugins = collections.namedtuple("LocalPlugins", "extension report paths")
diff --git a/src/flake8/plugins/manager.py b/src/flake8/plugins/manager.py
index 3779b20..6f32e1f 100644
--- a/src/flake8/plugins/manager.py
+++ b/src/flake8/plugins/manager.py
@@ -472,8 +472,7 @@ class Checkers(PluginTypeManager):
plugin.to_dictionary() for plugin in self.logical_line_plugins
],
"physical_line_plugins": [
- plugin.to_dictionary()
- for plugin in self.physical_line_plugins
+ plugin.to_dictionary() for plugin in self.physical_line_plugins
],
}
diff --git a/src/flake8/processor.py b/src/flake8/processor.py
index 5fd78a8..86709c1 100644
--- a/src/flake8/processor.py
+++ b/src/flake8/processor.py
@@ -289,9 +289,7 @@ class FileProcessor:
except (tokenize.TokenError, SyntaxError) as exc:
raise exceptions.InvalidSyntax(exception=exc)
- def _noqa_line_range(
- self, min_line: int, max_line: int
- ) -> Dict[int, str]:
+ def _noqa_line_range(self, min_line: int, max_line: int) -> Dict[int, str]:
line_range = range(min_line, max_line + 1)
joined = "".join(self.lines[min_line - 1 : max_line])
return dict.fromkeys(line_range, joined)
diff --git a/src/flake8/style_guide.py b/src/flake8/style_guide.py
index d862691..aca743a 100644
--- a/src/flake8/style_guide.py
+++ b/src/flake8/style_guide.py
@@ -368,9 +368,7 @@ class StyleGuideManager:
:rtype:
:class:`~flake8.style_guide.StyleGuide`
"""
- per_file = utils.parse_files_to_codes_mapping(
- options.per_file_ignores
- )
+ per_file = utils.parse_files_to_codes_mapping(options.per_file_ignores)
for filename, violations in per_file:
yield self.default_style_guide.copy(
filename=filename, extend_ignore_with=violations
@@ -579,11 +577,7 @@ class StyleGuide:
)
is_not_inline_ignored = error.is_inline_ignored(disable_noqa) is False
is_included_in_diff = error.is_in(self._parsed_diff)
- if (
- error_is_selected
- and is_not_inline_ignored
- and is_included_in_diff
- ):
+ if error_is_selected and is_not_inline_ignored and is_included_in_diff:
self.formatter.handle(error)
self.stats.record(error)
return 1
diff --git a/src/flake8/utils.py b/src/flake8/utils.py
index 96c3485..9c46359 100644
--- a/src/flake8/utils.py
+++ b/src/flake8/utils.py
@@ -271,13 +271,10 @@ def parse_unified_diff(diff: Optional[str] = None) -> Dict[str, Set[int]]:
# comparing.
if hunk_match:
(row, number_of_rows) = [
- 1 if not group else int(group)
- for group in hunk_match.groups()
+ 1 if not group else int(group) for group in hunk_match.groups()
]
assert current_path is not None
- parsed_paths[current_path].update(
- range(row, row + number_of_rows)
- )
+ parsed_paths[current_path].update(range(row, row + number_of_rows))
# We have now parsed our diff into a dictionary that looks like:
# {'file.py': set(range(10, 16), range(18, 20)), ...}
diff --git a/tests/conftest.py b/tests/conftest.py
index 9bf4f95..0f48309 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -3,4 +3,4 @@ import sys
import flake8
-flake8.configure_logging(2, 'test-logs-%s.%s.log' % sys.version_info[0:2])
+flake8.configure_logging(2, "test-logs-%s.%s.log" % sys.version_info[0:2])
diff --git a/tests/integration/subdir/aplugin.py b/tests/integration/subdir/aplugin.py
index 266e8d0..fde5890 100644
--- a/tests/integration/subdir/aplugin.py
+++ b/tests/integration/subdir/aplugin.py
@@ -4,8 +4,8 @@
class ExtensionTestPlugin2:
"""Extension test plugin in its own directory."""
- name = 'ExtensionTestPlugin2'
- version = '1.0.0'
+ name = "ExtensionTestPlugin2"
+ version = "1.0.0"
def __init__(self, tree):
"""Construct an instance of test plugin."""
diff --git a/tests/integration/test_aggregator.py b/tests/integration/test_aggregator.py
index 2bdea70..ae75204 100644
--- a/tests/integration/test_aggregator.py
+++ b/tests/integration/test_aggregator.py
@@ -9,7 +9,7 @@ from flake8.options import aggregator
from flake8.options import config
from flake8.options import manager
-CLI_SPECIFIED_CONFIG = 'tests/fixtures/config_files/cli-specified.ini'
+CLI_SPECIFIED_CONFIG = "tests/fixtures/config_files/cli-specified.ini"
@pytest.fixture
@@ -18,8 +18,8 @@ def optmanager():
prelim_parser = argparse.ArgumentParser(add_help=False)
options.register_preliminary_options(prelim_parser)
option_manager = manager.OptionManager(
- prog='flake8',
- version='3.0.0',
+ prog="flake8",
+ version="3.0.0",
parents=[prelim_parser],
)
options.register_default_options(option_manager)
@@ -28,31 +28,50 @@ def optmanager():
def test_aggregate_options_with_config(optmanager):
"""Verify we aggregate options and config values appropriately."""
- arguments = ['flake8', '--select',
- 'E11,E34,E402,W,F', '--exclude', 'tests/*']
+ arguments = [
+ "flake8",
+ "--select",
+ "E11,E34,E402,W,F",
+ "--exclude",
+ "tests/*",
+ ]
config_finder = config.ConfigFileFinder(
- 'flake8',
- config_file=CLI_SPECIFIED_CONFIG)
+ "flake8", config_file=CLI_SPECIFIED_CONFIG
+ )
options, args = aggregator.aggregate_options(
- optmanager, config_finder, arguments)
+ optmanager, config_finder, arguments
+ )
- assert options.select == ['E11', 'E34', 'E402', 'W', 'F']
- assert options.ignore == ['E123', 'W234', 'E111']
- assert options.exclude == [os.path.abspath('tests/*')]
+ assert options.select == ["E11", "E34", "E402", "W", "F"]
+ assert options.ignore == ["E123", "W234", "E111"]
+ assert options.exclude == [os.path.abspath("tests/*")]
def test_aggregate_options_when_isolated(optmanager):
"""Verify we aggregate options and config values appropriately."""
- arguments = ['flake8', '--select', 'E11,E34,E402,W,F',
- '--exclude', 'tests/*']
- config_finder = config.ConfigFileFinder(
- 'flake8', ignore_config_files=True)
- optmanager.extend_default_ignore(['E8'])
+ arguments = [
+ "flake8",
+ "--select",
+ "E11,E34,E402,W,F",
+ "--exclude",
+ "tests/*",
+ ]
+ config_finder = config.ConfigFileFinder("flake8", ignore_config_files=True)
+ optmanager.extend_default_ignore(["E8"])
options, args = aggregator.aggregate_options(
- optmanager, config_finder, arguments)
+ optmanager, config_finder, arguments
+ )
- assert options.select == ['E11', 'E34', 'E402', 'W', 'F']
+ assert options.select == ["E11", "E34", "E402", "W", "F"]
assert sorted(options.ignore) == [
- 'E121', 'E123', 'E126', 'E226', 'E24', 'E704', 'E8', 'W503', 'W504',
+ "E121",
+ "E123",
+ "E126",
+ "E226",
+ "E24",
+ "E704",
+ "E8",
+ "W503",
+ "W504",
]
- assert options.exclude == [os.path.abspath('tests/*')]
+ assert options.exclude == [os.path.abspath("tests/*")]
diff --git a/tests/integration/test_api_legacy.py b/tests/integration/test_api_legacy.py
index 0ffaa22..efb0fc9 100644
--- a/tests/integration/test_api_legacy.py
+++ b/tests/integration/test_api_legacy.py
@@ -5,8 +5,8 @@ from flake8.api import legacy
def test_legacy_api(tmpdir):
"""A basic end-to-end test for the legacy api reporting errors."""
with tmpdir.as_cwd():
- t_py = tmpdir.join('t.py')
- t_py.write('import os # unused import\n')
+ t_py = tmpdir.join("t.py")
+ t_py.write("import os # unused import\n")
style_guide = legacy.get_style_guide()
report = style_guide.check_files([t_py.strpath])
diff --git a/tests/integration/test_checker.py b/tests/integration/test_checker.py
index 1254bf0..8a69a9c 100644
--- a/tests/integration/test_checker.py
+++ b/tests/integration/test_checker.py
@@ -10,13 +10,13 @@ from flake8.processor import FileProcessor
PHYSICAL_LINE = "# Physical line content"
-EXPECTED_REPORT = (1, 1, 'T000 Expected Message')
-EXPECTED_REPORT_PHYSICAL_LINE = (1, 'T000 Expected Message')
+EXPECTED_REPORT = (1, 1, "T000 Expected Message")
+EXPECTED_REPORT_PHYSICAL_LINE = (1, "T000 Expected Message")
EXPECTED_RESULT_PHYSICAL_LINE = (
- 'T000',
+ "T000",
0,
1,
- 'Expected Message',
+ "Expected Message",
None,
)
@@ -24,8 +24,8 @@ EXPECTED_RESULT_PHYSICAL_LINE = (
class PluginClass:
"""Simple file plugin class yielding the expected report."""
- name = 'test'
- version = '1.0.0'
+ name = "test"
+ version = "1.0.0"
def __init__(self, tree):
"""Construct a dummy object to provide mandatory parameter."""
@@ -33,26 +33,26 @@ class PluginClass:
def run(self):
"""Run class yielding one element containing the expected report."""
- yield EXPECTED_REPORT + (type(self), )
+ yield EXPECTED_REPORT + (type(self),)
def plugin_func(func):
"""Decorate file plugins which are implemented as functions."""
- func.name = 'test'
- func.version = '1.0.0'
+ func.name = "test"
+ func.version = "1.0.0"
return func
@plugin_func
def plugin_func_gen(tree):
"""Yield the expected report."""
- yield EXPECTED_REPORT + (type(plugin_func_gen), )
+ yield EXPECTED_REPORT + (type(plugin_func_gen),)
@plugin_func
def plugin_func_list(tree):
"""Return a list of expected reports."""
- return [EXPECTED_REPORT + (type(plugin_func_list), )]
+ return [EXPECTED_REPORT + (type(plugin_func_list),)]
@plugin_func
@@ -98,35 +98,37 @@ def mock_file_checker_with_plugin(plugin_target):
Useful as a starting point for mocking reports/results.
"""
# Mock an entry point returning the plugin target
- entry_point = mock.Mock(spec=['load'])
+ entry_point = mock.Mock(spec=["load"])
entry_point.name = plugin_target.name
entry_point.load.return_value = plugin_target
- entry_point.value = 'mocked:value'
+ entry_point.value = "mocked:value"
# Load the checker plugins using the entry point mock
with mock.patch.object(
- importlib_metadata,
- 'entry_points',
- return_value={'flake8.extension': [entry_point]},
+ importlib_metadata,
+ "entry_points",
+ return_value={"flake8.extension": [entry_point]},
):
checks = manager.Checkers()
# Prevent it from reading lines from stdin or somewhere else
- with mock.patch('flake8.processor.FileProcessor.read_lines',
- return_value=['Line 1']):
+ with mock.patch(
+ "flake8.processor.FileProcessor.read_lines", return_value=["Line 1"]
+ ):
file_checker = checker.FileChecker(
- '-',
- checks.to_dictionary(),
- mock.MagicMock()
+ "-", checks.to_dictionary(), mock.MagicMock()
)
return file_checker
-@pytest.mark.parametrize('plugin_target', [
- PluginClass,
- plugin_func_gen,
- plugin_func_list,
-])
+@pytest.mark.parametrize(
+ "plugin_target",
+ [
+ PluginClass,
+ plugin_func_gen,
+ plugin_func_list,
+ ],
+)
def test_handle_file_plugins(plugin_target):
"""Test the FileChecker class handling different file plugin types."""
file_checker = mock_file_checker_with_plugin(plugin_target)
@@ -138,20 +140,25 @@ def test_handle_file_plugins(plugin_target):
report = mock.Mock()
file_checker.report = report
file_checker.run_ast_checks()
- report.assert_called_once_with(error_code=None,
- line_number=EXPECTED_REPORT[0],
- column=EXPECTED_REPORT[1],
- text=EXPECTED_REPORT[2])
-
-
-@pytest.mark.parametrize('plugin_target,len_results', [
- (plugin_func_physical_ret, 1),
- (plugin_func_physical_none, 0),
- (plugin_func_physical_list_single, 1),
- (plugin_func_physical_list_multiple, 2),
- (plugin_func_physical_gen_single, 1),
- (plugin_func_physical_gen_multiple, 3),
-])
+ report.assert_called_once_with(
+ error_code=None,
+ line_number=EXPECTED_REPORT[0],
+ column=EXPECTED_REPORT[1],
+ text=EXPECTED_REPORT[2],
+ )
+
+
+@pytest.mark.parametrize(
+ "plugin_target,len_results",
+ [
+ (plugin_func_physical_ret, 1),
+ (plugin_func_physical_none, 0),
+ (plugin_func_physical_list_single, 1),
+ (plugin_func_physical_list_multiple, 2),
+ (plugin_func_physical_gen_single, 1),
+ (plugin_func_physical_gen_multiple, 3),
+ ],
+)
def test_line_check_results(plugin_target, len_results):
"""Test the FileChecker class handling results from line checks."""
file_checker = mock_file_checker_with_plugin(plugin_target)
@@ -167,54 +174,100 @@ def test_logical_line_offset_out_of_bounds():
@plugin_func
def _logical_line_out_of_bounds(logical_line):
- yield 10000, 'L100 test'
+ yield 10000, "L100 test"
file_checker = mock_file_checker_with_plugin(_logical_line_out_of_bounds)
logical_ret = (
- '',
+ "",
'print("xxxxxxxxxxx")',
[(0, (1, 0)), (5, (1, 5)), (6, (1, 6)), (19, (1, 19)), (20, (1, 20))],
)
with mock.patch.object(
- FileProcessor, 'build_logical_line', return_value=logical_ret,
+ FileProcessor,
+ "build_logical_line",
+ return_value=logical_ret,
):
file_checker.run_logical_checks()
- assert file_checker.results == [('L100', 0, 0, 'test', None)]
+ assert file_checker.results == [("L100", 0, 0, "test", None)]
PLACEHOLDER_CODE = 'some_line = "of" * code'
-@pytest.mark.parametrize('results, expected_order', [
- # No entries should be added
- ([], []),
- # Results are correctly ordered
- ([('A101', 1, 1, 'placeholder error', PLACEHOLDER_CODE),
- ('A101', 2, 1, 'placeholder error', PLACEHOLDER_CODE)], [0, 1]),
- # Reversed order of lines
- ([('A101', 2, 1, 'placeholder error', PLACEHOLDER_CODE),
- ('A101', 1, 1, 'placeholder error', PLACEHOLDER_CODE)], [1, 0]),
- # Columns are not ordered correctly (when reports are ordered correctly)
- ([('A101', 1, 2, 'placeholder error', PLACEHOLDER_CODE),
- ('A101', 1, 1, 'placeholder error', PLACEHOLDER_CODE),
- ('A101', 2, 1, 'placeholder error', PLACEHOLDER_CODE)], [1, 0, 2]),
- ([('A101', 2, 1, 'placeholder error', PLACEHOLDER_CODE),
- ('A101', 1, 1, 'placeholder error', PLACEHOLDER_CODE),
- ('A101', 1, 2, 'placeholder error', PLACEHOLDER_CODE)], [1, 2, 0]),
- ([('A101', 1, 2, 'placeholder error', PLACEHOLDER_CODE),
- ('A101', 2, 2, 'placeholder error', PLACEHOLDER_CODE),
- ('A101', 2, 1, 'placeholder error', PLACEHOLDER_CODE)], [0, 2, 1]),
- ([('A101', 1, 3, 'placeholder error', PLACEHOLDER_CODE),
- ('A101', 2, 2, 'placeholder error', PLACEHOLDER_CODE),
- ('A101', 3, 1, 'placeholder error', PLACEHOLDER_CODE)], [0, 1, 2]),
- ([('A101', 1, 1, 'placeholder error', PLACEHOLDER_CODE),
- ('A101', 1, 3, 'placeholder error', PLACEHOLDER_CODE),
- ('A101', 2, 2, 'placeholder error', PLACEHOLDER_CODE)], [0, 1, 2]),
- # Previously sort column and message (so reversed) (see bug 196)
- ([('A101', 1, 1, 'placeholder error', PLACEHOLDER_CODE),
- ('A101', 2, 1, 'charlie error', PLACEHOLDER_CODE)], [0, 1]),
-])
+@pytest.mark.parametrize(
+ "results, expected_order",
+ [
+ # No entries should be added
+ ([], []),
+ # Results are correctly ordered
+ (
+ [
+ ("A101", 1, 1, "placeholder error", PLACEHOLDER_CODE),
+ ("A101", 2, 1, "placeholder error", PLACEHOLDER_CODE),
+ ],
+ [0, 1],
+ ),
+ # Reversed order of lines
+ (
+ [
+ ("A101", 2, 1, "placeholder error", PLACEHOLDER_CODE),
+ ("A101", 1, 1, "placeholder error", PLACEHOLDER_CODE),
+ ],
+ [1, 0],
+ ),
+ # Columns are not ordered correctly
+ # (when reports are ordered correctly)
+ (
+ [
+ ("A101", 1, 2, "placeholder error", PLACEHOLDER_CODE),
+ ("A101", 1, 1, "placeholder error", PLACEHOLDER_CODE),
+ ("A101", 2, 1, "placeholder error", PLACEHOLDER_CODE),
+ ],
+ [1, 0, 2],
+ ),
+ (
+ [
+ ("A101", 2, 1, "placeholder error", PLACEHOLDER_CODE),
+ ("A101", 1, 1, "placeholder error", PLACEHOLDER_CODE),
+ ("A101", 1, 2, "placeholder error", PLACEHOLDER_CODE),
+ ],
+ [1, 2, 0],
+ ),
+ (
+ [
+ ("A101", 1, 2, "placeholder error", PLACEHOLDER_CODE),
+ ("A101", 2, 2, "placeholder error", PLACEHOLDER_CODE),
+ ("A101", 2, 1, "placeholder error", PLACEHOLDER_CODE),
+ ],
+ [0, 2, 1],
+ ),
+ (
+ [
+ ("A101", 1, 3, "placeholder error", PLACEHOLDER_CODE),
+ ("A101", 2, 2, "placeholder error", PLACEHOLDER_CODE),
+ ("A101", 3, 1, "placeholder error", PLACEHOLDER_CODE),
+ ],
+ [0, 1, 2],
+ ),
+ (
+ [
+ ("A101", 1, 1, "placeholder error", PLACEHOLDER_CODE),
+ ("A101", 1, 3, "placeholder error", PLACEHOLDER_CODE),
+ ("A101", 2, 2, "placeholder error", PLACEHOLDER_CODE),
+ ],
+ [0, 1, 2],
+ ),
+ # Previously sort column and message (so reversed) (see bug 196)
+ (
+ [
+ ("A101", 1, 1, "placeholder error", PLACEHOLDER_CODE),
+ ("A101", 2, 1, "charlie error", PLACEHOLDER_CODE),
+ ],
+ [0, 1],
+ ),
+ ],
+)
def test_report_order(results, expected_order):
"""
Test in which order the results will be reported.
@@ -222,6 +275,7 @@ def test_report_order(results, expected_order):
It gets a list of reports from the file checkers and verifies that the
result will be ordered independent from the original report.
"""
+
def count_side_effect(name, sorted_results):
"""Side effect for the result handler to tell all are reported."""
return len(sorted_results)
@@ -230,11 +284,11 @@ def test_report_order(results, expected_order):
# tuples to create the expected result lists from the indexes
expected_results = [results[index] for index in expected_order]
- file_checker = mock.Mock(spec=['results', 'display_name'])
+ file_checker = mock.Mock(spec=["results", "display_name"])
file_checker.results = results
- file_checker.display_name = 'placeholder'
+ file_checker.display_name = "placeholder"
- style_guide = mock.MagicMock(spec=['options', 'processing_file'])
+ style_guide = mock.MagicMock(spec=["options", "processing_file"])
# Create a placeholder manager without arguments or plugins
# Just add one custom file checker which just provides the results
@@ -244,9 +298,9 @@ def test_report_order(results, expected_order):
# _handle_results is the first place which gets the sorted result
# Should something non-private be mocked instead?
handler = mock.Mock(side_effect=count_side_effect)
- with mock.patch.object(manager, '_handle_results', handler):
+ with mock.patch.object(manager, "_handle_results", handler):
assert manager.report() == (len(results), len(results))
- handler.assert_called_once_with('placeholder', expected_results)
+ handler.assert_called_once_with("placeholder", expected_results)
def test_acquire_when_multiprocessing_pool_can_initialize():
diff --git a/tests/integration/test_main.py b/tests/integration/test_main.py
index 765e752..45fe9de 100644
--- a/tests/integration/test_main.py
+++ b/tests/integration/test_main.py
@@ -17,7 +17,7 @@ def _call_main(argv, retv=0):
def test_diff_option(tmpdir, capsys):
"""Ensure that `flake8 --diff` works."""
- t_py_contents = '''\
+ t_py_contents = """\
import os
import sys # unused but not part of diff
@@ -26,9 +26,9 @@ print('(to avoid trailing whitespace in test)')
print(os.path.join('foo', 'bar'))
y # part of the diff and an error
-'''
+"""
- diff = '''\
+ diff = """\
diff --git a/t.py b/t.py
index d64ac39..7d943de 100644
--- a/t.py
@@ -39,39 +39,39 @@ index d64ac39..7d943de 100644
print(os.path.join('foo', 'bar'))
+
+y # part of the diff and an error
-'''
+"""
- with mock.patch.object(utils, 'stdin_get_value', return_value=diff):
+ with mock.patch.object(utils, "stdin_get_value", return_value=diff):
with tmpdir.as_cwd():
- tmpdir.join('t.py').write(t_py_contents)
- _call_main(['--diff'], retv=1)
+ tmpdir.join("t.py").write(t_py_contents)
+ _call_main(["--diff"], retv=1)
out, err = capsys.readouterr()
assert out == "t.py:8:1: F821 undefined name 'y'\n"
- assert err == ''
+ assert err == ""
def test_form_feed_line_split(tmpdir, capsys):
"""Test that form feed is treated the same for stdin."""
- src = 'x=1\n\f\ny=1\n'
- expected_out = '''\
+ src = "x=1\n\f\ny=1\n"
+ expected_out = """\
t.py:1:2: E225 missing whitespace around operator
t.py:3:2: E225 missing whitespace around operator
-'''
+"""
with tmpdir.as_cwd():
- tmpdir.join('t.py').write(src)
+ tmpdir.join("t.py").write(src)
- with mock.patch.object(utils, 'stdin_get_value', return_value=src):
- _call_main(['-', '--stdin-display-name=t.py'], retv=1)
+ with mock.patch.object(utils, "stdin_get_value", return_value=src):
+ _call_main(["-", "--stdin-display-name=t.py"], retv=1)
out, err = capsys.readouterr()
assert out == expected_out
- assert err == ''
+ assert err == ""
- _call_main(['t.py'], retv=1)
+ _call_main(["t.py"], retv=1)
out, err = capsys.readouterr()
assert out == expected_out
- assert err == ''
+ assert err == ""
def test_e101_indent_char_does_not_reset(tmpdir, capsys):
@@ -89,82 +89,79 @@ if True:
"""
with tmpdir.as_cwd():
- tmpdir.join('t.py').write(t_py_contents)
- _call_main(['t.py'])
+ tmpdir.join("t.py").write(t_py_contents)
+ _call_main(["t.py"])
def test_statistics_option(tmpdir, capsys):
"""Ensure that `flake8 --statistics` works."""
with tmpdir.as_cwd():
- tmpdir.join('t.py').write('import os\nimport sys\n')
- _call_main(['--statistics', 't.py'], retv=1)
+ tmpdir.join("t.py").write("import os\nimport sys\n")
+ _call_main(["--statistics", "t.py"], retv=1)
- out, err = capsys.readouterr()
- assert out == '''\
+ expected = """\
t.py:1:1: F401 'os' imported but unused
t.py:2:1: F401 'sys' imported but unused
2 F401 'os' imported but unused
-'''
- assert err == ''
+"""
+ out, err = capsys.readouterr()
+ assert out == expected
+ assert err == ""
def test_show_source_option(tmpdir, capsys):
"""Ensure that --show-source and --no-show-source work."""
with tmpdir.as_cwd():
- tmpdir.join('tox.ini').write('[flake8]\nshow_source = true\n')
- tmpdir.join('t.py').write('import os\n')
- _call_main(['t.py'], retv=1)
+ tmpdir.join("tox.ini").write("[flake8]\nshow_source = true\n")
+ tmpdir.join("t.py").write("import os\n")
+ _call_main(["t.py"], retv=1)
- out, err = capsys.readouterr()
- assert out == '''\
+ expected = """\
t.py:1:1: F401 'os' imported but unused
import os
^
-'''
- assert err == ''
+"""
+ out, err = capsys.readouterr()
+ assert out == expected
+ assert err == ""
with tmpdir.as_cwd():
- _call_main(['t.py', '--no-show-source'], retv=1)
+ _call_main(["t.py", "--no-show-source"], retv=1)
- out, err = capsys.readouterr()
- assert out == '''\
+ expected = """\
t.py:1:1: F401 'os' imported but unused
-'''
- assert err == ''
+"""
+ out, err = capsys.readouterr()
+ assert out == expected
+ assert err == ""
def test_extend_exclude(tmpdir, capsys):
"""Ensure that `flake8 --extend-exclude` works."""
- for d in ['project', 'vendor', 'legacy', '.git', '.tox', '.hg']:
- tmpdir.mkdir(d).join('t.py').write('import os\nimport sys\n')
+ for d in ["project", "vendor", "legacy", ".git", ".tox", ".hg"]:
+ tmpdir.mkdir(d).join("t.py").write("import os\nimport sys\n")
with tmpdir.as_cwd():
- _call_main(['--extend-exclude=vendor,legacy/'], retv=1)
+ _call_main(["--extend-exclude=vendor,legacy/"], retv=1)
out, err = capsys.readouterr()
- expected_out = '''\
+ expected_out = """\
./project/t.py:1:1: F401 'os' imported but unused
./project/t.py:2:1: F401 'sys' imported but unused
-'''
- assert out == expected_out.replace('/', os.sep)
- assert err == ''
+"""
+ assert out == expected_out.replace("/", os.sep)
+ assert err == ""
def test_malformed_per_file_ignores_error(tmpdir, capsys):
"""Test the error message for malformed `per-file-ignores`."""
- setup_cfg = '''\
+ setup_cfg = """\
[flake8]
per-file-ignores =
incorrect/*
values/*
-'''
-
- with tmpdir.as_cwd():
- tmpdir.join('setup.cfg').write(setup_cfg)
- _call_main(['.'], retv=1)
-
- out, err = capsys.readouterr()
- assert out == '''\
+"""
+ expected = """\
There was a critical error during execution of Flake8:
Expected `per-file-ignores` to be a mapping from file exclude patterns to ignore codes.
@@ -172,50 +169,59 @@ Configured `per-file-ignores` setting:
incorrect/*
values/*
-''' # noqa: E501
+""" # noqa: E501
+
+ with tmpdir.as_cwd():
+ tmpdir.join("setup.cfg").write(setup_cfg)
+ _call_main(["."], retv=1)
+
+ out, err = capsys.readouterr()
+ assert out == expected
def test_tokenization_error_but_not_syntax_error(tmpdir, capsys):
"""Test that flake8 does not crash on tokenization errors."""
with tmpdir.as_cwd():
# this is a crash in the tokenizer, but not in the ast
- tmpdir.join('t.py').write("b'foo' \\\n")
- _call_main(['t.py'], retv=1)
+ tmpdir.join("t.py").write("b'foo' \\\n")
+ _call_main(["t.py"], retv=1)
out, err = capsys.readouterr()
- assert out == 't.py:1:1: E902 TokenError: EOF in multi-line statement\n'
- assert err == ''
+ assert out == "t.py:1:1: E902 TokenError: EOF in multi-line statement\n"
+ assert err == ""
def test_tokenization_error_is_a_syntax_error(tmpdir, capsys):
"""Test when tokenize raises a SyntaxError."""
with tmpdir.as_cwd():
- tmpdir.join('t.py').write('if True:\n pass\n pass\n')
- _call_main(['t.py'], retv=1)
+ tmpdir.join("t.py").write("if True:\n pass\n pass\n")
+ _call_main(["t.py"], retv=1)
out, err = capsys.readouterr()
- assert out == 't.py:1:1: E902 IndentationError: unindent does not match any outer indentation level\n' # noqa: E501
- assert err == ''
+ expected = "t.py:1:1: E902 IndentationError: unindent does not match any outer indentation level\n" # noqa: E501
+ assert out == expected
+ assert err == ""
def test_bug_report_successful(capsys):
"""Test that --bug-report does not crash."""
- _call_main(['--bug-report'])
+ _call_main(["--bug-report"])
out, err = capsys.readouterr()
assert json.loads(out)
- assert err == ''
+ assert err == ""
def test_specific_noqa_does_not_clobber_pycodestyle_noqa(tmpdir, capsys):
"""See https://github.com/pycqa/flake8/issues/1104."""
with tmpdir.as_cwd():
- tmpdir.join('t.py').write("test = ('ABC' == None) # noqa: E501\n")
- _call_main(['t.py'], retv=1)
+ tmpdir.join("t.py").write("test = ('ABC' == None) # noqa: E501\n")
+ _call_main(["t.py"], retv=1)
- out, err = capsys.readouterr()
- assert out == '''\
+ expected = """\
t.py:1:15: E711 comparison to None should be 'if cond is None:'
-'''
+"""
+ out, err = capsys.readouterr()
+ assert out == expected
def test_specific_noqa_on_line_with_continuation(tmpdir, capsys):
@@ -230,60 +236,64 @@ x = """
'''
with tmpdir.as_cwd():
- tmpdir.join('t.py').write(t_py_src)
- _call_main(['t.py'], retv=0)
+ tmpdir.join("t.py").write(t_py_src)
+ _call_main(["t.py"], retv=0)
out, err = capsys.readouterr()
- assert out == err == ''
+ assert out == err == ""
def test_physical_line_file_not_ending_in_newline(tmpdir, capsys):
"""See https://github.com/PyCQA/pycodestyle/issues/960."""
- t_py_src = 'def f():\n\tpass'
+ t_py_src = "def f():\n\tpass"
with tmpdir.as_cwd():
- tmpdir.join('t.py').write(t_py_src)
- _call_main(['t.py'], retv=1)
+ tmpdir.join("t.py").write(t_py_src)
+ _call_main(["t.py"], retv=1)
- out, err = capsys.readouterr()
- assert out == '''\
+ expected = """\
t.py:2:1: W191 indentation contains tabs
t.py:2:6: W292 no newline at end of file
-'''
+"""
+ out, err = capsys.readouterr()
+ assert out == expected
def test_physical_line_file_not_ending_in_newline_trailing_ws(tmpdir, capsys):
"""See https://github.com/PyCQA/pycodestyle/issues/960."""
- t_py_src = 'x = 1 '
+ t_py_src = "x = 1 "
with tmpdir.as_cwd():
- tmpdir.join('t.py').write(t_py_src)
- _call_main(['t.py'], retv=1)
+ tmpdir.join("t.py").write(t_py_src)
+ _call_main(["t.py"], retv=1)
- out, err = capsys.readouterr()
- assert out == '''\
+ expected = """\
t.py:1:6: W291 trailing whitespace
t.py:1:9: W292 no newline at end of file
-'''
+"""
+ out, err = capsys.readouterr()
+ assert out == expected
def test_obtaining_args_from_sys_argv_when_not_explicity_provided(capsys):
"""Test that arguments are obtained from 'sys.argv'."""
- with mock.patch('sys.argv', ['flake8', '--help']):
+ with mock.patch("sys.argv", ["flake8", "--help"]):
_call_main(None)
out, err = capsys.readouterr()
- assert out.startswith('usage: flake8 [options] file file ...\n')
- assert err == ''
+ assert out.startswith("usage: flake8 [options] file file ...\n")
+ assert err == ""
def test_cli_config_option_respected(tmp_path):
"""Test --config is used."""
config = tmp_path / "flake8.ini"
- config.write_text("""\
+ config.write_text(
+ """\
[flake8]
ignore = F401
-""")
+"""
+ )
py_file = tmp_path / "t.py"
py_file.write_text("import os\n")
@@ -294,10 +304,12 @@ ignore = F401
def test_cli_isolated_overrides_config_option(tmp_path):
"""Test --isolated overrides --config."""
config = tmp_path / "flake8.ini"
- config.write_text("""\
+ config.write_text(
+ """\
[flake8]
ignore = F401
-""")
+"""
+ )
py_file = tmp_path / "t.py"
py_file.write_text("import os\n")
@@ -316,13 +328,13 @@ def test_file_not_found(tmpdir, capsys):
def test_output_file(tmpdir, capsys):
"""Ensure that --output-file is honored."""
- tmpdir.join('t.py').write('import os\n')
+ tmpdir.join("t.py").write("import os\n")
with tmpdir.as_cwd():
- _call_main(['t.py', '--output-file=f'], retv=1)
+ _call_main(["t.py", "--output-file=f"], retv=1)
out, err = capsys.readouterr()
assert out == err == ""
expected = "t.py:1:1: F401 'os' imported but unused\n"
- assert tmpdir.join('f').read() == expected
+ assert tmpdir.join("f").read() == expected
diff --git a/tests/integration/test_plugins.py b/tests/integration/test_plugins.py
index 867a94e..7fff9df 100644
--- a/tests/integration/test_plugins.py
+++ b/tests/integration/test_plugins.py
@@ -1,15 +1,15 @@
"""Integration tests for plugin loading."""
from flake8.main import application
-LOCAL_PLUGIN_CONFIG = 'tests/fixtures/config_files/local-plugin.ini'
-LOCAL_PLUGIN_PATH_CONFIG = 'tests/fixtures/config_files/local-plugin-path.ini'
+LOCAL_PLUGIN_CONFIG = "tests/fixtures/config_files/local-plugin.ini"
+LOCAL_PLUGIN_PATH_CONFIG = "tests/fixtures/config_files/local-plugin-path.ini"
class ExtensionTestPlugin:
"""Extension test plugin."""
- name = 'ExtensionTestPlugin'
- version = '1.0.0'
+ name = "ExtensionTestPlugin"
+ version = "1.0.0"
def __init__(self, tree):
"""Construct an instance of test plugin."""
@@ -20,14 +20,14 @@ class ExtensionTestPlugin:
@classmethod
def add_options(cls, parser):
"""Register options."""
- parser.add_option('--anopt')
+ parser.add_option("--anopt")
class ReportTestPlugin:
"""Report test plugin."""
- name = 'ReportTestPlugin'
- version = '1.0.0'
+ name = "ReportTestPlugin"
+ version = "1.0.0"
def __init__(self, tree):
"""Construct an instance of test plugin."""
@@ -39,28 +39,29 @@ class ReportTestPlugin:
def test_enable_local_plugin_from_config():
"""App can load a local plugin from config file."""
app = application.Application()
- app.initialize(['flake8', '--config', LOCAL_PLUGIN_CONFIG])
+ app.initialize(["flake8", "--config", LOCAL_PLUGIN_CONFIG])
assert app.check_plugins is not None
- assert app.check_plugins['XE'].plugin is ExtensionTestPlugin
+ assert app.check_plugins["XE"].plugin is ExtensionTestPlugin
assert app.formatting_plugins is not None
- assert app.formatting_plugins['XR'].plugin is ReportTestPlugin
+ assert app.formatting_plugins["XR"].plugin is ReportTestPlugin
def test_local_plugin_can_add_option():
"""A local plugin can add a CLI option."""
app = application.Application()
app.initialize(
- ['flake8', '--config', LOCAL_PLUGIN_CONFIG, '--anopt', 'foo'])
+ ["flake8", "--config", LOCAL_PLUGIN_CONFIG, "--anopt", "foo"]
+ )
assert app.options is not None
- assert app.options.anopt == 'foo'
+ assert app.options.anopt == "foo"
def test_enable_local_plugin_at_non_installed_path():
"""Can add a paths option in local-plugins config section for finding."""
app = application.Application()
- app.initialize(['flake8', '--config', LOCAL_PLUGIN_PATH_CONFIG])
+ app.initialize(["flake8", "--config", LOCAL_PLUGIN_PATH_CONFIG])
assert app.check_plugins is not None
- assert app.check_plugins['XE'].plugin.name == 'ExtensionTestPlugin2'
+ assert app.check_plugins["XE"].plugin.name == "ExtensionTestPlugin2"
diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py
index a407b50..2808387 100644
--- a/tests/unit/conftest.py
+++ b/tests/unit/conftest.py
@@ -6,13 +6,13 @@ import pytest
def options_from(**kwargs):
"""Generate a Values instances with our kwargs."""
- kwargs.setdefault('hang_closing', True)
- kwargs.setdefault('max_line_length', 79)
- kwargs.setdefault('max_doc_length', None)
- kwargs.setdefault('indent_size', 4)
- kwargs.setdefault('verbose', False)
- kwargs.setdefault('stdin_display_name', 'stdin')
- kwargs.setdefault('disable_noqa', False)
+ kwargs.setdefault("hang_closing", True)
+ kwargs.setdefault("max_line_length", 79)
+ kwargs.setdefault("max_doc_length", None)
+ kwargs.setdefault("indent_size", 4)
+ kwargs.setdefault("verbose", False)
+ kwargs.setdefault("stdin_display_name", "stdin")
+ kwargs.setdefault("disable_noqa", False)
return argparse.Namespace(**kwargs)
diff --git a/tests/unit/test_application.py b/tests/unit/test_application.py
index d675eaa..b95e383 100644
--- a/tests/unit/test_application.py
+++ b/tests/unit/test_application.py
@@ -10,10 +10,10 @@ from flake8.main import application as app
def options(**kwargs):
"""Generate argparse.Namespace for our Application."""
- kwargs.setdefault('verbose', 0)
- kwargs.setdefault('output_file', None)
- kwargs.setdefault('count', False)
- kwargs.setdefault('exit_zero', False)
+ kwargs.setdefault("verbose", 0)
+ kwargs.setdefault("output_file", None)
+ kwargs.setdefault("count", False)
+ kwargs.setdefault("exit_zero", False)
return argparse.Namespace(**kwargs)
@@ -24,19 +24,20 @@ def application():
@pytest.mark.parametrize(
- 'result_count, catastrophic, exit_zero, value', [
+ "result_count, catastrophic, exit_zero, value",
+ [
(0, False, False, False),
(0, True, False, True),
(2, False, False, True),
(2, True, False, True),
-
(0, True, True, True),
(2, False, True, False),
(2, True, True, True),
- ]
+ ],
)
-def test_exit_does_raise(result_count, catastrophic, exit_zero, value,
- application):
+def test_exit_does_raise(
+ result_count, catastrophic, exit_zero, value, application
+):
"""Verify Application.exit doesn't raise SystemExit."""
application.result_count = result_count
application.catastrophic_failure = catastrophic
@@ -53,10 +54,10 @@ def test_warns_on_unknown_formatter_plugin_name(application):
default = mock.Mock()
execute = default.execute
application.formatting_plugins = {
- 'default': default,
+ "default": default,
}
- with mock.patch.object(app.LOG, 'warning') as warning:
- assert execute is application.formatter_for('fake-plugin-name')
+ with mock.patch.object(app.LOG, "warning") as warning:
+ assert execute is application.formatter_for("fake-plugin-name")
assert warning.called is True
assert warning.call_count == 1
@@ -67,12 +68,12 @@ def test_returns_specified_plugin(application):
desired = mock.Mock()
execute = desired.execute
application.formatting_plugins = {
- 'default': mock.Mock(),
- 'desired': desired,
+ "default": mock.Mock(),
+ "desired": desired,
}
- with mock.patch.object(app.LOG, 'warning') as warning:
- assert execute is application.formatter_for('desired')
+ with mock.patch.object(app.LOG, "warning") as warning:
+ assert execute is application.formatter_for("desired")
assert warning.called is False
@@ -80,10 +81,11 @@ def test_returns_specified_plugin(application):
def test_prelim_opts_args(application):
"""Verify we get sensible prelim opts and args."""
opts, args = application.parse_preliminary_options(
- ['--foo', '--verbose', 'src', 'setup.py', '--statistics', '--version'])
+ ["--foo", "--verbose", "src", "setup.py", "--statistics", "--version"]
+ )
assert opts.verbose
- assert args == ['--foo', 'src', 'setup.py', '--statistics', '--version']
+ assert args == ["--foo", "src", "setup.py", "--statistics", "--version"]
def test_prelim_opts_ignore_help(application):
@@ -91,16 +93,16 @@ def test_prelim_opts_ignore_help(application):
# GIVEN
# WHEN
- _, args = application.parse_preliminary_options(['--help', '-h'])
+ _, args = application.parse_preliminary_options(["--help", "-h"])
# THEN
- assert args == ['--help', '-h']
+ assert args == ["--help", "-h"]
def test_prelim_opts_handles_empty(application):
"""Verify empty argv lists are handled correctly."""
- irrelevant_args = ['myexe', '/path/to/foo']
- with mock.patch.object(sys, 'argv', irrelevant_args):
+ irrelevant_args = ["myexe", "/path/to/foo"]
+ with mock.patch.object(sys, "argv", irrelevant_args):
opts, args = application.parse_preliminary_options([])
assert args == []
diff --git a/tests/unit/test_base_formatter.py b/tests/unit/test_base_formatter.py
index c48adf2..b9e9b8d 100644
--- a/tests/unit/test_base_formatter.py
+++ b/tests/unit/test_base_formatter.py
@@ -10,23 +10,23 @@ from flake8.formatting import base
def options(**kwargs):
"""Create an argparse.Namespace instance."""
- kwargs.setdefault('output_file', None)
- kwargs.setdefault('tee', False)
+ kwargs.setdefault("output_file", None)
+ kwargs.setdefault("tee", False)
return argparse.Namespace(**kwargs)
-@pytest.mark.parametrize('filename', [None, 'out.txt'])
+@pytest.mark.parametrize("filename", [None, "out.txt"])
def test_start(filename):
"""Verify we open a new file in the start method."""
mock_open = mock.mock_open()
formatter = base.BaseFormatter(options(output_file=filename))
- with mock.patch('flake8.formatting.base.open', mock_open):
+ with mock.patch("flake8.formatting.base.open", mock_open):
formatter.start()
if filename is None:
assert mock_open.called is False
else:
- mock_open.assert_called_once_with(filename, 'a')
+ mock_open.assert_called_once_with(filename, "a")
def test_stop():
@@ -45,68 +45,79 @@ def test_format_needs_to_be_implemented():
formatter = base.BaseFormatter(options())
with pytest.raises(NotImplementedError):
formatter.format(
- style_guide.Violation('A000', 'file.py', 1, 1, 'error text', None)
+ style_guide.Violation("A000", "file.py", 1, 1, "error text", None)
)
def test_show_source_returns_nothing_when_not_showing_source():
"""Ensure we return nothing when users want nothing."""
formatter = base.BaseFormatter(options(show_source=False))
- assert formatter.show_source(
- style_guide.Violation('A000', 'file.py', 1, 1, 'error text', 'line')
- ) == ''
+ assert (
+ formatter.show_source(
+ style_guide.Violation(
+ "A000", "file.py", 1, 1, "error text", "line"
+ )
+ )
+ == ""
+ )
def test_show_source_returns_nothing_when_there_is_source():
"""Ensure we return nothing when there is no line."""
formatter = base.BaseFormatter(options(show_source=True))
- assert formatter.show_source(
- style_guide.Violation('A000', 'file.py', 1, 1, 'error text', None)
- ) == ''
-
-
-@pytest.mark.parametrize(('line1', 'line2', 'column'), [
- (
- 'x=1\n',
- ' ^',
- 2,
- ),
- (
- ' x=(1\n +2)\n',
- ' ^',
- 5,
- ),
- (
- '\tx\t=\ty\n',
- '\t \t \t^',
- 6,
- ),
-])
+ assert (
+ formatter.show_source(
+ style_guide.Violation("A000", "file.py", 1, 1, "error text", None)
+ )
+ == ""
+ )
+
+
+@pytest.mark.parametrize(
+ ("line1", "line2", "column"),
+ [
+ (
+ "x=1\n",
+ " ^",
+ 2,
+ ),
+ (
+ " x=(1\n +2)\n",
+ " ^",
+ 5,
+ ),
+ (
+ "\tx\t=\ty\n",
+ "\t \t \t^",
+ 6,
+ ),
+ ],
+)
def test_show_source_updates_physical_line_appropriately(line1, line2, column):
"""Ensure the error column is appropriately indicated."""
formatter = base.BaseFormatter(options(show_source=True))
- error = style_guide.Violation('A000', 'file.py', 1, column, 'error', line1)
+ error = style_guide.Violation("A000", "file.py", 1, column, "error", line1)
output = formatter.show_source(error)
assert output == line1 + line2
-@pytest.mark.parametrize('tee', [False, True])
+@pytest.mark.parametrize("tee", [False, True])
def test_write_uses_an_output_file(tee):
"""Verify that we use the output file when it's present."""
- line = 'Something to write'
- source = 'source'
+ line = "Something to write"
+ source = "source"
filemock = mock.Mock()
formatter = base.BaseFormatter(options(tee=tee))
formatter.output_fd = filemock
- with mock.patch('flake8.formatting.base.print') as print_func:
+ with mock.patch("flake8.formatting.base.print") as print_func:
formatter.write(line, source)
if tee:
assert print_func.called
assert print_func.mock_calls == [
- mock.call(line, end='\n'),
- mock.call(source, end='\n'),
+ mock.call(line, end="\n"),
+ mock.call(source, end="\n"),
]
else:
assert not print_func.called
@@ -119,11 +130,11 @@ def test_write_uses_an_output_file(tee):
]
-@mock.patch('flake8.formatting.base.print')
+@mock.patch("flake8.formatting.base.print")
def test_write_uses_print(print_function):
"""Verify that we use the print function without an output file."""
- line = 'Something to write'
- source = 'source'
+ line = "Something to write"
+ source = "source"
formatter = base.BaseFormatter(options())
formatter.write(line, source)
@@ -131,8 +142,8 @@ def test_write_uses_print(print_function):
assert print_function.called is True
assert print_function.call_count == 2
assert print_function.mock_calls == [
- mock.call(line, end='\n'),
- mock.call(source, end='\n'),
+ mock.call(line, end="\n"),
+ mock.call(source, end="\n"),
]
@@ -163,14 +174,14 @@ def test_handle_formats_the_error():
formatter = FormatFormatter(options(show_source=False))
filemock = formatter.output_fd = mock.Mock()
error = style_guide.Violation(
- code='A001',
- filename='example.py',
+ code="A001",
+ filename="example.py",
line_number=1,
column_number=1,
- text='Fake error',
- physical_line='a = 1',
+ text="Fake error",
+ physical_line="a = 1",
)
formatter.handle(error)
- filemock.write.assert_called_once_with(repr(error) + '\n')
+ filemock.write.assert_called_once_with(repr(error) + "\n")
diff --git a/tests/unit/test_checker_manager.py b/tests/unit/test_checker_manager.py
index b94d0c5..f82dc49 100644
--- a/tests/unit/test_checker_manager.py
+++ b/tests/unit/test_checker_manager.py
@@ -10,10 +10,12 @@ from flake8.main.options import JobsArgument
def style_guide_mock():
"""Create a mock StyleGuide object."""
- return mock.MagicMock(**{
- 'options.diff': False,
- 'options.jobs': JobsArgument("4"),
- })
+ return mock.MagicMock(
+ **{
+ "options.diff": False,
+ "options.jobs": JobsArgument("4"),
+ }
+ )
def _parallel_checker_manager():
@@ -27,21 +29,21 @@ def _parallel_checker_manager():
def test_oserrors_cause_serial_fall_back():
"""Verify that OSErrors will cause the Manager to fallback to serial."""
- err = OSError(errno.ENOSPC, 'Ominous message about spaceeeeee')
- with mock.patch('_multiprocessing.SemLock', side_effect=err):
+ err = OSError(errno.ENOSPC, "Ominous message about spaceeeeee")
+ with mock.patch("_multiprocessing.SemLock", side_effect=err):
manager = _parallel_checker_manager()
- with mock.patch.object(manager, 'run_serial') as serial:
+ with mock.patch.object(manager, "run_serial") as serial:
manager.run()
assert serial.call_count == 1
-@mock.patch('flake8.checker._multiprocessing_is_fork', return_value=True)
+@mock.patch("flake8.checker._multiprocessing_is_fork", return_value=True)
def test_oserrors_are_reraised(is_windows):
"""Verify that unexpected OSErrors will cause the Manager to reraise."""
- err = OSError(errno.EAGAIN, 'Ominous message')
- with mock.patch('_multiprocessing.SemLock', side_effect=err):
+ err = OSError(errno.EAGAIN, "Ominous message")
+ with mock.patch("_multiprocessing.SemLock", side_effect=err):
manager = _parallel_checker_manager()
- with mock.patch.object(manager, 'run_serial') as serial:
+ with mock.patch.object(manager, "run_serial") as serial:
with pytest.raises(OSError):
manager.run()
assert serial.call_count == 0
@@ -50,7 +52,7 @@ def test_oserrors_are_reraised(is_windows):
def test_multiprocessing_is_disabled():
"""Verify not being able to import multiprocessing forces jobs to 0."""
style_guide = style_guide_mock()
- with mock.patch('flake8.checker.multiprocessing', None):
+ with mock.patch("flake8.checker.multiprocessing", None):
manager = checker.Manager(style_guide, [], [])
assert manager.jobs == 0
@@ -58,20 +60,20 @@ def test_multiprocessing_is_disabled():
def test_make_checkers():
"""Verify that we create a list of FileChecker instances."""
style_guide = style_guide_mock()
- files = ['file1', 'file2']
+ files = ["file1", "file2"]
checkplugins = mock.Mock()
checkplugins.to_dictionary.return_value = {
- 'ast_plugins': [],
- 'logical_line_plugins': [],
- 'physical_line_plugins': [],
+ "ast_plugins": [],
+ "logical_line_plugins": [],
+ "physical_line_plugins": [],
}
- with mock.patch('flake8.checker.multiprocessing', None):
+ with mock.patch("flake8.checker.multiprocessing", None):
manager = checker.Manager(style_guide, files, checkplugins)
- with mock.patch('flake8.utils.filenames_from') as filenames_from:
- filenames_from.side_effect = [['file1'], ['file2']]
- with mock.patch('flake8.utils.fnmatch', return_value=True):
- with mock.patch('flake8.processor.FileProcessor'):
+ with mock.patch("flake8.utils.filenames_from") as filenames_from:
+ filenames_from.side_effect = [["file1"], ["file2"]]
+ with mock.patch("flake8.utils.fnmatch", return_value=True):
+ with mock.patch("flake8.processor.FileProcessor"):
manager.make_checkers()
assert manager._all_checkers
diff --git a/tests/unit/test_config_file_finder.py b/tests/unit/test_config_file_finder.py
index d31a692..5116796 100644
--- a/tests/unit/test_config_file_finder.py
+++ b/tests/unit/test_config_file_finder.py
@@ -7,73 +7,93 @@ import pytest
from flake8.options import config
-CLI_SPECIFIED_FILEPATH = 'tests/fixtures/config_files/cli-specified.ini'
-BROKEN_CONFIG_PATH = 'tests/fixtures/config_files/broken.ini'
+CLI_SPECIFIED_FILEPATH = "tests/fixtures/config_files/cli-specified.ini"
+BROKEN_CONFIG_PATH = "tests/fixtures/config_files/broken.ini"
def test_cli_config():
"""Verify opening and reading the file specified via the cli."""
cli_filepath = CLI_SPECIFIED_FILEPATH
- finder = config.ConfigFileFinder('flake8')
+ finder = config.ConfigFileFinder("flake8")
parsed_config = finder.cli_config(cli_filepath)
- assert parsed_config.has_section('flake8')
-
-
-@pytest.mark.parametrize('cwd,expected', [
- # Root directory of project
- (os.path.abspath('.'),
- [os.path.abspath('setup.cfg'),
- os.path.abspath('tox.ini')]),
- # Subdirectory of project directory
- (os.path.abspath('src'),
- [os.path.abspath('setup.cfg'),
- os.path.abspath('tox.ini')]),
- # Outside of project directory
- (os.path.abspath('/'),
- []),
-])
+ assert parsed_config.has_section("flake8")
+
+
+@pytest.mark.parametrize(
+ "cwd,expected",
+ [
+ # Root directory of project
+ (
+ os.path.abspath("."),
+ [os.path.abspath("setup.cfg"), os.path.abspath("tox.ini")],
+ ),
+ # Subdirectory of project directory
+ (
+ os.path.abspath("src"),
+ [os.path.abspath("setup.cfg"), os.path.abspath("tox.ini")],
+ ),
+ # Outside of project directory
+ (os.path.abspath("/"), []),
+ ],
+)
def test_generate_possible_local_files(cwd, expected):
"""Verify generation of all possible config paths."""
- finder = config.ConfigFileFinder('flake8')
+ finder = config.ConfigFileFinder("flake8")
- with mock.patch.object(os, 'getcwd', return_value=cwd):
+ with mock.patch.object(os, "getcwd", return_value=cwd):
config_files = list(finder.generate_possible_local_files())
assert config_files == expected
-@pytest.mark.parametrize('extra_config_files,expected', [
- # Extra config files specified
- ([CLI_SPECIFIED_FILEPATH],
- [os.path.abspath('setup.cfg'),
- os.path.abspath('tox.ini'),
- os.path.abspath(CLI_SPECIFIED_FILEPATH)]),
- # Missing extra config files specified
- ([CLI_SPECIFIED_FILEPATH,
- 'tests/fixtures/config_files/missing.ini'],
- [os.path.abspath('setup.cfg'),
- os.path.abspath('tox.ini'),
- os.path.abspath(CLI_SPECIFIED_FILEPATH)]),
-])
+@pytest.mark.parametrize(
+ "extra_config_files,expected",
+ [
+ # Extra config files specified
+ (
+ [CLI_SPECIFIED_FILEPATH],
+ [
+ os.path.abspath("setup.cfg"),
+ os.path.abspath("tox.ini"),
+ os.path.abspath(CLI_SPECIFIED_FILEPATH),
+ ],
+ ),
+ # Missing extra config files specified
+ (
+ [
+ CLI_SPECIFIED_FILEPATH,
+ "tests/fixtures/config_files/missing.ini",
+ ],
+ [
+ os.path.abspath("setup.cfg"),
+ os.path.abspath("tox.ini"),
+ os.path.abspath(CLI_SPECIFIED_FILEPATH),
+ ],
+ ),
+ ],
+)
def test_local_config_files(extra_config_files, expected):
"""Verify discovery of local config files."""
- finder = config.ConfigFileFinder('flake8', extra_config_files)
+ finder = config.ConfigFileFinder("flake8", extra_config_files)
assert list(finder.local_config_files()) == expected
def test_local_configs():
"""Verify we return a ConfigParser."""
- finder = config.ConfigFileFinder('flake8')
+ finder = config.ConfigFileFinder("flake8")
assert isinstance(finder.local_configs(), configparser.RawConfigParser)
-@pytest.mark.parametrize('files', [
- [BROKEN_CONFIG_PATH],
- [CLI_SPECIFIED_FILEPATH, BROKEN_CONFIG_PATH],
-])
+@pytest.mark.parametrize(
+ "files",
+ [
+ [BROKEN_CONFIG_PATH],
+ [CLI_SPECIFIED_FILEPATH, BROKEN_CONFIG_PATH],
+ ],
+)
def test_read_config_catches_broken_config_files(files):
"""Verify that we do not allow the exception to bubble up."""
_, parsed = config.ConfigFileFinder._read_config(*files)
@@ -82,40 +102,42 @@ def test_read_config_catches_broken_config_files(files):
def test_read_config_catches_decoding_errors(tmpdir):
"""Verify that we do not allow the exception to bubble up."""
- setup_cfg = tmpdir.join('setup.cfg')
+ setup_cfg = tmpdir.join("setup.cfg")
# pick bytes that are unlikely to decode
- setup_cfg.write_binary(b'[x]\ny = \x81\x8d\x90\x9d')
+ setup_cfg.write_binary(b"[x]\ny = \x81\x8d\x90\x9d")
_, parsed = config.ConfigFileFinder._read_config(setup_cfg.strpath)
assert parsed == []
def test_config_file_default_value():
"""Verify the default 'config_file' attribute value."""
- finder = config.ConfigFileFinder('flake8')
+ finder = config.ConfigFileFinder("flake8")
assert finder.config_file is None
def test_setting_config_file_value():
"""Verify the 'config_file' attribute matches constructed value."""
- config_file_value = 'flake8.ini'
- finder = config.ConfigFileFinder('flake8', config_file=config_file_value)
+ config_file_value = "flake8.ini"
+ finder = config.ConfigFileFinder("flake8", config_file=config_file_value)
assert finder.config_file == config_file_value
def test_ignore_config_files_default_value():
"""Verify the default 'ignore_config_files' attribute value."""
- finder = config.ConfigFileFinder('flake8')
+ finder = config.ConfigFileFinder("flake8")
assert finder.ignore_config_files is False
-@pytest.mark.parametrize('ignore_config_files_arg', [
- False,
- True,
-])
+@pytest.mark.parametrize(
+ "ignore_config_files_arg",
+ [
+ False,
+ True,
+ ],
+)
def test_setting_ignore_config_files_value(ignore_config_files_arg):
"""Verify the 'ignore_config_files' attribute matches constructed value."""
finder = config.ConfigFileFinder(
- 'flake8',
- ignore_config_files=ignore_config_files_arg
+ "flake8", ignore_config_files=ignore_config_files_arg
)
assert finder.ignore_config_files is ignore_config_files_arg
diff --git a/tests/unit/test_debug.py b/tests/unit/test_debug.py
index bc31fec..2da4bf8 100644
--- a/tests/unit/test_debug.py
+++ b/tests/unit/test_debug.py
@@ -12,51 +12,82 @@ def test_dependencies():
assert [] == debug.dependencies()
-@pytest.mark.parametrize('plugins, expected', [
- ([], []),
- ([manager.PluginVersion('pycodestyle', '2.0.0', False)],
- [{'plugin': 'pycodestyle', 'version': '2.0.0', 'is_local': False}]),
- ([manager.PluginVersion('pycodestyle', '2.0.0', False),
- manager.PluginVersion('mccabe', '0.5.9', False)],
- [{'plugin': 'mccabe', 'version': '0.5.9', 'is_local': False},
- {'plugin': 'pycodestyle', 'version': '2.0.0', 'is_local': False}]),
- ([manager.PluginVersion('pycodestyle', '2.0.0', False),
- manager.PluginVersion('my-local', '0.0.1', True),
- manager.PluginVersion('mccabe', '0.5.9', False)],
- [{'plugin': 'mccabe', 'version': '0.5.9', 'is_local': False},
- {'plugin': 'my-local', 'version': '0.0.1', 'is_local': True},
- {'plugin': 'pycodestyle', 'version': '2.0.0', 'is_local': False}]),
-])
+@pytest.mark.parametrize(
+ "plugins, expected",
+ [
+ ([], []),
+ (
+ [manager.PluginVersion("pycodestyle", "2.0.0", False)],
+ [
+ {
+ "plugin": "pycodestyle",
+ "version": "2.0.0",
+ "is_local": False,
+ }
+ ],
+ ),
+ (
+ [
+ manager.PluginVersion("pycodestyle", "2.0.0", False),
+ manager.PluginVersion("mccabe", "0.5.9", False),
+ ],
+ [
+ {"plugin": "mccabe", "version": "0.5.9", "is_local": False},
+ {
+ "plugin": "pycodestyle",
+ "version": "2.0.0",
+ "is_local": False,
+ },
+ ],
+ ),
+ (
+ [
+ manager.PluginVersion("pycodestyle", "2.0.0", False),
+ manager.PluginVersion("my-local", "0.0.1", True),
+ manager.PluginVersion("mccabe", "0.5.9", False),
+ ],
+ [
+ {"plugin": "mccabe", "version": "0.5.9", "is_local": False},
+ {"plugin": "my-local", "version": "0.0.1", "is_local": True},
+ {
+ "plugin": "pycodestyle",
+ "version": "2.0.0",
+ "is_local": False,
+ },
+ ],
+ ),
+ ],
+)
def test_plugins_from(plugins, expected):
"""Test that we format plugins appropriately."""
option_manager = mock.Mock(registered_plugins=set(plugins))
assert expected == debug.plugins_from(option_manager)
-@mock.patch('platform.python_implementation', return_value='CPython')
-@mock.patch('platform.python_version', return_value='3.5.3')
-@mock.patch('platform.system', return_value='Linux')
+@mock.patch("platform.python_implementation", return_value="CPython")
+@mock.patch("platform.python_version", return_value="3.5.3")
+@mock.patch("platform.system", return_value="Linux")
def test_information(system, pyversion, pyimpl):
"""Verify that we return all the information we care about."""
expected = {
- 'version': '3.1.0',
- 'plugins': [{'plugin': 'mccabe', 'version': '0.5.9',
- 'is_local': False},
- {'plugin': 'pycodestyle', 'version': '2.0.0',
- 'is_local': False}],
- 'dependencies': [],
- 'platform': {
- 'python_implementation': 'CPython',
- 'python_version': '3.5.3',
- 'system': 'Linux',
+ "version": "3.1.0",
+ "plugins": [
+ {"plugin": "mccabe", "version": "0.5.9", "is_local": False},
+ {"plugin": "pycodestyle", "version": "2.0.0", "is_local": False},
+ ],
+ "dependencies": [],
+ "platform": {
+ "python_implementation": "CPython",
+ "python_version": "3.5.3",
+ "system": "Linux",
},
}
option_manager = mock.Mock(
registered_plugins={
- manager.PluginVersion('pycodestyle', '2.0.0', False),
- manager.PluginVersion('mccabe', '0.5.9', False),
+ manager.PluginVersion("pycodestyle", "2.0.0", False),
+ manager.PluginVersion("mccabe", "0.5.9", False),
},
- version='3.1.0',
+ version="3.1.0",
)
assert expected == debug.information(option_manager)
pyimpl.assert_called_once_with()
@@ -64,14 +95,16 @@ def test_information(system, pyversion, pyimpl):
system.assert_called_once_with()
-@mock.patch('flake8.main.debug.print')
-@mock.patch('flake8.main.debug.information', return_value={})
-@mock.patch('json.dumps', return_value='{}')
+@mock.patch("flake8.main.debug.print")
+@mock.patch("flake8.main.debug.information", return_value={})
+@mock.patch("json.dumps", return_value="{}")
def test_print_information_no_plugins(dumps, information, print_mock):
"""Verify we print and exit only when we have plugins."""
option_manager = mock.Mock(registered_plugins=set())
action = debug.DebugAction(
- "--bug-report", dest="bug_report", option_manager=option_manager,
+ "--bug-report",
+ dest="bug_report",
+ option_manager=option_manager,
)
assert action(None, None, None, None) is None
assert dumps.called is False
@@ -79,21 +112,23 @@ def test_print_information_no_plugins(dumps, information, print_mock):
assert print_mock.called is False
-@mock.patch('flake8.main.debug.print')
-@mock.patch('flake8.main.debug.information', return_value={})
-@mock.patch('json.dumps', return_value='{}')
+@mock.patch("flake8.main.debug.print")
+@mock.patch("flake8.main.debug.information", return_value={})
+@mock.patch("json.dumps", return_value="{}")
def test_print_information(dumps, information, print_mock):
"""Verify we print and exit only when we have plugins."""
plugins = [
- manager.PluginVersion('pycodestyle', '2.0.0', False),
- manager.PluginVersion('mccabe', '0.5.9', False),
+ manager.PluginVersion("pycodestyle", "2.0.0", False),
+ manager.PluginVersion("mccabe", "0.5.9", False),
]
option_manager = mock.Mock(registered_plugins=set(plugins))
action = debug.DebugAction(
- "--bug-report", dest="bug_report", option_manager=option_manager,
+ "--bug-report",
+ dest="bug_report",
+ option_manager=option_manager,
)
with pytest.raises(SystemExit):
action(None, None, None, None)
- print_mock.assert_called_once_with('{}')
+ print_mock.assert_called_once_with("{}")
dumps.assert_called_once_with({}, indent=2, sort_keys=True)
information.assert_called_once_with(option_manager)
diff --git a/tests/unit/test_exceptions.py b/tests/unit/test_exceptions.py
index 89490fa..e9be495 100644
--- a/tests/unit/test_exceptions.py
+++ b/tests/unit/test_exceptions.py
@@ -7,21 +7,21 @@ from flake8 import exceptions
@pytest.mark.parametrize(
- 'err',
+ "err",
(
exceptions.FailedToLoadPlugin(
- plugin_name='plugin_name',
- exception=ValueError('boom!'),
+ plugin_name="plugin_name",
+ exception=ValueError("boom!"),
),
- exceptions.InvalidSyntax(exception=ValueError('Unexpected token: $')),
+ exceptions.InvalidSyntax(exception=ValueError("Unexpected token: $")),
exceptions.PluginRequestedUnknownParameters(
- plugin={'plugin_name': 'plugin_name'},
- exception=ValueError('boom!'),
+ plugin={"plugin_name": "plugin_name"},
+ exception=ValueError("boom!"),
),
exceptions.PluginExecutionFailed(
- plugin={'plugin_name': 'plugin_name'},
- exception=ValueError('boom!'),
- )
+ plugin={"plugin_name": "plugin_name"},
+ exception=ValueError("boom!"),
+ ),
),
)
def test_pickleable(err):
diff --git a/tests/unit/test_file_checker.py b/tests/unit/test_file_checker.py
index 57a47ab..f433ea6 100644
--- a/tests/unit/test_file_checker.py
+++ b/tests/unit/test_file_checker.py
@@ -7,7 +7,7 @@ import flake8
from flake8 import checker
-@mock.patch('flake8.processor.FileProcessor')
+@mock.patch("flake8.processor.FileProcessor")
def test_run_ast_checks_handles_SyntaxErrors(FileProcessor): # noqa: N802,N803
"""Stress our SyntaxError handling.
@@ -15,26 +15,31 @@ def test_run_ast_checks_handles_SyntaxErrors(FileProcessor): # noqa: N802,N803
"""
processor = mock.Mock(lines=[])
FileProcessor.return_value = processor
- processor.build_ast.side_effect = SyntaxError('Failed to build ast',
- ('', 1, 5, 'foo(\n'))
+ processor.build_ast.side_effect = SyntaxError(
+ "Failed to build ast", ("", 1, 5, "foo(\n")
+ )
file_checker = checker.FileChecker(__file__, checks={}, options=object())
- with mock.patch.object(file_checker, 'report') as report:
+ with mock.patch.object(file_checker, "report") as report:
file_checker.run_ast_checks()
report.assert_called_once_with(
- 'E999', 1, 3,
- 'SyntaxError: Failed to build ast',
+ "E999",
+ 1,
+ 3,
+ "SyntaxError: Failed to build ast",
)
-@mock.patch('flake8.checker.FileChecker._make_processor', return_value=None)
+@mock.patch("flake8.checker.FileChecker._make_processor", return_value=None)
def test_repr(*args):
"""Verify we generate a correct repr."""
file_checker = checker.FileChecker(
- 'example.py', checks={}, options=object(),
+ "example.py",
+ checks={},
+ options=object(),
)
- assert repr(file_checker) == 'FileChecker for example.py'
+ assert repr(file_checker) == "FileChecker for example.py"
def test_nonexistent_file():
@@ -50,7 +55,7 @@ def test_nonexistent_file():
def test_raises_exception_on_failed_plugin(tmp_path, default_options):
"""Checks that a failing plugin results in PluginExecutionFailed."""
- foobar = tmp_path / 'foobar.py'
+ foobar = tmp_path / "foobar.py"
foobar.write_text("I exist!") # Create temp file
plugin = {
"name": "failure",
@@ -60,6 +65,7 @@ def test_raises_exception_on_failed_plugin(tmp_path, default_options):
}
"""Verify a failing plugin results in an plugin error"""
fchecker = checker.FileChecker(
- str(foobar), checks=[], options=default_options)
+ str(foobar), checks=[], options=default_options
+ )
with pytest.raises(flake8.exceptions.PluginExecutionFailed):
fchecker.run_check(plugin)
diff --git a/tests/unit/test_file_processor.py b/tests/unit/test_file_processor.py
index ca7f4f6..789135a 100644
--- a/tests/unit/test_file_processor.py
+++ b/tests/unit/test_file_processor.py
@@ -17,7 +17,7 @@ def test_read_lines_splits_lines(default_options):
def _lines_from_file(tmpdir, contents, options):
- f = tmpdir.join('f.py')
+ f = tmpdir.join("f.py")
# be careful to write the bytes exactly to avoid newline munging
f.write_binary(contents)
return processor.FileProcessor(f.strpath, options).lines
@@ -26,111 +26,125 @@ def _lines_from_file(tmpdir, contents, options):
def test_read_lines_universal_newlines(tmpdir, default_options):
r"""Verify that line endings are translated to \n."""
lines = _lines_from_file(
- tmpdir, b'# coding: utf-8\r\nx = 1\r\n', default_options)
- assert lines == ['# coding: utf-8\n', 'x = 1\n']
+ tmpdir, b"# coding: utf-8\r\nx = 1\r\n", default_options
+ )
+ assert lines == ["# coding: utf-8\n", "x = 1\n"]
def test_read_lines_incorrect_utf_16(tmpdir, default_options):
"""Verify that an incorrectly encoded file is read as latin-1."""
lines = _lines_from_file(
- tmpdir, b'# coding: utf16\nx = 1\n', default_options)
- assert lines == ['# coding: utf16\n', 'x = 1\n']
+ tmpdir, b"# coding: utf16\nx = 1\n", default_options
+ )
+ assert lines == ["# coding: utf16\n", "x = 1\n"]
def test_read_lines_unknown_encoding(tmpdir, default_options):
"""Verify that an unknown encoding is still read as latin-1."""
lines = _lines_from_file(
- tmpdir, b'# coding: fake-encoding\nx = 1\n', default_options)
- assert lines == ['# coding: fake-encoding\n', 'x = 1\n']
+ tmpdir, b"# coding: fake-encoding\nx = 1\n", default_options
+ )
+ assert lines == ["# coding: fake-encoding\n", "x = 1\n"]
-@pytest.mark.parametrize('first_line', [
- '\xEF\xBB\xBF"""Module docstring."""\n',
- '\uFEFF"""Module docstring."""\n',
-])
+@pytest.mark.parametrize(
+ "first_line",
+ [
+ '\xEF\xBB\xBF"""Module docstring."""\n',
+ '\uFEFF"""Module docstring."""\n',
+ ],
+)
def test_strip_utf_bom(first_line, default_options):
r"""Verify that we strip '\xEF\xBB\xBF' from the first line."""
lines = [first_line]
- file_processor = processor.FileProcessor('-', default_options, lines[:])
+ file_processor = processor.FileProcessor("-", default_options, lines[:])
assert file_processor.lines != lines
assert file_processor.lines[0] == '"""Module docstring."""\n'
-@pytest.mark.parametrize('lines, expected', [
- (['\xEF\xBB\xBF"""Module docstring."""\n'], False),
- (['\uFEFF"""Module docstring."""\n'], False),
- (['#!/usr/bin/python', '# flake8 is great', 'a = 1'], False),
- (['#!/usr/bin/python', '# flake8: noqa', 'a = 1'], True),
- (['#!/usr/bin/python', '# flake8:noqa', 'a = 1'], True),
- (['# flake8: noqa', '#!/usr/bin/python', 'a = 1'], True),
- (['# flake8:noqa', '#!/usr/bin/python', 'a = 1'], True),
- (['#!/usr/bin/python', 'a = 1', '# flake8: noqa'], True),
- (['#!/usr/bin/python', 'a = 1', '# flake8:noqa'], True),
- (['#!/usr/bin/python', 'a = 1 # flake8: noqa'], False),
- (['#!/usr/bin/python', 'a = 1 # flake8:noqa'], False),
-])
+@pytest.mark.parametrize(
+ "lines, expected",
+ [
+ (['\xEF\xBB\xBF"""Module docstring."""\n'], False),
+ (['\uFEFF"""Module docstring."""\n'], False),
+ (["#!/usr/bin/python", "# flake8 is great", "a = 1"], False),
+ (["#!/usr/bin/python", "# flake8: noqa", "a = 1"], True),
+ (["#!/usr/bin/python", "# flake8:noqa", "a = 1"], True),
+ (["# flake8: noqa", "#!/usr/bin/python", "a = 1"], True),
+ (["# flake8:noqa", "#!/usr/bin/python", "a = 1"], True),
+ (["#!/usr/bin/python", "a = 1", "# flake8: noqa"], True),
+ (["#!/usr/bin/python", "a = 1", "# flake8:noqa"], True),
+ (["#!/usr/bin/python", "a = 1 # flake8: noqa"], False),
+ (["#!/usr/bin/python", "a = 1 # flake8:noqa"], False),
+ ],
+)
def test_should_ignore_file(lines, expected, default_options):
"""Verify that we ignore a file if told to."""
- file_processor = processor.FileProcessor('-', default_options, lines)
+ file_processor = processor.FileProcessor("-", default_options, lines)
assert file_processor.should_ignore_file() is expected
def test_should_ignore_file_to_handle_disable_noqa(default_options):
"""Verify that we ignore a file if told to."""
- lines = ['# flake8: noqa']
- file_processor = processor.FileProcessor('-', default_options, lines)
+ lines = ["# flake8: noqa"]
+ file_processor = processor.FileProcessor("-", default_options, lines)
assert file_processor.should_ignore_file() is True
default_options.disable_noqa = True
- file_processor = processor.FileProcessor('-', default_options, lines)
+ file_processor = processor.FileProcessor("-", default_options, lines)
assert file_processor.should_ignore_file() is False
-@mock.patch('flake8.utils.stdin_get_value')
+@mock.patch("flake8.utils.stdin_get_value")
def test_read_lines_from_stdin(stdin_get_value, default_options):
"""Verify that we use our own utility function to retrieve stdin."""
- stdin_get_value.return_value = ''
- processor.FileProcessor('-', default_options)
+ stdin_get_value.return_value = ""
+ processor.FileProcessor("-", default_options)
stdin_get_value.assert_called_once_with()
-@mock.patch('flake8.utils.stdin_get_value')
+@mock.patch("flake8.utils.stdin_get_value")
def test_stdin_filename_attribute(stdin_get_value, default_options):
"""Verify that we update the filename attribute."""
- stdin_get_value.return_value = ''
- file_processor = processor.FileProcessor('-', default_options)
- assert file_processor.filename == 'stdin'
+ stdin_get_value.return_value = ""
+ file_processor = processor.FileProcessor("-", default_options)
+ assert file_processor.filename == "stdin"
-@mock.patch('flake8.utils.stdin_get_value')
+@mock.patch("flake8.utils.stdin_get_value")
def test_read_lines_uses_display_name(stdin_get_value, default_options):
"""Verify that when processing stdin we use a display name if present."""
- default_options.stdin_display_name = 'display_name.py'
- stdin_get_value.return_value = ''
- file_processor = processor.FileProcessor('-', default_options)
- assert file_processor.filename == 'display_name.py'
+ default_options.stdin_display_name = "display_name.py"
+ stdin_get_value.return_value = ""
+ file_processor = processor.FileProcessor("-", default_options)
+ assert file_processor.filename == "display_name.py"
-@mock.patch('flake8.utils.stdin_get_value')
+@mock.patch("flake8.utils.stdin_get_value")
def test_read_lines_ignores_empty_display_name(
- stdin_get_value, default_options,
+ stdin_get_value,
+ default_options,
):
"""Verify that when processing stdin we use a display name if present."""
- stdin_get_value.return_value = ''
- default_options.stdin_display_name = ''
- file_processor = processor.FileProcessor('-', default_options)
- assert file_processor.filename == 'stdin'
+ stdin_get_value.return_value = ""
+ default_options.stdin_display_name = ""
+ file_processor = processor.FileProcessor("-", default_options)
+ assert file_processor.filename == "stdin"
def test_noqa_line_for(default_options):
"""Verify we grab the correct line from the cached lines."""
- file_processor = processor.FileProcessor('-', default_options, lines=[
- 'Line 1\n',
- 'Line 2\n',
- 'Line 3\n',
- ])
+ file_processor = processor.FileProcessor(
+ "-",
+ default_options,
+ lines=[
+ "Line 1\n",
+ "Line 2\n",
+ "Line 3\n",
+ ],
+ )
for i in range(1, 4):
- assert file_processor.noqa_line_for(i) == f'Line {i}\n'
+ assert file_processor.noqa_line_for(i) == f"Line {i}\n"
def test_noqa_line_for_continuation(default_options):
@@ -145,15 +159,15 @@ world
""" # 7
'''
lines = src.splitlines(True)
- file_processor = processor.FileProcessor('-', default_options, lines=lines)
+ file_processor = processor.FileProcessor("-", default_options, lines=lines)
assert file_processor.noqa_line_for(0) is None
- l_1_2 = 'from foo \\\n import bar # 2\n'
+ l_1_2 = "from foo \\\n import bar # 2\n"
assert file_processor.noqa_line_for(1) == l_1_2
assert file_processor.noqa_line_for(2) == l_1_2
- assert file_processor.noqa_line_for(3) == '\n'
+ assert file_processor.noqa_line_for(3) == "\n"
l_4_7 = 'x = """\nhello\nworld\n""" # 7\n'
for i in (4, 5, 6, 7):
@@ -164,76 +178,104 @@ world
def test_noqa_line_for_no_eol_at_end_of_file(default_options):
"""Verify that we properly handle noqa line at the end of the file."""
- src = 'from foo \\\nimport bar' # no end of file newline
+ src = "from foo \\\nimport bar" # no end of file newline
lines = src.splitlines(True)
- file_processor = processor.FileProcessor('-', default_options, lines=lines)
+ file_processor = processor.FileProcessor("-", default_options, lines=lines)
- l_1_2 = 'from foo \\\nimport bar'
+ l_1_2 = "from foo \\\nimport bar"
assert file_processor.noqa_line_for(1) == l_1_2
assert file_processor.noqa_line_for(2) == l_1_2
def test_next_line(default_options):
"""Verify we update the file_processor state for each new line."""
- file_processor = processor.FileProcessor('-', default_options, lines=[
- 'Line 1',
- 'Line 2',
- 'Line 3',
- ])
+ file_processor = processor.FileProcessor(
+ "-",
+ default_options,
+ lines=[
+ "Line 1",
+ "Line 2",
+ "Line 3",
+ ],
+ )
for i in range(1, 4):
- assert file_processor.next_line() == f'Line {i}'
+ assert file_processor.next_line() == f"Line {i}"
assert file_processor.line_number == i
-@pytest.mark.parametrize('params, args, expected_kwargs', [
- ({'blank_before': True, 'blank_lines': True},
- None,
- {'blank_before': 0, 'blank_lines': 0}),
- ({'noqa': True, 'fake': True},
- {'fake': 'foo'},
- {'noqa': False, 'fake': 'foo'}),
- ({'blank_before': True, 'blank_lines': True, 'noqa': True},
- {'blank_before': 10, 'blank_lines': 5, 'noqa': True},
- {'blank_before': 10, 'blank_lines': 5, 'noqa': True}),
- ({}, {'fake': 'foo'}, {'fake': 'foo'}),
- ({'non-existent': False}, {'fake': 'foo'}, {'fake': 'foo'}),
-])
+@pytest.mark.parametrize(
+ "params, args, expected_kwargs",
+ [
+ (
+ {"blank_before": True, "blank_lines": True},
+ None,
+ {"blank_before": 0, "blank_lines": 0},
+ ),
+ (
+ {"noqa": True, "fake": True},
+ {"fake": "foo"},
+ {"noqa": False, "fake": "foo"},
+ ),
+ (
+ {"blank_before": True, "blank_lines": True, "noqa": True},
+ {"blank_before": 10, "blank_lines": 5, "noqa": True},
+ {"blank_before": 10, "blank_lines": 5, "noqa": True},
+ ),
+ ({}, {"fake": "foo"}, {"fake": "foo"}),
+ ({"non-existent": False}, {"fake": "foo"}, {"fake": "foo"}),
+ ],
+)
def test_keyword_arguments_for(params, args, expected_kwargs, default_options):
"""Verify the keyword args are generated properly."""
- file_processor = processor.FileProcessor('-', default_options, lines=[
- 'Line 1',
- ])
+ file_processor = processor.FileProcessor(
+ "-",
+ default_options,
+ lines=[
+ "Line 1",
+ ],
+ )
kwargs_for = file_processor.keyword_arguments_for
assert kwargs_for(params, args) == expected_kwargs
def test_keyword_arguments_for_does_not_handle_attribute_errors(
- default_options,
+ default_options,
):
"""Verify we re-raise AttributeErrors."""
- file_processor = processor.FileProcessor('-', default_options, lines=[
- 'Line 1',
- ])
+ file_processor = processor.FileProcessor(
+ "-",
+ default_options,
+ lines=[
+ "Line 1",
+ ],
+ )
with pytest.raises(AttributeError):
- file_processor.keyword_arguments_for({'fake': True})
-
-
-@pytest.mark.parametrize('unsplit_line, expected_lines', [
- ('line', []),
- ('line 1\n', ['line 1']),
- ('line 1\nline 2\n', ['line 1', 'line 2']),
- ('line 1\n\nline 2\n', ['line 1', '', 'line 2']),
-])
+ file_processor.keyword_arguments_for({"fake": True})
+
+
+@pytest.mark.parametrize(
+ "unsplit_line, expected_lines",
+ [
+ ("line", []),
+ ("line 1\n", ["line 1"]),
+ ("line 1\nline 2\n", ["line 1", "line 2"]),
+ ("line 1\n\nline 2\n", ["line 1", "", "line 2"]),
+ ],
+)
def test_split_line(unsplit_line, expected_lines, default_options):
"""Verify the token line splitting."""
- file_processor = processor.FileProcessor('-', default_options, lines=[
- 'Line 1',
- ])
+ file_processor = processor.FileProcessor(
+ "-",
+ default_options,
+ lines=[
+ "Line 1",
+ ],
+ )
- token = (1, unsplit_line, (0, 0), (0, 0), '')
+ token = (1, unsplit_line, (0, 0), (0, 0), "")
actual_lines = list(file_processor.split_line(token))
assert expected_lines == actual_lines
@@ -242,9 +284,9 @@ def test_split_line(unsplit_line, expected_lines, default_options):
def test_build_ast(default_options):
"""Verify the logic for how we build an AST for plugins."""
- file_processor = processor.FileProcessor('-', default_options, lines=[
- 'a = 1\n'
- ])
+ file_processor = processor.FileProcessor(
+ "-", default_options, lines=["a = 1\n"]
+ )
module = file_processor.build_ast()
assert isinstance(module, ast.Module)
@@ -252,25 +294,25 @@ def test_build_ast(default_options):
def test_next_logical_line_updates_the_previous_logical_line(default_options):
"""Verify that we update our tracking of the previous logical line."""
- file_processor = processor.FileProcessor('-', default_options, lines=[
- 'a = 1\n'
- ])
+ file_processor = processor.FileProcessor(
+ "-", default_options, lines=["a = 1\n"]
+ )
file_processor.indent_level = 1
- file_processor.logical_line = 'a = 1'
- assert file_processor.previous_logical == ''
+ file_processor.logical_line = "a = 1"
+ assert file_processor.previous_logical == ""
assert file_processor.previous_indent_level == 0
file_processor.next_logical_line()
- assert file_processor.previous_logical == 'a = 1'
+ assert file_processor.previous_logical == "a = 1"
assert file_processor.previous_indent_level == 1
def test_visited_new_blank_line(default_options):
"""Verify we update the number of blank lines seen."""
- file_processor = processor.FileProcessor('-', default_options, lines=[
- 'a = 1\n'
- ])
+ file_processor = processor.FileProcessor(
+ "-", default_options, lines=["a = 1\n"]
+ )
assert file_processor.blank_lines == 0
file_processor.visited_new_blank_line()
@@ -279,9 +321,9 @@ def test_visited_new_blank_line(default_options):
def test_inside_multiline(default_options):
"""Verify we update the line number and reset multiline."""
- file_processor = processor.FileProcessor('-', default_options, lines=[
- 'a = 1\n'
- ])
+ file_processor = processor.FileProcessor(
+ "-", default_options, lines=["a = 1\n"]
+ )
assert file_processor.multiline is False
assert file_processor.line_number == 0
@@ -292,63 +334,87 @@ def test_inside_multiline(default_options):
assert file_processor.multiline is False
-@pytest.mark.parametrize('string, expected', [
- ('""', '""'),
- ("''", "''"),
- ('"a"', '"x"'),
- ("'a'", "'x'"),
- ('"x"', '"x"'),
- ("'x'", "'x'"),
- ('"abcdef"', '"xxxxxx"'),
- ("'abcdef'", "'xxxxxx'"),
- ('""""""', '""""""'),
- ("''''''", "''''''"),
- ('"""a"""', '"""x"""'),
- ("'''a'''", "'''x'''"),
- ('"""x"""', '"""x"""'),
- ("'''x'''", "'''x'''"),
- ('"""abcdef"""', '"""xxxxxx"""'),
- ("'''abcdef'''", "'''xxxxxx'''"),
- ('"""xxxxxx"""', '"""xxxxxx"""'),
- ("'''xxxxxx'''", "'''xxxxxx'''"),
-])
+@pytest.mark.parametrize(
+ "string, expected",
+ [
+ ('""', '""'),
+ ("''", "''"),
+ ('"a"', '"x"'),
+ ("'a'", "'x'"),
+ ('"x"', '"x"'),
+ ("'x'", "'x'"),
+ ('"abcdef"', '"xxxxxx"'),
+ ("'abcdef'", "'xxxxxx'"),
+ ('""""""', '""""""'),
+ ("''''''", "''''''"),
+ ('"""a"""', '"""x"""'),
+ ("'''a'''", "'''x'''"),
+ ('"""x"""', '"""x"""'),
+ ("'''x'''", "'''x'''"),
+ ('"""abcdef"""', '"""xxxxxx"""'),
+ ("'''abcdef'''", "'''xxxxxx'''"),
+ ('"""xxxxxx"""', '"""xxxxxx"""'),
+ ("'''xxxxxx'''", "'''xxxxxx'''"),
+ ],
+)
def test_mutate_string(string, expected, default_options):
"""Verify we appropriately mutate the string to sanitize it."""
actual = processor.mutate_string(string)
assert expected == actual
-@pytest.mark.parametrize('string, expected', [
- (' ', 4),
- (' ', 6),
- ('\t', 8),
- ('\t\t', 16),
- (' \t', 8),
- (' \t', 16),
-])
+@pytest.mark.parametrize(
+ "string, expected",
+ [
+ (" ", 4),
+ (" ", 6),
+ ("\t", 8),
+ ("\t\t", 16),
+ (" \t", 8),
+ (" \t", 16),
+ ],
+)
def test_expand_indent(string, expected):
"""Verify we correctly measure the amount of indentation."""
actual = processor.expand_indent(string)
assert expected == actual
-@pytest.mark.parametrize('token, log_string', [
- [(tokenize.COMMENT, '# this is a comment',
- (1, 0), # (start_row, start_column)
-      (1, 19),  # (end_row, end_column)
- '# this is a comment',),
- "l.1\t[:19]\tCOMMENT\t'# this is a comment'"],
- [(tokenize.COMMENT, '# this is a comment',
- (1, 5), # (start_row, start_column)
-      (1, 19),  # (end_row, end_column)
- '# this is a comment',),
- "l.1\t[5:19]\tCOMMENT\t'# this is a comment'"],
- [(tokenize.COMMENT, '# this is a comment',
- (1, 0), # (start_row, start_column)
-      (2, 19),  # (end_row, end_column)
- '# this is a comment',),
- "l.1\tl.2\tCOMMENT\t'# this is a comment'"],
-])
+@pytest.mark.parametrize(
+ "token, log_string",
+ [
+ [
+ (
+ tokenize.COMMENT,
+ "# this is a comment",
+ (1, 0), # (start_row, start_column)
+                (1, 19),  # (end_row, end_column)
+ "# this is a comment",
+ ),
+ "l.1\t[:19]\tCOMMENT\t'# this is a comment'",
+ ],
+ [
+ (
+ tokenize.COMMENT,
+ "# this is a comment",
+ (1, 5), # (start_row, start_column)
+                (1, 19),  # (end_row, end_column)
+ "# this is a comment",
+ ),
+ "l.1\t[5:19]\tCOMMENT\t'# this is a comment'",
+ ],
+ [
+ (
+ tokenize.COMMENT,
+ "# this is a comment",
+ (1, 0), # (start_row, start_column)
+                (2, 19),  # (end_row, end_column)
+ "# this is a comment",
+ ),
+ "l.1\tl.2\tCOMMENT\t'# this is a comment'",
+ ],
+ ],
+)
def test_log_token(token, log_string):
"""Verify we use the log object passed in."""
log = mock.Mock()
@@ -359,15 +425,18 @@ def test_log_token(token, log_string):
)
-@pytest.mark.parametrize('current_count, token_text, expected', [
- (0, '(', 1),
- (0, '[', 1),
- (0, '{', 1),
- (1, ')', 0),
- (1, ']', 0),
- (1, '}', 0),
- (10, '+', 10),
-])
+@pytest.mark.parametrize(
+ "current_count, token_text, expected",
+ [
+ (0, "(", 1),
+ (0, "[", 1),
+ (0, "{", 1),
+ (1, ")", 0),
+ (1, "]", 0),
+ (1, "}", 0),
+ (10, "+", 10),
+ ],
+)
def test_count_parentheses(current_count, token_text, expected):
"""Verify our arithmetic is correct."""
assert processor.count_parentheses(current_count, token_text) == expected
diff --git a/tests/unit/test_filenameonly_formatter.py b/tests/unit/test_filenameonly_formatter.py
index 8d0c88f..7dda50b 100644
--- a/tests/unit/test_filenameonly_formatter.py
+++ b/tests/unit/test_filenameonly_formatter.py
@@ -7,8 +7,8 @@ from flake8.formatting import default
def options(**kwargs):
"""Create an argparse.Namespace instance."""
- kwargs.setdefault('output_file', None)
- kwargs.setdefault('tee', False)
+ kwargs.setdefault("output_file", None)
+ kwargs.setdefault("tee", False)
return argparse.Namespace(**kwargs)
@@ -18,22 +18,23 @@ def test_caches_filenames_already_printed():
assert formatter.filenames_already_printed == set()
formatter.format(
- style_guide.Violation('code', 'file.py', 1, 1, 'text', 'l'))
- assert formatter.filenames_already_printed == {'file.py'}
+ style_guide.Violation("code", "file.py", 1, 1, "text", "l")
+ )
+ assert formatter.filenames_already_printed == {"file.py"}
def test_only_returns_a_string_once_from_format():
"""Verify format ignores the second error with the same filename."""
formatter = default.FilenameOnly(options())
- error = style_guide.Violation('code', 'file.py', 1, 1, 'text', '1')
+ error = style_guide.Violation("code", "file.py", 1, 1, "text", "1")
- assert formatter.format(error) == 'file.py'
+ assert formatter.format(error) == "file.py"
assert formatter.format(error) is None
def test_show_source_returns_nothing():
"""Verify show_source returns nothing."""
formatter = default.FilenameOnly(options())
- error = style_guide.Violation('code', 'file.py', 1, 1, 'text', '1')
+ error = style_guide.Violation("code", "file.py", 1, 1, "text", "1")
assert formatter.show_source(error) is None
diff --git a/tests/unit/test_get_local_plugins.py b/tests/unit/test_get_local_plugins.py
index 0817550..7e7b318 100644
--- a/tests/unit/test_get_local_plugins.py
+++ b/tests/unit/test_get_local_plugins.py
@@ -23,8 +23,8 @@ def test_get_local_plugins_uses_cli_config():
config_finder = mock.MagicMock()
config_finder.cli_config.return_value = config_obj
config_finder.ignore_config_files = False
- config_obj.get.return_value = ''
- config_file_value = 'foo.ini'
+ config_obj.get.return_value = ""
+ config_file_value = "foo.ini"
config_finder.config_file = config_file_value
config.get_local_plugins(config_finder)
@@ -34,12 +34,12 @@ def test_get_local_plugins_uses_cli_config():
def test_get_local_plugins():
"""Verify get_local_plugins returns expected plugins."""
- config_fixture_path = 'tests/fixtures/config_files/local-plugin.ini'
- config_finder = config.ConfigFileFinder('flake8')
+ config_fixture_path = "tests/fixtures/config_files/local-plugin.ini"
+ config_finder = config.ConfigFileFinder("flake8")
- with mock.patch.object(config_finder, 'local_config_files') as localcfs:
+ with mock.patch.object(config_finder, "local_config_files") as localcfs:
localcfs.return_value = [config_fixture_path]
local_plugins = config.get_local_plugins(config_finder)
- assert local_plugins.extension == ['XE = test_plugins:ExtensionTestPlugin']
- assert local_plugins.report == ['XR = test_plugins:ReportTestPlugin']
+ assert local_plugins.extension == ["XE = test_plugins:ExtensionTestPlugin"]
+ assert local_plugins.report == ["XR = test_plugins:ReportTestPlugin"]
diff --git a/tests/unit/test_legacy_api.py b/tests/unit/test_legacy_api.py
index a1e7d50..1dcdeb6 100644
--- a/tests/unit/test_legacy_api.py
+++ b/tests/unit/test_legacy_api.py
@@ -21,12 +21,14 @@ def test_get_style_guide():
)
mockedapp = mock.Mock()
mockedapp.parse_preliminary_options.return_value = (prelim_opts, [])
- mockedapp.program = 'flake8'
- with mock.patch('flake8.api.legacy.config.ConfigFileFinder') as mock_config_finder: # noqa: E501
+ mockedapp.program = "flake8"
+ with mock.patch(
+ "flake8.api.legacy.config.ConfigFileFinder"
+ ) as mock_config_finder: # noqa: E501
config_finder = ConfigFileFinder(mockedapp.program)
mock_config_finder.return_value = config_finder
- with mock.patch('flake8.main.application.Application') as application:
+ with mock.patch("flake8.main.application.Application") as application:
application.return_value = mockedapp
style_guide = api.get_style_guide()
@@ -35,7 +37,8 @@ def test_get_style_guide():
mockedapp.find_plugins.assert_called_once_with(config_finder)
mockedapp.register_plugin_options.assert_called_once_with()
mockedapp.parse_configuration_and_cli.assert_called_once_with(
- config_finder, [])
+ config_finder, []
+ )
mockedapp.make_formatter.assert_called_once_with()
mockedapp.make_guide.assert_called_once_with()
mockedapp.make_file_checker_manager.assert_called_once_with()
@@ -45,22 +48,22 @@ def test_get_style_guide():
def test_styleguide_options():
"""Show that we proxy the StyleGuide.options attribute."""
app = mock.Mock()
- app.options = 'options'
+ app.options = "options"
style_guide = api.StyleGuide(app)
- assert style_guide.options == 'options'
+ assert style_guide.options == "options"
def test_styleguide_paths():
"""Show that we proxy the StyleGuide.paths attribute."""
app = mock.Mock()
- app.paths = 'paths'
+ app.paths = "paths"
style_guide = api.StyleGuide(app)
- assert style_guide.paths == 'paths'
+ assert style_guide.paths == "paths"
def test_styleguide_check_files():
"""Verify we call the right application methods."""
- paths = ['foo', 'bar']
+ paths = ["foo", "bar"]
app = mock.Mock()
style_guide = api.StyleGuide(app)
report = style_guide.check_files(paths)
@@ -80,8 +83,8 @@ def test_styleguide_excluded():
file_checker_manager = app.file_checker_manager = mock.Mock()
style_guide = api.StyleGuide(app)
- style_guide.excluded('file.py')
- file_checker_manager.is_path_excluded.assert_called_once_with('file.py')
+ style_guide.excluded("file.py")
+ file_checker_manager.is_path_excluded.assert_called_once_with("file.py")
def test_styleguide_excluded_with_parent():
@@ -95,10 +98,10 @@ def test_styleguide_excluded_with_parent():
file_checker_manager.is_path_excluded.return_value = False
style_guide = api.StyleGuide(app)
- style_guide.excluded('file.py', 'parent')
+ style_guide.excluded("file.py", "parent")
assert file_checker_manager.is_path_excluded.call_args_list == [
- mock.call('file.py'),
- mock.call(os.path.join('parent', 'file.py')),
+ mock.call("file.py"),
+ mock.call(os.path.join("parent", "file.py")),
]
@@ -123,7 +126,7 @@ def test_styleguide_init_report_with_non_subclass():
def test_styleguide_init_report():
"""Verify we do the right incantation for the Application."""
- app = mock.Mock(guide='fake')
+ app = mock.Mock(guide="fake")
style_guide = api.StyleGuide(app)
class FakeFormatter(formatter.BaseFormatter):
@@ -140,16 +143,16 @@ def test_styleguide_input_file():
"""Verify we call StyleGuide.check_files with the filename."""
app = mock.Mock()
style_guide = api.StyleGuide(app)
- with mock.patch.object(style_guide, 'check_files') as check_files:
- style_guide.input_file('file.py')
- check_files.assert_called_once_with(['file.py'])
+ with mock.patch.object(style_guide, "check_files") as check_files:
+ style_guide.input_file("file.py")
+ check_files.assert_called_once_with(["file.py"])
def test_report_total_errors():
"""Verify total errors is just a proxy attribute."""
- app = mock.Mock(result_count='Fake count')
+ app = mock.Mock(result_count="Fake count")
report = api.Report(app)
- assert report.total_errors == 'Fake count'
+ assert report.total_errors == "Fake count"
def test_report_get_statistics():
@@ -160,5 +163,5 @@ def test_report_get_statistics():
app = mock.Mock(guide=style_guide)
report = api.Report(app)
- assert report.get_statistics('E') == []
- stats.statistics_for.assert_called_once_with('E')
+ assert report.get_statistics("E") == []
+ stats.statistics_for.assert_called_once_with("E")
diff --git a/tests/unit/test_merged_config_parser.py b/tests/unit/test_merged_config_parser.py
index 8def01b..b19291c 100644
--- a/tests/unit/test_merged_config_parser.py
+++ b/tests/unit/test_merged_config_parser.py
@@ -11,47 +11,54 @@ from flake8.options import manager
@pytest.fixture
def optmanager():
"""Generate an OptionManager with simple values."""
- return manager.OptionManager(prog='flake8', version='3.0.0a1')
+ return manager.OptionManager(prog="flake8", version="3.0.0a1")
@pytest.fixture
def config_finder():
"""Generate a simple ConfigFileFinder."""
- return config.ConfigFileFinder('flake8')
+ return config.ConfigFileFinder("flake8")
def test_parse_cli_config(optmanager, config_finder):
"""Parse the specified config file as a cli config file."""
- optmanager.add_option('--exclude', parse_from_config=True,
- comma_separated_list=True,
- normalize_paths=True)
- optmanager.add_option('--ignore', parse_from_config=True,
- comma_separated_list=True)
- optmanager.add_option('--quiet', parse_from_config=True,
- action='count')
+ optmanager.add_option(
+ "--exclude",
+ parse_from_config=True,
+ comma_separated_list=True,
+ normalize_paths=True,
+ )
+ optmanager.add_option(
+ "--ignore", parse_from_config=True, comma_separated_list=True
+ )
+ optmanager.add_option("--quiet", parse_from_config=True, action="count")
parser = config.MergedConfigParser(optmanager, config_finder)
- config_file = 'tests/fixtures/config_files/cli-specified.ini'
+ config_file = "tests/fixtures/config_files/cli-specified.ini"
parsed_config = parser.parse_cli_config(config_file)
config_dir = os.path.dirname(config_file)
assert parsed_config == {
- 'ignore': ['E123', 'W234', 'E111'],
- 'exclude': [
- os.path.abspath(os.path.join(config_dir, 'foo/')),
- os.path.abspath(os.path.join(config_dir, 'bar/')),
- os.path.abspath(os.path.join(config_dir, 'bogus/')),
+ "ignore": ["E123", "W234", "E111"],
+ "exclude": [
+ os.path.abspath(os.path.join(config_dir, "foo/")),
+ os.path.abspath(os.path.join(config_dir, "bar/")),
+ os.path.abspath(os.path.join(config_dir, "bogus/")),
],
- 'quiet': 1,
+ "quiet": 1,
}
-@pytest.mark.parametrize('filename,is_configured_by', [
- ('tests/fixtures/config_files/cli-specified.ini', True),
- ('tests/fixtures/config_files/no-flake8-section.ini', False),
-])
+@pytest.mark.parametrize(
+ "filename,is_configured_by",
+ [
+ ("tests/fixtures/config_files/cli-specified.ini", True),
+ ("tests/fixtures/config_files/no-flake8-section.ini", False),
+ ],
+)
def test_is_configured_by(
- filename, is_configured_by, optmanager, config_finder):
+ filename, is_configured_by, optmanager, config_finder
+):
"""Verify the behaviour of the is_configured_by method."""
parsed_config, _ = config.ConfigFileFinder._read_config(filename)
parser = config.MergedConfigParser(optmanager, config_finder)
@@ -61,83 +68,94 @@ def test_is_configured_by(
def test_parse_user_config(optmanager, config_finder):
"""Verify parsing of user config files."""
- optmanager.add_option('--exclude', parse_from_config=True,
- comma_separated_list=True,
- normalize_paths=True)
- optmanager.add_option('--ignore', parse_from_config=True,
- comma_separated_list=True)
- optmanager.add_option('--quiet', parse_from_config=True,
- action='count')
+ optmanager.add_option(
+ "--exclude",
+ parse_from_config=True,
+ comma_separated_list=True,
+ normalize_paths=True,
+ )
+ optmanager.add_option(
+ "--ignore", parse_from_config=True, comma_separated_list=True
+ )
+ optmanager.add_option("--quiet", parse_from_config=True, action="count")
parser = config.MergedConfigParser(optmanager, config_finder)
- config_finder.user_config_file = ('tests/fixtures/config_files/'
- 'cli-specified.ini')
+ config_finder.user_config_file = (
+ "tests/fixtures/config_files/" "cli-specified.ini"
+ )
parsed_config = parser.parse_user_config()
assert parsed_config == {
- 'ignore': ['E123', 'W234', 'E111'],
- 'exclude': [
- os.path.abspath('foo/'),
- os.path.abspath('bar/'),
- os.path.abspath('bogus/'),
+ "ignore": ["E123", "W234", "E111"],
+ "exclude": [
+ os.path.abspath("foo/"),
+ os.path.abspath("bar/"),
+ os.path.abspath("bogus/"),
],
- 'quiet': 1,
+ "quiet": 1,
}
def test_parse_local_config(optmanager, config_finder):
"""Verify parsing of local config files."""
- optmanager.add_option('--exclude', parse_from_config=True,
- comma_separated_list=True,
- normalize_paths=True)
- optmanager.add_option('--ignore', parse_from_config=True,
- comma_separated_list=True)
- optmanager.add_option('--quiet', parse_from_config=True,
- action='count')
+ optmanager.add_option(
+ "--exclude",
+ parse_from_config=True,
+ comma_separated_list=True,
+ normalize_paths=True,
+ )
+ optmanager.add_option(
+ "--ignore", parse_from_config=True, comma_separated_list=True
+ )
+ optmanager.add_option("--quiet", parse_from_config=True, action="count")
parser = config.MergedConfigParser(optmanager, config_finder)
- with mock.patch.object(config_finder, 'local_config_files') as localcfs:
+ with mock.patch.object(config_finder, "local_config_files") as localcfs:
localcfs.return_value = [
- 'tests/fixtures/config_files/cli-specified.ini'
+ "tests/fixtures/config_files/cli-specified.ini"
]
parsed_config = parser.parse_local_config()
assert parsed_config == {
- 'ignore': ['E123', 'W234', 'E111'],
- 'exclude': [
- os.path.abspath('foo/'),
- os.path.abspath('bar/'),
- os.path.abspath('bogus/'),
+ "ignore": ["E123", "W234", "E111"],
+ "exclude": [
+ os.path.abspath("foo/"),
+ os.path.abspath("bar/"),
+ os.path.abspath("bogus/"),
],
- 'quiet': 1,
+ "quiet": 1,
}
def test_merge_user_and_local_config(optmanager, config_finder):
"""Verify merging of parsed user and local config files."""
- optmanager.add_option('--exclude', parse_from_config=True,
- comma_separated_list=True,
- normalize_paths=True)
- optmanager.add_option('--ignore', parse_from_config=True,
- comma_separated_list=True)
- optmanager.add_option('--select', parse_from_config=True,
- comma_separated_list=True)
+ optmanager.add_option(
+ "--exclude",
+ parse_from_config=True,
+ comma_separated_list=True,
+ normalize_paths=True,
+ )
+ optmanager.add_option(
+ "--ignore", parse_from_config=True, comma_separated_list=True
+ )
+ optmanager.add_option(
+ "--select", parse_from_config=True, comma_separated_list=True
+ )
parser = config.MergedConfigParser(optmanager, config_finder)
- with mock.patch.object(config_finder, 'local_config_files') as localcfs:
+ with mock.patch.object(config_finder, "local_config_files") as localcfs:
localcfs.return_value = [
- 'tests/fixtures/config_files/local-config.ini'
+ "tests/fixtures/config_files/local-config.ini"
]
- config_finder.user_config_file = ('tests/fixtures/config_files/'
- 'user-config.ini')
+ config_finder.user_config_file = (
+ "tests/fixtures/config_files/" "user-config.ini"
+ )
parsed_config = parser.merge_user_and_local_config()
assert parsed_config == {
- 'exclude': [
- os.path.abspath('docs/')
- ],
- 'ignore': ['D203'],
- 'select': ['E', 'W', 'F'],
+ "exclude": [os.path.abspath("docs/")],
+ "ignore": ["D203"],
+ "select": ["E", "W", "F"],
}
@@ -154,7 +172,7 @@ def test_parse_isolates_config(optmanager):
def test_parse_uses_cli_config(optmanager):
"""Verify behaviour of the parse method with a specified config."""
- config_file_value = 'foo.ini'
+ config_file_value = "foo.ini"
config_finder = mock.MagicMock()
config_finder.config_file = config_file_value
config_finder.ignore_config_files = False
@@ -164,62 +182,74 @@ def test_parse_uses_cli_config(optmanager):
config_finder.cli_config.assert_called_once_with(config_file_value)
-@pytest.mark.parametrize('config_fixture_path', [
- 'tests/fixtures/config_files/cli-specified.ini',
- 'tests/fixtures/config_files/cli-specified-with-inline-comments.ini',
- 'tests/fixtures/config_files/cli-specified-without-inline-comments.ini',
-])
+@pytest.mark.parametrize(
+ "config_fixture_path",
+ [
+ "tests/fixtures/config_files/cli-specified.ini",
+ "tests/fixtures/config_files/cli-specified-with-inline-comments.ini",
+ "tests/fixtures/config_files/cli-specified-without-inline-comments.ini", # noqa: E501
+ ],
+)
def test_parsed_configs_are_equivalent(
- optmanager, config_finder, config_fixture_path):
+ optmanager, config_finder, config_fixture_path
+):
"""Verify the each file matches the expected parsed output.
This is used to ensure our documented behaviour does not regress.
"""
- optmanager.add_option('--exclude', parse_from_config=True,
- comma_separated_list=True,
- normalize_paths=True)
- optmanager.add_option('--ignore', parse_from_config=True,
- comma_separated_list=True)
+ optmanager.add_option(
+ "--exclude",
+ parse_from_config=True,
+ comma_separated_list=True,
+ normalize_paths=True,
+ )
+ optmanager.add_option(
+ "--ignore", parse_from_config=True, comma_separated_list=True
+ )
parser = config.MergedConfigParser(optmanager, config_finder)
- with mock.patch.object(config_finder, 'local_config_files') as localcfs:
+ with mock.patch.object(config_finder, "local_config_files") as localcfs:
localcfs.return_value = [config_fixture_path]
- with mock.patch.object(config_finder,
- 'user_config_file') as usercf:
- usercf.return_value = ''
+ with mock.patch.object(config_finder, "user_config_file") as usercf:
+ usercf.return_value = ""
parsed_config = parser.merge_user_and_local_config()
- assert parsed_config['ignore'] == ['E123', 'W234', 'E111']
- assert parsed_config['exclude'] == [
- os.path.abspath('foo/'),
- os.path.abspath('bar/'),
- os.path.abspath('bogus/'),
+ assert parsed_config["ignore"] == ["E123", "W234", "E111"]
+ assert parsed_config["exclude"] == [
+ os.path.abspath("foo/"),
+ os.path.abspath("bar/"),
+ os.path.abspath("bogus/"),
]
-@pytest.mark.parametrize('config_file', [
- 'tests/fixtures/config_files/config-with-hyphenated-options.ini'
-])
+@pytest.mark.parametrize(
+ "config_file",
+ ["tests/fixtures/config_files/config-with-hyphenated-options.ini"],
+)
def test_parsed_hyphenated_and_underscored_names(
- optmanager, config_finder, config_file):
+ optmanager, config_finder, config_file
+):
"""Verify we find hyphenated option names as well as underscored.
This tests for options like --max-line-length and --enable-extensions
which are able to be specified either as max-line-length or
max_line_length in our config files.
"""
- optmanager.add_option('--max-line-length', parse_from_config=True,
- type=int)
- optmanager.add_option('--enable-extensions', parse_from_config=True,
- comma_separated_list=True)
+ optmanager.add_option(
+ "--max-line-length", parse_from_config=True, type=int
+ )
+ optmanager.add_option(
+ "--enable-extensions",
+ parse_from_config=True,
+ comma_separated_list=True,
+ )
parser = config.MergedConfigParser(optmanager, config_finder)
- with mock.patch.object(config_finder, 'local_config_files') as localcfs:
+ with mock.patch.object(config_finder, "local_config_files") as localcfs:
localcfs.return_value = [config_file]
- with mock.patch.object(config_finder,
- 'user_config_file') as usercf:
- usercf.return_value = ''
+ with mock.patch.object(config_finder, "user_config_file") as usercf:
+ usercf.return_value = ""
parsed_config = parser.merge_user_and_local_config()
- assert parsed_config['max_line_length'] == 110
- assert parsed_config['enable_extensions'] == ['H101', 'H235']
+ assert parsed_config["max_line_length"] == 110
+ assert parsed_config["enable_extensions"] == ["H101", "H235"]
diff --git a/tests/unit/test_nothing_formatter.py b/tests/unit/test_nothing_formatter.py
index 85a2e76..d7cbea6 100644
--- a/tests/unit/test_nothing_formatter.py
+++ b/tests/unit/test_nothing_formatter.py
@@ -7,15 +7,15 @@ from flake8.formatting import default
def options(**kwargs):
"""Create an argparse.Namespace instance."""
- kwargs.setdefault('output_file', None)
- kwargs.setdefault('tee', False)
+ kwargs.setdefault("output_file", None)
+ kwargs.setdefault("tee", False)
return argparse.Namespace(**kwargs)
def test_format_returns_nothing():
"""Verify Nothing.format returns None."""
formatter = default.Nothing(options())
- error = style_guide.Violation('code', 'file.py', 1, 1, 'text', '1')
+ error = style_guide.Violation("code", "file.py", 1, 1, "text", "1")
assert formatter.format(error) is None
@@ -23,6 +23,6 @@ def test_format_returns_nothing():
def test_show_source_returns_nothing():
"""Verify Nothing.show_source returns None."""
formatter = default.Nothing(options())
- error = style_guide.Violation('code', 'file.py', 1, 1, 'text', '1')
+ error = style_guide.Violation("code", "file.py", 1, 1, "text", "1")
assert formatter.show_source(error) is None
diff --git a/tests/unit/test_option.py b/tests/unit/test_option.py
index fc0e288..52aef27 100644
--- a/tests/unit/test_option.py
+++ b/tests/unit/test_option.py
@@ -10,9 +10,9 @@ from flake8.options import manager
def test_to_argparse():
"""Test conversion to an argparse arguments."""
opt = manager.Option(
- short_option_name='-t',
- long_option_name='--test',
- action='count',
+ short_option_name="-t",
+ long_option_name="--test",
+ action="count",
parse_from_config=True,
normalize_paths=True,
)
@@ -20,42 +20,44 @@ def test_to_argparse():
assert opt.parse_from_config is True
args, kwargs = opt.to_argparse()
- assert args == ['-t', '--test']
- assert kwargs == {'action': 'count', 'type': mock.ANY}
- assert isinstance(kwargs['type'], functools.partial)
+ assert args == ["-t", "--test"]
+ assert kwargs == {"action": "count", "type": mock.ANY}
+ assert isinstance(kwargs["type"], functools.partial)
def test_to_optparse():
"""Test that .to_optparse() produces a useful error message."""
with pytest.raises(AttributeError) as excinfo:
- manager.Option('--foo').to_optparse
- msg, = excinfo.value.args
- assert msg == 'to_optparse: flake8 now uses argparse'
+ manager.Option("--foo").to_optparse
+ (msg,) = excinfo.value.args
+ assert msg == "to_optparse: flake8 now uses argparse"
def test_to_argparse_creates_an_option_as_we_expect():
"""Show that we pass all keyword args to argparse."""
- opt = manager.Option('-t', '--test', action='count')
+ opt = manager.Option("-t", "--test", action="count")
args, kwargs = opt.to_argparse()
- assert args == ['-t', '--test']
- assert kwargs == {'action': 'count'}
+ assert args == ["-t", "--test"]
+ assert kwargs == {"action": "count"}
def test_config_name_generation():
"""Show that we generate the config name deterministically."""
- opt = manager.Option(long_option_name='--some-very-long-option-name',
- parse_from_config=True)
+ opt = manager.Option(
+ long_option_name="--some-very-long-option-name",
+ parse_from_config=True,
+ )
- assert opt.config_name == 'some_very_long_option_name'
+ assert opt.config_name == "some_very_long_option_name"
def test_config_name_needs_long_option_name():
"""Show that we error out if the Option should be parsed from config."""
with pytest.raises(ValueError):
- manager.Option('-s', parse_from_config=True)
+ manager.Option("-s", parse_from_config=True)
def test_dest_is_not_overridden():
"""Show that we do not override custom destinations."""
- opt = manager.Option('-s', '--short', dest='something_not_short')
- assert opt.dest == 'something_not_short'
+ opt = manager.Option("-s", "--short", dest="something_not_short")
+ assert opt.dest == "something_not_short"
diff --git a/tests/unit/test_option_manager.py b/tests/unit/test_option_manager.py
index 4dcbaa8..93f94e9 100644
--- a/tests/unit/test_option_manager.py
+++ b/tests/unit/test_option_manager.py
@@ -9,13 +9,13 @@ from flake8 import utils
from flake8.main.options import JobsArgument
from flake8.options import manager
-TEST_VERSION = '3.0.0b1'
+TEST_VERSION = "3.0.0b1"
@pytest.fixture
def optmanager():
"""Generate a simple OptionManager with default test arguments."""
- return manager.OptionManager(prog='flake8', version=TEST_VERSION)
+ return manager.OptionManager(prog="flake8", version=TEST_VERSION)
def test_option_manager_creates_option_parser(optmanager):
@@ -27,30 +27,29 @@ def test_option_manager_including_parent_options():
"""Verify parent options are included in the parsed options."""
# GIVEN
parent_parser = argparse.ArgumentParser(add_help=False)
- parent_parser.add_argument('--parent')
+ parent_parser.add_argument("--parent")
# WHEN
optmanager = manager.OptionManager(
- prog='flake8',
- version=TEST_VERSION,
- parents=[parent_parser])
- option, _ = optmanager.parse_args(['--parent', 'foo'])
+ prog="flake8", version=TEST_VERSION, parents=[parent_parser]
+ )
+ option, _ = optmanager.parse_args(["--parent", "foo"])
# THEN
- assert option.parent == 'foo'
+ assert option.parent == "foo"
def test_parse_args_forwarding_default_values(optmanager):
"""Verify default provided values are present in the final result."""
- namespace = argparse.Namespace(foo='bar')
+ namespace = argparse.Namespace(foo="bar")
options, args = optmanager.parse_args([], namespace)
- assert options.foo == 'bar'
+ assert options.foo == "bar"
def test_parse_args_forwarding_type_coercion(optmanager):
"""Verify default provided values are type converted from add_option."""
- optmanager.add_option('--foo', type=int)
- namespace = argparse.Namespace(foo='5')
+ optmanager.add_option("--foo", type=int)
+ namespace = argparse.Namespace(foo="5")
options, args = optmanager.parse_args([], namespace)
assert options.foo == 5
@@ -60,8 +59,8 @@ def test_add_option_short_option_only(optmanager):
assert optmanager.options == []
assert optmanager.config_options_dict == {}
- optmanager.add_option('-s', help='Test short opt')
- assert optmanager.options[0].short_option_name == '-s'
+ optmanager.add_option("-s", help="Test short opt")
+ assert optmanager.options[0].short_option_name == "-s"
def test_add_option_long_option_only(optmanager):
@@ -69,9 +68,9 @@ def test_add_option_long_option_only(optmanager):
assert optmanager.options == []
assert optmanager.config_options_dict == {}
- optmanager.add_option('--long', help='Test long opt')
+ optmanager.add_option("--long", help="Test long opt")
assert optmanager.options[0].short_option_name is manager._ARG.NO
- assert optmanager.options[0].long_option_name == '--long'
+ assert optmanager.options[0].long_option_name == "--long"
def test_add_short_and_long_option_names(optmanager):
@@ -79,9 +78,9 @@ def test_add_short_and_long_option_names(optmanager):
assert optmanager.options == []
assert optmanager.config_options_dict == {}
- optmanager.add_option('-b', '--both', help='Test both opts')
- assert optmanager.options[0].short_option_name == '-b'
- assert optmanager.options[0].long_option_name == '--both'
+ optmanager.add_option("-b", "--both", help="Test both opts")
+ assert optmanager.options[0].short_option_name == "-b"
+ assert optmanager.options[0].long_option_name == "--both"
def test_add_option_with_custom_args(optmanager):
@@ -89,11 +88,11 @@ def test_add_option_with_custom_args(optmanager):
assert optmanager.options == []
assert optmanager.config_options_dict == {}
- optmanager.add_option('--parse', parse_from_config=True)
- optmanager.add_option('--commas', comma_separated_list=True)
- optmanager.add_option('--files', normalize_paths=True)
+ optmanager.add_option("--parse", parse_from_config=True)
+ optmanager.add_option("--commas", comma_separated_list=True)
+ optmanager.add_option("--files", normalize_paths=True)
- attrs = ['parse_from_config', 'comma_separated_list', 'normalize_paths']
+ attrs = ["parse_from_config", "comma_separated_list", "normalize_paths"]
for option, attr in zip(optmanager.options, attrs):
assert getattr(option, attr) is True
@@ -103,10 +102,10 @@ def test_parse_args_normalize_path(optmanager):
assert optmanager.options == []
assert optmanager.config_options_dict == {}
- optmanager.add_option('--config', normalize_paths=True)
+ optmanager.add_option("--config", normalize_paths=True)
- options, args = optmanager.parse_args(['--config', '../config.ini'])
- assert options.config == os.path.abspath('../config.ini')
+ options, args = optmanager.parse_args(["--config", "../config.ini"])
+ assert options.config == os.path.abspath("../config.ini")
def test_parse_args_handles_comma_separated_defaults(optmanager):
@@ -114,11 +113,12 @@ def test_parse_args_handles_comma_separated_defaults(optmanager):
assert optmanager.options == []
assert optmanager.config_options_dict == {}
- optmanager.add_option('--exclude', default='E123,W234',
- comma_separated_list=True)
+ optmanager.add_option(
+ "--exclude", default="E123,W234", comma_separated_list=True
+ )
options, args = optmanager.parse_args([])
- assert options.exclude == ['E123', 'W234']
+ assert options.exclude == ["E123", "W234"]
def test_parse_args_handles_comma_separated_lists(optmanager):
@@ -126,11 +126,12 @@ def test_parse_args_handles_comma_separated_lists(optmanager):
assert optmanager.options == []
assert optmanager.config_options_dict == {}
- optmanager.add_option('--exclude', default='E123,W234',
- comma_separated_list=True)
+ optmanager.add_option(
+ "--exclude", default="E123,W234", comma_separated_list=True
+ )
- options, args = optmanager.parse_args(['--exclude', 'E201,W111,F280'])
- assert options.exclude == ['E201', 'W111', 'F280']
+ options, args = optmanager.parse_args(["--exclude", "E201,W111,F280"])
+ assert options.exclude == ["E201", "W111", "F280"]
def test_parse_args_normalize_paths(optmanager):
@@ -138,57 +139,61 @@ def test_parse_args_normalize_paths(optmanager):
assert optmanager.options == []
assert optmanager.config_options_dict == {}
- optmanager.add_option('--extra-config', normalize_paths=True,
- comma_separated_list=True)
+ optmanager.add_option(
+ "--extra-config", normalize_paths=True, comma_separated_list=True
+ )
- options, args = optmanager.parse_args([
- '--extra-config', '../config.ini,tox.ini,flake8/some-other.cfg'
- ])
+ options, args = optmanager.parse_args(
+ ["--extra-config", "../config.ini,tox.ini,flake8/some-other.cfg"]
+ )
assert options.extra_config == [
- os.path.abspath('../config.ini'),
- 'tox.ini',
- os.path.abspath('flake8/some-other.cfg'),
+ os.path.abspath("../config.ini"),
+ "tox.ini",
+ os.path.abspath("flake8/some-other.cfg"),
]
def test_generate_versions(optmanager):
"""Verify a comma-separated string is generated of registered plugins."""
optmanager.registered_plugins = [
- manager.PluginVersion('Testing 100', '0.0.0', False),
- manager.PluginVersion('Testing 101', '0.0.0', False),
- manager.PluginVersion('Testing 300', '0.0.0', True),
+ manager.PluginVersion("Testing 100", "0.0.0", False),
+ manager.PluginVersion("Testing 101", "0.0.0", False),
+ manager.PluginVersion("Testing 300", "0.0.0", True),
]
- assert (optmanager.generate_versions()
- == 'Testing 100: 0.0.0, Testing 101: 0.0.0, Testing 300: 0.0.0')
+ assert (
+ optmanager.generate_versions()
+ == "Testing 100: 0.0.0, Testing 101: 0.0.0, Testing 300: 0.0.0"
+ )
def test_plugins_are_sorted_in_generate_versions(optmanager):
"""Verify we sort before joining strings in generate_versions."""
optmanager.registered_plugins = [
- manager.PluginVersion('pyflakes', '1.5.0', False),
- manager.PluginVersion('mccabe', '0.7.0', False),
- manager.PluginVersion('pycodestyle', '2.2.0', False),
- manager.PluginVersion('flake8-docstrings', '0.6.1', False),
- manager.PluginVersion('flake8-bugbear', '2016.12.1', False),
+ manager.PluginVersion("pyflakes", "1.5.0", False),
+ manager.PluginVersion("mccabe", "0.7.0", False),
+ manager.PluginVersion("pycodestyle", "2.2.0", False),
+ manager.PluginVersion("flake8-docstrings", "0.6.1", False),
+ manager.PluginVersion("flake8-bugbear", "2016.12.1", False),
]
- assert (optmanager.generate_versions()
- == 'flake8-bugbear: 2016.12.1, '
- 'flake8-docstrings: 0.6.1, '
- 'mccabe: 0.7.0, '
- 'pycodestyle: 2.2.0, '
- 'pyflakes: 1.5.0')
+ assert (
+ optmanager.generate_versions() == "flake8-bugbear: 2016.12.1, "
+ "flake8-docstrings: 0.6.1, "
+ "mccabe: 0.7.0, "
+ "pycodestyle: 2.2.0, "
+ "pyflakes: 1.5.0"
+ )
def test_generate_versions_with_format_string(optmanager):
"""Verify a comma-separated string is generated of registered plugins."""
- optmanager.registered_plugins.update([
- manager.PluginVersion('Testing', '0.0.0', False),
- manager.PluginVersion('Testing', '0.0.0', False),
- manager.PluginVersion('Testing', '0.0.0', False),
- ])
- assert (
- optmanager.generate_versions() == 'Testing: 0.0.0'
+ optmanager.registered_plugins.update(
+ [
+ manager.PluginVersion("Testing", "0.0.0", False),
+ manager.PluginVersion("Testing", "0.0.0", False),
+ manager.PluginVersion("Testing", "0.0.0", False),
+ ]
)
+ assert optmanager.generate_versions() == "Testing: 0.0.0"
def test_update_version_string(optmanager):
@@ -197,17 +202,20 @@ def test_update_version_string(optmanager):
assert optmanager.version_action.version == TEST_VERSION
optmanager.registered_plugins = [
- manager.PluginVersion('Testing 100', '0.0.0', False),
- manager.PluginVersion('Testing 101', '0.0.0', False),
- manager.PluginVersion('Testing 300', '0.0.0', False),
+ manager.PluginVersion("Testing 100", "0.0.0", False),
+ manager.PluginVersion("Testing 101", "0.0.0", False),
+ manager.PluginVersion("Testing 300", "0.0.0", False),
]
optmanager.update_version_string()
assert optmanager.version == TEST_VERSION
- assert (optmanager.version_action.version == TEST_VERSION
- + ' (Testing 100: 0.0.0, Testing 101: 0.0.0, Testing 300: 0.0.0) '
- + utils.get_python_version())
+ assert (
+ optmanager.version_action.version
+ == TEST_VERSION
+ + " (Testing 100: 0.0.0, Testing 101: 0.0.0, Testing 300: 0.0.0) "
+ + utils.get_python_version()
+ )
def test_generate_epilog(optmanager):
@@ -215,14 +223,14 @@ def test_generate_epilog(optmanager):
assert optmanager.parser.epilog is None
optmanager.registered_plugins = [
- manager.PluginVersion('Testing 100', '0.0.0', False),
- manager.PluginVersion('Testing 101', '0.0.0', False),
- manager.PluginVersion('Testing 300', '0.0.0', False),
+ manager.PluginVersion("Testing 100", "0.0.0", False),
+ manager.PluginVersion("Testing 101", "0.0.0", False),
+ manager.PluginVersion("Testing 300", "0.0.0", False),
]
expected_value = (
- 'Installed plugins: Testing 100: 0.0.0, Testing 101: 0.0.0, Testing'
- ' 300: 0.0.0'
+ "Installed plugins: Testing 100: 0.0.0, Testing 101: 0.0.0, Testing"
+ " 300: 0.0.0"
)
optmanager.generate_epilog()
@@ -233,14 +241,14 @@ def test_extend_default_ignore(optmanager):
"""Verify that we update the extended default ignore list."""
assert optmanager.extended_default_ignore == set()
- optmanager.extend_default_ignore(['T100', 'T101', 'T102'])
- assert optmanager.extended_default_ignore == {'T100', 'T101', 'T102'}
+ optmanager.extend_default_ignore(["T100", "T101", "T102"])
+ assert optmanager.extended_default_ignore == {"T100", "T101", "T102"}
def test_parse_known_args(optmanager):
"""Verify we ignore unknown options."""
- with mock.patch('sys.exit') as sysexit:
- optmanager.parse_known_args(['--max-complexity', '5'])
+ with mock.patch("sys.exit") as sysexit:
+ optmanager.parse_known_args(["--max-complexity", "5"])
assert sysexit.called is False
@@ -249,101 +257,101 @@ def test_optparse_normalize_callback_option_legacy(optmanager):
"""Test the optparse shim for `callback=`."""
callback_foo = mock.Mock()
optmanager.add_option(
- '--foo',
- action='callback',
+ "--foo",
+ action="callback",
callback=callback_foo,
callback_args=(1, 2),
- callback_kwargs={'a': 'b'},
+ callback_kwargs={"a": "b"},
)
callback_bar = mock.Mock()
optmanager.add_option(
- '--bar',
- action='callback',
- type='string',
+ "--bar",
+ action="callback",
+ type="string",
callback=callback_bar,
)
callback_baz = mock.Mock()
optmanager.add_option(
- '--baz',
- action='callback',
- type='string',
+ "--baz",
+ action="callback",
+ type="string",
nargs=2,
callback=callback_baz,
)
- optmanager.parse_args(['--foo', '--bar', 'bararg', '--baz', '1', '2'])
+ optmanager.parse_args(["--foo", "--bar", "bararg", "--baz", "1", "2"])
callback_foo.assert_called_once_with(
mock.ANY, # the option / action instance
- '--foo',
+ "--foo",
None,
mock.ANY, # the OptionParser / ArgumentParser
1,
2,
- a='b',
+ a="b",
)
callback_bar.assert_called_once_with(
mock.ANY, # the option / action instance
- '--bar',
- 'bararg',
+ "--bar",
+ "bararg",
mock.ANY, # the OptionParser / ArgumentParser
)
callback_baz.assert_called_once_with(
mock.ANY, # the option / action instance
- '--baz',
- ('1', '2'),
+ "--baz",
+ ("1", "2"),
mock.ANY, # the OptionParser / ArgumentParser
)
@pytest.mark.parametrize(
- ('type_s', 'input_val', 'expected'),
+ ("type_s", "input_val", "expected"),
(
- ('int', '5', 5),
- ('long', '6', 6),
- ('string', 'foo', 'foo'),
- ('float', '1.5', 1.5),
- ('complex', '1+5j', 1 + 5j),
+ ("int", "5", 5),
+ ("long", "6", 6),
+ ("string", "foo", "foo"),
+ ("float", "1.5", 1.5),
+ ("complex", "1+5j", 1 + 5j),
# optparse allows this but does not document it
- ('str', 'foo', 'foo'),
+ ("str", "foo", "foo"),
),
)
def test_optparse_normalize_types(optmanager, type_s, input_val, expected):
"""Test the optparse shim for type="typename"."""
- optmanager.add_option('--foo', type=type_s)
- opts, args = optmanager.parse_args(['--foo', input_val])
+ optmanager.add_option("--foo", type=type_s)
+ opts, args = optmanager.parse_args(["--foo", input_val])
assert opts.foo == expected
def test_optparse_normalize_choice_type(optmanager):
"""Test the optparse shim for type="choice"."""
- optmanager.add_option('--foo', type='choice', choices=('1', '2', '3'))
- opts, args = optmanager.parse_args(['--foo', '1'])
- assert opts.foo == '1'
+ optmanager.add_option("--foo", type="choice", choices=("1", "2", "3"))
+ opts, args = optmanager.parse_args(["--foo", "1"])
+ assert opts.foo == "1"
# fails to parse
with pytest.raises(SystemExit):
- optmanager.parse_args(['--foo', '4'])
+ optmanager.parse_args(["--foo", "4"])
def test_optparse_normalize_help(optmanager, capsys):
"""Test the optparse shim for %default in help text."""
- optmanager.add_option('--foo', default='bar', help='default: %default')
+ optmanager.add_option("--foo", default="bar", help="default: %default")
with pytest.raises(SystemExit):
- optmanager.parse_args(['--help'])
+ optmanager.parse_args(["--help"])
out, err = capsys.readouterr()
output = out + err
- assert 'default: bar' in output
+ assert "default: bar" in output
def test_optmanager_group(optmanager, capsys):
"""Test that group(...) causes options to be assigned to a group."""
- with optmanager.group('groupname'):
- optmanager.add_option('--foo')
+ with optmanager.group("groupname"):
+ optmanager.add_option("--foo")
with pytest.raises(SystemExit):
- optmanager.parse_args(['--help'])
+ optmanager.parse_args(["--help"])
out, err = capsys.readouterr()
output = out + err
- assert '\ngroupname:\n' in output
+ assert "\ngroupname:\n" in output
@pytest.mark.parametrize(
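
Aside, not part of the diff: a sketch of the comma-separated-default behaviour covered above, using the same OptionManager API the fixtures construct:

from flake8.options import manager

optmanager = manager.OptionManager(prog="flake8", version="3.0.0b1")
optmanager.add_option(
    "--exclude", default="E123,W234", comma_separated_list=True
)
options, args = optmanager.parse_args([])
# The string default is split into a list when no CLI value overrides it.
print(options.exclude)  # ['E123', 'W234']
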
diff --git a/tests/unit/test_plugin.py b/tests/unit/test_plugin.py
index 204a4c2..c41198e 100644
--- a/tests/unit/test_plugin.py
+++ b/tests/unit/test_plugin.py
@@ -11,8 +11,8 @@ from flake8.plugins import manager
def test_load_plugin_fallsback_on_old_setuptools():
"""Verify we fallback gracefully to on old versions of setuptools."""
- entry_point = mock.Mock(spec=['load'])
- plugin = manager.Plugin('T000', entry_point)
+ entry_point = mock.Mock(spec=["load"])
+ plugin = manager.Plugin("T000", entry_point)
plugin.load_plugin()
entry_point.load.assert_called_once_with()
@@ -20,8 +20,8 @@ def test_load_plugin_fallsback_on_old_setuptools():
def test_load_plugin_is_idempotent():
"""Verify we use the preferred methods on new versions of setuptools."""
- entry_point = mock.Mock(spec=['load'])
- plugin = manager.Plugin('T000', entry_point)
+ entry_point = mock.Mock(spec=["load"])
+ plugin = manager.Plugin("T000", entry_point)
plugin.load_plugin()
plugin.load_plugin()
@@ -31,9 +31,9 @@ def test_load_plugin_is_idempotent():
def test_load_plugin_catches_and_reraises_exceptions():
"""Verify we raise our own FailedToLoadPlugin."""
- entry_point = mock.Mock(spec=['load'])
- entry_point.load.side_effect = ValueError('Test failure')
- plugin = manager.Plugin('T000', entry_point)
+ entry_point = mock.Mock(spec=["load"])
+ entry_point.load.side_effect = ValueError("Test failure")
+ plugin = manager.Plugin("T000", entry_point)
with pytest.raises(exceptions.FailedToLoadPlugin):
plugin.load_plugin()
@@ -41,9 +41,9 @@ def test_load_plugin_catches_and_reraises_exceptions():
def test_load_noncallable_plugin():
"""Verify that we do not load a non-callable plugin."""
- entry_point = mock.Mock(spec=['load'])
+ entry_point = mock.Mock(spec=["load"])
entry_point.load.return_value = mock.NonCallableMock()
- plugin = manager.Plugin('T000', entry_point)
+ plugin = manager.Plugin("T000", entry_point)
with pytest.raises(exceptions.FailedToLoadPlugin):
plugin.load_plugin()
@@ -52,8 +52,8 @@ def test_load_noncallable_plugin():
def test_plugin_property_loads_plugin_on_first_use():
"""Verify that we load our plugin when we first try to use it."""
- entry_point = mock.Mock(spec=['load'])
- plugin = manager.Plugin('T000', entry_point)
+ entry_point = mock.Mock(spec=["load"])
+ plugin = manager.Plugin("T000", entry_point)
assert plugin.plugin is not None
entry_point.load.assert_called_once_with()
@@ -61,14 +61,14 @@ def test_plugin_property_loads_plugin_on_first_use():
def test_execute_calls_plugin_with_passed_arguments():
"""Verify that we pass arguments directly to the plugin."""
- entry_point = mock.Mock(spec=['load'])
+ entry_point = mock.Mock(spec=["load"])
plugin_obj = mock.Mock()
- plugin = manager.Plugin('T000', entry_point)
+ plugin = manager.Plugin("T000", entry_point)
plugin._plugin = plugin_obj
- plugin.execute('arg1', 'arg2', kwarg1='value1', kwarg2='value2')
+ plugin.execute("arg1", "arg2", kwarg1="value1", kwarg2="value2")
plugin_obj.assert_called_once_with(
- 'arg1', 'arg2', kwarg1='value1', kwarg2='value2'
+ "arg1", "arg2", kwarg1="value1", kwarg2="value2"
)
# Extra assertions
@@ -77,23 +77,24 @@ def test_execute_calls_plugin_with_passed_arguments():
def test_version_proxies_to_the_plugin():
"""Verify that we pass arguments directly to the plugin."""
- entry_point = mock.Mock(spec=['load'])
- plugin_obj = mock.Mock(spec_set=['version'])
- plugin_obj.version = 'a.b.c'
- plugin = manager.Plugin('T000', entry_point)
+ entry_point = mock.Mock(spec=["load"])
+ plugin_obj = mock.Mock(spec_set=["version"])
+ plugin_obj.version = "a.b.c"
+ plugin = manager.Plugin("T000", entry_point)
plugin._plugin = plugin_obj
- assert plugin.version == 'a.b.c'
+ assert plugin.version == "a.b.c"
def test_register_options():
"""Verify we call add_options on the plugin only if it exists."""
# Set up our mocks and Plugin object
- entry_point = mock.Mock(spec=['load'])
- plugin_obj = mock.Mock(spec_set=['name', 'version', 'add_options',
- 'parse_options'])
+ entry_point = mock.Mock(spec=["load"])
+ plugin_obj = mock.Mock(
+ spec_set=["name", "version", "add_options", "parse_options"]
+ )
option_manager = mock.MagicMock(spec=options_manager.OptionManager)
- plugin = manager.Plugin('T000', entry_point)
+ plugin = manager.Plugin("T000", entry_point)
plugin._plugin = plugin_obj
# Call the method we're testing.
@@ -106,10 +107,10 @@ def test_register_options():
def test_register_options_checks_plugin_for_method():
"""Verify we call add_options on the plugin only if it exists."""
# Set up our mocks and Plugin object
- entry_point = mock.Mock(spec=['load'])
- plugin_obj = mock.Mock(spec_set=['name', 'version', 'parse_options'])
- option_manager = mock.Mock(spec=['register_plugin'])
- plugin = manager.Plugin('T000', entry_point)
+ entry_point = mock.Mock(spec=["load"])
+ plugin_obj = mock.Mock(spec_set=["name", "version", "parse_options"])
+ option_manager = mock.Mock(spec=["register_plugin"])
+ plugin = manager.Plugin("T000", entry_point)
plugin._plugin = plugin_obj
# Call the method we're testing.
@@ -122,12 +123,13 @@ def test_register_options_checks_plugin_for_method():
def test_provide_options():
"""Verify we call add_options on the plugin only if it exists."""
# Set up our mocks and Plugin object
- entry_point = mock.Mock(spec=['load'])
- plugin_obj = mock.Mock(spec_set=['name', 'version', 'add_options',
- 'parse_options'])
+ entry_point = mock.Mock(spec=["load"])
+ plugin_obj = mock.Mock(
+ spec_set=["name", "version", "add_options", "parse_options"]
+ )
option_values = argparse.Namespace(enable_extensions=[])
option_manager = mock.Mock()
- plugin = manager.Plugin('T000', entry_point)
+ plugin = manager.Plugin("T000", entry_point)
plugin._plugin = plugin_obj
# Call the method we're testing.
@@ -139,10 +141,13 @@ def test_provide_options():
)
-@pytest.mark.parametrize('ignore_list, code, expected_list', [
- (['E', 'W', 'F', 'C9'], 'W', ['E', 'F', 'C9']),
- (['E', 'W', 'F'], 'C9', ['E', 'W', 'F']),
-])
+@pytest.mark.parametrize(
+ "ignore_list, code, expected_list",
+ [
+ (["E", "W", "F", "C9"], "W", ["E", "F", "C9"]),
+ (["E", "W", "F"], "C9", ["E", "W", "F"]),
+ ],
+)
def test_enable(ignore_list, code, expected_list):
"""Verify that enabling a plugin removes it from the ignore list."""
options = mock.Mock(ignore=ignore_list)
@@ -157,8 +162,8 @@ def test_enable(ignore_list, code, expected_list):
def test_enable_without_providing_parsed_options():
"""Verify that enabling a plugin removes it from the ignore list."""
optmanager = mock.Mock()
- plugin = manager.Plugin('U4', mock.Mock())
+ plugin = manager.Plugin("U4", mock.Mock())
plugin.enable(optmanager)
- optmanager.remove_from_default_ignore.assert_called_once_with(['U4'])
+ optmanager.remove_from_default_ignore.assert_called_once_with(["U4"])
diff --git a/tests/unit/test_plugin_manager.py b/tests/unit/test_plugin_manager.py
index 6f95a72..5a38a38 100644
--- a/tests/unit/test_plugin_manager.py
+++ b/tests/unit/test_plugin_manager.py
@@ -5,54 +5,53 @@ from flake8._compat import importlib_metadata
from flake8.plugins import manager
-@mock.patch.object(importlib_metadata, 'entry_points')
+@mock.patch.object(importlib_metadata, "entry_points")
def test_calls_entrypoints_on_instantiation(entry_points_mck):
"""Verify that we call entry_points() when we create a manager."""
entry_points_mck.return_value = {}
- manager.PluginManager(namespace='testing.entrypoints')
+ manager.PluginManager(namespace="testing.entrypoints")
entry_points_mck.assert_called_once_with()
-@mock.patch.object(importlib_metadata, 'entry_points')
+@mock.patch.object(importlib_metadata, "entry_points")
def test_calls_entrypoints_creates_plugins_automaticaly(entry_points_mck):
"""Verify that we create Plugins on instantiation."""
entry_points_mck.return_value = {
- 'testing.entrypoints': [
- importlib_metadata.EntryPoint('T100', '', 'testing.entrypoints'),
- importlib_metadata.EntryPoint('T200', '', 'testing.entrypoints'),
+ "testing.entrypoints": [
+ importlib_metadata.EntryPoint("T100", "", "testing.entrypoints"),
+ importlib_metadata.EntryPoint("T200", "", "testing.entrypoints"),
],
}
- plugin_mgr = manager.PluginManager(namespace='testing.entrypoints')
+ plugin_mgr = manager.PluginManager(namespace="testing.entrypoints")
entry_points_mck.assert_called_once_with()
- assert 'T100' in plugin_mgr.plugins
- assert 'T200' in plugin_mgr.plugins
- assert isinstance(plugin_mgr.plugins['T100'], manager.Plugin)
- assert isinstance(plugin_mgr.plugins['T200'], manager.Plugin)
+ assert "T100" in plugin_mgr.plugins
+ assert "T200" in plugin_mgr.plugins
+ assert isinstance(plugin_mgr.plugins["T100"], manager.Plugin)
+ assert isinstance(plugin_mgr.plugins["T200"], manager.Plugin)
-@mock.patch.object(importlib_metadata, 'entry_points')
+@mock.patch.object(importlib_metadata, "entry_points")
def test_handles_mapping_functions_across_plugins(entry_points_mck):
"""Verify we can use the PluginManager call functions on all plugins."""
entry_points_mck.return_value = {
- 'testing.entrypoints': [
- importlib_metadata.EntryPoint('T100', '', 'testing.entrypoints'),
- importlib_metadata.EntryPoint('T200', '', 'testing.entrypoints'),
+ "testing.entrypoints": [
+ importlib_metadata.EntryPoint("T100", "", "testing.entrypoints"),
+ importlib_metadata.EntryPoint("T200", "", "testing.entrypoints"),
],
}
- plugin_mgr = manager.PluginManager(namespace='testing.entrypoints')
+ plugin_mgr = manager.PluginManager(namespace="testing.entrypoints")
plugins = [plugin_mgr.plugins[name] for name in plugin_mgr.names]
assert list(plugin_mgr.map(lambda x: x)) == plugins
-@mock.patch.object(importlib_metadata, 'entry_points')
+@mock.patch.object(importlib_metadata, "entry_points")
def test_local_plugins(entry_points_mck):
"""Verify PluginManager can load given local plugins."""
entry_points_mck.return_value = {}
plugin_mgr = manager.PluginManager(
- namespace='testing.entrypoints',
- local_plugins=['X = path.to:Plugin']
+ namespace="testing.entrypoints", local_plugins=["X = path.to:Plugin"]
)
- assert plugin_mgr.plugins['X'].entry_point.value == 'path.to:Plugin'
+ assert plugin_mgr.plugins["X"].entry_point.value == "path.to:Plugin"
diff --git a/tests/unit/test_plugin_type_manager.py b/tests/unit/test_plugin_type_manager.py
index 9d00b78..4db1ea9 100644
--- a/tests/unit/test_plugin_type_manager.py
+++ b/tests/unit/test_plugin_type_manager.py
@@ -14,8 +14,8 @@ def create_plugin_mock(raise_exception=False):
plugin = mock.create_autospec(manager.Plugin, instance=True)
if raise_exception:
plugin.load_plugin.side_effect = exceptions.FailedToLoadPlugin(
- plugin_name='T101',
- exception=ValueError('Test failure'),
+ plugin_name="T101",
+ exception=ValueError("Test failure"),
)
return plugin
@@ -28,7 +28,7 @@ def create_mapping_manager_mock(plugins):
yield func(plugin)
# Mock out the PluginManager instance
- manager_mock = mock.Mock(spec=['map'])
+ manager_mock = mock.Mock(spec=["map"])
# Replace the map method
manager_mock.map = fake_map
return manager_mock
@@ -47,7 +47,7 @@ class FakeTestType(manager.PluginTypeManager):
namespace = TEST_NAMESPACE
-@mock.patch('flake8.plugins.manager.PluginManager')
+@mock.patch("flake8.plugins.manager.PluginManager")
def test_instantiates_a_manager(PluginManager): # noqa: N803
"""Verify we create a PluginManager on instantiation."""
FakeTestType()
@@ -55,26 +55,22 @@ def test_instantiates_a_manager(PluginManager): # noqa: N803
PluginManager.assert_called_once_with(TEST_NAMESPACE, local_plugins=None)
-@mock.patch('flake8.plugins.manager.PluginManager')
+@mock.patch("flake8.plugins.manager.PluginManager")
def test_proxies_names_to_manager(PluginManager): # noqa: N803
"""Verify we proxy the names attribute."""
- PluginManager.return_value = mock.Mock(names=[
- 'T100', 'T200', 'T300'
- ])
+ PluginManager.return_value = mock.Mock(names=["T100", "T200", "T300"])
type_mgr = FakeTestType()
- assert type_mgr.names == ['T100', 'T200', 'T300']
+ assert type_mgr.names == ["T100", "T200", "T300"]
-@mock.patch('flake8.plugins.manager.PluginManager')
+@mock.patch("flake8.plugins.manager.PluginManager")
def test_proxies_plugins_to_manager(PluginManager): # noqa: N803
"""Verify we proxy the plugins attribute."""
- PluginManager.return_value = mock.Mock(plugins=[
- 'T100', 'T200', 'T300'
- ])
+ PluginManager.return_value = mock.Mock(plugins=["T100", "T200", "T300"])
type_mgr = FakeTestType()
- assert type_mgr.plugins == ['T100', 'T200', 'T300']
+ assert type_mgr.plugins == ["T100", "T200", "T300"]
def test_generate_call_function():
@@ -82,21 +78,28 @@ def test_generate_call_function():
optmanager = object()
plugin = mock.Mock(method_name=lambda x: x)
func = manager.PluginTypeManager._generate_call_function(
- 'method_name', optmanager,
+ "method_name",
+ optmanager,
)
assert callable(func)
assert func(plugin) is optmanager
-@mock.patch('flake8.plugins.manager.PluginManager')
+@mock.patch("flake8.plugins.manager.PluginManager")
def test_load_plugins(PluginManager): # noqa: N803
"""Verify load plugins loads *every* plugin."""
# Create a bunch of fake plugins
- plugins = [create_plugin_mock(), create_plugin_mock(),
- create_plugin_mock(), create_plugin_mock(),
- create_plugin_mock(), create_plugin_mock(),
- create_plugin_mock(), create_plugin_mock()]
+ plugins = [
+ create_plugin_mock(),
+ create_plugin_mock(),
+ create_plugin_mock(),
+ create_plugin_mock(),
+ create_plugin_mock(),
+ create_plugin_mock(),
+ create_plugin_mock(),
+ create_plugin_mock(),
+ ]
# Return our PluginManager mock
PluginManager.return_value = create_mapping_manager_mock(plugins)
@@ -109,13 +112,19 @@ def test_load_plugins(PluginManager): # noqa: N803
assert type_mgr.plugins_loaded is True
-@mock.patch('flake8.plugins.manager.PluginManager')
+@mock.patch("flake8.plugins.manager.PluginManager")
def test_load_plugins_fails(PluginManager): # noqa: N803
"""Verify load plugins bubbles up exceptions."""
- plugins = [create_plugin_mock(), create_plugin_mock(True),
- create_plugin_mock(), create_plugin_mock(),
- create_plugin_mock(), create_plugin_mock(),
- create_plugin_mock(), create_plugin_mock()]
+ plugins = [
+ create_plugin_mock(),
+ create_plugin_mock(True),
+ create_plugin_mock(),
+ create_plugin_mock(),
+ create_plugin_mock(),
+ create_plugin_mock(),
+ create_plugin_mock(),
+ create_plugin_mock(),
+ ]
# Return our PluginManager mock
PluginManager.return_value = create_mapping_manager_mock(plugins)
@@ -133,13 +142,19 @@ def test_load_plugins_fails(PluginManager): # noqa: N803
assert plugin.load_plugin.called is False
-@mock.patch('flake8.plugins.manager.PluginManager')
+@mock.patch("flake8.plugins.manager.PluginManager")
def test_register_options(PluginManager): # noqa: N803
"""Test that we map over every plugin to register options."""
- plugins = [create_plugin_mock(), create_plugin_mock(),
- create_plugin_mock(), create_plugin_mock(),
- create_plugin_mock(), create_plugin_mock(),
- create_plugin_mock(), create_plugin_mock()]
+ plugins = [
+ create_plugin_mock(),
+ create_plugin_mock(),
+ create_plugin_mock(),
+ create_plugin_mock(),
+ create_plugin_mock(),
+ create_plugin_mock(),
+ create_plugin_mock(),
+ create_plugin_mock(),
+ ]
# Return our PluginManager mock
PluginManager.return_value = create_mapping_manager_mock(plugins)
optmanager = object()
@@ -151,13 +166,19 @@ def test_register_options(PluginManager): # noqa: N803
plugin.register_options.assert_called_with(optmanager)
-@mock.patch('flake8.plugins.manager.PluginManager')
+@mock.patch("flake8.plugins.manager.PluginManager")
def test_provide_options(PluginManager): # noqa: N803
"""Test that we map over every plugin to provide parsed options."""
- plugins = [create_plugin_mock(), create_plugin_mock(),
- create_plugin_mock(), create_plugin_mock(),
- create_plugin_mock(), create_plugin_mock(),
- create_plugin_mock(), create_plugin_mock()]
+ plugins = [
+ create_plugin_mock(),
+ create_plugin_mock(),
+ create_plugin_mock(),
+ create_plugin_mock(),
+ create_plugin_mock(),
+ create_plugin_mock(),
+ create_plugin_mock(),
+ create_plugin_mock(),
+ ]
# Return our PluginManager mock
PluginManager.return_value = create_mapping_manager_mock(plugins)
optmanager = object()
@@ -167,32 +188,30 @@ def test_provide_options(PluginManager): # noqa: N803
type_mgr.provide_options(optmanager, options, [])
for plugin in plugins:
- plugin.provide_options.assert_called_with(optmanager,
- options,
- [])
+ plugin.provide_options.assert_called_with(optmanager, options, [])
-@mock.patch('flake8.plugins.manager.PluginManager')
+@mock.patch("flake8.plugins.manager.PluginManager")
def test_proxy_contains_to_managers_plugins_dict(PluginManager): # noqa: N803
"""Verify that we proxy __contains__ to the manager's dictionary."""
- plugins = {'T10%i' % i: create_plugin_mock() for i in range(8)}
+ plugins = {"T10%i" % i: create_plugin_mock() for i in range(8)}
# Return our PluginManager mock
PluginManager.return_value = create_manager_with_plugins(plugins)
type_mgr = FakeTestType()
for i in range(8):
- key = 'T10%i' % i
+ key = "T10%i" % i
assert key in type_mgr
-@mock.patch('flake8.plugins.manager.PluginManager')
+@mock.patch("flake8.plugins.manager.PluginManager")
def test_proxies_getitem_to_managers_plugins_dict(PluginManager): # noqa: N803
"""Verify that we can use the PluginTypeManager like a dictionary."""
- plugins = {'T10%i' % i: create_plugin_mock() for i in range(8)}
+ plugins = {"T10%i" % i: create_plugin_mock() for i in range(8)}
# Return our PluginManager mock
PluginManager.return_value = create_manager_with_plugins(plugins)
type_mgr = FakeTestType()
for i in range(8):
- key = 'T10%i' % i
+ key = "T10%i" % i
assert type_mgr[key] is plugins[key]
diff --git a/tests/unit/test_pyflakes_codes.py b/tests/unit/test_pyflakes_codes.py
index 77f3e56..526832e 100644
--- a/tests/unit/test_pyflakes_codes.py
+++ b/tests/unit/test_pyflakes_codes.py
@@ -18,14 +18,14 @@ def test_all_pyflakes_messages_have_flake8_codes_assigned():
def test_undefined_local_code():
"""In pyflakes 2.1.0 this code's string formatting was changed."""
- src = '''\
+ src = """\
import sys
def f():
sys = sys
-'''
+"""
tree = ast.parse(src)
- checker = pyflakes_shim.FlakesChecker(tree, (), 't.py')
+ checker = pyflakes_shim.FlakesChecker(tree, (), "t.py")
message_texts = [s for _, _, s, _ in checker.run()]
assert message_texts == [
"F823 local variable 'sys' defined in enclosing scope on line 1 referenced before assignment", # noqa: E501
diff --git a/tests/unit/test_statistics.py b/tests/unit/test_statistics.py
index 6aa4e6f..66565c3 100644
--- a/tests/unit/test_statistics.py
+++ b/tests/unit/test_statistics.py
@@ -4,19 +4,19 @@ import pytest
from flake8 import statistics as stats
from flake8 import style_guide
-DEFAULT_ERROR_CODE = 'E100'
-DEFAULT_FILENAME = 'file.py'
-DEFAULT_TEXT = 'Default text'
+DEFAULT_ERROR_CODE = "E100"
+DEFAULT_FILENAME = "file.py"
+DEFAULT_TEXT = "Default text"
def make_error(**kwargs):
"""Create errors with a bunch of default values."""
return style_guide.Violation(
- code=kwargs.pop('code', DEFAULT_ERROR_CODE),
- filename=kwargs.pop('filename', DEFAULT_FILENAME),
- line_number=kwargs.pop('line_number', 1),
- column_number=kwargs.pop('column_number', 1),
- text=kwargs.pop('text', DEFAULT_TEXT),
+ code=kwargs.pop("code", DEFAULT_ERROR_CODE),
+ filename=kwargs.pop("filename", DEFAULT_FILENAME),
+ line_number=kwargs.pop("line_number", 1),
+ column_number=kwargs.pop("column_number", 1),
+ text=kwargs.pop("text", DEFAULT_TEXT),
physical_line=None,
)
@@ -29,26 +29,29 @@ def test_key_creation():
assert key.code == DEFAULT_ERROR_CODE
-@pytest.mark.parametrize('code, filename, args, expected_result', [
- # Error prefix matches
- ('E123', 'file000.py', ('E', None), True),
- ('E123', 'file000.py', ('E1', None), True),
- ('E123', 'file000.py', ('E12', None), True),
- ('E123', 'file000.py', ('E123', None), True),
- # Error prefix and filename match
- ('E123', 'file000.py', ('E', 'file000.py'), True),
- ('E123', 'file000.py', ('E1', 'file000.py'), True),
- ('E123', 'file000.py', ('E12', 'file000.py'), True),
- ('E123', 'file000.py', ('E123', 'file000.py'), True),
- # Error prefix does not match
- ('E123', 'file000.py', ('W', None), False),
- # Error prefix matches but filename does not
- ('E123', 'file000.py', ('E', 'file001.py'), False),
- # Error prefix does not match but filename does
- ('E123', 'file000.py', ('W', 'file000.py'), False),
- # Neither error prefix match nor filename
- ('E123', 'file000.py', ('W', 'file001.py'), False),
-])
+@pytest.mark.parametrize(
+ "code, filename, args, expected_result",
+ [
+ # Error prefix matches
+ ("E123", "file000.py", ("E", None), True),
+ ("E123", "file000.py", ("E1", None), True),
+ ("E123", "file000.py", ("E12", None), True),
+ ("E123", "file000.py", ("E123", None), True),
+ # Error prefix and filename match
+ ("E123", "file000.py", ("E", "file000.py"), True),
+ ("E123", "file000.py", ("E1", "file000.py"), True),
+ ("E123", "file000.py", ("E12", "file000.py"), True),
+ ("E123", "file000.py", ("E123", "file000.py"), True),
+ # Error prefix does not match
+ ("E123", "file000.py", ("W", None), False),
+ # Error prefix matches but filename does not
+ ("E123", "file000.py", ("E", "file001.py"), False),
+ # Error prefix does not match but filename does
+ ("E123", "file000.py", ("W", "file000.py"), False),
+ # Neither error prefix match nor filename
+ ("E123", "file000.py", ("W", "file001.py"), False),
+ ],
+)
def test_key_matching(code, filename, args, expected_result):
"""Verify Key#matches behaves as we expect with fthe above input."""
key = stats.Key.create_from(make_error(code=code, filename=filename))
@@ -75,7 +78,7 @@ def test_statistic_increment():
def test_recording_statistics():
"""Verify that we appropriately create a new Statistic and store it."""
aggregator = stats.Statistics()
- assert list(aggregator.statistics_for('E')) == []
+ assert list(aggregator.statistics_for("E")) == []
aggregator.record(make_error())
storage = aggregator._store
for key, value in storage.items():
@@ -88,9 +91,9 @@ def test_recording_statistics():
def test_statistics_for_single_record():
"""Show we can retrieve the only statistic recorded."""
aggregator = stats.Statistics()
- assert list(aggregator.statistics_for('E')) == []
+ assert list(aggregator.statistics_for("E")) == []
aggregator.record(make_error())
- statistics = list(aggregator.statistics_for('E'))
+ statistics = list(aggregator.statistics_for("E"))
assert len(statistics) == 1
assert isinstance(statistics[0], stats.Statistic)
@@ -98,11 +101,11 @@ def test_statistics_for_single_record():
def test_statistics_for_filters_by_filename():
"""Show we can retrieve the only statistic recorded."""
aggregator = stats.Statistics()
- assert list(aggregator.statistics_for('E')) == []
+ assert list(aggregator.statistics_for("E")) == []
aggregator.record(make_error())
- aggregator.record(make_error(filename='example.py'))
+ aggregator.record(make_error(filename="example.py"))
- statistics = list(aggregator.statistics_for('E', DEFAULT_FILENAME))
+ statistics = list(aggregator.statistics_for("E", DEFAULT_FILENAME))
assert len(statistics) == 1
assert isinstance(statistics[0], stats.Statistic)
@@ -111,11 +114,11 @@ def test_statistic_for_retrieves_more_than_one_value():
"""Show this works for more than a couple statistic values."""
aggregator = stats.Statistics()
for i in range(50):
- aggregator.record(make_error(code=f'E1{i:02d}'))
- aggregator.record(make_error(code=f'W2{i:02d}'))
+ aggregator.record(make_error(code=f"E1{i:02d}"))
+ aggregator.record(make_error(code=f"W2{i:02d}"))
- statistics = list(aggregator.statistics_for('E'))
+ statistics = list(aggregator.statistics_for("E"))
assert len(statistics) == 50
- statistics = list(aggregator.statistics_for('W22'))
+ statistics = list(aggregator.statistics_for("W22"))
assert len(statistics) == 10
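
Aside, not part of the diff: a sketch of the prefix matching the tests above rely on, built from the same public pieces (Statistics and Violation):

from flake8 import statistics as stats
from flake8 import style_guide

aggregator = stats.Statistics()
aggregator.record(
    style_guide.Violation("E123", "file.py", 1, 1, "text", None)
)
# Any prefix of the recorded code ("E", "E1", "E12", "E123") finds it.
print(len(list(aggregator.statistics_for("E1"))))  # 1
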
diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py
index d31c113..22bb44d 100644
--- a/tests/unit/test_utils.py
+++ b/tests/unit/test_utils.py
@@ -14,86 +14,92 @@ from flake8.plugins import manager as plugin_manager
RELATIVE_PATHS = ["flake8", "pep8", "pyflakes", "mccabe"]
-@pytest.mark.parametrize("value,expected", [
- ("E123,\n\tW234,\n E206", ["E123", "W234", "E206"]),
- ("E123,W234,E206", ["E123", "W234", "E206"]),
- ("E123 W234 E206", ["E123", "W234", "E206"]),
- ("E123\nW234 E206", ["E123", "W234", "E206"]),
- ("E123\nW234\nE206", ["E123", "W234", "E206"]),
- ("E123,W234,E206,", ["E123", "W234", "E206"]),
- ("E123,W234,E206, ,\n", ["E123", "W234", "E206"]),
- ("E123,W234,,E206,,", ["E123", "W234", "E206"]),
- ("E123, W234,, E206,,", ["E123", "W234", "E206"]),
- ("E123,,W234,,E206,,", ["E123", "W234", "E206"]),
- ("", []),
-])
+@pytest.mark.parametrize(
+ "value,expected",
+ [
+ ("E123,\n\tW234,\n E206", ["E123", "W234", "E206"]),
+ ("E123,W234,E206", ["E123", "W234", "E206"]),
+ ("E123 W234 E206", ["E123", "W234", "E206"]),
+ ("E123\nW234 E206", ["E123", "W234", "E206"]),
+ ("E123\nW234\nE206", ["E123", "W234", "E206"]),
+ ("E123,W234,E206,", ["E123", "W234", "E206"]),
+ ("E123,W234,E206, ,\n", ["E123", "W234", "E206"]),
+ ("E123,W234,,E206,,", ["E123", "W234", "E206"]),
+ ("E123, W234,, E206,,", ["E123", "W234", "E206"]),
+ ("E123,,W234,,E206,,", ["E123", "W234", "E206"]),
+ ("", []),
+ ],
+)
def test_parse_comma_separated_list(value, expected):
"""Verify that similar inputs produce identical outputs."""
assert utils.parse_comma_separated_list(value) == expected
@pytest.mark.parametrize(
- ('value', 'expected'),
+ ("value", "expected"),
(
# empty option configures nothing
- ('', []), (' ', []), ('\n\n\n', []),
+ ("", []),
+ (" ", []),
+ ("\n\n\n", []),
# basic case
(
- 'f.py:E123',
- [('f.py', ['E123'])],
+ "f.py:E123",
+ [("f.py", ["E123"])],
),
# multiple filenames, multiple codes
(
- 'f.py,g.py:E,F',
- [('f.py', ['E', 'F']), ('g.py', ['E', 'F'])],
+ "f.py,g.py:E,F",
+ [("f.py", ["E", "F"]), ("g.py", ["E", "F"])],
),
# demonstrate that whitespace is not important around tokens
(
- ' f.py , g.py : E , F ',
- [('f.py', ['E', 'F']), ('g.py', ['E', 'F'])],
+ " f.py , g.py : E , F ",
+ [("f.py", ["E", "F"]), ("g.py", ["E", "F"])],
),
# whitespace can separate groups of configuration
(
- 'f.py:E g.py:F',
- [('f.py', ['E']), ('g.py', ['F'])],
+ "f.py:E g.py:F",
+ [("f.py", ["E"]), ("g.py", ["F"])],
),
# newlines can separate groups of configuration
(
- 'f.py: E\ng.py: F\n',
- [('f.py', ['E']), ('g.py', ['F'])],
+ "f.py: E\ng.py: F\n",
+ [("f.py", ["E"]), ("g.py", ["F"])],
),
# whitespace can be used in place of commas
(
- 'f.py g.py: E F',
- [('f.py', ['E', 'F']), ('g.py', ['E', 'F'])],
+ "f.py g.py: E F",
+ [("f.py", ["E", "F"]), ("g.py", ["E", "F"])],
),
# go ahead, indent your codes
(
- 'f.py:\n E,F\ng.py:\n G,H',
- [('f.py', ['E', 'F']), ('g.py', ['G', 'H'])],
+ "f.py:\n E,F\ng.py:\n G,H",
+ [("f.py", ["E", "F"]), ("g.py", ["G", "H"])],
),
# capitalized filenames are ok too
(
- 'F.py,G.py: F,G',
- [('F.py', ['F', 'G']), ('G.py', ['F', 'G'])],
+ "F.py,G.py: F,G",
+ [("F.py", ["F", "G"]), ("G.py", ["F", "G"])],
),
# it's easier to allow zero filenames or zero codes than forbid it
- (':E', []), ('f.py:', []),
- (':E f.py:F', [('f.py', ['F'])]),
- ('f.py: g.py:F', [('g.py', ['F'])]),
- ('f.py:E:', []),
- ('f.py:E.py:', []),
- ('f.py:Eg.py:F', [('Eg.py', ['F'])]),
+ (":E", []),
+ ("f.py:", []),
+ (":E f.py:F", [("f.py", ["F"])]),
+ ("f.py: g.py:F", [("g.py", ["F"])]),
+ ("f.py:E:", []),
+ ("f.py:E.py:", []),
+ ("f.py:Eg.py:F", [("Eg.py", ["F"])]),
# sequences are also valid (?)
(
- ['f.py:E,F', 'g.py:G,H'],
- [('f.py', ['E', 'F']), ('g.py', ['G', 'H'])],
+ ["f.py:E,F", "g.py:G,H"],
+ [("f.py", ["E", "F"]), ("g.py", ["G", "H"])],
),
        # six-digit codes are allowed
(
- 'f.py: ABC123',
- [('f.py', ['ABC123'])],
- )
+ "f.py: ABC123",
+ [("f.py", ["ABC123"])],
+ ),
),
)
def test_parse_files_to_codes_mapping(value, expected):
@@ -102,16 +108,19 @@ def test_parse_files_to_codes_mapping(value, expected):
@pytest.mark.parametrize(
- 'value',
+ "value",
(
# code while looking for filenames
- 'E123', 'f.py,E123', 'f.py E123',
+ "E123",
+ "f.py,E123",
+ "f.py E123",
# eof while looking for filenames
- 'f.py', 'f.py:E,g.py'
+ "f.py",
+ "f.py:E,g.py"
# colon while looking for codes
- 'f.py::',
+ "f.py::",
# no separator between
- 'f.py:E1F1',
+ "f.py:E1F1",
),
)
def test_invalid_file_list(value):
@@ -120,22 +129,32 @@ def test_invalid_file_list(value):
utils.parse_files_to_codes_mapping(value)
-@pytest.mark.parametrize("value,expected", [
- ("flake8", "flake8"),
- ("../flake8", os.path.abspath("../flake8")),
- ("flake8/", os.path.abspath("flake8")),
-])
+@pytest.mark.parametrize(
+ "value,expected",
+ [
+ ("flake8", "flake8"),
+ ("../flake8", os.path.abspath("../flake8")),
+ ("flake8/", os.path.abspath("flake8")),
+ ],
+)
def test_normalize_path(value, expected):
"""Verify that we normalize paths provided to the tool."""
assert utils.normalize_path(value) == expected
-@pytest.mark.parametrize("value,expected", [
- (["flake8", "pep8", "pyflakes", "mccabe"],
- ["flake8", "pep8", "pyflakes", "mccabe"]),
- (["../flake8", "../pep8", "../pyflakes", "../mccabe"],
- [os.path.abspath(f"../{p}") for p in RELATIVE_PATHS]),
-])
+@pytest.mark.parametrize(
+ "value,expected",
+ [
+ (
+ ["flake8", "pep8", "pyflakes", "mccabe"],
+ ["flake8", "pep8", "pyflakes", "mccabe"],
+ ),
+ (
+ ["../flake8", "../pep8", "../pyflakes", "../mccabe"],
+ [os.path.abspath(f"../{p}") for p in RELATIVE_PATHS],
+ ),
+ ],
+)
def test_normalize_paths(value, expected):
"""Verify we normalizes a sequence of paths provided to the tool."""
assert utils.normalize_paths(value) == expected
@@ -143,19 +162,22 @@ def test_normalize_paths(value, expected):
def test_is_windows_checks_for_nt():
"""Verify that we correctly detect Windows."""
- with mock.patch.object(os, 'name', 'nt'):
+ with mock.patch.object(os, "name", "nt"):
assert utils.is_windows() is True
- with mock.patch.object(os, 'name', 'posix'):
+ with mock.patch.object(os, "name", "posix"):
assert utils.is_windows() is False
-@pytest.mark.parametrize('filename,patterns,expected', [
- ('foo.py', [], True),
- ('foo.py', ['*.pyc'], False),
- ('foo.pyc', ['*.pyc'], True),
- ('foo.pyc', ['*.swp', '*.pyc', '*.py'], True),
-])
+@pytest.mark.parametrize(
+ "filename,patterns,expected",
+ [
+ ("foo.py", [], True),
+ ("foo.py", ["*.pyc"], False),
+ ("foo.pyc", ["*.pyc"], True),
+ ("foo.pyc", ["*.swp", "*.pyc", "*.py"], True),
+ ],
+)
def test_fnmatch(filename, patterns, expected):
"""Verify that our fnmatch wrapper works as expected."""
assert utils.fnmatch(filename, patterns) is expected
@@ -165,104 +187,110 @@ def test_fnmatch(filename, patterns, expected):
def files_dir(tmpdir):
"""Create test dir for testing filenames_from."""
with tmpdir.as_cwd():
- tmpdir.join('a/b/c.py').ensure()
- tmpdir.join('a/b/d.py').ensure()
- tmpdir.join('a/b/e/f.py').ensure()
+ tmpdir.join("a/b/c.py").ensure()
+ tmpdir.join("a/b/d.py").ensure()
+ tmpdir.join("a/b/e/f.py").ensure()
yield tmpdir
def _normpath(s):
- return s.replace('/', os.sep)
+ return s.replace("/", os.sep)
def _normpaths(pths):
return {_normpath(pth) for pth in pths}
-@pytest.mark.usefixtures('files_dir')
+@pytest.mark.usefixtures("files_dir")
def test_filenames_from_a_directory():
"""Verify that filenames_from walks a directory."""
- filenames = set(utils.filenames_from(_normpath('a/b/')))
+ filenames = set(utils.filenames_from(_normpath("a/b/")))
# should include all files
- expected = _normpaths(('a/b/c.py', 'a/b/d.py', 'a/b/e/f.py'))
+ expected = _normpaths(("a/b/c.py", "a/b/d.py", "a/b/e/f.py"))
assert filenames == expected
-@pytest.mark.usefixtures('files_dir')
+@pytest.mark.usefixtures("files_dir")
def test_filenames_from_a_directory_with_a_predicate():
"""Verify that predicates filter filenames_from."""
- filenames = set(utils.filenames_from(
- arg=_normpath('a/b/'),
- predicate=lambda path: path.endswith(_normpath('b/c.py')),
- ))
+ filenames = set(
+ utils.filenames_from(
+ arg=_normpath("a/b/"),
+ predicate=lambda path: path.endswith(_normpath("b/c.py")),
+ )
+ )
# should not include c.py
- expected = _normpaths(('a/b/d.py', 'a/b/e/f.py'))
+ expected = _normpaths(("a/b/d.py", "a/b/e/f.py"))
assert filenames == expected
-@pytest.mark.usefixtures('files_dir')
+@pytest.mark.usefixtures("files_dir")
def test_filenames_from_a_directory_with_a_predicate_from_the_current_dir():
"""Verify that predicates filter filenames_from."""
- filenames = set(utils.filenames_from(
- arg=_normpath('./a/b'),
- predicate=lambda path: path == 'c.py',
- ))
+ filenames = set(
+ utils.filenames_from(
+ arg=_normpath("./a/b"),
+ predicate=lambda path: path == "c.py",
+ )
+ )
    # none should have matched the predicate, so all files are returned
- expected = _normpaths(('./a/b/c.py', './a/b/d.py', './a/b/e/f.py'))
+ expected = _normpaths(("./a/b/c.py", "./a/b/d.py", "./a/b/e/f.py"))
assert filenames == expected
-@pytest.mark.usefixtures('files_dir')
+@pytest.mark.usefixtures("files_dir")
def test_filenames_from_a_single_file():
"""Verify that we simply yield that filename."""
- filenames = set(utils.filenames_from(_normpath('a/b/c.py')))
- assert filenames == {_normpath('a/b/c.py')}
+ filenames = set(utils.filenames_from(_normpath("a/b/c.py")))
+ assert filenames == {_normpath("a/b/c.py")}
def test_filenames_from_a_single_file_does_not_exist():
"""Verify that a passed filename which does not exist is returned back."""
- filenames = set(utils.filenames_from(_normpath('d/n/e.py')))
- assert filenames == {_normpath('d/n/e.py')}
+ filenames = set(utils.filenames_from(_normpath("d/n/e.py")))
+ assert filenames == {_normpath("d/n/e.py")}
def test_filenames_from_exclude_doesnt_exclude_directory_names(tmpdir):
"""Verify that we don't greedily exclude subdirs."""
- tmpdir.join('1').ensure_dir().join('dont_return_me.py').ensure()
- tmpdir.join('2').join('1').ensure_dir().join('return_me.py').ensure()
- exclude = [tmpdir.join('1').strpath]
+ tmpdir.join("1").ensure_dir().join("dont_return_me.py").ensure()
+ tmpdir.join("2").join("1").ensure_dir().join("return_me.py").ensure()
+ exclude = [tmpdir.join("1").strpath]
    # This acts similarly to src.flake8.checker.is_path_excluded
def predicate(pth):
return utils.fnmatch(os.path.abspath(pth), exclude)
with tmpdir.as_cwd():
- filenames = list(utils.filenames_from('.', predicate))
- assert filenames == [os.path.join('.', '2', '1', 'return_me.py')]
+ filenames = list(utils.filenames_from(".", predicate))
+ assert filenames == [os.path.join(".", "2", "1", "return_me.py")]
def test_parameters_for_class_plugin():
"""Verify that we can retrieve the parameters for a class plugin."""
+
class FakeCheck:
def __init__(self, tree):
raise NotImplementedError
- plugin = plugin_manager.Plugin('plugin-name', object())
+ plugin = plugin_manager.Plugin("plugin-name", object())
plugin._plugin = FakeCheck
- assert utils.parameters_for(plugin) == {'tree': True}
+ assert utils.parameters_for(plugin) == {"tree": True}
def test_parameters_for_function_plugin():
"""Verify that we retrieve the parameters for a function plugin."""
+
def fake_plugin(physical_line, self, tree, optional=None):
raise NotImplementedError
- plugin = plugin_manager.Plugin('plugin-name', object())
+ plugin = plugin_manager.Plugin("plugin-name", object())
plugin._plugin = fake_plugin
assert utils.parameters_for(plugin) == {
- 'physical_line': True,
- 'self': True,
- 'tree': True,
- 'optional': False,
+ "physical_line": True,
+ "self": True,
+ "tree": True,
+ "optional": False,
}
@@ -273,29 +301,32 @@ def read_diff_file(filename):
return content
-SINGLE_FILE_DIFF = read_diff_file('tests/fixtures/diffs/single_file_diff')
+SINGLE_FILE_DIFF = read_diff_file("tests/fixtures/diffs/single_file_diff")
SINGLE_FILE_INFO = {
- 'flake8/utils.py': set(range(75, 83)).union(set(range(84, 94))),
+ "flake8/utils.py": set(range(75, 83)).union(set(range(84, 94))),
}
-TWO_FILE_DIFF = read_diff_file('tests/fixtures/diffs/two_file_diff')
+TWO_FILE_DIFF = read_diff_file("tests/fixtures/diffs/two_file_diff")
TWO_FILE_INFO = {
- 'flake8/utils.py': set(range(75, 83)).union(set(range(84, 94))),
- 'tests/unit/test_utils.py': set(range(115, 128)),
+ "flake8/utils.py": set(range(75, 83)).union(set(range(84, 94))),
+ "tests/unit/test_utils.py": set(range(115, 128)),
}
-MULTI_FILE_DIFF = read_diff_file('tests/fixtures/diffs/multi_file_diff')
+MULTI_FILE_DIFF = read_diff_file("tests/fixtures/diffs/multi_file_diff")
MULTI_FILE_INFO = {
- 'flake8/utils.py': set(range(75, 83)).union(set(range(84, 94))),
- 'tests/unit/test_utils.py': set(range(115, 129)),
- 'tests/fixtures/diffs/single_file_diff': set(range(1, 28)),
- 'tests/fixtures/diffs/two_file_diff': set(range(1, 46)),
+ "flake8/utils.py": set(range(75, 83)).union(set(range(84, 94))),
+ "tests/unit/test_utils.py": set(range(115, 129)),
+ "tests/fixtures/diffs/single_file_diff": set(range(1, 28)),
+ "tests/fixtures/diffs/two_file_diff": set(range(1, 46)),
}
-@pytest.mark.parametrize("diff, parsed_diff", [
- (SINGLE_FILE_DIFF, SINGLE_FILE_INFO),
- (TWO_FILE_DIFF, TWO_FILE_INFO),
- (MULTI_FILE_DIFF, MULTI_FILE_INFO),
-])
+@pytest.mark.parametrize(
+ "diff, parsed_diff",
+ [
+ (SINGLE_FILE_DIFF, SINGLE_FILE_INFO),
+ (TWO_FILE_DIFF, TWO_FILE_INFO),
+ (MULTI_FILE_DIFF, MULTI_FILE_INFO),
+ ],
+)
def test_parse_unified_diff(diff, parsed_diff):
"""Verify that what we parse from a diff matches expectations."""
assert utils.parse_unified_diff(diff) == parsed_diff
@@ -304,19 +335,19 @@ def test_parse_unified_diff(diff, parsed_diff):
def test_matches_filename_for_excluding_dotfiles():
"""Verify that `.` and `..` are not matched by `.*`."""
logger = logging.Logger(__name__)
- assert not utils.matches_filename('.', ('.*',), '', logger)
- assert not utils.matches_filename('..', ('.*',), '', logger)
+ assert not utils.matches_filename(".", (".*",), "", logger)
+ assert not utils.matches_filename("..", (".*",), "", logger)
def test_stdin_get_value_crlf():
"""Ensure that stdin is normalized from crlf to lf."""
- stdin = io.TextIOWrapper(io.BytesIO(b'1\r\n2\r\n'), 'UTF-8')
- with mock.patch.object(sys, 'stdin', stdin):
- assert utils.stdin_get_value.__wrapped__() == '1\n2\n'
+ stdin = io.TextIOWrapper(io.BytesIO(b"1\r\n2\r\n"), "UTF-8")
+ with mock.patch.object(sys, "stdin", stdin):
+ assert utils.stdin_get_value.__wrapped__() == "1\n2\n"
def test_stdin_unknown_coding_token():
"""Ensure we produce source even for unknown encodings."""
- stdin = io.TextIOWrapper(io.BytesIO(b'# coding: unknown\n'), 'UTF-8')
- with mock.patch.object(sys, 'stdin', stdin):
- assert utils.stdin_get_value.__wrapped__() == '# coding: unknown\n'
+ stdin = io.TextIOWrapper(io.BytesIO(b"# coding: unknown\n"), "UTF-8")
+ with mock.patch.object(sys, "stdin", stdin):
+ assert utils.stdin_get_value.__wrapped__() == "# coding: unknown\n"
diff --git a/tests/unit/test_violation.py b/tests/unit/test_violation.py
index 284cd2a..b9cf1a3 100644
--- a/tests/unit/test_violation.py
+++ b/tests/unit/test_violation.py
@@ -6,61 +6,74 @@ import pytest
from flake8 import style_guide
-@pytest.mark.parametrize('error_code,physical_line,expected_result', [
- ('E111', 'a = 1', False),
- ('E121', 'a = 1 # noqa: E111', False),
- ('E121', 'a = 1 # noqa: E111,W123,F821', False),
- ('E111', 'a = 1 # noqa: E111,W123,F821', True),
- ('W123', 'a = 1 # noqa: E111,W123,F821', True),
- ('W123', 'a = 1 # noqa: E111, W123,F821', True),
- ('E111', 'a = 1 # noqa: E11,W123,F821', True),
- ('E121', 'a = 1 # noqa:E111,W123,F821', False),
- ('E111', 'a = 1 # noqa:E111,W123,F821', True),
- ('W123', 'a = 1 # noqa:E111,W123,F821', True),
- ('W123', 'a = 1 # noqa:E111, W123,F821', True),
- ('E111', 'a = 1 # noqa:E11,W123,F821', True),
- ('E111', 'a = 1 # noqa, analysis:ignore', True),
- ('E111', 'a = 1 # noqa analysis:ignore', True),
- ('E111', 'a = 1 # noqa - We do not care', True),
- ('E111', 'a = 1 # noqa: We do not care', True),
- ('E111', 'a = 1 # noqa:We do not care', True),
- ('ABC123', 'a = 1 # noqa: ABC123', True),
- ('E111', 'a = 1 # noqa: ABC123', False),
- ('ABC123', 'a = 1 # noqa: ABC124', False),
-])
+@pytest.mark.parametrize(
+ "error_code,physical_line,expected_result",
+ [
+ ("E111", "a = 1", False),
+ ("E121", "a = 1 # noqa: E111", False),
+ ("E121", "a = 1 # noqa: E111,W123,F821", False),
+ ("E111", "a = 1 # noqa: E111,W123,F821", True),
+ ("W123", "a = 1 # noqa: E111,W123,F821", True),
+ ("W123", "a = 1 # noqa: E111, W123,F821", True),
+ ("E111", "a = 1 # noqa: E11,W123,F821", True),
+ ("E121", "a = 1 # noqa:E111,W123,F821", False),
+ ("E111", "a = 1 # noqa:E111,W123,F821", True),
+ ("W123", "a = 1 # noqa:E111,W123,F821", True),
+ ("W123", "a = 1 # noqa:E111, W123,F821", True),
+ ("E111", "a = 1 # noqa:E11,W123,F821", True),
+ ("E111", "a = 1 # noqa, analysis:ignore", True),
+ ("E111", "a = 1 # noqa analysis:ignore", True),
+ ("E111", "a = 1 # noqa - We do not care", True),
+ ("E111", "a = 1 # noqa: We do not care", True),
+ ("E111", "a = 1 # noqa:We do not care", True),
+ ("ABC123", "a = 1 # noqa: ABC123", True),
+ ("E111", "a = 1 # noqa: ABC123", False),
+ ("ABC123", "a = 1 # noqa: ABC124", False),
+ ],
+)
def test_is_inline_ignored(error_code, physical_line, expected_result):
"""Verify that we detect inline usage of ``# noqa``."""
error = style_guide.Violation(
- error_code, 'filename.py', 1, 1, 'error text', None)
+ error_code, "filename.py", 1, 1, "error text", None
+ )
# We want `None` to be passed as the physical line so we actually use our
# monkey-patched linecache.getline value.
- with mock.patch('linecache.getline', return_value=physical_line):
+ with mock.patch("linecache.getline", return_value=physical_line):
assert error.is_inline_ignored(False) is expected_result
def test_disable_is_inline_ignored():
"""Verify that is_inline_ignored exits immediately if disabling NoQA."""
error = style_guide.Violation(
- 'E121', 'filename.py', 1, 1, 'error text', 'line')
+ "E121", "filename.py", 1, 1, "error text", "line"
+ )
- with mock.patch('linecache.getline') as getline:
+ with mock.patch("linecache.getline") as getline:
assert error.is_inline_ignored(True) is False
assert getline.called is False
-@pytest.mark.parametrize('violation_file,violation_line,diff,expected', [
- ('file.py', 10, {}, True),
- ('file.py', 1, {'file.py': range(1, 2)}, True),
- ('file.py', 10, {'file.py': range(1, 2)}, False),
- ('file.py', 1, {'other.py': range(1, 2)}, False),
- ('file.py', 10, {'other.py': range(1, 2)}, False),
-])
+@pytest.mark.parametrize(
+ "violation_file,violation_line,diff,expected",
+ [
+ ("file.py", 10, {}, True),
+ ("file.py", 1, {"file.py": range(1, 2)}, True),
+ ("file.py", 10, {"file.py": range(1, 2)}, False),
+ ("file.py", 1, {"other.py": range(1, 2)}, False),
+ ("file.py", 10, {"other.py": range(1, 2)}, False),
+ ],
+)
def test_violation_is_in_diff(violation_file, violation_line, diff, expected):
"""Verify that we find violations within a diff."""
violation = style_guide.Violation(
- 'E001', violation_file, violation_line, 1, 'warning', 'line',
+ "E001",
+ violation_file,
+ violation_line,
+ 1,
+ "warning",
+ "line",
)
assert violation.is_in(diff) is expected