author     Craig Silverstein <csilvers@khanacademy.org>  2018-03-02 20:49:31 -0800
committer  Craig Silverstein <csilvers@khanacademy.org>  2018-03-02 20:49:31 -0800
commit     a5573fc8643de1df2bfe7429c81fcb0de42bd784 (patch)
tree       678dfc44cf814aa87c159aa7531928090bd2589c /src
parent     f8344997267b8ca87a96c690a3515a443005b653 (diff)
download   flake8-a5573fc8643de1df2bfe7429c81fcb0de42bd784.tar.gz
Move all uses of `pool` inside `run_parallel()`.
This includes creating the pool, tearing it down under normal use, and tearing it down in case of an exception. Doing this makes it harder to leak processes, as was happening in #410, for instance.

Fixes #410
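
The underlying pattern generalizes: the pool is owned for exactly the duration of one call, creation failures degrade to serial execution, and a finally block terminates the workers if anything escapes. Below is a minimal sketch of that lifecycle; the run_one/run_serial parameters and the SERIAL_RETRY_ERRNOS set here are illustrative stand-ins, not flake8's actual _run_checks machinery.

    import errno
    import multiprocessing

    # Hypothetical stand-in for flake8's SERIAL_RETRY_ERRNOS: errnos on
    # which we silently fall back to serial execution.
    SERIAL_RETRY_ERRNOS = {errno.ENOSPC, errno.ENOMEM}

    def run_parallel(jobs, items, run_one, run_serial):
        """Run run_one over items; the pool never outlives this call."""
        try:
            pool = multiprocessing.Pool(jobs)
        except OSError as oserr:
            if oserr.errno not in SERIAL_RETRY_ERRNOS:
                raise
            # Pool creation failed for a retryable reason: degrade to serial.
            return run_serial(items)

        try:
            results = list(pool.imap_unordered(run_one, items))
            pool.close()
            pool.join()
            pool = None  # clean shutdown; nothing left to terminate
            return results
        finally:
            if pool is not None:  # an exception escaped the loop above
                pool.terminate()  # kill the workers instead of leaking them
                pool.join()

Because the pool reference never escapes the function, there is no window in which an exception elsewhere can strand live worker processes, which is what _force_cleanup() existed to guard against.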
Diffstat (limited to 'src')
-rw-r--r--  src/flake8/checker.py           61
-rw-r--r--  src/flake8/main/application.py   1
2 files changed, 30 insertions(+), 32 deletions(-)
diff --git a/src/flake8/checker.py b/src/flake8/checker.py
index 7a18ce5..97c5ea1 100644
--- a/src/flake8/checker.py
+++ b/src/flake8/checker.py
@@ -74,7 +74,6 @@ class Manager(object):
self.checks = checker_plugins
self.jobs = self._job_count()
self.using_multiprocessing = self.jobs > 1
- self.pool = None
self.processes = []
self.checkers = []
self.statistics = {
@@ -84,14 +83,6 @@ class Manager(object):
'tokens': 0,
}
- if self.using_multiprocessing:
- try:
- self.pool = multiprocessing.Pool(self.jobs, _pool_init)
- except OSError as oserr:
- if oserr.errno not in SERIAL_RETRY_ERRNOS:
- raise
- self.using_multiprocessing = False
-
def _process_statistics(self):
for checker in self.checkers:
for statistic in defaults.STATISTIC_NAMES:
@@ -268,30 +259,40 @@ class Manager(object):
results_found += len(results)
return (results_found, results_reported)
- def _force_cleanup(self):
- if self.pool is not None:
- self.pool.terminate()
- self.pool.join()
-
def run_parallel(self):
"""Run the checkers in parallel."""
final_results = collections.defaultdict(list)
final_statistics = collections.defaultdict(dict)
- pool_map = self.pool.imap_unordered(
- _run_checks,
- self.checkers,
- chunksize=calculate_pool_chunksize(
- len(self.checkers),
- self.jobs,
- ),
- )
- for ret in pool_map:
- filename, results, statistics = ret
- final_results[filename] = results
- final_statistics[filename] = statistics
- self.pool.close()
- self.pool.join()
- self.pool = None
+
+ try:
+ pool = multiprocessing.Pool(self.jobs, _pool_init)
+ except OSError as oserr:
+ if oserr.errno not in SERIAL_RETRY_ERRNOS:
+ raise
+ self.using_multiprocessing = False
+ self.run_serial()
+ return
+
+ try:
+ pool_map = pool.imap_unordered(
+ _run_checks,
+ self.checkers,
+ chunksize=calculate_pool_chunksize(
+ len(self.checkers),
+ self.jobs,
+ ),
+ )
+ for ret in pool_map:
+ filename, results, statistics = ret
+ final_results[filename] = results
+ final_statistics[filename] = statistics
+ pool.close()
+ pool.join()
+ pool = None
+ finally:
+ if pool is not None:
+ pool.terminate()
+ pool.join()
for checker in self.checkers:
filename = checker.display_name
@@ -328,8 +329,6 @@ class Manager(object):
except KeyboardInterrupt:
LOG.warning('Flake8 was interrupted by the user')
raise exceptions.EarlyQuit('Early quit while running checks')
- finally:
- self._force_cleanup()
def start(self, paths=None):
"""Start checking files.
diff --git a/src/flake8/main/application.py b/src/flake8/main/application.py
index aed6175..9c15629 100644
--- a/src/flake8/main/application.py
+++ b/src/flake8/main/application.py
@@ -405,7 +405,6 @@ class Application(object):
print('... stopped')
LOG.critical('Caught keyboard interrupt from user')
LOG.exception(exc)
- self.file_checker_manager._force_cleanup()
self.catastrophic_failure = True
except exceptions.ExecutionError as exc:
print('There was a critical error during execution of Flake8:')
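
On the caller's side the cleanup hook disappears entirely: once run_parallel() owns the pool, an interrupted run has already terminated its workers by the time any outer handler fires. A sketch of what the calling code now relies on (manager.run() is an illustrative stand-in, not Application's real control flow):

    def run_checks(manager):
        try:
            manager.run()  # dispatches to run_serial() or run_parallel()
        except KeyboardInterrupt:
            # No _force_cleanup() needed: run_parallel's finally block has
            # already torn the pool down before this handler runs.
            print('... stopped')
            raise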