Handle EPIPE IOErrors when using more than 1 job

If someone is using flake8 and piping it to a command like `head`, the
command they are piping flake8's output to may close the pipe earlier
than flake8 expects. To avoid extraneous exception output being printed,
we now catch IOErrors and check their errnos to ensure they're something
we know we can ignore.

This also provides flexibility to add further errnos for ignoring on a
case-by-case basis.

Closes #69
This commit is contained in:
Ian Cordasco 2015-08-16 14:00:31 -05:00
parent 57ec990dad
commit d98e1729b3

View file

@ -2,6 +2,7 @@
# Adapted from a contribution of Johan Dahlin
import collections
import errno
import re
import sys
try:
@ -18,6 +19,20 @@ class BaseQReport(pep8.BaseReport):
"""Base Queue Report."""
_loaded = False # Windows support
# Reasoning for ignored error numbers is in-line below
ignored_errors = set([
# EPIPE: Added by sigmavirus24
# > If output during processing is piped to something that may close
# > its own stdin before we've finished printing results, we need to
# > catch a Broken pipe error and continue on.
# > (See also: https://gitlab.com/pycqa/flake8/issues/69)
errno.EPIPE,
# NOTE(sigmavirus24): When adding to this list, include the reasoning
# on the lines before the error code and always append your error
# code. Further, please always add a trailing `,` to reduce the visual
# noise in diffs.
])
def __init__(self, options):
    """Initialize the queue-based report.

    :param options:
        Parsed options namespace; ``options.jobs`` is expected to be a
        positive integer (a queue report only makes sense with at least
        one worker job — presumably callers validate this upstream;
        TODO confirm).
    """
    # Fail fast on a nonsensical job count before delegating to pep8's
    # BaseReport initialization.
    assert options.jobs > 0
    super(BaseQReport, self).__init__(options)
@ -74,6 +89,11 @@ class BaseQReport(pep8.BaseReport):
self._process_main()
except KeyboardInterrupt:
pass
except IOError as ioerr:
# If we happen across an IOError that we aren't certain can/should
# be ignored, we should re-raise the exception.
if ioerr.errno not in self.ignored_errors:
raise
finally:
# ensure all output is flushed before main process continues
sys.stdout.flush()