Use default flake8 config

This commit is contained in:
Anthony Sottile 2019-02-11 19:56:15 -08:00
parent 634383cffd
commit 4575652bd2
16 changed files with 119 additions and 81 deletions

View file

@ -21,22 +21,23 @@ BUILTIN_TYPES = {
}
BuiltinTypeCall = collections.namedtuple('BuiltinTypeCall', ['name', 'line', 'column'])
Call = collections.namedtuple('Call', ['name', 'line', 'column'])
class BuiltinTypeVisitor(ast.NodeVisitor):
class Visitor(ast.NodeVisitor):
def __init__(self, ignore=None, allow_dict_kwargs=True):
# type: (Optional[Sequence[str]], bool) -> None
self.builtin_type_calls = [] # type: List[BuiltinTypeCall]
self.builtin_type_calls = [] # type: List[Call]
self.ignore = set(ignore) if ignore else set()
self.allow_dict_kwargs = allow_dict_kwargs
def _check_dict_call(self, node): # type: (ast.Call) -> bool
return self.allow_dict_kwargs and (getattr(node, 'kwargs', None) or getattr(node, 'keywords', None))
return (
self.allow_dict_kwargs and
(getattr(node, 'kwargs', None) or getattr(node, 'keywords', None))
)
def visit_Call(self, node): # type: (ast.Call) -> None
if not isinstance(node.func, ast.Name):
# Ignore functions that are object attributes (`foo.bar()`).
# Assume that if the user calls `builtins.list()`, they know what
@ -49,15 +50,15 @@ class BuiltinTypeVisitor(ast.NodeVisitor):
elif node.args:
return
self.builtin_type_calls.append(
BuiltinTypeCall(node.func.id, node.lineno, node.col_offset),
Call(node.func.id, node.lineno, node.col_offset),
)
def check_file_for_builtin_type_constructors(filename, ignore=None, allow_dict_kwargs=True):
# type: (str, Optional[Sequence[str]], bool) -> List[BuiltinTypeCall]
def check_file(filename, ignore=None, allow_dict_kwargs=True):
# type: (str, Optional[Sequence[str]], bool) -> List[Call]
with open(filename, 'rb') as f:
tree = ast.parse(f.read(), filename=filename)
visitor = BuiltinTypeVisitor(ignore=ignore, allow_dict_kwargs=allow_dict_kwargs)
visitor = Visitor(ignore=ignore, allow_dict_kwargs=allow_dict_kwargs)
visitor.visit(tree)
return visitor.builtin_type_calls
@ -73,14 +74,17 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
mutex = parser.add_mutually_exclusive_group(required=False)
mutex.add_argument('--allow-dict-kwargs', action='store_true')
mutex.add_argument('--no-allow-dict-kwargs', dest='allow_dict_kwargs', action='store_false')
mutex.add_argument(
'--no-allow-dict-kwargs',
dest='allow_dict_kwargs', action='store_false',
)
mutex.set_defaults(allow_dict_kwargs=True)
args = parser.parse_args(argv)
rc = 0
for filename in args.filenames:
calls = check_file_for_builtin_type_constructors(
calls = check_file(
filename,
ignore=args.ignore,
allow_dict_kwargs=args.allow_dict_kwargs,
@ -89,7 +93,8 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
rc = rc or 1
for call in calls:
print(
'{filename}:{call.line}:{call.column} - Replace {call.name}() with {replacement}'.format(
'{filename}:{call.line}:{call.column}: '
'replace {call.name}() with {replacement}'.format(
filename=filename,
call=call,
replacement=BUILTIN_TYPES[call.name],

View file

@ -18,7 +18,8 @@ def check_has_shebang(path): # type: (str) -> int
print(
'{path}: marked executable but has no (or invalid) shebang!\n'
" If it isn't supposed to be executable, try: chmod -x {quoted}\n"
' If it is supposed to be executable, double-check its shebang.'.format(
' If it is supposed to be executable, double-check its shebang.'
.format(
path=path,
quoted=pipes.quote(path),
),

View file

@ -10,7 +10,7 @@ from typing import Sequence
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='JSON filenames to check.')
parser.add_argument('filenames', nargs='*', help='Filenames to check.')
args = parser.parse_args(argv)
retval = 0

View file

@ -50,7 +50,7 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
'Implies --allow-multiple-documents'
),
)
parser.add_argument('filenames', nargs='*', help='Yaml filenames to check.')
parser.add_argument('filenames', nargs='*', help='Filenames to check.')
args = parser.parse_args(argv)
load_fn = LOAD_FNS[Key(multi=args.multi, unsafe=args.unsafe)]

View file

@ -12,16 +12,16 @@ from typing import Set
from six.moves import configparser
def get_aws_credential_files_from_env(): # type: () -> Set[str]
def get_aws_cred_files_from_env(): # type: () -> Set[str]
"""Extract credential file paths from environment variables."""
files = set()
for env_var in (
'AWS_CONFIG_FILE', 'AWS_CREDENTIAL_FILE', 'AWS_SHARED_CREDENTIALS_FILE',
'BOTO_CONFIG',
):
if env_var in os.environ:
files.add(os.environ[env_var])
return files
return {
os.environ[env_var]
for env_var in (
'AWS_CONFIG_FILE', 'AWS_CREDENTIAL_FILE',
'AWS_SHARED_CREDENTIALS_FILE', 'BOTO_CONFIG',
)
if env_var in os.environ
}
def get_aws_secrets_from_env(): # type: () -> Set[str]
@ -115,7 +115,7 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
# Add the credentials files configured via environment variables to the set
# of files to gather AWS secrets from.
credential_files |= get_aws_credential_files_from_env()
credential_files |= get_aws_cred_files_from_env()
keys = set() # type: Set[str]
for credential_file in credential_files:

View file

@ -110,7 +110,9 @@ def _to_disp(pragma): # type: (bytes) -> str
def main(argv=None): # type: (Optional[Sequence[str]]) -> int
parser = argparse.ArgumentParser('Fixes the encoding pragma of python files')
parser = argparse.ArgumentParser(
'Fixes the encoding pragma of python files',
)
parser.add_argument('filenames', nargs='*', help='Filenames to fix')
parser.add_argument(
'--pragma', default=DEFAULT_PRAGMA, type=_normalize_pragma,

View file

@ -15,8 +15,9 @@ from typing import Union
from six import text_type
def _get_pretty_format(contents, indent, ensure_ascii=True, sort_keys=True, top_keys=()):
# type: (str, str, bool, bool, Sequence[str]) -> str
def _get_pretty_format(
contents, indent, ensure_ascii=True, sort_keys=True, top_keys=(),
): # type: (str, str, bool, bool, Sequence[str]) -> str
def pairs_first(pairs):
# type: (Sequence[Tuple[str, str]]) -> Mapping[str, str]
before = [pair for pair in pairs if pair[0] in top_keys]
@ -29,7 +30,8 @@ def _get_pretty_format(contents, indent, ensure_ascii=True, sort_keys=True, top_
json.loads(contents, object_pairs_hook=pairs_first),
indent=indent,
ensure_ascii=ensure_ascii,
separators=(',', ': '), # Workaround for https://bugs.python.org/issue16333
# Workaround for https://bugs.python.org/issue16333
separators=(',', ': '),
)
# Ensure unicode (Py2) and add the newline that dumps does not end with.
return text_type(json_pretty) + '\n'
@ -75,7 +77,10 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
action='store_true',
dest='no_ensure_ascii',
default=False,
help='Do NOT convert non-ASCII characters to Unicode escape sequences (\\uXXXX)',
help=(
'Do NOT convert non-ASCII characters to Unicode escape sequences '
'(\\uXXXX)'
),
)
parser.add_argument(
'--no-sort-keys',

View file

@ -61,7 +61,10 @@ def fix_requirements(f): # type: (IO[bytes]) -> int
# If we see a newline before any requirements, then this is a
# top of file comment.
if len(requirements) == 1 and line.strip() == b'':
if len(requirement.comments) and requirement.comments[0].startswith(b'#'):
if (
len(requirement.comments) and
requirement.comments[0].startswith(b'#')
):
requirement.value = b'\n'
else:
requirement.comments.append(line)

View file

@ -60,12 +60,12 @@ def main(argv=None): # type: (Optional[Sequence[str]]) -> int
if '' in md_args:
parser.error('--markdown-linebreak-ext requires a non-empty argument')
all_markdown = '*' in md_args
# normalize all extensions; split at ',', lowercase, and force 1 leading '.'
# normalize extensions; split at ',', lowercase, and force 1 leading '.'
md_exts = [
'.' + x.lower().lstrip('.') for x in ','.join(md_args).split(',')
]
# reject probable "eaten" filename as extension (skip leading '.' with [1:])
# reject probable "eaten" filename as extension: skip leading '.' with [1:]
for ext in md_exts:
if any(c in ext[1:] for c in r'./\:'):
parser.error(