This repository has been archived by the owner on Dec 16, 2022. It is now read-only.

upgrade to latest pylint (#3266)
* pylint

* update pylint

* undo a lot of the raise / else

* add bound on typed-ast
joelgrus authored Sep 24, 2019
1 parent d09042e commit 64143c4
Showing 48 changed files with 191 additions and 193 deletions.
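Most of the mechanical churn below follows a handful of pylint checks. The third commit-message bullet, "undo a lot of the raise / else", shows up as hunks that drop a redundant else after a raise; this is the pattern pylint's no-else-raise check (and no-else-return, for returns) targets. A minimal sketch of the rewrite, with hypothetical names rather than code from this repository:

    def check_positive(x: int) -> int:
        # Before: pylint flags the else as redundant, since a raise never
        # falls through to the code after the if/else.
        if x <= 0:
            raise ValueError("x must be positive")
        else:
            return x

    def check_positive_flat(x: int) -> int:
        # After: identical behavior, one less indentation level.
        if x <= 0:
            raise ValueError("x must be positive")
        return x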
4 changes: 2 additions & 2 deletions .pylintrc
@@ -65,7 +65,7 @@ confidence=
 # --enable=similarities". If you want to run only the classes checker, but have
 # no Warning level messages displayed, use"--disable=all --enable=classes
 # --disable=W"
-disable=import-star-module-level,old-octal-literal,oct-method,print-statement,unpacking-in-except,parameter-unpacking,backtick,old-raise-syntax,old-ne-operator,long-suffix,dict-view-method,dict-iter-method,metaclass-assignment,next-method-called,raising-string,indexing-exception,raw_input-builtin,long-builtin,file-builtin,execfile-builtin,coerce-builtin,cmp-builtin,buffer-builtin,basestring-builtin,apply-builtin,filter-builtin-not-iterating,using-cmp-argument,useless-suppression,range-builtin-not-iterating,suppressed-message,no-absolute-import,old-division,cmp-method,reload-builtin,zip-builtin-not-iterating,intern-builtin,unichr-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,input-builtin,round-builtin,hex-method,nonzero-method,map-builtin-not-iterating,missing-docstring,too-many-arguments,too-many-locals,too-many-statements,too-many-branches,too-many-nested-blocks,too-many-instance-attributes,fixme,too-few-public-methods,no-else-return
+disable=import-star-module-level,old-octal-literal,oct-method,print-statement,unpacking-in-except,parameter-unpacking,backtick,old-raise-syntax,old-ne-operator,long-suffix,dict-view-method,dict-iter-method,metaclass-assignment,next-method-called,raising-string,indexing-exception,raw_input-builtin,long-builtin,file-builtin,execfile-builtin,coerce-builtin,cmp-builtin,buffer-builtin,basestring-builtin,apply-builtin,filter-builtin-not-iterating,using-cmp-argument,useless-suppression,range-builtin-not-iterating,suppressed-message,no-absolute-import,old-division,cmp-method,reload-builtin,zip-builtin-not-iterating,intern-builtin,unichr-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,input-builtin,round-builtin,hex-method,nonzero-method,map-builtin-not-iterating,missing-docstring,too-many-arguments,too-many-locals,too-many-statements,too-many-branches,too-many-nested-blocks,too-many-instance-attributes,fixme,too-few-public-methods,no-else-return,logging-fstring-interpolation,unnecessary-pass,no-else-raise


 [REPORTS]
@@ -182,7 +182,7 @@ expected-line-ending-format=
 [BASIC]

 # Good variable names which should always be accepted, separated by a comma
-good-names=i,j,k,ex,Run,_,logger,f1
+good-names=i,j,k,ex,Run,_,logger,f1,T,A

 # Bad variable names which should always be refused, separated by a comma
 bad-names=foo,bar,baz,toto,tutu,tata
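Two kinds of .pylintrc changes here: three checks join the disable list (logging-fstring-interpolation, unnecessary-pass, no-else-raise), and T and A join good-names, presumably to allow short type-variable names that the new pylint would otherwise reject as invalid-name. Disabling logging-fstring-interpolation is what permits the f-string logger calls introduced in params.py below; a minimal sketch of what that check would otherwise flag, assuming pylint's standard W1203 behavior:

    import logging

    logger = logging.getLogger(__name__)
    value = 42
    # W1203: the f-string is interpolated eagerly, even when INFO records
    # are filtered out and never emitted.
    logger.info(f"value = {value}")
    # The lazy form the check prefers: formatting is deferred until a
    # handler actually emits the record.
    logger.info("value = %s", value)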
4 changes: 2 additions & 2 deletions allennlp/commands/find_learning_rate.py
@@ -163,8 +163,8 @@ def find_learning_rate_model(params: Params, serialization_dir: str,
     if os.path.exists(serialization_dir) and os.listdir(serialization_dir):
         raise ConfigurationError(f'Serialization directory {serialization_dir} already exists and is '
                                  f'not empty.')
-    else:
-        os.makedirs(serialization_dir, exist_ok=True)
+
+    os.makedirs(serialization_dir, exist_ok=True)

     prepare_environment(params)

2 changes: 1 addition & 1 deletion allennlp/common/file_utils.py
@@ -24,7 +24,7 @@

 logger = logging.getLogger(__name__) # pylint: disable=invalid-name

-CACHE_ROOT = Path(os.getenv('ALLENNLP_CACHE_ROOT', Path.home() / '.allennlp'))
+CACHE_ROOT = Path(os.getenv('ALLENNLP_CACHE_ROOT', Path.home() / '.allennlp')) # pylint: disable=invalid-envvar-default
 CACHE_DIRECTORY = str(CACHE_ROOT / "cache")
 DEPRECATED_CACHE_DIRECTORY = str(CACHE_ROOT / "datasets")
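The trailing comment silences pylint's invalid-envvar-default check (W1508, if memory serves), which expects the default passed to os.getenv to be a string; here it is a Path. A minimal standalone sketch of why the code is nonetheless fine at runtime:

    import os
    from pathlib import Path

    # os.getenv returns the default unchanged when the variable is unset,
    # so a Path default works; pylint warns only because getenv's
    # documented default type is str.
    cache_root = Path(os.getenv('ALLENNLP_CACHE_ROOT', Path.home() / '.allennlp'))
    print(cache_root)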

7 changes: 3 additions & 4 deletions allennlp/common/params.py
@@ -115,8 +115,7 @@ def unflatten(flat_dict: Dict[str, Any]) -> Dict[str, Any]:
                 raise ConfigurationError("flattened dictionary is invalid")
         if not isinstance(curr_dict, dict) or parts[-1] in curr_dict:
             raise ConfigurationError("flattened dictionary is invalid")
-        else:
-            curr_dict[parts[-1]] = value
+        curr_dict[parts[-1]] = value

     return unflat

@@ -263,7 +262,7 @@ def pop(self, key: str, default: Any = DEFAULT, keep_as_dict: bool = False) -> A
         value = self.params.pop(key, default)

         if keep_as_dict or _is_dict_free(value):
-            logger.info(self.history + key + " = " + str(value)) # type: ignore
+            logger.info(f"{self.history}{key} = {value}")
             return value
         else:
             return self._check_is_dict(key, value)
@@ -390,7 +389,7 @@ def log_recursively(parameters, history):
                 new_local_history = history + key + "."
                 log_recursively(value, new_local_history)
             else:
-                logger.info(history + key + " = " + str(value))
+                logger.info(f"{history}{key} = {value}")

         logger.info("Converting Params object to dict; logging of default "
                     "values will not occur when dictionary parameters are "
@@ -177,7 +177,7 @@ def text_to_instance(self, # type: ignore

     @staticmethod
     def _normalize_word(word):
-        if word == "/." or word == "/?":
+        if word in ("/.", "/?"):
             return word[1:]
         else:
             return word
2 changes: 1 addition & 1 deletion allennlp/data/dataset_readers/ontonotes_ner.py
@@ -16,7 +16,7 @@
 logger = logging.getLogger(__name__) # pylint: disable=invalid-name

 def _normalize_word(word: str):
-    if word == "/." or word == "/?":
+    if word in ("/.", "/?"):
         return word[1:]
     else:
         return word
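Both _normalize_word rewrites replace a pair of equality tests with a single membership test, the form suggested by pylint's consider-using-in check. A minimal sketch with a hypothetical token:

    word = "/."
    # Two comparisons against the same variable ...
    if word == "/." or word == "/?":
        print(word[1:])
    # ... are equivalent to one membership test over a tuple of candidates.
    if word in ("/.", "/?"):
        print(word[1:])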
2 changes: 1 addition & 1 deletion allennlp/data/dataset_readers/semantic_parsing/atis.py
@@ -158,7 +158,7 @@ def text_to_instance(self, # type: ignore
                   'world' : world_field,
                   'linking_scores' : ArrayField(world.linking_scores)}

-        if sql_query_labels != None:
+        if sql_query_labels is not None:
             fields['sql_queries'] = MetadataField(sql_query_labels)
         if self._keep_if_unparseable or action_sequence:
             for production_rule in action_sequence:
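Comparing to None with != is replaced by an identity test, per pylint's singleton-comparison check. A minimal sketch of why identity is the safer form:

    class Weird:
        def __ne__(self, other):
            return False  # claims to equal everything, including None

    w = Weird()
    print(w != None)       # False: __ne__ can lie
    print(w is not None)   # True: identity against the None singleton cannot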
4 changes: 2 additions & 2 deletions allennlp/data/dataset_readers/semantic_parsing/nlvr.py
@@ -158,8 +158,8 @@ def text_to_instance(self, # type: ignore
         # pylint: disable=arguments-differ
         worlds = []
         for structured_representation in structured_representations:
-            boxes = set([Box(object_list, box_id) for box_id, object_list in
-                         enumerate(structured_representation)])
+            boxes = {Box(object_list, box_id)
+                     for box_id, object_list in enumerate(structured_representation)}
             worlds.append(NlvrLanguage(boxes))
         tokenized_sentence = self._tokenizer.tokenize(sentence)
         sentence_field = TextField(tokenized_sentence, self._sentence_token_indexers)
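This hunk, and several below (list_field.py, word_splitter.py, initializers.py, domain_language.py), swap set(...) calls for set literals and comprehensions; the comprehension case is what pylint's consider-using-set-comprehension check reports. A minimal sketch of the pattern, with illustrative data:

    words = ['a', 'b', 'a']
    # Wrapping a list comprehension in set() builds a throwaway list first.
    upper_old = set([w.upper() for w in words])
    # The set comprehension produces the same set without the temporary list.
    upper_new = {w.upper() for w in words}
    assert upper_old == upper_new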
10 changes: 4 additions & 6 deletions allennlp/data/fields/label_field.py
@@ -56,12 +56,10 @@ def __init__(self,
             if not isinstance(label, int):
                 raise ConfigurationError("In order to skip indexing, your labels must be integers. "
                                          "Found label = {}".format(label))
-            else:
-                self._label_id = label
-        else:
-            if not isinstance(label, str):
-                raise ConfigurationError("LabelFields must be passed a string label if skip_indexing=False. "
-                                         "Found label: {} with type: {}.".format(label, type(label)))
+            self._label_id = label
+        elif not isinstance(label, str):
+            raise ConfigurationError("LabelFields must be passed a string label if skip_indexing=False. "
+                                     "Found label: {} with type: {}.".format(label, type(label)))

     def _maybe_warn_for_namespace(self, label_namespace: str) -> None:
         if not (self._label_namespace.endswith("labels") or self._label_namespace.endswith("tags")):
2 changes: 1 addition & 1 deletion allennlp/data/fields/list_field.py
@@ -25,7 +25,7 @@ class ListField(SequenceField[DataArray]):
     contained ``Field`` objects must be of the same type.
     """
     def __init__(self, field_list: List[Field]) -> None:
-        field_class_set = set([field.__class__ for field in field_list])
+        field_class_set = {field.__class__ for field in field_list}
         assert len(field_class_set) == 1, "ListFields must contain a single field type, found " +\
             str(field_class_set)
         # Not sure why mypy has a hard time with this type...
10 changes: 5 additions & 5 deletions allennlp/data/tokenizers/word_splitter.py
@@ -49,11 +49,11 @@ class SimpleWordSplitter(WordSplitter):
     """
     def __init__(self):
         # These are certainly incomplete. But at least it's a start.
-        self.special_cases = set(['mr.', 'mrs.', 'etc.', 'e.g.', 'cf.', 'c.f.', 'eg.', 'al.'])
-        self.contractions = set(["n't", "'s", "'ve", "'re", "'ll", "'d", "'m"])
-        self.contractions |= set([x.replace("'", "’") for x in self.contractions])
-        self.ending_punctuation = set(['"', "'", '.', ',', ';', ')', ']', '}', ':', '!', '?', '%', '”', "’"])
-        self.beginning_punctuation = set(['"', "'", '(', '[', '{', '#', '$', '“', "‘"])
+        self.special_cases = {'mr.', 'mrs.', 'etc.', 'e.g.', 'cf.', 'c.f.', 'eg.', 'al.'}
+        self.contractions = {"n't", "'s", "'ve", "'re", "'ll", "'d", "'m"}
+        self.contractions |= {x.replace("'", "’") for x in self.contractions}
+        self.ending_punctuation = {'"', "'", '.', ',', ';', ')', ']', '}', ':', '!', '?', '%', '”', "’"}
+        self.beginning_punctuation = {'"', "'", '(', '[', '{', '#', '$', '“', "‘"}

     @overrides
     def split_words(self, sentence: str) -> List[Token]:
@@ -294,7 +294,7 @@ def _get_checklist_info(self,
         """
         terminal_indices = []
         target_checklist_list = []
-        agenda_indices_set = set([int(x) for x in agenda.squeeze(0).detach().cpu().numpy()])
+        agenda_indices_set = {int(x) for x in agenda.squeeze(0).detach().cpu().numpy()}
         for index, action in enumerate(all_actions):
             # Each action is a ProductionRule, a tuple where the first item is the production
             # rule string.
2 changes: 1 addition & 1 deletion allennlp/modules/stacked_alternating_lstm.py
@@ -59,7 +59,7 @@ def __init__(self,
         layers = []
         lstm_input_size = input_size
         for layer_index in range(num_layers):
-            go_forward = True if layer_index % 2 == 0 else False
+            go_forward = layer_index % 2 == 0
             layer = AugmentedLstm(lstm_input_size, hidden_size, go_forward,
                                   recurrent_dropout_probability=recurrent_dropout_probability,
                                   use_highway=use_highway,
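The ternary that returned literal True/False collapses to the comparison itself, the rewrite pylint's simplifiable-if-expression check suggests. A quick sketch:

    layer_index = 3
    go_forward_old = True if layer_index % 2 == 0 else False  # flagged
    go_forward_new = layer_index % 2 == 0                     # same bool
    assert go_forward_old == go_forward_new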
4 changes: 2 additions & 2 deletions allennlp/nn/initializers.py
@@ -305,7 +305,7 @@ def __call__(self, module: torch.nn.Module) -> None:
             The Pytorch module to apply the initializers to.
         """
         logger.info("Initializing parameters")
-        unused_regexes = set([initializer[0] for initializer in self._initializers])
+        unused_regexes = {initializer[0] for initializer in self._initializers}
         uninitialized_parameters = set()
         # Store which initialisers were applied to which parameters.
         for name, parameter in module.named_parameters():
@@ -360,7 +360,7 @@ def from_params(cls, params: List[Tuple[str, Params]] = None) -> "InitializerApp
         """
         # pylint: disable=arguments-differ
         params = params or []
-        is_prevent = lambda item: item == "prevent" or item == {"type": "prevent"}
+        is_prevent = lambda item: item == "prevent" or item == {"type": "prevent"} # pylint: disable=consider-using-in
         prevent_regexes = [param[0] for param in params if is_prevent(param[1])]
         params = [param for param in params if param[1] if not is_prevent(param[1])]
         initializers = [(name, Initializer.from_params(init_params)) for name, init_params in params]
1 change: 1 addition & 0 deletions allennlp/predictors/predictor.py
@@ -74,6 +74,7 @@ def json_to_labeled_instances(self, inputs: JsonDict) -> List[Instance]:
         List[instance]
             A list of :class:`~allennlp.data.instance.Instance`
         """
+        # pylint: disable=assignment-from-no-return
         instance = self._json_to_instance(inputs)
         outputs = self._model.forward_on_instance(instance)
         new_instances = self.predictions_to_labeled_instances(instance, outputs)
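The new pragma suppresses assignment-from-no-return. Presumably _json_to_instance has no return statement in this base class because subclasses override it, so pylint infers that the call evaluates to None. A minimal sketch of that false positive, with hypothetical names:

    class BaseReader:
        def _json_to_instance(self, inputs):
            # Meant to be overridden; the visible body returns nothing, so
            # pylint assumes calls to it return None.
            pass

    reader = BaseReader()
    instance = reader._json_to_instance({})  # assignment-from-no-return here
    print(instance)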
2 changes: 1 addition & 1 deletion allennlp/predictors/sentence_tagger.py
@@ -81,7 +81,7 @@ def predictions_to_labeled_instances(self,
                 i += 1
                 tag = predicted_tags[i]
             end_idx = i
-            current_tags = [t if idx >= begin_idx and idx <= end_idx else 'O' \
+            current_tags = [t if begin_idx <= idx <= end_idx else 'O' \
                             for idx, t in enumerate(predicted_tags)]
             predicted_spans.append(current_tags)
             i += 1
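The two-sided bound becomes a chained comparison, per pylint's chained-comparison check. Both forms build the same list; a small sketch with made-up tags:

    begin_idx, end_idx = 2, 4
    predicted_tags = ['O', 'O', 'B', 'I', 'L', 'O']
    old = [t if idx >= begin_idx and idx <= end_idx else 'O'
           for idx, t in enumerate(predicted_tags)]
    new = [t if begin_idx <= idx <= end_idx else 'O'
           for idx, t in enumerate(predicted_tags)]
    assert old == new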
8 changes: 3 additions & 5 deletions allennlp/semparse/domain_languages/domain_language.py
@@ -263,7 +263,7 @@ def __init__(self,
                  start_types: Set[Type] = None) -> None:
         self._functions: Dict[str, Callable] = {}
         self._function_types: Dict[str, List[PredicateType]] = defaultdict(list)
-        self._start_types: Set[PredicateType] = set([PredicateType.get_type(type_) for type_ in start_types])
+        self._start_types: Set[PredicateType] = {PredicateType.get_type(type_) for type_ in start_types}
         for name in dir(self):
             if isinstance(getattr(self, name), types.MethodType):
                 function = getattr(self, name)
@@ -482,8 +482,7 @@ def _execute_expression(self, expression: Any):
         else:
             if isinstance(expression[0], str):
                 raise ExecutionError(f"Unrecognized function: {expression[0]}")
-            else:
-                raise ExecutionError(f"Unsupported expression type: {expression}")
+            raise ExecutionError(f"Unsupported expression type: {expression}")
         arguments = [self._execute_expression(arg) for arg in expression[1:]]
         try:
             return function(*arguments)
@@ -648,8 +647,7 @@ def _get_function_transitions(self,
         else:
             if isinstance(expression, str):
                 raise ParsingError(f"Unrecognized function: {expression[0]}")
-            else:
-                raise ParsingError(f"Unsupported expression type: {expression}")
+            raise ParsingError(f"Unsupported expression type: {expression}")
         if not isinstance(function_type, FunctionType):
             raise ParsingError(f'Zero-arg function or constant called with arguments: {name}')
